| file_name (string, 4-140 chars) | prefix (string, up to 39k chars) | suffix (string, up to 36.1k chars) | middle (string, up to 29.4k chars) | fim_type (4 classes) |
|---|---|---|---|---|
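Each row below is a fill-in-the-middle (FIM) example: `prefix` and `suffix` are the visible context, `middle` is the held-out span the model must produce, and `fim_type` records what kind of span was removed. A minimal sketch of turning a row into a PSM-ordered training string follows; the sentinel token names are illustrative assumptions, not part of this dataset:

```python
# Minimal sketch, assuming PSM (prefix-suffix-middle) ordering and
# hypothetical sentinel tokens; real FIM setups define their own specials.
def build_fim_example(row: dict) -> str:
    return ("<fim_prefix>" + row["prefix"]
            + "<fim_suffix>" + row["suffix"]
            + "<fim_middle>" + row["middle"])

row = {"prefix": "fn add(a: i32, b: i32) -> i32 { a + ",
       "suffix": " }", "middle": "b"}
print(build_fim_example(row))
```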
impl_encryption.rs | // Copyright 2019 TiKV Project Authors. Licensed under Apache-2.0.
use openssl::hash::{self, MessageDigest};
use tidb_query_codegen::rpn_fn;
use tidb_query_datatype::expr::{Error, EvalContext};
use tidb_query_common::Result;
use tidb_query_datatype::codec::data_type::*;
use tidb_query_shared_expr::rand::{gen_random_bytes, MAX_RAND_BYTES_LENGTH};
const SHA0: i64 = 0;
const SHA224: i64 = 224;
const SHA256: i64 = 256;
const SHA384: i64 = 384;
const SHA512: i64 = 512;
#[rpn_fn(nullable)]
#[inline]
pub fn md5(arg: Option<BytesRef>) -> Result<Option<Bytes>> {
match arg {
Some(arg) => hex_digest(MessageDigest::md5(), arg).map(Some),
None => Ok(None),
}
}
#[rpn_fn(nullable)]
#[inline]
pub fn sha1(arg: Option<BytesRef>) -> Result<Option<Bytes>> {
match arg {
Some(arg) => hex_digest(MessageDigest::sha1(), arg).map(Some),
None => Ok(None),
}
}
#[rpn_fn(nullable, capture = [ctx])]
#[inline]
pub fn sha2(
ctx: &mut EvalContext,
input: Option<BytesRef>,
hash_length: Option<&Int>,
) -> Result<Option<Bytes>> {
match (input, hash_length) {
(Some(input), Some(hash_length)) => {
let sha2 = match *hash_length {
SHA0 | SHA256 => MessageDigest::sha256(),
SHA224 => MessageDigest::sha224(),
SHA384 => MessageDigest::sha384(),
SHA512 => MessageDigest::sha512(),
_ => {
ctx.warnings
.append_warning(Error::incorrect_parameters("sha2"));
return Ok(None);
}
};
hex_digest(sha2, input).map(Some)
}
_ => Ok(None),
}
}
// https://dev.mysql.com/doc/refman/5.7/en/password-hashing.html
#[rpn_fn(nullable, capture = [ctx])]
#[inline]
pub fn password(ctx: &mut EvalContext, input: Option<BytesRef>) -> Result<Option<Bytes>> {
ctx.warnings.append_warning(Error::Other(box_err!(
"Warning: Deprecated syntax PASSWORD"
)));
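// Note: this deprecation warning is appended unconditionally, even when the input is NULL.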
match input {
Some(bytes) => {
if bytes.is_empty() {
Ok(Some(Vec::new()))
} else {
let hash1 = hex_digest(MessageDigest::sha1(), bytes)?;
let mut hash2 = hex_digest(MessageDigest::sha1(), hash1.as_slice())?;
hash2.insert(0, b'*');
Ok(Some(hash2))
}
}
None => Ok(None),
}
}
#[inline]
fn hex_digest(hashtype: MessageDigest, input: &[u8]) -> Result<Bytes> {
hash::hash(hashtype, input)
.map(|digest| hex::encode(digest).into_bytes())
.map_err(|e| box_err!("OpenSSL error: {:?}", e))
}
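// MySQL's COMPRESS() prefixes its output with the uncompressed payload
// length as a little-endian u32; UNCOMPRESSED_LENGTH() below simply reads
// that four-byte header back.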
#[rpn_fn(nullable, capture = [ctx])]
#[inline]
pub fn uncompressed_length(ctx: &mut EvalContext, arg: Option<BytesRef>) -> Result<Option<Int>> {
use byteorder::{ByteOrder, LittleEndian};
Ok(arg.as_ref().map(|s| {
if s.is_empty() {
0
} else if s.len() <= 4 {
ctx.warnings.append_warning(Error::zlib_data_corrupted());
0
} else {
Int::from(LittleEndian::read_u32(&s[0..4]))
}
}))
}
#[rpn_fn(nullable, capture = [ctx])]
#[inline]
pub fn random_bytes(_ctx: &mut EvalContext, arg: Option<&Int>) -> Result<Option<Bytes>> {
match arg {
Some(arg) => {
if *arg < 1 || *arg > MAX_RAND_BYTES_LENGTH {
return Err(Error::overflow("length", "random_bytes").into());
}
Ok(Some(gen_random_bytes(*arg as usize)))
}
_ => Ok(None),
}
}
#[cfg(test)]
mod tests {
use tipb::ScalarFuncSig;
use super::*;
use crate::types::test_util::RpnFnScalarEvaluator;
fn test_unary_func_ok_none<'a, I: EvaluableRef<'a>, O: EvaluableRet>(sig: ScalarFuncSig)
where
O: PartialEq,
Option<I>: Into<ScalarValue>,
Option<O>: From<ScalarValue>,
{
assert_eq!(
None,
RpnFnScalarEvaluator::new()
.push_param(Option::<I>::None)
.evaluate::<O>(sig)
.unwrap()
);
}
#[test]
fn test_md5() {
let test_cases = vec![
(vec![], "d41d8cd98f00b204e9800998ecf8427e"),
(b"a".to_vec(), "0cc175b9c0f1b6a831c399e269772661"),
(b"ab".to_vec(), "187ef4436122d1cc2f40dc2b92f0eba0"),
(b"abc".to_vec(), "900150983cd24fb0d6963f7d28e17f72"),
(b"123".to_vec(), "202cb962ac59075b964b07152d234b70"),
(
"你好".as_bytes().to_vec(),
"7eca689f0d3389d9dea66ae112e5cfd7",
),
(
"分布式データベース".as_bytes().to_vec(),
"63c0354797bd261e2cbf8581147eeeda",
),
(vec![0xc0, 0x80], "b26555f33aedac7b2684438cc5d4d05e"),
(vec![0xED, 0xA0, 0x80], "546d3dc8de10fbf8b448f678a47901e4"),
];
for (arg, expect_output) in test_cases {
let expect_output = Some(Bytes::from(expect_output));
let output = RpnFnScalarEvaluator::new()
.push_param(arg)
.evaluate::<Bytes>(ScalarFuncSig::Md5)
.unwrap();
assert_eq!(output, expect_output);
}
test_unary_func_ok_none::<BytesRef, Bytes>(ScalarFuncSig::Md5);
}
#[test]
fn test_sha1() {
let test_cases = vec![
(vec![], "da39a3ee5e6b4b0d3255bfef95601890afd80709"),
(b"a".to_vec(), "86f7e437faa5a7fce15d1ddcb9eaeaea377667b8"),
(b"ab".to_vec(), "da23614e02469a0d7c7bd1bdab5c9c474b1904dc"),
(b"abc".to_vec(), "a9993e364706816aba3e25717850c26c9cd0d89d"),
(b"123".to_vec(), "40bd001563085fc35165329ea1ff5c5ecbdbbeef"),
(
"你好".as_bytes().to_vec(),
"440ee0853ad1e99f962b63e459ef992d7c211722",
),
(
"分布式データベース".as_bytes().to_vec(),
"82aa64080df2ca37550ddfc3419d75ac1df3e0d0",
),
(vec![0xc0, 0x80], "8bf4822782a21d7ac68ece130ac36987548003bd"),
(
vec![0xED, 0xA0, 0x80],
"10db70ec072d000c68dd95879f9b831e43a859fd",
),
];
for (arg, expect_output) in test_cases {
let expect_output = Some(Bytes::from(expect_output));
let output = RpnFnScalarEvaluator::new()
.push_param(arg)
.evaluate::<Bytes>(ScalarFuncSig::Sha1)
.unwrap();
assert_eq!(output, expect_output);
}
test_unary_func_ok_none::<BytesRef, Bytes>(ScalarFuncSig::Sha1);
}
#[test]
fn test_uncompressed_length() {
let cases = vec![
(Some(""), Some(0)),
(
Some("0B000000789CCB48CDC9C95728CF2FCA4901001A0B045D"),
Some(11),
),
(
Some("0C000000789CCB48CDC9C95728CF2F32303402001D8004202E"),
Some(12),
),
(Some("020000000000"), Some(2)),
(Some("0000000001"), Some(0)),
(
Some("02000000789CCB48CDC9C95728CF2FCA4901001A0B045D"),
Some(2),
),
(Some("010203"), Some(0)),
(Some("01020304"), Some(0)),
(None, None),
];
for (s, exp) in cases {
let s = s.map(|inner| hex::decode(inner.as_bytes().to_vec()).unwrap());
let output = RpnFnScalarEvaluator::new()
.push_param(s)
.evaluate(ScalarFuncSig::UncompressedLength)
.unwrap();
assert_eq!(output, exp);
}
}
#[test]
fn test_sha2() {
let cases = vec![
| "pingcap", 0, "2871823be240f8ecd1d72f24c99eaa2e58af18b4b8ba99a4fc2823ba5c43930a"),
("pingcap", 224, "cd036dc9bec69e758401379c522454ea24a6327b48724b449b40c6b7"),
("pingcap", 256, "2871823be240f8ecd1d72f24c99eaa2e58af18b4b8ba99a4fc2823ba5c43930a"),
("pingcap", 384, "c50955b6b0c7b9919740d956849eedcb0f0f90bf8a34e8c1f4e071e3773f53bd6f8f16c04425ff728bed04de1b63db51"),
("pingcap", 512, "ea903c574370774c4844a83b7122105a106e04211673810e1baae7c2ae7aba2cf07465e02f6c413126111ef74a417232683ce7ba210052e63c15fc82204aad80"),
("13572468", 0, "1c91ab1c162fd0cae60a5bb9880f3e7d5a133a65b6057a644b26973d9c55dcfe"),
("13572468", 224, "8ad67735bbf49576219f364f4640d595357a440358d15bf6815a16e4"),
("13572468", 256, "1c91ab1c162fd0cae60a5bb9880f3e7d5a133a65b6057a644b26973d9c55dcfe"),
("13572468.123", 384, "3b4ee302435dc1e15251efd9f3982b1ca6fe4ac778d3260b7bbf3bea613849677eda830239420e448e4c6dc7c2649d89"),
("13572468.123", 512, "4820aa3f2760836557dc1f2d44a0ba7596333fdb60c8a1909481862f4ab0921c00abb23d57b7e67a970363cc3fcb78b25b6a0d45cdcac0e87aa0c96bc51f7f96"),
];
for (input_str, hash_length_i64, exp_str) in cases {
let exp = Some(Bytes::from(exp_str));
let got = RpnFnScalarEvaluator::new()
.push_param(Some(Bytes::from(input_str)))
.push_param(Some(Int::from(hash_length_i64)))
.evaluate::<Bytes>(ScalarFuncSig::Sha2)
.unwrap();
assert_eq!(got, exp, "sha2('{:?}', {:?})", input_str, hash_length_i64);
}
let null_cases = vec![
(ScalarValue::Bytes(None), ScalarValue::Int(Some(1))),
(
ScalarValue::Bytes(Some(b"13572468".to_vec())),
ScalarValue::Int(None),
),
(ScalarValue::Bytes(None), ScalarValue::Int(None)),
(
ScalarValue::Bytes(Some(b"pingcap".to_vec())),
ScalarValue::Int(Some(-1)),
),
(
ScalarValue::Bytes(Some(b"13572468".to_vec())),
ScalarValue::Int(Some(999)),
),
];
for (input_str, hash_length_i64) in null_cases {
assert!(RpnFnScalarEvaluator::new()
.push_param(input_str)
.push_param(hash_length_i64)
.evaluate::<Bytes>(ScalarFuncSig::Sha2)
.unwrap()
.is_none())
}
}
#[test]
fn test_random_bytes() {
let cases = vec![1, 32, 233, 1024];
for len in cases {
let got = RpnFnScalarEvaluator::new()
.push_param(Some(Int::from(len as i64)))
.evaluate::<Bytes>(ScalarFuncSig::RandomBytes)
.unwrap();
assert_eq!(got.unwrap().len(), len);
}
let overflow_tests = vec![
ScalarValue::Int(Some(-32)),
ScalarValue::Int(Some(1025)),
ScalarValue::Int(Some(0)),
];
for len in overflow_tests {
assert!(RpnFnScalarEvaluator::new()
.push_param(len)
.evaluate::<Bytes>(ScalarFuncSig::RandomBytes)
.is_err(),);
}
//test NULL case
assert!(RpnFnScalarEvaluator::new()
.push_param(ScalarValue::Int(None))
.evaluate::<Bytes>(ScalarFuncSig::RandomBytes)
.unwrap()
.is_none())
}
#[test]
fn test_password() {
let cases = vec![
("TiKV", "*cca644408381f962dba8dfb9889db1371ee74208"),
("Pingcap", "*f33bc75eac70ac317621fbbfa560d6251c43cf8a"),
("rust", "*090c2b08e0c1776910e777b917c2185be6554c2e"),
("database", "*02e86b4af5219d0ba6c974908aea62d42eb7da24"),
("raft", "*b23a77787ed44e62ef2570f03ce8982d119fb699"),
];
for (input, output) in cases {
let res = RpnFnScalarEvaluator::new()
.push_param(Some(Bytes::from(input)))
.evaluate::<Bytes>(ScalarFuncSig::Password)
.unwrap();
assert_eq!(res, Some(Bytes::from(output)))
}
// test for null
let res = RpnFnScalarEvaluator::new()
.push_param(ScalarValue::Bytes(None))
.evaluate::<Bytes>(ScalarFuncSig::Password)
.unwrap();
assert_eq!(None, res)
}
}
| ( | identifier_name |
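In the `sha2` implementation above, a `hash_length` of 0 is an alias for 256, and any unsupported length yields SQL NULL plus a warning rather than an error. A rough Python mirror of that dispatch, using only `hashlib` (the function name here is mine, not TiKV's):

```python
import hashlib

# Rough mirror of the sha2 dispatch above: 0 aliases 256; unsupported
# lengths return None (standing in for SQL NULL) instead of raising.
_SHA2 = {0: hashlib.sha256, 224: hashlib.sha224, 256: hashlib.sha256,
         384: hashlib.sha384, 512: hashlib.sha512}

def sha2(data: bytes, hash_length: int):
    ctor = _SHA2.get(hash_length)
    return ctor(data).hexdigest() if ctor else None

assert sha2(b"pingcap", 0) == sha2(b"pingcap", 256)
```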
tae-ui.js | // Text Adaptation Engine User Interface (tae-ui.js)
//-----------------------------------------------------------------------------
// This JavaScript contains the functionality related to the User Interface
// which enriches the Interactive Front-End component with the features of
// the Text Adaptation Engine component.
// - It uses the methods implemented in tae-core.js
// - The Text Adaptation Engine server side code is available in:
// https://github.com/SIMPATICOProject/SimpaticoTAEServer
//-----------------------------------------------------------------------------
var taeUI = (function () {
var instance; // Singleton Instance of the UI component
var featureEnabled = false;
function Singleton () {
// Component-related variables
var primaryColor = '';
var secondaryColor = '';
var elementsToEnhanceClassName = '';
var simplifyBoxTitle = '';
var simplifyBoxClassName = '';
var wordPropertiesClassName = '';
var synonymLabel = '';
var definitionLabel = '';
var wikipediaLabel = ''; // declared here so the assignment in initComponent does not create a global
var emptyText = '';
// Internal usage variables
var paragraphs = []; // Used to store all the tagged paragraphs
var originalStyles = []; // Used to store the tagged paragraphs' CSS styles
var simplifyBoxIdSuffix = '-simp-text-paragraph';
// Component-related methods and behaviour
function initComponent(parameters) {
primaryColor = parameters.primaryColor;
secondaryColor = parameters.secondaryColor;
elementsToEnhanceClassName = parameters.elementsToEnhanceClassName;
simplifyBoxTitle = parameters.simplifyBoxTitle;
simplifyBoxClassName = parameters.simplifyBoxClassName;
wordPropertiesClassName = parameters.wordPropertiesClassName;
synonymLabel = parameters.synonymLabel || 'Synonyms';
definitionLabel = parameters.definitionLabel || 'Definitions';
wikipediaLabel = parameters.wikipediaLabel || 'Wikipedia';
emptyText = parameters.emptyText || 'no simplification found for the text';
taeCORE.getInstance().init({
endpoint: parameters.endpoint,
language: parameters.language
});
}
function enableComponentFeatures() {
if (featureEnabled) return;
featureEnabled = true;
// Gets the tagged paragraphs the first time
if (paragraphs.length === 0) {
paragraphs = document.getElementsByClassName(elementsToEnhanceClassName);
}
// Add special formatting and a couple of attributes to the paragraphs
var paragrapId = 1;
var paragraphName = '';
for (var i = 0, len = paragraphs.length; i < len; i++) {
if (paragraphs[i].className.indexOf(elementsToEnhanceClassName + "-active") < 0) paragraphs[i].className += ' '+elementsToEnhanceClassName + "-active";
paragraphName = "taeParagraph" + paragrapId;
// Store original style
// originalStyles[i] = paragraphs[i].style;
// paragraphs[i].style.position = 'relative';
// paragraphs[i].style.borderLeft = "12px solid " + primaryColor;
// paragraphs[i].style.borderRadius = "16px";
//
// paragraphs[i].style.padding = '0px 0px 0px 8px';
// paragraphs[i].style.margin = '0px 0px 8px 0px';
paragraphs[i].setAttribute("id", paragraphName);
paragraphs[i].setAttribute("onclick",
"taeUI.getInstance()." +
"paragraphEvent('" + paragraphName + "');");
var loadingImage = document.createElement("img");
loadingImage.setAttribute("src", "img/loader.gif");
loadingImage.setAttribute("id", "loading_"+paragraphName);
loadingImage.style.display = "none";
paragraphs[i].appendChild(loadingImage);
paragrapId++;
}
}
function disableComponentFeatures() {
if (!featureEnabled) return;
featureEnabled = false;
// Remove the simplification boxes
var questionsBoxes = document.getElementsByClassName(simplifyBoxClassName);
for (var i = questionsBoxes.length - 1; i >= 0; i--) {
questionsBoxes[i].parentNode.removeChild(questionsBoxes[i]);
}
// Reformat the paragraphs with the original style
for (var i = 0, len = paragraphs.length; i < len; i++) {
// Restore the original style
paragraphs[i].style = originalStyles[i];
paragraphs[i].className = paragraphs[i].className.replace(elementsToEnhanceClassName + "-active", "");
// Remove the onclick event to enhance the paragraph
paragraphs[i].removeAttribute("onclick");
}
}
// It uses the log component to register the produced events
var logger = function(event, details) {
var nop = function(){};
if (logCORE != null) return logCORE.getInstance().taeLogger;
else return {logParagraph: nop, logPhrase: nop, logWord: nop, logFreetext: nop};
}
// If the Component feature is enabled, it calls the TAE engine instance to
// get the simplifications related to the paragraph passed as parameter
// - paragraphID: the id of the paragraph which has produced the event
function paragraphEvent(paragraphID) {
if (!featureEnabled) return;
var currentParagraph = document.getElementById(paragraphID + simplifyBoxIdSuffix);
if ( currentParagraph === null) {
logger().logParagraph(simpaticoEservice, paragraphID);
currentParagraph = document.getElementById(paragraphID);
var text = currentParagraph.textContent ? currentParagraph.textContent : currentParagraph.innerText;//IE uses innerText
taeCORE.getInstance().simplifyText(paragraphID, text, showSimplificationBox);
} else {
hideSimplificationBox(paragraphID);
}
}
// It creates the HTML content of a complex word
// Used by createSimplifiedTextHTML(...)
// - item: the object which contains the description passed as parameter
function createSimplifiedWordLabel(item) {
return '<span class="simp-word" ' +
'onclick="taeUI.getInstance().wordEvent(event, this)">' +
item.originalValue +
'</span>';
}
// It creates the HTML content of a simplified paragraph
// Used by getSimplifiedText(...)
// - originalText: the original text contained in a paragraph
// - simplifications: A list of simplified words of the text
function createSimplifiedTextHTML(originalText, simplifications) {
Array.prototype.keySort = function(key, desc){
this.sort(function(a, b) {
var result = desc ? (a[key] < b[key]) : (a[key] > b[key]);
return result ? 1 : -1;
});
return this;
}
simplifications.keySort('start');
if (simplifications.length == 0)
{
var result = emptyText; // 'no words need to be simplified'
}else{
var result = originalText;
var item = '';
// for each simplified word add an element containing it
for (var i = simplifications.length -1; i >= 0; i--) |
}
return result;
}
// Method used to cancel the propagation of the events
// - event: the event to cancel
function cancelEventPropagation(event) {
event = event || window.event // cross-browser event
if (event.stopPropagation) {
event.stopPropagation(); // W3C standard variant
} else {
event.cancelBubble = true; // IE variant
}
}
// Function called when a user clicks on a difficult word
// It manages the event and shows the synonyms and definition of the
// selected word calling to showWordProperties(...)
// - event: the click event. It is cancelled
// - wordHTMLelement: the element that contains the word
function wordEvent(event, wordHTMLelement) {
cancelEventPropagation(event);
showWordProperties(wordHTMLelement);
}
// Function called when a user clicks on a highlighted word
// It shows synonyms and the definition of the word contained by the
// HTML element passed as parameter
// - wordHTMLelement: the element that contains the word
function showWordProperties(wordHTMLelement) {
var simplifiedBoxNode = document.getElementById(wordHTMLelement
.parentNode
.parentNode
.parentNode.id);
var paragraphId = simplifiedBoxNode.parentNode.id;
var currentBox = simplifiedBoxNode
.getElementsByClassName(wordPropertiesClassName)[0];
// If the currentBox is not created, create and attach it
if (currentBox == null) {
currentBox = document.createElement('li');
currentBox.className = wordPropertiesClassName;
currentBox.setAttribute("onclick",
"taeUI.getInstance()." +
"wordPropertiesEvent(event,'" + paragraphId + "');");
simplifiedBoxNode.getElementsByTagName('ul')[0].appendChild(currentBox);
}
// Get the synonyms and definition
var definition = taeCORE.getInstance()
.termDefinition(paragraphId, wordHTMLelement.innerHTML);
var synonyms = taeCORE.getInstance()
.termSynonyms(paragraphId, wordHTMLelement.innerHTML);
var wiki = taeCORE.getInstance()
.termWikipedia(paragraphId, wordHTMLelement.innerHTML);
// Update the content
currentBox.innerHTML = '<b>' + wordHTMLelement.innerText + '</b></br>';
if (definition != null) // If the word has definition show it
currentBox.innerHTML += '<i>' + definitionLabel + ':' + '</i>'
+ definition
+ '</br>';
if (synonyms != null) // If the word has synonyms show them
currentBox.innerHTML += '<i>' + synonymLabel +':' + '</i>' + synonyms;
if (wiki != null) // If the word has a wikipedia link
currentBox.innerHTML += '<br/><i>' + wikipediaLabel +':' + '</i><a target="_blank" href="'+wiki+'">' + wiki + '</a>';
logger().logWord(simpaticoEservice, wordHTMLelement.innerHTML);
}
// Function called when a user clicks on a WordProperties box
// It hides the selected WordProperties box
// - event: the click event. It is cancelled
// - paragraphID: the id of the paragraph that contains the WordProperties box
function hideWordProperties(event, paragraphID) {
cancelEventPropagation(event);
var paragraphNode = document.getElementById(paragraphID);
var currentBox = paragraphNode
.getElementsByClassName(wordPropertiesClassName)[0];
if (currentBox != null) {
currentBox.parentNode.removeChild(currentBox);
}
}
// Draw the simplification box
// - paragraphID: the id of the paragraph
// - originalText: the original text contained in the paragraph
// - response: the JSON object with the simplifications related to the paragraph
function showSimplificationBox(paragraphID, originalText, response) {
// Create the Simplification Box div
var questionsBox = document.createElement('div');
questionsBox.id = paragraphID + simplifyBoxIdSuffix;
questionsBox.className = simplifyBoxClassName;
// 1. The title is attached
var questionsHtml = '<p>' + simplifyBoxTitle + '</p>';
// 2. The simplification is attached
questionsHtml += '<ul>';
questionsHtml += '<li>' + createSimplifiedTextHTML(
originalText,
response.simplifications) + '</li>';
questionsHtml += '</ul>';
// 3. The Simplification Box div is attached to the corresponding paragraph
questionsBox.innerHTML = questionsHtml;
document.getElementById(paragraphID).appendChild(questionsBox);
document.getElementById('loading_'+paragraphID).style.display = "none";
} //showSimplificationBox
// Hide the simplification box attached to the paragraph passed as parameter
// - paragraphID: the id of the paragraph
function hideSimplificationBox(paragraphID) {
var sBoxToRemove = document.getElementById(paragraphID + simplifyBoxIdSuffix);
sBoxToRemove.parentNode.removeChild(sBoxToRemove);
}
return {
// Public definitions
init: initComponent, // Called only one time
enable: enableComponentFeatures, // Called when the Component button is enabled
disable: disableComponentFeatures, // Called when the Component button is disabled or another one enabled
isEnabled: function() { return featureEnabled;}, // Returns if the feature is enabled
paragraphEvent: paragraphEvent,
wordEvent: wordEvent,
wordPropertiesEvent: hideWordProperties
};
}
return {
getInstance: function() {
if(!instance) instance = Singleton();
return instance;
}
};
})(); | {
item = simplifications[i];
console.log(item);
result = result.substring(0, item.start) +
createSimplifiedWordLabel(item) +
result.substring(item.end, result.length);
} | conditional_block |
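`createSimplifiedTextHTML` above sorts the simplification spans by `start` and then splices replacements in from the end of the string backwards, so the earlier `(start, end)` offsets stay valid while the string grows. The same trick in a minimal Python sketch:

```python
# Splice-from-the-end: applying replacements in descending start order
# means earlier offsets are never shifted by a previous edit.
def apply_spans(text: str, spans: list) -> str:
    for start, end, replacement in sorted(spans, reverse=True):
        text = text[:start] + replacement + text[end:]
    return text

print(apply_spans("a big word", [(2, 5, "<b>big</b>"), (6, 10, "<b>word</b>")]))
# -> a <b>big</b> <b>word</b>
```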
tae-ui.js | // Text Adaptation Engine User Interface (tae-ui.js)
//-----------------------------------------------------------------------------
// This JavaScript contains the functionality related to the User Interface
// which enriches the Interactive Front-End component with the features of
// the Text Adaptation Engine component.
// - It uses the methods implemented in tae-core.js
// - The Text Adaptation Engine server side code is available in:
// https://github.com/SIMPATICOProject/SimpaticoTAEServer
//-----------------------------------------------------------------------------
|
// Component-related variables
var primaryColor = '';
var secondaryColor = '';
var elementsToEnhanceClassName = '';
var simplifyBoxTitle = '';
var simplifyBoxClassName = '';
var wordPropertiesClassName = '';
var synonymLabel = '';
var definitionLabel = '';
var wikipediaLabel = ''; // declared here so the assignment in initComponent does not create a global
var emptyText = '';
// Internal usage variables
var paragraphs = []; // Used to store all the tagged paragraphs
var originalStyles = []; // Used to store the tagged paragraphs' CSS styles
var simplifyBoxIdSuffix = '-simp-text-paragraph';
// Component-related methods and behaviour
function initComponent(parameters) {
primaryColor = parameters.primaryColor;
secondaryColor = parameters.secondaryColor;
elementsToEnhanceClassName = parameters.elementsToEnhanceClassName;
simplifyBoxTitle = parameters.simplifyBoxTitle;
simplifyBoxClassName = parameters.simplifyBoxClassName;
wordPropertiesClassName = parameters.wordPropertiesClassName;
synonymLabel = parameters.synonymLabel || 'Synonyms';
definitionLabel = parameters.definitionLabel || 'Definitions';
wikipediaLabel = parameters.wikipediaLabel || 'Wikipedia';
emptyText = parameters.emptyText || 'no simplification found for the text';
taeCORE.getInstance().init({
endpoint: parameters.endpoint,
language: parameters.language
});
}
function enableComponentFeatures() {
if (featureEnabled) return;
featureEnabled = true;
// Gets the tagged paragraphs the first time
if (paragraphs.length === 0) {
paragraphs = document.getElementsByClassName(elementsToEnhanceClassName);
}
// Add special formatting and a couple of attributes to the paragraphs
var paragrapId = 1;
var paragraphName = '';
for (var i = 0, len = paragraphs.length; i < len; i++) {
if (paragraphs[i].className.indexOf(elementsToEnhanceClassName + "-active") < 0) paragraphs[i].className += ' '+elementsToEnhanceClassName + "-active";
paragraphName = "taeParagraph" + paragrapId;
// Store original style
// originalStyles[i] = paragraphs[i].style;
// paragraphs[i].style.position = 'relative';
// paragraphs[i].style.borderLeft = "12px solid " + primaryColor;
// paragraphs[i].style.borderRadius = "16px";
//
// paragraphs[i].style.padding = '0px 0px 0px 8px';
// paragraphs[i].style.margin = '0px 0px 8px 0px';
paragraphs[i].setAttribute("id", paragraphName);
paragraphs[i].setAttribute("onclick",
"taeUI.getInstance()." +
"paragraphEvent('" + paragraphName + "');");
var loadingImage = document.createElement("img");
loadingImage.setAttribute("src", "img/loader.gif");
loadingImage.setAttribute("id", "loading_"+paragraphName);
loadingImage.style.display = "none";
paragraphs[i].appendChild(loadingImage);
paragrapId++;
}
}
function disableComponentFeatures() {
if (!featureEnabled) return;
featureEnabled = false;
// Remove the simplification boxes
var questionsBoxes = document.getElementsByClassName(simplifyBoxClassName);
for (var i = questionsBoxes.length - 1; i >= 0; i--) {
questionsBoxes[i].parentNode.removeChild(questionsBoxes[i]);
}
// Reformat the paragraphs with the original style
for (var i = 0, len = paragraphs.length; i < len; i++) {
// Restore the original style
paragraphs[i].style = originalStyles[i];
paragraphs[i].className = paragraphs[i].className.replace(elementsToEnhanceClassName + "-active", "");
// Remove the onclick event to enhance the paragraph
paragraphs[i].removeAttribute("onclick");
}
}
// It uses the log component to register the produced events
var logger = function(event, details) {
var nop = function(){};
if (logCORE != null) return logCORE.getInstance().taeLogger;
else return {logParagraph: nop, logPhrase: nop, logWord: nop, logFreetext: nop};
}
// If the Component feature is enabled, it calls the TAE engine instance to
// get the simplifications related to the paragraph passed as parameter
// - paragraphID: the id of the paragraph which has produced the event
function paragraphEvent(paragraphID) {
if (!featureEnabled) return;
var currentParagraph = document.getElementById(paragraphID + simplifyBoxIdSuffix);
if ( currentParagraph === null) {
logger().logParagraph(simpaticoEservice, paragraphID);
currentParagraph = document.getElementById(paragraphID);
var text = currentParagraph.textContent ? currentParagraph.textContent : currentParagraph.innerText;//IE uses innerText
taeCORE.getInstance().simplifyText(paragraphID, text, showSimplificationBox);
} else {
hideSimplificationBox(paragraphID);
}
}
// It creates the HTML content of a complex word
// Used by createSimplifiedTextHTML(...)
// - item: the object which contains the description passed as parameter
function createSimplifiedWordLabel(item) {
return '<span class="simp-word" ' +
'onclick="taeUI.getInstance().wordEvent(event, this)">' +
item.originalValue +
'</span>';
}
// It creates the HTML content of a simplified paragraph
// Used by getSimplifiedText(...)
// - originalText: the original text contained in a paragraph
// - simplifications: A list of simplified words of the text
function createSimplifiedTextHTML(originalText, simplifications) {
Array.prototype.keySort = function(key, desc){
this.sort(function(a, b) {
var result = desc ? (a[key] < b[key]) : (a[key] > b[key]);
return result ? 1 : -1;
});
return this;
}
simplifications.keySort('start');
if (simplifications.length == 0)
{
var result = emptyText; // 'no words need to be simplified'
}else{
var result = originalText;
var item = '';
// for each simplified word add an element containing it
for (var i = simplifications.length -1; i >= 0; i--) {
item = simplifications[i];
console.log(item);
result = result.substring(0, item.start) +
createSimplifiedWordLabel(item) +
result.substring(item.end, result.length);
}
}
return result;
}
// Method used to cancel the propagation of the events
// - event: the event to cancel
function cancelEventPropagation(event) {
event = event || window.event // cross-browser event
if (event.stopPropagation) {
event.stopPropagation(); // W3C standard variant
} else {
event.cancelBubble = true; // IE variant
}
}
// Function called when a user clicks on a difficult word
// It manages the event and shows the synonyms and definition of the
// selected word calling to showWordProperties(...)
// - event: the click event. It is cancelled
// - wordHTMLelement: the element that contains the word
function wordEvent(event, wordHTMLelement) {
cancelEventPropagation(event);
showWordProperties(wordHTMLelement);
}
// Function called when a user clicks on a highlighted word
// It shows synonyms and the definition of the word contained by the
// HTML element passed as parameter
// - wordHTMLelement: the element that contains the word
function showWordProperties(wordHTMLelement) {
var simplifiedBoxNode = document.getElementById(wordHTMLelement
.parentNode
.parentNode
.parentNode.id);
var paragraphId = simplifiedBoxNode.parentNode.id;
var currentBox = simplifiedBoxNode
.getElementsByClassName(wordPropertiesClassName)[0];
// If the currentBox is not created, create and attach it
if (currentBox == null) {
currentBox = document.createElement('li');
currentBox.className = wordPropertiesClassName;
currentBox.setAttribute("onclick",
"taeUI.getInstance()." +
"wordPropertiesEvent(event,'" + paragraphId + "');");
simplifiedBoxNode.getElementsByTagName('ul')[0].appendChild(currentBox);
}
// Get the synonyms and definition
var definition = taeCORE.getInstance()
.termDefinition(paragraphId, wordHTMLelement.innerHTML);
var synonyms = taeCORE.getInstance()
.termSynonyms(paragraphId, wordHTMLelement.innerHTML);
var wiki = taeCORE.getInstance()
.termWikipedia(paragraphId, wordHTMLelement.innerHTML);
// Update the content
currentBox.innerHTML = '<b>' + wordHTMLelement.innerText + '</b></br>';
if (definition != null) // If the word has definition show it
currentBox.innerHTML += '<i>' + definitionLabel + ':' + '</i>'
+ definition
+ '</br>';
if (synonyms != null) // If the word has synonyms show them
currentBox.innerHTML += '<i>' + synonymLabel +':' + '</i>' + synonyms;
if (wiki != null) // If the word has a wikipedia link
currentBox.innerHTML += '<br/><i>' + wikipediaLabel +':' + '</i><a target="_blank" href="'+wiki+'">' + wiki + '</a>';
logger().logWord(simpaticoEservice, wordHTMLelement.innerHTML);
}
// Function called when a user clicks on a WordProperties box
// It hides the selected WordProperties box
// - event: the click event. It is cancelled
// - paragraphID: the id of the paragraph that contains the WordProperties box
function hideWordProperties(event, paragraphID) {
cancelEventPropagation(event);
var paragraphNode = document.getElementById(paragraphID);
var currentBox = paragraphNode
.getElementsByClassName(wordPropertiesClassName)[0];
if (currentBox != null) {
currentBox.parentNode.removeChild(currentBox);
}
}
// Draw the simplification box
// - paragraphID: the id of the paragraph
// - originalText: the original text contained in the paragraph
// - response: the JSON object with the simplifications related to the paragraph
function showSimplificationBox(paragraphID, originalText, response) {
// Create the Simplification Box div
var questionsBox = document.createElement('div');
questionsBox.id = paragraphID + simplifyBoxIdSuffix;
questionsBox.className = simplifyBoxClassName;
// 1. The title is attached
var questionsHtml = '<p>' + simplifyBoxTitle + '</p>';
// 2. The simplification is attached
questionsHtml += '<ul>';
questionsHtml += '<li>' + createSimplifiedTextHTML(
originalText,
response.simplifications) + '</li>';
questionsHtml += '</ul>';
// 3. The Simplification Box div is attached to the corresponding paragraph
questionsBox.innerHTML = questionsHtml;
document.getElementById(paragraphID).appendChild(questionsBox);
document.getElementById('loading_'+paragraphID).style.display = "none";
} //showSimplificationBox
// Hide the simplification box attached to the paragraph passed as parameter
// - paragraphID: the id of the paragraph
function hideSimplificationBox(paragraphID) {
var sBoxToRemove = document.getElementById(paragraphID + simplifyBoxIdSuffix);
sBoxToRemove.parentNode.removeChild(sBoxToRemove);
}
return {
// Public definitions
init: initComponent, // Called only one time
enable: enableComponentFeatures, // Called when the Component button is enabled
disable: disableComponentFeatures, // Called when the Component button is disabled or another one enabled
isEnabled: function() { return featureEnabled;}, // Returns if the feature is enabled
paragraphEvent: paragraphEvent,
wordEvent: wordEvent,
wordPropertiesEvent: hideWordProperties
};
}
return {
getInstance: function() {
if(!instance) instance = Singleton();
return instance;
}
};
})(); | var taeUI = (function () {
var instance; // Singleton Instance of the UI component
var featureEnabled = false;
function Singleton () {
| random_line_split |
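This row's split lands inside the module wrapper: the file exposes `taeUI.getInstance()`, a lazy singleton built from an IIFE and a closure. A compact Python analog of that pattern, for comparison only:

```python
# Lazy singleton, analogous to taeUI.getInstance(): the instance is
# created on first access and reused afterwards.
class TaeUI:
    _instance = None

    @classmethod
    def get_instance(cls):
        if cls._instance is None:
            cls._instance = cls()
        return cls._instance

assert TaeUI.get_instance() is TaeUI.get_instance()
```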
tae-ui.js | // Text Adaptation Engine User Interface (tae-ui.js)
//-----------------------------------------------------------------------------
// This JavaScript contains the functionality related to the User Interface
// which enriches the Interactive Front-End component with the features of
// the Text Adaptation Engine component.
// - It uses the methods implemented in tae-core.js
// - The Text Adaptation Engine server side code is available in:
// https://github.com/SIMPATICOProject/SimpaticoTAEServer
//-----------------------------------------------------------------------------
var taeUI = (function () {
var instance; // Singleton Instance of the UI component
var featureEnabled = false;
function Singleton () {
// Component-related variables
var primaryColor = '';
var secondaryColor = '';
var elementsToEnhanceClassName = '';
var simplifyBoxTitle = '';
var simplifyBoxClassName = '';
var wordPropertiesClassName = '';
var synonymLabel = '';
var definitionLabel = '';
var wikipediaLabel = ''; // declared here so the assignment in initComponent does not create a global
var emptyText = '';
// Internal usage variables
var paragraphs = []; // Used to store all the tagged paragraphs
var originalStyles = []; // Used to store the tagged paragraphs' CSS styles
var simplifyBoxIdSuffix = '-simp-text-paragraph';
// Component-related methods and behaviour
function initComponent(parameters) {
primaryColor = parameters.primaryColor;
secondaryColor = parameters.secondaryColor;
elementsToEnhanceClassName = parameters.elementsToEnhanceClassName;
simplifyBoxTitle = parameters.simplifyBoxTitle;
simplifyBoxClassName = parameters.simplifyBoxClassName;
wordPropertiesClassName = parameters.wordPropertiesClassName;
synonymLabel = parameters.synonymLabel || 'Synonyms';
definitionLabel = parameters.definitionLabel || 'Definitions';
wikipediaLabel = parameters.wikipediaLabel || 'Wikipedia';
emptyText = parameters.emptyText || 'no simplification found for the text';
taeCORE.getInstance().init({
endpoint: parameters.endpoint,
language: parameters.language
});
}
function enableComponentFeatures() {
if (featureEnabled) return;
featureEnabled = true;
// Gets the tagged paragraphs the first time
if (paragraphs.length === 0) {
paragraphs = document.getElementsByClassName(elementsToEnhanceClassName);
}
// Add special formatting and a couple of attributes to the paragraphs
var paragrapId = 1;
var paragraphName = '';
for (var i = 0, len = paragraphs.length; i < len; i++) {
if (paragraphs[i].className.indexOf(elementsToEnhanceClassName + "-active") < 0) paragraphs[i].className += ' '+elementsToEnhanceClassName + "-active";
paragraphName = "taeParagraph" + paragrapId;
// Store original style
// originalStyles[i] = paragraphs[i].style;
// paragraphs[i].style.position = 'relative';
// paragraphs[i].style.borderLeft = "12px solid " + primaryColor;
// paragraphs[i].style.borderRadius = "16px";
//
// paragraphs[i].style.padding = '0px 0px 0px 8px';
// paragraphs[i].style.margin = '0px 0px 8px 0px';
paragraphs[i].setAttribute("id", paragraphName);
paragraphs[i].setAttribute("onclick",
"taeUI.getInstance()." +
"paragraphEvent('" + paragraphName + "');");
var loadingImage = document.createElement("img");
loadingImage.setAttribute("src", "img/loader.gif");
loadingImage.setAttribute("id", "loading_"+paragraphName);
loadingImage.style.display = "none";
paragraphs[i].appendChild(loadingImage);
paragrapId++;
}
}
function disableComponentFeatures() {
if (!featureEnabled) return;
featureEnabled = false;
// Remove the simplification boxes
var questionsBoxes = document.getElementsByClassName(simplifyBoxClassName);
for (var i = questionsBoxes.length - 1; i >= 0; i--) {
questionsBoxes[i].parentNode.removeChild(questionsBoxes[i]);
}
// Reformat the paragraphs with the original style
for (var i = 0, len = paragraphs.length; i < len; i++) {
// Restore the original style
paragraphs[i].style = originalStyles[i];
paragraphs[i].className = paragraphs[i].className.replace(elementsToEnhanceClassName + "-active", "");
// Remove the onclick event to enhance the paragraph
paragraphs[i].removeAttribute("onclick");
}
}
// It uses the log component to register the produced events
var logger = function(event, details) {
var nop = function(){};
if (logCORE != null) return logCORE.getInstance().taeLogger;
else return {logParagraph: nop, logPhrase: nop, logWord: nop, logFreetext: nop};
}
// If the Component feature is enabled, it calls the TAE engine instance to
// get the simplifications related to the paragraph passed as parameter
// - paragraphID: the id of the paragraph which has produced the event
function paragraphEvent(paragraphID) {
if (!featureEnabled) return;
var currentParagraph = document.getElementById(paragraphID + simplifyBoxIdSuffix);
if ( currentParagraph === null) {
logger().logParagraph(simpaticoEservice, paragraphID);
currentParagraph = document.getElementById(paragraphID);
var text = currentParagraph.textContent ? currentParagraph.textContent : currentParagraph.innerText;//IE uses innerText
taeCORE.getInstance().simplifyText(paragraphID, text, showSimplificationBox);
} else {
hideSimplificationBox(paragraphID);
}
}
// It creates the HTML content of a complex word
// Used by createSimplifiedTextHTML(...)
// - item: the object which contains the description passed as parameter
function createSimplifiedWordLabel(item) {
return '<span class="simp-word" ' +
'onclick="taeUI.getInstance().wordEvent(event, this)">' +
item.originalValue +
'</span>';
}
// It creates the HTML content of a simplified paragraph
// Used by getSimplifiedText(...)
// - originalText: the original text contained in a paragraph
// - simplifications: A list of simplified words of the text
function createSimplifiedTextHTML(originalText, simplifications) {
Array.prototype.keySort = function(key, desc){
this.sort(function(a, b) {
var result = desc ? (a[key] < b[key]) : (a[key] > b[key]);
return result ? 1 : -1;
});
return this;
}
simplifications.keySort('start');
if (simplifications.length == 0)
{
var result = emptyText; // 'no words need to be simplified'
}else{
var result = originalText;
var item = '';
// for each simplified word add an element containing it
for (var i = simplifications.length -1; i >= 0; i--) {
item = simplifications[i];
console.log(item);
result = result.substring(0, item.start) +
createSimplifiedWordLabel(item) +
result.substring(item.end, result.length);
}
}
return result;
}
// Method used to cancel the propagation of the events
// - event: the event to cancel
function cancelEventPropagation(event) {
event = event || window.event // cross-browser event
if (event.stopPropagation) {
event.stopPropagation(); // W3C standard variant
} else {
event.cancelBubble = true; // IE variant
}
}
// Function called when a user clicks on a difficult word
// It manages the event and shows the synonyms and definition of the
// selected word calling to showWordProperties(...)
// - event: the click event. It is cancelled
// - wordHTMLelement: the element that contains the word
function wordEvent(event, wordHTMLelement) {
cancelEventPropagation(event);
showWordProperties(wordHTMLelement);
}
// Function called when a user clicks on a highlighted word
// It shows synonyms and the definition of the word contained by the
// HTML element passed as parameter
// - wordHTMLelement: the element that contains the word
function showWordProperties(wordHTMLelement) {
var simplifiedBoxNode = document.getElementById(wordHTMLelement
.parentNode
.parentNode
.parentNode.id);
var paragraphId = simplifiedBoxNode.parentNode.id;
var currentBox = simplifiedBoxNode
.getElementsByClassName(wordPropertiesClassName)[0];
// If the currentBox is not created, create and attach it
if (currentBox == null) {
currentBox = document.createElement('li');
currentBox.className = wordPropertiesClassName;
currentBox.setAttribute("onclick",
"taeUI.getInstance()." +
"wordPropertiesEvent(event,'" + paragraphId + "');");
simplifiedBoxNode.getElementsByTagName('ul')[0].appendChild(currentBox);
}
// Get the synonyms and definition
var definition = taeCORE.getInstance()
.termDefinition(paragraphId, wordHTMLelement.innerHTML);
var synonyms = taeCORE.getInstance()
.termSynonyms(paragraphId, wordHTMLelement.innerHTML);
var wiki = taeCORE.getInstance()
.termWikipedia(paragraphId, wordHTMLelement.innerHTML);
// Update the content
currentBox.innerHTML = '<b>' + wordHTMLelement.innerText + '</b></br>';
if (definition != null) // If the word has definition show it
currentBox.innerHTML += '<i>' + definitionLabel + ':' + '</i>'
+ definition
+ '</br>';
if (synonyms != null) // If the word has synonyms show them
currentBox.innerHTML += '<i>' + synonymLabel +':' + '</i>' + synonyms;
if (wiki != null) // If the word has a wikipedia link
currentBox.innerHTML += '<br/><i>' + wikipediaLabel +':' + '</i><a target="_blank" href="'+wiki+'">' + wiki + '</a>';
logger().logWord(simpaticoEservice, wordHTMLelement.innerHTML);
}
// Function called when a user clicks on a WordProperties box
// It hides the selected WordProperties box
// - event: the click event. It is cancelled
// - paragraphID: the id of the paragraph that contains the WordProperties box
function hideWordProperties(event, paragraphID) {
cancelEventPropagation(event);
var paragraphNode = document.getElementById(paragraphID);
var currentBox = paragraphNode
.getElementsByClassName(wordPropertiesClassName)[0];
if (currentBox != null) {
currentBox.parentNode.removeChild(currentBox);
}
}
// Draw the simplification box
// - paragraphID: the id of the paragraph
// - originalText: the original text contained in the paragraph
// - response: the JSON object with the simplifications related to the paragraph
function | (paragraphID, originalText, response) {
// Create the Simplification Box div
var questionsBox = document.createElement('div');
questionsBox.id = paragraphID + simplifyBoxIdSuffix;
questionsBox.className = simplifyBoxClassName;
// 1. The title is attached
var questionsHtml = '<p>' + simplifyBoxTitle + '</p>';
// 2. The simplification is attached
questionsHtml += '<ul>';
questionsHtml += '<li>' + createSimplifiedTextHTML(
originalText,
response.simplifications) + '</li>';
questionsHtml += '</ul>';
// 3. The Simplification Box div is attached to the corresponding paragraph
questionsBox.innerHTML = questionsHtml;
document.getElementById(paragraphID).appendChild(questionsBox);
document.getElementById('loading_'+paragraphID).style.display = "none";
} //showSimplificationBox
// Hide the simplification box attached to the paragraph passed as parameter
// - paragraphID: the id of the paragraph
function hideSimplificationBox(paragraphID) {
var sBoxToRemove = document.getElementById(paragraphID + simplifyBoxIdSuffix);
sBoxToRemove.parentNode.removeChild(sBoxToRemove);
}
return {
// Public definitions
init: initComponent, // Called only one time
enable: enableComponentFeatures, // Called when the Component button is enabled
disable: disableComponentFeatures, // Called when the Component button is disabled or another one enabled
isEnabled: function() { return featureEnabled;}, // Returns if the feature is enabled
paragraphEvent: paragraphEvent,
wordEvent: wordEvent,
wordPropertiesEvent: hideWordProperties
};
}
return {
getInstance: function() {
if(!instance) instance = Singleton();
return instance;
}
};
})(); | showSimplificationBox | identifier_name |
tae-ui.js | // Text Adaptation Engine User Interface (tae-ui.js)
//-----------------------------------------------------------------------------
// This JavaScript contains the functionality related to the User Interface
// which enriches the Interactive Front-End component with the features of
// the Text Adaptation Engine component.
// - It uses the methods implemented in tae-core.js
// - The Text Adaptation Engine server side code is available in:
// https://github.com/SIMPATICOProject/SimpaticoTAEServer
//-----------------------------------------------------------------------------
var taeUI = (function () {
var instance; // Singleton Instance of the UI component
var featureEnabled = false;
function Singleton () {
// Component-related variables
var primaryColor = '';
var secondaryColor = '';
var elementsToEnhanceClassName = '';
var simplifyBoxTitle = '';
var simplifyBoxClassName = '';
var wordPropertiesClassName = '';
var synonymLabel = '';
var definitionLabel = '';
var wikipediaLabel = ''; // declared here so the assignment in initComponent does not create a global
var emptyText = '';
// Internal usage variables
var paragraphs = []; // Used to store all the tagged paragraphs
var originalStyles = []; // Used to store the tagged paragraphs' CSS styles
var simplifyBoxIdSuffix = '-simp-text-paragraph';
// Component-related methods and behaviour
function initComponent(parameters) {
primaryColor = parameters.primaryColor;
secondaryColor = parameters.secondaryColor;
elementsToEnhanceClassName = parameters.elementsToEnhanceClassName;
simplifyBoxTitle = parameters.simplifyBoxTitle;
simplifyBoxClassName = parameters.simplifyBoxClassName;
wordPropertiesClassName = parameters.wordPropertiesClassName;
synonymLabel = parameters.synonymLabel || 'Synonyms';
definitionLabel = parameters.definitionLabel || 'Definitions';
wikipediaLabel = parameters.wikipediaLabel || 'Wikipedia';
emptyText = parameters.emptyText || 'no simplification found for the text';
taeCORE.getInstance().init({
endpoint: parameters.endpoint,
language: parameters.language
});
}
function enableComponentFeatures() |
function disableComponentFeatures() {
if (!featureEnabled) return;
featureEnabled = false;
// Remove the simplification boxes
var questionsBoxes = document.getElementsByClassName(simplifyBoxClassName);
for (var i = questionsBoxes.length - 1; i >= 0; i--) {
questionsBoxes[i].parentNode.removeChild(questionsBoxes[i]);
}
// Reformat the paragraphs with the original style
for (var i = 0, len = paragraphs.length; i < len; i++) {
// Restore the original style
paragraphs[i].style = originalStyles[i];
paragraphs[i].className = paragraphs[i].className.replace(elementsToEnhanceClassName + "-active", "");
// Remove the onclick event to enhance the paragraph
paragraphs[i].removeAttribute("onclick");
}
}
// It uses the log component to register the produced events
var logger = function(event, details) {
var nop = function(){};
if (logCORE != null) return logCORE.getInstance().taeLogger;
else return {logParagraph: nop, logPhrase: nop, logWord: nop, logFreetext: nop};
}
// If the Component feature is enabled, it calls the TAE engine instance to
// get the simplifications related to the paragraph passed as parameter
// - paragraphID: the id of the paragraph which has produced the event
function paragraphEvent(paragraphID) {
if (!featureEnabled) return;
var currentParagraph = document.getElementById(paragraphID + simplifyBoxIdSuffix);
if ( currentParagraph === null) {
logger().logParagraph(simpaticoEservice, paragraphID);
currentParagraph = document.getElementById(paragraphID);
var text = currentParagraph.textContent ? currentParagraph.textContent : currentParagraph.innerText;//IE uses innerText
taeCORE.getInstance().simplifyText(paragraphID, text, showSimplificationBox);
} else {
hideSimplificationBox(paragraphID);
}
}
// It creates the HTML content of a complex word
// Used by createSimplifiedTextHTML(...)
// - item: the object which contains the description passed as parameter
function createSimplifiedWordLabel(item) {
return '<span class="simp-word" ' +
'onclick="taeUI.getInstance().wordEvent(event, this)">' +
item.originalValue +
'</span>';
}
// It creates the HTML content of a simplified paragraph
// Used by getSimplifiedText(...)
// - originalText: the original text contained in a paragraph
// - simplifications: A list of simplified words of the text
function createSimplifiedTextHTML(originalText, simplifications) {
Array.prototype.keySort = function(key, desc){
this.sort(function(a, b) {
var result = desc ? (a[key] < b[key]) : (a[key] > b[key]);
return result ? 1 : -1;
});
return this;
}
simplifications.keySort('start');
if (simplifications.length == 0)
{
var result = emptyText; // 'no words need to be simplified'
}else{
var result = originalText;
var item = '';
// for each simplified word add an element containing it
for (var i = simplifications.length -1; i >= 0; i--) {
item = simplifications[i];
console.log(item);
result = result.substring(0, item.start) +
createSimplifiedWordLabel(item) +
result.substring(item.end, result.length);
}
}
return result;
}
// Method used to cancel the propagation of the events
// - event: the event to cancel
function cancelEventPropagation(event) {
event = event || window.event // cross-browser event
if (event.stopPropagation) {
event.stopPropagation(); // W3C standard variant
} else {
event.cancelBubble = true; // IE variant
}
}
// Function called when a user clicks on a difficult word
// It manages the event and shows the synonyms and definition of the
// selected word calling to showWordProperties(...)
// - event: the click event. It is cancelled
// - wordHTMLelement: the element that contains the word
function wordEvent(event, wordHTMLelement) {
cancelEventPropagation(event);
showWordProperties(wordHTMLelement);
}
// Function called when a user clicks on a highlighted word
// It shows synonyms and the definition of the word contained by the
// HTML element passed as parameter
// - wordHTMLelement: the element that contains the word
function showWordProperties(wordHTMLelement) {
var simplifiedBoxNode = document.getElementById(wordHTMLelement
.parentNode
.parentNode
.parentNode.id);
var paragraphId = simplifiedBoxNode.parentNode.id;
var currentBox = simplifiedBoxNode
.getElementsByClassName(wordPropertiesClassName)[0];
// If the currentBox is not created, create and attach it
if (currentBox == null) {
currentBox = document.createElement('li');
currentBox.className = wordPropertiesClassName;
currentBox.setAttribute("onclick",
"taeUI.getInstance()." +
"wordPropertiesEvent(event,'" + paragraphId + "');");
simplifiedBoxNode.getElementsByTagName('ul')[0].appendChild(currentBox);
}
// Get the synonyms and definition
var definition = taeCORE.getInstance()
.termDefinition(paragraphId, wordHTMLelement.innerHTML);
var synonyms = taeCORE.getInstance()
.termSynonyms(paragraphId, wordHTMLelement.innerHTML);
var wiki = taeCORE.getInstance()
.termWikipedia(paragraphId, wordHTMLelement.innerHTML);
// Update the content
currentBox.innerHTML = '<b>' + wordHTMLelement.innerText + '</b></br>';
if (definition != null) // If the word has definition show it
currentBox.innerHTML += '<i>' + definitionLabel + ':' + '</i>'
+ definition
+ '</br>';
if (synonyms != null) // If the word has synonyms show them
currentBox.innerHTML += '<i>' + synonymLabel +':' + '</i>' + synonyms;
if (wiki != null) // If the word has a wikipedia link
currentBox.innerHTML += '<br/><i>' + wikipediaLabel +':' + '</i><a target="_blank" href="'+wiki+'">' + wiki + '</a>';
logger().logWord(simpaticoEservice, wordHTMLelement.innerHTML);
}
// Function called when a user clicks on a WordProperties box
// It hides the selected WordProperties box
// - event: the click event. It is cancelled
// - paragraphID: the id of the paragraph that contains the WordProperties box
function hideWordProperties(event, paragraphID) {
cancelEventPropagation(event);
var paragraphNode = document.getElementById(paragraphID);
var currentBox = paragraphNode
.getElementsByClassName(wordPropertiesClassName)[0];
if (currentBox != null) {
currentBox.parentNode.removeChild(currentBox);
}
}
// Draw the simplification box
// - paragraphID: the id of the paragraph
// - originalText: the original text contained in the paragraph
// - response: the JSON object with the simplifications related to the paragraph
function showSimplificationBox(paragraphID, originalText, response) {
// Create the Simplification Box div
var questionsBox = document.createElement('div');
questionsBox.id = paragraphID + simplifyBoxIdSuffix;
questionsBox.className = simplifyBoxClassName;
// 1. The title is attached
var questionsHtml = '<p>' + simplifyBoxTitle + '</p>';
// 2. The simplification is attached
questionsHtml += '<ul>';
questionsHtml += '<li>' + createSimplifiedTextHTML(
originalText,
response.simplifications) + '</li>';
questionsHtml += '</ul>';
// 3. The Simplification Box div is attached to the corresponding paragraph
questionsBox.innerHTML = questionsHtml;
document.getElementById(paragraphID).appendChild(questionsBox);
document.getElementById('loading_'+paragraphID).style.display = "none";
} //showSimplificationBox
// Hide the simplification box attached to the paragraph passed as parameter
// - paragraphID: the id of the paragraph
function hideSimplificationBox(paragraphID) {
var sBoxToRemove = document.getElementById(paragraphID + simplifyBoxIdSuffix);
sBoxToRemove.parentNode.removeChild(sBoxToRemove);
}
return {
// Public definitions
init: initComponent, // Called only one time
enable: enableComponentFeatures, // Called when the Component button is enabled
disable: disableComponentFeatures, // Called when the Component button is disabled or another one enabled
isEnabled: function() { return featureEnabled;}, // Returns if the feature is enabled
paragraphEvent: paragraphEvent,
wordEvent: wordEvent,
wordPropertiesEvent: hideWordProperties
};
}
return {
getInstance: function() {
if(!instance) instance = Singleton();
return instance;
}
};
})(); | {
if (featureEnabled) return;
featureEnabled = true;
// Gets the tagged paragraphs the first time
if (paragraphs.length === 0) {
paragraphs = document.getElementsByClassName(elementsToEnhanceClassName);
}
// Add special format and add a couple of attributes to the paragraphs
var paragrapId = 1;
var paragraphName = '';
for (var i = 0, len = paragraphs.length; i < len; i++) {
if (paragraphs[i].className.indexOf(elementsToEnhanceClassName + "-active") < 0) paragraphs[i].className += ' '+elementsToEnhanceClassName + "-active";
paragraphName = "taeParagraph" + paragrapId;
// Store original style
// originalStyles[i] = paragraphs[i].style;
// paragraphs[i].style.position = 'relative';
// paragraphs[i].style.borderLeft = "12px solid " + primaryColor;
// paragraphs[i].style.borderRadius = "16px";
//
// paragraphs[i].style.padding = '0px 0px 0px 8px';
// paragraphs[i].style.margin = '0px 0px 8px 0px';
paragraphs[i].setAttribute("id", paragraphName);
paragraphs[i].setAttribute("onclick",
"taeUI.getInstance()." +
"paragraphEvent('" + paragraphName + "');");
var loadingImage = document.createElement("img");
loadingImage.setAttribute("src", "img/loader.gif");
loadingImage.setAttribute("id", "loading_"+paragraphName);
loadingImage.style.display = "none";
paragraphs[i].appendChild(loadingImage);
paragrapId++;
}
} | identifier_body |
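The four tae-ui.js rows above are the same file split four different ways, one per `fim_type` class: `conditional_block` holds out a brace-delimited block, `random_line_split` cuts at an arbitrary line boundary, `identifier_name` holds out a single name (here `showSimplificationBox`), and `identifier_body` holds out an entire function body. Whatever the class, the invariant that should hold — assuming the standard FIM construction — is that the three pieces concatenate back to the original file:

```python
# Sanity check that should hold for every row regardless of fim_type,
# assuming the standard FIM construction.
def reassemble(row: dict) -> str:
    return row["prefix"] + row["middle"] + row["suffix"]
```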
data_utils.py | from torch.utils.data import TensorDataset
import torch
import numpy as np
import random
from torch.utils.data.sampler import RandomSampler, SubsetRandomSampler, SequentialSampler
from torch.utils.data.distributed import DistributedSampler
from torch.utils.data import DataLoader
import os
class InputFeatures(object):
"""A single set of features of data."""
def __init__(self, input_ids, input_mask, segment_ids, label_id, seq_length=None, guid=None):
self.input_ids = input_ids
self.input_mask = input_mask
self.segment_ids = segment_ids
self.seq_length = seq_length
self.label_id = label_id
self.guid = guid
def get_tensor_data(output_mode, features):
if output_mode == "classification":
all_label_ids = torch.tensor([f.label_id for f in features], dtype=torch.long)
else:
all_label_ids = torch.tensor([f.label_id for f in features], dtype=torch.float)
all_seq_lengths = torch.tensor([f.seq_length for f in features], dtype=torch.long)
all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
all_input_mask = torch.tensor([f.input_mask for f in features], dtype=torch.long)
all_segment_ids = torch.tensor([f.segment_ids for f in features], dtype=torch.long)
s_ids = [f.guid for f in features]
tensor_data = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_label_ids,
all_seq_lengths)
return tensor_data, s_ids
def convert_examples_to_features(examples, label_list, max_seq_length, tokenizer, output_mode, is_master=True):
"""Loads a data file into a list of `InputBatch`s."""
label_map = {label: i for i, label in enumerate(label_list)}
features = []
for (ex_index, example) in enumerate(examples):
if ex_index % 10000 == 0 and is_master:
print("Writing example %d of %d" % (ex_index, len(examples)))
tokens_a = tokenizer.tokenize(example.text_a)
tokens_b = None
if example.text_b:
tokens_b = tokenizer.tokenize(example.text_b)
_truncate_seq_pair(tokens_a, tokens_b, max_seq_length - 3)
else:
if len(tokens_a) > max_seq_length - 2:
tokens_a = tokens_a[:(max_seq_length - 2)]
tokens = ["[CLS]"] + tokens_a + ["[SEP]"]
segment_ids = [0] * len(tokens)
if tokens_b:
tokens += tokens_b + ["[SEP]"]
segment_ids += [1] * (len(tokens_b) + 1)
input_ids = tokenizer.convert_tokens_to_ids(tokens)
input_mask = [1] * len(input_ids)
seq_length = len(input_ids)
padding = [0] * (max_seq_length - len(input_ids))
input_ids += padding
input_mask += padding
segment_ids += padding
assert len(input_ids) == max_seq_length
assert len(input_mask) == max_seq_length
assert len(segment_ids) == max_seq_length
if output_mode == "classification":
label_id = label_map[example.label]
elif output_mode == "regression":
label_id = float(example.label)
else:
raise KeyError(output_mode)
if ex_index == 0 and is_master:
print("*** Example ***")
print("guid: %s" % (example.guid))
print("tokens: %s" % " ".join([str(x) for x in tokens]))
print("input_ids: %s" % " ".join([str(x) for x in input_ids]))
print("input_mask: %s" % " ".join([str(x) for x in input_mask]))
print("segment_ids: %s" % " ".join([str(x) for x in segment_ids]))
print("label: {}".format(example.label))
print("label_id: {}".format(label_id))
features.append(
InputFeatures(
input_ids=input_ids,
input_mask=input_mask,
segment_ids=segment_ids,
label_id=label_id,
seq_length=seq_length,
guid=example.guid))
return features
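# Worked example (token ids are vocabulary-dependent; these are
# bert-base-uncased's, where 101/102 are [CLS]/[SEP]):
#   text_a = "hello world", max_seq_length = 8  ->
#     tokens:      [CLS] hello world [SEP]
#     input_ids:   [101, 7592, 2088, 102, 0, 0, 0, 0]
#     input_mask:  [1, 1, 1, 1, 0, 0, 0, 0]
#     segment_ids: [0, 0, 0, 0, 0, 0, 0, 0]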
def _truncate_seq_pair(tokens_a, tokens_b, max_length):
"""Truncates a sequence pair in place to the maximum length."""
while True:
total_length = len(tokens_a) + len(tokens_b)
if total_length <= max_length:
break
if len(tokens_a) > len(tokens_b):
tokens_a.pop()
else:
tokens_b.pop()
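# Example: tokens_a has 5 tokens, tokens_b has 3, max_length=6 -> two pops
# from tokens_a leave lengths (3, 3); the longer sequence always loses a
# token first, so both sides are truncated as evenly as possible.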
def load_glue_dataset(config):
from bert_fineturn.data_processor.glue import glue_processors as processors
from bert_fineturn.data_processor.glue import glue_output_modes as output_modes
from transformers import BertConfig, BertTokenizer
task_name = config.datasets
config.is_master = True
config.multi_gpu = False
processor = processors[task_name.lower()]()
output_mode = output_modes[task_name.lower()]
label_list = processor.get_labels()
if output_mode == 'classification':
n_classes = len(label_list)
else:
n_classes = 1
sids = dict()
tokenizer = BertTokenizer.from_pretrained('teacher_utils/bert_base_uncased', do_lower_case=True)
train_examples = processor.get_train_examples(config.data_src_path)
train_features = convert_examples_to_features(train_examples, label_list,
config.max_seq_length, tokenizer,
output_mode, config.is_master)
train_data, train_sids = get_tensor_data(output_mode, train_features)
eval_examples = processor.get_dev_examples(config.data_src_path)
eval_features = convert_examples_to_features(eval_examples, label_list,
config.max_seq_length, tokenizer,
output_mode, config.is_master)
eval_data, eval_sids = get_tensor_data(output_mode, eval_features)
test_examples = processor.get_test_examples(config.data_src_path)
test_features = convert_examples_to_features(test_examples, label_list,
config.max_seq_length, tokenizer,
output_mode, config.is_master)
test_data, test_sids = get_tensor_data(output_mode, test_features)
train_eval_data, _ = get_tensor_data(output_mode, eval_features)
if not config.multi_gpu:
train_sampler = RandomSampler(train_data)
train_eval_sampler = RandomSampler(train_eval_data)
else:
train_sampler = DistributedSampler(train_data)
train_eval_sampler = DistributedSampler(train_eval_data)
eval_sampler = SequentialSampler(eval_data)
test_sampler = SequentialSampler(test_data)
train_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=config.batch_size)
eval_dataloader = DataLoader(eval_data, sampler=eval_sampler, batch_size=config.batch_size)
train_eval_dataloader = DataLoader(train_eval_data, sampler=train_eval_sampler, batch_size=config.batch_size)
test_dataloader = DataLoader(test_data, sampler=test_sampler, batch_size=config.batch_size)
bert_config = BertConfig.from_pretrained("teacher_utils/bert_base_uncased/config.json")
config.bert_config = bert_config
sids = {"train": train_sids, "test": test_sids, "dev": eval_sids}
return train_dataloader, train_eval_dataloader, eval_dataloader, test_dataloader, output_mode, n_classes, config, sids
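# Hypothetical call site (attribute names taken from the code above):
#   cfg.datasets = "MRPC"; cfg.data_src_path = "data/MRPC"
#   cfg.max_seq_length = 128; cfg.batch_size = 32
#   train_dl, train_eval_dl, dev_dl, test_dl, mode, n, cfg, sids = load_glue_dataset(cfg)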
from torch.utils.data.sampler import Sampler
class OrderedSampler(Sampler):
    def __init__(self, dataset, order):
        self._dataset = dataset
        self._train_data_list = order

    def __len__(self):
        return len(self._dataset)

    def __iter__(self):
        random.shuffle(self._train_data_list)
        # A Sampler yields indices; the DataLoader performs the actual lookup.
        for index in self._train_data_list:
            yield index
def check_data_valid(data1, data2):
# data1, data2 = next(iter(data1)), next(iter(data2))
def pad_replace(x):
x = np.array(x)
pad_mask = np.array([not(i == '[PAD]' or i == "<pad>") for i in x])
        # Collapse the padding positions into a single "[PAD] * n" marker.
        new_x = x[pad_mask].tolist() + [f'[PAD] * {(~pad_mask).sum()}']
return new_x
    def mask_replace(x):
t = sum(x)
new_x = f"1 * {t}, 0 * {len(x) - t}"
return new_x
with open('/data/lxk/NLP/github/darts-KD/data/MRPC-nas/embedding/vocab.txt') as f:
vocab1 = {i:x.strip() for i, x in enumerate(f.readlines())}
with open('/data/lxk/NLP/github/darts-KD/teacher_utils/teacher_model/MRPC/vocab.txt') as f:
vocab2 = {i:x.strip() for i, x in enumerate(f.readlines())}
sent_words = torch.split(data1[0], 1, dim=1)
sent_words = [torch.squeeze(x, dim=1) for x in sent_words]
mask = [x.ne(0) for x in sent_words]
if len(mask) > 1:
mask = torch.logical_or(mask[0], mask[1])
else:
mask = mask[0]
print("SENT1:", pad_replace([vocab1[x.item()] for x in data1[0][0][0]]))
if data1[0].shape[1] == 2:
print("SENT2:", pad_replace([vocab1[x.item()] for x in data1[0][0][1]]))
print("MASK:", mask_replace(mask[0]))
print("LABEL:", data1[2][0].item())
input_ids, input_mask, segment_ids, label_ids, seq_lengths = data2
print("TEACHER SENT:", pad_replace([vocab2[x.item()] for x in input_ids[0]]))
print("TEACHER MASK", mask_replace(input_mask[0]))
print("TEACHER LABEL", label_ids[0].item())
class RandomSamplerByOrder(Sampler):
r"""Samples elements randomly. If without replacement, then sample from a shuffled dataset.
If with replacement, then user can specify :attr:`num_samples` to draw.
Arguments:
data_source (Dataset): dataset to sample from
replacement (bool): samples are drawn with replacement if ``True``, default=``False``
num_samples (int): number of samples to draw, default=`len(dataset)`. This argument
is supposed to be specified only when `replacement` is ``True``.
"""
def __init__(self, data_source, replacement=False, num_samples=None):
self.data_source = data_source
self.replacement = replacement
self._num_samples = num_samples
if not isinstance(self.replacement, bool):
raise ValueError("replacement should be a boolean value, but got "
"replacement={}".format(self.replacement))
if self._num_samples is not None and not replacement:
raise ValueError("With replacement=False, num_samples should not be specified, "
"since a random permute will be performed.")
if not isinstance(self.num_samples, int) or self.num_samples <= 0:
raise ValueError("num_samples should be a positive integer "
"value, but got num_samples={}".format(self.num_samples))
@property
def num_samples(self):
# dataset size might change at runtime
if self._num_samples is None:
return len(self.data_source)
return self._num_samples
def __iter__(self):
n = len(self.data_source)
if self.replacement:
return iter(torch.randint(high=n, size=(self.num_samples,), dtype=torch.int64).tolist())
return iter(torch.randperm(n).tolist())
def __len__(self):
return self.num_samples
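# Drop-in equivalent of torch.utils.data.RandomSampler; pass it as the
# `sampler=` argument of a DataLoader.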
def bert_batch_split(data, rank, device=None):
if device == torch.device('cpu'):
data = [x for x in data]
else:
data = [x.to(f"cuda:{rank}", non_blocking=True) for x in data]
input_ids, input_mask, segment_ids, label_ids, seq_lengths = data
X = [input_ids, input_mask, segment_ids, seq_lengths]
Y = label_ids
return X, Y
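
# A minimal smoke test (hypothetical; not part of the original training code):
# it builds one fake feature and exercises get_tensor_data, DataLoader
# batching, and bert_batch_split end to end on CPU.
if __name__ == "__main__":
    demo_features = [InputFeatures(input_ids=[101, 7592, 102, 0],
                                   input_mask=[1, 1, 1, 0],
                                   segment_ids=[0, 0, 0, 0],
                                   label_id=1, seq_length=3, guid="demo-0")]
    demo_data, demo_guids = get_tensor_data("classification", demo_features)
    demo_loader = DataLoader(demo_data, sampler=SequentialSampler(demo_data), batch_size=1)
    demo_batch = next(iter(demo_loader))
    X, Y = bert_batch_split(demo_batch, rank=0, device=torch.device('cpu'))
    print(demo_guids, [t.shape for t in X], Y)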
// d.rs
// maybe like Problem D. Descending in the Dark round 2 2012
/*
Bipartite matching
Grid
BFS
Cycles
Hard
*/
use crate::algo::graph::flow::*;
use crate::algo::graph::*;
use crate::util::grid::constants::*;
use crate::util::grid::{Grid, GridCoord, GridRowColVec};
use crate::util::input::*;
//use std::thread;
use bimap::BiMap;
use bit_vec::BitVec;
use indexmap::IndexSet;
use std::collections::HashSet;
use std::collections::VecDeque;
use std::default::Default;
use std::fmt;
use std::fmt::{Display, Formatter};
use std::io::Write;
use std::sync::mpsc::channel;
use std::time::Instant;
use threadpool::ThreadPool;
pub fn solve_all_cases()
{
run_cases(
&["D-small-practice", "D-large-practice"],
"y2017round2",
|reader, buffer| {
//let mut children: Vec<thread::JoinHandle<_>> = vec![];
let pool = ThreadPool::new(6);
let (tx, rx) = channel();
let t = reader.read_int();
for case in 1..=t {
let (C, R, M) = reader.read_tuple_3::<usize>();
let mut grid: Grid<Tile> = Grid::new(R, C);
for r in 0..R {
let row = reader.read_chars(C);
for (c, t) in row.iter().enumerate() {
grid[(r, c)] = Tile::from(*t);
}
}
let tx = tx.clone();
pool.execute(move || {
let now = Instant::now();
let _ = writeln!(::std::io::stderr(), "Starting {} of {} ", case, t);
let s = solve(case, &mut grid, M);
tx.send((case, s)).expect("Channel is there");
let duration = now.elapsed();
let secs = duration.as_secs() as f64 + duration.subsec_nanos() as f64 / 1e9f64;
let _ = writeln!(
::std::io::stderr(),
"Finished #{} in {:.2} second(s)",
case,
secs
);
});
}
let mut output = rx.iter().take(t as usize).collect::<Vec<_>>();
output.sort();
for (_, s) in output {
write!(buffer, "{}", s).unwrap();
}
},
);
}
#[derive(Debug, Copy, Clone, PartialEq)]
enum Tile
{
Empty,
Building,
Soldier,
Turret,
}
use self::Tile::*;
use crate::util::codejam::run_cases;
impl Tile
{
fn to_char(self) -> char
{
match self {
Empty => '.',
Building => '#',
Soldier => 'S',
Turret => 'T',
}
}
}
impl From<char> for Tile
{
fn from(item: char) -> Self
{
match item {
'.' => Empty,
'#' => Building,
'S' => Soldier,
'T' => Turret,
_ => panic!("Character not recognized: {}", item),
}
}
}
impl Display for Tile
{
fn fmt(&self, f: &mut Formatter) -> fmt::Result
{
write!(f, "{}", self.to_char())
}
}
impl Default for Tile
{
fn default() -> Tile
{
Empty
}
}
//problem specific code
fn reachable(grid: &Grid<Tile>, location: &GridCoord) -> HashSet<GridRowColVec>
{
let mut r = HashSet::new();
//debug!("\nTracing {} starting at {}", location, direction);
for direction in DIRECTIONS.iter() {
let mut loc: GridRowColVec = location.convert();
for _ in 0..=grid.R + grid.C {
loc += direction;
if let Some(tile) = grid.get_value(&loc) {
match *tile {
Building => {
break;
}
_ => {
r.insert(loc.clone());
}
};
} else {
break;
}
}
}
r
}
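// A turret's line of sight runs along each direction in DIRECTIONS until the
// ray leaves the grid or hits a Building; R + C safely bounds the ray length.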
/*
impl<L, R> FromIterator<(L, R)> for BiMap<L, R>
{
fn from_iter<I: IntoIterator<Item = (L, R)>>(iter: I) -> Self
{
let mut c = BiMap::new();
for i in iter {
c.insert(i.0, i.1);
}
c
}
}*/
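// Outline of solve(): build the soldier->turret graph G, take a maximum
// matching via max-flow, then repeatedly either fire a "free" shot (an edge of
// G' whose turret is unmatched) or rotate the matching along a cycle in H so
// that some matched pair becomes actionable.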
fn solve<'a>(case_no: u32, grid: &mut Grid<Tile>, M_soldier_limit: usize) -> String
{
debug!(
"Solving case {}\nM={}\n{}\n",
case_no, M_soldier_limit, grid
);
//original solider & turret index to location map
let S_map = grid
.filter_by_val(&Soldier)
.enumerate()
.collect::<BiMap<_, _>>();
let turret_locations = grid.filter_by_val(&Turret).collect::<Vec<_>>();
    //precalculate what squares a turret can reach
let turret_reachable_squares_list = turret_locations
.iter()
.map(|t_loc| reachable(&grid, &t_loc))
.collect::<Vec<_>>();
let T_map = turret_locations
.into_iter()
.enumerate()
.collect::<BiMap<_, _>>();
let S = grid.filter_by_val(&Soldier).count();
let T = grid.filter_by_val(&Turret).count();
//Construct the initial Graph
let G_edges = build_graph(
&grid,
false,
M_soldier_limit,
&S_map,
&T_map,
&turret_reachable_squares_list,
);
let mut G = FlowGraph::new(2 + S + T, 4);
for uv in G_edges {
G.add_edge(uv.0, uv.1, 1, 1);
}
let source = S + T;
let sink = S + T + 1;
let vertex_to_string = |v: usize| match v {
s if s < S => format!("Soldier #{} ({:?})", s + 1, *S_map.get_by_left(&s).unwrap()),
t if t >= S && t < S + T => format!(
"Turret #{} ({:?})",
t - S + 1,
*T_map.get_by_left(&(t - S)).unwrap()
),
v if v == sink => "Sink".to_string(),
_source => "Source".to_string(),
};
//BFS for each soldier
//will be in left to right order, then top down order
//Now find max matching of G (G has an edge from soldier s to turret t if and only if soldier s can destroy turret t after all other turrets have been destroyed)
for s in 0..S {
G.add_edge(source, s, 1, 1);
}
for t in S..S + T {
G.add_edge(t, sink, 1, 1);
}
let (R, flow) = G.dinic(source, sink);
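    // R is the max-flow value, i.e. the size of a maximum matching; `flow`
    // holds per-edge flow and is used below to recover the matched pairs.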
let mut ans = format!("Case #{}: {}\n", case_no, R);
//Compute initial matching
let mut M = flow
.iter()
.enumerate()
.filter(|&(_e, f)| *f > 0)
//map to u->v
.map(|(e, _f)| (G.graph.endp[e ^ 1], G.graph.endp[e]))
//leave out source and sink nodes
.filter(|&(u, v)| u != source && v != sink)
.collect::<Vec<_>>();
debug!(
"Edges in M initial matching=\n{}\n",
M.iter()
.map(|&(u, v)| format!("{}->{}", vertex_to_string(u), vertex_to_string(v)))
.collect::<Vec<_>>()
.join("\n")
);
let mut r = R;
while r > 0 {
//Let us define the graph G' with the same nodes as G, but an edge between soldier s and turret t only exists in G' if s can destroy t with the other turrets active
let Gprime = build_graph(
&grid,
true,
M_soldier_limit,
&S_map,
&T_map,
&turret_reachable_squares_list,
);
//Now build graph H
let mut H = Graph::new(S + T, 4);
let soldiers_in_m = M.iter().map(|&(s, _t)| s).collect::<Vec<_>>();
for &(s, t) in Gprime.iter() {
if soldiers_in_m.contains(&s) {
H.add_edge(s, t);
}
}
for &(s, t) in M.iter() {
H.add_edge(t, s);
}
debug!(
"Current matching M =\n{}\n",
M.iter()
.map(|&(u, v)| format!("{}->{}", vertex_to_string(u), vertex_to_string(v)))
.collect::<Vec<_>>()
.join("\n")
);
debug!(
"Edges in G'=\n{}\n",
Gprime
.iter()
.map(|&(u, v)| format!("{}->{}", vertex_to_string(u), vertex_to_string(v)))
.collect::<Vec<_>>()
.join("\n")
);
debug!(
"Edges in H=\n{}\n",
H.edges()
.map(|(u, v)| format!("{}->{}", vertex_to_string(u), vertex_to_string(v)))
.collect::<Vec<_>>()
.join("\n")
);
let turrets_in_M = M.iter().map(|&(_s, t)| t).collect::<Vec<_>>();
//find an edge (s,t') where t' is not in m
let st_prime = Gprime.iter().find(|&(_s, t)| !turrets_in_M.contains(t));
if st_prime.is_some() {
let &(s, t) = st_prime.unwrap();
debug!("Found (s,t') s={} t'={}", s, t - S);
ans += &format!("{} {}\n", s + 1, t - S + 1);
grid[S_map.get_by_left(&s).unwrap()] = Empty;
grid[T_map.get_by_left(&(t - S)).unwrap()] = Empty;
r -= 1;
//Also remove from current matching
let to_remove = M
.iter()
.position(|&(s_in_m, _t)| s_in_m == s)
.expect("Soldier should be in mapping");
M.remove(to_remove);
continue;
}
//Now we need to find a cycle
//Start at a soldier in H
let soldier_in_h = H.edges().filter(|&(u, _v)| u <= S).next().unwrap().0;
let mut cycle_edges = VecDeque::new();
let mut edge = (
soldier_in_h,
H.adj_list_with_edges(soldier_in_h).next().unwrap().1,
);
let mut visited = BitVec::from_elem(H.num_v(), false);
        while !visited[edge.0] {
            visited.set(edge.0, true);
            cycle_edges.push_back(edge);
debug!(
"pushed Edge {:?} ",
format!("{}->{}", vertex_to_string(edge.0), vertex_to_string(edge.1))
);
//adj list returns an (internal edge index, next vertex)
edge = (edge.1, H.adj_list_with_edges(edge.1).next().unwrap().1);
debug!("Edge {:?} ", edge);
}
//cut to the actual cycle found
let cycle_end = cycle_edges.back().unwrap().1;
let cycle_start = cycle_edges
.iter()
.position(|&(u, _v)| u == cycle_end)
.unwrap();
cycle_edges.drain(0..cycle_start);
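        // The walk from soldier_in_h may include a non-cyclic tail before the
        // first repeated vertex; dropping everything before cycle_start
        // leaves the pure cycle.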
debug!(
"Cycle C =\n{}\n",
cycle_edges
.iter()
.map(|&(u, v)| format!("{}->{}", vertex_to_string(u), vertex_to_string(v)))
.collect::<Vec<_>>()
.join("\n")
);
        //Consider a new matching M' of G consisting of the edges of M whose reverse is not in C,
        //plus the edges in C whose reverse is not in M. That is, M' is M but exchanging the edges
        //present in C in some direction. M' in this case is also a matching of G of the same size as M
//because it is a cycle, we know we have new edges from G' to replace the ones removed from M
let mut M_new: Vec<(usize, usize)> = Vec::new();
M_new.extend(M.iter().filter(|&&(u, v)| !cycle_edges.contains(&(v, u))));
M_new.extend(cycle_edges.iter().filter(|&&(u, v)| !M.contains(&(v, u))));
debug!(
"New matching M =\n{}\n",
M_new
.iter()
.map(|&(u, v)| format!("{}->{}", vertex_to_string(u), vertex_to_string(v)))
.collect::<Vec<_>>()
.join("\n")
);
//Find all edges from G' which are actions we can take
let st_actions = M_new
.iter()
.filter(|&uv| Gprime.contains(uv))
.collect::<Vec<_>>();
for &&(s, t) in st_actions.iter() {
debug!("Taking actions from g' s {} t {}", s + 1, t + 1 - S);
ans += &format!("{} {}\n", s + 1, t - S + 1);
grid[S_map.get_by_left(&s).unwrap()] = Empty;
grid[T_map.get_by_left(&(t - S)).unwrap()] = Empty;
r -= 1;
}
M = M_new;
}
ans
}
fn build_graph(
grid: &Grid<Tile>,
is_g_prime: bool,
M: usize,
s_mapping: &BiMap<usize, GridCoord>,
t_mapping: &BiMap<usize, GridCoord>,
turret_reachable_squares_list: &Vec<HashSet<GridRowColVec>>,
) -> IndexSet<(usize, usize)>
{
let mut G: IndexSet<(usize, usize)> = IndexSet::new();
let turret_locations = grid.filter_by_val(&Turret).collect::<HashSet<_>>();
/*
for (turret_index, turret_squares) in turret_squares_list.iter().enumerate() {
debug!("Turret {} can see {:?}", turret_index, turret_squares);
}
*/
let soldier_locations = grid.filter_by_val(&Soldier).collect::<Vec<_>>();
let S = soldier_locations.len();
let T = turret_reachable_squares_list.len();
for (_soldier_index, soldier_loc) in soldier_locations.iter().enumerate() {
//debug!("BFS search on soldier {} @ {}", soldier_index, soldier_loc);
//Node is location, distance, seen_turret
let mut queue: VecDeque<(GridRowColVec, usize, bool)> = VecDeque::new();
let mut visited = BitVec::from_elem(grid.C * grid.R, false);
queue.push_back((soldier_loc.convert(), 0, false));
visited.set(soldier_loc.data[0] * grid.C + soldier_loc.data[1], true);
while !queue.is_empty() {
let (loc, dist, seen_turret) = queue.pop_front().unwrap();
let visible_turrets = turret_reachable_squares_list
.iter()
.enumerate()
.filter(|(turret_index, turret_squares)| {
turret_locations.contains(t_mapping.get_by_left(turret_index).unwrap())
&& turret_squares.contains(&loc)
})
.map(|(turret_index, _)| turret_index);
let mut turret_visible = false;
for turret_index in visible_turrets {
turret_visible = true;
if !is_g_prime || (!seen_turret && is_g_prime) {
let s_vertex = *s_mapping.get_by_right(soldier_loc).unwrap();
//The turret index is already using the original grids index
/*debug!("Found s{} t{} mapped to soldier {} => {} at loc {}",
soldier_index, turret_index, s_vertex, t_vertex, loc);*/
G.insert((s_vertex, s_mapping.len() + turret_index));
}
}
//no need to queue once we have been shot by a turret
if is_g_prime && turret_visible {
continue;
}
/*
debug!(
"Viewing {} dist {} seen turret? {} turret visible? {}",
loc, dist, seen_turret, turret_visible
);*/
for dir in DIRECTIONS.iter() {
let new_loc = loc.clone() + dir;
if let Some(tile) = grid.get_value(&new_loc) {
if *tile == Building {
continue;
}
                    let new_loc_index = (new_loc.data[0] * grid.C as i64 + new_loc.data[1]) as usize;
                    if visited[new_loc_index] {
                        continue;
                    }
                    visited.set(new_loc_index, true);
let new_dist = dist + 1;
if new_dist > M {
continue;
}
let new_seen_turret = seen_turret || turret_visible;
queue.push_back((new_loc, new_dist, new_seen_turret));
}
}
}
}
debug!("Built graph from\n{}\n S={} T={}", grid, S, T);
G
}
impl Display for Grid<Tile>
{
fn fmt(&self, f: &mut Formatter) -> fmt::Result
{
for r in 0..self.R {
for c in 0..self.C {
if let Err(err) = write!(f, "{}", self[(r, c)]) {
return Err(err);
}
}
if let Err(err) = writeln!(f, "") {
return Err(err);
}
}
write!(f, "")
}
}
d.rs | // maybe like Problem D. Descending in the Dark round 2 2012
/*
Bipartite matching
Grid
BFS
Cycles
Hard
*/
use crate::algo::graph::flow::*;
use crate::algo::graph::*;
use crate::util::grid::constants::*;
use crate::util::grid::{Grid, GridCoord, GridRowColVec};
use crate::util::input::*;
//use std::thread;
use bimap::BiMap;
use bit_vec::BitVec;
use indexmap::IndexSet;
use std::collections::HashSet;
use std::collections::VecDeque;
use std::default::Default;
use std::fmt;
use std::fmt::{Display, Formatter};
use std::io::Write;
use std::sync::mpsc::channel;
use std::time::Instant;
use threadpool::ThreadPool;
pub fn solve_all_cases()
{
run_cases(
&["D-small-practice", "D-large-practice"],
"y2017round2",
|reader, buffer| {
//let mut children: Vec<thread::JoinHandle<_>> = vec![];
let pool = ThreadPool::new(6);
let (tx, rx) = channel();
let t = reader.read_int();
for case in 1..=t {
let (C, R, M) = reader.read_tuple_3::<usize>();
let mut grid: Grid<Tile> = Grid::new(R, C);
for r in 0..R {
let row = reader.read_chars(C);
for (c, t) in row.iter().enumerate() {
grid[(r, c)] = Tile::from(*t);
}
}
let tx = tx.clone();
pool.execute(move || {
let now = Instant::now();
let _ = writeln!(::std::io::stderr(), "Starting {} of {} ", case, t);
let s = solve(case, &mut grid, M);
tx.send((case, s)).expect("Channel is there");
let duration = now.elapsed();
let secs = duration.as_secs() as f64 + duration.subsec_nanos() as f64 / 1e9f64;
let _ = writeln!(
::std::io::stderr(),
"Finished #{} in {:.2} second(s)",
case,
secs
);
});
}
let mut output = rx.iter().take(t as usize).collect::<Vec<_>>();
output.sort();
for (_, s) in output {
write!(buffer, "{}", s).unwrap();
}
},
);
}
#[derive(Debug, Copy, Clone, PartialEq)]
enum Tile
{
Empty,
Building,
Soldier,
Turret,
}
use self::Tile::*;
use crate::util::codejam::run_cases;
impl Tile
{
fn to_char(self) -> char
{
match self {
Empty => '.',
Building => '#',
Soldier => 'S',
Turret => 'T',
}
}
}
impl From<char> for Tile
{
fn from(item: char) -> Self
{
match item {
'.' => Empty,
'#' => Building,
'S' => Soldier,
'T' => Turret,
_ => panic!("Character not recognized: {}", item),
}
}
}
impl Display for Tile
{
fn fmt(&self, f: &mut Formatter) -> fmt::Result
{
write!(f, "{}", self.to_char())
}
}
impl Default for Tile
{
fn default() -> Tile
{
Empty
}
}
//problem specific code
fn reachable(grid: &Grid<Tile>, location: &GridCoord) -> HashSet<GridRowColVec>
|
/*
impl<L, R> FromIterator<(L, R)> for BiMap<L, R>
{
fn from_iter<I: IntoIterator<Item = (L, R)>>(iter: I) -> Self
{
let mut c = BiMap::new();
for i in iter {
c.insert(i.0, i.1);
}
c
}
}*/
fn solve<'a>(case_no: u32, grid: &mut Grid<Tile>, M_soldier_limit: usize) -> String
{
debug!(
"Solving case {}\nM={}\n{}\n",
case_no, M_soldier_limit, grid
);
//original solider & turret index to location map
let S_map = grid
.filter_by_val(&Soldier)
.enumerate()
.collect::<BiMap<_, _>>();
let turret_locations = grid.filter_by_val(&Turret).collect::<Vec<_>>();
//precalucate what squares a turret can reach
let turret_reachable_squares_list = turret_locations
.iter()
.map(|t_loc| reachable(&grid, &t_loc))
.collect::<Vec<_>>();
let T_map = turret_locations
.into_iter()
.enumerate()
.collect::<BiMap<_, _>>();
let S = grid.filter_by_val(&Soldier).count();
let T = grid.filter_by_val(&Turret).count();
//Construct the initial Graph
let G_edges = build_graph(
&grid,
false,
M_soldier_limit,
&S_map,
&T_map,
&turret_reachable_squares_list,
);
let mut G = FlowGraph::new(2 + S + T, 4);
for uv in G_edges {
G.add_edge(uv.0, uv.1, 1, 1);
}
let source = S + T;
let sink = S + T + 1;
let vertex_to_string = |v: usize| match v {
s if s < S => format!("Soldier #{} ({:?})", s + 1, *S_map.get_by_left(&s).unwrap()),
t if t >= S && t < S + T => format!(
"Turret #{} ({:?})",
t - S + 1,
*T_map.get_by_left(&(t - S)).unwrap()
),
v if v == sink => "Sink".to_string(),
_source => "Source".to_string(),
};
//BFS for each soldier
//will be in left to right order, then top down order
//Now find max matching of G (G has an edge from soldier s to turret t if and only if soldier s can destroy turret t after all other turrets have been destroyed)
for s in 0..S {
G.add_edge(source, s, 1, 1);
}
for t in S..S + T {
G.add_edge(t, sink, 1, 1);
}
let (R, flow) = G.dinic(source, sink);
let mut ans = format!("Case #{}: {}\n", case_no, R);
//Compute initial matching
let mut M = flow
.iter()
.enumerate()
.filter(|&(_e, f)| *f > 0)
//map to u->v
.map(|(e, _f)| (G.graph.endp[e ^ 1], G.graph.endp[e]))
//leave out source and sink nodes
.filter(|&(u, v)| u != source && v != sink)
.collect::<Vec<_>>();
debug!(
"Edges in M initial matching=\n{}\n",
M.iter()
.map(|&(u, v)| format!("{}->{}", vertex_to_string(u), vertex_to_string(v)))
.collect::<Vec<_>>()
.join("\n")
);
let mut r = R;
while r > 0 {
//Let us define the graph G' with the same nodes as G, but an edge between soldier s and turret t only exists in G' if s can destroy t with the other turrets active
let Gprime = build_graph(
&grid,
true,
M_soldier_limit,
&S_map,
&T_map,
&turret_reachable_squares_list,
);
//Now build graph H
let mut H = Graph::new(S + T, 4);
let soldiers_in_m = M.iter().map(|&(s, _t)| s).collect::<Vec<_>>();
for &(s, t) in Gprime.iter() {
if soldiers_in_m.contains(&s) {
H.add_edge(s, t);
}
}
for &(s, t) in M.iter() {
H.add_edge(t, s);
}
debug!(
"Current matching M =\n{}\n",
M.iter()
.map(|&(u, v)| format!("{}->{}", vertex_to_string(u), vertex_to_string(v)))
.collect::<Vec<_>>()
.join("\n")
);
debug!(
"Edges in G'=\n{}\n",
Gprime
.iter()
.map(|&(u, v)| format!("{}->{}", vertex_to_string(u), vertex_to_string(v)))
.collect::<Vec<_>>()
.join("\n")
);
debug!(
"Edges in H=\n{}\n",
H.edges()
.map(|(u, v)| format!("{}->{}", vertex_to_string(u), vertex_to_string(v)))
.collect::<Vec<_>>()
.join("\n")
);
let turrets_in_M = M.iter().map(|&(_s, t)| t).collect::<Vec<_>>();
//find an edge (s,t') where t' is not in m
let st_prime = Gprime.iter().find(|&(_s, t)| !turrets_in_M.contains(t));
if st_prime.is_some() {
let &(s, t) = st_prime.unwrap();
debug!("Found (s,t') s={} t'={}", s, t - S);
ans += &format!("{} {}\n", s + 1, t - S + 1);
grid[S_map.get_by_left(&s).unwrap()] = Empty;
grid[T_map.get_by_left(&(t - S)).unwrap()] = Empty;
r -= 1;
//Also remove from current matching
let to_remove = M
.iter()
.position(|&(s_in_m, _t)| s_in_m == s)
.expect("Soldier should be in mapping");
M.remove(to_remove);
continue;
}
//Now we need to find a cycle
//Start at a soldier in H
let soldier_in_h = H.edges().filter(|&(u, _v)| u <= S).next().unwrap().0;
let mut cycle_edges = VecDeque::new();
let mut edge = (
soldier_in_h,
H.adj_list_with_edges(soldier_in_h).next().unwrap().1,
);
let mut visited = BitVec::from_elem(H.num_v(), false);
while !visited[edge.0] {
visited.set(edge.0, true);
cycle_edges.push_back(edge);
debug!(
"pushed Edge {:?} ",
format!("{}->{}", vertex_to_string(edge.0), vertex_to_string(edge.1))
);
//adj list returns an (internal edge index, next vertex)
edge = (edge.1, H.adj_list_with_edges(edge.1).next().unwrap().1);
debug!("Edge {:?} ", edge);
}
//cut to the actual cycle found
let cycle_end = cycle_edges.back().unwrap().1;
let cycle_start = cycle_edges
.iter()
.position(|&(u, _v)| u == cycle_end)
.unwrap();
cycle_edges.drain(0..cycle_start);
debug!(
"Cycle C =\n{}\n",
cycle_edges
.iter()
.map(|&(u, v)| format!("{}->{}", vertex_to_string(u), vertex_to_string(v)))
.collect::<Vec<_>>()
.join("\n")
);
//Consider a new matching M' of G consisting of the edges of M whose reverse is not in C, p
// lus the edges in C whose reverse is not in M. That is, M' is M but exchanging the edges
// present in C in some direction. M' in this case is also a matching of G of the same size as M
//because it is a cycle, we know we have new edges from G' to replace the ones removed from M
let mut M_new: Vec<(usize, usize)> = Vec::new();
M_new.extend(M.iter().filter(|&&(u, v)| !cycle_edges.contains(&(v, u))));
M_new.extend(cycle_edges.iter().filter(|&&(u, v)| !M.contains(&(v, u))));
debug!(
"New matching M =\n{}\n",
M_new
.iter()
.map(|&(u, v)| format!("{}->{}", vertex_to_string(u), vertex_to_string(v)))
.collect::<Vec<_>>()
.join("\n")
);
//Find all edges from G' which are actions we can take
let st_actions = M_new
.iter()
.filter(|&uv| Gprime.contains(uv))
.collect::<Vec<_>>();
for &&(s, t) in st_actions.iter() {
debug!("Taking actions from g' s {} t {}", s + 1, t + 1 - S);
ans += &format!("{} {}\n", s + 1, t - S + 1);
grid[S_map.get_by_left(&s).unwrap()] = Empty;
grid[T_map.get_by_left(&(t - S)).unwrap()] = Empty;
r -= 1;
}
M = M_new;
}
ans
}
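//build_graph returns directed edges (s, S + t) from soldier vertex s to turret vertex t.
//When is_g_prime is true, an edge requires the soldier to reach a square that sees t
//before being exposed to any other active turret; M caps how far a soldier may move.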
fn build_graph(
grid: &Grid<Tile>,
is_g_prime: bool,
M: usize,
s_mapping: &BiMap<usize, GridCoord>,
t_mapping: &BiMap<usize, GridCoord>,
turret_reachable_squares_list: &Vec<HashSet<GridRowColVec>>,
) -> IndexSet<(usize, usize)>
{
let mut G: IndexSet<(usize, usize)> = IndexSet::new();
let turret_locations = grid.filter_by_val(&Turret).collect::<HashSet<_>>();
/*
for (turret_index, turret_squares) in turret_squares_list.iter().enumerate() {
debug!("Turret {} can see {:?}", turret_index, turret_squares);
}
*/
let soldier_locations = grid.filter_by_val(&Soldier).collect::<Vec<_>>();
let S = soldier_locations.len();
let T = turret_reachable_squares_list.len();
for (_soldier_index, soldier_loc) in soldier_locations.iter().enumerate() {
//debug!("BFS search on soldier {} @ {}", soldier_index, soldier_loc);
//Node is location, distance, seen_turret
let mut queue: VecDeque<(GridRowColVec, usize, bool)> = VecDeque::new();
let mut visited = BitVec::from_elem(grid.C * grid.R, false);
queue.push_back((soldier_loc.convert(), 0, false));
visited.set(soldier_loc.data[0] * grid.C + soldier_loc.data[1], true);
while !queue.is_empty() {
let (loc, dist, seen_turret) = queue.pop_front().unwrap();
let visible_turrets = turret_reachable_squares_list
.iter()
.enumerate()
.filter(|(turret_index, turret_squares)| {
turret_locations.contains(t_mapping.get_by_left(turret_index).unwrap())
&& turret_squares.contains(&loc)
})
.map(|(turret_index, _)| turret_index);
let mut turret_visible = false;
for turret_index in visible_turrets {
turret_visible = true;
if !is_g_prime || !seen_turret {
let s_vertex = *s_mapping.get_by_right(soldier_loc).unwrap();
//The turret index already uses the original grid's indexing
/*debug!("Found s{} t{} mapped to soldier {} => {} at loc {}",
soldier_index, turret_index, s_vertex, t_vertex, loc);*/
G.insert((s_vertex, s_mapping.len() + turret_index));
}
}
//no need to queue once we have been shot by a turret
if is_g_prime && turret_visible {
continue;
}
/*
debug!(
"Viewing {} dist {} seen turret? {} turret visible? {}",
loc, dist, seen_turret, turret_visible
);*/
for dir in DIRECTIONS.iter() {
let new_loc = loc.clone() + dir;
if let Some(tile) = grid.get_value(&new_loc) {
if *tile == Building {
continue;
}
let new_loc_index = (new_loc.data[0] * grid.C as i64 + new_loc.data[1]) as usize;
if visited[new_loc_index] {
continue;
}
visited.set(new_loc_index, true);
let new_dist = dist + 1;
if new_dist > M {
continue;
}
let new_seen_turret = seen_turret || turret_visible;
queue.push_back((new_loc, new_dist, new_seen_turret));
}
}
}
}
debug!("Built graph from\n{}\n S={} T={}", grid, S, T);
G
}
impl Display for Grid<Tile>
{
fn fmt(&self, f: &mut Formatter) -> fmt::Result
{
for r in 0..self.R {
for c in 0..self.C {
write!(f, "{}", self[(r, c)])?;
}
writeln!(f)?;
}
Ok(())
}
}
| {
let mut r = HashSet::new();
//debug!("\nTracing {} starting at {}", location, direction);
for direction in DIRECTIONS.iter() {
let mut loc: GridRowColVec = location.convert();
for _ in 0..=grid.R + grid.C {
loc += direction;
if let Some(tile) = grid.get_value(&loc) {
match *tile {
Building => {
break;
}
_ => {
r.insert(loc.clone());
}
};
} else {
break;
}
}
}
r
} | identifier_body |
d.rs | // maybe like Problem D. Descending in the Dark round 2 2012
/*
Bipartite matching
Grid
BFS
Cycles
Hard
*/
use crate::algo::graph::flow::*;
use crate::algo::graph::*;
use crate::util::grid::constants::*;
use crate::util::grid::{Grid, GridCoord, GridRowColVec};
use crate::util::input::*;
//use std::thread;
use bimap::BiMap;
use bit_vec::BitVec;
use indexmap::IndexSet;
use std::collections::HashSet;
use std::collections::VecDeque;
use std::default::Default;
use std::fmt;
use std::fmt::{Display, Formatter};
use std::io::Write;
use std::sync::mpsc::channel;
use std::time::Instant;
use threadpool::ThreadPool;
pub fn solve_all_cases()
{
run_cases(
&["D-small-practice", "D-large-practice"],
"y2017round2",
|reader, buffer| {
//let mut children: Vec<thread::JoinHandle<_>> = vec![];
let pool = ThreadPool::new(6);
let (tx, rx) = channel();
let t = reader.read_int();
for case in 1..=t {
let (C, R, M) = reader.read_tuple_3::<usize>();
let mut grid: Grid<Tile> = Grid::new(R, C);
for r in 0..R {
let row = reader.read_chars(C);
for (c, t) in row.iter().enumerate() {
grid[(r, c)] = Tile::from(*t);
}
}
let tx = tx.clone();
pool.execute(move || {
let now = Instant::now();
let _ = writeln!(::std::io::stderr(), "Starting {} of {} ", case, t);
let s = solve(case, &mut grid, M);
tx.send((case, s)).expect("Channel is there");
let duration = now.elapsed();
let secs = duration.as_secs() as f64 + duration.subsec_nanos() as f64 / 1e9f64;
let _ = writeln!(
::std::io::stderr(),
"Finished #{} in {:.2} second(s)",
case,
secs
);
});
}
let mut output = rx.iter().take(t as usize).collect::<Vec<_>>();
output.sort();
for (_, s) in output {
write!(buffer, "{}", s).unwrap();
}
},
);
}
#[derive(Debug, Copy, Clone, PartialEq)]
enum Tile
{
Empty,
Building,
Soldier,
Turret,
}
use self::Tile::*;
use crate::util::codejam::run_cases;
impl Tile
{
fn to_char(self) -> char
{
match self {
Empty => '.',
Building => '#',
Soldier => 'S',
Turret => 'T',
}
}
}
impl From<char> for Tile
{
fn from(item: char) -> Self
{
match item {
'.' => Empty,
'#' => Building,
'S' => Soldier,
'T' => Turret,
_ => panic!("Character not recognized: {}", item),
}
}
}
impl Display for Tile
{
fn fmt(&self, f: &mut Formatter) -> fmt::Result
{
write!(f, "{}", self.to_char())
}
}
impl Default for Tile
{
fn default() -> Tile
{
Empty
}
}
//problem specific code
fn reachable(grid: &Grid<Tile>, location: &GridCoord) -> HashSet<GridRowColVec>
{
let mut r = HashSet::new();
//debug!("\nTracing {} starting at {}", location, direction);
for direction in DIRECTIONS.iter() {
let mut loc: GridRowColVec = location.convert();
for _ in 0..=grid.R + grid.C {
loc += direction;
if let Some(tile) = grid.get_value(&loc) {
match *tile {
Building => {
break;
}
_ => {
r.insert(loc.clone());
}
};
} else {
break;
}
}
}
r
}
/*
impl<L, R> FromIterator<(L, R)> for BiMap<L, R>
{
fn from_iter<I: IntoIterator<Item = (L, R)>>(iter: I) -> Self
{
let mut c = BiMap::new();
for i in iter {
c.insert(i.0, i.1);
}
c
}
}*/
fn solve<'a>(case_no: u32, grid: &mut Grid<Tile>, M_soldier_limit: usize) -> String
{
debug!(
"Solving case {}\nM={}\n{}\n",
case_no, M_soldier_limit, grid
);
//original soldier & turret index to location map
let S_map = grid
.filter_by_val(&Soldier)
.enumerate()
.collect::<BiMap<_, _>>();
let turret_locations = grid.filter_by_val(&Turret).collect::<Vec<_>>();
//precalculate what squares a turret can reach
let turret_reachable_squares_list = turret_locations
.iter()
.map(|t_loc| reachable(&grid, &t_loc))
.collect::<Vec<_>>();
let T_map = turret_locations
.into_iter()
.enumerate()
.collect::<BiMap<_, _>>();
let S = grid.filter_by_val(&Soldier).count();
let T = grid.filter_by_val(&Turret).count();
//Construct the initial Graph
let G_edges = build_graph(
&grid,
false,
M_soldier_limit,
&S_map,
&T_map,
&turret_reachable_squares_list,
);
let mut G = FlowGraph::new(2 + S + T, 4);
for uv in G_edges {
G.add_edge(uv.0, uv.1, 1, 1);
}
let source = S + T;
let sink = S + T + 1;
let vertex_to_string = |v: usize| match v {
s if s < S => format!("Soldier #{} ({:?})", s + 1, *S_map.get_by_left(&s).unwrap()),
t if t >= S && t < S + T => format!(
"Turret #{} ({:?})",
t - S + 1,
*T_map.get_by_left(&(t - S)).unwrap()
),
v if v == sink => "Sink".to_string(),
_source => "Source".to_string(),
};
//BFS for each soldier
//will be in left to right order, then top down order
//Now find max matching of G (G has an edge from soldier s to turret t if and only if soldier s can destroy turret t after all other turrets have been destroyed)
for s in 0..S {
G.add_edge(source, s, 1, 1);
}
for t in S..S + T {
G.add_edge(t, sink, 1, 1);
}
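//The max-flow value R equals the size of a maximum soldier->turret matching;
//flow holds the per-edge flow amounts used to reconstruct that matching below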
let (R, flow) = G.dinic(source, sink);
let mut ans = format!("Case #{}: {}\n", case_no, R);
//Compute initial matching
let mut M = flow
.iter()
.enumerate()
.filter(|&(_e, f)| *f > 0)
//map to u->v
.map(|(e, _f)| (G.graph.endp[e ^ 1], G.graph.endp[e]))
//leave out source and sink nodes
.filter(|&(u, v)| u != source && v != sink)
.collect::<Vec<_>>();
debug!(
"Edges in M initial matching=\n{}\n",
M.iter()
.map(|&(u, v)| format!("{}->{}", vertex_to_string(u), vertex_to_string(v)))
.collect::<Vec<_>>()
.join("\n")
);
let mut r = R;
while r > 0 {
//Let us define the graph G' with the same nodes as G, but an edge between soldier s and turret t only exists in G' if s can destroy t with the other turrets active
let Gprime = build_graph(
&grid,
true,
M_soldier_limit,
&S_map,
&T_map,
&turret_reachable_squares_list,
);
//Now build graph H
let mut H = Graph::new(S + T, 4);
let soldiers_in_m = M.iter().map(|&(s, _t)| s).collect::<Vec<_>>();
for &(s, t) in Gprime.iter() {
if soldiers_in_m.contains(&s) {
H.add_edge(s, t);
}
}
for &(s, t) in M.iter() {
H.add_edge(t, s);
}
debug!(
"Current matching M =\n{}\n",
M.iter()
.map(|&(u, v)| format!("{}->{}", vertex_to_string(u), vertex_to_string(v)))
.collect::<Vec<_>>()
.join("\n")
);
debug!(
"Edges in G'=\n{}\n",
Gprime
.iter()
.map(|&(u, v)| format!("{}->{}", vertex_to_string(u), vertex_to_string(v)))
.collect::<Vec<_>>()
.join("\n")
);
debug!(
"Edges in H=\n{}\n",
H.edges()
.map(|(u, v)| format!("{}->{}", vertex_to_string(u), vertex_to_string(v)))
.collect::<Vec<_>>()
.join("\n")
);
let turrets_in_M = M.iter().map(|&(_s, t)| t).collect::<Vec<_>>();
//find an edge (s,t') where t' is not in M
let st_prime = Gprime.iter().find(|&(_s, t)| !turrets_in_M.contains(t));
if let Some(&(s, t)) = st_prime {
debug!("Found (s,t') s={} t'={}", s, t - S);
ans += &format!("{} {}\n", s + 1, t - S + 1);
grid[S_map.get_by_left(&s).unwrap()] = Empty;
grid[T_map.get_by_left(&(t - S)).unwrap()] = Empty;
r -= 1;
//Also remove from current matching
let to_remove = M
.iter()
.position(|&(s_in_m, _t)| s_in_m == s)
.expect("Soldier should be in mapping");
M.remove(to_remove);
continue;
}
//Now we need to find a cycle
//Start at a soldier in H
let soldier_in_h = H.edges().find(|&(u, _v)| u < S).unwrap().0;
let mut cycle_edges = VecDeque::new();
let mut edge = (
soldier_in_h,
H.adj_list_with_edges(soldier_in_h).next().unwrap().1,
);
let mut visited = BitVec::from_elem(H.num_v(), false);
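//Repeatedly following the first outgoing edge must eventually revisit a vertex and
//close a cycle, since every vertex reached here has an outgoing edge (hence the unwraps)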
while !visited[edge.0] {
visited.set(edge.0, true);
cycle_edges.push_back(edge);
debug!(
"pushed Edge {:?} ",
format!("{}->{}", vertex_to_string(edge.0), vertex_to_string(edge.1))
);
//adj list returns an (internal edge index, next vertex)
edge = (edge.1, H.adj_list_with_edges(edge.1).next().unwrap().1);
debug!("Edge {:?} ", edge);
}
//cut to the actual cycle found
let cycle_end = cycle_edges.back().unwrap().1;
let cycle_start = cycle_edges
.iter()
.position(|&(u, _v)| u == cycle_end)
.unwrap();
cycle_edges.drain(0..cycle_start);
debug!(
"Cycle C =\n{}\n",
cycle_edges
.iter()
.map(|&(u, v)| format!("{}->{}", vertex_to_string(u), vertex_to_string(v)))
.collect::<Vec<_>>()
.join("\n")
);
//Consider a new matching M' of G consisting of the edges of M whose reverse is not in C,
//plus the edges in C whose reverse is not in M. That is, M' is M but exchanging the edges
//present in C in some direction. M' in this case is also a matching of G of the same size as M
//because it is a cycle, we know we have new edges from G' to replace the ones removed from M
let mut M_new: Vec<(usize, usize)> = Vec::new();
M_new.extend(M.iter().filter(|&&(u, v)| !cycle_edges.contains(&(v, u))));
M_new.extend(cycle_edges.iter().filter(|&&(u, v)| !M.contains(&(v, u))));
debug!(
"New matching M =\n{}\n",
M_new
.iter()
.map(|&(u, v)| format!("{}->{}", vertex_to_string(u), vertex_to_string(v)))
.collect::<Vec<_>>()
.join("\n")
);
//Find all edges from G' which are actions we can take
let st_actions = M_new
.iter()
.filter(|&uv| Gprime.contains(uv))
.collect::<Vec<_>>();
for &&(s, t) in st_actions.iter() {
debug!("Taking actions from g' s {} t {}", s + 1, t + 1 - S);
ans += &format!("{} {}\n", s + 1, t - S + 1);
grid[S_map.get_by_left(&s).unwrap()] = Empty;
grid[T_map.get_by_left(&(t - S)).unwrap()] = Empty;
r -= 1;
}
M = M_new;
}
ans
}
fn build_graph(
grid: &Grid<Tile>,
is_g_prime: bool,
M: usize,
s_mapping: &BiMap<usize, GridCoord>,
t_mapping: &BiMap<usize, GridCoord>,
turret_reachable_squares_list: &Vec<HashSet<GridRowColVec>>,
) -> IndexSet<(usize, usize)>
{
let mut G: IndexSet<(usize, usize)> = IndexSet::new();
let turret_locations = grid.filter_by_val(&Turret).collect::<HashSet<_>>();
/*
for (turret_index, turret_squares) in turret_squares_list.iter().enumerate() {
debug!("Turret {} can see {:?}", turret_index, turret_squares);
}
*/
let soldier_locations = grid.filter_by_val(&Soldier).collect::<Vec<_>>();
let S = soldier_locations.len();
let T = turret_reachable_squares_list.len();
for (_soldier_index, soldier_loc) in soldier_locations.iter().enumerate() {
//debug!("BFS search on soldier {} @ {}", soldier_index, soldier_loc);
//Node is location, distance, seen_turret
let mut queue: VecDeque<(GridRowColVec, usize, bool)> = VecDeque::new();
let mut visited = BitVec::from_elem(grid.C * grid.R, false);
queue.push_back((soldier_loc.convert(), 0, false));
visited.set(soldier_loc.data[0] * grid.C + soldier_loc.data[1], true);
while !queue.is_empty() {
let (loc, dist, seen_turret) = queue.pop_front().unwrap();
let visible_turrets = turret_reachable_squares_list
.iter()
.enumerate()
.filter(|(turret_index, turret_squares)| {
turret_locations.contains(t_mapping.get_by_left(turret_index).unwrap())
&& turret_squares.contains(&loc)
})
.map(|(turret_index, _)| turret_index);
let mut turret_visible = false;
for turret_index in visible_turrets {
turret_visible = true;
if !is_g_prime || !seen_turret {
let s_vertex = *s_mapping.get_by_right(soldier_loc).unwrap();
//The turret index already uses the original grid's indexing
/*debug!("Found s{} t{} mapped to soldier {} => {} at loc {}",
soldier_index, turret_index, s_vertex, t_vertex, loc);*/
G.insert((s_vertex, s_mapping.len() + turret_index));
}
}
//no need to queue once we have been shot by a turret
if is_g_prime && turret_visible {
continue;
}
/*
debug!(
"Viewing {} dist {} seen turret? {} turret visible? {}",
loc, dist, seen_turret, turret_visible
);*/
for dir in DIRECTIONS.iter() {
let new_loc = loc.clone() + dir;
if let Some(tile) = grid.get_value(&new_loc) {
if *tile == Building {
continue;
}
let new_loc_index = (new_loc.data[0] * grid.C as i64 + new_loc.data[1]) as usize;
if visited[new_loc_index] {
continue;
}
visited.set(new_loc_index, true);
let new_dist = dist + 1;
if new_dist > M {
continue;
}
let new_seen_turret = seen_turret || turret_visible;
queue.push_back((new_loc, new_dist, new_seen_turret));
}
}
}
}
debug!("Built graph from\n{}\n S={} T={}", grid, S, T);
G
}
impl Display for Grid<Tile>
{
fn fmt(&self, f: &mut Formatter) -> fmt::Result
{
for r in 0..self.R {
for c in 0..self.C {
if let Err(err) = write!(f, "{}", self[(r, c)]) |
}
if let Err(err) = writeln!(f, "") {
return Err(err);
}
}
write!(f, "")
}
}
| {
return Err(err);
} | conditional_block |
d.rs | // maybe like Problem D. Descending in the Dark round 2 2012
/*
Bipartite matching
Grid
BFS
Cycles
Hard
*/
use crate::algo::graph::flow::*;
use crate::algo::graph::*;
use crate::util::grid::constants::*;
use crate::util::grid::{Grid, GridCoord, GridRowColVec};
use crate::util::input::*;
//use std::thread;
use bimap::BiMap;
use bit_vec::BitVec;
use indexmap::IndexSet;
use std::collections::HashSet;
use std::collections::VecDeque;
use std::default::Default;
use std::fmt;
use std::fmt::{Display, Formatter};
use std::io::Write;
use std::sync::mpsc::channel;
use std::time::Instant;
use threadpool::ThreadPool;
pub fn solve_all_cases()
{
run_cases(
&["D-small-practice", "D-large-practice"],
"y2017round2",
|reader, buffer| {
//let mut children: Vec<thread::JoinHandle<_>> = vec![];
let pool = ThreadPool::new(6);
let (tx, rx) = channel();
let t = reader.read_int();
for case in 1..=t {
let (C, R, M) = reader.read_tuple_3::<usize>();
let mut grid: Grid<Tile> = Grid::new(R, C);
for r in 0..R {
let row = reader.read_chars(C);
for (c, t) in row.iter().enumerate() {
grid[(r, c)] = Tile::from(*t);
}
}
let tx = tx.clone();
pool.execute(move || {
let now = Instant::now();
let _ = writeln!(::std::io::stderr(), "Starting {} of {} ", case, t);
let s = solve(case, &mut grid, M);
tx.send((case, s)).expect("Channel is there");
let duration = now.elapsed();
let secs = duration.as_secs() as f64 + duration.subsec_nanos() as f64 / 1e9f64;
let _ = writeln!(
::std::io::stderr(),
"Finished #{} in {:.2} second(s)",
case,
secs
);
});
}
let mut output = rx.iter().take(t as usize).collect::<Vec<_>>();
output.sort();
for (_, s) in output {
write!(buffer, "{}", s).unwrap();
}
},
);
}
#[derive(Debug, Copy, Clone, PartialEq)]
enum Tile
{
Empty,
Building,
Soldier,
Turret,
}
use self::Tile::*;
use crate::util::codejam::run_cases;
impl Tile
{
fn to_char(self) -> char
{
match self {
Empty => '.',
Building => '#',
Soldier => 'S',
Turret => 'T',
}
}
}
impl From<char> for Tile
{
fn from(item: char) -> Self
{
match item {
'.' => Empty,
'#' => Building,
'S' => Soldier,
'T' => Turret,
_ => panic!("Character not recognized: {}", item),
}
}
}
impl Display for Tile
{
fn fmt(&self, f: &mut Formatter) -> fmt::Result
{
write!(f, "{}", self.to_char())
}
}
impl Default for Tile
{
fn default() -> Tile
{
Empty
}
}
//problem specific code
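//reachable: every square the turret at `location` can hit -- a straight ray along each
//direction in DIRECTIONS, stopped by the first Building tile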
fn reachable(grid: &Grid<Tile>, location: &GridCoord) -> HashSet<GridRowColVec>
{
let mut r = HashSet::new();
//debug!("\nTracing {} starting at {}", location, direction);
for direction in DIRECTIONS.iter() {
let mut loc: GridRowColVec = location.convert();
for _ in 0..=grid.R + grid.C {
loc += direction;
if let Some(tile) = grid.get_value(&loc) {
match *tile {
Building => {
break;
}
_ => {
r.insert(loc.clone());
}
};
} else {
break;
}
}
}
r
}
/*
impl<L, R> FromIterator<(L, R)> for BiMap<L, R>
{
fn from_iter<I: IntoIterator<Item = (L, R)>>(iter: I) -> Self
{
let mut c = BiMap::new();
for i in iter {
c.insert(i.0, i.1);
}
c
}
}*/
fn | <'a>(case_no: u32, grid: &mut Grid<Tile>, M_soldier_limit: usize) -> String
{
debug!(
"Solving case {}\nM={}\n{}\n",
case_no, M_soldier_limit, grid
);
//original soldier & turret index to location map
let S_map = grid
.filter_by_val(&Soldier)
.enumerate()
.collect::<BiMap<_, _>>();
let turret_locations = grid.filter_by_val(&Turret).collect::<Vec<_>>();
//precalculate what squares a turret can reach
let turret_reachable_squares_list = turret_locations
.iter()
.map(|t_loc| reachable(&grid, &t_loc))
.collect::<Vec<_>>();
let T_map = turret_locations
.into_iter()
.enumerate()
.collect::<BiMap<_, _>>();
let S = grid.filter_by_val(&Soldier).count();
let T = grid.filter_by_val(&Turret).count();
//Construct the initial Graph
let G_edges = build_graph(
&grid,
false,
M_soldier_limit,
&S_map,
&T_map,
&turret_reachable_squares_list,
);
let mut G = FlowGraph::new(2 + S + T, 4);
for uv in G_edges {
G.add_edge(uv.0, uv.1, 1, 1);
}
let source = S + T;
let sink = S + T + 1;
let vertex_to_string = |v: usize| match v {
s if s < S => format!("Soldier #{} ({:?})", s + 1, *S_map.get_by_left(&s).unwrap()),
t if t >= S && t < S + T => format!(
"Turret #{} ({:?})",
t - S + 1,
*T_map.get_by_left(&(t - S)).unwrap()
),
v if v == sink => "Sink".to_string(),
_source => "Source".to_string(),
};
//BFS for each soldier
//will be in left to right order, then top down order
//Now find max matching of G (G has an edge from soldier s to turret t if and only if soldier s can destroy turret t after all other turrets have been destroyed)
for s in 0..S {
G.add_edge(source, s, 1, 1);
}
for t in S..S + T {
G.add_edge(t, sink, 1, 1);
}
let (R, flow) = G.dinic(source, sink);
let mut ans = format!("Case #{}: {}\n", case_no, R);
//Compute initial matching
let mut M = flow
.iter()
.enumerate()
.filter(|&(_e, f)| *f > 0)
//map to u->v
.map(|(e, _f)| (G.graph.endp[e ^ 1], G.graph.endp[e]))
//leave out source and sink nodes
.filter(|&(u, v)| u != source && v != sink)
.collect::<Vec<_>>();
debug!(
"Edges in M initial matching=\n{}\n",
M.iter()
.map(|&(u, v)| format!("{}->{}", vertex_to_string(u), vertex_to_string(v)))
.collect::<Vec<_>>()
.join("\n")
);
let mut r = R;
while r > 0 {
//Let us define the graph G' with the same nodes as G, but an edge between soldier s and turret t only exists in G' if s can destroy t with the other turrets active
let Gprime = build_graph(
&grid,
true,
M_soldier_limit,
&S_map,
&T_map,
&turret_reachable_squares_list,
);
//Now build graph H
let mut H = Graph::new(S + T, 4);
let soldiers_in_m = M.iter().map(|&(s, _t)| s).collect::<Vec<_>>();
for &(s, t) in Gprime.iter() {
if soldiers_in_m.contains(&s) {
H.add_edge(s, t);
}
}
for &(s, t) in M.iter() {
H.add_edge(t, s);
}
debug!(
"Current matching M =\n{}\n",
M.iter()
.map(|&(u, v)| format!("{}->{}", vertex_to_string(u), vertex_to_string(v)))
.collect::<Vec<_>>()
.join("\n")
);
debug!(
"Edges in G'=\n{}\n",
Gprime
.iter()
.map(|&(u, v)| format!("{}->{}", vertex_to_string(u), vertex_to_string(v)))
.collect::<Vec<_>>()
.join("\n")
);
debug!(
"Edges in H=\n{}\n",
H.edges()
.map(|(u, v)| format!("{}->{}", vertex_to_string(u), vertex_to_string(v)))
.collect::<Vec<_>>()
.join("\n")
);
let turrets_in_M = M.iter().map(|&(_s, t)| t).collect::<Vec<_>>();
//find an edge (s,t') where t' is not in M
let st_prime = Gprime.iter().find(|&(_s, t)| !turrets_in_M.contains(t));
if let Some(&(s, t)) = st_prime {
debug!("Found (s,t') s={} t'={}", s, t - S);
ans += &format!("{} {}\n", s + 1, t - S + 1);
grid[S_map.get_by_left(&s).unwrap()] = Empty;
grid[T_map.get_by_left(&(t - S)).unwrap()] = Empty;
r -= 1;
//Also remove from current matching
let to_remove = M
.iter()
.position(|&(s_in_m, _t)| s_in_m == s)
.expect("Soldier should be in mapping");
M.remove(to_remove);
continue;
}
//Now we need to find a cycle
//Start at a soldier in H
let soldier_in_h = H.edges().find(|&(u, _v)| u < S).unwrap().0;
let mut cycle_edges = VecDeque::new();
let mut edge = (
soldier_in_h,
H.adj_list_with_edges(soldier_in_h).next().unwrap().1,
);
let mut visited = BitVec::from_elem(H.num_v(), false);
while !visited[edge.0] {
visited.set(edge.0, true);
cycle_edges.push_back(edge);
debug!(
"pushed Edge {:?} ",
format!("{}->{}", vertex_to_string(edge.0), vertex_to_string(edge.1))
);
//adj list returns an (internal edge index, next vertex)
edge = (edge.1, H.adj_list_with_edges(edge.1).next().unwrap().1);
debug!("Edge {:?} ", edge);
}
//cut to the actual cycle found
let cycle_end = cycle_edges.back().unwrap().1;
let cycle_start = cycle_edges
.iter()
.position(|&(u, _v)| u == cycle_end)
.unwrap();
cycle_edges.drain(0..cycle_start);
debug!(
"Cycle C =\n{}\n",
cycle_edges
.iter()
.map(|&(u, v)| format!("{}->{}", vertex_to_string(u), vertex_to_string(v)))
.collect::<Vec<_>>()
.join("\n")
);
//Consider a new matching M' of G consisting of the edges of M whose reverse is not in C,
//plus the edges in C whose reverse is not in M. That is, M' is M but exchanging the edges
//present in C in some direction. M' in this case is also a matching of G of the same size as M
//because it is a cycle, we know we have new edges from G' to replace the ones removed from M
let mut M_new: Vec<(usize, usize)> = Vec::new();
M_new.extend(M.iter().filter(|&&(u, v)| !cycle_edges.contains(&(v, u))));
M_new.extend(cycle_edges.iter().filter(|&&(u, v)| !M.contains(&(v, u))));
debug!(
"New matching M =\n{}\n",
M_new
.iter()
.map(|&(u, v)| format!("{}->{}", vertex_to_string(u), vertex_to_string(v)))
.collect::<Vec<_>>()
.join("\n")
);
//Find all edges from G' which are actions we can take
let st_actions = M_new
.iter()
.filter(|&uv| Gprime.contains(uv))
.collect::<Vec<_>>();
for &&(s, t) in st_actions.iter() {
debug!("Taking actions from g' s {} t {}", s + 1, t + 1 - S);
ans += &format!("{} {}\n", s + 1, t - S + 1);
grid[S_map.get_by_left(&s).unwrap()] = Empty;
grid[T_map.get_by_left(&(t - S)).unwrap()] = Empty;
r -= 1;
}
M = M_new;
}
ans
}
fn build_graph(
grid: &Grid<Tile>,
is_g_prime: bool,
M: usize,
s_mapping: &BiMap<usize, GridCoord>,
t_mapping: &BiMap<usize, GridCoord>,
turret_reachable_squares_list: &Vec<HashSet<GridRowColVec>>,
) -> IndexSet<(usize, usize)>
{
let mut G: IndexSet<(usize, usize)> = IndexSet::new();
let turret_locations = grid.filter_by_val(&Turret).collect::<HashSet<_>>();
/*
for (turret_index, turret_squares) in turret_squares_list.iter().enumerate() {
debug!("Turret {} can see {:?}", turret_index, turret_squares);
}
*/
let soldier_locations = grid.filter_by_val(&Soldier).collect::<Vec<_>>();
let S = soldier_locations.len();
let T = turret_reachable_squares_list.len();
for (_soldier_index, soldier_loc) in soldier_locations.iter().enumerate() {
//debug!("BFS search on soldier {} @ {}", soldier_index, soldier_loc);
//Node is location, distance, seen_turret
let mut queue: VecDeque<(GridRowColVec, usize, bool)> = VecDeque::new();
let mut visited = BitVec::from_elem(grid.C * grid.R, false);
queue.push_back((soldier_loc.convert(), 0, false));
visited.set(soldier_loc.data[0] * grid.C + soldier_loc.data[1], true);
while !queue.is_empty() {
let (loc, dist, seen_turret) = queue.pop_front().unwrap();
let visible_turrets = turret_reachable_squares_list
.iter()
.enumerate()
.filter(|(turret_index, turret_squares)| {
turret_locations.contains(t_mapping.get_by_left(turret_index).unwrap())
&& turret_squares.contains(&loc)
})
.map(|(turret_index, _)| turret_index);
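//Only turrets still present on the grid count as active (the turret_locations check above)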
let mut turret_visible = false;
for turret_index in visible_turrets {
turret_visible = true;
if !is_g_prime || !seen_turret {
let s_vertex = *s_mapping.get_by_right(soldier_loc).unwrap();
//The turret index already uses the original grid's indexing
/*debug!("Found s{} t{} mapped to soldier {} => {} at loc {}",
soldier_index, turret_index, s_vertex, t_vertex, loc);*/
G.insert((s_vertex, s_mapping.len() + turret_index));
}
}
//no need to queue once we have been shot by a turret
if is_g_prime && turret_visible {
continue;
}
/*
debug!(
"Viewing {} dist {} seen turret? {} turret visible? {}",
loc, dist, seen_turret, turret_visible
);*/
for dir in DIRECTIONS.iter() {
let new_loc = loc.clone() + dir;
if let Some(tile) = grid.get_value(&new_loc) {
if *tile == Building {
continue;
}
let new_loc_index = (new_loc.data[0] * grid.C as i64 + new_loc.data[1]) as usize;
if visited[new_loc_index] {
continue;
}
visited.set(new_loc_index, true);
let new_dist = dist + 1;
if new_dist > M {
continue;
}
let new_seen_turret = seen_turret || turret_visible;
queue.push_back((new_loc, new_dist, new_seen_turret));
}
}
}
}
debug!("Built graph from\n{}\n S={} T={}", grid, S, T);
G
}
impl Display for Grid<Tile>
{
fn fmt(&self, f: &mut Formatter) -> fmt::Result
{
for r in 0..self.R {
for c in 0..self.C {
write!(f, "{}", self[(r, c)])?;
}
writeln!(f)?;
}
Ok(())
}
}
| solve | identifier_name |
resource_fusion_sec_azure.go | /*
Copyright 2021, Pure Storage Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cbs
import (
"context"
"fmt"
"log"
"strings"
"time"
"github.com/Azure/azure-sdk-for-go/profiles/latest/resources/mgmt/managedapplications"
"github.com/Azure/go-autorest/autorest/to"
mapset "github.com/deckarep/golang-set"
"github.com/hashicorp/terraform-plugin-log/tflog"
"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation"
)
// Default managed application plan
const (
defaultFusionSECPlanName = "pure_sec_1_0_0"
defaultFusionSECPlanProduct = "pure_fusion_storage_endpoint_collection"
defaultFusionSECPlanPublisher = "purestoragemarketplaceadmin"
defaultFusionSECPlanVersion = "1.0.3"
)
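// Defaults identifying the Fusion SEC offer in the Azure Marketplace; they are used
// whenever the optional "plan" block is not supplied.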
var fusionSECAzureTemplateTags = []string{
"Microsoft.Network/loadBalancers",
"Microsoft.ManagedIdentity/userAssignedIdentities",
}
var fusionSECAzureParams = []interface{}{
"fusionSECName",
"location",
"loadBalancerNetworkRg",
"loadBalancerNetworkName",
"loadBalancerSubnet",
}
var renamedFusionSECAzureParams = map[string]string{}
var fusionSECAzureTFOutputs = []string{
"applicationName",
"managedResourceGroupName",
"hmvip0",
"hmvip1",
"loadBalancerFullIdentityId",
}
func | () *schema.Resource {
return &schema.Resource{
CreateContext: resourceFusionSECAzureCreate,
ReadContext: resourceFusionSECAzureRead,
UpdateContext: resourceFusionSECAzureUpdate,
DeleteContext: resourceFusionSECAzureDelete,
Schema: map[string]*schema.Schema{
"resource_group_name": {
Type: schema.TypeString,
Required: true,
ForceNew: true,
ValidateFunc: validateAzureResourceGroupName,
},
"location": {
Type: schema.TypeString,
Required: true,
},
// parameters
"fusion_sec_name": {
Description: "The name of the Fusion Storage Endpoint Collection (SEC). 0-59 alphanumeric characters only.",
Type: schema.TypeString,
Required: true,
ValidateFunc: validateAzureManagedApplicationName,
},
"load_balancer_network_rg": {
Type: schema.TypeString,
Required: true,
},
"load_balancer_network_name": {
Type: schema.TypeString,
Required: true,
},
"load_balancer_subnet": {
Type: schema.TypeString,
Required: true,
},
"jit_approval_group_object_ids": {
Description: "This is a list of Azure group object IDs for people who are allowed to approve JIT requests",
Required: true,
Type: schema.TypeList,
Elem: &schema.Schema{
Type: schema.TypeString,
ValidateFunc: validation.IsUUID,
},
},
"plan": {
Type: schema.TypeList,
Optional: true,
MaxItems: 1,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"name": {
Type: schema.TypeString,
Required: true,
ValidateFunc: validation.StringIsNotEmpty,
},
"product": {
Type: schema.TypeString,
Required: true,
ValidateFunc: validation.StringIsNotEmpty,
},
"publisher": {
Type: schema.TypeString,
Required: true,
ValidateFunc: validation.StringIsNotEmpty,
},
"version": {
Type: schema.TypeString,
Required: true,
ValidateFunc: validation.StringIsNotEmpty,
},
},
},
},
"tags": {
Type: schema.TypeMap,
Optional: true,
Elem: &schema.Schema{
Type: schema.TypeString,
ValidateFunc: validation.StringIsNotEmpty,
},
},
// Outputs
"application_name": {
Type: schema.TypeString,
Computed: true,
},
"managed_resource_group_name": {
Type: schema.TypeString,
Computed: true,
},
"hmvip0": {
Type: schema.TypeString,
Computed: true,
},
"hmvip1": {
Type: schema.TypeString,
Computed: true,
},
"load_balancer_full_identity_id": {
Type: schema.TypeString,
Computed: true,
},
},
Timeouts: &schema.ResourceTimeout{
Create: schema.DefaultTimeout(30 * time.Minute),
Read: schema.DefaultTimeout(5 * time.Minute),
Delete: schema.DefaultTimeout(30 * time.Minute),
},
}
}
func resourceFusionSECAzureCreate(ctx context.Context, d *schema.ResourceData, m interface{}) (returnedDiags diag.Diagnostics) {
tflog.Trace(ctx, "resourceFusionSECAzureCreate")
azureClient, diags := m.(*CbsService).azureClientService(ctx)
if diags.HasError() {
return diags
}
name := d.Get("fusion_sec_name").(string)
managedResourceGroup := toAzureManagedResourceGroup(name)
resourceGroupName := d.Get("resource_group_name").(string)
if d.IsNewResource() {
existing, err := azureClient.AppsGet(ctx, resourceGroupName, name)
if err != nil {
if !responseWasNotFound(existing.Response) {
return diag.Errorf("failed to check for presence of existing Managed Application Name %q (Resource Group %q): %+v", name, resourceGroupName, err)
}
}
if existing.ID != nil && *existing.ID != "" {
return diag.Errorf(
"A resource with the name %q, Resource Group %q and ID %q already exists - to be managed via Terraform this resource needs to be imported into the State.",
name,
resourceGroupName,
*existing.ID,
)
}
}
parameters := managedapplications.Application{
Location: to.StringPtr(d.Get("location").(string)),
}
targetResourceGroupId := fmt.Sprintf("/subscriptions/%s/resourceGroups/%s", azureClient.SubscriptionID(), managedResourceGroup)
parameters.ApplicationProperties = &managedapplications.ApplicationProperties{
ManagedResourceGroupID: to.StringPtr(targetResourceGroupId),
}
parameters.Kind = to.StringPtr("MarketPlace")
if v, ok1 := d.GetOk("plan"); ok1 && len(v.([]interface{})) > 0 {
parameters.Plan = expandPlan(v.([]interface{}))
} else {
parameters.Plan = &managedapplications.Plan{
Name: to.StringPtr(defaultFusionSECPlanName),
Product: to.StringPtr(defaultFusionSECPlanProduct),
Publisher: to.StringPtr(defaultFusionSECPlanPublisher),
Version: to.StringPtr(defaultFusionSECPlanVersion),
}
}
parameters.Parameters = make(map[string]interface{})
setAppParameter := func(key string, value interface{}) {
(parameters.Parameters.(map[string]interface{}))[key] = map[string]interface{}{"value": value}
}
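// ARM managed-application parameters are wrapped as {"<name>": {"value": <v>}}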
for _, value := range fusionSECAzureParams {
valueStr := value.(string)
setAppParameter(valueStr, d.Get(templateToTFParam(valueStr, renamedFusionSECAzureParams)))
}
returnedDiags = setAzureJitAccessPolicy(¶meters, d)
if v, ok := d.GetOk("tags"); ok {
tags := v.(map[string]interface{})
tagsMap := make(map[string]interface{})
for _, tag := range fusionSECAzureTemplateTags {
tagsMap[tag] = tags
}
setAppParameter("tagsByResource", tagsMap)
}
// Error out now, before we create resources
if returnedDiags.HasError() {
return returnedDiags
}
tflog.Trace(ctx, "resourceFusionSECAzureCreate AppsCreateOrUpdate")
err := azureClient.AppsCreateOrUpdate(ctx, resourceGroupName, name, parameters)
defer func() {
if returnedDiags.HasError() {
if err = azureClient.AppsDelete(ctx, resourceGroupName, name); err != nil {
tflog.Error(
ctx,
fmt.Sprintf(
"failed to delete Managed Application %q (Resource Group %q) after failed CreateOrUpdate operation: %+v",
name,
resourceGroupName,
err,
),
)
}
}
}()
if err != nil {
return diag.FromErr(err)
}
resp, err := azureClient.AppsGet(ctx, resourceGroupName, name)
if err != nil {
return diag.Errorf("failed to retrieve Managed Application %q (Resource Group %q): %+v", name, resourceGroupName, err)
}
if resp.ID == nil || *resp.ID == "" {
return diag.Errorf("cannot read Managed Application %q (Resource Group %q) ID", name, resourceGroupName)
}
d.SetId(*resp.ID)
diags = resourceFusionSECAzureRead(ctx, d, m)
if diags.HasError() {
returnedDiags = append(returnedDiags, diags...)
}
return returnedDiags
}
func resourceFusionSECAzureRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
tflog.Trace(ctx, "resourceFusionSECAzureRead")
azureClient, diags := m.(*CbsService).azureClientService(ctx)
if diags.HasError() {
return diags
}
v, ok := d.GetOk("fusion_sec_name")
if !ok {
log.Printf("[WARN] No Managed Application found with Id %q, removing from state", d.Id())
d.SetId("")
return nil
}
appName := v.(string)
managedResourceGroup := toAzureManagedResourceGroup(appName)
resourceGroup := d.Get("resource_group_name").(string)
resp, err := azureClient.AppsGet(ctx, resourceGroup, appName)
if err != nil {
if responseWasNotFound(resp.Response) {
log.Printf("[WARN] Managed Application %q does not exist - removing from state", d.Id())
d.SetId("")
return nil
}
return diag.Errorf("failed to read Managed Application %q (Resource Group %q): %+v", appName, resourceGroup, err)
}
if err := d.Set("application_name", appName); err != nil {
return diag.FromErr(err)
}
if err := d.Set("managed_resource_group_name", managedResourceGroup); err != nil {
return diag.FromErr(err)
}
if err := d.Set("resource_group_name", resourceGroup); err != nil {
return diag.FromErr(err)
}
if err := d.Set("location", resp.Location); err != nil {
return diag.FromErr(err)
}
if props := resp.ApplicationProperties; props != nil {
params := formatAzureParameters(props.Parameters)
fusionSECParamSet := mapset.NewSetFromSlice(fusionSECAzureParams)
for k, v := range params {
// SecureString parameters will always have a null value, so ignore them
if v.valType != "SecureString" {
if k == "tagsByResource" {
maps := v.value.(map[string]interface{})
for _, tagValue := range maps {
if err := d.Set("tags", tagValue); err != nil {
return diag.FromErr(err)
}
break
}
}
if fusionSECParamSet.Contains(k) {
if err := d.Set(templateToTFParam(k, renamedFusionSECAzureParams), v.value); err != nil {
return diag.FromErr(err)
}
}
}
}
if err := d.Set("jit_approval_group_object_ids", flattenAzureJitApprovalGroupIds(props.JitAccessPolicy)); err != nil {
return diag.FromErr(err)
}
outputs := props.Outputs.(map[string]interface{})
fusionSECAzureTFOutputSet := mapset.NewSet()
for _, s := range fusionSECAzureTFOutputs {
fusionSECAzureTFOutputSet.Add(s)
}
for k, v := range outputs {
if v != nil {
v := v.(map[string]interface{})
if fusionSECAzureTFOutputSet.Contains(k) {
if !strings.HasPrefix(k, "hmvip") {
k = toSnake(k)
}
if err := d.Set(k, v["value"]); err != nil {
return diag.FromErr(err)
}
}
}
}
}
return nil
}
func resourceFusionSECAzureUpdate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
tflog.Trace(ctx, "resourceFusionSECAzureUpdate")
diags := resourceFusionSECAzureRead(ctx, d, m)
if diags.HasError() {
return diags
}
return diag.Errorf("Updates are not supported.")
}
func resourceFusionSECAzureDelete(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
tflog.Trace(ctx, "resourceFusionSECAzureDelete")
azureClient, diags := m.(*CbsService).azureClientService(ctx)
if diags.HasError() {
return diags
}
resourceGroup := d.Get("resource_group_name").(string)
appName := d.Get("fusion_sec_name").(string)
err := azureClient.AppsDelete(ctx, resourceGroup, appName)
return diag.FromErr(err)
}
| resourceFusionSECAzure | identifier_name |
resource_fusion_sec_azure.go | /*
Copyright 2021, Pure Storage Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cbs
import (
"context"
"fmt"
"log"
"strings"
"time"
"github.com/Azure/azure-sdk-for-go/profiles/latest/resources/mgmt/managedapplications"
"github.com/Azure/go-autorest/autorest/to"
mapset "github.com/deckarep/golang-set"
"github.com/hashicorp/terraform-plugin-log/tflog"
"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation"
)
// Default managed application plan
const (
defaultFusionSECPlanName = "pure_sec_1_0_0"
defaultFusionSECPlanProduct = "pure_fusion_storage_endpoint_collection"
defaultFusionSECPlanPublisher = "purestoragemarketplaceadmin"
defaultFusionSECPlanVersion = "1.0.3"
)
var fusionSECAzureTemplateTags = []string{
"Microsoft.Network/loadBalancers",
"Microsoft.ManagedIdentity/userAssignedIdentities",
}
var fusionSECAzureParams = []interface{}{
"fusionSECName",
"location",
"loadBalancerNetworkRg",
"loadBalancerNetworkName",
"loadBalancerSubnet",
}
var renamedFusionSECAzureParams = map[string]string{}
var fusionSECAzureTFOutputs = []string{
"applicationName",
"managedResourceGroupName",
"hmvip0",
"hmvip1",
"loadBalancerFullIdentityId",
}
func resourceFusionSECAzure() *schema.Resource {
return &schema.Resource{
CreateContext: resourceFusionSECAzureCreate,
ReadContext: resourceFusionSECAzureRead,
UpdateContext: resourceFusionSECAzureUpdate,
DeleteContext: resourceFusionSECAzureDelete,
Schema: map[string]*schema.Schema{
"resource_group_name": {
Type: schema.TypeString,
Required: true,
ForceNew: true,
ValidateFunc: validateAzureResourceGroupName,
},
"location": {
Type: schema.TypeString,
Required: true,
},
// parameters
"fusion_sec_name": {
Description: "The name of the Fusion Storage Endpoint Collection (SEC). 0-59 alphanumeric characters only.",
Type: schema.TypeString,
Required: true,
ValidateFunc: validateAzureManagedApplicationName,
},
"load_balancer_network_rg": {
Type: schema.TypeString,
Required: true,
},
"load_balancer_network_name": {
Type: schema.TypeString,
Required: true,
},
"load_balancer_subnet": {
Type: schema.TypeString,
Required: true,
},
"jit_approval_group_object_ids": {
Description: "This is a list of Azure group object IDs for people who are allowed to approve JIT requests",
Required: true,
Type: schema.TypeList,
Elem: &schema.Schema{
Type: schema.TypeString,
ValidateFunc: validation.IsUUID,
},
},
"plan": {
Type: schema.TypeList,
Optional: true,
MaxItems: 1,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"name": {
Type: schema.TypeString,
Required: true,
ValidateFunc: validation.StringIsNotEmpty,
},
"product": {
Type: schema.TypeString,
Required: true,
ValidateFunc: validation.StringIsNotEmpty,
},
"publisher": {
Type: schema.TypeString,
Required: true,
ValidateFunc: validation.StringIsNotEmpty,
},
"version": {
Type: schema.TypeString,
Required: true,
ValidateFunc: validation.StringIsNotEmpty,
},
},
},
},
"tags": {
Type: schema.TypeMap,
Optional: true,
Elem: &schema.Schema{
Type: schema.TypeString,
ValidateFunc: validation.StringIsNotEmpty,
},
},
// Outputs
"application_name": {
Type: schema.TypeString,
Computed: true,
},
"managed_resource_group_name": {
Type: schema.TypeString,
Computed: true,
},
"hmvip0": {
Type: schema.TypeString,
Computed: true,
},
"hmvip1": {
Type: schema.TypeString,
Computed: true,
},
"load_balancer_full_identity_id": {
Type: schema.TypeString,
Computed: true,
},
},
Timeouts: &schema.ResourceTimeout{
Create: schema.DefaultTimeout(30 * time.Minute),
Read: schema.DefaultTimeout(5 * time.Minute),
Delete: schema.DefaultTimeout(30 * time.Minute),
},
}
}
func resourceFusionSECAzureCreate(ctx context.Context, d *schema.ResourceData, m interface{}) (returnedDiags diag.Diagnostics) {
tflog.Trace(ctx, "resourceFusionSECAzureCreate")
azureClient, diags := m.(*CbsService).azureClientService(ctx)
if diags.HasError() {
return diags
}
name := d.Get("fusion_sec_name").(string)
managedResourceGroup := toAzureManagedResourceGroup(name)
resourceGroupName := d.Get("resource_group_name").(string)
if d.IsNewResource() {
existing, err := azureClient.AppsGet(ctx, resourceGroupName, name)
if err != nil {
if !responseWasNotFound(existing.Response) {
return diag.Errorf("failed to check for presence of existing Managed Application Name %q (Resource Group %q): %+v", name, resourceGroupName, err)
}
}
if existing.ID != nil && *existing.ID != "" {
return diag.Errorf(
"A resource with the name %q, Resource Group %q and ID %q already exists - to be managed via Terraform this resource needs to be imported into the State.",
name,
resourceGroupName,
*existing.ID,
)
}
}
parameters := managedapplications.Application{
Location: to.StringPtr(d.Get("location").(string)),
}
targetResourceGroupId := fmt.Sprintf("/subscriptions/%s/resourceGroups/%s", azureClient.SubscriptionID(), managedResourceGroup)
parameters.ApplicationProperties = &managedapplications.ApplicationProperties{
ManagedResourceGroupID: to.StringPtr(targetResourceGroupId),
}
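// Kind "MarketPlace" marks the application as deployed from a marketplace offer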
parameters.Kind = to.StringPtr("MarketPlace")
if v, ok1 := d.GetOk("plan"); ok1 && len(v.([]interface{})) > 0 {
parameters.Plan = expandPlan(v.([]interface{}))
} else {
parameters.Plan = &managedapplications.Plan{
Name: to.StringPtr(defaultFusionSECPlanName),
Product: to.StringPtr(defaultFusionSECPlanProduct),
Publisher: to.StringPtr(defaultFusionSECPlanPublisher),
Version: to.StringPtr(defaultFusionSECPlanVersion),
}
}
parameters.Parameters = make(map[string]interface{})
setAppParameter := func(key string, value interface{}) {
(parameters.Parameters.(map[string]interface{}))[key] = map[string]interface{}{"value": value}
}
for _, value := range fusionSECAzureParams |
returnedDiags = setAzureJitAccessPolicy(¶meters, d)
if v, ok := d.GetOk("tags"); ok {
tags := v.(map[string]interface{})
tagsMap := make(map[string]interface{})
for _, tag := range fusionSECAzureTemplateTags {
tagsMap[tag] = tags
}
setAppParameter("tagsByResource", tagsMap)
}
// Error out now, before we create resources
if returnedDiags.HasError() {
return returnedDiags
}
tflog.Trace(ctx, "resourceFusionSECAzureCreate AppsCreateOrUpdate")
err := azureClient.AppsCreateOrUpdate(ctx, resourceGroupName, name, parameters)
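// Best-effort cleanup: if any later step fails, delete the partially created application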
defer func() {
if returnedDiags.HasError() {
if err = azureClient.AppsDelete(ctx, resourceGroupName, name); err != nil {
tflog.Error(
ctx,
fmt.Sprintf(
"failed to delete Managed Application %q (Resource Group %q) after failed CreateOrUpdate operation: %+v",
name,
resourceGroupName,
err,
),
)
}
}
}()
if err != nil {
return diag.FromErr(err)
}
resp, err := azureClient.AppsGet(ctx, resourceGroupName, name)
if err != nil {
return diag.Errorf("failed to retrieve Managed Application %q (Resource Group %q): %+v", name, resourceGroupName, err)
}
if resp.ID == nil || *resp.ID == "" {
return diag.Errorf("cannot read Managed Application %q (Resource Group %q) ID", name, resourceGroupName)
}
d.SetId(*resp.ID)
diags = resourceFusionSECAzureRead(ctx, d, m)
if diags.HasError() {
returnedDiags = append(returnedDiags, diags...)
}
return returnedDiags
}
func resourceFusionSECAzureRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
tflog.Trace(ctx, "resourceFusionSECAzureRead")
azureClient, diags := m.(*CbsService).azureClientService(ctx)
if diags.HasError() {
return diags
}
v, ok := d.GetOk("fusion_sec_name")
if !ok {
log.Printf("[WARN] No Managed Application found with Id %q, removing from state", d.Id())
d.SetId("")
return nil
}
appName := v.(string)
managedResourceGroup := toAzureManagedResourceGroup(appName)
resourceGroup := d.Get("resource_group_name").(string)
resp, err := azureClient.AppsGet(ctx, resourceGroup, appName)
if err != nil {
if responseWasNotFound(resp.Response) {
log.Printf("[WARN] Managed Application %q does not exist - removing from state", d.Id())
d.SetId("")
return nil
}
return diag.Errorf("failed to read Managed Application %q (Resource Group %q): %+v", appName, resourceGroup, err)
}
if err := d.Set("application_name", appName); err != nil {
return diag.FromErr(err)
}
if err := d.Set("managed_resource_group_name", managedResourceGroup); err != nil {
return diag.FromErr(err)
}
if err := d.Set("resource_group_name", resourceGroup); err != nil {
return diag.FromErr(err)
}
if err := d.Set("location", resp.Location); err != nil {
return diag.FromErr(err)
}
if props := resp.ApplicationProperties; props != nil {
params := formatAzureParameters(props.Parameters)
fusionSECParamSet := mapset.NewSetFromSlice(fusionSECAzureParams)
for k, v := range params {
// SecureString parameters will always have a null value, so ignore them
if v.valType != "SecureString" {
if k == "tagsByResource" {
maps := v.value.(map[string]interface{})
for _, tagValue := range maps {
if err := d.Set("tags", tagValue); err != nil {
return diag.FromErr(err)
}
break
}
}
if fusionSECParamSet.Contains(k) {
if err := d.Set(templateToTFParam(k, renamedFusionSECAzureParams), v.value); err != nil {
return diag.FromErr(err)
}
}
}
}
if err := d.Set("jit_approval_group_object_ids", flattenAzureJitApprovalGroupIds(props.JitAccessPolicy)); err != nil {
return diag.FromErr(err)
}
outputs := props.Outputs.(map[string]interface{})
fusionSECAzureTFOutputSet := mapset.NewSet()
for _, s := range fusionSECAzureTFOutputs {
fusionSECAzureTFOutputSet.Add(s)
}
for k, v := range outputs {
if v != nil {
v := v.(map[string]interface{})
if fusionSECAzureTFOutputSet.Contains(k) {
if !strings.HasPrefix(k, "hmvip") {
k = toSnake(k)
}
if err := d.Set(k, v["value"]); err != nil {
return diag.FromErr(err)
}
}
}
}
}
return nil
}
func resourceFusionSECAzureUpdate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
tflog.Trace(ctx, "resourceFusionSECAzureUpdate")
diags := resourceFusionSECAzureRead(ctx, d, m)
if diags.HasError() {
return diags
}
return diag.Errorf("Updates are not supported.")
}
func resourceFusionSECAzureDelete(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
tflog.Trace(ctx, "resourceFusionSECAzureDelete")
azureClient, diags := m.(*CbsService).azureClientService(ctx)
if diags.HasError() {
return diags
}
resourceGroup := d.Get("resource_group_name").(string)
appName := d.Get("fusion_sec_name").(string)
err := azureClient.AppsDelete(ctx, resourceGroup, appName)
return diag.FromErr(err)
}
| {
valueStr := value.(string)
setAppParameter(valueStr, d.Get(templateToTFParam(valueStr, renamedFusionSECAzureParams)))
} | conditional_block |
resource_fusion_sec_azure.go | /*
Copyright 2021, Pure Storage Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cbs
import (
"context"
"fmt"
"log"
"strings"
"time"
"github.com/Azure/azure-sdk-for-go/profiles/latest/resources/mgmt/managedapplications"
"github.com/Azure/go-autorest/autorest/to"
mapset "github.com/deckarep/golang-set"
"github.com/hashicorp/terraform-plugin-log/tflog"
"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation"
)
// Default managed application plan
const (
defaultFusionSECPlanName = "pure_sec_1_0_0"
defaultFusionSECPlanProduct = "pure_fusion_storage_endpoint_collection"
defaultFusionSECPlanPublisher = "purestoragemarketplaceadmin"
defaultFusionSECPlanVersion = "1.0.3"
)
var fusionSECAzureTemplateTags = []string{
"Microsoft.Network/loadBalancers",
"Microsoft.ManagedIdentity/userAssignedIdentities",
}
var fusionSECAzureParams = []interface{}{
"fusionSECName",
"location",
"loadBalancerNetworkRg",
"loadBalancerNetworkName",
"loadBalancerSubnet",
}
var renamedFusionSECAzureParams = map[string]string{}
var fusionSECAzureTFOutputs = []string{
"applicationName",
"managedResourceGroupName",
"hmvip0",
"hmvip1",
"loadBalancerFullIdentityId",
}
func resourceFusionSECAzure() *schema.Resource |
func resourceFusionSECAzureCreate(ctx context.Context, d *schema.ResourceData, m interface{}) (returnedDiags diag.Diagnostics) {
tflog.Trace(ctx, "resourceFusionSECAzureCreate")
azureClient, diags := m.(*CbsService).azureClientService(ctx)
if diags.HasError() {
return diags
}
name := d.Get("fusion_sec_name").(string)
managedResourceGroup := toAzureManagedResourceGroup(name)
resourceGroupName := d.Get("resource_group_name").(string)
if d.IsNewResource() {
existing, err := azureClient.AppsGet(ctx, resourceGroupName, name)
if err != nil {
if !responseWasNotFound(existing.Response) {
return diag.Errorf("failed to check for presence of existing Managed Application Name %q (Resource Group %q): %+v", name, resourceGroupName, err)
}
}
if existing.ID != nil && *existing.ID != "" {
return diag.Errorf(
"A resource with the name %q, Resource Group %q and ID %q already exists - to be managed via Terraform this resource needs to be imported into the State.",
name,
resourceGroupName,
*existing.ID,
)
}
}
parameters := managedapplications.Application{
Location: to.StringPtr(d.Get("location").(string)),
}
targetResourceGroupId := fmt.Sprintf("/subscriptions/%s/resourceGroups/%s", azureClient.SubscriptionID(), managedResourceGroup)
parameters.ApplicationProperties = &managedapplications.ApplicationProperties{
ManagedResourceGroupID: to.StringPtr(targetResourceGroupId),
}
parameters.Kind = to.StringPtr("MarketPlace")
if v, ok1 := d.GetOk("plan"); ok1 && len(v.([]interface{})) > 0 {
parameters.Plan = expandPlan(v.([]interface{}))
} else {
parameters.Plan = &managedapplications.Plan{
Name: to.StringPtr(defaultFusionSECPlanName),
Product: to.StringPtr(defaultFusionSECPlanProduct),
Publisher: to.StringPtr(defaultFusionSECPlanPublisher),
Version: to.StringPtr(defaultFusionSECPlanVersion),
}
}
parameters.Parameters = make(map[string]interface{})
setAppParameter := func(key string, value interface{}) {
(parameters.Parameters.(map[string]interface{}))[key] = map[string]interface{}{"value": value}
}
for _, value := range fusionSECAzureParams {
valueStr := value.(string)
setAppParameter(valueStr, d.Get(templateToTFParam(valueStr, renamedFusionSECAzureParams)))
}
returnedDiags = setAzureJitAccessPolicy(¶meters, d)
if v, ok := d.GetOk("tags"); ok {
tags := v.(map[string]interface{})
tagsMap := make(map[string]interface{})
for _, tag := range fusionSECAzureTemplateTags {
tagsMap[tag] = tags
}
setAppParameter("tagsByResource", tagsMap)
}
// Error out now, before we create resources
if returnedDiags.HasError() {
return returnedDiags
}
tflog.Trace(ctx, "resourceFusionSECAzureCreate AppsCreateOrUpdate")
err := azureClient.AppsCreateOrUpdate(ctx, resourceGroupName, name, parameters)
defer func() {
if returnedDiags.HasError() {
if err = azureClient.AppsDelete(ctx, resourceGroupName, name); err != nil {
tflog.Error(
ctx,
fmt.Sprintf(
"failed to delete Managed Application %q (Resource Group %q) after failed CreateOrUpdate operation: %+v",
name,
resourceGroupName,
err,
),
)
}
}
}()
if err != nil {
return diag.FromErr(err)
}
resp, err := azureClient.AppsGet(ctx, resourceGroupName, name)
if err != nil {
return diag.Errorf("failed to retrieve Managed Application %q (Resource Group %q): %+v", name, resourceGroupName, err)
}
if resp.ID == nil || *resp.ID == "" {
return diag.Errorf("cannot read Managed Application %q (Resource Group %q) ID", name, resourceGroupName)
}
d.SetId(*resp.ID)
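// The fully qualified Azure resource ID doubles as the Terraform resource ID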
diags = resourceFusionSECAzureRead(ctx, d, m)
if diags.HasError() {
returnedDiags = append(returnedDiags, diags...)
}
return returnedDiags
}
func resourceFusionSECAzureRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
tflog.Trace(ctx, "resourceFusionSECAzureRead")
azureClient, diags := m.(*CbsService).azureClientService(ctx)
if diags.HasError() {
return diags
}
v, ok := d.GetOk("fusion_sec_name")
if !ok {
log.Printf("[WARN] No Managed Application found with Id %q, removing from state", d.Id())
d.SetId("")
return nil
}
appName := v.(string)
managedResourceGroup := toAzureManagedResourceGroup(appName)
resourceGroup := d.Get("resource_group_name").(string)
resp, err := azureClient.AppsGet(ctx, resourceGroup, appName)
if err != nil {
if responseWasNotFound(resp.Response) {
log.Printf("[WARN] Managed Application %q does not exist - removing from state", d.Id())
d.SetId("")
return nil
}
return diag.Errorf("failed to read Managed Application %q (Resource Group %q): %+v", appName, resourceGroup, err)
}
if err := d.Set("application_name", appName); err != nil {
return diag.FromErr(err)
}
if err := d.Set("managed_resource_group_name", managedResourceGroup); err != nil {
return diag.FromErr(err)
}
if err := d.Set("resource_group_name", resourceGroup); err != nil {
return diag.FromErr(err)
}
if err := d.Set("location", resp.Location); err != nil {
return diag.FromErr(err)
}
if props := resp.ApplicationProperties; props != nil {
params := formatAzureParameters(props.Parameters)
fusionSECParamSet := mapset.NewSetFromSlice(fusionSECAzureParams)
for k, v := range params {
// SecureString parameters will always have a null value, so ignore them
if v.valType != "SecureString" {
if k == "tagsByResource" {
maps := v.value.(map[string]interface{})
for _, tagValue := range maps {
if err := d.Set("tags", tagValue); err != nil {
return diag.FromErr(err)
}
break
}
}
if fusionSECParamSet.Contains(k) {
if err := d.Set(templateToTFParam(k, renamedFusionSECAzureParams), v.value); err != nil {
return diag.FromErr(err)
}
}
}
}
if err := d.Set("jit_approval_group_object_ids", flattenAzureJitApprovalGroupIds(props.JitAccessPolicy)); err != nil {
return diag.FromErr(err)
}
outputs := props.Outputs.(map[string]interface{})
fusionSECAzureTFOutputSet := mapset.NewSet()
for _, s := range fusionSECAzureTFOutputs {
fusionSECAzureTFOutputSet.Add(s)
}
for k, v := range outputs {
if v != nil {
v := v.(map[string]interface{})
if fusionSECAzureTFOutputSet.Contains(k) {
if !strings.HasPrefix(k, "hmvip") {
k = toSnake(k)
}
if err := d.Set(k, v["value"]); err != nil {
return diag.FromErr(err)
}
}
}
}
}
return nil
}
func resourceFusionSECAzureUpdate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
tflog.Trace(ctx, "resourceFusionSECAzureUpdate")
diags := resourceFusionSECAzureRead(ctx, d, m)
if diags.HasError() {
return diags
}
return diag.Errorf("Updates are not supported.")
}
func resourceFusionSECAzureDelete(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
tflog.Trace(ctx, "resourceFusionSECAzureDelete")
azureClient, diags := m.(*CbsService).azureClientService(ctx)
if diags.HasError() {
return diags
}
resourceGroup := d.Get("resource_group_name").(string)
appName := d.Get("fusion_sec_name").(string)
err := azureClient.AppsDelete(ctx, resourceGroup, appName)
return diag.FromErr(err)
}
| {
return &schema.Resource{
CreateContext: resourceFusionSECAzureCreate,
ReadContext: resourceFusionSECAzureRead,
UpdateContext: resourceFusionSECAzureUpdate,
DeleteContext: resourceFusionSECAzureDelete,
Schema: map[string]*schema.Schema{
"resource_group_name": {
Type: schema.TypeString,
Required: true,
ForceNew: true,
ValidateFunc: validateAzureResourceGroupName,
},
"location": {
Type: schema.TypeString,
Required: true,
},
// parameters
"fusion_sec_name": {
Description: "The name of the Fusion Storage Endpoint Collection (SEC). 0-59 alphanumeric characters only.",
Type: schema.TypeString,
Required: true,
ValidateFunc: validateAzureManagedApplicationName,
},
"load_balancer_network_rg": {
Type: schema.TypeString,
Required: true,
},
"load_balancer_network_name": {
Type: schema.TypeString,
Required: true,
},
"load_balancer_subnet": {
Type: schema.TypeString,
Required: true,
},
"jit_approval_group_object_ids": {
Description: "This is a list of Azure group object IDs for people who are allowed to approve JIT requests",
Required: true,
Type: schema.TypeList,
Elem: &schema.Schema{
Type: schema.TypeString,
ValidateFunc: validation.IsUUID,
},
},
"plan": {
Type: schema.TypeList,
Optional: true,
MaxItems: 1,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"name": {
Type: schema.TypeString,
Required: true,
ValidateFunc: validation.StringIsNotEmpty,
},
"product": {
Type: schema.TypeString,
Required: true,
ValidateFunc: validation.StringIsNotEmpty,
},
"publisher": {
Type: schema.TypeString,
Required: true,
ValidateFunc: validation.StringIsNotEmpty,
},
"version": {
Type: schema.TypeString,
Required: true,
ValidateFunc: validation.StringIsNotEmpty,
},
},
},
},
"tags": {
Type: schema.TypeMap,
Optional: true,
Elem: &schema.Schema{
Type: schema.TypeString,
ValidateFunc: validation.StringIsNotEmpty,
},
},
// Outputs
"application_name": {
Type: schema.TypeString,
Computed: true,
},
"managed_resource_group_name": {
Type: schema.TypeString,
Computed: true,
},
"hmvip0": {
Type: schema.TypeString,
Computed: true,
},
"hmvip1": {
Type: schema.TypeString,
Computed: true,
},
"load_balancer_full_identity_id": {
Type: schema.TypeString,
Computed: true,
},
},
Timeouts: &schema.ResourceTimeout{
Create: schema.DefaultTimeout(30 * time.Minute),
Read: schema.DefaultTimeout(5 * time.Minute),
Delete: schema.DefaultTimeout(30 * time.Minute),
},
}
} | identifier_body |
resource_fusion_sec_azure.go | /*
Copyright 2021, Pure Storage Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cbs
import (
"context"
"fmt"
"log"
"strings"
"time"
"github.com/Azure/azure-sdk-for-go/profiles/latest/resources/mgmt/managedapplications"
"github.com/Azure/go-autorest/autorest/to"
mapset "github.com/deckarep/golang-set"
"github.com/hashicorp/terraform-plugin-log/tflog"
"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation"
)
// Default managed application plan
const (
defaultFusionSECPlanName = "pure_sec_1_0_0"
defaultFusionSECPlanProduct = "pure_fusion_storage_endpoint_collection"
defaultFusionSECPlanPublisher = "purestoragemarketplaceadmin"
defaultFusionSECPlanVersion = "1.0.3"
)
var fusionSECAzureTemplateTags = []string{
"Microsoft.Network/loadBalancers",
"Microsoft.ManagedIdentity/userAssignedIdentities",
}
var fusionSECAzureParams = []interface{}{
"fusionSECName",
"location",
"loadBalancerNetworkRg",
"loadBalancerNetworkName",
"loadBalancerSubnet",
}
var renamedFusionSECAzureParams = map[string]string{}
var fusionSECAzureTFOutputs = []string{
"applicationName",
"managedResourceGroupName",
"hmvip0",
"hmvip1",
"loadBalancerFullIdentityId",
}
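// Outputs listed here are written back to state in Read: names are
// converted to snake_case via toSnake (managedResourceGroupName ->
// managed_resource_group_name), except the hmvip* outputs, which already
// match their schema attribute names.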
func resourceFusionSECAzure() *schema.Resource {
return &schema.Resource{
CreateContext: resourceFusionSECAzureCreate,
ReadContext: resourceFusionSECAzureRead,
UpdateContext: resourceFusionSECAzureUpdate,
DeleteContext: resourceFusionSECAzureDelete,
Schema: map[string]*schema.Schema{
"resource_group_name": {
Type: schema.TypeString,
Required: true,
ForceNew: true,
ValidateFunc: validateAzureResourceGroupName,
},
"location": {
Type: schema.TypeString,
Required: true,
},
// parameters
"fusion_sec_name": {
Description: "The name of the Fusion Storage Endpoint Collection (SEC). 0-59 alphanumeric characters only.",
Type: schema.TypeString,
Required: true,
ValidateFunc: validateAzureManagedApplicationName,
},
"load_balancer_network_rg": {
Type: schema.TypeString,
Required: true,
},
"load_balancer_network_name": {
Type: schema.TypeString,
Required: true,
},
"load_balancer_subnet": { | Description: "This is a list of Azure group object IDs for people who are allowed to approve JIT requests",
Required: true,
Type: schema.TypeList,
Elem: &schema.Schema{
Type: schema.TypeString,
ValidateFunc: validation.IsUUID,
},
},
"plan": {
Type: schema.TypeList,
Optional: true,
MaxItems: 1,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"name": {
Type: schema.TypeString,
Required: true,
ValidateFunc: validation.StringIsNotEmpty,
},
"product": {
Type: schema.TypeString,
Required: true,
ValidateFunc: validation.StringIsNotEmpty,
},
"publisher": {
Type: schema.TypeString,
Required: true,
ValidateFunc: validation.StringIsNotEmpty,
},
"version": {
Type: schema.TypeString,
Required: true,
ValidateFunc: validation.StringIsNotEmpty,
},
},
},
},
"tags": {
Type: schema.TypeMap,
Optional: true,
Elem: &schema.Schema{
Type: schema.TypeString,
ValidateFunc: validation.StringIsNotEmpty,
},
},
// Outputs
"application_name": {
Type: schema.TypeString,
Computed: true,
},
"managed_resource_group_name": {
Type: schema.TypeString,
Computed: true,
},
"hmvip0": {
Type: schema.TypeString,
Computed: true,
},
"hmvip1": {
Type: schema.TypeString,
Computed: true,
},
"load_balancer_full_identity_id": {
Type: schema.TypeString,
Computed: true,
},
},
Timeouts: &schema.ResourceTimeout{
Create: schema.DefaultTimeout(30 * time.Minute),
Read: schema.DefaultTimeout(5 * time.Minute),
Delete: schema.DefaultTimeout(30 * time.Minute),
},
}
}
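// Illustrative usage sketch (the resource type name is an assumption based
// on this provider's naming; all values are placeholders):
//
//	resource "cbs_fusion_sec_azure" "example" {
//	  resource_group_name           = "example-rg"
//	  location                      = "eastus"
//	  fusion_sec_name               = "examplesec"
//	  load_balancer_network_rg      = "example-network-rg"
//	  load_balancer_network_name    = "example-vnet"
//	  load_balancer_subnet          = "example-subnet"
//	  jit_approval_group_object_ids = ["00000000-0000-0000-0000-000000000000"]
//	}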
func resourceFusionSECAzureCreate(ctx context.Context, d *schema.ResourceData, m interface{}) (returnedDiags diag.Diagnostics) {
tflog.Trace(ctx, "resourceFusionSECAzurereate")
azureClient, diags := m.(*CbsService).azureClientService(ctx)
if diags.HasError() {
return diags
}
name := d.Get("fusion_sec_name").(string)
managedResourceGroup := toAzureManagedResourceGroup(name)
resourceGroupName := d.Get("resource_group_name").(string)
if d.IsNewResource() {
existing, err := azureClient.AppsGet(ctx, resourceGroupName, name)
if err != nil {
if !responseWasNotFound(existing.Response) {
return diag.Errorf("failed to check for presence of existing Managed Application Name %q (Resource Group %q): %+v", name, resourceGroupName, err)
}
}
if existing.ID != nil && *existing.ID != "" {
return diag.Errorf(
"A resource with the name %q, Resource Group %q and ID %q already exists - to be managed via Terraform this resource needs to be imported into the State.",
name,
resourceGroupName,
*existing.ID,
)
}
}
parameters := managedapplications.Application{
Location: to.StringPtr(d.Get("location").(string)),
}
targetResourceGroupId := fmt.Sprintf("/subscriptions/%s/resourceGroups/%s", azureClient.SubscriptionID(), managedResourceGroup)
parameters.ApplicationProperties = &managedapplications.ApplicationProperties{
ManagedResourceGroupID: to.StringPtr(targetResourceGroupId),
}
parameters.Kind = to.StringPtr("MarketPlace")
if v, ok1 := d.GetOk("plan"); ok1 && len(v.([]interface{})) > 0 {
parameters.Plan = expandPlan(v.([]interface{}))
} else {
parameters.Plan = &managedapplications.Plan{
Name: to.StringPtr(defaultFusionSECPlanName),
Product: to.StringPtr(defaultFusionSECPlanProduct),
Publisher: to.StringPtr(defaultFusionSECPlanPublisher),
Version: to.StringPtr(defaultFusionSECPlanVersion),
}
}
parameters.Parameters = make(map[string]interface{})
setAppParameter := func(key string, value interface{}) {
(parameters.Parameters.(map[string]interface{}))[key] = map[string]interface{}{"value": value}
}
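// ARM expects each application parameter wrapped as {"value": v}: for
// example, setAppParameter("location", "eastus") yields
// Parameters["location"] == map[string]interface{}{"value": "eastus"}.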
for _, value := range fusionSECAzureParams {
valueStr := value.(string)
setAppParameter(valueStr, d.Get(templateToTFParam(valueStr, renamedFusionSECAzureParams)))
}
returnedDiags = setAzureJitAccessPolicy(¶meters, d)
if v, ok := d.GetOk("tags"); ok {
tags := v.(map[string]interface{})
tagsMap := make(map[string]interface{})
for _, tag := range fusionSECAzureTemplateTags {
tagsMap[tag] = tags
}
setAppParameter("tagsByResource", tagsMap)
}
// Error out now, before we create resources
if returnedDiags.HasError() {
return returnedDiags
}
tflog.Trace(ctx, "resourceFusionSECAzureCreate AppsCreateOrUpdate")
err := azureClient.AppsCreateOrUpdate(ctx, resourceGroupName, name, parameters)
defer func() {
if returnedDiags.HasError() {
if err = azureClient.AppsDelete(ctx, resourceGroupName, name); err != nil {
tflog.Error(
ctx,
fmt.Sprintf(
"failed to delete Managed Application %q (Resource Group %q) after failed CreateOrUpdate operation: %+v",
name,
resourceGroupName,
err,
),
)
}
}
}()
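// Because returnedDiags is a named return, the deferred AppsDelete rollback
// also fires when the post-create read below reports errors.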
if err != nil {
return diag.FromErr(err)
}
resp, err := azureClient.AppsGet(ctx, resourceGroupName, name)
if err != nil {
return diag.Errorf("failed to retrieve Managed Application %q (Resource Group %q): %+v", name, resourceGroupName, err)
}
if resp.ID == nil || *resp.ID == "" {
return diag.Errorf("cannot read Managed Application %q (Resource Group %q) ID", name, resourceGroupName)
}
d.SetId(*resp.ID)
diags = resourceFusionSECAzureRead(ctx, d, m)
if diags.HasError() {
returnedDiags = append(returnedDiags, diags...)
}
return returnedDiags
}
func resourceFusionSECAzureRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
tflog.Trace(ctx, "resourceFusionSECAzureRead")
azureClient, diags := m.(*CbsService).azureClientService(ctx)
if diags.HasError() {
return diags
}
v, ok := d.GetOk("fusion_sec_name")
if !ok {
log.Printf("[WARN] No Managed Application found with Id %q, removing from state", d.Id())
d.SetId("")
return nil
}
appName := v.(string)
managedResourceGroup := toAzureManagedResourceGroup(appName)
resourceGroup := d.Get("resource_group_name").(string)
resp, err := azureClient.AppsGet(ctx, resourceGroup, appName)
if err != nil {
if responseWasNotFound(resp.Response) {
log.Printf("[WARN] Managed Application %q does not exist - removing from state", d.Id())
d.SetId("")
return nil
}
return diag.Errorf("failed to read Managed Application %q (Resource Group %q): %+v", appName, resourceGroup, err)
}
if err := d.Set("application_name", appName); err != nil {
return diag.FromErr(err)
}
if err := d.Set("managed_resource_group_name", managedResourceGroup); err != nil {
return diag.FromErr(err)
}
if err := d.Set("resource_group_name", resourceGroup); err != nil {
return diag.FromErr(err)
}
if err := d.Set("location", resp.Location); err != nil {
return diag.FromErr(err)
}
if props := resp.ApplicationProperties; props != nil {
params := formatAzureParameters(props.Parameters)
fusionSECParamSet := mapset.NewSetFromSlice(fusionSECAzureParams)
for k, v := range params {
// SecureString parameters will always have a null value, so ignore them
if v.valType != "SecureString" {
if k == "tagsByResource" {
maps := v.value.(map[string]interface{})
for _, tagValue := range maps {
if err := d.Set("tags", tagValue); err != nil {
return diag.FromErr(err)
}
break
}
}
if fusionSECParamSet.Contains(k) {
if err := d.Set(templateToTFParam(k, renamedFusionSECAzureParams), v.value); err != nil {
return diag.FromErr(err)
}
}
}
}
if err := d.Set("jit_approval_group_object_ids", flattenAzureJitApprovalGroupIds(props.JitAccessPolicy)); err != nil {
return diag.FromErr(err)
}
// Outputs may be nil until the deployment has produced them; a checked
// assertion avoids a panic and leaves an empty map to range over.
outputs, _ := props.Outputs.(map[string]interface{})
fusionSECAzureTFOutputSet := mapset.NewSet()
for _, s := range fusionSECAzureTFOutputs {
fusionSECAzureTFOutputSet.Add(s)
}
for k, v := range outputs {
if v != nil {
v := v.(map[string]interface{})
if fusionSECAzureTFOutputSet.Contains(k) {
if !strings.HasPrefix(k, "hmvip") {
k = toSnake(k)
}
if err := d.Set(k, v["value"]); err != nil {
return diag.FromErr(err)
}
}
}
}
}
return nil
}
func resourceFusionSECAzureUpdate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
tflog.Trace(ctx, "resourceFusionSECAzureUpdate")
diags := resourceFusionSECAzureRead(ctx, d, m)
if diags.HasError() {
return diags
}
return diag.Errorf("Updates are not supported.")
}
func resourceFusionSECAzureDelete(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
tflog.Trace(ctx, "resourceFusionSECAzureDelete")
azureClient, diags := m.(*CbsService).azureClientService(ctx)
if diags.HasError() {
return diags
}
resourceGroup := d.Get("resource_group_name").(string)
appName := d.Get("fusion_sec_name").(string)
err := azureClient.AppsDelete(ctx, resourceGroup, appName)
return diag.FromErr(err)
} | Type: schema.TypeString,
Required: true,
},
"jit_approval_group_object_ids": { | random_line_split |
iter.go | // Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package norm
import (
"fmt"
"unicode/utf8"
)
// MaxSegmentSize is the maximum size of a byte buffer needed to consider any
// sequence of starter and non-starter runes for the purpose of normalization.
const MaxSegmentSize = maxByteBufferSize
// An Iter iterates over a string or byte slice, while normalizing it
// to a given Form.
type Iter struct {
rb reorderBuffer
buf [maxByteBufferSize]byte
info Properties // first character saved from previous iteration
next iterFunc // implementation of next depends on form
asciiF iterFunc
p int // current position in input source
multiSeg []byte // remainder of multi-segment decomposition
}
type iterFunc func(*Iter) []byte
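// The iterator is a small state machine: next begins at the form's main
// handler (nextDecomposed for NFD/NFKD, nextComposed for NFC/NFKC) and is
// temporarily swapped to specialized handlers -- the ASCII fast path,
// Hangul decomposition, CGJ insertion on overflow, and multi-segment
// decompositions -- as those input classes are encountered.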
// Init initializes i to iterate over src after normalizing it to Form f.
func (i *Iter) Init(f Form, src []byte) {
i.p = 0
if len(src) == 0 {
i.setDone()
i.rb.nsrc = 0
return
}
i.multiSeg = nil
i.rb.init(f, src)
i.next = i.rb.f.nextMain
i.asciiF = nextASCIIBytes
i.info = i.rb.f.info(i.rb.src, i.p)
i.rb.ss.first(i.info)
}
// InitString initializes i to iterate over src after normalizing it to Form f.
func (i *Iter) InitString(f Form, src string) {
i.p = 0
if len(src) == 0 {
i.setDone()
i.rb.nsrc = 0
return
}
i.multiSeg = nil
i.rb.initString(f, src)
i.next = i.rb.f.nextMain
i.asciiF = nextASCIIString
i.info = i.rb.f.info(i.rb.src, i.p)
i.rb.ss.first(i.info)
}
// Seek sets the segment to be returned by the next call to Next to start
// at position p. It is the responsibility of the caller to set p to the
// start of a segment.
func (i *Iter) Seek(offset int64, whence int) (int64, error) {
var abs int64
switch whence {
case 0:
abs = offset
case 1:
abs = int64(i.p) + offset
case 2:
abs = int64(i.rb.nsrc) + offset
default:
return 0, fmt.Errorf("norm: invalid whence")
}
if abs < 0 {
return 0, fmt.Errorf("norm: negative position")
}
if int(abs) >= i.rb.nsrc {
i.setDone()
return int64(i.p), nil
}
i.p = int(abs)
i.multiSeg = nil
i.next = i.rb.f.nextMain
i.info = i.rb.f.info(i.rb.src, i.p)
i.rb.ss.first(i.info)
return abs, nil
}
// returnSlice returns a slice of the underlying input type as a byte slice.
// If the underlying is of type []byte, it will simply return a slice.
// If the underlying is of type string, it will copy the slice to the buffer
// and return that.
func (i *Iter) returnSlice(a, b int) []byte {
if i.rb.src.bytes == nil {
return i.buf[:copy(i.buf[:], i.rb.src.str[a:b])]
}
return i.rb.src.bytes[a:b]
}
// Pos returns the byte position at which the next call to Next will commence processing.
func (i *Iter) Pos() int {
return i.p
}
func (i *Iter) setDone() {
i.next = nextDone
i.p = i.rb.nsrc
}
// Done returns true if there is no more input to process.
func (i *Iter) Done() bool {
return i.p >= i.rb.nsrc
}
// Next returns f(i.input[i.Pos():n]), where n is a boundary of i.input.
// For any input a and b for which f(a) == f(b), subsequent calls
// to Next will return the same segments.
// Modifying runes are grouped together with the preceding starter, if such a starter exists.
// Although not guaranteed, n will typically be the smallest possible n.
func (i *Iter) Next() []byte {
return i.next(i)
}
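// Minimal usage sketch (NFC is this package's exported composed form):
//
//	var it Iter
//	it.Init(NFC, []byte("input"))
//	for !it.Done() {
//	    seg := it.Next() // one normalized segment per call
//	    _ = seg
//	}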
func nextASCIIBytes(i *Iter) []byte {
p := i.p + 1
if p >= i.rb.nsrc {
p0 := i.p
i.setDone()
return i.rb.src.bytes[p0:p]
}
if i.rb.src.bytes[p] < utf8.RuneSelf {
p0 := i.p
i.p = p
return i.rb.src.bytes[p0:p]
}
i.info = i.rb.f.info(i.rb.src, i.p)
i.next = i.rb.f.nextMain
return i.next(i)
}
func nextASCIIString(i *Iter) []byte {
p := i.p + 1
if p >= i.rb.nsrc {
i.buf[0] = i.rb.src.str[i.p]
i.setDone()
return i.buf[:1]
}
if i.rb.src.str[p] < utf8.RuneSelf {
i.buf[0] = i.rb.src.str[i.p]
i.p = p
return i.buf[:1]
}
i.info = i.rb.f.info(i.rb.src, i.p)
i.next = i.rb.f.nextMain
return i.next(i)
}
func nextHangul(i *Iter) []byte {
p := i.p
next := p + hangulUTF8Size
if next >= i.rb.nsrc {
i.setDone()
} else if i.rb.src.hangul(next) == 0 {
i.rb.ss.next(i.info)
i.info = i.rb.f.info(i.rb.src, i.p)
i.next = i.rb.f.nextMain
return i.next(i)
}
i.p = next
return i.buf[:decomposeHangul(i.buf[:], i.rb.src.hangul(p))]
}
func nextDone(i *Iter) []byte {
return nil
}
// nextMulti is used for iterating over multi-segment decompositions
// for decomposing normal forms.
func nextMulti(i *Iter) []byte {
j := 0
d := i.multiSeg
// skip first rune
for j = 1; j < len(d) && !utf8.RuneStart(d[j]); j++ {
}
for j < len(d) {
info := i.rb.f.info(input{bytes: d}, j)
if info.BoundaryBefore() {
i.multiSeg = d[j:]
return d[:j]
}
j += int(info.size)
}
// treat last segment as normal decomposition
i.next = i.rb.f.nextMain
return i.next(i)
}
// nextMultiNorm is used for iterating over multi-segment decompositions
// for composing normal forms.
func nextMultiNorm(i *Iter) []byte {
j := 0
d := i.multiSeg
for j < len(d) {
info := i.rb.f.info(input{bytes: d}, j)
if info.BoundaryBefore() {
i.rb.compose()
seg := i.buf[:i.rb.flushCopy(i.buf[:])]
i.rb.insertUnsafe(input{bytes: d}, j, info)
i.multiSeg = d[j+int(info.size):]
return seg
}
i.rb.insertUnsafe(input{bytes: d}, j, info)
j += int(info.size)
}
i.multiSeg = nil
i.next = nextComposed
return doNormComposed(i)
}
// nextDecomposed is the implementation of Next for forms NFD and NFKD.
func nextDecomposed(i *Iter) (next []byte) {
outp := 0
inCopyStart, outCopyStart := i.p, 0
for {
if sz := int(i.info.size); sz <= 1 {
i.rb.ss = 0
p := i.p
i.p++ // ASCII or illegal byte. Either way, advance by 1.
if i.p >= i.rb.nsrc {
i.setDone()
return i.returnSlice(p, i.p)
} else if i.rb.src._byte(i.p) < utf8.RuneSelf {
i.next = i.asciiF
return i.returnSlice(p, i.p)
}
outp++
} else if d := i.info.Decomposition(); d != nil {
// Note: If leading CCC != 0, then len(d) == 2 and last is also non-zero.
// Case 1: there is a leftover to copy. In this case the decomposition
// must begin with a modifier and should always be appended.
// Case 2: no leftover. Simply return d if followed by a ccc == 0 value.
p := outp + len(d)
if outp > 0 {
i.rb.src.copySlice(i.buf[outCopyStart:], inCopyStart, i.p)
// TODO: this condition should not be possible, but we leave it
// in for defensive purposes.
if p > len(i.buf) {
return i.buf[:outp]
}
} else if i.info.multiSegment() {
// outp must be 0 as multi-segment decompositions always
// start a new segment.
if i.multiSeg == nil {
i.multiSeg = d
i.next = nextMulti
return nextMulti(i)
}
// We are in the last segment. Treat as normal decomposition.
d = i.multiSeg
i.multiSeg = nil
p = len(d)
}
prevCC := i.info.tccc
if i.p += sz; i.p >= i.rb.nsrc {
i.setDone()
i.info = Properties{} // Force BoundaryBefore to succeed.
} else {
i.info = i.rb.f.info(i.rb.src, i.p)
}
switch i.rb.ss.next(i.info) {
case ssOverflow:
i.next = nextCGJDecompose
fallthrough
case ssStarter:
if outp > 0 {
copy(i.buf[outp:], d)
return i.buf[:p]
}
return d
}
copy(i.buf[outp:], d)
outp = p
inCopyStart, outCopyStart = i.p, outp
if i.info.ccc < prevCC {
goto doNorm
}
continue
} else if r := i.rb.src.hangul(i.p); r != 0 {
outp = decomposeHangul(i.buf[:], r)
i.p += hangulUTF8Size
inCopyStart, outCopyStart = i.p, outp
if i.p >= i.rb.nsrc {
i.setDone()
break
} else if i.rb.src.hangul(i.p) != 0 {
i.next = nextHangul
return i.buf[:outp]
}
} else {
p := outp + sz
if p > len(i.buf) {
break
}
outp = p
i.p += sz
}
if i.p >= i.rb.nsrc {
i.setDone()
break
}
prevCC := i.info.tccc
i.info = i.rb.f.info(i.rb.src, i.p)
if v := i.rb.ss.next(i.info); v == ssStarter {
break
} else if v == ssOverflow {
i.next = nextCGJDecompose
break
}
if i.info.ccc < prevCC {
goto doNorm
}
}
if outCopyStart == 0 | else if inCopyStart < i.p {
i.rb.src.copySlice(i.buf[outCopyStart:], inCopyStart, i.p)
}
return i.buf[:outp]
doNorm:
// Insert what we have decomposed so far in the reorderBuffer.
// As we will only reorder, there will always be enough room.
i.rb.src.copySlice(i.buf[outCopyStart:], inCopyStart, i.p)
i.rb.insertDecomposed(i.buf[0:outp])
return doNormDecomposed(i)
}
func doNormDecomposed(i *Iter) []byte {
for {
i.rb.insertUnsafe(i.rb.src, i.p, i.info)
if i.p += int(i.info.size); i.p >= i.rb.nsrc {
i.setDone()
break
}
i.info = i.rb.f.info(i.rb.src, i.p)
if i.info.ccc == 0 {
break
}
if s := i.rb.ss.next(i.info); s == ssOverflow {
i.next = nextCGJDecompose
break
}
}
// new segment or too many combining characters: exit normalization
return i.buf[:i.rb.flushCopy(i.buf[:])]
}
func nextCGJDecompose(i *Iter) []byte {
i.rb.ss = 0
i.rb.insertCGJ()
i.next = nextDecomposed
i.rb.ss.first(i.info)
buf := doNormDecomposed(i)
return buf
}
// nextComposed is the implementation of Next for forms NFC and NFKC.
func nextComposed(i *Iter) []byte {
outp, startp := 0, i.p
var prevCC uint8
for {
if !i.info.isYesC() {
goto doNorm
}
prevCC = i.info.tccc
sz := int(i.info.size)
if sz == 0 {
sz = 1 // illegal rune: copy byte-by-byte
}
p := outp + sz
if p > len(i.buf) {
break
}
outp = p
i.p += sz
if i.p >= i.rb.nsrc {
i.setDone()
break
} else if i.rb.src._byte(i.p) < utf8.RuneSelf {
i.rb.ss = 0
i.next = i.asciiF
break
}
i.info = i.rb.f.info(i.rb.src, i.p)
if v := i.rb.ss.next(i.info); v == ssStarter {
break
} else if v == ssOverflow {
i.next = nextCGJCompose
break
}
if i.info.ccc < prevCC {
goto doNorm
}
}
return i.returnSlice(startp, i.p)
doNorm:
// reset to start position
i.p = startp
i.info = i.rb.f.info(i.rb.src, i.p)
i.rb.ss.first(i.info)
if i.info.multiSegment() {
d := i.info.Decomposition()
info := i.rb.f.info(input{bytes: d}, 0)
i.rb.insertUnsafe(input{bytes: d}, 0, info)
i.multiSeg = d[int(info.size):]
i.next = nextMultiNorm
return nextMultiNorm(i)
}
i.rb.ss.first(i.info)
i.rb.insertUnsafe(i.rb.src, i.p, i.info)
return doNormComposed(i)
}
func doNormComposed(i *Iter) []byte {
// First rune should already be inserted.
for {
if i.p += int(i.info.size); i.p >= i.rb.nsrc {
i.setDone()
break
}
i.info = i.rb.f.info(i.rb.src, i.p)
if s := i.rb.ss.next(i.info); s == ssStarter {
break
} else if s == ssOverflow {
i.next = nextCGJCompose
break
}
i.rb.insertUnsafe(i.rb.src, i.p, i.info)
}
i.rb.compose()
seg := i.buf[:i.rb.flushCopy(i.buf[:])]
return seg
}
func nextCGJCompose(i *Iter) []byte {
i.rb.ss = 0 // instead of first
i.rb.insertCGJ()
i.next = nextComposed
// Note that we treat any rune with nLeadingNonStarters > 0 as a non-starter,
// even if they are not. This is particularly dubious for U+FF9E and U+FF9F.
// If we ever change that, insert a check here.
i.rb.ss.first(i.info)
i.rb.insertUnsafe(i.rb.src, i.p, i.info)
return doNormComposed(i)
}
| {
return i.returnSlice(inCopyStart, i.p)
} | conditional_block |
iter.go | // Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package norm
import (
"fmt"
"unicode/utf8"
)
// MaxSegmentSize is the maximum size of a byte buffer needed to consider any
// sequence of starter and non-starter runes for the purpose of normalization.
const MaxSegmentSize = maxByteBufferSize
// An Iter iterates over a string or byte slice, while normalizing it
// to a given Form.
type Iter struct {
rb reorderBuffer
buf [maxByteBufferSize]byte
info Properties // first character saved from previous iteration
next iterFunc // implementation of next depends on form
asciiF iterFunc
p int // current position in input source
multiSeg []byte // remainder of multi-segment decomposition
}
type iterFunc func(*Iter) []byte
// Init initializes i to iterate over src after normalizing it to Form f.
func (i *Iter) Init(f Form, src []byte) {
i.p = 0
if len(src) == 0 {
i.setDone()
i.rb.nsrc = 0
return
}
i.multiSeg = nil
i.rb.init(f, src)
i.next = i.rb.f.nextMain
i.asciiF = nextASCIIBytes
i.info = i.rb.f.info(i.rb.src, i.p)
i.rb.ss.first(i.info)
}
// InitString initializes i to iterate over src after normalizing it to Form f.
func (i *Iter) InitString(f Form, src string) {
i.p = 0
if len(src) == 0 {
i.setDone()
i.rb.nsrc = 0
return
}
i.multiSeg = nil
i.rb.initString(f, src)
i.next = i.rb.f.nextMain
i.asciiF = nextASCIIString
i.info = i.rb.f.info(i.rb.src, i.p)
i.rb.ss.first(i.info)
}
// Seek sets the segment to be returned by the next call to Next to start
// at position p. It is the responsibility of the caller to set p to the
// start of a segment.
func (i *Iter) Seek(offset int64, whence int) (int64, error) {
var abs int64
switch whence {
case 0:
abs = offset
case 1:
abs = int64(i.p) + offset
case 2:
abs = int64(i.rb.nsrc) + offset
default:
return 0, fmt.Errorf("norm: invalid whence")
}
if abs < 0 {
return 0, fmt.Errorf("norm: negative position")
}
if int(abs) >= i.rb.nsrc {
i.setDone()
return int64(i.p), nil
}
i.p = int(abs)
i.multiSeg = nil
i.next = i.rb.f.nextMain
i.info = i.rb.f.info(i.rb.src, i.p)
i.rb.ss.first(i.info)
return abs, nil
}
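// whence follows the io.Seeker convention (0 = start, 1 = current,
// 2 = end), so rewinding the iterator is:
//
//	if _, err := it.Seek(0, 0); err != nil {
//	    // invalid whence or negative position
//	}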
// returnSlice returns a slice of the underlying input type as a byte slice.
// If the underlying is of type []byte, it will simply return a slice.
// If the underlying is of type string, it will copy the slice to the buffer
// and return that.
func (i *Iter) returnSlice(a, b int) []byte {
if i.rb.src.bytes == nil {
return i.buf[:copy(i.buf[:], i.rb.src.str[a:b])]
}
return i.rb.src.bytes[a:b]
}
// Pos returns the byte position at which the next call to Next will commence processing.
func (i *Iter) Pos() int {
return i.p
}
func (i *Iter) | () {
i.next = nextDone
i.p = i.rb.nsrc
}
// Done returns true if there is no more input to process.
func (i *Iter) Done() bool {
return i.p >= i.rb.nsrc
}
// Next returns f(i.input[i.Pos():n]), where n is a boundary of i.input.
// For any input a and b for which f(a) == f(b), subsequent calls
// to Next will return the same segments.
// Modifying runes are grouped together with the preceding starter, if such a starter exists.
// Although not guaranteed, n will typically be the smallest possible n.
func (i *Iter) Next() []byte {
return i.next(i)
}
func nextASCIIBytes(i *Iter) []byte {
p := i.p + 1
if p >= i.rb.nsrc {
p0 := i.p
i.setDone()
return i.rb.src.bytes[p0:p]
}
if i.rb.src.bytes[p] < utf8.RuneSelf {
p0 := i.p
i.p = p
return i.rb.src.bytes[p0:p]
}
i.info = i.rb.f.info(i.rb.src, i.p)
i.next = i.rb.f.nextMain
return i.next(i)
}
func nextASCIIString(i *Iter) []byte {
p := i.p + 1
if p >= i.rb.nsrc {
i.buf[0] = i.rb.src.str[i.p]
i.setDone()
return i.buf[:1]
}
if i.rb.src.str[p] < utf8.RuneSelf {
i.buf[0] = i.rb.src.str[i.p]
i.p = p
return i.buf[:1]
}
i.info = i.rb.f.info(i.rb.src, i.p)
i.next = i.rb.f.nextMain
return i.next(i)
}
func nextHangul(i *Iter) []byte {
p := i.p
next := p + hangulUTF8Size
if next >= i.rb.nsrc {
i.setDone()
} else if i.rb.src.hangul(next) == 0 {
i.rb.ss.next(i.info)
i.info = i.rb.f.info(i.rb.src, i.p)
i.next = i.rb.f.nextMain
return i.next(i)
}
i.p = next
return i.buf[:decomposeHangul(i.buf[:], i.rb.src.hangul(p))]
}
func nextDone(i *Iter) []byte {
return nil
}
// nextMulti is used for iterating over multi-segment decompositions
// for decomposing normal forms.
func nextMulti(i *Iter) []byte {
j := 0
d := i.multiSeg
// skip first rune
for j = 1; j < len(d) && !utf8.RuneStart(d[j]); j++ {
}
for j < len(d) {
info := i.rb.f.info(input{bytes: d}, j)
if info.BoundaryBefore() {
i.multiSeg = d[j:]
return d[:j]
}
j += int(info.size)
}
// treat last segment as normal decomposition
i.next = i.rb.f.nextMain
return i.next(i)
}
// nextMultiNorm is used for iterating over multi-segment decompositions
// for composing normal forms.
func nextMultiNorm(i *Iter) []byte {
j := 0
d := i.multiSeg
for j < len(d) {
info := i.rb.f.info(input{bytes: d}, j)
if info.BoundaryBefore() {
i.rb.compose()
seg := i.buf[:i.rb.flushCopy(i.buf[:])]
i.rb.insertUnsafe(input{bytes: d}, j, info)
i.multiSeg = d[j+int(info.size):]
return seg
}
i.rb.insertUnsafe(input{bytes: d}, j, info)
j += int(info.size)
}
i.multiSeg = nil
i.next = nextComposed
return doNormComposed(i)
}
// nextDecomposed is the implementation of Next for forms NFD and NFKD.
func nextDecomposed(i *Iter) (next []byte) {
outp := 0
inCopyStart, outCopyStart := i.p, 0
for {
if sz := int(i.info.size); sz <= 1 {
i.rb.ss = 0
p := i.p
i.p++ // ASCII or illegal byte. Either way, advance by 1.
if i.p >= i.rb.nsrc {
i.setDone()
return i.returnSlice(p, i.p)
} else if i.rb.src._byte(i.p) < utf8.RuneSelf {
i.next = i.asciiF
return i.returnSlice(p, i.p)
}
outp++
} else if d := i.info.Decomposition(); d != nil {
// Note: If leading CCC != 0, then len(d) == 2 and last is also non-zero.
// Case 1: there is a leftover to copy. In this case the decomposition
// must begin with a modifier and should always be appended.
// Case 2: no leftover. Simply return d if followed by a ccc == 0 value.
p := outp + len(d)
if outp > 0 {
i.rb.src.copySlice(i.buf[outCopyStart:], inCopyStart, i.p)
// TODO: this condition should not be possible, but we leave it
// in for defensive purposes.
if p > len(i.buf) {
return i.buf[:outp]
}
} else if i.info.multiSegment() {
// outp must be 0 as multi-segment decompositions always
// start a new segment.
if i.multiSeg == nil {
i.multiSeg = d
i.next = nextMulti
return nextMulti(i)
}
// We are in the last segment. Treat as normal decomposition.
d = i.multiSeg
i.multiSeg = nil
p = len(d)
}
prevCC := i.info.tccc
if i.p += sz; i.p >= i.rb.nsrc {
i.setDone()
i.info = Properties{} // Force BoundaryBefore to succeed.
} else {
i.info = i.rb.f.info(i.rb.src, i.p)
}
switch i.rb.ss.next(i.info) {
case ssOverflow:
i.next = nextCGJDecompose
fallthrough
case ssStarter:
if outp > 0 {
copy(i.buf[outp:], d)
return i.buf[:p]
}
return d
}
copy(i.buf[outp:], d)
outp = p
inCopyStart, outCopyStart = i.p, outp
if i.info.ccc < prevCC {
goto doNorm
}
continue
} else if r := i.rb.src.hangul(i.p); r != 0 {
outp = decomposeHangul(i.buf[:], r)
i.p += hangulUTF8Size
inCopyStart, outCopyStart = i.p, outp
if i.p >= i.rb.nsrc {
i.setDone()
break
} else if i.rb.src.hangul(i.p) != 0 {
i.next = nextHangul
return i.buf[:outp]
}
} else {
p := outp + sz
if p > len(i.buf) {
break
}
outp = p
i.p += sz
}
if i.p >= i.rb.nsrc {
i.setDone()
break
}
prevCC := i.info.tccc
i.info = i.rb.f.info(i.rb.src, i.p)
if v := i.rb.ss.next(i.info); v == ssStarter {
break
} else if v == ssOverflow {
i.next = nextCGJDecompose
break
}
if i.info.ccc < prevCC {
goto doNorm
}
}
if outCopyStart == 0 {
return i.returnSlice(inCopyStart, i.p)
} else if inCopyStart < i.p {
i.rb.src.copySlice(i.buf[outCopyStart:], inCopyStart, i.p)
}
return i.buf[:outp]
doNorm:
// Insert what we have decomposed so far in the reorderBuffer.
// As we will only reorder, there will always be enough room.
i.rb.src.copySlice(i.buf[outCopyStart:], inCopyStart, i.p)
i.rb.insertDecomposed(i.buf[0:outp])
return doNormDecomposed(i)
}
func doNormDecomposed(i *Iter) []byte {
for {
i.rb.insertUnsafe(i.rb.src, i.p, i.info)
if i.p += int(i.info.size); i.p >= i.rb.nsrc {
i.setDone()
break
}
i.info = i.rb.f.info(i.rb.src, i.p)
if i.info.ccc == 0 {
break
}
if s := i.rb.ss.next(i.info); s == ssOverflow {
i.next = nextCGJDecompose
break
}
}
// new segment or too many combining characters: exit normalization
return i.buf[:i.rb.flushCopy(i.buf[:])]
}
func nextCGJDecompose(i *Iter) []byte {
i.rb.ss = 0
i.rb.insertCGJ()
i.next = nextDecomposed
i.rb.ss.first(i.info)
buf := doNormDecomposed(i)
return buf
}
// nextComposed is the implementation of Next for forms NFC and NFKC.
func nextComposed(i *Iter) []byte {
outp, startp := 0, i.p
var prevCC uint8
for {
if !i.info.isYesC() {
goto doNorm
}
prevCC = i.info.tccc
sz := int(i.info.size)
if sz == 0 {
sz = 1 // illegal rune: copy byte-by-byte
}
p := outp + sz
if p > len(i.buf) {
break
}
outp = p
i.p += sz
if i.p >= i.rb.nsrc {
i.setDone()
break
} else if i.rb.src._byte(i.p) < utf8.RuneSelf {
i.rb.ss = 0
i.next = i.asciiF
break
}
i.info = i.rb.f.info(i.rb.src, i.p)
if v := i.rb.ss.next(i.info); v == ssStarter {
break
} else if v == ssOverflow {
i.next = nextCGJCompose
break
}
if i.info.ccc < prevCC {
goto doNorm
}
}
return i.returnSlice(startp, i.p)
doNorm:
// reset to start position
i.p = startp
i.info = i.rb.f.info(i.rb.src, i.p)
i.rb.ss.first(i.info)
if i.info.multiSegment() {
d := i.info.Decomposition()
info := i.rb.f.info(input{bytes: d}, 0)
i.rb.insertUnsafe(input{bytes: d}, 0, info)
i.multiSeg = d[int(info.size):]
i.next = nextMultiNorm
return nextMultiNorm(i)
}
i.rb.ss.first(i.info)
i.rb.insertUnsafe(i.rb.src, i.p, i.info)
return doNormComposed(i)
}
func doNormComposed(i *Iter) []byte {
// First rune should already be inserted.
for {
if i.p += int(i.info.size); i.p >= i.rb.nsrc {
i.setDone()
break
}
i.info = i.rb.f.info(i.rb.src, i.p)
if s := i.rb.ss.next(i.info); s == ssStarter {
break
} else if s == ssOverflow {
i.next = nextCGJCompose
break
}
i.rb.insertUnsafe(i.rb.src, i.p, i.info)
}
i.rb.compose()
seg := i.buf[:i.rb.flushCopy(i.buf[:])]
return seg
}
func nextCGJCompose(i *Iter) []byte {
i.rb.ss = 0 // instead of first
i.rb.insertCGJ()
i.next = nextComposed
// Note that we treat any rune with nLeadingNonStarters > 0 as a non-starter,
// even if they are not. This is particularly dubious for U+FF9E and UFF9A.
// If we ever change that, insert a check here.
i.rb.ss.first(i.info)
i.rb.insertUnsafe(i.rb.src, i.p, i.info)
return doNormComposed(i)
}
| setDone | identifier_name |
iter.go | // Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package norm
import (
"fmt"
"unicode/utf8"
)
// MaxSegmentSize is the maximum size of a byte buffer needed to consider any
// sequence of starter and non-starter runes for the purpose of normalization.
const MaxSegmentSize = maxByteBufferSize
// An Iter iterates over a string or byte slice, while normalizing it
// to a given Form.
type Iter struct {
rb reorderBuffer
buf [maxByteBufferSize]byte
info Properties // first character saved from previous iteration
next iterFunc // implementation of next depends on form
asciiF iterFunc
p int // current position in input source
multiSeg []byte // remainder of multi-segment decomposition
}
type iterFunc func(*Iter) []byte
// Init initializes i to iterate over src after normalizing it to Form f.
func (i *Iter) Init(f Form, src []byte) {
i.p = 0
if len(src) == 0 {
i.setDone()
i.rb.nsrc = 0
return
}
i.multiSeg = nil
i.rb.init(f, src)
i.next = i.rb.f.nextMain
i.asciiF = nextASCIIBytes
i.info = i.rb.f.info(i.rb.src, i.p)
i.rb.ss.first(i.info)
}
// InitString initializes i to iterate over src after normalizing it to Form f.
func (i *Iter) InitString(f Form, src string) {
i.p = 0
if len(src) == 0 {
i.setDone()
i.rb.nsrc = 0
return
}
i.multiSeg = nil
i.rb.initString(f, src)
i.next = i.rb.f.nextMain
i.asciiF = nextASCIIString
i.info = i.rb.f.info(i.rb.src, i.p)
i.rb.ss.first(i.info)
}
// Seek sets the segment to be returned by the next call to Next to start
// at position p. It is the responsibility of the caller to set p to the
// start of a segment.
func (i *Iter) Seek(offset int64, whence int) (int64, error) {
var abs int64
switch whence {
case 0:
abs = offset
case 1:
abs = int64(i.p) + offset
case 2:
abs = int64(i.rb.nsrc) + offset
default:
return 0, fmt.Errorf("norm: invalid whence")
}
if abs < 0 {
return 0, fmt.Errorf("norm: negative position")
}
if int(abs) >= i.rb.nsrc {
i.setDone()
return int64(i.p), nil
}
i.p = int(abs)
i.multiSeg = nil
i.next = i.rb.f.nextMain
i.info = i.rb.f.info(i.rb.src, i.p)
i.rb.ss.first(i.info)
return abs, nil
}
// returnSlice returns a slice of the underlying input type as a byte slice.
// If the underlying is of type []byte, it will simply return a slice.
// If the underlying is of type string, it will copy the slice to the buffer
// and return that.
func (i *Iter) returnSlice(a, b int) []byte {
if i.rb.src.bytes == nil {
return i.buf[:copy(i.buf[:], i.rb.src.str[a:b])]
}
return i.rb.src.bytes[a:b]
}
// Pos returns the byte position at which the next call to Next will commence processing.
func (i *Iter) Pos() int {
return i.p
}
func (i *Iter) setDone() {
i.next = nextDone
i.p = i.rb.nsrc
}
// Done returns true if there is no more input to process.
func (i *Iter) Done() bool {
return i.p >= i.rb.nsrc
}
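// String-input sketch: InitString avoids converting the whole input to a
// []byte up front; segments are copied into the internal buffer on demand.
//
//	var it Iter
//	it.InitString(NFD, "résumé")
//	for !it.Done() {
//	    _ = it.Next()
//	}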
// Next returns f(i.input[i.Pos():n]), where n is a boundary of i.input.
// For any input a and b for which f(a) == f(b), subsequent calls
// to Next will return the same segments.
// Modifying runes are grouped together with the preceding starter, if such a starter exists.
// Although not guaranteed, n will typically be the smallest possible n.
func (i *Iter) Next() []byte {
return i.next(i)
}
func nextASCIIBytes(i *Iter) []byte {
p := i.p + 1
if p >= i.rb.nsrc {
p0 := i.p
i.setDone()
return i.rb.src.bytes[p0:p]
}
if i.rb.src.bytes[p] < utf8.RuneSelf {
p0 := i.p
i.p = p
return i.rb.src.bytes[p0:p]
}
i.info = i.rb.f.info(i.rb.src, i.p)
i.next = i.rb.f.nextMain
return i.next(i)
}
func nextASCIIString(i *Iter) []byte {
p := i.p + 1
if p >= i.rb.nsrc {
i.buf[0] = i.rb.src.str[i.p]
i.setDone()
return i.buf[:1]
}
if i.rb.src.str[p] < utf8.RuneSelf {
i.buf[0] = i.rb.src.str[i.p]
i.p = p
return i.buf[:1]
}
i.info = i.rb.f.info(i.rb.src, i.p)
i.next = i.rb.f.nextMain
return i.next(i)
}
func nextHangul(i *Iter) []byte {
p := i.p
next := p + hangulUTF8Size
if next >= i.rb.nsrc {
i.setDone()
} else if i.rb.src.hangul(next) == 0 {
i.rb.ss.next(i.info)
i.info = i.rb.f.info(i.rb.src, i.p)
i.next = i.rb.f.nextMain
return i.next(i)
}
i.p = next
return i.buf[:decomposeHangul(i.buf[:], i.rb.src.hangul(p))]
}
func nextDone(i *Iter) []byte {
return nil
}
// nextMulti is used for iterating over multi-segment decompositions
// for decomposing normal forms.
func nextMulti(i *Iter) []byte {
j := 0
d := i.multiSeg
// skip first rune
for j = 1; j < len(d) && !utf8.RuneStart(d[j]); j++ {
}
for j < len(d) {
info := i.rb.f.info(input{bytes: d}, j)
if info.BoundaryBefore() {
i.multiSeg = d[j:]
return d[:j]
}
j += int(info.size)
}
// treat last segment as normal decomposition
i.next = i.rb.f.nextMain
return i.next(i)
}
// nextMultiNorm is used for iterating over multi-segment decompositions
// for composing normal forms.
func nextMultiNorm(i *Iter) []byte {
j := 0
d := i.multiSeg
for j < len(d) {
info := i.rb.f.info(input{bytes: d}, j)
if info.BoundaryBefore() {
i.rb.compose()
seg := i.buf[:i.rb.flushCopy(i.buf[:])]
i.rb.insertUnsafe(input{bytes: d}, j, info)
i.multiSeg = d[j+int(info.size):]
return seg
}
i.rb.insertUnsafe(input{bytes: d}, j, info)
j += int(info.size)
}
i.multiSeg = nil
i.next = nextComposed
return doNormComposed(i)
}
// nextDecomposed is the implementation of Next for forms NFD and NFKD.
func nextDecomposed(i *Iter) (next []byte) {
outp := 0
inCopyStart, outCopyStart := i.p, 0
for {
if sz := int(i.info.size); sz <= 1 {
i.rb.ss = 0
p := i.p
i.p++ // ASCII or illegal byte. Either way, advance by 1. | if i.p >= i.rb.nsrc {
i.setDone()
return i.returnSlice(p, i.p)
} else if i.rb.src._byte(i.p) < utf8.RuneSelf {
i.next = i.asciiF
return i.returnSlice(p, i.p)
}
outp++
} else if d := i.info.Decomposition(); d != nil {
// Note: If leading CCC != 0, then len(d) == 2 and last is also non-zero.
// Case 1: there is a leftover to copy. In this case the decomposition
// must begin with a modifier and should always be appended.
// Case 2: no leftover. Simply return d if followed by a ccc == 0 value.
p := outp + len(d)
if outp > 0 {
i.rb.src.copySlice(i.buf[outCopyStart:], inCopyStart, i.p)
// TODO: this condition should not be possible, but we leave it
// in for defensive purposes.
if p > len(i.buf) {
return i.buf[:outp]
}
} else if i.info.multiSegment() {
// outp must be 0 as multi-segment decompositions always
// start a new segment.
if i.multiSeg == nil {
i.multiSeg = d
i.next = nextMulti
return nextMulti(i)
}
// We are in the last segment. Treat as normal decomposition.
d = i.multiSeg
i.multiSeg = nil
p = len(d)
}
prevCC := i.info.tccc
if i.p += sz; i.p >= i.rb.nsrc {
i.setDone()
i.info = Properties{} // Force BoundaryBefore to succeed.
} else {
i.info = i.rb.f.info(i.rb.src, i.p)
}
switch i.rb.ss.next(i.info) {
case ssOverflow:
i.next = nextCGJDecompose
fallthrough
case ssStarter:
if outp > 0 {
copy(i.buf[outp:], d)
return i.buf[:p]
}
return d
}
copy(i.buf[outp:], d)
outp = p
inCopyStart, outCopyStart = i.p, outp
if i.info.ccc < prevCC {
goto doNorm
}
continue
} else if r := i.rb.src.hangul(i.p); r != 0 {
outp = decomposeHangul(i.buf[:], r)
i.p += hangulUTF8Size
inCopyStart, outCopyStart = i.p, outp
if i.p >= i.rb.nsrc {
i.setDone()
break
} else if i.rb.src.hangul(i.p) != 0 {
i.next = nextHangul
return i.buf[:outp]
}
} else {
p := outp + sz
if p > len(i.buf) {
break
}
outp = p
i.p += sz
}
if i.p >= i.rb.nsrc {
i.setDone()
break
}
prevCC := i.info.tccc
i.info = i.rb.f.info(i.rb.src, i.p)
if v := i.rb.ss.next(i.info); v == ssStarter {
break
} else if v == ssOverflow {
i.next = nextCGJDecompose
break
}
if i.info.ccc < prevCC {
goto doNorm
}
}
if outCopyStart == 0 {
return i.returnSlice(inCopyStart, i.p)
} else if inCopyStart < i.p {
i.rb.src.copySlice(i.buf[outCopyStart:], inCopyStart, i.p)
}
return i.buf[:outp]
doNorm:
// Insert what we have decomposed so far in the reorderBuffer.
// As we will only reorder, there will always be enough room.
i.rb.src.copySlice(i.buf[outCopyStart:], inCopyStart, i.p)
i.rb.insertDecomposed(i.buf[0:outp])
return doNormDecomposed(i)
}
func doNormDecomposed(i *Iter) []byte {
for {
i.rb.insertUnsafe(i.rb.src, i.p, i.info)
if i.p += int(i.info.size); i.p >= i.rb.nsrc {
i.setDone()
break
}
i.info = i.rb.f.info(i.rb.src, i.p)
if i.info.ccc == 0 {
break
}
if s := i.rb.ss.next(i.info); s == ssOverflow {
i.next = nextCGJDecompose
break
}
}
// new segment or too many combining characters: exit normalization
return i.buf[:i.rb.flushCopy(i.buf[:])]
}
func nextCGJDecompose(i *Iter) []byte {
i.rb.ss = 0
i.rb.insertCGJ()
i.next = nextDecomposed
i.rb.ss.first(i.info)
buf := doNormDecomposed(i)
return buf
}
// nextComposed is the implementation of Next for forms NFC and NFKC.
func nextComposed(i *Iter) []byte {
outp, startp := 0, i.p
var prevCC uint8
for {
if !i.info.isYesC() {
goto doNorm
}
prevCC = i.info.tccc
sz := int(i.info.size)
if sz == 0 {
sz = 1 // illegal rune: copy byte-by-byte
}
p := outp + sz
if p > len(i.buf) {
break
}
outp = p
i.p += sz
if i.p >= i.rb.nsrc {
i.setDone()
break
} else if i.rb.src._byte(i.p) < utf8.RuneSelf {
i.rb.ss = 0
i.next = i.asciiF
break
}
i.info = i.rb.f.info(i.rb.src, i.p)
if v := i.rb.ss.next(i.info); v == ssStarter {
break
} else if v == ssOverflow {
i.next = nextCGJCompose
break
}
if i.info.ccc < prevCC {
goto doNorm
}
}
return i.returnSlice(startp, i.p)
doNorm:
// reset to start position
i.p = startp
i.info = i.rb.f.info(i.rb.src, i.p)
i.rb.ss.first(i.info)
if i.info.multiSegment() {
d := i.info.Decomposition()
info := i.rb.f.info(input{bytes: d}, 0)
i.rb.insertUnsafe(input{bytes: d}, 0, info)
i.multiSeg = d[int(info.size):]
i.next = nextMultiNorm
return nextMultiNorm(i)
}
i.rb.ss.first(i.info)
i.rb.insertUnsafe(i.rb.src, i.p, i.info)
return doNormComposed(i)
}
func doNormComposed(i *Iter) []byte {
// First rune should already be inserted.
for {
if i.p += int(i.info.size); i.p >= i.rb.nsrc {
i.setDone()
break
}
i.info = i.rb.f.info(i.rb.src, i.p)
if s := i.rb.ss.next(i.info); s == ssStarter {
break
} else if s == ssOverflow {
i.next = nextCGJCompose
break
}
i.rb.insertUnsafe(i.rb.src, i.p, i.info)
}
i.rb.compose()
seg := i.buf[:i.rb.flushCopy(i.buf[:])]
return seg
}
func nextCGJCompose(i *Iter) []byte {
i.rb.ss = 0 // instead of first
i.rb.insertCGJ()
i.next = nextComposed
// Note that we treat any rune with nLeadingNonStarters > 0 as a non-starter,
// even if they are not. This is particularly dubious for U+FF9E and U+FF9F.
// If we ever change that, insert a check here.
i.rb.ss.first(i.info)
i.rb.insertUnsafe(i.rb.src, i.p, i.info)
return doNormComposed(i)
} | random_line_split | |
iter.go | // Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package norm
import (
"fmt"
"unicode/utf8"
)
// MaxSegmentSize is the maximum size of a byte buffer needed to consider any
// sequence of starter and non-starter runes for the purpose of normalization.
const MaxSegmentSize = maxByteBufferSize
// An Iter iterates over a string or byte slice, while normalizing it
// to a given Form.
type Iter struct {
rb reorderBuffer
buf [maxByteBufferSize]byte
info Properties // first character saved from previous iteration
next iterFunc // implementation of next depends on form
asciiF iterFunc
p int // current position in input source
multiSeg []byte // remainder of multi-segment decomposition
}
type iterFunc func(*Iter) []byte
// Init initializes i to iterate over src after normalizing it to Form f.
func (i *Iter) Init(f Form, src []byte) {
i.p = 0
if len(src) == 0 {
i.setDone()
i.rb.nsrc = 0
return
}
i.multiSeg = nil
i.rb.init(f, src)
i.next = i.rb.f.nextMain
i.asciiF = nextASCIIBytes
i.info = i.rb.f.info(i.rb.src, i.p)
i.rb.ss.first(i.info)
}
// InitString initializes i to iterate over src after normalizing it to Form f.
func (i *Iter) InitString(f Form, src string) {
i.p = 0
if len(src) == 0 {
i.setDone()
i.rb.nsrc = 0
return
}
i.multiSeg = nil
i.rb.initString(f, src)
i.next = i.rb.f.nextMain
i.asciiF = nextASCIIString
i.info = i.rb.f.info(i.rb.src, i.p)
i.rb.ss.first(i.info)
}
// Seek sets the segment to be returned by the next call to Next to start
// at position p. It is the responsibility of the caller to set p to the
// start of a segment.
func (i *Iter) Seek(offset int64, whence int) (int64, error) {
var abs int64
switch whence {
case 0:
abs = offset
case 1:
abs = int64(i.p) + offset
case 2:
abs = int64(i.rb.nsrc) + offset
default:
return 0, fmt.Errorf("norm: invalid whence")
}
if abs < 0 {
return 0, fmt.Errorf("norm: negative position")
}
if int(abs) >= i.rb.nsrc {
i.setDone()
return int64(i.p), nil
}
i.p = int(abs)
i.multiSeg = nil
i.next = i.rb.f.nextMain
i.info = i.rb.f.info(i.rb.src, i.p)
i.rb.ss.first(i.info)
return abs, nil
}
// returnSlice returns a slice of the underlying input type as a byte slice.
// If the underlying is of type []byte, it will simply return a slice.
// If the underlying is of type string, it will copy the slice to the buffer
// and return that.
func (i *Iter) returnSlice(a, b int) []byte {
if i.rb.src.bytes == nil {
return i.buf[:copy(i.buf[:], i.rb.src.str[a:b])]
}
return i.rb.src.bytes[a:b]
}
// Pos returns the byte position at which the next call to Next will commence processing.
func (i *Iter) Pos() int {
return i.p
}
func (i *Iter) setDone() {
i.next = nextDone
i.p = i.rb.nsrc
}
// Done returns true if there is no more input to process.
func (i *Iter) Done() bool {
return i.p >= i.rb.nsrc
}
// Next returns f(i.input[i.Pos():n]), where n is a boundary of i.input.
// For any input a and b for which f(a) == f(b), subsequent calls
// to Next will return the same segments.
// Modifying runes are grouped together with the preceding starter, if such a starter exists.
// Although not guaranteed, n will typically be the smallest possible n.
func (i *Iter) Next() []byte {
return i.next(i)
}
func nextASCIIBytes(i *Iter) []byte {
p := i.p + 1
if p >= i.rb.nsrc {
p0 := i.p
i.setDone()
return i.rb.src.bytes[p0:p]
}
if i.rb.src.bytes[p] < utf8.RuneSelf {
p0 := i.p
i.p = p
return i.rb.src.bytes[p0:p]
}
i.info = i.rb.f.info(i.rb.src, i.p)
i.next = i.rb.f.nextMain
return i.next(i)
}
func nextASCIIString(i *Iter) []byte {
p := i.p + 1
if p >= i.rb.nsrc {
i.buf[0] = i.rb.src.str[i.p]
i.setDone()
return i.buf[:1]
}
if i.rb.src.str[p] < utf8.RuneSelf {
i.buf[0] = i.rb.src.str[i.p]
i.p = p
return i.buf[:1]
}
i.info = i.rb.f.info(i.rb.src, i.p)
i.next = i.rb.f.nextMain
return i.next(i)
}
func nextHangul(i *Iter) []byte {
p := i.p
next := p + hangulUTF8Size
if next >= i.rb.nsrc {
i.setDone()
} else if i.rb.src.hangul(next) == 0 {
i.rb.ss.next(i.info)
i.info = i.rb.f.info(i.rb.src, i.p)
i.next = i.rb.f.nextMain
return i.next(i)
}
i.p = next
return i.buf[:decomposeHangul(i.buf[:], i.rb.src.hangul(p))]
}
func nextDone(i *Iter) []byte {
return nil
}
// nextMulti is used for iterating over multi-segment decompositions
// for decomposing normal forms.
func nextMulti(i *Iter) []byte |
// nextMultiNorm is used for iterating over multi-segment decompositions
// for composing normal forms.
func nextMultiNorm(i *Iter) []byte {
j := 0
d := i.multiSeg
for j < len(d) {
info := i.rb.f.info(input{bytes: d}, j)
if info.BoundaryBefore() {
i.rb.compose()
seg := i.buf[:i.rb.flushCopy(i.buf[:])]
i.rb.insertUnsafe(input{bytes: d}, j, info)
i.multiSeg = d[j+int(info.size):]
return seg
}
i.rb.insertUnsafe(input{bytes: d}, j, info)
j += int(info.size)
}
i.multiSeg = nil
i.next = nextComposed
return doNormComposed(i)
}
// nextDecomposed is the implementation of Next for forms NFD and NFKD.
func nextDecomposed(i *Iter) (next []byte) {
outp := 0
inCopyStart, outCopyStart := i.p, 0
for {
if sz := int(i.info.size); sz <= 1 {
i.rb.ss = 0
p := i.p
i.p++ // ASCII or illegal byte. Either way, advance by 1.
if i.p >= i.rb.nsrc {
i.setDone()
return i.returnSlice(p, i.p)
} else if i.rb.src._byte(i.p) < utf8.RuneSelf {
i.next = i.asciiF
return i.returnSlice(p, i.p)
}
outp++
} else if d := i.info.Decomposition(); d != nil {
// Note: If leading CCC != 0, then len(d) == 2 and last is also non-zero.
// Case 1: there is a leftover to copy. In this case the decomposition
// must begin with a modifier and should always be appended.
// Case 2: no leftover. Simply return d if followed by a ccc == 0 value.
p := outp + len(d)
if outp > 0 {
i.rb.src.copySlice(i.buf[outCopyStart:], inCopyStart, i.p)
// TODO: this condition should not be possible, but we leave it
// in for defensive purposes.
if p > len(i.buf) {
return i.buf[:outp]
}
} else if i.info.multiSegment() {
// outp must be 0 as multi-segment decompositions always
// start a new segment.
if i.multiSeg == nil {
i.multiSeg = d
i.next = nextMulti
return nextMulti(i)
}
// We are in the last segment. Treat as normal decomposition.
d = i.multiSeg
i.multiSeg = nil
p = len(d)
}
prevCC := i.info.tccc
if i.p += sz; i.p >= i.rb.nsrc {
i.setDone()
i.info = Properties{} // Force BoundaryBefore to succeed.
} else {
i.info = i.rb.f.info(i.rb.src, i.p)
}
switch i.rb.ss.next(i.info) {
case ssOverflow:
i.next = nextCGJDecompose
fallthrough
case ssStarter:
if outp > 0 {
copy(i.buf[outp:], d)
return i.buf[:p]
}
return d
}
copy(i.buf[outp:], d)
outp = p
inCopyStart, outCopyStart = i.p, outp
if i.info.ccc < prevCC {
goto doNorm
}
continue
} else if r := i.rb.src.hangul(i.p); r != 0 {
outp = decomposeHangul(i.buf[:], r)
i.p += hangulUTF8Size
inCopyStart, outCopyStart = i.p, outp
if i.p >= i.rb.nsrc {
i.setDone()
break
} else if i.rb.src.hangul(i.p) != 0 {
i.next = nextHangul
return i.buf[:outp]
}
} else {
p := outp + sz
if p > len(i.buf) {
break
}
outp = p
i.p += sz
}
if i.p >= i.rb.nsrc {
i.setDone()
break
}
prevCC := i.info.tccc
i.info = i.rb.f.info(i.rb.src, i.p)
if v := i.rb.ss.next(i.info); v == ssStarter {
break
} else if v == ssOverflow {
i.next = nextCGJDecompose
break
}
if i.info.ccc < prevCC {
goto doNorm
}
}
if outCopyStart == 0 {
return i.returnSlice(inCopyStart, i.p)
} else if inCopyStart < i.p {
i.rb.src.copySlice(i.buf[outCopyStart:], inCopyStart, i.p)
}
return i.buf[:outp]
doNorm:
// Insert what we have decomposed so far in the reorderBuffer.
// As we will only reorder, there will always be enough room.
i.rb.src.copySlice(i.buf[outCopyStart:], inCopyStart, i.p)
i.rb.insertDecomposed(i.buf[0:outp])
return doNormDecomposed(i)
}
func doNormDecomposed(i *Iter) []byte {
for {
i.rb.insertUnsafe(i.rb.src, i.p, i.info)
if i.p += int(i.info.size); i.p >= i.rb.nsrc {
i.setDone()
break
}
i.info = i.rb.f.info(i.rb.src, i.p)
if i.info.ccc == 0 {
break
}
if s := i.rb.ss.next(i.info); s == ssOverflow {
i.next = nextCGJDecompose
break
}
}
// new segment or too many combining characters: exit normalization
return i.buf[:i.rb.flushCopy(i.buf[:])]
}
func nextCGJDecompose(i *Iter) []byte {
i.rb.ss = 0
i.rb.insertCGJ()
i.next = nextDecomposed
i.rb.ss.first(i.info)
buf := doNormDecomposed(i)
return buf
}
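// When ssOverflow is reported (too many combining characters in one
// segment), a CGJ (combining grapheme joiner) is inserted to force a new
// segment; this is what bounds every segment by MaxSegmentSize.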
// nextComposed is the implementation of Next for forms NFC and NFKC.
func nextComposed(i *Iter) []byte {
outp, startp := 0, i.p
var prevCC uint8
for {
if !i.info.isYesC() {
goto doNorm
}
prevCC = i.info.tccc
sz := int(i.info.size)
if sz == 0 {
sz = 1 // illegal rune: copy byte-by-byte
}
p := outp + sz
if p > len(i.buf) {
break
}
outp = p
i.p += sz
if i.p >= i.rb.nsrc {
i.setDone()
break
} else if i.rb.src._byte(i.p) < utf8.RuneSelf {
i.rb.ss = 0
i.next = i.asciiF
break
}
i.info = i.rb.f.info(i.rb.src, i.p)
if v := i.rb.ss.next(i.info); v == ssStarter {
break
} else if v == ssOverflow {
i.next = nextCGJCompose
break
}
if i.info.ccc < prevCC {
goto doNorm
}
}
return i.returnSlice(startp, i.p)
doNorm:
// reset to start position
i.p = startp
i.info = i.rb.f.info(i.rb.src, i.p)
i.rb.ss.first(i.info)
if i.info.multiSegment() {
d := i.info.Decomposition()
info := i.rb.f.info(input{bytes: d}, 0)
i.rb.insertUnsafe(input{bytes: d}, 0, info)
i.multiSeg = d[int(info.size):]
i.next = nextMultiNorm
return nextMultiNorm(i)
}
i.rb.ss.first(i.info)
i.rb.insertUnsafe(i.rb.src, i.p, i.info)
return doNormComposed(i)
}
func doNormComposed(i *Iter) []byte {
// First rune should already be inserted.
for {
if i.p += int(i.info.size); i.p >= i.rb.nsrc {
i.setDone()
break
}
i.info = i.rb.f.info(i.rb.src, i.p)
if s := i.rb.ss.next(i.info); s == ssStarter {
break
} else if s == ssOverflow {
i.next = nextCGJCompose
break
}
i.rb.insertUnsafe(i.rb.src, i.p, i.info)
}
i.rb.compose()
seg := i.buf[:i.rb.flushCopy(i.buf[:])]
return seg
}
func nextCGJCompose(i *Iter) []byte {
i.rb.ss = 0 // instead of first
i.rb.insertCGJ()
i.next = nextComposed
// Note that we treat any rune with nLeadingNonStarters > 0 as a non-starter,
// even if they are not. This is particularly dubious for U+FF9E and U+FF9F.
// If we ever change that, insert a check here.
i.rb.ss.first(i.info)
i.rb.insertUnsafe(i.rb.src, i.p, i.info)
return doNormComposed(i)
}
| {
j := 0
d := i.multiSeg
// skip first rune
for j = 1; j < len(d) && !utf8.RuneStart(d[j]); j++ {
}
for j < len(d) {
info := i.rb.f.info(input{bytes: d}, j)
if info.BoundaryBefore() {
i.multiSeg = d[j:]
return d[:j]
}
j += int(info.size)
}
// treat last segment as normal decomposition
i.next = i.rb.f.nextMain
return i.next(i)
} | identifier_body |
consulta-ine.page.ts | import { Component, OnInit } from '@angular/core';
import { InAppBrowser } from '@ionic-native/in-app-browser/ngx';
import { JsonData } from 'src/app/services/actividades/model/json-data.model';
import { AlertController, NavController } from '@ionic/angular';
import { DocumentosService } from 'src/app/services/documentos/documentos.service';
import { OauthService } from 'src/app/services/oauth.service';
import { LoginService } from 'src/app/services/login.service';
import { ActivitiesService } from 'src/app/services/actividades/activities-service';
import { GuardarStorageService } from 'src/app/services/guardar-storage.service';
import { JsonMetadata } from 'src/app/services/actividades/model/json-metadata.model';
import { JsonDatosActivity } from 'src/app/services/actividades/model/json-datos-activity.model';
import { HttpErrorResponse } from '@angular/common/http';
import { DataFile } from 'src/app/services/documentos/model/data-file.model';
import { JsonRequest } from 'src/app/services/documentos/model/jsonRequest.model';
import { Imagen } from 'src/app/herramientas/imagen';
import { Camera, CameraOptions } from '@ionic-native/camera/ngx';
import { LoadingService } from 'src/app/services/loading.service';
import { Cliente } from '../../tipo-identificacion/consulta-similitud-confirmacion/model/Cliente.model';
import { JsonPersonalData } from 'src/app/services/actividades/model/json-personal-data.model';
import { JsonOperationData } from 'src/app/services/actividades/model/json-operation-data.model';
import { JsonInnerData } from 'src/app/services/actividades/model/json-inner-data.model';
import { ActivatedRoute } from '@angular/router';
@Component({
selector: 'app-consulta-ine',
templateUrl: './consulta-ine.page.html',
styleUrls: ['./consulta-ine.page.scss'],
})
export class ConsultaInePage implements OnInit {
cardValid: boolean;
browser: any;
esCargando: boolean;
capturasINE: string;
frontImg: string;
isenabled: boolean;
isValidoSpinnerFront: boolean;
frontImg2: any;
secuenciaId: number;
client: Cliente;
pData: JsonPersonalData;
validINE: string;
constructor(
private iab: InAppBrowser,
private alertCtrl: AlertController,
private navCtrl: NavController,
private documentosService: DocumentosService,
private oauth: OauthService,
private login: LoginService,
private loading: LoadingService,
private activityService: ActivitiesService,
private saveS: GuardarStorageService,
private activitiesService: ActivitiesService,
private route: ActivatedRoute,
public camera: Camera) {
this.route.queryParams.subscribe(params => {
if (params) {
this.pData = JSON.parse(params.client);
this.pData.observations = this.validINE;
}
});
this.cardValid = true;
this.validINE = 'Identificación válida';
this.secuenciaId = 0;
if (this.saveS.getTipoFlujo() === "alhajas")
{
this.secuenciaId = 4;
}
else
{
this.secuenciaId = 9;
}
}
ngOnInit() {
}
getImagenFront() {
this.isValidoSpinnerFront = true;
const options: CameraOptions = {
quality: 70,
destinationType: this.camera.DestinationType.DATA_URL,
encodingType: this.camera.EncodingType.JPEG,
mediaType: this.camera.MediaType.PICTURE
};
this.camera.getPicture(options).then((imageData) => {
this.frontImg = 'data:image/jpeg;base64,' + imageData;
this.frontImg2 = imageData; // keep the raw base64 too, as in openGallery()
this.saveS.guardarStorageImagenF(this.frontImg);
if (this.frontImg) {
// enable the button
this.isenabled = true;
this.isValidoSpinnerFront = false;
} else {
// disable the button
this.isenabled = false;
this.isValidoSpinnerFront = false;
}
}, (err) => {
// Handle error
console.log('Camera issue: ' + err);
});
this.isValidoSpinnerFront = false;
}
openB() {
this.browser = this.iab.create('https://listanominal.ine.mx', '_system');
}
openGallery() {
const cameraOptions = {
sourceType: this.camera.PictureSourceType.PHOTOLIBRARY,
destinationType: this.camera.DestinationType.DATA_URL,
encodingType: this.camera.EncodingType.JPEG,
quality: 70,
targetWidth: 1000,
targetHeight: 1000,
correctOrientation: true
};
/* this.camera.getPicture(cameraOptions)
.then(file_uri => this.frontImg = file_uri,
err => console.log(err)); */
this.camera.getPicture(cameraOptions).then((imageData) => {
this.frontImg = 'data:image/jpeg;base64,' + imageData;
this.frontImg2 = imageData;
this.saveS.guardarStorageImagenF(this.frontImg);
if (this.frontImg) {
// enable the button
this.isenabled = true;
this.isValidoSpinnerFront = false;
} else {
// disable the button
this.isenabled = false;
this.isValidoSpinnerFront = false;
}
}, (err) => {
// Handle error
console.log('Camera issue:' + err);
});
this.isValidoSpinnerFront = false;
}
goCallBack() {
console.log('Typescript callback has been called');
this.browser.hide();
}
ch | event): void {
let mensajeError = '';
const file: File = $event.target.files[0];
console.log('changeListenerF');
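// Accept only JPEG/PNG up to 1 MB; note that mensajeError is only stored
// here and never surfaced to the user.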
if ((file.type !== 'image/jpeg' && file.type !== 'image/png') || (file.size > 1000000)) {
mensajeError = 'Formato y/o tamaño de imagen incorrecto';
} else {
const myReader: FileReader = new FileReader();
myReader.onloadend = (e) => {
this.frontImg = myReader.result.toString();
console.log('frontImg');
console.log(this.frontImg);
// this.saveS.guardarStorageImagenF(this.capturasINE);
// this.imgCapturada.emit(capturas);
};
myReader.readAsDataURL(file);
}
if (this.frontImg) {
// enable the button
this.isenabled = true;
this.isValidoSpinnerFront = false;
} else {
// disable the button
this.isenabled = false;
this.isValidoSpinnerFront = false;
}
}
goCargarDocumento() {
try {
const imagen = new Imagen();
const blobAnverso = imagen.convertirImagenEnBlob(this.frontImg);
console.log('blobAnverso'); console.log(blobAnverso);
if (blobAnverso) {
this.cargarDocumento(blobAnverso, this.saveS.getBearerToken());
} else {
alert('Imagen inválida');
}
} catch (error) {
console.log(error);
}
}
cargarDocumento(fileAnverso: any, bearerToken: string) {
this.loading.present('Cargando...');
// this.actualizarActivity('EN PROCESO');
this.esCargando = true;
const date = new Date();
const dataFile1 = new DataFile(
'bid:Anverso', 'Nombre', 'Primer apellido', 'Segundo apellido', '123549', date.toISOString(), 'RES_BURO', '123123');
const jsonRequest = new JsonRequest('IDOFA', this.saveS.getOperationID(), 'OK', '', '');
this.documentosService.cargarDocumento(jsonRequest, dataFile1, fileAnverso, bearerToken).subscribe(
(respuesta: any) => {
console.log('cargarDocumento respuesta', respuesta);
if (respuesta['resultOK']) {
// this.actualizarActivity('FINALIZADO');
this.esCargando = false;
this.loading.dismiss();
// alert('Archivo guardado con éxito');
// this.navCtrl.navigateRoot('info-grales');
this.pData.observations = this.validINE;
this.guardarDatos(this.pData);
} else {
// alert(respuesta['message']);
this.loading.dismiss();
this.esCargando = false;
}
},
(error: HttpErrorResponse) => {
this.loading.dismiss();
console.log(error);
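// 401/404: the session is no longer valid, so force a new login.
// 500/501: retry the same upload (note: this recurses with no retry limit).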
switch (error['status']) {
case 401:
alert('Es necesario iniciar sesión nuevamente para continuar');
this.navCtrl.navigateRoot('login');
break;
case 404:
alert('Es necesario iniciar sesión nuevamente para continuar');
this.navCtrl.navigateRoot('login');
break;
case 500:
alert('Por favor, reintentar para continuar');
this.cargarDocumento(fileAnverso, bearerToken);
break;
case 501:
alert('Por favor, reintentar para continuar');
this.cargarDocumento(fileAnverso, bearerToken);
break;
default:
alert('Es necesario iniciar sesión nuevamente para continuar');
this.navCtrl.navigateRoot('login');
break;
}
// alert('Hubo un error al enviar los datos, intenta de nuevo');
});
}
actualizarActivity(estatus: string) {
const productId = 1;
const jsonData = new JsonData( productId, this.saveS.getSystemCode(),
estatus, '1', '', this.secuenciaId, 1, this.saveS.getPersonId());
// {'id': this.saveS.getPersonId(), 'observations': this.validINE.toString()}
console.log('jsonData validINE :');
console.log(jsonData);
const jsonMetaData = new JsonMetadata(0, '', 0, 0, 1, 1);
/* const jsonData = {
'data': {
'productId': 1,
'code': '',
'activityStatus': 'FINALIZADO',
'activityValue': null,
'data': '{"personal_data":{"id":' + this.saveS.getPersonId() + ',"observations":"' + this.validINE + '}}',
'secuence': 17,
'workflowId': 1,
'personId': this.saveS.getPersonId()
},
'metadata': {
'accuracy': 0,
'deviceInfo': '',
'latitude': 0,
'longitude': 0,
'timeZoneId': 1,
'userId': this.saveS.getResultLogin()
},
'operationId': this.saveS.getOperationID()
}; */
const jsonDatosActivity = new JsonDatosActivity(jsonData, jsonMetaData, this.saveS.getOperationID());
// jsonDatosActivity
this.activityService.actualizarDatosActivityINE(jsonDatosActivity,
this.login.token).subscribe(
(resultado: any) => {
});
}
guardarDatos(json: JsonPersonalData) {
// tslint:disable-next-line: max-line-length
console.log('Guardando datos:: consulta INE');
console.log(json);
json.code = null;
const jsonPersonalData = json;
//this.pData = jsonPersonalData;
const operationData = new JsonOperationData('bid');
const jsonInnerData = new JsonInnerData(jsonPersonalData);
const jsonInnerDataString = JSON.stringify(jsonInnerData);
const jsonData = new JsonData(1, '', 'FINALIZADO', '2', jsonInnerDataString, this.secuenciaId, 1, this.saveS.getPersonId());
const jsonMetaData = new JsonMetadata(0, '', 0, 0, 1, 1);
const jsonDatosActivity = new JsonDatosActivity(jsonData, jsonMetaData, this.saveS.getOperationID());
console.log('Guardando datos COMPLETOS:: consulta INE');
console.log(jsonDatosActivity);
this.saveS.setJsonDatosActivity(jsonDatosActivity);
this.activitiesService.actualizarDatosActivity(jsonDatosActivity, this.saveS.getBearerToken()).subscribe(
(resultado: any) => {
console.log(resultado);
if (resultado.code === -9999) {
this.navCtrl.navigateRoot('finalizar');
} else {
alert('Error al Guardar los datos');
}
},
(err: HttpErrorResponse) => {
console.log(err);
});
}
logout() {
this.login.finalizar();
}
isValidoChange(event) {
if (this.cardValid) {
this.validINE = 'Identificación válida';
console.log('isValidoChange');
console.log('isValidoChange', this.cardValid + '::' + this.validINE);
} else {
this.validINE = 'Identificación no válida';
console.log('isValidoChange');
console.log('isValidoChange', this.cardValid + '::' + this.validINE);
}
}
}
| angeListenerINE($ | identifier_name |
consulta-ine.page.ts | import { Component, OnInit } from '@angular/core';
import { InAppBrowser } from '@ionic-native/in-app-browser/ngx';
import { JsonData } from 'src/app/services/actividades/model/json-data.model';
import { AlertController, NavController } from '@ionic/angular';
import { DocumentosService } from 'src/app/services/documentos/documentos.service';
import { OauthService } from 'src/app/services/oauth.service';
import { LoginService } from 'src/app/services/login.service';
import { ActivitiesService } from 'src/app/services/actividades/activities-service';
import { GuardarStorageService } from 'src/app/services/guardar-storage.service';
import { JsonMetadata } from 'src/app/services/actividades/model/json-metadata.model';
import { JsonDatosActivity } from 'src/app/services/actividades/model/json-datos-activity.model';
import { HttpErrorResponse } from '@angular/common/http';
import { DataFile } from 'src/app/services/documentos/model/data-file.model';
import { JsonRequest } from 'src/app/services/documentos/model/jsonRequest.model';
import { Imagen } from 'src/app/herramientas/imagen';
import { Camera, CameraOptions } from '@ionic-native/camera/ngx';
import { LoadingService } from 'src/app/services/loading.service';
import { Cliente } from '../../tipo-identificacion/consulta-similitud-confirmacion/model/Cliente.model';
import { JsonPersonalData } from 'src/app/services/actividades/model/json-personal-data.model';
import { JsonOperationData } from 'src/app/services/actividades/model/json-operation-data.model';
import { JsonInnerData } from 'src/app/services/actividades/model/json-inner-data.model';
import { ActivatedRoute } from '@angular/router';
@Component({
selector: 'app-consulta-ine',
templateUrl: './consulta-ine.page.html',
styleUrls: ['./consulta-ine.page.scss'],
})
export class ConsultaInePage implements OnInit {
cardValid: boolean;
browser: any;
esCargando: boolean;
capturasINE: string;
frontImg: string;
isenabled: boolean;
isValidoSpinnerFront: boolean;
frontImg2: any;
secuenciaId: number;
client: Cliente;
pData: JsonPersonalData;
validINE: string;
constructor(
private iab: InAppBrowser,
private alertCtrl: AlertController,
private navCtrl: NavController,
private documentosService: DocumentosService,
private oauth: OauthService,
private login: LoginService,
private loading: LoadingService,
private activityService: ActivitiesService,
private saveS: GuardarStorageService,
private activitiesService: ActivitiesService,
private route: ActivatedRoute,
public camera: Camera) {
this.route.queryParams.subscribe(params => {
if (params) {
this.pData = JSON.parse(params.client);
this.pData.observations = this.validINE;
}
});
this.cardValid = true;
this.validINE = 'Identificación válida';
this.secuenciaId = 0;
if (this.saveS.getTipoFlujo() === "alhajas")
{
this.secuenciaId = 4;
}
else
{
this.secuenciaId = 9;
}
}
ngOnInit() {
}
getImagenFront() {
this.isValidoSpinnerFront = true;
const options: CameraOptions = {
quality: 70,
destinationType: this.camera.DestinationType.DATA_URL,
encodingType: this.camera.EncodingType.JPEG,
mediaType: this.camera.MediaType.PICTURE
};
this.camera.getPicture(options).then((imageData) => {
this.frontImg = 'data:image/jpeg;base64,' + imageData;
this.frontImg2 = imageData; // keep the raw base64 too, as in openGallery()
this.saveS.guardarStorageImagenF(this.frontImg);
if (this.frontImg) {
// enable the button
this.isenabled = true;
this.isValidoSpinnerFront = false;
} else {
// disable the button
this.isenabled = false;
this.isValidoSpinnerFront = false;
}
}, (err) => {
// Handle error
console.log('Camera issue: ' + err);
});
this.isValidoSpinnerFront = false;
}
openB() {
this.browser = this.iab.create('https://listanominal.ine.mx', '_system');
}
openGallery() {
const cameraOptions = {
sourceType: this.camera.PictureSourceType.PHOTOLIBRARY,
destinationType: this.camera.DestinationType.DATA_URL,
encodingType: this.camera.EncodingType.JPEG,
quality: 70,
targetWidth: 1000,
targetHeight: 1000, | /* this.camera.getPicture(cameraOptions)
.then(file_uri => this.frontImg = file_uri,
err => console.log(err)); */
this.camera.getPicture(cameraOptions).then((imageData) => {
this.frontImg = 'data:image/jpeg;base64,' + imageData;
this.frontImg2 = imageData;
this.saveS.guardarStorageImagenF(this.frontImg);
if (this.frontImg) {
// enable the button
this.isenabled = true;
this.isValidoSpinnerFront = false;
} else {
// disable the button
this.isenabled = false;
this.isValidoSpinnerFront = false;
}
}, (err) => {
// Handle error
console.log('Camera issue:' + err);
});
this.isValidoSpinnerFront = false;
}
goCallBack() {
console.log('Typescript callback has been called');
this.browser.hide();
}
changeListenerINE($event): void {
let mensajeError = '';
const file: File = $event.target.files[0];
console.log('changeListenerF');
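// Accept only JPEG/PNG up to 1 MB; note that mensajeError is only stored
// here and never surfaced to the user.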
if ((file.type !== 'image/jpeg' && file.type !== 'image/png') || (file.size > 1000000)) {
mensajeError = 'Formato y/o tamaño de imagen incorrecto';
} else {
const myReader: FileReader = new FileReader();
myReader.onloadend = (e) => {
this.frontImg = myReader.result.toString();
console.log('frontImg');
console.log(this.frontImg);
// this.saveS.guardarStorageImagenF(this.capturasINE);
// this.imgCapturada.emit(capturas);
};
myReader.readAsDataURL(file);
}
if (this.frontImg) {
// enable the button
this.isenabled = true;
this.isValidoSpinnerFront = false;
} else {
// disable the button
this.isenabled = false;
this.isValidoSpinnerFront = false;
}
}
goCargarDocumento() {
try {
const imagen = new Imagen();
const blobAnverso = imagen.convertirImagenEnBlob(this.frontImg);
console.log('blobAnverso'); console.log(blobAnverso);
if (blobAnverso) {
this.cargarDocumento(blobAnverso, this.saveS.getBearerToken());
} else {
alert('Imagen inválida');
}
} catch (error) {
console.log(error);
}
}
cargarDocumento(fileAnverso: any, bearerToken: string) {
this.loading.present('Cargando...');
// this.actualizarActivity('EN PROCESO');
this.esCargando = true;
const date = new Date();
const dataFile1 = new DataFile(
'bid:Anverso', 'Nombre', 'Primer apellido', 'Segundo apellido', '123549', date.toISOString(), 'RES_BURO', '123123');
const jsonRequest = new JsonRequest('IDOFA', this.saveS.getOperationID(), 'OK', '', '');
this.documentosService.cargarDocumento(jsonRequest, dataFile1, fileAnverso, bearerToken).subscribe(
(respuesta: any) => {
console.log('cargarDocumento respuesta', respuesta);
if (respuesta['resultOK']) {
// this.actualizarActivity('FINALIZADO');
this.esCargando = false;
this.loading.dismiss();
// alert('Archivo guardado con éxito');
// this.navCtrl.navigateRoot('info-grales');
this.pData.observations = this.validINE;
this.guardarDatos(this.pData);
} else {
// alert(respuesta['message']);
this.loading.dismiss();
this.esCargando = false;
}
},
(error: HttpErrorResponse) => {
this.loading.dismiss();
console.log(error);
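// 401/404: the session is no longer valid, so force a new login.
// 500/501: retry the same upload (note: this recurses with no retry limit).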
switch (error['status']) {
case 401:
alert('Es necesario iniciar sesión nuevamente para continuar');
this.navCtrl.navigateRoot('login');
break;
case 404:
alert('Es necesario iniciar sesión nuevamente para continuar');
this.navCtrl.navigateRoot('login');
break;
case 500:
alert('Por favor, reintentar para continuar');
this.cargarDocumento(fileAnverso, bearerToken);
break;
case 501:
alert('Por favor, reintentar para continuar');
this.cargarDocumento(fileAnverso, bearerToken);
break;
default:
alert('Es necesario iniciar sesión nuevamente para continuar');
this.navCtrl.navigateRoot('login');
break;
}
// alert('Hubo un error al enviar los datos, intenta de nuevo');
});
}
actualizarActivity(estatus: string) {
const productId = 1;
const jsonData = new JsonData( productId, this.saveS.getSystemCode(),
estatus, '1', '', this.secuenciaId, 1, this.saveS.getPersonId());
// {'id': this.saveS.getPersonId(), 'observations': this.validINE.toString()}
console.log('jsonData validINE :');
console.log(jsonData);
const jsonMetaData = new JsonMetadata(0, '', 0, 0, 1, 1);
/* const jsonData = {
'data': {
'productId': 1,
'code': '',
'activityStatus': 'FINALIZADO',
'activityValue': null,
'data': '{"personal_data":{"id":' + this.saveS.getPersonId() + ',"observations":"' + this.validINE + '}}',
'secuence': 17,
'workflowId': 1,
'personId': this.saveS.getPersonId()
},
'metadata': {
'accuracy': 0,
'deviceInfo': '',
'latitude': 0,
'longitude': 0,
'timeZoneId': 1,
'userId': this.saveS.getResultLogin()
},
'operationId': this.saveS.getOperationID()
}; */
const jsonDatosActivity = new JsonDatosActivity(jsonData, jsonMetaData, this.saveS.getOperationID());
// jsonDatosActivity
this.activityService.actualizarDatosActivityINE(jsonDatosActivity,
this.login.token).subscribe(
(resultado: any) => {
});
}
guardarDatos(json: JsonPersonalData) {
// tslint:disable-next-line: max-line-length
console.log('Guardando datos:: consulta INE');
console.log(json);
json.code = null;
const jsonPersonalData = json;
//this.pData = jsonPersonalData;
const operationData = new JsonOperationData('bid');
const jsonInnerData = new JsonInnerData(jsonPersonalData);
const jsonInnerDataString = JSON.stringify(jsonInnerData);
const jsonData = new JsonData(1, '', 'FINALIZADO', '2', jsonInnerDataString, this.secuenciaId, 1, this.saveS.getPersonId());
const jsonMetaData = new JsonMetadata(0, '', 0, 0, 1, 1);
const jsonDatosActivity = new JsonDatosActivity(jsonData, jsonMetaData, this.saveS.getOperationID());
console.log('Guardando datos COMPLETOS:: consulta INE');
console.log(jsonDatosActivity);
this.saveS.setJsonDatosActivity(jsonDatosActivity);
this.activitiesService.actualizarDatosActivity(jsonDatosActivity, this.saveS.getBearerToken()).subscribe(
(resultado: any) => {
console.log(resultado);
if (resultado.code === -9999) {
this.navCtrl.navigateRoot('finalizar');
} else {
alert('Error al Guardar los datos');
}
},
(err: HttpErrorResponse) => {
console.log(err);
});
}
logout() {
this.login.finalizar();
}
isValidoChange(event) {
if (this.cardValid) {
this.validINE = 'Identificación válida';
console.log('isValidoChange');
console.log('isValidoChange', this.cardValid + '::' + this.validINE);
} else {
this.validINE = 'Identificación no válida';
console.log('isValidoChange');
console.log('isValidoChange', this.cardValid + '::' + this.validINE);
}
}
} | correctOrientation: true
};
| random_line_split |
consulta-ine.page.ts | import { Component, OnInit } from '@angular/core';
import { InAppBrowser } from '@ionic-native/in-app-browser/ngx';
import { JsonData } from 'src/app/services/actividades/model/json-data.model';
import { AlertController, NavController } from '@ionic/angular';
import { DocumentosService } from 'src/app/services/documentos/documentos.service';
import { OauthService } from 'src/app/services/oauth.service';
import { LoginService } from 'src/app/services/login.service';
import { ActivitiesService } from 'src/app/services/actividades/activities-service';
import { GuardarStorageService } from 'src/app/services/guardar-storage.service';
import { JsonMetadata } from 'src/app/services/actividades/model/json-metadata.model';
import { JsonDatosActivity } from 'src/app/services/actividades/model/json-datos-activity.model';
import { HttpErrorResponse } from '@angular/common/http';
import { DataFile } from 'src/app/services/documentos/model/data-file.model';
import { JsonRequest } from 'src/app/services/documentos/model/jsonRequest.model';
import { Imagen } from 'src/app/herramientas/imagen';
import { Camera, CameraOptions } from '@ionic-native/camera/ngx';
import { LoadingService } from 'src/app/services/loading.service';
import { Cliente } from '../../tipo-identificacion/consulta-similitud-confirmacion/model/Cliente.model';
import { JsonPersonalData } from 'src/app/services/actividades/model/json-personal-data.model';
import { JsonOperationData } from 'src/app/services/actividades/model/json-operation-data.model';
import { JsonInnerData } from 'src/app/services/actividades/model/json-inner-data.model';
import { ActivatedRoute } from '@angular/router';
@Component({
selector: 'app-consulta-ine',
templateUrl: './consulta-ine.page.html',
styleUrls: ['./consulta-ine.page.scss'],
})
export class ConsultaInePage implements OnInit {
cardValid: boolean;
browser: any;
esCargando: boolean;
capturasINE: string;
frontImg: string;
isenabled: boolean;
isValidoSpinnerFront: boolean;
frontImg2: any;
secuenciaId: number;
client: Cliente;
pData: JsonPersonalData;
validINE: string;
constructor(
private iab: InAppBrowser,
private alertCtrl: AlertController,
private navCtrl: NavController,
private documentosService: DocumentosService,
private oauth: OauthService,
private login: LoginService,
private loading: LoadingService,
private activityService: ActivitiesService,
private saveS: GuardarStorageService,
private activitiesService: ActivitiesService,
private route: ActivatedRoute,
public camera: Camera) {
this.route.queryParams.subscribe(params => {
if (params) {
this.pData = JSON.parse(params.client);
this.pData.observations = this.validINE;
}
});
this.cardValid = true;
this.validINE = 'Identificación válida';
this.secuenciaId = 0;
if (this.saveS.getTipoFlujo() === "alhajas")
{
this.secuenciaId = 4;
}
else
{
this.secuenciaId = 9;
}
}
ngOnInit() {
}
getImagenFront() {
this.isValidoSpinnerFront = true;
const options: CameraOptions = {
quality: 70,
destinationType: this.camera.DestinationType.DATA_URL,
encodingType: this.camera.EncodingType.JPEG,
mediaType: this.camera.MediaType.PICTURE
};
this.camera.getPicture(options).then((imageData) => {
this.frontImg = 'data:image/jpeg;base64,' + imageData;
this.frontImg2 = imageData; // keep the raw base64 too, as in openGallery()
this.saveS.guardarStorageImagenF(this.frontImg);
if (this.frontImg) {
// enable the button
this.isenabled = true;
this.isValidoSpinnerFront = false;
} else {
// disable the button
this.isenabled = false;
this.isValidoSpinnerFront = false;
}
}, (err) => {
// Handle error
console.log('Camera issue: ' + err);
});
this.isValidoSpinnerFront = false;
}
openB() {
this.browser = this.iab.create('https://listanominal.ine.mx', '_system');
}
openGallery() {
const cameraOptions = {
sourceType: this.camera.PictureSourceType.PHOTOLIBRARY,
destinationType: this.camera.DestinationType.DATA_URL,
encodingType: this.camera.EncodingType.JPEG,
quality: 70,
targetWidth: 1000,
targetHeight: 1000,
correctOrientation: true
};
/* this.camera.getPicture(cameraOptions)
.then(file_uri => this.frontImg = file_uri,
err => console.log(err)); */
this.camera.getPicture(cameraOptions).then((imageData) => {
this.frontImg = 'data:image/jpeg;base64,' + imageData;
this.frontImg2 = imageData;
this.saveS.guardarStorageImagenF(this.frontImg);
if (this.frontImg) {
// enable the button
this.isenabled = true;
this.isValidoSpinnerFront = false;
} else {
// disable the button
this.isenabled = false;
this.isValidoSpinnerFront = false;
}
}, (err) => {
// Handle error
console.log('Camera issue:' + err);
});
this.isValidoSpinnerFront = false;
}
goCallBack() {
console.log('Typescript callback has been called');
this.browser.hide();
}
changeListenerINE($event): void {
let mensajeError = '';
const file: File = $event.target.files[0];
console.log('changeListenerF');
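// Accept only JPEG/PNG up to 1 MB; note that mensajeError is only stored
// here and never surfaced to the user.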
if ((file.type !== 'image/jpeg' && file.type !== 'image/png') || (file.size > 1000000)) {
mensajeError = 'Formato y/o tamaño de imagen incorrecto';
} else {
const myReader: FileReader = new FileReader();
myReader.onloadend = (e) => {
this.frontImg = myReader.result.toString();
console.log('frontImg');
console.log(this.frontImg);
// this.saveS.guardarStorageImagenF(this.capturasINE);
// this.imgCapturada.emit(capturas);
};
myReader.readAsDataURL(file);
}
if (this.frontImg) {
// enable the button
this.isenabled = true;
this.isValidoSpinnerFront = false;
} else {
|
goCargarDocumento() {
try {
const imagen = new Imagen();
const blobAnverso = imagen.convertirImagenEnBlob(this.frontImg);
console.log('blobAnverso'); console.log(blobAnverso);
if (blobAnverso) {
this.cargarDocumento(blobAnverso, this.saveS.getBearerToken());
} else {
alert('Imagen inválida');
}
} catch (error) {
console.log(error);
}
}
cargarDocumento(fileAnverso: any, bearerToken: string) {
this.loading.present('Cargando...');
// this.actualizarActivity('EN PROCESO');
this.esCargando = true;
const date = new Date();
const dataFile1 = new DataFile(
'bid:Anverso', 'Nombre', 'Primer apellido', 'Segundo apellido', '123549', date.toISOString(), 'RES_BURO', '123123');
const jsonRequest = new JsonRequest('IDOFA', this.saveS.getOperationID(), 'OK', '', '');
this.documentosService.cargarDocumento(jsonRequest, dataFile1, fileAnverso, bearerToken).subscribe(
(respuesta: any) => {
console.log('cargarDocumento respuesta', respuesta);
if (respuesta['resultOK']) {
// this.actualizarActivity('FINALIZADO');
this.esCargando = false;
this.loading.dismiss();
// alert('Archivo guardado con éxito');
// this.navCtrl.navigateRoot('info-grales');
this.pData.observations = this.validINE;
this.guardarDatos(this.pData);
} else {
// alert(respuesta['message']);
this.loading.dismiss();
this.esCargando = false;
}
},
(error: HttpErrorResponse) => {
this.loading.dismiss();
console.log(error);
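// 401/404: the session is no longer valid, so force a new login.
// 500/501: retry the same upload (note: this recurses with no retry limit).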
switch (error['status']) {
case 401:
alert('Es necesario iniciar sesión nuevamente para continuar');
this.navCtrl.navigateRoot('login');
break;
case 404:
alert('Es necesario iniciar sesión nuevamente para continuar');
this.navCtrl.navigateRoot('login');
break;
case 500:
alert('Por favor, reintentar para continuar');
this.cargarDocumento(fileAnverso, bearerToken);
break;
case 501:
alert('Por favor, reintentar para continuar');
this.cargarDocumento(fileAnverso, bearerToken);
break;
default:
alert('Es necesario iniciar sesión nuevamente para continuar');
this.navCtrl.navigateRoot('login');
break;
}
// alert('Hubo un error al enviar los datos, intenta de nuevo');
});
}
actualizarActivity(estatus: string) {
const productId = 1;
const jsonData = new JsonData( productId, this.saveS.getSystemCode(),
estatus, '1', '', this.secuenciaId, 1, this.saveS.getPersonId());
// {'id': this.saveS.getPersonId(), 'observations': this.validINE.toString()}
console.log('jsonData validINE :');
console.log(jsonData);
const jsonMetaData = new JsonMetadata(0, '', 0, 0, 1, 1);
/* const jsonData = {
'data': {
'productId': 1,
'code': '',
'activityStatus': 'FINALIZADO',
'activityValue': null,
'data': '{"personal_data":{"id":' + this.saveS.getPersonId() + ',"observations":"' + this.validINE + '}}',
'secuence': 17,
'workflowId': 1,
'personId': this.saveS.getPersonId()
},
'metadata': {
'accuracy': 0,
'deviceInfo': '',
'latitude': 0,
'longitude': 0,
'timeZoneId': 1,
'userId': this.saveS.getResultLogin()
},
'operationId': this.saveS.getOperationID()
}; */
const jsonDatosActivity = new JsonDatosActivity(jsonData, jsonMetaData, this.saveS.getOperationID());
// jsonDatosActivity
this.activityService.actualizarDatosActivityINE(jsonDatosActivity,
this.login.token).subscribe(
(resultado: any) => {
});
}
guardarDatos(json: JsonPersonalData) {
// tslint:disable-next-line: max-line-length
console.log('Guardando datos:: consulta INE');
console.log(json);
json.code = null;
const jsonPersonalData = json;
//this.pData = jsonPersonalData;
const operationData = new JsonOperationData('bid');
const jsonInnerData = new JsonInnerData(jsonPersonalData);
const jsonInnerDataString = JSON.stringify(jsonInnerData);
const jsonData = new JsonData(1, '', 'FINALIZADO', '2', jsonInnerDataString, this.secuenciaId, 1, this.saveS.getPersonId());
const jsonMetaData = new JsonMetadata(0, '', 0, 0, 1, 1);
const jsonDatosActivity = new JsonDatosActivity(jsonData, jsonMetaData, this.saveS.getOperationID());
console.log('Guardando datos COMPLETOS:: consulta INE');
console.log(jsonDatosActivity);
this.saveS.setJsonDatosActivity(jsonDatosActivity);
this.activitiesService.actualizarDatosActivity(jsonDatosActivity, this.saveS.getBearerToken()).subscribe(
(resultado: any) => {
console.log(resultado);
if (resultado.code === -9999) {
this.navCtrl.navigateRoot('finalizar');
} else {
alert('Error al Guardar los datos');
}
},
(err: HttpErrorResponse) => {
console.log(err);
});
}
logout() {
this.login.finalizar();
}
isValidoChange(event) {
if (this.cardValid) {
this.validINE = 'Identificación válida';
console.log('isValidoChange');
console.log('isValidoChange', this.cardValid + '::' + this.validINE);
} else {
this.validINE = 'Identificación no válida';
console.log('isValidoChange');
console.log('isValidoChange', this.cardValid + '::' + this.validINE);
}
}
}
| // disable the button
this.isenabled = false;
this.isValidoSpinnerFront = false;
}
}
| conditional_block |
consulta-ine.page.ts | import { Component, OnInit } from '@angular/core';
import { InAppBrowser } from '@ionic-native/in-app-browser/ngx';
import { JsonData } from 'src/app/services/actividades/model/json-data.model';
import { AlertController, NavController } from '@ionic/angular';
import { DocumentosService } from 'src/app/services/documentos/documentos.service';
import { OauthService } from 'src/app/services/oauth.service';
import { LoginService } from 'src/app/services/login.service';
import { ActivitiesService } from 'src/app/services/actividades/activities-service';
import { GuardarStorageService } from 'src/app/services/guardar-storage.service';
import { JsonMetadata } from 'src/app/services/actividades/model/json-metadata.model';
import { JsonDatosActivity } from 'src/app/services/actividades/model/json-datos-activity.model';
import { HttpErrorResponse } from '@angular/common/http';
import { DataFile } from 'src/app/services/documentos/model/data-file.model';
import { JsonRequest } from 'src/app/services/documentos/model/jsonRequest.model';
import { Imagen } from 'src/app/herramientas/imagen';
import { Camera, CameraOptions } from '@ionic-native/camera/ngx';
import { LoadingService } from 'src/app/services/loading.service';
import { Cliente } from '../../tipo-identificacion/consulta-similitud-confirmacion/model/Cliente.model';
import { JsonPersonalData } from 'src/app/services/actividades/model/json-personal-data.model';
import { JsonOperationData } from 'src/app/services/actividades/model/json-operation-data.model';
import { JsonInnerData } from 'src/app/services/actividades/model/json-inner-data.model';
import { ActivatedRoute } from '@angular/router';
@Component({
selector: 'app-consulta-ine',
templateUrl: './consulta-ine.page.html',
styleUrls: ['./consulta-ine.page.scss'],
})
export class ConsultaInePage implements OnInit {
cardValid: boolean;
browser: any;
esCargando: boolean;
capturasINE: string;
frontImg: string;
isenabled: boolean;
isValidoSpinnerFront: boolean;
frontImg2: any;
secuenciaId: number;
client: Cliente;
pData: JsonPersonalData;
validINE: string;
constructor(
private iab: InAppBrowser,
private alertCtrl: AlertController,
private navCtrl: NavController,
private documentosService: DocumentosService,
private oauth: OauthService,
private login: LoginService,
private loading: LoadingService,
private activityService: ActivitiesService,
private saveS: GuardarStorageService,
private activitiesService: ActivitiesService,
private route: ActivatedRoute,
public camera: Camera) {
this.route.queryParams.subscribe(params => {
if (params) {
this.pData = JSON.parse(params.client);
this.pData.observations = this.validINE;
}
});
this.cardValid = true;
this.validINE = 'Identificación válida';
this.secuenciaId = 0;
if (this.saveS.getTipoFlujo() === "alhajas")
{
this.secuenciaId = 4;
}
else
{
this.secuenciaId = 9;
}
}
ngOnInit() {
| getImagenFront() {
this.isValidoSpinnerFront = true;
const options: CameraOptions = {
quality: 70,
destinationType: this.camera.DestinationType.DATA_URL,
encodingType: this.camera.EncodingType.JPEG,
mediaType: this.camera.MediaType.PICTURE
};
this.camera.getPicture(options).then((imageData) => {
this.frontImg = 'data:image/jpeg;base64,' + imageData;
this.frontImg2 = imageData; // keep the raw base64 too, as in openGallery()
this.saveS.guardarStorageImagenF(this.frontImg);
if (this.frontImg) {
// enable the button
this.isenabled = true;
this.isValidoSpinnerFront = false;
} else {
// disable the button
this.isenabled = false;
this.isValidoSpinnerFront = false;
}
}, (err) => {
// Handle error
console.log('Camera issue: ' + err);
});
this.isValidoSpinnerFront = false;
}
openB() {
this.browser = this.iab.create('https://listanominal.ine.mx', '_system');
}
openGallery() {
const cameraOptions = {
sourceType: this.camera.PictureSourceType.PHOTOLIBRARY,
destinationType: this.camera.DestinationType.DATA_URL,
encodingType: this.camera.EncodingType.JPEG,
quality: 70,
targetWidth: 1000,
targetHeight: 1000,
correctOrientation: true
};
/* this.camera.getPicture(cameraOptions)
.then(file_uri => this.frontImg = file_uri,
err => console.log(err)); */
this.camera.getPicture(cameraOptions).then((imageData) => {
this.frontImg = 'data:image/jpeg;base64,' + imageData;
this.frontImg2 = imageData;
this.saveS.guardarStorageImagenF(this.frontImg);
if (this.frontImg) {
// enable the button
this.isenabled = true;
this.isValidoSpinnerFront = false;
} else {
// disable the button
this.isenabled = false;
this.isValidoSpinnerFront = false;
}
}, (err) => {
// Handle error
console.log('Camera issue:' + err);
});
this.isValidoSpinnerFront = false;
}
goCallBack() {
console.log('Typescript callback has been called');
this.browser.hide();
}
changeListenerINE($event): void {
let mensajeError = '';
const file: File = $event.target.files[0];
console.log('changeListenerF');
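// Accept only JPEG/PNG up to 1 MB; note that mensajeError is only stored
// here and never surfaced to the user.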
if ((file.type !== 'image/jpeg' && file.type !== 'image/png') || (file.size > 1000000)) {
mensajeError = 'Formato y/o tamaño de imagen incorrecto';
} else {
const myReader: FileReader = new FileReader();
myReader.onloadend = (e) => {
this.frontImg = myReader.result.toString();
console.log('frontImg');
console.log(this.frontImg);
// this.saveS.guardarStorageImagenF(this.capturasINE);
// this.imgCapturada.emit(capturas);
};
myReader.readAsDataURL(file);
}
if (this.frontImg) {
// enable the button
this.isenabled = true;
this.isValidoSpinnerFront = false;
} else {
// disable the button
this.isenabled = false;
this.isValidoSpinnerFront = false;
}
}
goCargarDocumento() {
try {
const imagen = new Imagen();
const blobAnverso = imagen.convertirImagenEnBlob(this.frontImg);
console.log('blobAnverso'); console.log(blobAnverso);
if (blobAnverso) {
this.cargarDocumento(blobAnverso, this.saveS.getBearerToken());
} else {
alert('Imagen inválida');
}
} catch (error) {
console.log(error);
}
}
cargarDocumento(fileAnverso: any, bearerToken: string) {
this.loading.present('Cargando...');
// this.actualizarActivity('EN PROCESO');
this.esCargando = true;
const date = new Date();
const dataFile1 = new DataFile(
'bid:Anverso', 'Nombre', 'Primer apellido', 'Segundo apellido', '123549', date.toISOString(), 'RES_BURO', '123123');
const jsonRequest = new JsonRequest('IDOFA', this.saveS.getOperationID(), 'OK', '', '');
this.documentosService.cargarDocumento(jsonRequest, dataFile1, fileAnverso, bearerToken).subscribe(
(respuesta: any) => {
console.log('cargarDocumento respuesta', respuesta);
if (respuesta['resultOK']) {
// this.actualizarActivity('FINALIZADO');
this.esCargando = false;
this.loading.dismiss();
// alert('Archivo guardado con éxito');
// this.navCtrl.navigateRoot('info-grales');
this.pData.observations = this.validINE;
this.guardarDatos(this.pData);
} else {
// alert(respuesta['message']);
this.loading.dismiss();
this.esCargando = false;
}
},
(error: HttpErrorResponse) => {
this.loading.dismiss();
console.log(error);
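// 401/404: the session is no longer valid, so force a new login.
// 500/501: retry the same upload (note: this recurses with no retry limit).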
switch (error['status']) {
case 401:
alert('Es necesario iniciar sesión nuevamente para continuar');
this.navCtrl.navigateRoot('login');
break;
case 404:
alert('Es necesario iniciar sesión nuevamente para continuar');
this.navCtrl.navigateRoot('login');
break;
case 500:
alert('Por favor, reintentar para continuar');
this.cargarDocumento(fileAnverso, bearerToken);
break;
case 501:
alert('Por favor, reintentar para continuar');
this.cargarDocumento(fileAnverso, bearerToken);
break;
default:
alert('Es necesario iniciar sesión nuevamente para continuar');
this.navCtrl.navigateRoot('login');
break;
}
// alert('Hubo un error al enviar los datos, intenta de nuevo');
});
}
actualizarActivity(estatus: string) {
const productId = 1;
const jsonData = new JsonData( productId, this.saveS.getSystemCode(),
estatus, '1', '', this.secuenciaId, 1, this.saveS.getPersonId());
// {'id': this.saveS.getPersonId(), 'observations': this.validINE.toString()}
console.log('jsonData validINE :');
console.log(jsonData);
const jsonMetaData = new JsonMetadata(0, '', 0, 0, 1, 1);
/* const jsonData = {
'data': {
'productId': 1,
'code': '',
'activityStatus': 'FINALIZADO',
'activityValue': null,
'data': '{"personal_data":{"id":' + this.saveS.getPersonId() + ',"observations":"' + this.validINE + '}}',
'secuence': 17,
'workflowId': 1,
'personId': this.saveS.getPersonId()
},
'metadata': {
'accuracy': 0,
'deviceInfo': '',
'latitude': 0,
'longitude': 0,
'timeZoneId': 1,
'userId': this.saveS.getResultLogin()
},
'operationId': this.saveS.getOperationID()
}; */
const jsonDatosActivity = new JsonDatosActivity(jsonData, jsonMetaData, this.saveS.getOperationID());
// jsonDatosActivity
this.activityService.actualizarDatosActivityINE(jsonDatosActivity,
this.login.token).subscribe(
(resultado: any) => {
});
}
guardarDatos(json: JsonPersonalData) {
// tslint:disable-next-line: max-line-length
console.log('Guardando datos:: consulta INE');
console.log(json);
json.code = null;
const jsonPersonalData = json;
//this.pData = jsonPersonalData;
const operationData = new JsonOperationData('bid');
const jsonInnerData = new JsonInnerData(jsonPersonalData);
const jsonInnerDataString = JSON.stringify(jsonInnerData);
const jsonData = new JsonData(1, '', 'FINALIZADO', '2', jsonInnerDataString, this.secuenciaId, 1, this.saveS.getPersonId());
const jsonMetaData = new JsonMetadata(0, '', 0, 0, 1, 1);
const jsonDatosActivity = new JsonDatosActivity(jsonData, jsonMetaData, this.saveS.getOperationID());
console.log('Guardando datos COMPLETOS:: consulta INE');
console.log(jsonDatosActivity);
this.saveS.setJsonDatosActivity(jsonDatosActivity);
this.activitiesService.actualizarDatosActivity(jsonDatosActivity, this.saveS.getBearerToken()).subscribe(
(resultado: any) => {
console.log(resultado);
if (resultado.code === -9999) {
this.navCtrl.navigateRoot('finalizar');
} else {
alert('Error al Guardar los datos');
}
},
(err: HttpErrorResponse) => {
console.log(err);
});
}
logout() {
this.login.finalizar();
}
isValidoChange(event) {
if (this.cardValid) {
this.validINE = 'Identificación válida';
console.log('isValidoChange');
console.log('isValidoChange', this.cardValid + '::' + this.validINE);
} else {
this.validINE = 'Identificación no válida';
console.log('isValidoChange');
console.log('isValidoChange', this.cardValid + '::' + this.validINE);
}
}
}
| }
| identifier_body |
simple_http.rs | // SPDX-License-Identifier: CC0-1.0
//! This module implements a minimal, non-standards-conforming HTTP 1.1
//! round-tripper that works with the bitcoind RPC server. This can be used
//! if minimal dependencies are a goal and synchronous communication is ok.
#[cfg(feature = "proxy")]
use socks::Socks5Stream;
use std::io::{BufRead, BufReader, Read, Write};
#[cfg(not(jsonrpc_fuzz))]
use std::net::TcpStream;
use std::net::{SocketAddr, ToSocketAddrs};
use std::sync::{Arc, Mutex, MutexGuard};
use std::time::Duration;
use std::{error, fmt, io, net, num};
use crate::client::Transport;
use crate::http::DEFAULT_PORT;
#[cfg(feature = "proxy")]
use crate::http::DEFAULT_PROXY_PORT;
use crate::{Request, Response};
/// Absolute maximum content length allowed before cutting off the response.
const FINAL_RESP_ALLOC: u64 = 1024 * 1024 * 1024;
#[cfg(not(jsonrpc_fuzz))]
const DEFAULT_TIMEOUT: Duration = Duration::from_secs(15);
#[cfg(jsonrpc_fuzz)]
const DEFAULT_TIMEOUT: Duration = Duration::from_millis(1);
/// Simple HTTP transport that implements the necessary subset of HTTP for
/// running a bitcoind RPC client.
#[derive(Clone, Debug)]
pub struct SimpleHttpTransport {
addr: net::SocketAddr,
path: String,
timeout: Duration,
/// The value of the `Authorization` HTTP header.
basic_auth: Option<String>,
#[cfg(feature = "proxy")]
proxy_addr: net::SocketAddr,
#[cfg(feature = "proxy")]
proxy_auth: Option<(String, String)>,
sock: Arc<Mutex<Option<BufReader<TcpStream>>>>,
}
impl Default for SimpleHttpTransport {
fn default() -> Self {
SimpleHttpTransport {
addr: net::SocketAddr::new(
net::IpAddr::V4(net::Ipv4Addr::new(127, 0, 0, 1)),
DEFAULT_PORT,
),
path: "/".to_owned(),
timeout: DEFAULT_TIMEOUT,
basic_auth: None,
#[cfg(feature = "proxy")]
proxy_addr: net::SocketAddr::new(
net::IpAddr::V4(net::Ipv4Addr::new(127, 0, 0, 1)),
DEFAULT_PROXY_PORT,
),
#[cfg(feature = "proxy")]
proxy_auth: None,
sock: Arc::new(Mutex::new(None)),
}
}
}
impl SimpleHttpTransport {
/// Constructs a new [`SimpleHttpTransport`] with default parameters.
pub fn new() -> Self {
SimpleHttpTransport::default()
}
/// Returns a builder for [`SimpleHttpTransport`].
pub fn builder() -> Builder {
Builder::new()
}
/// Replaces the URL of the transport.
pub fn set_url(&mut self, url: &str) -> Result<(), Error> {
let url = check_url(url)?;
self.addr = url.0;
self.path = url.1;
Ok(())
}
/// Replaces only the path part of the URL.
pub fn set_url_path(&mut self, path: String) {
self.path = path;
}
fn request<R>(&self, req: impl serde::Serialize) -> Result<R, Error>
where
R: for<'a> serde::de::Deserialize<'a>,
{
match self.try_request(req) {
Ok(response) => Ok(response),
Err(err) => {
// No part of this codebase should panic, so unwrapping a mutex lock is fine
*self.sock.lock().expect("poisoned mutex") = None;
Err(err)
}
}
}
#[cfg(feature = "proxy")]
fn fresh_socket(&self) -> Result<TcpStream, Error> {
let stream = if let Some((username, password)) = &self.proxy_auth {
Socks5Stream::connect_with_password(
self.proxy_addr,
self.addr,
username.as_str(),
password.as_str(),
)?
} else {
Socks5Stream::connect(self.proxy_addr, self.addr)?
};
Ok(stream.into_inner())
}
#[cfg(not(feature = "proxy"))]
fn fresh_socket(&self) -> Result<TcpStream, Error> {
let stream = TcpStream::connect_timeout(&self.addr, self.timeout)?;
stream.set_read_timeout(Some(self.timeout))?;
stream.set_write_timeout(Some(self.timeout))?;
Ok(stream)
}
fn try_request<R>(&self, req: impl serde::Serialize) -> Result<R, Error>
where
R: for<'a> serde::de::Deserialize<'a>,
{
// No part of this codebase should panic, so unwrapping a mutex lock is fine
let mut sock_lock: MutexGuard<Option<_>> = self.sock.lock().expect("poisoned mutex");
if sock_lock.is_none() {
*sock_lock = Some(BufReader::new(self.fresh_socket()?));
};
// In the immediately preceding block, we made sure that `sock` is non-`None`,
// so unwrapping here is fine.
let sock: &mut BufReader<_> = sock_lock.as_mut().unwrap();
// Serialize the body first so we can set the Content-Length header.
let body = serde_json::to_vec(&req)?;
let mut request_bytes = Vec::new();
request_bytes.write_all(b"POST ")?;
request_bytes.write_all(self.path.as_bytes())?;
request_bytes.write_all(b" HTTP/1.1\r\n")?;
// Write headers
request_bytes.write_all(b"host: ")?;
request_bytes.write_all(self.addr.to_string().as_bytes())?;
request_bytes.write_all(b"\r\n")?;
request_bytes.write_all(b"Content-Type: application/json\r\n")?;
request_bytes.write_all(b"Content-Length: ")?;
request_bytes.write_all(body.len().to_string().as_bytes())?;
request_bytes.write_all(b"\r\n")?;
if let Some(ref auth) = self.basic_auth {
request_bytes.write_all(b"Authorization: ")?;
request_bytes.write_all(auth.as_ref())?;
request_bytes.write_all(b"\r\n")?;
}
// Write body
request_bytes.write_all(b"\r\n")?;
request_bytes.write_all(&body)?;
// Send HTTP request
let write_success = sock.get_mut().write_all(request_bytes.as_slice()).is_ok()
&& sock.get_mut().flush().is_ok();
// This indicates the socket is broken so let's retry the send once with a fresh socket
if !write_success {
*sock.get_mut() = self.fresh_socket()?;
sock.get_mut().write_all(request_bytes.as_slice())?;
sock.get_mut().flush()?;
}
// Parse first HTTP response header line
let mut header_buf = String::new();
let read_success = sock.read_line(&mut header_buf).is_ok();
// This is another possible indication that the socket is broken so let's retry the send once
// with a fresh socket IF the write attempt has not already experienced a failure
if (!read_success || header_buf.is_empty()) && write_success {
*sock.get_mut() = self.fresh_socket()?;
sock.get_mut().write_all(request_bytes.as_slice())?;
sock.get_mut().flush()?;
sock.read_line(&mut header_buf)?;
}
if header_buf.len() < 12 {
return Err(Error::HttpResponseTooShort {
actual: header_buf.len(),
needed: 12,
});
}
if !header_buf.as_bytes()[..12].is_ascii() {
return Err(Error::HttpResponseNonAsciiHello(header_buf.as_bytes()[..12].to_vec()));
}
if !header_buf.starts_with("HTTP/1.1 ") {
return Err(Error::HttpResponseBadHello {
actual: header_buf[0..9].into(),
expected: "HTTP/1.1 ".into(),
});
}
let response_code = match header_buf[9..12].parse::<u16>() {
Ok(n) => n,
Err(e) => return Err(Error::HttpResponseBadStatus(header_buf[9..12].into(), e)),
};
// Parse response header fields
let mut content_length = None;
loop {
header_buf.clear();
sock.read_line(&mut header_buf)?;
if header_buf == "\r\n" {
break;
}
header_buf.make_ascii_lowercase();
const CONTENT_LENGTH: &str = "content-length: ";
if let Some(s) = header_buf.strip_prefix(CONTENT_LENGTH) {
content_length = Some(
s.trim()
.parse::<u64>()
.map_err(|e| Error::HttpResponseBadContentLength(s.into(), e))?,
);
}
}
if response_code == 401 {
// There is no body in a 401 response, so don't try to read it
return Err(Error::HttpErrorCode(response_code));
}
// Read up to `content_length` bytes. If there is no content-length header, we
// keep reading from the socket until it is closed, capped at FINAL_RESP_ALLOC.
let mut reader = match content_length {
None => sock.take(FINAL_RESP_ALLOC),
Some(n) if n > FINAL_RESP_ALLOC => {
return Err(Error::HttpResponseContentLengthTooLarge {
length: n,
max: FINAL_RESP_ALLOC,
});
}
Some(n) => sock.take(n),
};
// Attempt to parse the response. Don't check the HTTP error code until
// after parsing, since Bitcoin Core will often return a descriptive JSON
// error structure which is more useful than the error code.
match serde_json::from_reader(&mut reader) {
Ok(s) => {
if content_length.is_some() {
reader.bytes().count(); // consume any trailing bytes
}
Ok(s)
}
Err(e) => {
// If the response was not 200, assume the parse failed because of that
if response_code != 200 {
Err(Error::HttpErrorCode(response_code))
} else {
// If it was 200 then probably it was legitimately a parse error
Err(e.into())
}
}
}
}
}
/// Does some very basic manual URL parsing because the uri/url crates
/// all have unicode-normalization as a dependency and that's broken.
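/// For example, "http://user:pass@localhost:8332/wallet/w1" resolves the
/// host and port to a socket address and returns the path "/wallet/w1",
/// while a bare "localhost" falls back to DEFAULT_PORT (illustrative URLs).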
fn check_url(url: &str) -> Result<(SocketAddr, String), Error> {
// The fallback port in case no port was provided.
// This changes when the http or https scheme was provided.
let mut fallback_port = DEFAULT_PORT;
// We need to get the hostname and the port.
// (1) Split scheme
let after_scheme = {
let mut split = url.splitn(2, "://");
let s = split.next().unwrap();
match split.next() {
None => s, // no scheme present
Some(after) => {
// Check if the scheme is http or https.
if s == "http" {
fallback_port = 80;
} else if s == "https" {
fallback_port = 443;
} else {
return Err(Error::url(url, "scheme should be http or https"));
}
after
}
}
};
// (2) split off path
let (before_path, path) = {
if let Some(slash) = after_scheme.find('/') {
(&after_scheme[0..slash], &after_scheme[slash..])
} else {
(after_scheme, "/")
}
};
// (3) split off auth part
let after_auth = {
let mut split = before_path.splitn(2, '@');
let s = split.next().unwrap();
split.next().unwrap_or(s)
};
// (4) Parse into socket address.
// At this point we either have <host_name> or <host_name_>:<port>
// `std::net::ToSocketAddrs` requires `&str` to have <host_name_>:<port> format.
let mut addr = match after_auth.to_socket_addrs() {
Ok(addr) => addr,
Err(_) => {
// Invalid socket address. Try to add port.
format!("{}:{}", after_auth, fallback_port).to_socket_addrs()?
}
};
match addr.next() {
Some(a) => Ok((a, path.to_owned())),
None => Err(Error::url(url, "invalid hostname: error extracting socket address")),
}
}
impl Transport for SimpleHttpTransport {
fn send_request(&self, req: Request) -> Result<Response, crate::Error> {
Ok(self.request(req)?)
}
fn send_batch(&self, reqs: &[Request]) -> Result<Vec<Response>, crate::Error> {
Ok(self.request(reqs)?)
}
fn fmt_target(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "http://{}:{}{}", self.addr.ip(), self.addr.port(), self.path)
}
}
/// Builder for simple bitcoind [`SimpleHttpTransport`].
#[derive(Clone, Debug)]
pub struct Builder {
tp: SimpleHttpTransport,
}
impl Builder {
/// Constructs a new [`Builder`] with default configuration.
pub fn new() -> Builder {
Builder {
tp: SimpleHttpTransport::new(),
}
}
/// Sets the timeout after which requests will abort if they aren't finished.
pub fn timeout(mut self, timeout: Duration) -> Self {
self.tp.timeout = timeout;
self
}
/// Sets the URL of the server to the transport.
pub fn url(mut self, url: &str) -> Result<Self, Error> {
self.tp.set_url(url)?;
Ok(self)
}
/// Adds authentication information to the transport.
pub fn auth<S: AsRef<str>>(mut self, user: S, pass: Option<S>) -> Self {
let mut auth = user.as_ref().to_owned();
auth.push(':');
if let Some(ref pass) = pass {
auth.push_str(pass.as_ref());
}
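// The header value is "Basic " followed by base64("user:pass"); e.g. the
// illustrative pair ("rpc", "pass") encodes to "Basic cnBjOnBhc3M=".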
self.tp.basic_auth = Some(format!("Basic {}", &base64::encode(auth.as_bytes())));
self
}
/// Adds authentication information to the transport using a cookie string ('user:pass').
pub fn cookie_auth<S: AsRef<str>>(mut self, cookie: S) -> Self {
self.tp.basic_auth = Some(format!("Basic {}", &base64::encode(cookie.as_ref().as_bytes())));
self
}
/// Adds proxy address to the transport for SOCKS5 proxy.
#[cfg(feature = "proxy")]
pub fn proxy_addr<S: AsRef<str>>(mut self, proxy_addr: S) -> Result<Self, Error> {
// We don't expect path in proxy address.
self.tp.proxy_addr = check_url(proxy_addr.as_ref())?.0;
Ok(self)
}
/// Adds optional proxy authentication as ('username', 'password').
#[cfg(feature = "proxy")]
pub fn proxy_auth<S: AsRef<str>>(mut self, user: S, pass: S) -> Self {
self.tp.proxy_auth =
Some((user, pass)).map(|(u, p)| (u.as_ref().to_string(), p.as_ref().to_string()));
self
}
/// Builds the final [`SimpleHttpTransport`].
pub fn build(self) -> SimpleHttpTransport {
self.tp
}
}
impl Default for Builder {
fn default() -> Self {
Builder::new()
}
}
impl crate::Client {
/// Creates a new JSON-RPC client using a bare-minimum HTTP transport.
pub fn simple_http(
url: &str,
user: Option<String>,
pass: Option<String>,
) -> Result<crate::Client, Error> {
let mut builder = Builder::new().url(url)?;
if let Some(user) = user {
builder = builder.auth(user, pass);
}
Ok(crate::Client::with_transport(builder.build()))
}
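// A minimal usage sketch (the URL and credentials below are placeholders,
// not defaults of this crate):
//
// let client = crate::Client::simple_http(
// "http://127.0.0.1:8332",
// Some("rpcuser".to_string()),
// Some("rpcpassword".to_string()),
// )?;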
/// Creates a new JSON-RPC client using an HTTP-SOCKS5 proxy transport.
#[cfg(feature = "proxy")]
pub fn http_proxy(
url: &str,
user: Option<String>,
pass: Option<String>,
proxy_addr: &str,
proxy_auth: Option<(&str, &str)>,
) -> Result<crate::Client, Error> {
let mut builder = Builder::new().url(url)?;
if let Some(user) = user {
builder = builder.auth(user, pass);
}
builder = builder.proxy_addr(proxy_addr)?;
if let Some((user, pass)) = proxy_auth {
builder = builder.proxy_auth(user, pass);
}
let tp = builder.build();
Ok(crate::Client::with_transport(tp))
}
}
/// Error that can happen when sending requests.
#[derive(Debug)]
pub enum Error {
/// An invalid URL was passed.
InvalidUrl {
/// The URL passed.
url: String,
/// The reason the URL is invalid.
reason: &'static str,
},
/// An error occurred on the socket layer.
SocketError(io::Error),
/// The HTTP response was too short to even fit an HTTP 1.1 header.
HttpResponseTooShort {
/// The total length of the response.
actual: usize,
/// Minimum length we can parse.
needed: usize,
},
/// The HTTP response started with an HTTP/1.1 line which was not ASCII.
HttpResponseNonAsciiHello(Vec<u8>),
/// The HTTP response did not start with HTTP/1.1.
HttpResponseBadHello {
/// Actual HTTP-whatever string.
actual: String,
/// The hello string of the HTTP version we support.
expected: String,
},
/// Could not parse the status value as a number.
HttpResponseBadStatus(String, num::ParseIntError),
/// Could not parse the content-length value as a number.
HttpResponseBadContentLength(String, num::ParseIntError),
/// The indicated content-length header exceeded our maximum.
HttpResponseContentLengthTooLarge {
/// The length indicated in the content-length header.
length: u64,
/// Our hard maximum on number of bytes we'll try to read.
max: u64,
},
/// Unexpected HTTP error code (non-200).
HttpErrorCode(u16),
/// Received EOF before getting as many bytes as were indicated by the content-length header.
IncompleteResponse {
/// The content-length header.
content_length: u64,
/// The number of bytes we actually read.
n_read: u64,
},
/// JSON parsing error.
Json(serde_json::Error),
}
impl Error {
/// Utility method to create [`Error::InvalidUrl`] variants.
fn url<U: Into<String>>(url: U, reason: &'static str) -> Error {
Error::InvalidUrl {
url: url.into(),
reason,
}
}
}
impl fmt::Display for Error {
fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> {
use Error::*;
match *self {
InvalidUrl {
ref url,
ref reason,
} => write!(f, "invalid URL '{}': {}", url, reason),
SocketError(ref e) => write!(f, "Couldn't connect to host: {}", e),
HttpResponseTooShort {
ref actual,
ref needed,
} => {
write!(f, "HTTP response too short: length {}, needed {}.", actual, needed)
}
HttpResponseNonAsciiHello(ref bytes) => {
write!(f, "HTTP response started with non-ASCII {:?}", bytes)
}
HttpResponseBadHello {
ref actual,
ref expected,
} => {
write!(f, "HTTP response started with `{}`; expected `{}`.", actual, expected)
}
HttpResponseBadStatus(ref status, ref err) => {
write!(f, "HTTP response had bad status code `{}`: {}.", status, err)
}
HttpResponseBadContentLength(ref len, ref err) => {
write!(f, "HTTP response had bad content length `{}`: {}.", len, err)
}
HttpResponseContentLengthTooLarge {
length,
max,
} => {
write!(f, "HTTP response content length {} exceeds our max {}.", length, max)
}
HttpErrorCode(c) => write!(f, "unexpected HTTP code: {}", c),
IncompleteResponse {
content_length,
n_read,
} => {
write!(
f,
"read {} bytes but HTTP response content-length header was {}.",
n_read, content_length
)
}
Json(ref e) => write!(f, "JSON error: {}", e),
}
}
}
impl error::Error for Error {
fn source(&self) -> Option<&(dyn error::Error + 'static)> |
}
impl From<io::Error> for Error {
fn from(e: io::Error) -> Self {
Error::SocketError(e)
}
}
impl From<serde_json::Error> for Error {
fn from(e: serde_json::Error) -> Self {
Error::Json(e)
}
}
impl From<Error> for crate::Error {
fn from(e: Error) -> crate::Error {
match e {
Error::Json(e) => crate::Error::Json(e),
e => crate::Error::Transport(Box::new(e)),
}
}
}
/// Global mutex used by the fuzzing harness to inject data into the read end of the TCP stream.
#[cfg(jsonrpc_fuzz)]
pub static FUZZ_TCP_SOCK: Mutex<Option<io::Cursor<Vec<u8>>>> = Mutex::new(None);
#[cfg(jsonrpc_fuzz)]
#[derive(Clone, Debug)]
struct TcpStream;
#[cfg(jsonrpc_fuzz)]
mod impls {
use super::*;
impl Read for TcpStream {
fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
match *FUZZ_TCP_SOCK.lock().unwrap() {
Some(ref mut cursor) => io::Read::read(cursor, buf),
None => Ok(0),
}
}
}
impl Write for TcpStream {
fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
io::sink().write(buf)
}
fn flush(&mut self) -> io::Result<()> {
Ok(())
}
}
impl TcpStream {
pub fn connect_timeout(_: &SocketAddr, _: Duration) -> io::Result<Self> {
Ok(TcpStream)
}
pub fn set_read_timeout(&self, _: Option<Duration>) -> io::Result<()> {
Ok(())
}
pub fn set_write_timeout(&self, _: Option<Duration>) -> io::Result<()> {
Ok(())
}
}
}
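// Illustrative sketch (not part of the original file): under `jsonrpc_fuzz`,
// a harness can feed canned response bytes into the fake `TcpStream` by
// setting the global cursor before driving the client; the response shown
// here is an assumed minimal example, not a real fixture.
//
// #[cfg(jsonrpc_fuzz)]
// fn inject_canned_response() {
//     *FUZZ_TCP_SOCK.lock().unwrap() =
//         Some(io::Cursor::new(b"HTTP/1.1 200\r\ncontent-length: 2\r\n\r\n{}".to_vec()));
// }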
#[cfg(test)]
mod tests {
use std::net;
#[cfg(feature = "proxy")]
use std::str::FromStr;
use super::*;
use crate::Client;
#[test]
fn test_urls() {
let addr: net::SocketAddr = ("localhost", 22).to_socket_addrs().unwrap().next().unwrap();
let urls = [
"localhost:22",
"http://localhost:22/",
"https://localhost:22/walletname/stuff?it=working",
"http://me:weak@localhost:22/wallet",
];
for u in &urls {
let tp = Builder::new().url(u).unwrap().build();
assert_eq!(tp.addr, addr);
}
// Default port and 80 and 443 fill-in.
let addr: net::SocketAddr = ("localhost", 80).to_socket_addrs().unwrap().next().unwrap();
let tp = Builder::new().url("http://localhost/").unwrap().build();
assert_eq!(tp.addr, addr);
let addr: net::SocketAddr = ("localhost", 443).to_socket_addrs().unwrap().next().unwrap();
let tp = Builder::new().url("https://localhost/").unwrap().build();
assert_eq!(tp.addr, addr);
let addr: net::SocketAddr =
("localhost", super::DEFAULT_PORT).to_socket_addrs().unwrap().next().unwrap();
let tp = Builder::new().url("localhost").unwrap().build();
assert_eq!(tp.addr, addr);
let valid_urls = [
"localhost",
"127.0.0.1:8080",
"http://127.0.0.1:8080/",
"http://127.0.0.1:8080/rpc/test",
"https://127.0.0.1/rpc/test",
"http://[2001:0db8:85a3:0000:0000:8a2e:0370:7334]:8300",
"http://[2001:0db8:85a3:0000:0000:8a2e:0370:7334]",
];
for u in &valid_urls {
let (addr, path) = check_url(u).unwrap();
let builder = Builder::new().url(u).unwrap_or_else(|_| panic!("error for: {}", u));
assert_eq!(builder.tp.addr, addr);
assert_eq!(builder.tp.path, path);
assert_eq!(builder.tp.timeout, DEFAULT_TIMEOUT);
assert_eq!(builder.tp.basic_auth, None);
#[cfg(feature = "proxy")]
assert_eq!(builder.tp.proxy_addr, SocketAddr::from_str("127.0.0.1:9050").unwrap());
}
let invalid_urls = [
"127.0.0.1.0:8080",
"httpx://127.0.0.1:8080/",
"ftp://127.0.0.1:8080/rpc/test",
"http://127.0.0./rpc/test",
            // NB: somehow, Rust's IpAddr accepts "127.0.0" and fills in the missing zero octet.
];
for u in &invalid_urls {
if let Ok(b) = Builder::new().url(u) {
let tp = b.build();
panic!("expected error for url {}, got {:?}", u, tp);
}
}
}
#[test]
fn construct() {
let tp = Builder::new()
.timeout(Duration::from_millis(100))
.url("localhost:22")
.unwrap()
.auth("user", None)
.build();
let _ = Client::with_transport(tp);
let _ = Client::simple_http("localhost:22", None, None).unwrap();
}
#[cfg(feature = "proxy")]
#[test]
fn construct_with_proxy() {
let tp = Builder::new()
.timeout(Duration::from_millis(100))
.url("localhost:22")
.unwrap()
.auth("user", None)
.proxy_addr("127.0.0.1:9050")
.unwrap()
.build();
let _ = Client::with_transport(tp);
let _ = Client::http_proxy(
"localhost:22",
None,
None,
"127.0.0.1:9050",
Some(("user", "password")),
)
.unwrap();
}
/// Test that the client will detect that a socket is closed and open a fresh one before sending
/// the request
#[cfg(all(not(feature = "proxy"), not(jsonrpc_fuzz)))]
#[test]
fn request_to_closed_socket() {
use serde_json::{Number, Value};
use std::net::{Shutdown, TcpListener};
use std::sync::mpsc;
use std::thread;
let (tx, rx) = mpsc::sync_channel(1);
thread::spawn(move || {
let server = TcpListener::bind("localhost:0").expect("Binding a Tcp Listener");
tx.send(server.local_addr().unwrap().port()).unwrap();
for (request_id, stream) in server.incoming().enumerate() {
let mut stream = stream.unwrap();
let buf_reader = BufReader::new(&mut stream);
let _http_request: Vec<_> = buf_reader
.lines()
.map(|result| result.unwrap())
.take_while(|line| !line.is_empty())
.collect();
let response = Response {
result: None,
error: None,
id: Value::Number(Number::from(request_id)),
jsonrpc: Some(String::from("2.0")),
};
let response_str = serde_json::to_string(&response).unwrap();
stream.write_all(b"HTTP/1.1 200\r\n").unwrap();
stream.write_all(b"Content-Length: ").unwrap();
stream.write_all(response_str.len().to_string().as_bytes()).unwrap();
stream.write_all(b"\r\n").unwrap();
stream.write_all(b"\r\n").unwrap();
stream.write_all(response_str.as_bytes()).unwrap();
stream.flush().unwrap();
stream.shutdown(Shutdown::Both).unwrap();
}
});
// Give the server thread a second to start up and listen
thread::sleep(Duration::from_secs(1));
let port = rx.recv().unwrap();
let client =
Client::simple_http(format!("localhost:{}", port).as_str(), None, None).unwrap();
let request = client.build_request("test_request", &[]);
let result = client.send_request(request).unwrap();
assert_eq!(result.id, Value::Number(Number::from(0)));
thread::sleep(Duration::from_secs(1));
let request = client.build_request("test_request2", &[]);
let result2 = client.send_request(request)
.expect("This second request should not be an Err like `Err(Transport(HttpResponseTooShort { actual: 0, needed: 12 }))`");
assert_eq!(result2.id, Value::Number(Number::from(1)));
}
}
| {
use self::Error::*;
match *self {
InvalidUrl {
..
}
| HttpResponseTooShort {
..
}
| HttpResponseNonAsciiHello(..)
| HttpResponseBadHello {
..
}
| HttpResponseBadStatus(..)
| HttpResponseBadContentLength(..)
| HttpResponseContentLengthTooLarge {
..
}
| HttpErrorCode(_)
| IncompleteResponse {
..
} => None,
SocketError(ref e) => Some(e),
Json(ref e) => Some(e),
}
} | identifier_body |
simple_http.rs | // SPDX-License-Identifier: CC0-1.0
//! This module implements a minimal and non-standard-conforming HTTP 1.1
//! round-tripper that works with the bitcoind RPC server. This can be used
//! if minimal dependencies are a goal and synchronous communication is ok.
#[cfg(feature = "proxy")]
use socks::Socks5Stream;
use std::io::{BufRead, BufReader, Read, Write};
#[cfg(not(jsonrpc_fuzz))]
use std::net::TcpStream;
use std::net::{SocketAddr, ToSocketAddrs};
use std::sync::{Arc, Mutex, MutexGuard};
use std::time::Duration;
use std::{error, fmt, io, net, num};
use crate::client::Transport;
use crate::http::DEFAULT_PORT;
#[cfg(feature = "proxy")]
use crate::http::DEFAULT_PROXY_PORT;
use crate::{Request, Response};
/// Absolute maximum content length allowed before cutting off the response.
const FINAL_RESP_ALLOC: u64 = 1024 * 1024 * 1024;
#[cfg(not(jsonrpc_fuzz))]
const DEFAULT_TIMEOUT: Duration = Duration::from_secs(15);
#[cfg(jsonrpc_fuzz)]
const DEFAULT_TIMEOUT: Duration = Duration::from_millis(1);
/// Simple HTTP transport that implements the necessary subset of HTTP for
/// running a bitcoind RPC client.
#[derive(Clone, Debug)]
pub struct SimpleHttpTransport {
addr: net::SocketAddr,
path: String,
timeout: Duration,
/// The value of the `Authorization` HTTP header.
basic_auth: Option<String>,
#[cfg(feature = "proxy")]
proxy_addr: net::SocketAddr,
#[cfg(feature = "proxy")]
proxy_auth: Option<(String, String)>,
sock: Arc<Mutex<Option<BufReader<TcpStream>>>>,
}
impl Default for SimpleHttpTransport {
fn default() -> Self {
SimpleHttpTransport {
addr: net::SocketAddr::new(
net::IpAddr::V4(net::Ipv4Addr::new(127, 0, 0, 1)),
DEFAULT_PORT,
),
path: "/".to_owned(),
timeout: DEFAULT_TIMEOUT,
basic_auth: None,
#[cfg(feature = "proxy")]
proxy_addr: net::SocketAddr::new(
net::IpAddr::V4(net::Ipv4Addr::new(127, 0, 0, 1)),
DEFAULT_PROXY_PORT,
),
#[cfg(feature = "proxy")]
proxy_auth: None,
sock: Arc::new(Mutex::new(None)),
}
}
}
impl SimpleHttpTransport {
/// Constructs a new [`SimpleHttpTransport`] with default parameters.
pub fn new() -> Self {
SimpleHttpTransport::default()
}
/// Returns a builder for [`SimpleHttpTransport`].
pub fn builder() -> Builder {
Builder::new()
}
/// Replaces the URL of the transport.
pub fn set_url(&mut self, url: &str) -> Result<(), Error> {
let url = check_url(url)?;
self.addr = url.0;
self.path = url.1;
Ok(())
}
/// Replaces only the path part of the URL.
pub fn set_url_path(&mut self, path: String) {
self.path = path;
}
fn request<R>(&self, req: impl serde::Serialize) -> Result<R, Error>
where
R: for<'a> serde::de::Deserialize<'a>,
{
match self.try_request(req) {
Ok(response) => Ok(response),
Err(err) => {
// No part of this codebase should panic, so unwrapping a mutex lock is fine
*self.sock.lock().expect("poisoned mutex") = None;
Err(err)
}
}
}
#[cfg(feature = "proxy")]
fn fresh_socket(&self) -> Result<TcpStream, Error> {
let stream = if let Some((username, password)) = &self.proxy_auth {
Socks5Stream::connect_with_password(
self.proxy_addr,
self.addr,
username.as_str(),
password.as_str(),
)?
} else {
Socks5Stream::connect(self.proxy_addr, self.addr)?
};
Ok(stream.into_inner())
}
#[cfg(not(feature = "proxy"))]
fn fresh_socket(&self) -> Result<TcpStream, Error> {
let stream = TcpStream::connect_timeout(&self.addr, self.timeout)?;
stream.set_read_timeout(Some(self.timeout))?;
stream.set_write_timeout(Some(self.timeout))?;
Ok(stream)
}
fn try_request<R>(&self, req: impl serde::Serialize) -> Result<R, Error>
where
R: for<'a> serde::de::Deserialize<'a>,
{
// No part of this codebase should panic, so unwrapping a mutex lock is fine
let mut sock_lock: MutexGuard<Option<_>> = self.sock.lock().expect("poisoned mutex");
if sock_lock.is_none() {
*sock_lock = Some(BufReader::new(self.fresh_socket()?));
};
// In the immediately preceding block, we made sure that `sock` is non-`None`,
// so unwrapping here is fine.
let sock: &mut BufReader<_> = sock_lock.as_mut().unwrap();
// Serialize the body first so we can set the Content-Length header.
let body = serde_json::to_vec(&req)?;
let mut request_bytes = Vec::new();
request_bytes.write_all(b"POST ")?;
request_bytes.write_all(self.path.as_bytes())?;
request_bytes.write_all(b" HTTP/1.1\r\n")?;
// Write headers
request_bytes.write_all(b"host: ")?;
request_bytes.write_all(self.addr.to_string().as_bytes())?;
request_bytes.write_all(b"\r\n")?;
request_bytes.write_all(b"Content-Type: application/json\r\n")?;
request_bytes.write_all(b"Content-Length: ")?;
request_bytes.write_all(body.len().to_string().as_bytes())?;
request_bytes.write_all(b"\r\n")?;
if let Some(ref auth) = self.basic_auth {
request_bytes.write_all(b"Authorization: ")?;
request_bytes.write_all(auth.as_ref())?;
request_bytes.write_all(b"\r\n")?;
}
// Write body
request_bytes.write_all(b"\r\n")?;
request_bytes.write_all(&body)?;
// Send HTTP request
let write_success = sock.get_mut().write_all(request_bytes.as_slice()).is_ok()
&& sock.get_mut().flush().is_ok();
// This indicates the socket is broken so let's retry the send once with a fresh socket
if !write_success {
*sock.get_mut() = self.fresh_socket()?;
sock.get_mut().write_all(request_bytes.as_slice())?;
sock.get_mut().flush()?;
}
// Parse first HTTP response header line
let mut header_buf = String::new();
let read_success = sock.read_line(&mut header_buf).is_ok();
// This is another possible indication that the socket is broken so let's retry the send once
// with a fresh socket IF the write attempt has not already experienced a failure
if (!read_success || header_buf.is_empty()) && write_success {
*sock.get_mut() = self.fresh_socket()?;
sock.get_mut().write_all(request_bytes.as_slice())?;
sock.get_mut().flush()?;
sock.read_line(&mut header_buf)?;
}
if header_buf.len() < 12 {
return Err(Error::HttpResponseTooShort {
actual: header_buf.len(),
needed: 12,
});
}
if !header_buf.as_bytes()[..12].is_ascii() {
return Err(Error::HttpResponseNonAsciiHello(header_buf.as_bytes()[..12].to_vec()));
}
if !header_buf.starts_with("HTTP/1.1 ") {
return Err(Error::HttpResponseBadHello {
actual: header_buf[0..9].into(),
expected: "HTTP/1.1 ".into(),
});
}
let response_code = match header_buf[9..12].parse::<u16>() {
Ok(n) => n,
Err(e) => return Err(Error::HttpResponseBadStatus(header_buf[9..12].into(), e)),
};
// Parse response header fields
let mut content_length = None;
loop {
header_buf.clear();
sock.read_line(&mut header_buf)?;
if header_buf == "\r\n" {
break;
}
header_buf.make_ascii_lowercase();
const CONTENT_LENGTH: &str = "content-length: ";
if let Some(s) = header_buf.strip_prefix(CONTENT_LENGTH) {
content_length = Some(
s.trim()
.parse::<u64>()
.map_err(|e| Error::HttpResponseBadContentLength(s.into(), e))?,
);
}
}
if response_code == 401 {
// There is no body in a 401 response, so don't try to read it
return Err(Error::HttpErrorCode(response_code));
}
        // Read up to `content_length` bytes. If there is no content-length
        // header, we fall back to a hard cap of FINAL_RESP_ALLOC bytes, i.e. we
        // keep reading from the socket until it is closed or the cap is reached.
let mut reader = match content_length {
None => sock.take(FINAL_RESP_ALLOC),
Some(n) if n > FINAL_RESP_ALLOC => {
return Err(Error::HttpResponseContentLengthTooLarge {
length: n,
max: FINAL_RESP_ALLOC,
});
}
Some(n) => sock.take(n),
};
// Attempt to parse the response. Don't check the HTTP error code until
// after parsing, since Bitcoin Core will often return a descriptive JSON
// error structure which is more useful than the error code.
match serde_json::from_reader(&mut reader) {
Ok(s) => {
if content_length.is_some() {
reader.bytes().count(); // consume any trailing bytes
}
Ok(s)
}
Err(e) => {
// If the response was not 200, assume the parse failed because of that
if response_code != 200 {
Err(Error::HttpErrorCode(response_code))
} else {
// If it was 200 then probably it was legitimately a parse error
Err(e.into())
}
}
}
}
}
/// Does some very basic manual URL parsing because the uri/url crates
/// all have unicode-normalization as a dependency and that's broken.
fn check_url(url: &str) -> Result<(SocketAddr, String), Error> {
// The fallback port in case no port was provided.
// This changes when the http or https scheme was provided.
let mut fallback_port = DEFAULT_PORT;
// We need to get the hostname and the port.
// (1) Split scheme
let after_scheme = {
let mut split = url.splitn(2, "://");
let s = split.next().unwrap();
match split.next() {
None => s, // no scheme present
Some(after) => {
// Check if the scheme is http or https.
if s == "http" {
fallback_port = 80;
} else if s == "https" {
fallback_port = 443;
} else {
return Err(Error::url(url, "scheme should be http or https"));
}
after
}
}
};
// (2) split off path
let (before_path, path) = {
if let Some(slash) = after_scheme.find('/') {
(&after_scheme[0..slash], &after_scheme[slash..])
} else {
(after_scheme, "/")
}
};
// (3) split off auth part
let after_auth = {
let mut split = before_path.splitn(2, '@');
let s = split.next().unwrap();
split.next().unwrap_or(s)
};
// (4) Parse into socket address.
// At this point we either have <host_name> or <host_name_>:<port>
// `std::net::ToSocketAddrs` requires `&str` to have <host_name_>:<port> format.
let mut addr = match after_auth.to_socket_addrs() {
Ok(addr) => addr,
Err(_) => {
// Invalid socket address. Try to add port.
format!("{}:{}", after_auth, fallback_port).to_socket_addrs()?
}
};
match addr.next() {
Some(a) => Ok((a, path.to_owned())),
None => Err(Error::url(url, "invalid hostname: error extracting socket address")),
}
}
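// A minimal sketch (added for illustration, not in the original file) showing
// what `check_url` returns for a URL with scheme, auth, port, and path; the
// IP-literal form avoids any dependence on the local resolver.
#[cfg(test)]
mod check_url_example {
    use super::check_url;

    #[test]
    fn splits_scheme_auth_port_and_path() {
        let (addr, path) = check_url("http://user:pass@127.0.0.1:8332/wallet").unwrap();
        assert_eq!(addr.port(), 8332);
        assert_eq!(path, "/wallet");
    }
}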
impl Transport for SimpleHttpTransport {
fn send_request(&self, req: Request) -> Result<Response, crate::Error> {
Ok(self.request(req)?)
}
fn send_batch(&self, reqs: &[Request]) -> Result<Vec<Response>, crate::Error> {
Ok(self.request(reqs)?)
}
fn fmt_target(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "http://{}:{}{}", self.addr.ip(), self.addr.port(), self.path)
}
}
/// Builder for [`SimpleHttpTransport`], the simple bitcoind HTTP transport.
#[derive(Clone, Debug)]
pub struct Builder {
tp: SimpleHttpTransport,
}
impl Builder {
/// Constructs a new [`Builder`] with default configuration.
pub fn new() -> Builder {
Builder {
tp: SimpleHttpTransport::new(),
}
}
/// Sets the timeout after which requests will abort if they aren't finished.
pub fn timeout(mut self, timeout: Duration) -> Self {
self.tp.timeout = timeout;
self
}
/// Sets the URL of the server to the transport.
pub fn url(mut self, url: &str) -> Result<Self, Error> {
self.tp.set_url(url)?;
Ok(self)
}
/// Adds authentication information to the transport.
pub fn auth<S: AsRef<str>>(mut self, user: S, pass: Option<S>) -> Self {
let mut auth = user.as_ref().to_owned();
auth.push(':');
if let Some(ref pass) = pass {
auth.push_str(pass.as_ref());
}
self.tp.basic_auth = Some(format!("Basic {}", &base64::encode(auth.as_bytes())));
self
}
/// Adds authentication information to the transport using a cookie string ('user:pass').
pub fn cookie_auth<S: AsRef<str>>(mut self, cookie: S) -> Self {
self.tp.basic_auth = Some(format!("Basic {}", &base64::encode(cookie.as_ref().as_bytes())));
self
}
/// Adds proxy address to the transport for SOCKS5 proxy.
#[cfg(feature = "proxy")]
pub fn proxy_addr<S: AsRef<str>>(mut self, proxy_addr: S) -> Result<Self, Error> {
// We don't expect path in proxy address.
self.tp.proxy_addr = check_url(proxy_addr.as_ref())?.0;
Ok(self)
}
/// Adds optional proxy authentication as ('username', 'password').
#[cfg(feature = "proxy")]
pub fn proxy_auth<S: AsRef<str>>(mut self, user: S, pass: S) -> Self {
self.tp.proxy_auth =
Some((user, pass)).map(|(u, p)| (u.as_ref().to_string(), p.as_ref().to_string()));
self
}
/// Builds the final [`SimpleHttpTransport`].
pub fn | (self) -> SimpleHttpTransport {
self.tp
}
}
impl Default for Builder {
fn default() -> Self {
Builder::new()
}
}
impl crate::Client {
/// Creates a new JSON-RPC client using a bare-minimum HTTP transport.
pub fn simple_http(
url: &str,
user: Option<String>,
pass: Option<String>,
) -> Result<crate::Client, Error> {
let mut builder = Builder::new().url(url)?;
if let Some(user) = user {
builder = builder.auth(user, pass);
}
Ok(crate::Client::with_transport(builder.build()))
}
    /// Creates a new JSON-RPC client using an HTTP-SOCKS5 proxy transport.
#[cfg(feature = "proxy")]
pub fn http_proxy(
url: &str,
user: Option<String>,
pass: Option<String>,
proxy_addr: &str,
proxy_auth: Option<(&str, &str)>,
) -> Result<crate::Client, Error> {
let mut builder = Builder::new().url(url)?;
if let Some(user) = user {
builder = builder.auth(user, pass);
}
builder = builder.proxy_addr(proxy_addr)?;
if let Some((user, pass)) = proxy_auth {
builder = builder.proxy_auth(user, pass);
}
let tp = builder.build();
Ok(crate::Client::with_transport(tp))
}
}
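// Usage sketch (added for illustration; the URL and credentials are
// placeholders for a local bitcoind with RPC enabled):
//
// let client = crate::Client::simple_http(
//     "http://127.0.0.1:8332",
//     Some("user".into()),
//     Some("pass".into()),
// )?;
// let request = client.build_request("getblockcount", &[]);
// let response = client.send_request(request)?;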
/// Error that can happen when sending requests.
#[derive(Debug)]
pub enum Error {
/// An invalid URL was passed.
InvalidUrl {
/// The URL passed.
url: String,
/// The reason the URL is invalid.
reason: &'static str,
},
/// An error occurred on the socket layer.
SocketError(io::Error),
    /// The HTTP response was too short to even fit an HTTP 1.1 header.
HttpResponseTooShort {
/// The total length of the response.
actual: usize,
/// Minimum length we can parse.
needed: usize,
},
    /// The HTTP response started with an HTTP/1.1 line which was not ASCII.
    HttpResponseNonAsciiHello(Vec<u8>),
    /// The HTTP response did not start with HTTP/1.1.
HttpResponseBadHello {
/// Actual HTTP-whatever string.
actual: String,
/// The hello string of the HTTP version we support.
expected: String,
},
/// Could not parse the status value as a number.
HttpResponseBadStatus(String, num::ParseIntError),
    /// Could not parse the content-length value as a number.
HttpResponseBadContentLength(String, num::ParseIntError),
/// The indicated content-length header exceeded our maximum.
HttpResponseContentLengthTooLarge {
/// The length indicated in the content-length header.
length: u64,
/// Our hard maximum on number of bytes we'll try to read.
max: u64,
},
/// Unexpected HTTP error code (non-200).
HttpErrorCode(u16),
/// Received EOF before getting as many bytes as were indicated by the content-length header.
IncompleteResponse {
/// The content-length header.
content_length: u64,
/// The number of bytes we actually read.
n_read: u64,
},
/// JSON parsing error.
Json(serde_json::Error),
}
impl Error {
/// Utility method to create [`Error::InvalidUrl`] variants.
fn url<U: Into<String>>(url: U, reason: &'static str) -> Error {
Error::InvalidUrl {
url: url.into(),
reason,
}
}
}
impl fmt::Display for Error {
fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> {
use Error::*;
match *self {
InvalidUrl {
ref url,
ref reason,
} => write!(f, "invalid URL '{}': {}", url, reason),
SocketError(ref e) => write!(f, "Couldn't connect to host: {}", e),
HttpResponseTooShort {
ref actual,
ref needed,
} => {
write!(f, "HTTP response too short: length {}, needed {}.", actual, needed)
}
HttpResponseNonAsciiHello(ref bytes) => {
write!(f, "HTTP response started with non-ASCII {:?}", bytes)
}
HttpResponseBadHello {
ref actual,
ref expected,
} => {
write!(f, "HTTP response started with `{}`; expected `{}`.", actual, expected)
}
HttpResponseBadStatus(ref status, ref err) => {
write!(f, "HTTP response had bad status code `{}`: {}.", status, err)
}
HttpResponseBadContentLength(ref len, ref err) => {
write!(f, "HTTP response had bad content length `{}`: {}.", len, err)
}
HttpResponseContentLengthTooLarge {
length,
max,
} => {
write!(f, "HTTP response content length {} exceeds our max {}.", length, max)
}
HttpErrorCode(c) => write!(f, "unexpected HTTP code: {}", c),
IncompleteResponse {
content_length,
n_read,
} => {
write!(
f,
"read {} bytes but HTTP response content-length header was {}.",
n_read, content_length
)
}
Json(ref e) => write!(f, "JSON error: {}", e),
}
}
}
impl error::Error for Error {
fn source(&self) -> Option<&(dyn error::Error + 'static)> {
use self::Error::*;
match *self {
InvalidUrl {
..
}
| HttpResponseTooShort {
..
}
| HttpResponseNonAsciiHello(..)
| HttpResponseBadHello {
..
}
| HttpResponseBadStatus(..)
| HttpResponseBadContentLength(..)
| HttpResponseContentLengthTooLarge {
..
}
| HttpErrorCode(_)
| IncompleteResponse {
..
} => None,
SocketError(ref e) => Some(e),
Json(ref e) => Some(e),
}
}
}
impl From<io::Error> for Error {
fn from(e: io::Error) -> Self {
Error::SocketError(e)
}
}
impl From<serde_json::Error> for Error {
fn from(e: serde_json::Error) -> Self {
Error::Json(e)
}
}
impl From<Error> for crate::Error {
fn from(e: Error) -> crate::Error {
match e {
Error::Json(e) => crate::Error::Json(e),
e => crate::Error::Transport(Box::new(e)),
}
}
}
/// Global mutex used by the fuzzing harness to inject data into the read end of the TCP stream.
#[cfg(jsonrpc_fuzz)]
pub static FUZZ_TCP_SOCK: Mutex<Option<io::Cursor<Vec<u8>>>> = Mutex::new(None);
#[cfg(jsonrpc_fuzz)]
#[derive(Clone, Debug)]
struct TcpStream;
#[cfg(jsonrpc_fuzz)]
mod impls {
use super::*;
impl Read for TcpStream {
fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
match *FUZZ_TCP_SOCK.lock().unwrap() {
Some(ref mut cursor) => io::Read::read(cursor, buf),
None => Ok(0),
}
}
}
impl Write for TcpStream {
fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
io::sink().write(buf)
}
fn flush(&mut self) -> io::Result<()> {
Ok(())
}
}
impl TcpStream {
pub fn connect_timeout(_: &SocketAddr, _: Duration) -> io::Result<Self> {
Ok(TcpStream)
}
pub fn set_read_timeout(&self, _: Option<Duration>) -> io::Result<()> {
Ok(())
}
pub fn set_write_timeout(&self, _: Option<Duration>) -> io::Result<()> {
Ok(())
}
}
}
#[cfg(test)]
mod tests {
use std::net;
#[cfg(feature = "proxy")]
use std::str::FromStr;
use super::*;
use crate::Client;
#[test]
fn test_urls() {
let addr: net::SocketAddr = ("localhost", 22).to_socket_addrs().unwrap().next().unwrap();
let urls = [
"localhost:22",
"http://localhost:22/",
"https://localhost:22/walletname/stuff?it=working",
"http://me:weak@localhost:22/wallet",
];
for u in &urls {
let tp = Builder::new().url(u).unwrap().build();
assert_eq!(tp.addr, addr);
}
// Default port and 80 and 443 fill-in.
let addr: net::SocketAddr = ("localhost", 80).to_socket_addrs().unwrap().next().unwrap();
let tp = Builder::new().url("http://localhost/").unwrap().build();
assert_eq!(tp.addr, addr);
let addr: net::SocketAddr = ("localhost", 443).to_socket_addrs().unwrap().next().unwrap();
let tp = Builder::new().url("https://localhost/").unwrap().build();
assert_eq!(tp.addr, addr);
let addr: net::SocketAddr =
("localhost", super::DEFAULT_PORT).to_socket_addrs().unwrap().next().unwrap();
let tp = Builder::new().url("localhost").unwrap().build();
assert_eq!(tp.addr, addr);
let valid_urls = [
"localhost",
"127.0.0.1:8080",
"http://127.0.0.1:8080/",
"http://127.0.0.1:8080/rpc/test",
"https://127.0.0.1/rpc/test",
"http://[2001:0db8:85a3:0000:0000:8a2e:0370:7334]:8300",
"http://[2001:0db8:85a3:0000:0000:8a2e:0370:7334]",
];
for u in &valid_urls {
let (addr, path) = check_url(u).unwrap();
let builder = Builder::new().url(u).unwrap_or_else(|_| panic!("error for: {}", u));
assert_eq!(builder.tp.addr, addr);
assert_eq!(builder.tp.path, path);
assert_eq!(builder.tp.timeout, DEFAULT_TIMEOUT);
assert_eq!(builder.tp.basic_auth, None);
#[cfg(feature = "proxy")]
assert_eq!(builder.tp.proxy_addr, SocketAddr::from_str("127.0.0.1:9050").unwrap());
}
let invalid_urls = [
"127.0.0.1.0:8080",
"httpx://127.0.0.1:8080/",
"ftp://127.0.0.1:8080/rpc/test",
"http://127.0.0./rpc/test",
            // NB: somehow, Rust's IpAddr accepts "127.0.0" and fills in the missing zero octet.
];
for u in &invalid_urls {
if let Ok(b) = Builder::new().url(u) {
let tp = b.build();
panic!("expected error for url {}, got {:?}", u, tp);
}
}
}
#[test]
fn construct() {
let tp = Builder::new()
.timeout(Duration::from_millis(100))
.url("localhost:22")
.unwrap()
.auth("user", None)
.build();
let _ = Client::with_transport(tp);
let _ = Client::simple_http("localhost:22", None, None).unwrap();
}
#[cfg(feature = "proxy")]
#[test]
fn construct_with_proxy() {
let tp = Builder::new()
.timeout(Duration::from_millis(100))
.url("localhost:22")
.unwrap()
.auth("user", None)
.proxy_addr("127.0.0.1:9050")
.unwrap()
.build();
let _ = Client::with_transport(tp);
let _ = Client::http_proxy(
"localhost:22",
None,
None,
"127.0.0.1:9050",
Some(("user", "password")),
)
.unwrap();
}
/// Test that the client will detect that a socket is closed and open a fresh one before sending
/// the request
#[cfg(all(not(feature = "proxy"), not(jsonrpc_fuzz)))]
#[test]
fn request_to_closed_socket() {
use serde_json::{Number, Value};
use std::net::{Shutdown, TcpListener};
use std::sync::mpsc;
use std::thread;
let (tx, rx) = mpsc::sync_channel(1);
thread::spawn(move || {
let server = TcpListener::bind("localhost:0").expect("Binding a Tcp Listener");
tx.send(server.local_addr().unwrap().port()).unwrap();
for (request_id, stream) in server.incoming().enumerate() {
let mut stream = stream.unwrap();
let buf_reader = BufReader::new(&mut stream);
let _http_request: Vec<_> = buf_reader
.lines()
.map(|result| result.unwrap())
.take_while(|line| !line.is_empty())
.collect();
let response = Response {
result: None,
error: None,
id: Value::Number(Number::from(request_id)),
jsonrpc: Some(String::from("2.0")),
};
let response_str = serde_json::to_string(&response).unwrap();
stream.write_all(b"HTTP/1.1 200\r\n").unwrap();
stream.write_all(b"Content-Length: ").unwrap();
stream.write_all(response_str.len().to_string().as_bytes()).unwrap();
stream.write_all(b"\r\n").unwrap();
stream.write_all(b"\r\n").unwrap();
stream.write_all(response_str.as_bytes()).unwrap();
stream.flush().unwrap();
stream.shutdown(Shutdown::Both).unwrap();
}
});
// Give the server thread a second to start up and listen
thread::sleep(Duration::from_secs(1));
let port = rx.recv().unwrap();
let client =
Client::simple_http(format!("localhost:{}", port).as_str(), None, None).unwrap();
let request = client.build_request("test_request", &[]);
let result = client.send_request(request).unwrap();
assert_eq!(result.id, Value::Number(Number::from(0)));
thread::sleep(Duration::from_secs(1));
let request = client.build_request("test_request2", &[]);
let result2 = client.send_request(request)
.expect("This second request should not be an Err like `Err(Transport(HttpResponseTooShort { actual: 0, needed: 12 }))`");
assert_eq!(result2.id, Value::Number(Number::from(1)));
}
}
| build | identifier_name |
simple_http.rs | // SPDX-License-Identifier: CC0-1.0
//! This module implements a minimal and non-standard-conforming HTTP 1.1
//! round-tripper that works with the bitcoind RPC server. This can be used
//! if minimal dependencies are a goal and synchronous communication is ok.
#[cfg(feature = "proxy")]
use socks::Socks5Stream;
use std::io::{BufRead, BufReader, Read, Write};
#[cfg(not(jsonrpc_fuzz))]
use std::net::TcpStream;
use std::net::{SocketAddr, ToSocketAddrs};
use std::sync::{Arc, Mutex, MutexGuard};
use std::time::Duration;
use std::{error, fmt, io, net, num};
use crate::client::Transport;
use crate::http::DEFAULT_PORT;
#[cfg(feature = "proxy")]
use crate::http::DEFAULT_PROXY_PORT;
use crate::{Request, Response};
/// Absolute maximum content length allowed before cutting off the response.
const FINAL_RESP_ALLOC: u64 = 1024 * 1024 * 1024;
#[cfg(not(jsonrpc_fuzz))]
const DEFAULT_TIMEOUT: Duration = Duration::from_secs(15);
#[cfg(jsonrpc_fuzz)]
const DEFAULT_TIMEOUT: Duration = Duration::from_millis(1);
/// Simple HTTP transport that implements the necessary subset of HTTP for
/// running a bitcoind RPC client.
#[derive(Clone, Debug)]
pub struct SimpleHttpTransport {
addr: net::SocketAddr,
path: String,
timeout: Duration,
/// The value of the `Authorization` HTTP header.
basic_auth: Option<String>,
#[cfg(feature = "proxy")]
proxy_addr: net::SocketAddr,
#[cfg(feature = "proxy")]
proxy_auth: Option<(String, String)>,
sock: Arc<Mutex<Option<BufReader<TcpStream>>>>,
}
impl Default for SimpleHttpTransport {
fn default() -> Self {
SimpleHttpTransport {
addr: net::SocketAddr::new(
net::IpAddr::V4(net::Ipv4Addr::new(127, 0, 0, 1)),
DEFAULT_PORT,
),
path: "/".to_owned(),
timeout: DEFAULT_TIMEOUT,
basic_auth: None,
#[cfg(feature = "proxy")]
proxy_addr: net::SocketAddr::new(
net::IpAddr::V4(net::Ipv4Addr::new(127, 0, 0, 1)),
DEFAULT_PROXY_PORT,
),
#[cfg(feature = "proxy")]
proxy_auth: None,
sock: Arc::new(Mutex::new(None)),
}
}
}
impl SimpleHttpTransport {
/// Constructs a new [`SimpleHttpTransport`] with default parameters.
pub fn new() -> Self {
SimpleHttpTransport::default()
}
/// Returns a builder for [`SimpleHttpTransport`].
pub fn builder() -> Builder {
Builder::new()
}
/// Replaces the URL of the transport.
pub fn set_url(&mut self, url: &str) -> Result<(), Error> {
let url = check_url(url)?;
self.addr = url.0;
self.path = url.1;
Ok(())
}
/// Replaces only the path part of the URL.
pub fn set_url_path(&mut self, path: String) {
self.path = path;
}
fn request<R>(&self, req: impl serde::Serialize) -> Result<R, Error>
where
R: for<'a> serde::de::Deserialize<'a>,
{
match self.try_request(req) {
Ok(response) => Ok(response),
Err(err) => {
// No part of this codebase should panic, so unwrapping a mutex lock is fine
*self.sock.lock().expect("poisoned mutex") = None;
Err(err)
}
}
}
#[cfg(feature = "proxy")]
fn fresh_socket(&self) -> Result<TcpStream, Error> {
let stream = if let Some((username, password)) = &self.proxy_auth {
Socks5Stream::connect_with_password(
self.proxy_addr,
self.addr,
username.as_str(),
password.as_str(),
)?
} else {
Socks5Stream::connect(self.proxy_addr, self.addr)?
};
Ok(stream.into_inner())
}
#[cfg(not(feature = "proxy"))]
fn fresh_socket(&self) -> Result<TcpStream, Error> {
let stream = TcpStream::connect_timeout(&self.addr, self.timeout)?;
stream.set_read_timeout(Some(self.timeout))?;
stream.set_write_timeout(Some(self.timeout))?;
Ok(stream)
}
fn try_request<R>(&self, req: impl serde::Serialize) -> Result<R, Error>
where
R: for<'a> serde::de::Deserialize<'a>,
{
// No part of this codebase should panic, so unwrapping a mutex lock is fine
let mut sock_lock: MutexGuard<Option<_>> = self.sock.lock().expect("poisoned mutex");
if sock_lock.is_none() {
*sock_lock = Some(BufReader::new(self.fresh_socket()?));
};
// In the immediately preceding block, we made sure that `sock` is non-`None`,
// so unwrapping here is fine.
let sock: &mut BufReader<_> = sock_lock.as_mut().unwrap();
// Serialize the body first so we can set the Content-Length header.
let body = serde_json::to_vec(&req)?;
let mut request_bytes = Vec::new();
request_bytes.write_all(b"POST ")?;
request_bytes.write_all(self.path.as_bytes())?;
request_bytes.write_all(b" HTTP/1.1\r\n")?;
// Write headers
request_bytes.write_all(b"host: ")?;
request_bytes.write_all(self.addr.to_string().as_bytes())?;
request_bytes.write_all(b"\r\n")?;
request_bytes.write_all(b"Content-Type: application/json\r\n")?;
request_bytes.write_all(b"Content-Length: ")?;
request_bytes.write_all(body.len().to_string().as_bytes())?;
request_bytes.write_all(b"\r\n")?;
if let Some(ref auth) = self.basic_auth {
request_bytes.write_all(b"Authorization: ")?;
request_bytes.write_all(auth.as_ref())?;
request_bytes.write_all(b"\r\n")?;
}
// Write body
request_bytes.write_all(b"\r\n")?;
request_bytes.write_all(&body)?;
// Send HTTP request
let write_success = sock.get_mut().write_all(request_bytes.as_slice()).is_ok()
&& sock.get_mut().flush().is_ok();
// This indicates the socket is broken so let's retry the send once with a fresh socket
if !write_success {
*sock.get_mut() = self.fresh_socket()?;
sock.get_mut().write_all(request_bytes.as_slice())?;
sock.get_mut().flush()?;
}
// Parse first HTTP response header line
let mut header_buf = String::new();
let read_success = sock.read_line(&mut header_buf).is_ok();
// This is another possible indication that the socket is broken so let's retry the send once
// with a fresh socket IF the write attempt has not already experienced a failure
if (!read_success || header_buf.is_empty()) && write_success {
*sock.get_mut() = self.fresh_socket()?;
sock.get_mut().write_all(request_bytes.as_slice())?;
sock.get_mut().flush()?;
sock.read_line(&mut header_buf)?;
}
if header_buf.len() < 12 {
return Err(Error::HttpResponseTooShort {
actual: header_buf.len(),
needed: 12,
});
}
if !header_buf.as_bytes()[..12].is_ascii() {
return Err(Error::HttpResponseNonAsciiHello(header_buf.as_bytes()[..12].to_vec()));
}
if !header_buf.starts_with("HTTP/1.1 ") {
return Err(Error::HttpResponseBadHello {
actual: header_buf[0..9].into(),
expected: "HTTP/1.1 ".into(),
});
}
let response_code = match header_buf[9..12].parse::<u16>() {
Ok(n) => n,
Err(e) => return Err(Error::HttpResponseBadStatus(header_buf[9..12].into(), e)),
};
// Parse response header fields
let mut content_length = None;
loop {
header_buf.clear();
sock.read_line(&mut header_buf)?;
if header_buf == "\r\n" {
break;
}
header_buf.make_ascii_lowercase();
const CONTENT_LENGTH: &str = "content-length: ";
if let Some(s) = header_buf.strip_prefix(CONTENT_LENGTH) {
content_length = Some(
s.trim()
.parse::<u64>()
.map_err(|e| Error::HttpResponseBadContentLength(s.into(), e))?,
);
}
}
if response_code == 401 {
// There is no body in a 401 response, so don't try to read it
return Err(Error::HttpErrorCode(response_code));
}
        // Read up to `content_length` bytes. If there is no content-length
        // header, we fall back to a hard cap of FINAL_RESP_ALLOC bytes, i.e. we
        // keep reading from the socket until it is closed or the cap is reached.
let mut reader = match content_length {
None => sock.take(FINAL_RESP_ALLOC),
Some(n) if n > FINAL_RESP_ALLOC => {
return Err(Error::HttpResponseContentLengthTooLarge {
length: n,
max: FINAL_RESP_ALLOC,
});
}
Some(n) => sock.take(n),
};
// Attempt to parse the response. Don't check the HTTP error code until
// after parsing, since Bitcoin Core will often return a descriptive JSON
// error structure which is more useful than the error code.
match serde_json::from_reader(&mut reader) {
Ok(s) => {
if content_length.is_some() {
reader.bytes().count(); // consume any trailing bytes
}
Ok(s)
}
Err(e) => {
// If the response was not 200, assume the parse failed because of that
if response_code != 200 {
Err(Error::HttpErrorCode(response_code))
} else {
// If it was 200 then probably it was legitimately a parse error
Err(e.into())
}
}
}
}
}
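// For reference (added annotation, not in the original file): the request
// bytes assembled in `try_request` above have the following shape, with
// illustrative values; the Authorization line is present only when
// `basic_auth` is set:
//
//   POST / HTTP/1.1\r\n
//   host: 127.0.0.1:8332\r\n
//   Content-Type: application/json\r\n
//   Content-Length: <body length>\r\n
//   Authorization: Basic <base64("user:pass")>\r\n
//   \r\n
//   <JSON-serialized request body>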
/// Does some very basic manual URL parsing because the uri/url crates
/// all have unicode-normalization as a dependency and that's broken.
fn check_url(url: &str) -> Result<(SocketAddr, String), Error> {
// The fallback port in case no port was provided.
// This changes when the http or https scheme was provided.
let mut fallback_port = DEFAULT_PORT;
// We need to get the hostname and the port.
// (1) Split scheme
let after_scheme = {
let mut split = url.splitn(2, "://");
let s = split.next().unwrap();
match split.next() {
None => s, // no scheme present
Some(after) => {
// Check if the scheme is http or https.
if s == "http" {
fallback_port = 80;
} else if s == "https" {
fallback_port = 443;
} else {
return Err(Error::url(url, "scheme should be http or https"));
}
after
}
}
};
// (2) split off path
let (before_path, path) = {
if let Some(slash) = after_scheme.find('/') {
(&after_scheme[0..slash], &after_scheme[slash..])
} else {
(after_scheme, "/")
}
};
// (3) split off auth part
let after_auth = {
let mut split = before_path.splitn(2, '@');
let s = split.next().unwrap();
split.next().unwrap_or(s)
};
// (4) Parse into socket address.
// At this point we either have <host_name> or <host_name_>:<port>
// `std::net::ToSocketAddrs` requires `&str` to have <host_name_>:<port> format.
let mut addr = match after_auth.to_socket_addrs() {
Ok(addr) => addr,
Err(_) => {
// Invalid socket address. Try to add port.
format!("{}:{}", after_auth, fallback_port).to_socket_addrs()?
}
};
match addr.next() {
Some(a) => Ok((a, path.to_owned())),
None => Err(Error::url(url, "invalid hostname: error extracting socket address")),
}
}
impl Transport for SimpleHttpTransport {
fn send_request(&self, req: Request) -> Result<Response, crate::Error> {
Ok(self.request(req)?)
}
fn send_batch(&self, reqs: &[Request]) -> Result<Vec<Response>, crate::Error> {
Ok(self.request(reqs)?)
}
fn fmt_target(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "http://{}:{}{}", self.addr.ip(), self.addr.port(), self.path)
}
}
/// Builder for [`SimpleHttpTransport`], the simple bitcoind HTTP transport.
#[derive(Clone, Debug)]
pub struct Builder {
tp: SimpleHttpTransport,
}
impl Builder {
/// Constructs a new [`Builder`] with default configuration.
pub fn new() -> Builder {
Builder {
tp: SimpleHttpTransport::new(),
}
}
/// Sets the timeout after which requests will abort if they aren't finished.
pub fn timeout(mut self, timeout: Duration) -> Self {
self.tp.timeout = timeout;
self
}
/// Sets the URL of the server to the transport.
pub fn url(mut self, url: &str) -> Result<Self, Error> {
self.tp.set_url(url)?;
Ok(self)
}
/// Adds authentication information to the transport.
pub fn auth<S: AsRef<str>>(mut self, user: S, pass: Option<S>) -> Self {
let mut auth = user.as_ref().to_owned();
auth.push(':');
if let Some(ref pass) = pass {
auth.push_str(pass.as_ref());
}
self.tp.basic_auth = Some(format!("Basic {}", &base64::encode(auth.as_bytes())));
self
}
/// Adds authentication information to the transport using a cookie string ('user:pass').
pub fn cookie_auth<S: AsRef<str>>(mut self, cookie: S) -> Self {
self.tp.basic_auth = Some(format!("Basic {}", &base64::encode(cookie.as_ref().as_bytes())));
self
}
/// Adds proxy address to the transport for SOCKS5 proxy.
#[cfg(feature = "proxy")]
pub fn proxy_addr<S: AsRef<str>>(mut self, proxy_addr: S) -> Result<Self, Error> {
// We don't expect path in proxy address.
self.tp.proxy_addr = check_url(proxy_addr.as_ref())?.0;
Ok(self)
}
/// Adds optional proxy authentication as ('username', 'password').
#[cfg(feature = "proxy")]
pub fn proxy_auth<S: AsRef<str>>(mut self, user: S, pass: S) -> Self {
self.tp.proxy_auth =
Some((user, pass)).map(|(u, p)| (u.as_ref().to_string(), p.as_ref().to_string()));
self
}
/// Builds the final [`SimpleHttpTransport`].
pub fn build(self) -> SimpleHttpTransport {
self.tp
}
}
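// Sketch of the header value `auth` produces (added for illustration):
// `Builder::new().auth("user", Some("pass"))` stores `Basic dXNlcjpwYXNz`,
// i.e. "Basic " followed by base64("user:pass").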
impl Default for Builder {
fn default() -> Self {
Builder::new()
}
}
impl crate::Client {
/// Creates a new JSON-RPC client using a bare-minimum HTTP transport.
pub fn simple_http(
url: &str,
user: Option<String>,
pass: Option<String>,
) -> Result<crate::Client, Error> {
let mut builder = Builder::new().url(url)?;
if let Some(user) = user {
builder = builder.auth(user, pass);
}
Ok(crate::Client::with_transport(builder.build()))
}
    /// Creates a new JSON-RPC client using an HTTP-SOCKS5 proxy transport.
#[cfg(feature = "proxy")]
pub fn http_proxy(
url: &str,
user: Option<String>,
pass: Option<String>,
proxy_addr: &str,
proxy_auth: Option<(&str, &str)>,
) -> Result<crate::Client, Error> {
let mut builder = Builder::new().url(url)?;
if let Some(user) = user {
builder = builder.auth(user, pass);
}
builder = builder.proxy_addr(proxy_addr)?;
if let Some((user, pass)) = proxy_auth {
builder = builder.proxy_auth(user, pass);
}
let tp = builder.build();
Ok(crate::Client::with_transport(tp))
}
}
/// Error that can happen when sending requests.
#[derive(Debug)]
pub enum Error {
/// An invalid URL was passed.
InvalidUrl {
/// The URL passed.
url: String,
/// The reason the URL is invalid.
reason: &'static str,
},
/// An error occurred on the socket layer.
SocketError(io::Error),
    /// The HTTP response was too short to even fit an HTTP 1.1 header.
HttpResponseTooShort {
/// The total length of the response.
actual: usize,
/// Minimum length we can parse.
needed: usize,
},
    /// The HTTP response started with an HTTP/1.1 line which was not ASCII.
    HttpResponseNonAsciiHello(Vec<u8>),
    /// The HTTP response did not start with HTTP/1.1.
HttpResponseBadHello {
/// Actual HTTP-whatever string.
actual: String,
/// The hello string of the HTTP version we support.
expected: String,
},
/// Could not parse the status value as a number.
HttpResponseBadStatus(String, num::ParseIntError),
    /// Could not parse the content-length value as a number.
HttpResponseBadContentLength(String, num::ParseIntError),
/// The indicated content-length header exceeded our maximum.
HttpResponseContentLengthTooLarge {
/// The length indicated in the content-length header.
length: u64,
/// Our hard maximum on number of bytes we'll try to read.
max: u64,
},
/// Unexpected HTTP error code (non-200).
HttpErrorCode(u16),
/// Received EOF before getting as many bytes as were indicated by the content-length header.
IncompleteResponse {
/// The content-length header.
content_length: u64,
/// The number of bytes we actually read.
n_read: u64,
},
/// JSON parsing error.
Json(serde_json::Error),
}
impl Error {
/// Utility method to create [`Error::InvalidUrl`] variants.
fn url<U: Into<String>>(url: U, reason: &'static str) -> Error {
Error::InvalidUrl {
url: url.into(),
reason,
}
}
}
impl fmt::Display for Error {
fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> {
use Error::*;
match *self {
InvalidUrl {
ref url,
ref reason,
} => write!(f, "invalid URL '{}': {}", url, reason),
SocketError(ref e) => write!(f, "Couldn't connect to host: {}", e),
HttpResponseTooShort {
ref actual,
ref needed,
} => {
write!(f, "HTTP response too short: length {}, needed {}.", actual, needed)
}
HttpResponseNonAsciiHello(ref bytes) => {
write!(f, "HTTP response started with non-ASCII {:?}", bytes)
}
HttpResponseBadHello {
ref actual,
ref expected,
} => {
write!(f, "HTTP response started with `{}`; expected `{}`.", actual, expected)
}
HttpResponseBadStatus(ref status, ref err) => {
write!(f, "HTTP response had bad status code `{}`: {}.", status, err)
}
HttpResponseBadContentLength(ref len, ref err) => {
write!(f, "HTTP response had bad content length `{}`: {}.", len, err)
}
HttpResponseContentLengthTooLarge {
length,
max,
} => {
write!(f, "HTTP response content length {} exceeds our max {}.", length, max)
}
HttpErrorCode(c) => write!(f, "unexpected HTTP code: {}", c),
IncompleteResponse {
content_length,
n_read,
} => {
write!(
f,
"read {} bytes but HTTP response content-length header was {}.",
n_read, content_length
)
}
Json(ref e) => write!(f, "JSON error: {}", e),
}
}
}
impl error::Error for Error {
fn source(&self) -> Option<&(dyn error::Error + 'static)> {
use self::Error::*;
match *self {
InvalidUrl {
..
}
| HttpResponseTooShort {
..
}
| HttpResponseNonAsciiHello(..)
| HttpResponseBadHello {
..
}
| HttpResponseBadStatus(..)
| HttpResponseBadContentLength(..)
| HttpResponseContentLengthTooLarge {
..
}
| HttpErrorCode(_)
| IncompleteResponse {
..
} => None,
SocketError(ref e) => Some(e),
Json(ref e) => Some(e),
}
}
}
impl From<io::Error> for Error {
fn from(e: io::Error) -> Self {
Error::SocketError(e)
}
}
impl From<serde_json::Error> for Error {
fn from(e: serde_json::Error) -> Self {
Error::Json(e)
}
}
impl From<Error> for crate::Error {
fn from(e: Error) -> crate::Error {
match e {
Error::Json(e) => crate::Error::Json(e),
e => crate::Error::Transport(Box::new(e)),
}
}
}
/// Global mutex used by the fuzzing harness to inject data into the read end of the TCP stream.
#[cfg(jsonrpc_fuzz)]
pub static FUZZ_TCP_SOCK: Mutex<Option<io::Cursor<Vec<u8>>>> = Mutex::new(None);
#[cfg(jsonrpc_fuzz)]
#[derive(Clone, Debug)]
struct TcpStream;
#[cfg(jsonrpc_fuzz)]
mod impls {
use super::*;
impl Read for TcpStream {
fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
match *FUZZ_TCP_SOCK.lock().unwrap() {
Some(ref mut cursor) => io::Read::read(cursor, buf),
None => Ok(0),
}
}
}
impl Write for TcpStream {
fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
io::sink().write(buf)
}
fn flush(&mut self) -> io::Result<()> {
Ok(())
}
}
impl TcpStream {
pub fn connect_timeout(_: &SocketAddr, _: Duration) -> io::Result<Self> {
Ok(TcpStream)
}
pub fn set_read_timeout(&self, _: Option<Duration>) -> io::Result<()> {
Ok(())
}
pub fn set_write_timeout(&self, _: Option<Duration>) -> io::Result<()> {
Ok(())
}
}
}
#[cfg(test)]
mod tests {
use std::net;
#[cfg(feature = "proxy")]
use std::str::FromStr;
use super::*;
use crate::Client;
#[test]
fn test_urls() {
let addr: net::SocketAddr = ("localhost", 22).to_socket_addrs().unwrap().next().unwrap();
let urls = [
"localhost:22",
"http://localhost:22/",
"https://localhost:22/walletname/stuff?it=working",
"http://me:weak@localhost:22/wallet",
];
for u in &urls {
let tp = Builder::new().url(u).unwrap().build();
assert_eq!(tp.addr, addr);
}
// Default port and 80 and 443 fill-in.
let addr: net::SocketAddr = ("localhost", 80).to_socket_addrs().unwrap().next().unwrap();
let tp = Builder::new().url("http://localhost/").unwrap().build();
assert_eq!(tp.addr, addr);
let addr: net::SocketAddr = ("localhost", 443).to_socket_addrs().unwrap().next().unwrap();
let tp = Builder::new().url("https://localhost/").unwrap().build();
assert_eq!(tp.addr, addr);
let addr: net::SocketAddr =
("localhost", super::DEFAULT_PORT).to_socket_addrs().unwrap().next().unwrap();
let tp = Builder::new().url("localhost").unwrap().build();
assert_eq!(tp.addr, addr);
let valid_urls = [
"localhost",
"127.0.0.1:8080",
"http://127.0.0.1:8080/",
"http://127.0.0.1:8080/rpc/test",
"https://127.0.0.1/rpc/test",
"http://[2001:0db8:85a3:0000:0000:8a2e:0370:7334]:8300",
"http://[2001:0db8:85a3:0000:0000:8a2e:0370:7334]",
];
for u in &valid_urls {
let (addr, path) = check_url(u).unwrap();
let builder = Builder::new().url(u).unwrap_or_else(|_| panic!("error for: {}", u));
assert_eq!(builder.tp.addr, addr);
assert_eq!(builder.tp.path, path);
assert_eq!(builder.tp.timeout, DEFAULT_TIMEOUT);
assert_eq!(builder.tp.basic_auth, None);
#[cfg(feature = "proxy")]
assert_eq!(builder.tp.proxy_addr, SocketAddr::from_str("127.0.0.1:9050").unwrap());
}
let invalid_urls = [
"127.0.0.1.0:8080",
"httpx://127.0.0.1:8080/",
"ftp://127.0.0.1:8080/rpc/test",
"http://127.0.0./rpc/test", | // NB somehow, Rust's IpAddr accepts "127.0.0" and adds the extra 0..
];
for u in &invalid_urls {
if let Ok(b) = Builder::new().url(u) {
let tp = b.build();
panic!("expected error for url {}, got {:?}", u, tp);
}
}
}
#[test]
fn construct() {
let tp = Builder::new()
.timeout(Duration::from_millis(100))
.url("localhost:22")
.unwrap()
.auth("user", None)
.build();
let _ = Client::with_transport(tp);
let _ = Client::simple_http("localhost:22", None, None).unwrap();
}
#[cfg(feature = "proxy")]
#[test]
fn construct_with_proxy() {
let tp = Builder::new()
.timeout(Duration::from_millis(100))
.url("localhost:22")
.unwrap()
.auth("user", None)
.proxy_addr("127.0.0.1:9050")
.unwrap()
.build();
let _ = Client::with_transport(tp);
let _ = Client::http_proxy(
"localhost:22",
None,
None,
"127.0.0.1:9050",
Some(("user", "password")),
)
.unwrap();
}
/// Test that the client will detect that a socket is closed and open a fresh one before sending
/// the request
#[cfg(all(not(feature = "proxy"), not(jsonrpc_fuzz)))]
#[test]
fn request_to_closed_socket() {
use serde_json::{Number, Value};
use std::net::{Shutdown, TcpListener};
use std::sync::mpsc;
use std::thread;
let (tx, rx) = mpsc::sync_channel(1);
thread::spawn(move || {
let server = TcpListener::bind("localhost:0").expect("Binding a Tcp Listener");
tx.send(server.local_addr().unwrap().port()).unwrap();
for (request_id, stream) in server.incoming().enumerate() {
let mut stream = stream.unwrap();
let buf_reader = BufReader::new(&mut stream);
let _http_request: Vec<_> = buf_reader
.lines()
.map(|result| result.unwrap())
.take_while(|line| !line.is_empty())
.collect();
let response = Response {
result: None,
error: None,
id: Value::Number(Number::from(request_id)),
jsonrpc: Some(String::from("2.0")),
};
let response_str = serde_json::to_string(&response).unwrap();
stream.write_all(b"HTTP/1.1 200\r\n").unwrap();
stream.write_all(b"Content-Length: ").unwrap();
stream.write_all(response_str.len().to_string().as_bytes()).unwrap();
stream.write_all(b"\r\n").unwrap();
stream.write_all(b"\r\n").unwrap();
stream.write_all(response_str.as_bytes()).unwrap();
stream.flush().unwrap();
stream.shutdown(Shutdown::Both).unwrap();
}
});
// Give the server thread a second to start up and listen
thread::sleep(Duration::from_secs(1));
let port = rx.recv().unwrap();
let client =
Client::simple_http(format!("localhost:{}", port).as_str(), None, None).unwrap();
let request = client.build_request("test_request", &[]);
let result = client.send_request(request).unwrap();
assert_eq!(result.id, Value::Number(Number::from(0)));
thread::sleep(Duration::from_secs(1));
let request = client.build_request("test_request2", &[]);
let result2 = client.send_request(request)
.expect("This second request should not be an Err like `Err(Transport(HttpResponseTooShort { actual: 0, needed: 12 }))`");
assert_eq!(result2.id, Value::Number(Number::from(1)));
}
} | random_line_split | |
DataManager.py | # -*- coding: utf-8 -*-
"""
Created on Sun Dec 30 16:14:04 2018
@author: truthless
"""
import random
from stop import STOP_WORDS
import numpy as np
import torch
import torch.utils.data as data
import re
import json
import networkx as nx
import scipy.sparse as sp
PAD = 0
UNK = 1 #OOV
GO = 2
EOS = 3
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
class DataManager:
def __init__(self, stopword_freq_lb, path, no_pretrain_word2vec, dim, context_len):
#read text
self.text = {}
for name in ["train", "valid", "test"]:
self.text[name] = []
entities = []
file_path = "{0}/{1}_ent_1.txt".format(path, name)
for line in open(file_path):
entities.append(line.strip())
sys_ans_utt_ori = []
file_path = "{0}/{1}_ans_utt_ori_1.txt".format(path, name)
for line in open(file_path):
sys_ans_utt_ori.append(line.strip())
cnt = 0
file_path = "{0}/{1}_utt_1.txt".format(path, name)
for line in open(file_path):
utterances = line.strip().split('\t')
utterances = [''] * (context_len - len(utterances)) + utterances
self.text[name].append([utterances[-context_len:-1], utterances[-1], entities[cnt], sys_ans_utt_ori[cnt]])
cnt += 1
#arrange words
wordscount = {}
for name in ["train", "valid"]:
texts = self.text[name]
for item in texts:
words = item[0][-1].split() + item[1].split()
for word in words:
if word in wordscount:
wordscount[word] += 1
else:
wordscount[word] = 1
wordssorted = sorted(wordscount.items(), key = lambda d: (d[1],d[0]), reverse=True)
output = open("word_cnt_stat.txt", "w")
for i, (key, value) in enumerate(wordssorted):
output.write(str(value) + ":" + str(key) + "\n")
self.word2index = {'<PAD>':0, '<UNK>':1, '<GO>':2, '<EOS>':3}
stopwords_self = set()
for i, (key, value) in enumerate(wordssorted):
if value <= 5:
break
self.word2index[key] = i + 4 #PAD,UNK,GO,EOS
if value >= stopword_freq_lb:
stopwords_self.add(key)
# to add all entity name into vocab
entity_list = json.load(open("./data/entity_list_simple.json"))
start_idx = len(self.word2index)
for entity_name in entity_list:
entity_name = entity_name.split("::")[-1]
if entity_name not in self.word2index:
self.word2index[entity_name] = start_idx
start_idx += 1
self.stop_words_index = set([PAD, UNK, GO, EOS])
#self.stop_words_index |= set([self.word2index[word] for word in STOP_WORDS
# if word in self.word2index])
        # add every high-frequency word (freq >= stopword_freq_lb) gathered above to the stop-word index set
self.stop_words_index |= set([self.word2index[word] for word in stopwords_self])
self.index2word = dict((v, k) for k, v in self.word2index.items())
#load word vector
if no_pretrain_word2vec:
self.vector = None
else:
self.vector = 0.1 * np.random.rand(len(self.word2index), dim)
with open("{0}/vector.txt".format(path)) as fl:
for line in fl:
vec = line.strip().split()
word = vec[0].lower()
vec = list(map(float, vec[1:]))
if word in self.word2index:
self.vector[self.word2index[word]] = np.asarray(vec)
self.vector = torch.Tensor(self.vector)
        # map vocab indices to a dense non-stop-word index space, used for term-frequency vectors
len_voc = len(self.word2index.values())
self.index2nonstop = {}
cnt = 0
for i in range(len_voc):
if i not in self.stop_words_index:
self.index2nonstop[i] = cnt
cnt += 1
# for graph initialization
self.node_id_map, self.id_nodename_map = self.get_node_id_map()
self.node_info_map, self.nodename_attr_map = self.get_node_info()
self.adj = self.get_adj_mat("./data/adj_simple.json", self.node_id_map)
self.nodes_rep = self.get_nodes_rep(self.node_id_map, self.node_info_map)
self.n_entity = len(self.node_id_map)
#get index
self.data = {}
for name in ["train", "valid", "test"]:
self.data[name] = []
for number, item in enumerate(self.text[name]):
len_u = len(item[0])
                indices = [[], [[] for _ in range(len_u)], [], [], [], []]  # src_len, src, trg, trg_ent_ids, trg_ent_mask, trg_ori_words
                indices[0] = [u.count(' ')+1 for u in item[0]]  # space count + 1; empty padding utterances intentionally count as length 1
max_u_len = max(indices[0])
# history
for i in range(len_u):
words = item[0][i].split()
indices[1][i] = [self.word2index[word] if word in self.word2index
else UNK for word in words] + [PAD] * (max_u_len - len(words))
# answer
words = item[1].split()
#print("item1:: ", len(words))
indices[2] = [self.word2index[word] if word in self.word2index
else UNK for word in words]
indices[2].append(EOS)
# answer entity
entities = item[2].split()
#print("item2 entities:: ", len(entities))
indices[3] = [self.node_id_map[entity_name] for entity_name in entities]
indices[3].append(0)
indices[4] = []
for x in indices[3]:
if x == 0:
indices[4].append(0)
else:
indices[4].append(1)
                # answer original sentence (kept as word strings, not indices)
words = item[3].split()
indices[5] = words
indices[5].append("<EOS>")
if len(indices[2]) != len(indices[3]):
print(number, len(indices[2]), len(indices[3]))
print(item[1])
print(item[2])
exit()
self.data[name].append(indices)
def get_node_info(self):
node_info_map = json.load(open("./data/entity_info.json"))
nodename_attr_map = {}
for node, res in node_info_map.items():
node_name = node.split("::")[-1]
nodename_attr_map[node_name] = res
return node_info_map, nodename_attr_map
def post_process(self, outputs, pred_ents, topK=1):
outputs = outputs.cpu().numpy().tolist()
pred_ents = pred_ents.cpu().numpy()
entity_attr_list = {
"[attraction_address]",
"[restaurant_address]",
"[attraction_phone]",
"[restaurant_phone]",
"[hotel_address]",
"[restaurant_postcode]",
"[attraction_postcode]",
"[hotel_phone]",
"[hotel_postcode]",
"[hospital_phone]"
}
lens_new = []
for i, out in enumerate(outputs):
new_out = []
for j, each in enumerate(out):
if self.index2word[each] == "<$>":
pred_ent = np.argmax(pred_ents[i][j])
nodename = self.id_nodename_map[pred_ent]
new_out.append(nodename)
elif self.index2word[each] in entity_attr_list:
attr_name = self.index2word[each]
cnt = 0
suc_flag = False
for idx, prob in sorted(enumerate(pred_ents[i][j]), key=lambda i: i[1], reverse=True):
if suc_flag or cnt >= topK:
break
nodename = self.id_nodename_map[idx]
if nodename not in self.nodename_attr_map:
cnt += 1
continue
for attr, val in self.nodename_attr_map[nodename].items():
if attr in attr_name:
new_out.append(val)
suc_flag = True
break
cnt += 1
if not suc_flag:
new_out.append("<UNK>")
else:
new_out.append(self.index2word[each])
"""
if each == self.word2index["<$>"]:
pred_ent = np.argmax(pred_ents[i][j])
nodename = self.id_nodename_map[pred_ent]
nodename_wordids = [self.word2index[x] for x in nodename.split()]
new_out += nodename_wordids
else:
new_out.append(each)
"""
outputs[i] = new_out
return outputs
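    # Usage sketch (illustrative, not in the original file): given decoder
    # outputs containing the "<$>" placeholder or an attribute slot such as
    # "[restaurant_phone]", together with per-step entity distributions
    # `pred_ents`, post_process swaps each placeholder for the top-scoring
    # entity name or its matching attribute value:
    #
    #   words = dm.post_process(outputs, pred_ents, topK=3)
    #
    # where `dm` is a DataManager instance and `outputs`/`pred_ents` are the
    # decoder's tensors (these variable names are assumptions).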
def get_nodes_rep(self, node_id_map, node_info_map, max_len=50):
nodes_rep = []
nodes_rep_map = []
for name, id_ in sorted(node_id_map.items(), key=lambda i: i[1]):
if name == "none" and id_ == 0:
nodes_rep.append([PAD] * max_len)
nodes_rep_map.append({"words": ["none"], "idx": [0]})
continue
# the attributes used to build relationship
# attributes as nodes: {"pricerange", "area", "food"}
# attributes only as relation: {"internet", "parking", "stars", "attraction_type", "hotel_type"}
# only use the node name as the node's feature
name = name.split("::")[-1]
node_desc = [name]
nodes_rep_idx = [PAD] * max_len
nodes_rep_idx[0] = self.word2index[name]
nodes_rep_word = [name]
"""
for attr, val in node_info_map.items():
#if attr in {"address", "area", "pricerange", "introduction", "food", "stars"} or "type" in attr:
if attr == "introduction":
node_desc.append(val)
node_desc = " ".join(node_desc)
nodes_rep_idx = []
nodes_rep_word = []
for each_word in node_desc.split():
for word in re.split(r'[\[\](::)_]', each_word):
if word == "":
continue
else:
if word not in self.word2index:
continue
else:
word_idx = self.word2index[word]
nodes_rep_idx.append(word_idx)
nodes_rep_word.append(word)
len_ = len(nodes_rep_idx)
if len_ >= max_len:
nodes_rep_idx = nodes_rep_idx[0:max_len]
nodes_rep_word = nodes_rep_word[0:max_len]
else:
nodes_rep_idx += [PAD] * (max_len - len_)
"""
nodes_rep.append(nodes_rep_idx)
nodes_rep_map.append({"words": nodes_rep_word, "idx": nodes_rep_idx})
json.dump(nodes_rep_map, open("nodes_rep_words_idx.json", "w"))
json.dump(self.word2index, open("word2index.json", "w"))
#exit()
return nodes_rep
def get_node_id_map(self):
data = json.load(open("./data/entity_list_simple.json"))
node_id_map = {}
id_nodename_map = {}
for i, node in enumerate(data):
node_id_map[node] = i + 1
tmp = node.split("::")
#node_name = " ".join(tmp[1].split("_"))
node_name = tmp[1]
id_nodename_map[i+1] = node_name
node_id_map["none"] = 0
id_nodename_map[0] = ""
return node_id_map, id_nodename_map
def get_adj_mat(self, input_file, item_id_map):
adj = json.load(open(input_file))
new_adj = {}
for i, neighbors in adj.items():
i_idx = item_id_map[i]
new_adj[i_idx] = []
for j in neighbors:
j_idx = item_id_map[j]
new_adj[i_idx].append(j_idx)
new_adj = nx.adjacency_matrix(nx.from_dict_of_lists(new_adj))
new_adj = self.normalize_adj(new_adj + sp.eye(new_adj.shape[0]))
new_adj = torch.FloatTensor(np.array(new_adj.todense()))
return new_adj
def normalize_adj(self, mx):
"""Row-normalize sparse matrix"""
rowsum = np.array(mx.sum(1))
r_inv_sqrt = np.power(rowsum, -0.5).flatten()
r_inv_sqrt[np.isinf(r_inv_sqrt)] = 0.
r_mat_inv_sqrt = sp.diags(r_inv_sqrt)
return mx.dot(r_mat_inv_sqrt).transpose().dot(r_mat_inv_sqrt).tocoo()
def create_dataset(self, name, batch_size):
datas = self.data[name]
src_seq_lens = []
src_seqs, trg_seqs = [], []
trg_stops, src_tfs = [], []
trg_ents, trg_ents_mask, trg_seqs_ori = [], [], []
nonstop_voc_size = len(self.index2nonstop)
for item in datas:
src_len, src, trg, ents, ents_mask, trg_ori = item
tensor_src_len, tensor_src, tensor_trg = torch.LongTensor(src_len), \
torch.LongTensor(src), torch.LongTensor(trg)
src_seq_lens.append(tensor_src_len)
src_seqs.append(tensor_src)
trg_seqs.append(tensor_trg)
trg_stop = torch.zeros_like(tensor_trg)
for i, index in enumerate(trg):
if index in self.stop_words_index:
trg_stop[i] = 1
trg_stops.append(trg_stop)
src_tf = torch.zeros(nonstop_voc_size)
for j, uttr in enumerate(src):
for i, index in enumerate(uttr):
if i == src_len[j]:
break
if index not in self.stop_words_index:
src_tf[self.index2nonstop[index]] += 1
if src_tf.sum().item() > 0:
src_tf /= src_tf.sum()
src_tfs.append(src_tf)
trg_ents.append(torch.LongTensor(ents))
trg_ents_mask.append(torch.LongTensor(ents_mask))
trg_seqs_ori.append(trg_ori)
print(len(trg_stops), len(trg_seqs), len(trg_ents), len(trg_seqs_ori))
dataset = Dataset(src_seq_lens, src_seqs, trg_seqs, trg_stops, src_tfs, trg_ents, trg_ents_mask, trg_seqs_ori)
dataloader = data.DataLoader(dataset, batch_size, True, num_workers=0, collate_fn=pad_packed_collate)
return dataloader
def compute_stopword(self, y):
res = torch.zeros_like(y).to(device=device)
for i, row in enumerate(y):
words_index = row.tolist()
res[i] = torch.LongTensor([int(index in self.stop_words_index) for index in words_index])
return res
def interpret(self, preds, refs, lens, f):
i = random.randrange(0, len(lens))
l = max(lens)
for j in range(l):
word = self.index2word[preds[i][j].item()]
print(word, end=' ')
f.write('{0} '.format(word))
if word == '<EOS>':
break
print()
f.write('\n')
l = lens[i]
for j in range(l):
word = self.index2word[refs[i][j].item()]
print(word, end=' ')
f.write('{0} '.format(word))
print()
f.write('\n')
class Dataset(data.Dataset):
|
def pad_packed_collate(batch_data):
def merge(sequences):
lengths = [len(seq) for seq in sequences]
padded_seqs = torch.zeros(len(sequences), max(lengths)).long()
for i, seq in enumerate(sequences):
end = lengths[i]
padded_seqs[i, :end] = seq
return padded_seqs, lengths
def hierarchical_merge(sequences, sequence_lengths):
lengths = torch.stack(sequence_lengths)
utterance_length = lengths.shape[1]
padded_seqs = torch.zeros(len(sequences), utterance_length, lengths.max().item()).long()
for i, seq in enumerate(sequences):
word_end = max(lengths[i]).item()
padded_seqs[i, :utterance_length, :word_end] = seq
return padded_seqs, lengths
# sort a list by sequence length (descending order) to use pack_padded_sequence
batch_data.sort(key=lambda x: len(x[0]), reverse=True)
# separate source and target sequences
src_seq_lens, src_seqs, trg_seqs, trg_stops, src_tfs, trg_ents, trg_ents_mask, trg_seqs_ori = zip(*batch_data)
src_seqs, src_lens = hierarchical_merge(src_seqs, src_seq_lens)
trg_seqs, trg_lens = merge(trg_seqs)
trg_stops, _ = merge(trg_stops)
trg_ents, _ = merge(trg_ents)
trg_ents_mask, _ = merge(trg_ents_mask)
return (src_seqs.to(device=device), src_lens.to(device=device),
trg_seqs.to(device=device), trg_lens,
trg_stops.to(device=device), torch.stack(src_tfs).to(device=device),
trg_ents.to(device=device), trg_ents_mask.to(device=device), trg_seqs_ori)
| def __init__(self, src_seq_lens, src_seqs, trg_seqs, trg_stops, src_tfs, trg_ents, trg_ents_mask, trg_seqs_ori):
self.src_seq_lens = src_seq_lens
self.src_seqs = src_seqs
self.trg_seqs = trg_seqs
self.trg_stops = trg_stops
self.src_tfs = src_tfs
self.num_total_seqs = len(src_seqs)
self.trg_ents = trg_ents
self.trg_ents_mask = trg_ents_mask
self.trg_seqs_ori = trg_seqs_ori
def __getitem__(self, index):
src_seq_len = self.src_seq_lens[index]
src_seq = self.src_seqs[index]
trg_seq = self.trg_seqs[index]
trg_stop = self.trg_stops[index]
src_tf = self.src_tfs[index]
trg_ent = self.trg_ents[index]
trg_ent_mask = self.trg_ents_mask[index]
trg_seq_ori = self.trg_seqs_ori[index]
return src_seq_len, src_seq, trg_seq, trg_stop, src_tf, trg_ent, trg_ent_mask, trg_seq_ori
def __len__(self):
return self.num_total_seqs | identifier_body |
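The rows in this dump follow a fill-in-the-middle layout: each record carries a file name, a prefix, a suffix, the middle span that was cut out at the `|` markers, and a fim_type label such as identifier_body or random_line_split. As a minimal sketch (the dict keys below are assumptions for illustration; the dump itself only separates fields with `|` markers), reassembling a record is plain string concatenation in prefix-middle-suffix order, even though the dump prints the suffix before the middle:

def reassemble_fim_record(record):
    # Hypothetical field names; chosen for readability only.
    return record["prefix"] + record["middle"] + record["suffix"]

# Example: in the identifier_body row above, the prefix ends at the
# `class Dataset(data.Dataset):` line, the middle is the class body,
# and the suffix resumes at `def pad_packed_collate(batch_data):`.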
DataManager.py | # -*- coding: utf-8 -*-
"""
Created on Sun Dec 30 16:14:04 2018
@author: truthless
"""
import random
from stop import STOP_WORDS
import numpy as np
import torch
import torch.utils.data as data
import re
import json
import networkx as nx
import scipy.sparse as sp
PAD = 0
UNK = 1 #OOV
GO = 2
EOS = 3
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
class DataManager:
def __init__(self, stopword_freq_lb, path, no_pretrain_word2vec, dim, context_len):
#read text
self.text = {}
for name in ["train", "valid", "test"]:
self.text[name] = []
entities = []
file_path = "{0}/{1}_ent_1.txt".format(path, name)
for line in open(file_path):
entities.append(line.strip())
sys_ans_utt_ori = []
file_path = "{0}/{1}_ans_utt_ori_1.txt".format(path, name)
for line in open(file_path):
sys_ans_utt_ori.append(line.strip())
cnt = 0
file_path = "{0}/{1}_utt_1.txt".format(path, name)
for line in open(file_path):
utterances = line.strip().split('\t')
utterances = [''] * (context_len - len(utterances)) + utterances
self.text[name].append([utterances[-context_len:-1], utterances[-1], entities[cnt], sys_ans_utt_ori[cnt]])
cnt += 1
#arrange words
wordscount = {}
for name in ["train", "valid"]:
texts = self.text[name]
for item in texts:
words = item[0][-1].split() + item[1].split()
for word in words:
if word in wordscount:
wordscount[word] += 1
else:
wordscount[word] = 1
wordssorted = sorted(wordscount.items(), key = lambda d: (d[1],d[0]), reverse=True)
output = open("word_cnt_stat.txt", "w")
for i, (key, value) in enumerate(wordssorted):
output.write(str(value) + ":" + str(key) + "\n")
| self.word2index[key] = i + 4 #PAD,UNK,GO,EOS
if value >= stopword_freq_lb:
stopwords_self.add(key)
# to add all entity name into vocab
entity_list = json.load(open("./data/entity_list_simple.json"))
start_idx = len(self.word2index)
for entity_name in entity_list:
entity_name = entity_name.split("::")[-1]
if entity_name not in self.word2index:
self.word2index[entity_name] = start_idx
start_idx += 1
self.stop_words_index = set([PAD, UNK, GO, EOS])
#self.stop_words_index |= set([self.word2index[word] for word in STOP_WORDS
# if word in self.word2index])
# here we add all words into stopword list
self.stop_words_index |= set([self.word2index[word] for word in stopwords_self])
self.index2word = dict((v, k) for k, v in self.word2index.items())
#load word vector
if no_pretrain_word2vec:
self.vector = None
else:
self.vector = 0.1 * np.random.rand(len(self.word2index), dim)
with open("{0}/vector.txt".format(path)) as fl:
for line in fl:
vec = line.strip().split()
word = vec[0].lower()
vec = list(map(float, vec[1:]))
if word in self.word2index:
self.vector[self.word2index[word]] = np.asarray(vec)
self.vector = torch.Tensor(self.vector)
# compute tf
len_voc = len(self.word2index.values())
self.index2nonstop = {}
cnt = 0
for i in range(len_voc):
if i not in self.stop_words_index:
self.index2nonstop[i] = cnt
cnt += 1
# for graph initialization
self.node_id_map, self.id_nodename_map = self.get_node_id_map()
self.node_info_map, self.nodename_attr_map = self.get_node_info()
self.adj = self.get_adj_mat("./data/adj_simple.json", self.node_id_map)
self.nodes_rep = self.get_nodes_rep(self.node_id_map, self.node_info_map)
self.n_entity = len(self.node_id_map)
#get index
self.data = {}
for name in ["train", "valid", "test"]:
self.data[name] = []
for number, item in enumerate(self.text[name]):
len_u = len(item[0])
indices = [[], [[] for _ in range(len_u)], [], [], [], []] #src_len, src, trg, trg_entities, trg_entities_mask, trg_ori
indices[0] = [u.count(' ')+1 for u in item[0]] # space count + 1 == word count; empty padding utterances deliberately get length 1
max_u_len = max(indices[0])
# history
for i in range(len_u):
words = item[0][i].split()
indices[1][i] = [self.word2index[word] if word in self.word2index
else UNK for word in words] + [PAD] * (max_u_len - len(words))
# answer
words = item[1].split()
#print("item1:: ", len(words))
indices[2] = [self.word2index[word] if word in self.word2index
else UNK for word in words]
indices[2].append(EOS)
# answer entity
entities = item[2].split()
#print("item2 entities:: ", len(entities))
indices[3] = [self.node_id_map[entity_name] for entity_name in entities]
indices[3].append(0)
indices[4] = []
for x in indices[3]:
if x == 0:
indices[4].append(0)
else:
indices[4].append(1)
# answer original sentence
words = item[3].split()
indices[5] = words
indices[5].append("<EOS>")
if len(indices[2]) != len(indices[3]):
print(number, len(indices[2]), len(indices[3]))
print(item[1])
print(item[2])
exit()
self.data[name].append(indices)
def get_node_info(self):
node_info_map = json.load(open("./data/entity_info.json"))
nodename_attr_map = {}
for node, res in node_info_map.items():
node_name = node.split("::")[-1]
nodename_attr_map[node_name] = res
return node_info_map, nodename_attr_map
def post_process(self, outputs, pred_ents, topK=1):
outputs = outputs.cpu().numpy().tolist()
pred_ents = pred_ents.cpu().numpy()
entity_attr_list = {
"[attraction_address]",
"[restaurant_address]",
"[attraction_phone]",
"[restaurant_phone]",
"[hotel_address]",
"[restaurant_postcode]",
"[attraction_postcode]",
"[hotel_phone]",
"[hotel_postcode]",
"[hospital_phone]"
}
lens_new = []
for i, out in enumerate(outputs):
new_out = []
for j, each in enumerate(out):
if self.index2word[each] == "<$>":
pred_ent = np.argmax(pred_ents[i][j])
nodename = self.id_nodename_map[pred_ent]
new_out.append(nodename)
elif self.index2word[each] in entity_attr_list:
attr_name = self.index2word[each]
cnt = 0
suc_flag = False
for idx, prob in sorted(enumerate(pred_ents[i][j]), key=lambda i: i[1], reverse=True):
if suc_flag or cnt >= topK:
break
nodename = self.id_nodename_map[idx]
if nodename not in self.nodename_attr_map:
cnt += 1
continue
for attr, val in self.nodename_attr_map[nodename].items():
if attr in attr_name:
new_out.append(val)
suc_flag = True
break
cnt += 1
if not suc_flag:
new_out.append("<UNK>")
else:
new_out.append(self.index2word[each])
"""
if each == self.word2index["<$>"]:
pred_ent = np.argmax(pred_ents[i][j])
nodename = self.id_nodename_map[pred_ent]
nodename_wordids = [self.word2index[x] for x in nodename.split()]
new_out += nodename_wordids
else:
new_out.append(each)
"""
outputs[i] = new_out
return outputs
def get_nodes_rep(self, node_id_map, node_info_map, max_len=50):
nodes_rep = []
nodes_rep_map = []
for name, id_ in sorted(node_id_map.items(), key=lambda i: i[1]):
if name == "none" and id_ == 0:
nodes_rep.append([PAD] * max_len)
nodes_rep_map.append({"words": ["none"], "idx": [0]})
continue
# the attributes used to build relationship
# attributes as nodes: {"pricerange", "area", "food"}
# attributes only as relation: {"internet", "parking", "stars", "attraction_type", "hotel_type"}
# only use the node name as the node's feature
name = name.split("::")[-1]
node_desc = [name]
nodes_rep_idx = [PAD] * max_len
nodes_rep_idx[0] = self.word2index[name]
nodes_rep_word = [name]
"""
for attr, val in node_info_map.items():
#if attr in {"address", "area", "pricerange", "introduction", "food", "stars"} or "type" in attr:
if attr == "introduction":
node_desc.append(val)
node_desc = " ".join(node_desc)
nodes_rep_idx = []
nodes_rep_word = []
for each_word in node_desc.split():
for word in re.split(r'[\[\](::)_]', each_word):
if word == "":
continue
else:
if word not in self.word2index:
continue
else:
word_idx = self.word2index[word]
nodes_rep_idx.append(word_idx)
nodes_rep_word.append(word)
len_ = len(nodes_rep_idx)
if len_ >= max_len:
nodes_rep_idx = nodes_rep_idx[0:max_len]
nodes_rep_word = nodes_rep_word[0:max_len]
else:
nodes_rep_idx += [PAD] * (max_len - len_)
"""
nodes_rep.append(nodes_rep_idx)
nodes_rep_map.append({"words": nodes_rep_word, "idx": nodes_rep_idx})
json.dump(nodes_rep_map, open("nodes_rep_words_idx.json", "w"))
json.dump(self.word2index, open("word2index.json", "w"))
#exit()
return nodes_rep
def get_node_id_map(self):
data = json.load(open("./data/entity_list_simple.json"))
node_id_map = {}
id_nodename_map = {}
for i, node in enumerate(data):
node_id_map[node] = i + 1
tmp = node.split("::")
#node_name = " ".join(tmp[1].split("_"))
node_name = tmp[1]
id_nodename_map[i+1] = node_name
node_id_map["none"] = 0
id_nodename_map[0] = ""
return node_id_map, id_nodename_map
def get_adj_mat(self, input_file, item_id_map):
adj = json.load(open(input_file))
new_adj = {}
for i, neighbors in adj.items():
i_idx = item_id_map[i]
new_adj[i_idx] = []
for j in neighbors:
j_idx = item_id_map[j]
new_adj[i_idx].append(j_idx)
new_adj = nx.adjacency_matrix(nx.from_dict_of_lists(new_adj))
new_adj = self.normalize_adj(new_adj + sp.eye(new_adj.shape[0]))
new_adj = torch.FloatTensor(np.array(new_adj.todense()))
return new_adj
def normalize_adj(self, mx):
"""Row-normalize sparse matrix"""
rowsum = np.array(mx.sum(1))
r_inv_sqrt = np.power(rowsum, -0.5).flatten()
r_inv_sqrt[np.isinf(r_inv_sqrt)] = 0.
r_mat_inv_sqrt = sp.diags(r_inv_sqrt)
return mx.dot(r_mat_inv_sqrt).transpose().dot(r_mat_inv_sqrt).tocoo()
def create_dataset(self, name, batch_size):
datas = self.data[name]
src_seq_lens = []
src_seqs, trg_seqs = [], []
trg_stops, src_tfs = [], []
trg_ents, trg_ents_mask, trg_seqs_ori = [], [], []
nonstop_voc_size = len(self.index2nonstop)
for item in datas:
src_len, src, trg, ents, ents_mask, trg_ori = item
tensor_src_len, tensor_src, tensor_trg = torch.LongTensor(src_len), \
torch.LongTensor(src), torch.LongTensor(trg)
src_seq_lens.append(tensor_src_len)
src_seqs.append(tensor_src)
trg_seqs.append(tensor_trg)
trg_stop = torch.zeros_like(tensor_trg)
for i, index in enumerate(trg):
if index in self.stop_words_index:
trg_stop[i] = 1
trg_stops.append(trg_stop)
src_tf = torch.zeros(nonstop_voc_size)
for j, uttr in enumerate(src):
for i, index in enumerate(uttr):
if i == src_len[j]:
break
if index not in self.stop_words_index:
src_tf[self.index2nonstop[index]] += 1
if src_tf.sum().item() > 0:
src_tf /= src_tf.sum()
src_tfs.append(src_tf)
trg_ents.append(torch.LongTensor(ents))
trg_ents_mask.append(torch.LongTensor(ents_mask))
trg_seqs_ori.append(trg_ori)
print(len(trg_stops), len(trg_seqs), len(trg_ents), len(trg_seqs_ori))
dataset = Dataset(src_seq_lens, src_seqs, trg_seqs, trg_stops, src_tfs, trg_ents, trg_ents_mask, trg_seqs_ori)
dataloader = data.DataLoader(dataset, batch_size, True, num_workers=0, collate_fn=pad_packed_collate)
return dataloader
def compute_stopword(self, y):
res = torch.zeros_like(y).to(device=device)
for i, row in enumerate(y):
words_index = row.tolist()
res[i] = torch.LongTensor([int(index in self.stop_words_index) for index in words_index])
return res
def interpret(self, preds, refs, lens, f):
i = random.randrange(0, len(lens))
l = max(lens)
for j in range(l):
word = self.index2word[preds[i][j].item()]
print(word, end=' ')
f.write('{0} '.format(word))
if word == '<EOS>':
break
print()
f.write('\n')
l = lens[i]
for j in range(l):
word = self.index2word[refs[i][j].item()]
print(word, end=' ')
f.write('{0} '.format(word))
print()
f.write('\n')
class Dataset(data.Dataset):
def __init__(self, src_seq_lens, src_seqs, trg_seqs, trg_stops, src_tfs, trg_ents, trg_ents_mask, trg_seqs_ori):
self.src_seq_lens = src_seq_lens
self.src_seqs = src_seqs
self.trg_seqs = trg_seqs
self.trg_stops = trg_stops
self.src_tfs = src_tfs
self.num_total_seqs = len(src_seqs)
self.trg_ents = trg_ents
self.trg_ents_mask = trg_ents_mask
self.trg_seqs_ori = trg_seqs_ori
def __getitem__(self, index):
src_seq_len = self.src_seq_lens[index]
src_seq = self.src_seqs[index]
trg_seq = self.trg_seqs[index]
trg_stop = self.trg_stops[index]
src_tf = self.src_tfs[index]
trg_ent = self.trg_ents[index]
trg_ent_mask = self.trg_ents_mask[index]
trg_seq_ori = self.trg_seqs_ori[index]
return src_seq_len, src_seq, trg_seq, trg_stop, src_tf, trg_ent, trg_ent_mask, trg_seq_ori
def __len__(self):
return self.num_total_seqs
def pad_packed_collate(batch_data):
def merge(sequences):
lengths = [len(seq) for seq in sequences]
padded_seqs = torch.zeros(len(sequences), max(lengths)).long()
for i, seq in enumerate(sequences):
end = lengths[i]
padded_seqs[i, :end] = seq
return padded_seqs, lengths
def hierarchical_merge(sequences, sequence_lengths):
lengths = torch.stack(sequence_lengths)
utterance_length = lengths.shape[1]
padded_seqs = torch.zeros(len(sequences), utterance_length, lengths.max().item()).long()
for i, seq in enumerate(sequences):
word_end = max(lengths[i]).item()
padded_seqs[i, :utterance_length, :word_end] = seq
return padded_seqs, lengths
# sort a list by sequence length (descending order) to use pack_padded_sequence
batch_data.sort(key=lambda x: len(x[0]), reverse=True)
# separate source and target sequences
src_seq_lens, src_seqs, trg_seqs, trg_stops, src_tfs, trg_ents, trg_ents_mask, trg_seqs_ori = zip(*batch_data)
src_seqs, src_lens = hierarchical_merge(src_seqs, src_seq_lens)
trg_seqs, trg_lens = merge(trg_seqs)
trg_stops, _ = merge(trg_stops)
trg_ents, _ = merge(trg_ents)
trg_ents_mask, _ = merge(trg_ents_mask)
return (src_seqs.to(device=device), src_lens.to(device=device),
trg_seqs.to(device=device), trg_lens,
trg_stops.to(device=device), torch.stack(src_tfs).to(device=device),
trg_ents.to(device=device), trg_ents_mask.to(device=device), trg_seqs_ori) | self.word2index = {'<PAD>':0, '<UNK>':1, '<GO>':2, '<EOS>':3}
stopwords_self = set()
for i, (key, value) in enumerate(wordssorted):
if value <= 5:
break | random_line_split |
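For reference, here is a minimal NumPy/SciPy sketch of the symmetric normalization that `get_adj_mat` and `normalize_adj` apply to the entity graph: add self-loops, then scale by the inverse square root of the node degree on both sides, i.e. D^-1/2 (A + I) D^-1/2. The 3-node adjacency matrix is invented for illustration:

import numpy as np
import scipy.sparse as sp

A = sp.csr_matrix(np.array([[0, 1, 1],
                            [1, 0, 0],
                            [1, 0, 0]], dtype=np.float64))
A_hat = A + sp.eye(3)                      # add self-loops
deg = np.asarray(A_hat.sum(1)).flatten()   # node degrees (all > 0 here)
d_inv_sqrt = sp.diags(np.power(deg, -0.5))
A_norm = d_inv_sqrt @ A_hat @ d_inv_sqrt   # symmetric normalization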
DataManager.py | # -*- coding: utf-8 -*-
"""
Created on Sun Dec 30 16:14:04 2018
@author: truthless
"""
import random
from stop import STOP_WORDS
import numpy as np
import torch
import torch.utils.data as data
import re
import json
import networkx as nx
import scipy.sparse as sp
PAD = 0
UNK = 1 #OOV
GO = 2
EOS = 3
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
class DataManager:
def __init__(self, stopword_freq_lb, path, no_pretrain_word2vec, dim, context_len):
#read text
self.text = {}
for name in ["train", "valid", "test"]:
self.text[name] = []
entities = []
file_path = "{0}/{1}_ent_1.txt".format(path, name)
for line in open(file_path):
entities.append(line.strip())
sys_ans_utt_ori = []
file_path = "{0}/{1}_ans_utt_ori_1.txt".format(path, name)
for line in open(file_path):
sys_ans_utt_ori.append(line.strip())
cnt = 0
file_path = "{0}/{1}_utt_1.txt".format(path, name)
for line in open(file_path):
utterances = line.strip().split('\t')
utterances = [''] * (context_len - len(utterances)) + utterances
self.text[name].append([utterances[-context_len:-1], utterances[-1], entities[cnt], sys_ans_utt_ori[cnt]])
cnt += 1
#arrange words
wordscount = {}
for name in ["train", "valid"]:
texts = self.text[name]
for item in texts:
words = item[0][-1].split() + item[1].split()
for word in words:
if word in wordscount:
|
else:
wordscount[word] = 1
wordssorted = sorted(wordscount.items(), key = lambda d: (d[1],d[0]), reverse=True)
output = open("word_cnt_stat.txt", "w")
for i, (key, value) in enumerate(wordssorted):
output.write(str(value) + ":" + str(key) + "\n")
self.word2index = {'<PAD>':0, '<UNK>':1, '<GO>':2, '<EOS>':3}
stopwords_self = set()
for i, (key, value) in enumerate(wordssorted):
if value <= 5:
break
self.word2index[key] = i + 4 #PAD,UNK,GO,EOS
if value >= stopword_freq_lb:
stopwords_self.add(key)
# to add all entity name into vocab
entity_list = json.load(open("./data/entity_list_simple.json"))
start_idx = len(self.word2index)
for entity_name in entity_list:
entity_name = entity_name.split("::")[-1]
if entity_name not in self.word2index:
self.word2index[entity_name] = start_idx
start_idx += 1
self.stop_words_index = set([PAD, UNK, GO, EOS])
#self.stop_words_index |= set([self.word2index[word] for word in STOP_WORDS
# if word in self.word2index])
# here we add all words into stopword list
self.stop_words_index |= set([self.word2index[word] for word in stopwords_self])
self.index2word = dict((v, k) for k, v in self.word2index.items())
#load word vector
if no_pretrain_word2vec:
self.vector = None
else:
self.vector = 0.1 * np.random.rand(len(self.word2index), dim)
with open("{0}/vector.txt".format(path)) as fl:
for line in fl:
vec = line.strip().split()
word = vec[0].lower()
vec = list(map(float, vec[1:]))
if word in self.word2index:
self.vector[self.word2index[word]] = np.asarray(vec)
self.vector = torch.Tensor(self.vector)
# compute tf
len_voc = len(self.word2index.values())
self.index2nonstop = {}
cnt = 0
for i in range(len_voc):
if i not in self.stop_words_index:
self.index2nonstop[i] = cnt
cnt += 1
# for graph initialization
self.node_id_map, self.id_nodename_map = self.get_node_id_map()
self.node_info_map, self.nodename_attr_map = self.get_node_info()
self.adj = self.get_adj_mat("./data/adj_simple.json", self.node_id_map)
self.nodes_rep = self.get_nodes_rep(self.node_id_map, self.node_info_map)
self.n_entity = len(self.node_id_map)
#get index
self.data = {}
for name in ["train", "valid", "test"]:
self.data[name] = []
for number, item in enumerate(self.text[name]):
len_u = len(item[0])
indices = [[], [[] for _ in range(len_u)], [], [], [], []] #src_len, src, trg, trg_entities, trg_entities_mask, trg_ori
indices[0] = [u.count(' ')+1 for u in item[0]] # space count + 1 == word count; empty padding utterances deliberately get length 1
max_u_len = max(indices[0])
# history
for i in range(len_u):
words = item[0][i].split()
indices[1][i] = [self.word2index[word] if word in self.word2index
else UNK for word in words] + [PAD] * (max_u_len - len(words))
# answer
words = item[1].split()
#print("item1:: ", len(words))
indices[2] = [self.word2index[word] if word in self.word2index
else UNK for word in words]
indices[2].append(EOS)
# answer entity
entities = item[2].split()
#print("item2 entities:: ", len(entities))
indices[3] = [self.node_id_map[entity_name] for entity_name in entities]
indices[3].append(0)
indices[4] = []
for x in indices[3]:
if x == 0:
indices[4].append(0)
else:
indices[4].append(1)
# answer original sentence
words = item[3].split()
indices[5] = words
indices[5].append("<EOS>")
if len(indices[2]) != len(indices[3]):
print(number, len(indices[2]), len(indices[3]))
print(item[1])
print(item[2])
exit()
self.data[name].append(indices)
def get_node_info(self):
node_info_map = json.load(open("./data/entity_info.json"))
nodename_attr_map = {}
for node, res in node_info_map.items():
node_name = node.split("::")[-1]
nodename_attr_map[node_name] = res
return node_info_map, nodename_attr_map
def post_process(self, outputs, pred_ents, topK=1):
outputs = outputs.cpu().numpy().tolist()
pred_ents = pred_ents.cpu().numpy()
entity_attr_list = {
"[attraction_address]",
"[restaurant_address]",
"[attraction_phone]",
"[restaurant_phone]",
"[hotel_address]",
"[restaurant_postcode]",
"[attraction_postcode]",
"[hotel_phone]",
"[hotel_postcode]",
"[hospital_phone]"
}
lens_new = []
for i, out in enumerate(outputs):
new_out = []
for j, each in enumerate(out):
if self.index2word[each] == "<$>":
pred_ent = np.argmax(pred_ents[i][j])
nodename = self.id_nodename_map[pred_ent]
new_out.append(nodename)
elif self.index2word[each] in entity_attr_list:
attr_name = self.index2word[each]
cnt = 0
suc_flag = False
for idx, prob in sorted(enumerate(pred_ents[i][j]), key=lambda i: i[1], reverse=True):
if suc_flag or cnt >= topK:
break
nodename = self.id_nodename_map[idx]
if nodename not in self.nodename_attr_map:
cnt += 1
continue
for attr, val in self.nodename_attr_map[nodename].items():
if attr in attr_name:
new_out.append(val)
suc_flag = True
break
cnt += 1
if not suc_flag:
new_out.append("<UNK>")
else:
new_out.append(self.index2word[each])
"""
if each == self.word2index["<$>"]:
pred_ent = np.argmax(pred_ents[i][j])
nodename = self.id_nodename_map[pred_ent]
nodename_wordids = [self.word2index[x] for x in nodename.split()]
new_out += nodename_wordids
else:
new_out.append(each)
"""
outputs[i] = new_out
return outputs
def get_nodes_rep(self, node_id_map, node_info_map, max_len=50):
nodes_rep = []
nodes_rep_map = []
for name, id_ in sorted(node_id_map.items(), key=lambda i: i[1]):
if name == "none" and id_ == 0:
nodes_rep.append([PAD] * max_len)
nodes_rep_map.append({"words": ["none"], "idx": [0]})
continue
# the attributes used to build relationship
# attributes as nodes: {"pricerange", "area", "food"}
# attributes only as relation: {"internet", "parking", "stars", "attraction_type", "hotel_type"}
# only use the node name as the node's feature
name = name.split("::")[-1]
node_desc = [name]
nodes_rep_idx = [PAD] * max_len
nodes_rep_idx[0] = self.word2index[name]
nodes_rep_word = [name]
"""
for attr, val in node_info_map.items():
#if attr in {"address", "area", "pricerange", "introduction", "food", "stars"} or "type" in attr:
if attr == "introduction":
node_desc.append(val)
node_desc = " ".join(node_desc)
nodes_rep_idx = []
nodes_rep_word = []
for each_word in node_desc.split():
for word in re.split(r'[\[\](::)_]', each_word):
if word == "":
continue
else:
if word not in self.word2index:
continue
else:
word_idx = self.word2index[word]
nodes_rep_idx.append(word_idx)
nodes_rep_word.append(word)
len_ = len(nodes_rep_idx)
if len_ >= max_len:
nodes_rep_idx = nodes_rep_idx[0:max_len]
nodes_rep_word = nodes_rep_word[0:max_len]
else:
nodes_rep_idx += [PAD] * (max_len - len_)
"""
nodes_rep.append(nodes_rep_idx)
nodes_rep_map.append({"words": nodes_rep_word, "idx": nodes_rep_idx})
json.dump(nodes_rep_map, open("nodes_rep_words_idx.json", "w"))
json.dump(self.word2index, open("word2index.json", "w"))
#exit()
return nodes_rep
def get_node_id_map(self):
data = json.load(open("./data/entity_list_simple.json"))
node_id_map = {}
id_nodename_map = {}
for i, node in enumerate(data):
node_id_map[node] = i + 1
tmp = node.split("::")
#node_name = " ".join(tmp[1].split("_"))
node_name = tmp[1]
id_nodename_map[i+1] = node_name
node_id_map["none"] = 0
id_nodename_map[0] = ""
return node_id_map, id_nodename_map
def get_adj_mat(self, input_file, item_id_map):
adj = json.load(open(input_file))
new_adj = {}
for i, neighbors in adj.items():
i_idx = item_id_map[i]
new_adj[i_idx] = []
for j in neighbors:
j_idx = item_id_map[j]
new_adj[i_idx].append(j_idx)
new_adj = nx.adjacency_matrix(nx.from_dict_of_lists(new_adj))
new_adj = self.normalize_adj(new_adj + sp.eye(new_adj.shape[0]))
new_adj = torch.FloatTensor(np.array(new_adj.todense()))
return new_adj
def normalize_adj(self, mx):
"""Row-normalize sparse matrix"""
rowsum = np.array(mx.sum(1))
r_inv_sqrt = np.power(rowsum, -0.5).flatten()
r_inv_sqrt[np.isinf(r_inv_sqrt)] = 0.
r_mat_inv_sqrt = sp.diags(r_inv_sqrt)
return mx.dot(r_mat_inv_sqrt).transpose().dot(r_mat_inv_sqrt).tocoo()
def create_dataset(self, name, batch_size):
datas = self.data[name]
src_seq_lens = []
src_seqs, trg_seqs = [], []
trg_stops, src_tfs = [], []
trg_ents, trg_ents_mask, trg_seqs_ori = [], [], []
nonstop_voc_size = len(self.index2nonstop)
for item in datas:
src_len, src, trg, ents, ents_mask, trg_ori = item
tensor_src_len, tensor_src, tensor_trg = torch.LongTensor(src_len), \
torch.LongTensor(src), torch.LongTensor(trg)
src_seq_lens.append(tensor_src_len)
src_seqs.append(tensor_src)
trg_seqs.append(tensor_trg)
trg_stop = torch.zeros_like(tensor_trg)
for i, index in enumerate(trg):
if index in self.stop_words_index:
trg_stop[i] = 1
trg_stops.append(trg_stop)
src_tf = torch.zeros(nonstop_voc_size)
for j, uttr in enumerate(src):
for i, index in enumerate(uttr):
if i == src_len[j]:
break
if index not in self.stop_words_index:
src_tf[self.index2nonstop[index]] += 1
if src_tf.sum().item() > 0:
src_tf /= src_tf.sum()
src_tfs.append(src_tf)
trg_ents.append(torch.LongTensor(ents))
trg_ents_mask.append(torch.LongTensor(ents_mask))
trg_seqs_ori.append(trg_ori)
print(len(trg_stops), len(trg_seqs), len(trg_ents), len(trg_seqs_ori))
dataset = Dataset(src_seq_lens, src_seqs, trg_seqs, trg_stops, src_tfs, trg_ents, trg_ents_mask, trg_seqs_ori)
dataloader = data.DataLoader(dataset, batch_size, True, num_workers=0, collate_fn=pad_packed_collate)
return dataloader
def compute_stopword(self, y):
res = torch.zeros_like(y).to(device=device)
for i, row in enumerate(y):
words_index = row.tolist()
res[i] = torch.LongTensor([int(index in self.stop_words_index) for index in words_index])
return res
def interpret(self, preds, refs, lens, f):
i = random.randrange(0, len(lens))
l = max(lens)
for j in range(l):
word = self.index2word[preds[i][j].item()]
print(word, end=' ')
f.write('{0} '.format(word))
if word == '<EOS>':
break
print()
f.write('\n')
l = lens[i]
for j in range(l):
word = self.index2word[refs[i][j].item()]
print(word, end=' ')
f.write('{0} '.format(word))
print()
f.write('\n')
class Dataset(data.Dataset):
def __init__(self, src_seq_lens, src_seqs, trg_seqs, trg_stops, src_tfs, trg_ents, trg_ents_mask, trg_seqs_ori):
self.src_seq_lens = src_seq_lens
self.src_seqs = src_seqs
self.trg_seqs = trg_seqs
self.trg_stops = trg_stops
self.src_tfs = src_tfs
self.num_total_seqs = len(src_seqs)
self.trg_ents = trg_ents
self.trg_ents_mask = trg_ents_mask
self.trg_seqs_ori = trg_seqs_ori
def __getitem__(self, index):
src_seq_len = self.src_seq_lens[index]
src_seq = self.src_seqs[index]
trg_seq = self.trg_seqs[index]
trg_stop = self.trg_stops[index]
src_tf = self.src_tfs[index]
trg_ent = self.trg_ents[index]
trg_ent_mask = self.trg_ents_mask[index]
trg_seq_ori = self.trg_seqs_ori[index]
return src_seq_len, src_seq, trg_seq, trg_stop, src_tf, trg_ent, trg_ent_mask, trg_seq_ori
def __len__(self):
return self.num_total_seqs
def pad_packed_collate(batch_data):
def merge(sequences):
lengths = [len(seq) for seq in sequences]
padded_seqs = torch.zeros(len(sequences), max(lengths)).long()
for i, seq in enumerate(sequences):
end = lengths[i]
padded_seqs[i, :end] = seq
return padded_seqs, lengths
def hierarchical_merge(sequences, sequence_lengths):
lengths = torch.stack(sequence_lengths)
utterance_length = lengths.shape[1]
padded_seqs = torch.zeros(len(sequences), utterance_length, lengths.max().item()).long()
for i, seq in enumerate(sequences):
word_end = max(lengths[i]).item()
padded_seqs[i, :utterance_length, :word_end] = seq
return padded_seqs, lengths
# sort a list by sequence length (descending order) to use pack_padded_sequence
batch_data.sort(key=lambda x: len(x[0]), reverse=True)
# separate source and target sequences
src_seq_lens, src_seqs, trg_seqs, trg_stops, src_tfs, trg_ents, trg_ents_mask, trg_seqs_ori = zip(*batch_data)
src_seqs, src_lens = hierarchical_merge(src_seqs, src_seq_lens)
trg_seqs, trg_lens = merge(trg_seqs)
trg_stops, _ = merge(trg_stops)
trg_ents, _ = merge(trg_ents)
trg_ents_mask, _ = merge(trg_ents_mask)
return (src_seqs.to(device=device), src_lens.to(device=device),
trg_seqs.to(device=device), trg_lens,
trg_stops.to(device=device), torch.stack(src_tfs).to(device=device),
trg_ents.to(device=device), trg_ents_mask.to(device=device), trg_seqs_ori)
| wordscount[word] += 1 | conditional_block |
DataManager.py | # -*- coding: utf-8 -*-
"""
Created on Sun Dec 30 16:14:04 2018
@author: truthless
"""
import random
from stop import STOP_WORDS
import numpy as np
import torch
import torch.utils.data as data
import re
import json
import networkx as nx
import scipy.sparse as sp
PAD = 0
UNK = 1 #OOV
GO = 2
EOS = 3
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
class DataManager:
def __init__(self, stopword_freq_lb, path, no_pretrain_word2vec, dim, context_len):
#read text
self.text = {}
for name in ["train", "valid", "test"]:
self.text[name] = []
entities = []
file_path = "{0}/{1}_ent_1.txt".format(path, name)
for line in open(file_path):
entities.append(line.strip())
sys_ans_utt_ori = []
file_path = "{0}/{1}_ans_utt_ori_1.txt".format(path, name)
for line in open(file_path):
sys_ans_utt_ori.append(line.strip())
cnt = 0
file_path = "{0}/{1}_utt_1.txt".format(path, name)
for line in open(file_path):
utterances = line.strip().split('\t')
utterances = [''] * (context_len - len(utterances)) + utterances
self.text[name].append([utterances[-context_len:-1], utterances[-1], entities[cnt], sys_ans_utt_ori[cnt]])
cnt += 1
#arrange words
wordscount = {}
for name in ["train", "valid"]:
texts = self.text[name]
for item in texts:
words = item[0][-1].split() + item[1].split()
for word in words:
if word in wordscount:
wordscount[word] += 1
else:
wordscount[word] = 1
wordssorted = sorted(wordscount.items(), key = lambda d: (d[1],d[0]), reverse=True)
output = open("word_cnt_stat.txt", "w")
for i, (key, value) in enumerate(wordssorted):
output.write(str(value) + ":" + str(key) + "\n")
self.word2index = {'<PAD>':0, '<UNK>':1, '<GO>':2, '<EOS>':3}
stopwords_self = set()
for i, (key, value) in enumerate(wordssorted):
if value <= 5:
break
self.word2index[key] = i + 4 #PAD,UNK,GO,EOS
if value >= stopword_freq_lb:
stopwords_self.add(key)
# to add all entity name into vocab
entity_list = json.load(open("./data/entity_list_simple.json"))
start_idx = len(self.word2index)
for entity_name in entity_list:
entity_name = entity_name.split("::")[-1]
if entity_name not in self.word2index:
self.word2index[entity_name] = start_idx
start_idx += 1
self.stop_words_index = set([PAD, UNK, GO, EOS])
#self.stop_words_index |= set([self.word2index[word] for word in STOP_WORDS
# if word in self.word2index])
# here we add all words into stopword list
self.stop_words_index |= set([self.word2index[word] for word in stopwords_self])
self.index2word = dict((v, k) for k, v in self.word2index.items())
#load word vector
if no_pretrain_word2vec:
self.vector = None
else:
self.vector = 0.1 * np.random.rand(len(self.word2index), dim)
with open("{0}/vector.txt".format(path)) as fl:
for line in fl:
vec = line.strip().split()
word = vec[0].lower()
vec = list(map(float, vec[1:]))
if word in self.word2index:
self.vector[self.word2index[word]] = np.asarray(vec)
self.vector = torch.Tensor(self.vector)
# compute tf
len_voc = len(self.word2index.values())
self.index2nonstop = {}
cnt = 0
for i in range(len_voc):
if i not in self.stop_words_index:
self.index2nonstop[i] = cnt
cnt += 1
# for graph initialization
self.node_id_map, self.id_nodename_map = self.get_node_id_map()
self.node_info_map, self.nodename_attr_map = self.get_node_info()
self.adj = self.get_adj_mat("./data/adj_simple.json", self.node_id_map)
self.nodes_rep = self.get_nodes_rep(self.node_id_map, self.node_info_map)
self.n_entity = len(self.node_id_map)
#get index
self.data = {}
for name in ["train", "valid", "test"]:
self.data[name] = []
for number, item in enumerate(self.text[name]):
len_u = len(item[0])
indices = [[], [[] for _ in range(len_u)], [], [], [], []] #src_len, src, trg, trg_entities, trg_entities_mask, trg_ori
indices[0] = [u.count(' ')+1 for u in item[0]] # space count + 1 == word count; empty padding utterances deliberately get length 1
max_u_len = max(indices[0])
# history
for i in range(len_u):
words = item[0][i].split()
indices[1][i] = [self.word2index[word] if word in self.word2index
else UNK for word in words] + [PAD] * (max_u_len - len(words))
# answer
words = item[1].split()
#print("item1:: ", len(words))
indices[2] = [self.word2index[word] if word in self.word2index
else UNK for word in words]
indices[2].append(EOS)
# answer entity
entities = item[2].split()
#print("item2 entities:: ", len(entities))
indices[3] = [self.node_id_map[entity_name] for entity_name in entities]
indices[3].append(0)
indices[4] = []
for x in indices[3]:
if x == 0:
indices[4].append(0)
else:
indices[4].append(1)
# answer original sentence
words = item[3].split()
indices[5] = words
indices[5].append("<EOS>")
if len(indices[2]) != len(indices[3]):
print(number, len(indices[2]), len(indices[3]))
print(item[1])
print(item[2])
exit()
self.data[name].append(indices)
def get_node_info(self):
node_info_map = json.load(open("./data/entity_info.json"))
nodename_attr_map = {}
for node, res in node_info_map.items():
node_name = node.split("::")[-1]
nodename_attr_map[node_name] = res
return node_info_map, nodename_attr_map
def post_process(self, outputs, pred_ents, topK=1):
outputs = outputs.cpu().numpy().tolist()
pred_ents = pred_ents.cpu().numpy()
entity_attr_list = {
"[attraction_address]",
"[restaurant_address]",
"[attraction_phone]",
"[restaurant_phone]",
"[hotel_address]",
"[restaurant_postcode]",
"[attraction_postcode]",
"[hotel_phone]",
"[hotel_postcode]",
"[hospital_phone]"
}
lens_new = []
for i, out in enumerate(outputs):
new_out = []
for j, each in enumerate(out):
if self.index2word[each] == "<$>":
pred_ent = np.argmax(pred_ents[i][j])
nodename = self.id_nodename_map[pred_ent]
new_out.append(nodename)
elif self.index2word[each] in entity_attr_list:
attr_name = self.index2word[each]
cnt = 0
suc_flag = False
for idx, prob in sorted(enumerate(pred_ents[i][j]), key=lambda i: i[1], reverse=True):
if suc_flag or cnt >= topK:
break
nodename = self.id_nodename_map[idx]
if nodename not in self.nodename_attr_map:
cnt += 1
continue
for attr, val in self.nodename_attr_map[nodename].items():
if attr in attr_name:
new_out.append(val)
suc_flag = True
break
cnt += 1
if not suc_flag:
new_out.append("<UNK>")
else:
new_out.append(self.index2word[each])
"""
if each == self.word2index["<$>"]:
pred_ent = np.argmax(pred_ents[i][j])
nodename = self.id_nodename_map[pred_ent]
nodename_wordids = [self.word2index[x] for x in nodename.split()]
new_out += nodename_wordids
else:
new_out.append(each)
"""
outputs[i] = new_out
return outputs
def get_nodes_rep(self, node_id_map, node_info_map, max_len=50):
nodes_rep = []
nodes_rep_map = []
for name, id_ in sorted(node_id_map.items(), key=lambda i: i[1]):
if name == "none" and id_ == 0:
nodes_rep.append([PAD] * max_len)
nodes_rep_map.append({"words": ["none"], "idx": [0]})
continue
# the attributes used to build relationship
# attributes as nodes: {"pricerange", "area", "food"}
# attributes only as relation: {"internet", "parking", "stars", "attraction_type", "hotel_type"}
# only use the node name as the node's feature
name = name.split("::")[-1]
node_desc = [name]
nodes_rep_idx = [PAD] * max_len
nodes_rep_idx[0] = self.word2index[name]
nodes_rep_word = [name]
"""
for attr, val in node_info_map.items():
#if attr in {"address", "area", "pricerange", "introduction", "food", "stars"} or "type" in attr:
if attr == "introduction":
node_desc.append(val)
node_desc = " ".join(node_desc)
nodes_rep_idx = []
nodes_rep_word = []
for each_word in node_desc.split():
for word in re.split(r'[\[\](::)_]', each_word):
if word == "":
continue
else:
if word not in self.word2index:
continue
else:
word_idx = self.word2index[word]
nodes_rep_idx.append(word_idx)
nodes_rep_word.append(word)
len_ = len(nodes_rep_idx)
if len_ >= max_len:
nodes_rep_idx = nodes_rep_idx[0:max_len]
nodes_rep_word = nodes_rep_word[0:max_len]
else:
nodes_rep_idx += [PAD] * (max_len - len_)
"""
nodes_rep.append(nodes_rep_idx)
nodes_rep_map.append({"words": nodes_rep_word, "idx": nodes_rep_idx})
json.dump(nodes_rep_map, open("nodes_rep_words_idx.json", "w"))
json.dump(self.word2index, open("word2index.json", "w"))
#exit()
return nodes_rep
def get_node_id_map(self):
data = json.load(open("./data/entity_list_simple.json"))
node_id_map = {}
id_nodename_map = {}
for i, node in enumerate(data):
node_id_map[node] = i + 1
tmp = node.split("::")
#node_name = " ".join(tmp[1].split("_"))
node_name = tmp[1]
id_nodename_map[i+1] = node_name
node_id_map["none"] = 0
id_nodename_map[0] = ""
return node_id_map, id_nodename_map
def get_adj_mat(self, input_file, item_id_map):
adj = json.load(open(input_file))
new_adj = {}
for i, neighbors in adj.items():
i_idx = item_id_map[i]
new_adj[i_idx] = []
for j in neighbors:
j_idx = item_id_map[j]
new_adj[i_idx].append(j_idx)
new_adj = nx.adjacency_matrix(nx.from_dict_of_lists(new_adj))
new_adj = self.normalize_adj(new_adj + sp.eye(new_adj.shape[0]))
new_adj = torch.FloatTensor(np.array(new_adj.todense()))
return new_adj
def normalize_adj(self, mx):
"""Row-normalize sparse matrix"""
rowsum = np.array(mx.sum(1))
r_inv_sqrt = np.power(rowsum, -0.5).flatten()
r_inv_sqrt[np.isinf(r_inv_sqrt)] = 0.
r_mat_inv_sqrt = sp.diags(r_inv_sqrt)
return mx.dot(r_mat_inv_sqrt).transpose().dot(r_mat_inv_sqrt).tocoo()
def create_dataset(self, name, batch_size):
datas = self.data[name]
src_seq_lens = []
src_seqs, trg_seqs = [], []
trg_stops, src_tfs = [], []
trg_ents, trg_ents_mask, trg_seqs_ori = [], [], []
nonstop_voc_size = len(self.index2nonstop)
for item in datas:
src_len, src, trg, ents, ents_mask, trg_ori = item
tensor_src_len, tensor_src, tensor_trg = torch.LongTensor(src_len), \
torch.LongTensor(src), torch.LongTensor(trg)
src_seq_lens.append(tensor_src_len)
src_seqs.append(tensor_src)
trg_seqs.append(tensor_trg)
trg_stop = torch.zeros_like(tensor_trg)
for i, index in enumerate(trg):
if index in self.stop_words_index:
trg_stop[i] = 1
trg_stops.append(trg_stop)
src_tf = torch.zeros(nonstop_voc_size)
for j, uttr in enumerate(src):
for i, index in enumerate(uttr):
if i == src_len[j]:
break
if index not in self.stop_words_index:
src_tf[self.index2nonstop[index]] += 1
if src_tf.sum().item() > 0:
src_tf /= src_tf.sum()
src_tfs.append(src_tf)
trg_ents.append(torch.LongTensor(ents))
trg_ents_mask.append(torch.LongTensor(ents_mask))
trg_seqs_ori.append(trg_ori)
print(len(trg_stops), len(trg_seqs), len(trg_ents), len(trg_seqs_ori))
dataset = Dataset(src_seq_lens, src_seqs, trg_seqs, trg_stops, src_tfs, trg_ents, trg_ents_mask, trg_seqs_ori)
dataloader = data.DataLoader(dataset, batch_size, True, num_workers=0, collate_fn=pad_packed_collate)
return dataloader
def compute_stopword(self, y):
res = torch.zeros_like(y).to(device=device)
for i, row in enumerate(y):
words_index = row.tolist()
res[i] = torch.LongTensor([int(index in self.stop_words_index) for index in words_index])
return res
def interpret(self, preds, refs, lens, f):
i = random.randrange(0, len(lens))
l = max(lens)
for j in range(l):
word = self.index2word[preds[i][j].item()]
print(word, end=' ')
f.write('{0} '.format(word))
if word == '<EOS>':
break
print()
f.write('\n')
l = lens[i]
for j in range(l):
word = self.index2word[refs[i][j].item()]
print(word, end=' ')
f.write('{0} '.format(word))
print()
f.write('\n')
class Dataset(data.Dataset):
def __init__(self, src_seq_lens, src_seqs, trg_seqs, trg_stops, src_tfs, trg_ents, trg_ents_mask, trg_seqs_ori):
self.src_seq_lens = src_seq_lens
self.src_seqs = src_seqs
self.trg_seqs = trg_seqs
self.trg_stops = trg_stops
self.src_tfs = src_tfs
self.num_total_seqs = len(src_seqs)
self.trg_ents = trg_ents
self.trg_ents_mask = trg_ents_mask
self.trg_seqs_ori = trg_seqs_ori
def __getitem__(self, index):
src_seq_len = self.src_seq_lens[index]
src_seq = self.src_seqs[index]
trg_seq = self.trg_seqs[index]
trg_stop = self.trg_stops[index]
src_tf = self.src_tfs[index]
trg_ent = self.trg_ents[index]
trg_ent_mask = self.trg_ents_mask[index]
trg_seq_ori = self.trg_seqs_ori[index]
return src_seq_len, src_seq, trg_seq, trg_stop, src_tf, trg_ent, trg_ent_mask, trg_seq_ori
def __len__(self):
return self.num_total_seqs
def pad_packed_collate(batch_data):
def merge(sequences):
lengths = [len(seq) for seq in sequences]
padded_seqs = torch.zeros(len(sequences), max(lengths)).long()
for i, seq in enumerate(sequences):
end = lengths[i]
padded_seqs[i, :end] = seq
return padded_seqs, lengths
def | (sequences, sequence_lengths):
lengths = torch.stack(sequence_lengths)
utterance_length = lengths.shape[1]
padded_seqs = torch.zeros(len(sequences), utterance_length, lengths.max().item()).long()
for i, seq in enumerate(sequences):
word_end = max(lengths[i]).item()
padded_seqs[i, :utterance_length, :word_end] = seq
return padded_seqs, lengths
# sort a list by sequence length (descending order) to use pack_padded_sequence
batch_data.sort(key=lambda x: len(x[0]), reverse=True)
# separate source and target sequences
src_seq_lens, src_seqs, trg_seqs, trg_stops, src_tfs, trg_ents, trg_ents_mask, trg_seqs_ori = zip(*batch_data)
src_seqs, src_lens = hierarchical_merge(src_seqs, src_seq_lens)
trg_seqs, trg_lens = merge(trg_seqs)
trg_stops, _ = merge(trg_stops)
trg_ents, _ = merge(trg_ents)
trg_ents_mask, _ = merge(trg_ents_mask)
return (src_seqs.to(device=device), src_lens.to(device=device),
trg_seqs.to(device=device), trg_lens,
trg_stops.to(device=device), torch.stack(src_tfs).to(device=device),
trg_ents.to(device=device), trg_ents_mask.to(device=device), trg_seqs_ori)
| hierarchical_merge | identifier_name |
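The `merge` helper inside `pad_packed_collate` right-pads a batch of variable-length 1-D LongTensors to a common length. A standalone sketch with toy sequences (values chosen arbitrarily):

import torch

def merge(sequences):
    lengths = [len(seq) for seq in sequences]
    padded = torch.zeros(len(sequences), max(lengths)).long()
    for i, seq in enumerate(sequences):
        padded[i, :lengths[i]] = seq
    return padded, lengths

batch = [torch.LongTensor([5, 6, 7]), torch.LongTensor([8])]
padded, lengths = merge(batch)
# padded == [[5, 6, 7],
#            [8, 0, 0]],  lengths == [3, 1]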
compression_utils.py | # Copyright 2021, Google LLC. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Compression util functions."""
import numpy as np
import tensorflow as tf
DEFAULT_BETA = np.exp(-0.5)
def stochastic_rounding(x, conditional, l2_norm_bound=None, beta=DEFAULT_BETA):
"""Randomly rounds the elements of a tensor to integer values (keeps dtype).
Args:
x: The input tensor.
conditional: A bool constant specifying whether to do conditional rounding
(i.e. keep retrying rounding until L2 norm of the flattened tensor as a
vector doesn't grow too much).
l2_norm_bound: A float constant denoting the bound of the L2 norm of the
input records. This is useful when `l2_norm_bound` is larger than the
input norm, in which case we can allow more leeway during conditional
stochastic rounding. If `None`, defaults to `l2_norm(x)`.
beta: A constant in [0, 1) controlling the concentration inequality for the
probabilistic norm bound after rounding.
Returns:
The rounded tensor.
"""
def post_rounding_l2_norm_bound(x, l2_norm_bound, beta):
"""Computes the L2 norm bound of a vector after rounding (Thm. 1, Eq. 2)."""
beta = tf.cast(beta, x.dtype)
dim = tf.cast(tf.size(x), x.dtype)
if l2_norm_bound is None:
x_norm = tf.norm(x, ord=2)
else:
x_norm = tf.cast(l2_norm_bound, x.dtype)
# We consider 2 (scaled) norm bounds and take the min (Proposition 22).
bound1 = x_norm + tf.sqrt(dim)
squared_bound2 = tf.square(x_norm) + 0.25 * dim
squared_bound2 += (
tf.sqrt(2.0 * tf.math.log(1.0 / beta)) * (x_norm + 0.5 * tf.sqrt(dim)))
bound2 = tf.sqrt(squared_bound2)
# bound2 is inf if beta = 0, in which case we fall back to bound1.
return tf.minimum(bound1, bound2)
conditional = tf.cast(conditional, tf.bool)
l2_norm_threshold = post_rounding_l2_norm_bound(x, l2_norm_bound, beta)
floored_x = tf.floor(x)
decimal_x = x - floored_x
def round_fn(repeat, _):
# 1. Try stochastic rounding on input (ignore previous iterations' outputs).
uniform = tf.random.uniform(tf.shape(x), dtype=x.dtype, minval=0, maxval=1)
bernoulli = uniform < decimal_x
rounded_x = floored_x + tf.cast(bernoulli, x.dtype)
# 2. Try again if the rounded vector has excessive L2 norm.
rounded_l2_norm = tf.norm(rounded_x, ord=2)
repeat = tf.logical_and(conditional,
tf.greater(rounded_l2_norm, l2_norm_threshold))
return [repeat, rounded_x]
repeat = tf.constant(True)
_, result_x = tf.while_loop(
cond=lambda r, _: r, body=round_fn, loop_vars=[repeat, x])
return result_x
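# --- Added illustrative sketch; not part of the original module. ----------
# Unconditional stochastic rounding is unbiased: each element rounds up with
# probability x - floor(x), so averaging many rounded draws approaches x.
def _demo_stochastic_rounding_unbiasedness(num_draws=1000):
  x = tf.constant([0.25, 1.5, -2.75])
  draws = tf.stack(
      [stochastic_rounding(x, conditional=False) for _ in range(num_draws)])
  return tf.reduce_mean(draws, axis=0)  # approaches x as num_draws grows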
def scaled_quantization(x,
scale,
stochastic,
conditional,
l2_norm_bound,
beta=DEFAULT_BETA):
"""Scales the tensors and rounds to integers."""
scale = tf.cast(scale, x.dtype)
l2_norm_bound = tf.cast(l2_norm_bound, x.dtype)
scaled_x = x * scale
scaled_bound = l2_norm_bound * scale
quantized_x = tf.cond(
tf.cast(stochastic, tf.bool),
lambda: stochastic_rounding(scaled_x, conditional, scaled_bound, beta),
lambda: tf.round(scaled_x))
return quantized_x
def inverse_scaled_quantization(x, scale):
"""Restores the value range of `x` from `scaled_quantization`."""
return x / tf.cast(scale, x.dtype)
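# --- Added illustrative sketch; not part of the original module. ----------
# Quantize/dequantize round-trip: with deterministic rounding the
# reconstruction error is at most 0.5 / scale per element.
def _demo_scaled_quantization_roundtrip():
  x = tf.constant([0.1234, -0.5678])
  scale = 1000.0
  quantized = scaled_quantization(
      x, scale, stochastic=False, conditional=False, l2_norm_bound=1.0)
  x_hat = inverse_scaled_quantization(quantized, scale)
  return tf.reduce_max(tf.abs(x - x_hat))  # <= 0.5 / scale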
def flatten_concat(structure):
"""Flattens each tensor in the structure and concats them as a vector.
Each tensor within the structure should have rank >= 1 (i.e. no scalars).
Args:
structure: The input structure of tensors.
Returns:
The flattened and concatenated component tensors as a tf.Tensor with
shape (d,) where `d` is the total number of elements in the structure.
"""
flattened_as_list = []
for x in tf.nest.flatten(structure):
with tf.control_dependencies([tf.debugging.assert_rank_at_least(x, 1)]):
flattened_as_list.append(tf.reshape(x, [-1]))
return tf.concat(flattened_as_list, axis=0)
def inverse_flatten_concat(flat_vector, original_structure):
"""Applies the inverse of `flatten_concat` given the original structure."""
location, split_tensors = 0, []
for orig_t in tf.nest.flatten(original_structure):
length = tf.size(orig_t)
split_vector = tf.slice(flat_vector, [location], [length])
split_tensors.append(tf.reshape(split_vector, orig_t.shape))
location += length
return tf.nest.pack_sequence_as(original_structure, split_tensors)
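# --- Added illustrative sketch; not part of the original module. ----------
# flatten_concat and inverse_flatten_concat are exact inverses for any
# structure of rank >= 1 tensors.
def _demo_flatten_concat_roundtrip():
  structure = {'bias': tf.zeros([4]), 'kernel': tf.ones([2, 3])}
  flat = flatten_concat(structure)                   # shape (10,)
  restored = inverse_flatten_concat(flat, structure)
  return flat, restored                              # restored == structure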
def sample_rademacher(shape, dtype, seed_pair):
"""Sample uniform random +1/-1 values with specified shape/dtype/seed_pair."""
rand_uniform = tf.random.stateless_uniform(shape=shape, seed=seed_pair)
return tf.cast(tf.sign(rand_uniform - 0.5), dtype)
def pad_zeros(x):
"""Pads a vector with shape (d,) with zeros to the next power of two."""
dim = tf.shape(x)[0]
log2_dim = tf.math.log(tf.cast(dim, tf.float32)) / tf.math.log(2.0)
pad_dim = tf.pow(2, tf.cast(tf.math.ceil(log2_dim), tf.int32))
with tf.control_dependencies([tf.debugging.assert_rank(x, 1)]):
return tf.pad(x, [[0, tf.maximum(0, pad_dim - dim)]])
def randomized_hadamard_transform(x, seed_pair, repeat=1):
"""Applies randomized Hadamard transform to a vector with the given seed.
Args:
x: The input vector.
seed_pair: The seed pair for generating randomness.
repeat: Number of times to repeat the randomized Hadamard transform.
Returns:
The transformed vector.
"""
def | (repeat_index, x):
# All sources of randomness depend on the input seed.
cur_seed = seed_pair + repeat_index
# Randomly flip signs.
signs = sample_rademacher(tf.shape(x), dtype=x.dtype, seed_pair=cur_seed)
rademacher_x = signs * x
# Apply Hadamard (+ expand/squeeze dims).
encoded_x = tf.squeeze(
fast_walsh_hadamard_transform(tf.expand_dims(rademacher_x, axis=0)),
axis=0)
return encoded_x
tf.debugging.assert_type(x, tf.float32)
padded_x = pad_zeros(x) # Hadamard transform requires vectors with 2^n dims.
i, result_x = tf.constant(0), padded_x
cond_fn = lambda i, _: tf.less(i, repeat)
body_fn = lambda i, x: [tf.add(i, 1), apply_transform(i, x)]
_, result_x = tf.while_loop(cond_fn, body_fn, [i, result_x])
return result_x
def inverse_randomized_hadamard_transform(x, original_dim, seed_pair, repeat=1):
"""Applies inverse of `randomized_hadamard_transform` with the given seed.
Args:
x: The transformed vector.
original_dim: The dimension of the original vector.
seed_pair: The same seed pair used in the forward transform.
repeat: Number of times the randomized Hadamard transform was applied.
Returns:
The original vector.
"""
def inverse_transform(repeat_index, x):
# All sources of randomness depend on the input seed.
cur_seed = seed_pair + repeat_index
# Apply Hadamard.
unrotated_x = fast_walsh_hadamard_transform(tf.expand_dims(x, axis=0))
unrotated_x = tf.squeeze(unrotated_x, axis=0)
# Unflip signs.
signs = sample_rademacher(
tf.shape(unrotated_x), dtype=x.dtype, seed_pair=cur_seed)
decoded_x = signs * unrotated_x
return decoded_x
# Repeat inverse transforms (with reversed indices).
tf.debugging.assert_type(x, tf.float32)
i, result_x = tf.constant(repeat - 1), x
cond_fn = lambda i, _: tf.greater_equal(i, 0)
body_fn = lambda i, x: [tf.subtract(i, 1), inverse_transform(i, x)]
_, result_x = tf.while_loop(cond_fn, body_fn, [i, result_x])
# Unpad zeros from forward transform.
return result_x[:original_dim]
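# --- Added illustrative sketch; not part of the original module. ----------
# Forward/inverse randomized Hadamard round-trip. The input is padded to the
# next power of two internally and unpadded on the way back.
def _demo_randomized_hadamard_roundtrip():
  x = tf.random.normal([10])
  seed_pair = tf.constant([42, 99])
  rotated = randomized_hadamard_transform(x, seed_pair=seed_pair)
  x_hat = inverse_randomized_hadamard_transform(
      rotated, original_dim=10, seed_pair=seed_pair)
  return tf.reduce_max(tf.abs(x - x_hat))  # ~0 up to float roundoff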
def fast_walsh_hadamard_transform(x):
"""Applies the fast Walsh-Hadamard transform to a set of vectors.
This method uses a composition of existing TensorFlow operations to implement
the transform.
This function is forked from https://github.com/tensorflow/model-optimization.
Args:
x: A `Tensor`. Must be of shape `[a, b]`, where `a` can be anything (not
necessarily known), and `b` must be a power of two, not required to be
statically known.
Returns:
A `Tensor` of shape `[a, b]`, where `[i, :]` is the product `x[i, :]*H`,
where `H` is the Hadamard matrix.
Raises:
ValueError: If the input is not rank 2 `Tensor`, and if the second dimension
is statically known and is not a power of two.
OpError: If the second dimension is not statically known and is not a power
of two. Note that in graph execution, this error is not raised during the
execution of the Python function, but during execution of the resulting
computation.
"""
with tf.compat.v1.name_scope(None, 'fast_walsh_hadamard_transform'):
# Validate input.
x = tf.convert_to_tensor(x)
if x.shape.ndims != 2:
raise ValueError('Number of dimensions of x must be 2. Shape of x: %s' %
x.shape)
original_x_shape = x.shape.as_list()
dim = x.shape.as_list()[-1]
if dim is None: # dim is not statically known.
dim = tf.shape(x)[-1]
log2 = tf.cast(
tf.math.round(
tf.math.log(tf.cast(dim, tf.float32)) / tf.math.log(2.)),
tf.int32)
with tf.control_dependencies([
tf.compat.v1.assert_equal(
dim,
tf.math.pow(2, log2),
message='The dimension of x must be a power of two.'
'Provided dimension is: %s' % dim)
]):
x = tf.identity(x)
else: # dim is statically known.
if not (dim and ((dim & (dim - 1)) == 0)):
raise ValueError('The dimension of x must be a power of two. '
'Provided dimension is: %s' % dim)
log2 = int(np.ceil(np.log2(dim)))
if dim == 1: # Equivalent to identity.
return tf.identity(x)
h_core = tf.constant([[1., 1.], [1., -1.]],
dtype=x.dtype,
name='hadamard_weights_2x2')
permutation = tf.constant([0, 2, 1], name='hadamard_permutation')
# A step of the fast Walsh-Hadamard algorithm.
def _hadamard_step(x, dim):
"""A single step in the fast Walsh-Hadamard transform."""
x_shape = x.shape.as_list()
x = tf.reshape(x, [-1, 2]) # Reshape so that we have a matrix.
x = tf.matmul(x, h_core) # Multiply.
x = tf.reshape(x, [-1, dim // 2, 2]) # Reshape to rank-3.
x = tf.transpose(x, perm=permutation) # Swap last two dimensions.
x.set_shape(x_shape) # Failed shape inference in tf.while_loop.
return x
def _fwht(x, dim, log2):
x = tf.reshape(x, [-1, 2, dim // 2])
# The fast Walsh-Hadamard transform.
i = tf.constant(0)
c = lambda i, x: tf.less(i, log2)
b = lambda i, x: [i + 1, _hadamard_step(x, dim)]
i, x = tf.while_loop(c, b, [i, x])
return x
x = tf.cond(
tf.equal(dim, 1), lambda: tf.identity(x), lambda: _fwht(x, dim, log2))
x = tf.reshape(x, [-1, dim])
x /= tf.sqrt(tf.cast(dim, x.dtype)) # Normalize.
x.set_shape(original_x_shape) # Failed shape inference after tf.while_loop.
return x
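# Illustrative sketch (added for exposition; not in the original file): for
# dim=2 the normalized transform is x -> x @ [[1, 1], [1, -1]] / sqrt(2), so
# [1, 0] maps to [0.7071, 0.7071], and applying the transform twice is the
# identity because the normalized Hadamard matrix is its own inverse.
def _fwht_small_demo():
  x = tf.constant([[1., 0.]])
  y = fast_walsh_hadamard_transform(x)  # [[0.7071, 0.7071]]
  return fast_walsh_hadamard_transform(y)  # back to [[1., 0.]]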
| apply_transform | identifier_name |
compression_utils.py | # Copyright 2021, Google LLC. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Compression util functions."""
import numpy as np
import tensorflow as tf
DEFAULT_BETA = np.exp(-0.5)
def stochastic_rounding(x, conditional, l2_norm_bound=None, beta=DEFAULT_BETA):
"""Randomly rounds the elements of a tensor to integer values (keeps dtype).
Args:
x: The input tensor.
conditional: A bool constant specifying whether to do conditional rounding
(i.e. keep retrying rounding until L2 norm of the flattened tensor as a
vector doesn't grow too much).
l2_norm_bound: A float constant denoting the bound of the L2 norm of the
input records. This is useful when `l2_norm_bound` is larger than the
input norm, in which case we can allow more leeway during conditional
      stochastic rounding. If `None`, defaults to `l2_norm(x)`.
beta: A constant in [0, 1) controlling the concentration inequality for the
probabilistic norm bound after rounding.
Returns:
The rounded tensor.
"""
def post_rounding_l2_norm_bound(x, l2_norm_bound, beta):
"""Computes the L2 norm bound of a vector after rounding (Thm. 1, Eq. 2)."""
beta = tf.cast(beta, x.dtype)
dim = tf.cast(tf.size(x), x.dtype)
if l2_norm_bound is None:
x_norm = tf.norm(x, ord=2)
else:
x_norm = tf.cast(l2_norm_bound, x.dtype)
# We consider 2 (scaled) norm bounds and take the min (Proposition 22).
bound1 = x_norm + tf.sqrt(dim)
squared_bound2 = tf.square(x_norm) + 0.25 * dim
squared_bound2 += (
tf.sqrt(2.0 * tf.math.log(1.0 / beta)) * (x_norm + 0.5 * tf.sqrt(dim)))
bound2 = tf.sqrt(squared_bound2)
# bound2 is inf if beta = 0, in which case we fall back to bound1.
return tf.minimum(bound1, bound2)
conditional = tf.cast(conditional, tf.bool)
l2_norm_threshold = post_rounding_l2_norm_bound(x, l2_norm_bound, beta)
floored_x = tf.floor(x)
decimal_x = x - floored_x
def round_fn(repeat, _):
# 1. Try stochastic rounding on input (ignore previous iterations' outputs).
uniform = tf.random.uniform(tf.shape(x), dtype=x.dtype, minval=0, maxval=1)
bernoulli = uniform < decimal_x
rounded_x = floored_x + tf.cast(bernoulli, x.dtype)
# 2. Try again if the rounded vector has excessive L2 norm.
rounded_l2_norm = tf.norm(rounded_x, ord=2)
repeat = tf.logical_and(conditional,
tf.greater(rounded_l2_norm, l2_norm_threshold))
return [repeat, rounded_x]
repeat = tf.constant(True)
_, result_x = tf.while_loop(
cond=lambda r, _: r, body=round_fn, loop_vars=[repeat, x])
return result_x
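# Illustrative sketch (hypothetical demo, not part of the original module):
# stochastic rounding is unbiased, i.e. E[rounded(x)] == x elementwise, and
# with conditional=True it retries until the rounded vector's L2 norm stays
# within the post-rounding bound computed above.
def _stochastic_rounding_demo():
  x = tf.constant([0.25, 1.75, -2.5])
  # Average many independent roundings; the mean approaches x.
  rounded = tf.stack(
      [stochastic_rounding(x, conditional=True) for _ in range(1000)])
  return tf.reduce_mean(rounded, axis=0)  # ~[0.25, 1.75, -2.5]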
def scaled_quantization(x,
scale,
stochastic,
conditional,
l2_norm_bound,
beta=DEFAULT_BETA):
"""Scales the tensors and rounds to integers."""
scale = tf.cast(scale, x.dtype)
l2_norm_bound = tf.cast(l2_norm_bound, x.dtype)
scaled_x = x * scale
scaled_bound = l2_norm_bound * scale
quantized_x = tf.cond(
tf.cast(stochastic, tf.bool),
lambda: stochastic_rounding(scaled_x, conditional, scaled_bound, beta),
lambda: tf.round(scaled_x))
return quantized_x
def inverse_scaled_quantization(x, scale):
"""Restores the value range of `x` from `scaled_quantization`."""
return x / tf.cast(scale, x.dtype)
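# Illustrative sketch (hypothetical numbers): with deterministic rounding the
# quantize/dequantize roundtrip error is at most 0.5 / scale per element,
# since quantization maps x -> round(x * scale) and the inverse divides the
# scale back out.
def _quantization_roundtrip_demo(scale=1024.0):
  x = tf.random.normal([8])
  q = scaled_quantization(
      x, scale, stochastic=False, conditional=False,
      l2_norm_bound=tf.norm(x, ord=2))
  x_hat = inverse_scaled_quantization(q, scale)
  return tf.reduce_max(tf.abs(x - x_hat))  # <= 0.5 / scale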
def flatten_concat(structure):
"""Flattens each tensor in the structure and concats them as a vector.
Each tensor within the structure should have rank >= 1 (i.e. no scalars).
Args:
structure: The input structure of tensors.
Returns:
The flattened and concatenated component tensors as a tf.Tensor with
shape (d,) where `d` is the total number of elements in the structure.
"""
flattened_as_list = []
for x in tf.nest.flatten(structure):
with tf.control_dependencies([tf.debugging.assert_rank_at_least(x, 1)]):
flattened_as_list.append(tf.reshape(x, [-1]))
return tf.concat(flattened_as_list, axis=0)
def inverse_flatten_concat(flat_vector, original_structure):
"""Applies the inverse of `flatten_concat` given the original structure."""
location, split_tensors = 0, []
for orig_t in tf.nest.flatten(original_structure):
length = tf.size(orig_t)
split_vector = tf.slice(flat_vector, [location], [length])
split_tensors.append(tf.reshape(split_vector, orig_t.shape))
location += length
return tf.nest.pack_sequence_as(original_structure, split_tensors)
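# Illustrative sketch (hypothetical structure): flattening and unflattening
# with the original structure is a lossless roundtrip as long as every leaf
# tensor has rank >= 1.
def _flatten_concat_roundtrip_demo():
  structure = {'w': tf.ones([2, 3]), 'b': tf.zeros([3])}
  flat = flatten_concat(structure)  # shape (9,)
  restored = inverse_flatten_concat(flat, structure)
  return tf.nest.map_structure(
      lambda a, b: tf.reduce_all(a == b), structure, restored)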
def sample_rademacher(shape, dtype, seed_pair):
"""Sample uniform random +1/-1 values with specified shape/dtype/seed_pair."""
rand_uniform = tf.random.stateless_uniform(shape=shape, seed=seed_pair)
return tf.cast(tf.sign(rand_uniform - 0.5), dtype)
def pad_zeros(x):
"""Pads a vector with shape (d,) with zeros to the next power of two."""
dim = tf.shape(x)[0]
log2_dim = tf.math.log(tf.cast(dim, tf.float32)) / tf.math.log(2.0)
pad_dim = tf.pow(2, tf.cast(tf.math.ceil(log2_dim), tf.int32))
with tf.control_dependencies([tf.debugging.assert_rank(x, 1)]):
return tf.pad(x, [[0, tf.maximum(0, pad_dim - dim)]])
def randomized_hadamard_transform(x, seed_pair, repeat=1):
"""Applies randomized Hadamard transform to a vector with the given seed.
Args:
x: The input vector.
seed_pair: The seed pair for generating randomness.
repeat: Number of times to repeat the randomized Hadamard transform.
Returns:
The transformed vector.
"""
def apply_transform(repeat_index, x):
# All sources of randomness depend on the input seed.
cur_seed = seed_pair + repeat_index
# Randomly flip signs.
signs = sample_rademacher(tf.shape(x), dtype=x.dtype, seed_pair=cur_seed)
rademacher_x = signs * x
# Apply Hadamard (+ expand/squeeze dims).
encoded_x = tf.squeeze(
fast_walsh_hadamard_transform(tf.expand_dims(rademacher_x, axis=0)),
axis=0)
return encoded_x
tf.debugging.assert_type(x, tf.float32)
padded_x = pad_zeros(x) # Hadamard transform requires vectors with 2^n dims.
i, result_x = tf.constant(0), padded_x
cond_fn = lambda i, _: tf.less(i, repeat)
body_fn = lambda i, x: [tf.add(i, 1), apply_transform(i, x)]
_, result_x = tf.while_loop(cond_fn, body_fn, [i, result_x])
return result_x
def inverse_randomized_hadamard_transform(x, original_dim, seed_pair, repeat=1):
"""Applies inverse of `randomized_hadamard_transform` with the given seed.
Args:
x: The transformed vector.
original_dim: The dimension of the original vector.
seed_pair: The same seed pair used in the forward transform.
repeat: Number of times the randomized Hadamard transform was applied.
Returns:
The original vector.
"""
def inverse_transform(repeat_index, x):
# All sources of randomness depend on the input seed.
cur_seed = seed_pair + repeat_index
# Apply Hadamard.
unrotated_x = fast_walsh_hadamard_transform(tf.expand_dims(x, axis=0))
unrotated_x = tf.squeeze(unrotated_x, axis=0)
# Unflip signs.
signs = sample_rademacher(
tf.shape(unrotated_x), dtype=x.dtype, seed_pair=cur_seed)
decoded_x = signs * unrotated_x
return decoded_x
# Repeat inverse transforms (with reversed indices).
tf.debugging.assert_type(x, tf.float32)
i, result_x = tf.constant(repeat - 1), x
cond_fn = lambda i, _: tf.greater_equal(i, 0)
body_fn = lambda i, x: [tf.subtract(i, 1), inverse_transform(i, x)]
_, result_x = tf.while_loop(cond_fn, body_fn, [i, result_x])
# Unpad zeros from forward transform.
return result_x[:original_dim]
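# Illustrative sketch (hypothetical seed values): with the same seed pair the
# inverse recovers the original vector up to float32 error, because H is
# orthogonal and the Rademacher sign flips are replayed from the seed.
def _hadamard_roundtrip_demo():
  x = tf.random.normal([5])
  seed_pair = tf.constant([42, 7])
  y = randomized_hadamard_transform(x, seed_pair, repeat=2)  # shape (8,)
  x_hat = inverse_randomized_hadamard_transform(
      y, original_dim=5, seed_pair=seed_pair, repeat=2)
  return tf.reduce_max(tf.abs(x - x_hat))  # ~0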
def fast_walsh_hadamard_transform(x):
"""Applies the fast Walsh-Hadamard transform to a set of vectors.
This method uses a composition of existing TensorFlow operations to implement
the transform.
This function is forked from https://github.com/tensorflow/model-optimization.
Args:
x: A `Tensor`. Must be of shape `[a, b]`, where `a` can be anything (not
necessarily known), and `b` must be a power of two, not required to be
statically known.
Returns:
A `Tensor` of shape `[a, b]`, where `[i, :]` is the product `x[i, :]*H`,
where `H` is the Hadamard matrix.
Raises:
    ValueError: If the input is not a rank-2 `Tensor`, or if the second
      dimension is statically known and is not a power of two.
OpError: If the second dimension is not statically known and is not a power
of two. Note that in graph execution, this error is not raised during the
execution of the Python function, but during execution of the resulting
computation.
"""
with tf.compat.v1.name_scope(None, 'fast_walsh_hadamard_transform'):
# Validate input.
x = tf.convert_to_tensor(x)
if x.shape.ndims != 2:
raise ValueError('Number of dimensions of x must be 2. Shape of x: %s' %
x.shape)
original_x_shape = x.shape.as_list()
dim = x.shape.as_list()[-1]
if dim is None: # dim is not statically known.
dim = tf.shape(x)[-1]
log2 = tf.cast(
tf.math.round(
tf.math.log(tf.cast(dim, tf.float32)) / tf.math.log(2.)),
tf.int32)
with tf.control_dependencies([
tf.compat.v1.assert_equal(
dim,
tf.math.pow(2, log2),
            message='The dimension of x must be a power of two. '
            'Provided dimension is: %s' % dim)
]):
x = tf.identity(x)
else: # dim is statically known.
if not (dim and ((dim & (dim - 1)) == 0)):
raise ValueError('The dimension of x must be a power of two. '
'Provided dimension is: %s' % dim)
log2 = int(np.ceil(np.log2(dim)))
if dim == 1: # Equivalent to identity.
return tf.identity(x)
h_core = tf.constant([[1., 1.], [1., -1.]],
dtype=x.dtype,
name='hadamard_weights_2x2')
permutation = tf.constant([0, 2, 1], name='hadamard_permutation')
# A step of the fast Walsh-Hadamard algorithm.
def _hadamard_step(x, dim):
"""A single step in the fast Walsh-Hadamard transform."""
x_shape = x.shape.as_list()
x = tf.reshape(x, [-1, 2]) # Reshape so that we have a matrix.
x = tf.matmul(x, h_core) # Multiply.
x = tf.reshape(x, [-1, dim // 2, 2]) # Reshape to rank-3.
x = tf.transpose(x, perm=permutation) # Swap last two dimensions. | def _fwht(x, dim, log2):
x = tf.reshape(x, [-1, 2, dim // 2])
# The fast Walsh-Hadamard transform.
i = tf.constant(0)
c = lambda i, x: tf.less(i, log2)
b = lambda i, x: [i + 1, _hadamard_step(x, dim)]
i, x = tf.while_loop(c, b, [i, x])
return x
x = tf.cond(
tf.equal(dim, 1), lambda: tf.identity(x), lambda: _fwht(x, dim, log2))
x = tf.reshape(x, [-1, dim])
x /= tf.sqrt(tf.cast(dim, x.dtype)) # Normalize.
x.set_shape(original_x_shape) # Failed shape inference after tf.while_loop.
return x | x.set_shape(x_shape) # Failed shape inference in tf.while_loop.
return x
| random_line_split |
compression_utils.py | # Copyright 2021, Google LLC. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Compression util functions."""
import numpy as np
import tensorflow as tf
DEFAULT_BETA = np.exp(-0.5)
def stochastic_rounding(x, conditional, l2_norm_bound=None, beta=DEFAULT_BETA):
"""Randomly rounds the elements of a tensor to integer values (keeps dtype).
Args:
x: The input tensor.
conditional: A bool constant specifying whether to do conditional rounding
(i.e. keep retrying rounding until L2 norm of the flattened tensor as a
vector doesn't grow too much).
l2_norm_bound: A float constant denoting the bound of the L2 norm of the
input records. This is useful when `l2_norm_bound` is larger than the
input norm, in which case we can allow more leeway during conditional
      stochastic rounding. If `None`, defaults to `l2_norm(x)`.
beta: A constant in [0, 1) controlling the concentration inequality for the
probabilistic norm bound after rounding.
Returns:
The rounded tensor.
"""
def post_rounding_l2_norm_bound(x, l2_norm_bound, beta):
"""Computes the L2 norm bound of a vector after rounding (Thm. 1, Eq. 2)."""
beta = tf.cast(beta, x.dtype)
dim = tf.cast(tf.size(x), x.dtype)
if l2_norm_bound is None:
x_norm = tf.norm(x, ord=2)
else:
x_norm = tf.cast(l2_norm_bound, x.dtype)
# We consider 2 (scaled) norm bounds and take the min (Proposition 22).
bound1 = x_norm + tf.sqrt(dim)
squared_bound2 = tf.square(x_norm) + 0.25 * dim
squared_bound2 += (
tf.sqrt(2.0 * tf.math.log(1.0 / beta)) * (x_norm + 0.5 * tf.sqrt(dim)))
bound2 = tf.sqrt(squared_bound2)
# bound2 is inf if beta = 0, in which case we fall back to bound1.
return tf.minimum(bound1, bound2)
conditional = tf.cast(conditional, tf.bool)
l2_norm_threshold = post_rounding_l2_norm_bound(x, l2_norm_bound, beta)
floored_x = tf.floor(x)
decimal_x = x - floored_x
def round_fn(repeat, _):
# 1. Try stochastic rounding on input (ignore previous iterations' outputs).
uniform = tf.random.uniform(tf.shape(x), dtype=x.dtype, minval=0, maxval=1)
bernoulli = uniform < decimal_x
rounded_x = floored_x + tf.cast(bernoulli, x.dtype)
# 2. Try again if the rounded vector has excessive L2 norm.
rounded_l2_norm = tf.norm(rounded_x, ord=2)
repeat = tf.logical_and(conditional,
tf.greater(rounded_l2_norm, l2_norm_threshold))
return [repeat, rounded_x]
repeat = tf.constant(True)
_, result_x = tf.while_loop(
cond=lambda r, _: r, body=round_fn, loop_vars=[repeat, x])
return result_x
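# Illustrative sketch (worked numbers assumed for exposition): the bound is
#   min(||x|| + sqrt(d),
#       sqrt(||x||^2 + d/4 + sqrt(2*ln(1/beta)) * (||x|| + sqrt(d)/2))).
# For ||x|| = 1, d = 4, beta = exp(-0.5): bound1 = 3.0 and
# bound2 = sqrt(1 + 1 + 1 * (1 + 1)) = 2.0, so the tighter bound2 is used.
def _norm_bound_demo():
  x = tf.constant([1.0, 0.0, 0.0, 0.0])
  # Integer inputs round to themselves, so this returns x unchanged.
  return stochastic_rounding(x, conditional=True, beta=DEFAULT_BETA)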
def scaled_quantization(x,
scale,
stochastic,
conditional,
l2_norm_bound,
beta=DEFAULT_BETA):
"""Scales the tensors and rounds to integers."""
scale = tf.cast(scale, x.dtype)
l2_norm_bound = tf.cast(l2_norm_bound, x.dtype)
scaled_x = x * scale
scaled_bound = l2_norm_bound * scale
quantized_x = tf.cond(
tf.cast(stochastic, tf.bool),
lambda: stochastic_rounding(scaled_x, conditional, scaled_bound, beta),
lambda: tf.round(scaled_x))
return quantized_x
def inverse_scaled_quantization(x, scale):
"""Restores the value range of `x` from `scaled_quantization`."""
return x / tf.cast(scale, x.dtype)
def flatten_concat(structure):
"""Flattens each tensor in the structure and concats them as a vector.
Each tensor within the structure should have rank >= 1 (i.e. no scalars).
Args:
structure: The input structure of tensors.
Returns:
The flattened and concatenated component tensors as a tf.Tensor with
shape (d,) where `d` is the total number of elements in the structure.
"""
flattened_as_list = []
for x in tf.nest.flatten(structure):
with tf.control_dependencies([tf.debugging.assert_rank_at_least(x, 1)]):
flattened_as_list.append(tf.reshape(x, [-1]))
return tf.concat(flattened_as_list, axis=0)
def inverse_flatten_concat(flat_vector, original_structure):
"""Applies the inverse of `flatten_concat` given the original structure."""
location, split_tensors = 0, []
for orig_t in tf.nest.flatten(original_structure):
length = tf.size(orig_t)
split_vector = tf.slice(flat_vector, [location], [length])
split_tensors.append(tf.reshape(split_vector, orig_t.shape))
location += length
return tf.nest.pack_sequence_as(original_structure, split_tensors)
def sample_rademacher(shape, dtype, seed_pair):
"""Sample uniform random +1/-1 values with specified shape/dtype/seed_pair."""
rand_uniform = tf.random.stateless_uniform(shape=shape, seed=seed_pair)
return tf.cast(tf.sign(rand_uniform - 0.5), dtype)
def pad_zeros(x):
"""Pads a vector with shape (d,) with zeros to the next power of two."""
dim = tf.shape(x)[0]
log2_dim = tf.math.log(tf.cast(dim, tf.float32)) / tf.math.log(2.0)
pad_dim = tf.pow(2, tf.cast(tf.math.ceil(log2_dim), tf.int32))
with tf.control_dependencies([tf.debugging.assert_rank(x, 1)]):
return tf.pad(x, [[0, tf.maximum(0, pad_dim - dim)]])
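# Illustrative sketch (example shapes assumed): a 5-dimensional vector is
# padded to the next power of two (8), while a vector whose length is already
# a power of two passes through unchanged.
def _pad_zeros_demo():
  padded = pad_zeros(tf.ones([5]))  # shape (8,): [1, 1, 1, 1, 1, 0, 0, 0]
  unchanged = pad_zeros(tf.ones([4]))  # shape (4,), no padding added
  return padded, unchanged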
def randomized_hadamard_transform(x, seed_pair, repeat=1):
|
def inverse_randomized_hadamard_transform(x, original_dim, seed_pair, repeat=1):
"""Applies inverse of `randomized_hadamard_transform` with the given seed.
Args:
x: The transformed vector.
original_dim: The dimension of the original vector.
seed_pair: The same seed pair used in the forward transform.
repeat: Number of times the randomized Hadamard transform was applied.
Returns:
The original vector.
"""
def inverse_transform(repeat_index, x):
# All sources of randomness depend on the input seed.
cur_seed = seed_pair + repeat_index
# Apply Hadamard.
unrotated_x = fast_walsh_hadamard_transform(tf.expand_dims(x, axis=0))
unrotated_x = tf.squeeze(unrotated_x, axis=0)
# Unflip signs.
signs = sample_rademacher(
tf.shape(unrotated_x), dtype=x.dtype, seed_pair=cur_seed)
decoded_x = signs * unrotated_x
return decoded_x
# Repeat inverse transforms (with reversed indices).
tf.debugging.assert_type(x, tf.float32)
i, result_x = tf.constant(repeat - 1), x
cond_fn = lambda i, _: tf.greater_equal(i, 0)
body_fn = lambda i, x: [tf.subtract(i, 1), inverse_transform(i, x)]
_, result_x = tf.while_loop(cond_fn, body_fn, [i, result_x])
# Unpad zeros from forward transform.
return result_x[:original_dim]
def fast_walsh_hadamard_transform(x):
"""Applies the fast Walsh-Hadamard transform to a set of vectors.
This method uses a composition of existing TensorFlow operations to implement
the transform.
This function is forked from https://github.com/tensorflow/model-optimization.
Args:
x: A `Tensor`. Must be of shape `[a, b]`, where `a` can be anything (not
necessarily known), and `b` must be a power of two, not required to be
statically known.
Returns:
A `Tensor` of shape `[a, b]`, where `[i, :]` is the product `x[i, :]*H`,
where `H` is the Hadamard matrix.
Raises:
    ValueError: If the input is not a rank-2 `Tensor`, or if the second
      dimension is statically known and is not a power of two.
OpError: If the second dimension is not statically known and is not a power
of two. Note that in graph execution, this error is not raised during the
execution of the Python function, but during execution of the resulting
computation.
"""
with tf.compat.v1.name_scope(None, 'fast_walsh_hadamard_transform'):
# Validate input.
x = tf.convert_to_tensor(x)
if x.shape.ndims != 2:
raise ValueError('Number of dimensions of x must be 2. Shape of x: %s' %
x.shape)
original_x_shape = x.shape.as_list()
dim = x.shape.as_list()[-1]
if dim is None: # dim is not statically known.
dim = tf.shape(x)[-1]
log2 = tf.cast(
tf.math.round(
tf.math.log(tf.cast(dim, tf.float32)) / tf.math.log(2.)),
tf.int32)
with tf.control_dependencies([
tf.compat.v1.assert_equal(
dim,
tf.math.pow(2, log2),
            message='The dimension of x must be a power of two. '
            'Provided dimension is: %s' % dim)
]):
x = tf.identity(x)
else: # dim is statically known.
if not (dim and ((dim & (dim - 1)) == 0)):
raise ValueError('The dimension of x must be a power of two. '
'Provided dimension is: %s' % dim)
log2 = int(np.ceil(np.log2(dim)))
if dim == 1: # Equivalent to identity.
return tf.identity(x)
h_core = tf.constant([[1., 1.], [1., -1.]],
dtype=x.dtype,
name='hadamard_weights_2x2')
permutation = tf.constant([0, 2, 1], name='hadamard_permutation')
# A step of the fast Walsh-Hadamard algorithm.
def _hadamard_step(x, dim):
"""A single step in the fast Walsh-Hadamard transform."""
x_shape = x.shape.as_list()
x = tf.reshape(x, [-1, 2]) # Reshape so that we have a matrix.
x = tf.matmul(x, h_core) # Multiply.
x = tf.reshape(x, [-1, dim // 2, 2]) # Reshape to rank-3.
x = tf.transpose(x, perm=permutation) # Swap last two dimensions.
x.set_shape(x_shape) # Failed shape inference in tf.while_loop.
return x
def _fwht(x, dim, log2):
x = tf.reshape(x, [-1, 2, dim // 2])
# The fast Walsh-Hadamard transform.
i = tf.constant(0)
c = lambda i, x: tf.less(i, log2)
b = lambda i, x: [i + 1, _hadamard_step(x, dim)]
i, x = tf.while_loop(c, b, [i, x])
return x
x = tf.cond(
tf.equal(dim, 1), lambda: tf.identity(x), lambda: _fwht(x, dim, log2))
x = tf.reshape(x, [-1, dim])
x /= tf.sqrt(tf.cast(dim, x.dtype)) # Normalize.
x.set_shape(original_x_shape) # Failed shape inference after tf.while_loop.
return x
| """Applies randomized Hadamard transform to a vector with the given seed.
Args:
x: The input vector.
seed_pair: The seed pair for generating randomness.
repeat: Number of times to repeat the randomized Hadamard transform.
Returns:
The transformed vector.
"""
def apply_transform(repeat_index, x):
# All sources of randomness depend on the input seed.
cur_seed = seed_pair + repeat_index
# Randomly flip signs.
signs = sample_rademacher(tf.shape(x), dtype=x.dtype, seed_pair=cur_seed)
rademacher_x = signs * x
# Apply Hadamard (+ expand/squeeze dims).
encoded_x = tf.squeeze(
fast_walsh_hadamard_transform(tf.expand_dims(rademacher_x, axis=0)),
axis=0)
return encoded_x
tf.debugging.assert_type(x, tf.float32)
padded_x = pad_zeros(x) # Hadamard transform requires vectors with 2^n dims.
i, result_x = tf.constant(0), padded_x
cond_fn = lambda i, _: tf.less(i, repeat)
body_fn = lambda i, x: [tf.add(i, 1), apply_transform(i, x)]
_, result_x = tf.while_loop(cond_fn, body_fn, [i, result_x])
return result_x | identifier_body |
compression_utils.py | # Copyright 2021, Google LLC. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Compression util functions."""
import numpy as np
import tensorflow as tf
DEFAULT_BETA = np.exp(-0.5)
def stochastic_rounding(x, conditional, l2_norm_bound=None, beta=DEFAULT_BETA):
"""Randomly rounds the elements of a tensor to integer values (keeps dtype).
Args:
x: The input tensor.
conditional: A bool constant specifying whether to do conditional rounding
(i.e. keep retrying rounding until L2 norm of the flattened tensor as a
vector doesn't grow too much).
l2_norm_bound: A float constant denoting the bound of the L2 norm of the
input records. This is useful when `l2_norm_bound` is larger than the
input norm, in which case we can allow more leeway during conditional
      stochastic rounding. If `None`, defaults to `l2_norm(x)`.
beta: A constant in [0, 1) controlling the concentration inequality for the
probabilistic norm bound after rounding.
Returns:
The rounded tensor.
"""
def post_rounding_l2_norm_bound(x, l2_norm_bound, beta):
"""Computes the L2 norm bound of a vector after rounding (Thm. 1, Eq. 2)."""
beta = tf.cast(beta, x.dtype)
dim = tf.cast(tf.size(x), x.dtype)
if l2_norm_bound is None:
x_norm = tf.norm(x, ord=2)
else:
x_norm = tf.cast(l2_norm_bound, x.dtype)
# We consider 2 (scaled) norm bounds and take the min (Proposition 22).
bound1 = x_norm + tf.sqrt(dim)
squared_bound2 = tf.square(x_norm) + 0.25 * dim
squared_bound2 += (
tf.sqrt(2.0 * tf.math.log(1.0 / beta)) * (x_norm + 0.5 * tf.sqrt(dim)))
bound2 = tf.sqrt(squared_bound2)
# bound2 is inf if beta = 0, in which case we fall back to bound1.
return tf.minimum(bound1, bound2)
conditional = tf.cast(conditional, tf.bool)
l2_norm_threshold = post_rounding_l2_norm_bound(x, l2_norm_bound, beta)
floored_x = tf.floor(x)
decimal_x = x - floored_x
def round_fn(repeat, _):
# 1. Try stochastic rounding on input (ignore previous iterations' outputs).
uniform = tf.random.uniform(tf.shape(x), dtype=x.dtype, minval=0, maxval=1)
bernoulli = uniform < decimal_x
rounded_x = floored_x + tf.cast(bernoulli, x.dtype)
# 2. Try again if the rounded vector has excessive L2 norm.
rounded_l2_norm = tf.norm(rounded_x, ord=2)
repeat = tf.logical_and(conditional,
tf.greater(rounded_l2_norm, l2_norm_threshold))
return [repeat, rounded_x]
repeat = tf.constant(True)
_, result_x = tf.while_loop(
cond=lambda r, _: r, body=round_fn, loop_vars=[repeat, x])
return result_x
def scaled_quantization(x,
scale,
stochastic,
conditional,
l2_norm_bound,
beta=DEFAULT_BETA):
"""Scales the tensors and rounds to integers."""
scale = tf.cast(scale, x.dtype)
l2_norm_bound = tf.cast(l2_norm_bound, x.dtype)
scaled_x = x * scale
scaled_bound = l2_norm_bound * scale
quantized_x = tf.cond(
tf.cast(stochastic, tf.bool),
lambda: stochastic_rounding(scaled_x, conditional, scaled_bound, beta),
lambda: tf.round(scaled_x))
return quantized_x
def inverse_scaled_quantization(x, scale):
"""Restores the value range of `x` from `scaled_quantization`."""
return x / tf.cast(scale, x.dtype)
def flatten_concat(structure):
"""Flattens each tensor in the structure and concats them as a vector.
Each tensor within the structure should have rank >= 1 (i.e. no scalars).
Args:
structure: The input structure of tensors.
Returns:
The flattened and concatenated component tensors as a tf.Tensor with
shape (d,) where `d` is the total number of elements in the structure.
"""
flattened_as_list = []
for x in tf.nest.flatten(structure):
with tf.control_dependencies([tf.debugging.assert_rank_at_least(x, 1)]):
flattened_as_list.append(tf.reshape(x, [-1]))
return tf.concat(flattened_as_list, axis=0)
def inverse_flatten_concat(flat_vector, original_structure):
"""Applies the inverse of `flatten_concat` given the original structure."""
location, split_tensors = 0, []
for orig_t in tf.nest.flatten(original_structure):
length = tf.size(orig_t)
split_vector = tf.slice(flat_vector, [location], [length])
split_tensors.append(tf.reshape(split_vector, orig_t.shape))
location += length
return tf.nest.pack_sequence_as(original_structure, split_tensors)
def sample_rademacher(shape, dtype, seed_pair):
"""Sample uniform random +1/-1 values with specified shape/dtype/seed_pair."""
rand_uniform = tf.random.stateless_uniform(shape=shape, seed=seed_pair)
return tf.cast(tf.sign(rand_uniform - 0.5), dtype)
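# Illustrative sketch (hypothetical seed): stateless sampling means the same
# seed pair always yields the same +1/-1 pattern, which is what lets the
# inverse Hadamard transform below replay and undo the sign flips.
def _rademacher_demo():
  seed = tf.constant([3, 9])
  a = sample_rademacher([4], tf.float32, seed)
  b = sample_rademacher([4], tf.float32, seed)
  return tf.reduce_all(a == b)  # True: deterministic given the seed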
def pad_zeros(x):
"""Pads a vector with shape (d,) with zeros to the next power of two."""
dim = tf.shape(x)[0]
log2_dim = tf.math.log(tf.cast(dim, tf.float32)) / tf.math.log(2.0)
pad_dim = tf.pow(2, tf.cast(tf.math.ceil(log2_dim), tf.int32))
with tf.control_dependencies([tf.debugging.assert_rank(x, 1)]):
return tf.pad(x, [[0, tf.maximum(0, pad_dim - dim)]])
def randomized_hadamard_transform(x, seed_pair, repeat=1):
"""Applies randomized Hadamard transform to a vector with the given seed.
Args:
x: The input vector.
seed_pair: The seed pair for generating randomness.
repeat: Number of times to repeat the randomized Hadamard transform.
Returns:
The transformed vector.
"""
def apply_transform(repeat_index, x):
# All sources of randomness depend on the input seed.
cur_seed = seed_pair + repeat_index
# Randomly flip signs.
signs = sample_rademacher(tf.shape(x), dtype=x.dtype, seed_pair=cur_seed)
rademacher_x = signs * x
# Apply Hadamard (+ expand/squeeze dims).
encoded_x = tf.squeeze(
fast_walsh_hadamard_transform(tf.expand_dims(rademacher_x, axis=0)),
axis=0)
return encoded_x
tf.debugging.assert_type(x, tf.float32)
padded_x = pad_zeros(x) # Hadamard transform requires vectors with 2^n dims.
i, result_x = tf.constant(0), padded_x
cond_fn = lambda i, _: tf.less(i, repeat)
body_fn = lambda i, x: [tf.add(i, 1), apply_transform(i, x)]
_, result_x = tf.while_loop(cond_fn, body_fn, [i, result_x])
return result_x
def inverse_randomized_hadamard_transform(x, original_dim, seed_pair, repeat=1):
"""Applies inverse of `randomized_hadamard_transform` with the given seed.
Args:
x: The transformed vector.
original_dim: The dimension of the original vector.
seed_pair: The same seed pair used in the forward transform.
repeat: Number of times the randomized Hadamard transform was applied.
Returns:
The original vector.
"""
def inverse_transform(repeat_index, x):
# All sources of randomness depend on the input seed.
cur_seed = seed_pair + repeat_index
# Apply Hadamard.
unrotated_x = fast_walsh_hadamard_transform(tf.expand_dims(x, axis=0))
unrotated_x = tf.squeeze(unrotated_x, axis=0)
# Unflip signs.
signs = sample_rademacher(
tf.shape(unrotated_x), dtype=x.dtype, seed_pair=cur_seed)
decoded_x = signs * unrotated_x
return decoded_x
# Repeat inverse transforms (with reversed indices).
tf.debugging.assert_type(x, tf.float32)
i, result_x = tf.constant(repeat - 1), x
cond_fn = lambda i, _: tf.greater_equal(i, 0)
body_fn = lambda i, x: [tf.subtract(i, 1), inverse_transform(i, x)]
_, result_x = tf.while_loop(cond_fn, body_fn, [i, result_x])
# Unpad zeros from forward transform.
return result_x[:original_dim]
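# Illustrative sketch (hypothetical check): with repeat=k the forward pass
# composes k seeded rotations T_{k-1} o ... o T_0, so the inverse must undo
# them in reverse index order, which is why the loop above counts down.
def _repeat_order_demo():
  x = tf.random.normal([4])
  seeds = tf.constant([0, 1])
  y = randomized_hadamard_transform(x, seeds, repeat=3)
  good = inverse_randomized_hadamard_transform(y, 4, seeds, repeat=3)
  bad = inverse_randomized_hadamard_transform(y, 4, seeds, repeat=1)
  return tf.norm(x - good), tf.norm(x - bad)  # (~0, typically large)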
def fast_walsh_hadamard_transform(x):
"""Applies the fast Walsh-Hadamard transform to a set of vectors.
This method uses a composition of existing TensorFlow operations to implement
the transform.
This function is forked from https://github.com/tensorflow/model-optimization.
Args:
x: A `Tensor`. Must be of shape `[a, b]`, where `a` can be anything (not
necessarily known), and `b` must be a power of two, not required to be
statically known.
Returns:
A `Tensor` of shape `[a, b]`, where `[i, :]` is the product `x[i, :]*H`,
where `H` is the Hadamard matrix.
Raises:
    ValueError: If the input is not a rank-2 `Tensor`, or if the second
      dimension is statically known and is not a power of two.
OpError: If the second dimension is not statically known and is not a power
of two. Note that in graph execution, this error is not raised during the
execution of the Python function, but during execution of the resulting
computation.
"""
with tf.compat.v1.name_scope(None, 'fast_walsh_hadamard_transform'):
# Validate input.
x = tf.convert_to_tensor(x)
if x.shape.ndims != 2:
|
original_x_shape = x.shape.as_list()
dim = x.shape.as_list()[-1]
if dim is None: # dim is not statically known.
dim = tf.shape(x)[-1]
log2 = tf.cast(
tf.math.round(
tf.math.log(tf.cast(dim, tf.float32)) / tf.math.log(2.)),
tf.int32)
with tf.control_dependencies([
tf.compat.v1.assert_equal(
dim,
tf.math.pow(2, log2),
            message='The dimension of x must be a power of two. '
            'Provided dimension is: %s' % dim)
]):
x = tf.identity(x)
else: # dim is statically known.
if not (dim and ((dim & (dim - 1)) == 0)):
raise ValueError('The dimension of x must be a power of two. '
'Provided dimension is: %s' % dim)
log2 = int(np.ceil(np.log2(dim)))
if dim == 1: # Equivalent to identity.
return tf.identity(x)
h_core = tf.constant([[1., 1.], [1., -1.]],
dtype=x.dtype,
name='hadamard_weights_2x2')
permutation = tf.constant([0, 2, 1], name='hadamard_permutation')
# A step of the fast Walsh-Hadamard algorithm.
def _hadamard_step(x, dim):
"""A single step in the fast Walsh-Hadamard transform."""
x_shape = x.shape.as_list()
x = tf.reshape(x, [-1, 2]) # Reshape so that we have a matrix.
x = tf.matmul(x, h_core) # Multiply.
x = tf.reshape(x, [-1, dim // 2, 2]) # Reshape to rank-3.
x = tf.transpose(x, perm=permutation) # Swap last two dimensions.
x.set_shape(x_shape) # Failed shape inference in tf.while_loop.
return x
def _fwht(x, dim, log2):
x = tf.reshape(x, [-1, 2, dim // 2])
# The fast Walsh-Hadamard transform.
i = tf.constant(0)
c = lambda i, x: tf.less(i, log2)
b = lambda i, x: [i + 1, _hadamard_step(x, dim)]
i, x = tf.while_loop(c, b, [i, x])
return x
x = tf.cond(
tf.equal(dim, 1), lambda: tf.identity(x), lambda: _fwht(x, dim, log2))
x = tf.reshape(x, [-1, dim])
x /= tf.sqrt(tf.cast(dim, x.dtype)) # Normalize.
x.set_shape(original_x_shape) # Failed shape inference after tf.while_loop.
return x
| raise ValueError('Number of dimensions of x must be 2. Shape of x: %s' %
x.shape) | conditional_block |
import_openapi.go | package openapi
import (
"encoding/json"
"fmt"
"io/ioutil"
"path/filepath"
"regexp"
"strconv"
"strings"
"github.com/boynton/sadl"
"github.com/ghodss/yaml"
)
var EnumTypes bool = true
func IsValidFile(path string) bool {
_, err := Load(path)
return err == nil
}
func Load(path string) (*Model, error) {
data, err := ioutil.ReadFile(path)
if err != nil {
return nil, fmt.Errorf("Cannot read OpenAPI file: %v\n", err)
}
v3 := &Model{}
ext := filepath.Ext(path)
if ext == ".yaml" {
err = yaml.Unmarshal(data, &v3)
} else {
err = json.Unmarshal(data, &v3)
}
if err != nil {
return nil, err
}
return Validate(v3)
}
func Import(paths []string, conf *sadl.Data) (*sadl.Model, error) {
if len(paths) != 1 {
return nil, fmt.Errorf("Cannot merge multiple OpenAPI files")
}
path := paths[0]
name := path
n := strings.LastIndex(name, "/")
// format := ""
if n >= 0 {
name = name[n+1:]
}
n = strings.LastIndex(name, ".")
if n >= 0 {
// format = name[n+1:]
name = name[:n]
name = strings.Replace(name, ".", "_", -1)
}
oas3, err := Load(path)
if err != nil {
return nil, err
}
model, err := oas3.ToSadl(name)
if err != nil {
return nil, fmt.Errorf("Cannot convert to SADL: %v\n", err)
}
//err = model.ConvertInlineEnums()
return model, err
}
/*
func DetermineVersion(data []byte, format string) (string, error) {
var raw map[string]interface{}
var err error
switch format {
case "json":
err = json.Unmarshal(data, &raw)
case "yaml":
err = yaml.Unmarshal(data, &raw)
default:
err = fmt.Errorf("Unsupported file format: %q. Only \"json\" and \"yaml\" are supported.", format)
}
if err != nil {
return "", err
}
if v, ok := raw["openapi"]; ok {
if s, ok := v.(string); ok {
return s, nil
}
}
if v, ok := raw["swagger"]; ok {
if s, ok := v.(string); ok {
return s, nil
}
}
return "", fmt.Errorf("Cannot find an 'openapi' in the specified %s file to determine the version", format)
}
*/
/*
func xParse(data []byte, format string) (*Model, error) {
version, err := DetermineVersion(data, format)
if err != nil {
return nil, err
}
oas := &Oas{
source: version,
}
if strings.HasPrefix(version, "3.") {
oas.V3, err = oas3.Parse(data, format)
return oas, err
} else if strings.HasPrefix(version, "2.") {
v2, err := oas2.Parse(data, format)
if err == nil {
oas.V3, err = oas2.ConvertToV3(v2)
}
return oas, err
}
return nil, fmt.Errorf("Unsupported version of OpenAPI Spec: %s", version)
}
*/
var examples []*sadl.ExampleDef
var methods = []string{"GET", "PUT", "POST", "DELETE", "HEAD"} //to do: "PATCH", "OPTIONS", "TRACE"
func (model *Model) ToSadl(name string) (*sadl.Model, error) {
annotations := make(map[string]string, 0)
examples = nil
annotations["x_openapi_version"] = model.OpenAPI
comment := model.Info.Description
if model.Info.Title != "" {
if sadl.IsSymbol(model.Info.Title) {
name = model.Info.Title
} else {
comment = model.Info.Title + " - " + comment
}
}
schema := &sadl.Schema{
Name: name,
Comment: comment,
Version: model.Info.Version,
}
for name, oasSchema := range model.Components.Schemas {
name = validSadlName(name, oasSchema)
if name == "" {
continue
}
var ts sadl.TypeSpec
var err error
comment := ""
tname := oasTypeRef(oasSchema)
if tname != "" {
if oasDef, ok := model.Components.Schemas[tname]; ok {
ts, err = convertOasType(tname, oasDef) //doesn't handle N levels
} else {
panic("hmm")
}
} else {
ts, err = convertOasType(name, oasSchema)
comment = oasSchema.Description
}
if err != nil {
return nil, err
}
td := &sadl.TypeDef{
TypeSpec: ts,
Name: name,
Comment: comment,
//annotations
}
schema.Types = append(schema.Types, td)
}
httpBindings := true
for tmpl, path := range model.Paths {
path2 := *path
for _, method := range methods {
op := getPathOperation(&path2, method)
if op != nil {
if strings.HasPrefix(tmpl, "x-") {
continue
}
if httpBindings {
hact, err := convertOasPath(tmpl, op, method)
if err != nil {
return nil, err
}
schema.Http = append(schema.Http, hact)
}
}
}
}
for _, server := range model.Servers {
annotations["x_server"] = server.URL
}
if model.Info.License != nil {
if model.Info.License.Name != "" {
annotations["x_license_name"] = model.Info.License.Name
}
if model.Info.License.URL != "" {
// schema.Annotations["x_license_url"] = oas.V3.Info.License.URL
annotations["x_license_url"] = model.Info.License.URL
}
}
if len(annotations) > 0 {
schema.Annotations = annotations
}
schema.Examples = examples
return sadl.NewModel(schema)
}
func validSadlName(name string, oasSchema *Schema) string {
if name == "Timestamp" {
if oasSchema.Type == "string" {
return ""
}
} else if name == "Decimal" {
return ""
}
return name
}
func oasTypeRef(oasSchema *Schema) string {
if oasSchema != nil && oasSchema.Ref != "" {
if strings.HasPrefix(oasSchema.Ref, "#/components/schemas/") {
return oasSchema.Ref[len("#/components/schemas/"):]
}
return oasSchema.Ref //?
}
return ""
}
func convertOasType(name string, oasSchema *Schema) (sadl.TypeSpec, error) {
var err error
var ts sadl.TypeSpec
if oasSchema.Example != nil {
ex := &sadl.ExampleDef{
Target: name,
Example: oasSchema.Example,
}
examples = append(examples, ex)
}
switch oasSchema.Type {
case "boolean":
ts.Type = "Bool"
case "string":
if oasSchema.Enum != nil {
//OAS defines element *descriptions* as the values, not symbolic identifiers.
			//so we look for the case where all values look like identifiers and call that an enum; otherwise it is a String with an accepted set of "values".
			//perhaps the spirit of JSON Schema enums is just values, not what I think of as "enums", i.e. "a set of named values", per wikipedia.
//still, with symbolic values, perhaps the intent is to use proper enums, if only JSON Schema had them.
isEnum := EnumTypes
var values []string
for _, val := range oasSchema.Enum {
if s, ok := val.(string); ok {
values = append(values, s)
if !sadl.IsSymbol(s) {
isEnum = false
}
} else {
return ts, fmt.Errorf("Error in OAS source: string enum value is not a string: %v", val)
}
}
if isEnum {
ts.Type = "Enum"
for _, sym := range values {
el := &sadl.EnumElementDef{
Symbol: sym,
}
ts.Elements = append(ts.Elements, el)
}
} else {
ts.Type = "String"
ts.Values = values
}
} else {
ts.Type = "String"
}
if ts.Type == "String" {
if oasSchema.Format == "uuid" {
ts.Type = "UUID"
} else if oasSchema.Format == "date-time" {
ts.Type = "Timestamp"
} else {
ts.Pattern = oasSchema.Pattern
if oasSchema.MinLength > 0 {
tmpMin := int64(oasSchema.MinLength)
ts.MinSize = &tmpMin
}
if oasSchema.MaxLength != nil {
tmpMax := int64(*oasSchema.MaxLength)
ts.MaxSize = &tmpMax
}
if oasSchema.Format != "" {
fmt.Println("NYI: String 'format':", oasSchema.Format)
}
}
}
case "array":
ts.Type = "Array"
if oasSchema.Items != nil {
if oasSchema.Items.Ref != "" {
ts.Items = oasTypeRef(oasSchema.Items)
} else {
its, err := convertOasType(name+".Items", oasSchema.Items)
if err == nil {
ts.Items = its.Type
}
}
}
//minsize, maxsize
//comment
case "number":
ts.Type = "Decimal"
if oasSchema.Min != nil {
ts.Min = sadl.NewDecimal(*oasSchema.Min)
}
if oasSchema.Max != nil {
ts.Max = sadl.NewDecimal(*oasSchema.Max)
}
case "integer":
switch oasSchema.Format {
case "int8":
ts.Type = "Int8"
case "int16":
ts.Type = "Int16"
case "int32":
ts.Type = "Int32"
case "int64":
ts.Type = "Int64"
default:
ts.Type = "Int64"
}
if oasSchema.Min != nil {
ts.Min = sadl.NewDecimal(*oasSchema.Min)
}
if oasSchema.Max != nil {
ts.Max = sadl.NewDecimal(*oasSchema.Max)
}
case "", "object":
ts.Type = "Struct"
if oasSchema.Properties != nil {
req := oasSchema.Required
for fname, fschema := range oasSchema.Properties {
fd := &sadl.StructFieldDef{
Name: fname,
Comment: fschema.Description,
}
if containsString(req, fname) {
fd.Required = true
}
fd.Type = oasTypeRef(fschema)
if fd.Type == "" {
fd.TypeSpec, err = convertOasType(name+"."+fname, fschema)
}
ts.Fields = append(ts.Fields, fd)
}
}
default:
fmt.Printf("oas type is %q\n", oasSchema.Type)
panic("oas type not handled")
}
return ts, err
}
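// exampleConvertOasType is an illustrative sketch (added for exposition; not
// part of the original converter). It builds a Schema from JSON so no struct
// field types have to be assumed, then shows the resulting SADL type name.
func exampleConvertOasType() {
	var s Schema
	_ = json.Unmarshal([]byte(`{"type":"string","enum":["RED","GREEN"]}`), &s)
	ts, err := convertOasType("Color", &s)
	if err == nil {
		fmt.Println(ts.Type) // "Enum" while EnumTypes is true
	}
}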
func containsString(lst []string, val string) bool {
for _, s := range lst {
if s == val {
return true
}
}
return false
}
func capitalize(s string) string {
return strings.ToUpper(s[0:1]) + s[1:]
}
func uncapitalize(s string) string {
return strings.ToLower(s[0:1]) + s[1:]
}
func makeIdentifier(text string) string {
reg, _ := regexp.Compile("[^a-zA-Z_][^a-zA-Z_0-9]*")
return reg.ReplaceAllString(text, "")
}
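// exampleMakeIdentifier is an illustrative sketch (hypothetical inputs; not
// part of the original file) showing how non-identifier characters are
// stripped so header and query names become usable SADL variable names.
func exampleMakeIdentifier() {
	fmt.Println(makeIdentifier("x-request-id")) // "xrequestid"
	fmt.Println(makeIdentifier("page[size]"))   // "pagesize"
}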
func convertOasPath(path string, op *Operation, method string) (*sadl.HttpDef, error) {
hact := &sadl.HttpDef{
Name: op.OperationId,
Path: path,
Method: method,
Comment: op.Summary,
}
if len(op.Tags) > 0 {
hact.Annotations = make(map[string]string, 0)
//note: first tag is used as the "resource" name in SADL.
tmp := ""
rez := ""
for _, tag := range op.Tags {
if rez == "" {
rez = tag
} else if tmp == "" {
tmp = tag
} else {
tmp = tmp + "," + tag
}
}
hact.Resource = rez
if len(tmp) > 0 {
hact.Annotations["x_tags"] = tmp
}
}
var queries []string
for _, param := range op.Parameters {
name := makeIdentifier(param.Name)
spec := &sadl.HttpParamSpec{
StructFieldDef: sadl.StructFieldDef{
Name: name,
Comment: param.Description,
Required: param.Required,
},
}
switch param.In {
case "query":
spec.Query = param.Name
queries = append(queries, param.Name+"={"+name+"}")
case "path":
spec.Path = true
if strings.Index(path, "{"+name+"}") < 0 {
fmt.Println("WARNING: path param is not in path template:", path, name)
panic("here")
}
case "header":
spec.Header = param.Name
case "cookie":
return nil, fmt.Errorf("Cookie params NYI: %v", sadl.AsString(param))
}
spec.Type = oasTypeRef(param.Schema)
if spec.Type == "" {
if param.Schema != nil {
spec.Type = sadlPrimitiveType(param.Schema.Type)
}
if spec.Type == "Array" {
if param.Schema.Items == nil {
spec.Items = "Any"
} else {
schref := param.Schema.Items
switch schref.Type {
case "string":
spec.Items = "String"
default:
spec.Items = "Any"
}
}
}
if spec.Type == "Struct" {
panic("Whoops, that can't be right")
}
if param.Schema != nil && param.Schema.Enum != nil {
for _, val := range param.Schema.Enum {
if s, ok := val.(string); ok {
spec.Values = append(spec.Values, s)
} else {
return nil, fmt.Errorf("String enum values are not strings: %v", param.Schema.Enum)
}
}
}
		}
hact.Inputs = append(hact.Inputs, spec)
}
if len(queries) > 0 {
hact.Path = hact.Path + "?" + strings.Join(queries, "&")
}
if hact.Method == "POST" || hact.Method == "PUT" || hact.Method == "PATCH" |
//expected: if 200 is in the list, use that
//else: if 201 is in the list, use that
//else: ? find a likely candidate.
var expectedStatus string = "default"
	for status := range op.Responses {
if strings.HasPrefix(status, "2") || strings.HasPrefix(status, "3") {
expectedStatus = status
break
}
}
// if expectedStatus == "default" {
// expectedStatus = "200" //?
// }
if expectedStatus != "" {
eparam := op.Responses[expectedStatus]
if eparam == nil {
return nil, fmt.Errorf("no response entity type provided for operation %q", op.OperationId)
}
var err error
code := 200
if expectedStatus != "default" && strings.Index(expectedStatus, "X") < 0 {
code, err = strconv.Atoi(expectedStatus)
if err != nil {
return nil, err
}
}
ex := &sadl.HttpExpectedSpec{
Status: int32(code),
Comment: eparam.Description,
}
for header, def := range eparam.Headers {
param := &sadl.HttpParamSpec{}
param.Header = header
param.Comment = def.Description
s := param.Header
//most app-defined headers start with "x-" or "X-". Strip that off for a more reasonable variable name.
if strings.HasPrefix(param.Header, "x-") || strings.HasPrefix(param.Header, "X-") {
s = s[2:]
}
param.Name = makeIdentifier(s)
schref := def.Schema
if schref != nil {
if schref.Ref != "" {
param.Type = oasTypeRef(schref)
} else {
param.TypeSpec, err = convertOasType(hact.Name+".Expected."+param.Name, schref) //fix: example
}
ex.Outputs = append(ex.Outputs, param)
}
}
for contentType, mediadef := range eparam.Content {
if contentType == "application/json" { //hack
result := &sadl.HttpParamSpec{}
result.Name = "body"
schref := mediadef.Schema
if schref != nil {
if schref.Ref != "" {
result.Type = oasTypeRef(schref)
} else {
result.TypeSpec, err = convertOasType(hact.Name+".Expected.payload", schref) //fix: example
}
ex.Outputs = append(ex.Outputs, result)
} else {
fmt.Println("HTTP Action has no expected result type:", sadl.Pretty(eparam))
}
}
}
hact.Expected = ex
}
for status, param := range op.Responses {
if status != expectedStatus {
			//the status can be "default", "4XX" (where 'X' is a wildcard), or a concrete code like "404"; the concrete code takes precedence.
			//For SADL, not specifying the response is a bug, so "default" is mapped to status 0 here, and wildcard codes are rejected below.
if status == "default" {
status = "0"
} else if strings.Index(status, "X") >= 0 {
panic("wildcard response codes not supported")
}
code, err := strconv.Atoi(status)
if err != nil {
return nil, fmt.Errorf("Invalid status code: %q", status)
}
ex := &sadl.HttpExceptionSpec{
Status: int32(code),
Comment: param.Description,
}
//FIXME: sadl should allow response headers for exceptions, also.
for contentType, mediadef := range param.Content {
if contentType == "application/json" { //hack
schref := mediadef.Schema
if schref != nil {
if schref.Ref != "" {
ex.Type = oasTypeRef(schref)
} else {
panic("inline response types not yet supported")
}
break
}
}
}
hact.Exceptions = append(hact.Exceptions, ex)
}
}
//tags: add `x-tags="one,two"` annotation
return hact, nil
}
func getPathOperation(oasPathItem *PathItem, method string) *Operation {
switch method {
case "GET":
return oasPathItem.Get
case "PUT":
// fmt.Println("xxxxxxxxxxxxxxxx----!!!!", method, oasPathItem.OperationId)
// panic("here")
return oasPathItem.Put
case "POST":
return oasPathItem.Post
case "DELETE":
return oasPathItem.Delete
case "HEAD":
return oasPathItem.Head
/*
case "PATCH":
return oasPathItem.Patch
case "OPTIONS":
return oasPathItem.Options
case "TRACE":
return oasPathItem.Trace
case "CONNECT":
return oasPathItem.Connect
*/
}
return nil
}
func guessOperationName(op *Operation, method string) string {
defaultStatus := guessDefaultResponseCode(op)
switch method {
case "GET":
resp := op.Responses[defaultStatus]
		if resp == nil {
			resp = op.Responses["default"]
		}
		if resp == nil {
			return "" // No response to inspect; avoid a nil dereference below.
		}
for contentType, mediadef := range resp.Content {
if contentType == "application/json" {
schref := mediadef.Schema
if schref != nil {
if schref.Ref != "" {
entityType := oasTypeRef(schref)
return entityType
} else {
entityType := sadlPrimitiveType(schref.Type)
if entityType == "Array" {
itemType := schref.Items
if itemType.Ref != "" {
itemTypeName := oasTypeRef(itemType)
entityType = "ArrayOf" + itemTypeName
}
}
return entityType
}
} else {
fmt.Println("HTTP Action has no expected result type:", sadl.Pretty(resp))
}
}
}
}
return ""
}
func sadlPrimitiveType(name string) string {
switch name {
case "string":
return "String"
case "number":
return "Decimal"
case "integer":
return "Int32"
case "array":
return "Array"
case "object":
return "Struct"
case "boolean":
return "Bool"
default:
fmt.Println("sadlPrimitiveType for:", name)
panic("what?")
}
}
func findTypeDef(schema *sadl.Schema, name string) *sadl.TypeDef {
for _, td := range schema.Types {
if td.Name == name {
return td
}
}
return nil
}
func guessDefaultResponseCode(op *Operation) string {
	for status := range op.Responses {
if strings.HasPrefix(status, "2") || strings.HasPrefix(status, "3") {
//kind of an arbitrary choice: the first one we encounter, and this is random order, too.
return status
}
}
return "200" //!
}
func responseTypeName(resp *Response) string {
for contentType, mediadef := range resp.Content {
if contentType == "application/json" { //hack
schref := mediadef.Schema
if schref != nil {
if schref.Ref != "" {
return oasTypeRef(schref)
} else {
ts, err := convertOasType("", schref)
if err == nil {
return ts.Type //fixme
}
}
}
}
}
return ""
}
| {
if op.RequestBody != nil {
for contentType, mediadef := range op.RequestBody.Content {
if contentType == "application/json" { //hack
bodyType := oasTypeRef(mediadef.Schema)
if bodyType != "" {
spec := &sadl.HttpParamSpec{
StructFieldDef: sadl.StructFieldDef{
TypeSpec: sadl.TypeSpec{
Type: bodyType,
},
Comment: op.RequestBody.Description,
Name: "body",
Required: op.RequestBody.Required,
},
}
hact.Inputs = append(hact.Inputs, spec)
}
}
}
}
} | conditional_block |
import_openapi.go | package openapi
import (
"encoding/json"
"fmt"
"io/ioutil"
"path/filepath"
"regexp"
"strconv"
"strings"
"github.com/boynton/sadl"
"github.com/ghodss/yaml"
)
var EnumTypes bool = true
func IsValidFile(path string) bool {
_, err := Load(path)
return err == nil
}
func Load(path string) (*Model, error) {
data, err := ioutil.ReadFile(path)
if err != nil {
return nil, fmt.Errorf("Cannot read OpenAPI file: %v\n", err)
}
v3 := &Model{}
ext := filepath.Ext(path)
if ext == ".yaml" {
err = yaml.Unmarshal(data, &v3)
} else {
err = json.Unmarshal(data, &v3)
}
if err != nil {
return nil, err
}
return Validate(v3)
}
func Import(paths []string, conf *sadl.Data) (*sadl.Model, error) {
if len(paths) != 1 {
return nil, fmt.Errorf("Cannot merge multiple OpenAPI files")
}
path := paths[0]
name := path
n := strings.LastIndex(name, "/")
// format := ""
if n >= 0 {
name = name[n+1:]
}
n = strings.LastIndex(name, ".")
if n >= 0 {
// format = name[n+1:]
name = name[:n]
name = strings.Replace(name, ".", "_", -1)
}
oas3, err := Load(path)
if err != nil {
return nil, err
}
model, err := oas3.ToSadl(name)
if err != nil {
return nil, fmt.Errorf("Cannot convert to SADL: %v\n", err)
}
//err = model.ConvertInlineEnums()
return model, err
}
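// exampleImport is an illustrative sketch (hypothetical file name; not part
// of the original package): load a single OpenAPI document and convert it to
// a SADL model.
func exampleImport() {
	model, err := Import([]string{"petstore.yaml"}, nil)
	if err != nil {
		fmt.Println("import failed:", err)
		return
	}
	fmt.Println(sadl.Pretty(model))
}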
/*
func DetermineVersion(data []byte, format string) (string, error) {
var raw map[string]interface{}
var err error
switch format {
case "json":
err = json.Unmarshal(data, &raw)
case "yaml":
err = yaml.Unmarshal(data, &raw)
default:
err = fmt.Errorf("Unsupported file format: %q. Only \"json\" and \"yaml\" are supported.", format)
}
if err != nil {
return "", err
}
if v, ok := raw["openapi"]; ok {
if s, ok := v.(string); ok {
return s, nil
}
}
if v, ok := raw["swagger"]; ok {
if s, ok := v.(string); ok {
return s, nil
}
}
return "", fmt.Errorf("Cannot find an 'openapi' in the specified %s file to determine the version", format)
}
*/
/*
func xParse(data []byte, format string) (*Model, error) {
version, err := DetermineVersion(data, format)
if err != nil {
return nil, err
}
oas := &Oas{
source: version,
}
if strings.HasPrefix(version, "3.") {
oas.V3, err = oas3.Parse(data, format)
return oas, err
} else if strings.HasPrefix(version, "2.") {
v2, err := oas2.Parse(data, format)
if err == nil {
oas.V3, err = oas2.ConvertToV3(v2)
}
return oas, err
}
return nil, fmt.Errorf("Unsupported version of OpenAPI Spec: %s", version)
}
*/
var examples []*sadl.ExampleDef
var methods = []string{"GET", "PUT", "POST", "DELETE", "HEAD"} //to do: "PATCH", "OPTIONS", "TRACE"
func (model *Model) ToSadl(name string) (*sadl.Model, error) {
annotations := make(map[string]string, 0)
examples = nil
annotations["x_openapi_version"] = model.OpenAPI
comment := model.Info.Description
if model.Info.Title != "" {
if sadl.IsSymbol(model.Info.Title) {
name = model.Info.Title
} else {
comment = model.Info.Title + " - " + comment
}
}
schema := &sadl.Schema{
Name: name,
Comment: comment,
Version: model.Info.Version,
}
for name, oasSchema := range model.Components.Schemas {
name = validSadlName(name, oasSchema)
if name == "" {
continue
}
var ts sadl.TypeSpec
var err error
comment := ""
tname := oasTypeRef(oasSchema)
if tname != "" {
if oasDef, ok := model.Components.Schemas[tname]; ok {
ts, err = convertOasType(tname, oasDef) //doesn't handle N levels
} else {
panic("hmm")
}
} else {
ts, err = convertOasType(name, oasSchema)
comment = oasSchema.Description
}
if err != nil {
return nil, err
}
td := &sadl.TypeDef{
TypeSpec: ts,
Name: name,
Comment: comment,
//annotations
}
schema.Types = append(schema.Types, td)
}
httpBindings := true
for tmpl, path := range model.Paths {
path2 := *path
for _, method := range methods {
op := getPathOperation(&path2, method)
if op != nil {
if strings.HasPrefix(tmpl, "x-") {
continue
}
if httpBindings {
hact, err := convertOasPath(tmpl, op, method)
if err != nil {
return nil, err
}
schema.Http = append(schema.Http, hact)
}
}
}
}
for _, server := range model.Servers {
annotations["x_server"] = server.URL
}
if model.Info.License != nil {
if model.Info.License.Name != "" {
annotations["x_license_name"] = model.Info.License.Name
}
if model.Info.License.URL != "" {
// schema.Annotations["x_license_url"] = oas.V3.Info.License.URL
annotations["x_license_url"] = model.Info.License.URL
}
}
if len(annotations) > 0 {
schema.Annotations = annotations
}
schema.Examples = examples
return sadl.NewModel(schema)
}
func validSadlName(name string, oasSchema *Schema) string {
if name == "Timestamp" {
if oasSchema.Type == "string" {
return ""
}
} else if name == "Decimal" {
return ""
}
return name
}
func oasTypeRef(oasSchema *Schema) string {
if oasSchema != nil && oasSchema.Ref != "" {
if strings.HasPrefix(oasSchema.Ref, "#/components/schemas/") {
return oasSchema.Ref[len("#/components/schemas/"):]
}
return oasSchema.Ref //?
}
return ""
}
func convertOasType(name string, oasSchema *Schema) (sadl.TypeSpec, error) {
var err error
var ts sadl.TypeSpec
if oasSchema.Example != nil {
ex := &sadl.ExampleDef{
Target: name,
Example: oasSchema.Example,
}
examples = append(examples, ex)
}
switch oasSchema.Type {
case "boolean":
ts.Type = "Bool"
case "string":
if oasSchema.Enum != nil {
//OAS defines element *descriptions* as the values, not symbolic identifiers.
			//so we look for the case where all values look like identifiers and call that an enum; otherwise it is a String with an accepted set of "values".
			//perhaps the spirit of JSON Schema enums is just values, not what I think of as "enums", i.e. "a set of named values", per wikipedia.
//still, with symbolic values, perhaps the intent is to use proper enums, if only JSON Schema had them.
isEnum := EnumTypes
var values []string
for _, val := range oasSchema.Enum {
if s, ok := val.(string); ok {
values = append(values, s)
if !sadl.IsSymbol(s) {
isEnum = false
}
} else {
return ts, fmt.Errorf("Error in OAS source: string enum value is not a string: %v", val)
}
}
if isEnum {
ts.Type = "Enum"
for _, sym := range values {
el := &sadl.EnumElementDef{
Symbol: sym,
}
ts.Elements = append(ts.Elements, el)
}
} else {
ts.Type = "String"
ts.Values = values
}
} else {
ts.Type = "String"
}
if ts.Type == "String" {
if oasSchema.Format == "uuid" {
ts.Type = "UUID"
} else if oasSchema.Format == "date-time" {
ts.Type = "Timestamp"
} else {
ts.Pattern = oasSchema.Pattern
if oasSchema.MinLength > 0 {
tmpMin := int64(oasSchema.MinLength)
ts.MinSize = &tmpMin
}
if oasSchema.MaxLength != nil {
tmpMax := int64(*oasSchema.MaxLength)
ts.MaxSize = &tmpMax
}
if oasSchema.Format != "" {
fmt.Println("NYI: String 'format':", oasSchema.Format)
}
}
}
case "array":
ts.Type = "Array"
if oasSchema.Items != nil {
if oasSchema.Items.Ref != "" {
ts.Items = oasTypeRef(oasSchema.Items)
} else {
its, err := convertOasType(name+".Items", oasSchema.Items)
if err == nil {
ts.Items = its.Type
}
}
}
//minsize, maxsize
//comment
case "number":
ts.Type = "Decimal"
if oasSchema.Min != nil {
ts.Min = sadl.NewDecimal(*oasSchema.Min)
}
if oasSchema.Max != nil {
ts.Max = sadl.NewDecimal(*oasSchema.Max)
}
case "integer":
switch oasSchema.Format {
case "int8":
ts.Type = "Int8"
case "int16":
ts.Type = "Int16"
case "int32":
ts.Type = "Int32"
case "int64":
ts.Type = "Int64"
default:
ts.Type = "Int64"
}
if oasSchema.Min != nil {
ts.Min = sadl.NewDecimal(*oasSchema.Min)
}
if oasSchema.Max != nil {
ts.Max = sadl.NewDecimal(*oasSchema.Max)
}
case "", "object":
ts.Type = "Struct"
if oasSchema.Properties != nil {
req := oasSchema.Required
for fname, fschema := range oasSchema.Properties {
fd := &sadl.StructFieldDef{
Name: fname,
Comment: fschema.Description,
}
if containsString(req, fname) {
fd.Required = true
}
fd.Type = oasTypeRef(fschema)
if fd.Type == "" {
fd.TypeSpec, err = convertOasType(name+"."+fname, fschema)
}
ts.Fields = append(ts.Fields, fd)
}
}
default:
fmt.Printf("oas type is %q\n", oasSchema.Type)
panic("oas type not handled")
}
return ts, err
}
func containsString(lst []string, val string) bool {
for _, s := range lst {
if s == val {
return true
}
}
return false
}
func capitalize(s string) string {
return strings.ToUpper(s[0:1]) + s[1:]
}
func uncapitalize(s string) string {
return strings.ToLower(s[0:1]) + s[1:]
}
func makeIdentifier(text string) string {
reg, _ := regexp.Compile("[^a-zA-Z_][^a-zA-Z_0-9]*")
return reg.ReplaceAllString(text, "")
}
func convertOasPath(path string, op *Operation, method string) (*sadl.HttpDef, error) {
hact := &sadl.HttpDef{
Name: op.OperationId,
Path: path,
Method: method,
Comment: op.Summary,
}
if len(op.Tags) > 0 {
hact.Annotations = make(map[string]string, 0)
//note: first tag is used as the "resource" name in SADL.
tmp := ""
rez := ""
for _, tag := range op.Tags {
if rez == "" {
rez = tag
} else if tmp == "" {
tmp = tag
} else {
tmp = tmp + "," + tag
}
}
hact.Resource = rez
if len(tmp) > 0 {
hact.Annotations["x_tags"] = tmp
}
}
var queries []string
for _, param := range op.Parameters {
name := makeIdentifier(param.Name)
spec := &sadl.HttpParamSpec{
StructFieldDef: sadl.StructFieldDef{
Name: name,
Comment: param.Description,
Required: param.Required,
},
}
switch param.In {
case "query":
spec.Query = param.Name
queries = append(queries, param.Name+"={"+name+"}")
case "path":
spec.Path = true
if !strings.Contains(path, "{"+name+"}") {
fmt.Println("WARNING: path param is not in path template:", path, name)
panic("here")
}
case "header":
spec.Header = param.Name
case "cookie":
return nil, fmt.Errorf("Cookie params NYI: %v", sadl.AsString(param))
}
spec.Type = oasTypeRef(param.Schema)
if spec.Type == "" {
if param.Schema != nil {
spec.Type = sadlPrimitiveType(param.Schema.Type)
}
if spec.Type == "Array" {
if param.Schema.Items == nil {
spec.Items = "Any"
} else {
schref := param.Schema.Items
switch schref.Type {
case "string":
spec.Items = "String"
default:
spec.Items = "Any"
}
}
}
if spec.Type == "Struct" {
panic("Whoops, that can't be right")
}
if param.Schema != nil && param.Schema.Enum != nil {
for _, val := range param.Schema.Enum {
if s, ok := val.(string); ok {
spec.Values = append(spec.Values, s)
} else {
return nil, fmt.Errorf("String enum values are not strings: %v", param.Schema.Enum)
}
}
}
} else {
}
hact.Inputs = append(hact.Inputs, spec)
}
if len(queries) > 0 {
hact.Path = hact.Path + "?" + strings.Join(queries, "&")
}
if hact.Method == "POST" || hact.Method == "PUT" || hact.Method == "PATCH" {
if op.RequestBody != nil {
for contentType, mediadef := range op.RequestBody.Content {
if contentType == "application/json" { //hack
bodyType := oasTypeRef(mediadef.Schema)
if bodyType != "" {
spec := &sadl.HttpParamSpec{
StructFieldDef: sadl.StructFieldDef{
TypeSpec: sadl.TypeSpec{
Type: bodyType,
},
Comment: op.RequestBody.Description,
Name: "body",
Required: op.RequestBody.Required,
},
}
hact.Inputs = append(hact.Inputs, spec)
}
}
}
}
}
//expected: if 200 is in the list, use that
//else: if 201 is in the list, use that
//else: ? find a likely candidate.
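//e.g. (hypothetical) responses keyed {"default", "404", "201"}: the loop below picks "201";
//note that Go map iteration order is random, so with several 2xx/3xx keys the pick can vary.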
var expectedStatus string = "default"
for status := range op.Responses {
if strings.HasPrefix(status, "2") || strings.HasPrefix(status, "3") {
expectedStatus = status
break
}
}
// if expectedStatus == "default" {
// expectedStatus = "200" //?
// }
if expectedStatus != "" {
eparam := op.Responses[expectedStatus]
if eparam == nil {
return nil, fmt.Errorf("no response entity type provided for operation %q", op.OperationId)
}
var err error
code := 200
if expectedStatus != "default" && !strings.Contains(expectedStatus, "X") {
code, err = strconv.Atoi(expectedStatus)
if err != nil {
return nil, err
}
}
ex := &sadl.HttpExpectedSpec{
Status: int32(code),
Comment: eparam.Description,
}
for header, def := range eparam.Headers {
param := &sadl.HttpParamSpec{}
param.Header = header
param.Comment = def.Description
s := param.Header
//most app-defined headers start with "x-" or "X-". Strip that off for a more reasonable variable name.
if strings.HasPrefix(param.Header, "x-") || strings.HasPrefix(param.Header, "X-") {
s = s[2:]
}
param.Name = makeIdentifier(s)
schref := def.Schema
if schref != nil {
if schref.Ref != "" {
param.Type = oasTypeRef(schref)
} else {
param.TypeSpec, err = convertOasType(hact.Name+".Expected."+param.Name, schref) //fix: example
}
ex.Outputs = append(ex.Outputs, param)
}
}
for contentType, mediadef := range eparam.Content {
if contentType == "application/json" { //hack
result := &sadl.HttpParamSpec{}
result.Name = "body"
schref := mediadef.Schema
if schref != nil {
if schref.Ref != "" {
result.Type = oasTypeRef(schref)
} else {
result.TypeSpec, err = convertOasType(hact.Name+".Expected.payload", schref) //fix: example
}
ex.Outputs = append(ex.Outputs, result)
} else {
fmt.Println("HTTP Action has no expected result type:", sadl.Pretty(eparam))
}
}
}
hact.Expected = ex
}
for status, param := range op.Responses {
if status != expectedStatus {
//the status can be "default", "4XX" (where 'X' is a wildcard), or a concrete code like "404"; the concrete code takes precedence.
//for SADL, not specifying the response is a bug, so "default" is mapped to status 0 here, and wildcard codes are rejected below.
if status == "default" {
status = "0"
} else if strings.Contains(status, "X") {
panic("wildcard response codes not supported")
}
code, err := strconv.Atoi(status)
if err != nil {
return nil, fmt.Errorf("Invalid status code: %q", status)
}
ex := &sadl.HttpExceptionSpec{
Status: int32(code),
Comment: param.Description,
}
//FIXME: sadl should allow response headers for exceptions, also.
for contentType, mediadef := range param.Content {
if contentType == "application/json" { //hack
schref := mediadef.Schema
if schref != nil {
if schref.Ref != "" {
ex.Type = oasTypeRef(schref)
} else {
panic("inline response types not yet supported")
}
break
}
}
}
hact.Exceptions = append(hact.Exceptions, ex)
}
}
//tags: add `x-tags="one,two"` annotation
return hact, nil
}
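//Sketch of the resulting binding (hypothetical operation, for illustration only):
//  GET /pets with a query parameter "limit" becomes an HttpDef with
//  Path "/pets?limit={limit}", Method "GET", and one input spec whose Query is "limit".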
func getPathOperation(oasPathItem *PathItem, method string) *Operation {
switch method {
case "GET":
return oasPathItem.Get
case "PUT":
// fmt.Println("xxxxxxxxxxxxxxxx----!!!!", method, oasPathItem.OperationId)
// panic("here")
return oasPathItem.Put
case "POST":
return oasPathItem.Post
case "DELETE":
return oasPathItem.Delete
case "HEAD":
return oasPathItem.Head
/*
case "PATCH":
return oasPathItem.Patch
case "OPTIONS":
return oasPathItem.Options
case "TRACE":
return oasPathItem.Trace
case "CONNECT":
return oasPathItem.Connect
*/
}
return nil
}
func guessOperationName(op *Operation, method string) string {
defaultStatus := guessDefaultResponseCode(op)
switch method {
case "GET":
resp := op.Responses[defaultStatus]
if resp == nil {
resp = op.Responses["default"]
}
for contentType, mediadef := range resp.Content {
if contentType == "application/json" {
schref := mediadef.Schema
if schref != nil {
if schref.Ref != "" {
entityType := oasTypeRef(schref)
return entityType
} else {
entityType := sadlPrimitiveType(schref.Type)
if entityType == "Array" {
itemType := schref.Items
if itemType.Ref != "" {
itemTypeName := oasTypeRef(itemType)
entityType = "ArrayOf" + itemTypeName
}
}
return entityType
}
} else {
fmt.Println("HTTP Action has no expected result type:", sadl.Pretty(resp))
}
}
}
}
return ""
}
func sadlPrimitiveType(name string) string {
switch name {
case "string":
return "String"
case "number":
return "Decimal"
case "integer":
return "Int32"
case "array":
return "Array"
case "object":
return "Struct"
case "boolean":
return "Bool"
default:
fmt.Println("sadlPrimitiveType for:", name)
panic("what?")
}
}
func findTypeDef(schema *sadl.Schema, name string) *sadl.TypeDef {
for _, td := range schema.Types {
if td.Name == name {
return td | }
return nil
}
func guessDefaultResponseCode(op *Operation) string {
for status := range op.Responses {
if strings.HasPrefix(status, "2") || strings.HasPrefix(status, "3") {
//somewhat arbitrary: we return the first 2xx/3xx status we encounter, and map iteration order is random, too.
return status
}
}
return "200" //!
}
func responseTypeName(resp *Response) string {
for contentType, mediadef := range resp.Content {
if contentType == "application/json" { //hack
schref := mediadef.Schema
if schref != nil {
if schref.Ref != "" {
return oasTypeRef(schref)
} else {
ts, err := convertOasType("", schref)
if err == nil {
return ts.Type //fixme
}
}
}
}
}
return ""
} | } | random_line_split |
import_openapi.go | package openapi
import (
"encoding/json"
"fmt"
"io/ioutil"
"path/filepath"
"regexp"
"strconv"
"strings"
"github.com/boynton/sadl"
"github.com/ghodss/yaml"
)
var EnumTypes bool = true
func IsValidFile(path string) bool {
_, err := Load(path)
return err == nil
}
func Load(path string) (*Model, error) {
data, err := ioutil.ReadFile(path)
if err != nil {
return nil, fmt.Errorf("Cannot read OpenAPI file: %v", err)
}
v3 := &Model{}
ext := filepath.Ext(path)
if ext == ".yaml" {
err = yaml.Unmarshal(data, &v3)
} else {
err = json.Unmarshal(data, &v3)
}
if err != nil {
return nil, err
}
return Validate(v3)
}
func | (paths []string, conf *sadl.Data) (*sadl.Model, error) {
if len(paths) != 1 {
return nil, fmt.Errorf("Cannot merge multiple OpenAPI files")
}
path := paths[0]
name := path
n := strings.LastIndex(name, "/")
// format := ""
if n >= 0 {
name = name[n+1:]
}
n = strings.LastIndex(name, ".")
if n >= 0 {
// format = name[n+1:]
name = name[:n]
name = strings.Replace(name, ".", "_", -1)
}
oas3, err := Load(path)
if err != nil {
return nil, err
}
model, err := oas3.ToSadl(name)
if err != nil {
return nil, fmt.Errorf("Cannot convert to SADL: %v", err)
}
//err = model.ConvertInlineEnums()
return model, err
}
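//Hypothetical usage sketch (the file name is an assumption, not part of this package):
//  model, err := Import([]string{"petstore.yaml"}, nil)
//  if err == nil { fmt.Println(sadl.Pretty(model)) }
//Import derives the schema name from the file's base name, with dots replaced by underscores.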
/*
func DetermineVersion(data []byte, format string) (string, error) {
var raw map[string]interface{}
var err error
switch format {
case "json":
err = json.Unmarshal(data, &raw)
case "yaml":
err = yaml.Unmarshal(data, &raw)
default:
err = fmt.Errorf("Unsupported file format: %q. Only \"json\" and \"yaml\" are supported.", format)
}
if err != nil {
return "", err
}
if v, ok := raw["openapi"]; ok {
if s, ok := v.(string); ok {
return s, nil
}
}
if v, ok := raw["swagger"]; ok {
if s, ok := v.(string); ok {
return s, nil
}
}
return "", fmt.Errorf("Cannot find an 'openapi' in the specified %s file to determine the version", format)
}
*/
/*
func xParse(data []byte, format string) (*Model, error) {
version, err := DetermineVersion(data, format)
if err != nil {
return nil, err
}
oas := &Oas{
source: version,
}
if strings.HasPrefix(version, "3.") {
oas.V3, err = oas3.Parse(data, format)
return oas, err
} else if strings.HasPrefix(version, "2.") {
v2, err := oas2.Parse(data, format)
if err == nil {
oas.V3, err = oas2.ConvertToV3(v2)
}
return oas, err
}
return nil, fmt.Errorf("Unsupported version of OpenAPI Spec: %s", version)
}
*/
var examples []*sadl.ExampleDef
var methods = []string{"GET", "PUT", "POST", "DELETE", "HEAD"} //to do: "PATCH", "OPTIONS", "TRACE"
func (model *Model) ToSadl(name string) (*sadl.Model, error) {
annotations := make(map[string]string)
examples = nil
annotations["x_openapi_version"] = model.OpenAPI
comment := model.Info.Description
if model.Info.Title != "" {
if sadl.IsSymbol(model.Info.Title) {
name = model.Info.Title
} else {
comment = model.Info.Title + " - " + comment
}
}
schema := &sadl.Schema{
Name: name,
Comment: comment,
Version: model.Info.Version,
}
for name, oasSchema := range model.Components.Schemas {
name = validSadlName(name, oasSchema)
if name == "" {
continue
}
var ts sadl.TypeSpec
var err error
comment := ""
tname := oasTypeRef(oasSchema)
if tname != "" {
if oasDef, ok := model.Components.Schemas[tname]; ok {
ts, err = convertOasType(tname, oasDef) //doesn't handle N levels
} else {
panic("hmm")
}
} else {
ts, err = convertOasType(name, oasSchema)
comment = oasSchema.Description
}
if err != nil {
return nil, err
}
td := &sadl.TypeDef{
TypeSpec: ts,
Name: name,
Comment: comment,
//annotations
}
schema.Types = append(schema.Types, td)
}
httpBindings := true
for tmpl, path := range model.Paths {
path2 := *path
for _, method := range methods {
op := getPathOperation(&path2, method)
if op != nil {
if strings.HasPrefix(tmpl, "x-") {
continue
}
if httpBindings {
hact, err := convertOasPath(tmpl, op, method)
if err != nil {
return nil, err
}
schema.Http = append(schema.Http, hact)
}
}
}
}
for _, server := range model.Servers {
annotations["x_server"] = server.URL
}
if model.Info.License != nil {
if model.Info.License.Name != "" {
annotations["x_license_name"] = model.Info.License.Name
}
if model.Info.License.URL != "" {
// schema.Annotations["x_license_url"] = oas.V3.Info.License.URL
annotations["x_license_url"] = model.Info.License.URL
}
}
if len(annotations) > 0 {
schema.Annotations = annotations
}
schema.Examples = examples
return sadl.NewModel(schema)
}
func validSadlName(name string, oasSchema *Schema) string {
if name == "Timestamp" {
if oasSchema.Type == "string" {
return ""
}
} else if name == "Decimal" {
return ""
}
return name
}
func oasTypeRef(oasSchema *Schema) string {
if oasSchema != nil && oasSchema.Ref != "" {
if strings.HasPrefix(oasSchema.Ref, "#/components/schemas/") {
return oasSchema.Ref[len("#/components/schemas/"):]
}
return oasSchema.Ref //?
}
return ""
}
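//e.g. oasTypeRef on a schema with Ref "#/components/schemas/Pet" yields "Pet";
//a non-empty Ref outside that prefix is returned as-is (hypothetical inputs, for illustration).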
func convertOasType(name string, oasSchema *Schema) (sadl.TypeSpec, error) {
var err error
var ts sadl.TypeSpec
if oasSchema.Example != nil {
ex := &sadl.ExampleDef{
Target: name,
Example: oasSchema.Example,
}
examples = append(examples, ex)
}
switch oasSchema.Type {
case "boolean":
ts.Type = "Bool"
case "string":
if oasSchema.Enum != nil {
//OAS defines element *descriptions* as the values, not symbolic identifiers.
//so we look for the case where all values look like identifiers, and call that an enum. Otherwise it is a string with accepted "values".
//perhaps the spirit of JSON Schema enums is just values, not what I think of as "enums", i.e. "a set of named values", per Wikipedia.
//still, with symbolic values, perhaps the intent is to use proper enums, if only JSON Schema had them.
isEnum := EnumTypes
var values []string
for _, val := range oasSchema.Enum {
if s, ok := val.(string); ok {
values = append(values, s)
if !sadl.IsSymbol(s) {
isEnum = false
}
} else {
return ts, fmt.Errorf("Error in OAS source: string enum value is not a string: %v", val)
}
}
if isEnum {
ts.Type = "Enum"
for _, sym := range values {
el := &sadl.EnumElementDef{
Symbol: sym,
}
ts.Elements = append(ts.Elements, el)
}
} else {
ts.Type = "String"
ts.Values = values
}
} else {
ts.Type = "String"
}
if ts.Type == "String" {
if oasSchema.Format == "uuid" {
ts.Type = "UUID"
} else if oasSchema.Format == "date-time" {
ts.Type = "Timestamp"
} else {
ts.Pattern = oasSchema.Pattern
if oasSchema.MinLength > 0 {
tmpMin := int64(oasSchema.MinLength)
ts.MinSize = &tmpMin
}
if oasSchema.MaxLength != nil {
tmpMax := int64(*oasSchema.MaxLength)
ts.MaxSize = &tmpMax
}
if oasSchema.Format != "" {
fmt.Println("NYI: String 'format':", oasSchema.Format)
}
}
}
case "array":
ts.Type = "Array"
if oasSchema.Items != nil {
if oasSchema.Items.Ref != "" {
ts.Items = oasTypeRef(oasSchema.Items)
} else {
its, err := convertOasType(name+".Items", oasSchema.Items)
if err == nil {
ts.Items = its.Type
}
}
}
//minsize, maxsize
//comment
case "number":
ts.Type = "Decimal"
if oasSchema.Min != nil {
ts.Min = sadl.NewDecimal(*oasSchema.Min)
}
if oasSchema.Max != nil {
ts.Max = sadl.NewDecimal(*oasSchema.Max)
}
case "integer":
switch oasSchema.Format {
case "int8":
ts.Type = "Int8"
case "int16":
ts.Type = "Int16"
case "int32":
ts.Type = "Int32"
case "int64":
ts.Type = "Int64"
default:
ts.Type = "Int64"
}
if oasSchema.Min != nil {
ts.Min = sadl.NewDecimal(*oasSchema.Min)
}
if oasSchema.Max != nil {
ts.Max = sadl.NewDecimal(*oasSchema.Max)
}
case "", "object":
ts.Type = "Struct"
if oasSchema.Properties != nil {
req := oasSchema.Required
for fname, fschema := range oasSchema.Properties {
fd := &sadl.StructFieldDef{
Name: fname,
Comment: fschema.Description,
}
if containsString(req, fname) {
fd.Required = true
}
fd.Type = oasTypeRef(fschema)
if fd.Type == "" {
fd.TypeSpec, err = convertOasType(name+"."+fname, fschema)
}
ts.Fields = append(ts.Fields, fd)
}
}
default:
fmt.Printf("oas type is %q\n", oasSchema.Type)
panic("oas type not handled")
}
return ts, err
}
func containsString(lst []string, val string) bool {
for _, s := range lst {
if s == val {
return true
}
}
return false
}
func capitalize(s string) string {
return strings.ToUpper(s[0:1]) + s[1:]
}
func uncapitalize(s string) string {
return strings.ToLower(s[0:1]) + s[1:]
}
func makeIdentifier(text string) string {
//the pattern is constant, so MustCompile cannot fail here (Compile's error was previously ignored)
return regexp.MustCompile("[^a-zA-Z_][^a-zA-Z_0-9]*").ReplaceAllString(text, "")
}
func convertOasPath(path string, op *Operation, method string) (*sadl.HttpDef, error) {
hact := &sadl.HttpDef{
Name: op.OperationId,
Path: path,
Method: method,
Comment: op.Summary,
}
if len(op.Tags) > 0 {
hact.Annotations = make(map[string]string)
//note: first tag is used as the "resource" name in SADL.
tmp := ""
rez := ""
for _, tag := range op.Tags {
if rez == "" {
rez = tag
} else if tmp == "" {
tmp = tag
} else {
tmp = tmp + "," + tag
}
}
hact.Resource = rez
if len(tmp) > 0 {
hact.Annotations["x_tags"] = tmp
}
}
var queries []string
for _, param := range op.Parameters {
name := makeIdentifier(param.Name)
spec := &sadl.HttpParamSpec{
StructFieldDef: sadl.StructFieldDef{
Name: name,
Comment: param.Description,
Required: param.Required,
},
}
switch param.In {
case "query":
spec.Query = param.Name
queries = append(queries, param.Name+"={"+name+"}")
case "path":
spec.Path = true
if !strings.Contains(path, "{"+name+"}") {
fmt.Println("WARNING: path param is not in path template:", path, name)
panic("here")
}
case "header":
spec.Header = param.Name
case "cookie":
return nil, fmt.Errorf("Cookie params NYI: %v", sadl.AsString(param))
}
spec.Type = oasTypeRef(param.Schema)
if spec.Type == "" {
if param.Schema != nil {
spec.Type = sadlPrimitiveType(param.Schema.Type)
}
if spec.Type == "Array" {
if param.Schema.Items == nil {
spec.Items = "Any"
} else {
schref := param.Schema.Items
switch schref.Type {
case "string":
spec.Items = "String"
default:
spec.Items = "Any"
}
}
}
if spec.Type == "Struct" {
panic("Whoops, that can't be right")
}
if param.Schema != nil && param.Schema.Enum != nil {
for _, val := range param.Schema.Enum {
if s, ok := val.(string); ok {
spec.Values = append(spec.Values, s)
} else {
return nil, fmt.Errorf("String enum values are not strings: %v", param.Schema.Enum)
}
}
}
} else {
}
hact.Inputs = append(hact.Inputs, spec)
}
if len(queries) > 0 {
hact.Path = hact.Path + "?" + strings.Join(queries, "&")
}
if hact.Method == "POST" || hact.Method == "PUT" || hact.Method == "PATCH" {
if op.RequestBody != nil {
for contentType, mediadef := range op.RequestBody.Content {
if contentType == "application/json" { //hack
bodyType := oasTypeRef(mediadef.Schema)
if bodyType != "" {
spec := &sadl.HttpParamSpec{
StructFieldDef: sadl.StructFieldDef{
TypeSpec: sadl.TypeSpec{
Type: bodyType,
},
Comment: op.RequestBody.Description,
Name: "body",
Required: op.RequestBody.Required,
},
}
hact.Inputs = append(hact.Inputs, spec)
}
}
}
}
}
//expected: if 200 is in the list, use that
//else: if 201 is in the list, use that
//else: ? find a likely candidate.
var expectedStatus string = "default"
for status := range op.Responses {
if strings.HasPrefix(status, "2") || strings.HasPrefix(status, "3") {
expectedStatus = status
break
}
}
// if expectedStatus == "default" {
// expectedStatus = "200" //?
// }
if expectedStatus != "" {
eparam := op.Responses[expectedStatus]
if eparam == nil {
return nil, fmt.Errorf("no response entity type provided for operation %q", op.OperationId)
}
var err error
code := 200
if expectedStatus != "default" && !strings.Contains(expectedStatus, "X") {
code, err = strconv.Atoi(expectedStatus)
if err != nil {
return nil, err
}
}
ex := &sadl.HttpExpectedSpec{
Status: int32(code),
Comment: eparam.Description,
}
for header, def := range eparam.Headers {
param := &sadl.HttpParamSpec{}
param.Header = header
param.Comment = def.Description
s := param.Header
//most app-defined headers start with "x-" or "X-". Strip that off for a more reasonable variable name.
if strings.HasPrefix(param.Header, "x-") || strings.HasPrefix(param.Header, "X-") {
s = s[2:]
}
param.Name = makeIdentifier(s)
schref := def.Schema
if schref != nil {
if schref.Ref != "" {
param.Type = oasTypeRef(schref)
} else {
param.TypeSpec, err = convertOasType(hact.Name+".Expected."+param.Name, schref) //fix: example
}
ex.Outputs = append(ex.Outputs, param)
}
}
for contentType, mediadef := range eparam.Content {
if contentType == "application/json" { //hack
result := &sadl.HttpParamSpec{}
result.Name = "body"
schref := mediadef.Schema
if schref != nil {
if schref.Ref != "" {
result.Type = oasTypeRef(schref)
} else {
result.TypeSpec, err = convertOasType(hact.Name+".Expected.payload", schref) //fix: example
}
ex.Outputs = append(ex.Outputs, result)
} else {
fmt.Println("HTTP Action has no expected result type:", sadl.Pretty(eparam))
}
}
}
hact.Expected = ex
}
for status, param := range op.Responses {
if status != expectedStatus {
//the status can be "default", "4XX" (where 'X' is a wildcard), or a concrete code like "404"; the concrete code takes precedence.
//for SADL, not specifying the response is a bug, so "default" is mapped to status 0 here, and wildcard codes are rejected below.
if status == "default" {
status = "0"
} else if strings.Contains(status, "X") {
panic("wildcard response codes not supported")
}
code, err := strconv.Atoi(status)
if err != nil {
return nil, fmt.Errorf("Invalid status code: %q", status)
}
ex := &sadl.HttpExceptionSpec{
Status: int32(code),
Comment: param.Description,
}
//FIXME: sadl should allow response headers for exceptions, also.
for contentType, mediadef := range param.Content {
if contentType == "application/json" { //hack
schref := mediadef.Schema
if schref != nil {
if schref.Ref != "" {
ex.Type = oasTypeRef(schref)
} else {
panic("inline response types not yet supported")
}
break
}
}
}
hact.Exceptions = append(hact.Exceptions, ex)
}
}
//tags: add `x-tags="one,two"` annotation
return hact, nil
}
func getPathOperation(oasPathItem *PathItem, method string) *Operation {
switch method {
case "GET":
return oasPathItem.Get
case "PUT":
// fmt.Println("xxxxxxxxxxxxxxxx----!!!!", method, oasPathItem.OperationId)
// panic("here")
return oasPathItem.Put
case "POST":
return oasPathItem.Post
case "DELETE":
return oasPathItem.Delete
case "HEAD":
return oasPathItem.Head
/*
case "PATCH":
return oasPathItem.Patch
case "OPTIONS":
return oasPathItem.Options
case "TRACE":
return oasPathItem.Trace
case "CONNECT":
return oasPathItem.Connect
*/
}
return nil
}
func guessOperationName(op *Operation, method string) string {
defaultStatus := guessDefaultResponseCode(op)
switch method {
case "GET":
resp := op.Responses[defaultStatus]
if resp == nil {
resp = op.Responses["default"]
}
for contentType, mediadef := range resp.Content {
if contentType == "application/json" {
schref := mediadef.Schema
if schref != nil {
if schref.Ref != "" {
entityType := oasTypeRef(schref)
return entityType
} else {
entityType := sadlPrimitiveType(schref.Type)
if entityType == "Array" {
itemType := schref.Items
if itemType.Ref != "" {
itemTypeName := oasTypeRef(itemType)
entityType = "ArrayOf" + itemTypeName
}
}
return entityType
}
} else {
fmt.Println("HTTP Action has no expected result type:", sadl.Pretty(resp))
}
}
}
}
return ""
}
func sadlPrimitiveType(name string) string {
switch name {
case "string":
return "String"
case "number":
return "Decimal"
case "integer":
return "Int32"
case "array":
return "Array"
case "object":
return "Struct"
case "boolean":
return "Bool"
default:
fmt.Println("sadlPrimitiveType for:", name)
panic("what?")
}
}
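//Illustrative mapping check (hypothetical helper, not part of the original file); it simply
//exercises sadlPrimitiveType on the JSON Schema primitive names it accepts:
func exampleSadlPrimitiveTypes() []string {
	return []string{
		sadlPrimitiveType("string"),  // "String"
		sadlPrimitiveType("number"),  // "Decimal"
		sadlPrimitiveType("integer"), // "Int32"
		sadlPrimitiveType("boolean"), // "Bool"
	}
}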
func findTypeDef(schema *sadl.Schema, name string) *sadl.TypeDef {
for _, td := range schema.Types {
if td.Name == name {
return td
}
}
return nil
}
func guessDefaultResponseCode(op *Operation) string {
for status := range op.Responses {
if strings.HasPrefix(status, "2") || strings.HasPrefix(status, "3") {
//somewhat arbitrary: we return the first 2xx/3xx status we encounter, and map iteration order is random, too.
return status
}
}
return "200" //!
}
func responseTypeName(resp *Response) string {
for contentType, mediadef := range resp.Content {
if contentType == "application/json" { //hack
schref := mediadef.Schema
if schref != nil {
if schref.Ref != "" {
return oasTypeRef(schref)
} else {
ts, err := convertOasType("", schref)
if err == nil {
return ts.Type //fixme
}
}
}
}
}
return ""
}
| Import | identifier_name |
import_openapi.go | package openapi
import (
"encoding/json"
"fmt"
"io/ioutil"
"path/filepath"
"regexp"
"strconv"
"strings"
"github.com/boynton/sadl"
"github.com/ghodss/yaml"
)
var EnumTypes bool = true
func IsValidFile(path string) bool {
_, err := Load(path)
return err == nil
}
func Load(path string) (*Model, error) {
data, err := ioutil.ReadFile(path)
if err != nil {
return nil, fmt.Errorf("Cannot read OpenAPI file: %v", err)
}
v3 := &Model{}
ext := filepath.Ext(path)
if ext == ".yaml" {
err = yaml.Unmarshal(data, &v3)
} else {
err = json.Unmarshal(data, &v3)
}
if err != nil {
return nil, err
}
return Validate(v3)
}
func Import(paths []string, conf *sadl.Data) (*sadl.Model, error) |
/*
func DetermineVersion(data []byte, format string) (string, error) {
var raw map[string]interface{}
var err error
switch format {
case "json":
err = json.Unmarshal(data, &raw)
case "yaml":
err = yaml.Unmarshal(data, &raw)
default:
err = fmt.Errorf("Unsupported file format: %q. Only \"json\" and \"yaml\" are supported.", format)
}
if err != nil {
return "", err
}
if v, ok := raw["openapi"]; ok {
if s, ok := v.(string); ok {
return s, nil
}
}
if v, ok := raw["swagger"]; ok {
if s, ok := v.(string); ok {
return s, nil
}
}
return "", fmt.Errorf("Cannot find an 'openapi' in the specified %s file to determine the version", format)
}
*/
/*
func xParse(data []byte, format string) (*Model, error) {
version, err := DetermineVersion(data, format)
if err != nil {
return nil, err
}
oas := &Oas{
source: version,
}
if strings.HasPrefix(version, "3.") {
oas.V3, err = oas3.Parse(data, format)
return oas, err
} else if strings.HasPrefix(version, "2.") {
v2, err := oas2.Parse(data, format)
if err == nil {
oas.V3, err = oas2.ConvertToV3(v2)
}
return oas, err
}
return nil, fmt.Errorf("Unsupported version of OpenAPI Spec: %s", version)
}
*/
var examples []*sadl.ExampleDef
var methods = []string{"GET", "PUT", "POST", "DELETE", "HEAD"} //to do: "PATCH", "OPTIONS", "TRACE"
func (model *Model) ToSadl(name string) (*sadl.Model, error) {
annotations := make(map[string]string)
examples = nil
annotations["x_openapi_version"] = model.OpenAPI
comment := model.Info.Description
if model.Info.Title != "" {
if sadl.IsSymbol(model.Info.Title) {
name = model.Info.Title
} else {
comment = model.Info.Title + " - " + comment
}
}
schema := &sadl.Schema{
Name: name,
Comment: comment,
Version: model.Info.Version,
}
for name, oasSchema := range model.Components.Schemas {
name = validSadlName(name, oasSchema)
if name == "" {
continue
}
var ts sadl.TypeSpec
var err error
comment := ""
tname := oasTypeRef(oasSchema)
if tname != "" {
if oasDef, ok := model.Components.Schemas[tname]; ok {
ts, err = convertOasType(tname, oasDef) //doesn't handle N levels
} else {
panic("hmm")
}
} else {
ts, err = convertOasType(name, oasSchema)
comment = oasSchema.Description
}
if err != nil {
return nil, err
}
td := &sadl.TypeDef{
TypeSpec: ts,
Name: name,
Comment: comment,
//annotations
}
schema.Types = append(schema.Types, td)
}
httpBindings := true
for tmpl, path := range model.Paths {
path2 := *path
for _, method := range methods {
op := getPathOperation(&path2, method)
if op != nil {
if strings.HasPrefix(tmpl, "x-") {
continue
}
if httpBindings {
hact, err := convertOasPath(tmpl, op, method)
if err != nil {
return nil, err
}
schema.Http = append(schema.Http, hact)
}
}
}
}
for _, server := range model.Servers {
annotations["x_server"] = server.URL
}
if model.Info.License != nil {
if model.Info.License.Name != "" {
annotations["x_license_name"] = model.Info.License.Name
}
if model.Info.License.URL != "" {
// schema.Annotations["x_license_url"] = oas.V3.Info.License.URL
annotations["x_license_url"] = model.Info.License.URL
}
}
if len(annotations) > 0 {
schema.Annotations = annotations
}
schema.Examples = examples
return sadl.NewModel(schema)
}
func validSadlName(name string, oasSchema *Schema) string {
if name == "Timestamp" {
if oasSchema.Type == "string" {
return ""
}
} else if name == "Decimal" {
return ""
}
return name
}
func oasTypeRef(oasSchema *Schema) string {
if oasSchema != nil && oasSchema.Ref != "" {
if strings.HasPrefix(oasSchema.Ref, "#/components/schemas/") {
return oasSchema.Ref[len("#/components/schemas/"):]
}
return oasSchema.Ref //?
}
return ""
}
func convertOasType(name string, oasSchema *Schema) (sadl.TypeSpec, error) {
var err error
var ts sadl.TypeSpec
if oasSchema.Example != nil {
ex := &sadl.ExampleDef{
Target: name,
Example: oasSchema.Example,
}
examples = append(examples, ex)
}
switch oasSchema.Type {
case "boolean":
ts.Type = "Bool"
case "string":
if oasSchema.Enum != nil {
//OAS defines element *descriptions* as the values, not symbolic identifiers.
//so we look for the case where all values look like identifiers, and call that an enum. Otherwise it is a string with accepted "values".
//perhaps the spirit of JSON Schema enums is just values, not what I think of as "enums", i.e. "a set of named values", per Wikipedia.
//still, with symbolic values, perhaps the intent is to use proper enums, if only JSON Schema had them.
isEnum := EnumTypes
var values []string
for _, val := range oasSchema.Enum {
if s, ok := val.(string); ok {
values = append(values, s)
if !sadl.IsSymbol(s) {
isEnum = false
}
} else {
return ts, fmt.Errorf("Error in OAS source: string enum value is not a string: %v", val)
}
}
if isEnum {
ts.Type = "Enum"
for _, sym := range values {
el := &sadl.EnumElementDef{
Symbol: sym,
}
ts.Elements = append(ts.Elements, el)
}
} else {
ts.Type = "String"
ts.Values = values
}
} else {
ts.Type = "String"
}
if ts.Type == "String" {
if oasSchema.Format == "uuid" {
ts.Type = "UUID"
} else if oasSchema.Format == "date-time" {
ts.Type = "Timestamp"
} else {
ts.Pattern = oasSchema.Pattern
if oasSchema.MinLength > 0 {
tmpMin := int64(oasSchema.MinLength)
ts.MinSize = &tmpMin
}
if oasSchema.MaxLength != nil {
tmpMax := int64(*oasSchema.MaxLength)
ts.MaxSize = &tmpMax
}
if oasSchema.Format != "" {
fmt.Println("NYI: String 'format':", oasSchema.Format)
}
}
}
case "array":
ts.Type = "Array"
if oasSchema.Items != nil {
if oasSchema.Items.Ref != "" {
ts.Items = oasTypeRef(oasSchema.Items)
} else {
its, err := convertOasType(name+".Items", oasSchema.Items)
if err == nil {
ts.Items = its.Type
}
}
}
//minsize, maxsize
//comment
case "number":
ts.Type = "Decimal"
if oasSchema.Min != nil {
ts.Min = sadl.NewDecimal(*oasSchema.Min)
}
if oasSchema.Max != nil {
ts.Max = sadl.NewDecimal(*oasSchema.Max)
}
case "integer":
switch oasSchema.Format {
case "int8":
ts.Type = "Int8"
case "int16":
ts.Type = "Int16"
case "int32":
ts.Type = "Int32"
case "int64":
ts.Type = "Int64"
default:
ts.Type = "Int64"
}
if oasSchema.Min != nil {
ts.Min = sadl.NewDecimal(*oasSchema.Min)
}
if oasSchema.Max != nil {
ts.Max = sadl.NewDecimal(*oasSchema.Max)
}
case "", "object":
ts.Type = "Struct"
if oasSchema.Properties != nil {
req := oasSchema.Required
for fname, fschema := range oasSchema.Properties {
fd := &sadl.StructFieldDef{
Name: fname,
Comment: fschema.Description,
}
if containsString(req, fname) {
fd.Required = true
}
fd.Type = oasTypeRef(fschema)
if fd.Type == "" {
fd.TypeSpec, err = convertOasType(name+"."+fname, fschema)
}
ts.Fields = append(ts.Fields, fd)
}
}
default:
fmt.Printf("oas type is %q\n", oasSchema.Type)
panic("oas type not handled")
}
return ts, err
}
func containsString(lst []string, val string) bool {
for _, s := range lst {
if s == val {
return true
}
}
return false
}
func capitalize(s string) string {
return strings.ToUpper(s[0:1]) + s[1:]
}
func uncapitalize(s string) string {
return strings.ToLower(s[0:1]) + s[1:]
}
func makeIdentifier(text string) string {
//the pattern is constant, so MustCompile cannot fail here (Compile's error was previously ignored)
return regexp.MustCompile("[^a-zA-Z_][^a-zA-Z_0-9]*").ReplaceAllString(text, "")
}
func convertOasPath(path string, op *Operation, method string) (*sadl.HttpDef, error) {
hact := &sadl.HttpDef{
Name: op.OperationId,
Path: path,
Method: method,
Comment: op.Summary,
}
if len(op.Tags) > 0 {
hact.Annotations = make(map[string]string)
//note: first tag is used as the "resource" name in SADL.
tmp := ""
rez := ""
for _, tag := range op.Tags {
if rez == "" {
rez = tag
} else if tmp == "" {
tmp = tag
} else {
tmp = tmp + "," + tag
}
}
hact.Resource = rez
if len(tmp) > 0 {
hact.Annotations["x_tags"] = tmp
}
}
var queries []string
for _, param := range op.Parameters {
name := makeIdentifier(param.Name)
spec := &sadl.HttpParamSpec{
StructFieldDef: sadl.StructFieldDef{
Name: name,
Comment: param.Description,
Required: param.Required,
},
}
switch param.In {
case "query":
spec.Query = param.Name
queries = append(queries, param.Name+"={"+name+"}")
case "path":
spec.Path = true
if !strings.Contains(path, "{"+name+"}") {
fmt.Println("WARNING: path param is not in path template:", path, name)
panic("here")
}
case "header":
spec.Header = param.Name
case "cookie":
return nil, fmt.Errorf("Cookie params NYI: %v", sadl.AsString(param))
}
spec.Type = oasTypeRef(param.Schema)
if spec.Type == "" {
if param.Schema != nil {
spec.Type = sadlPrimitiveType(param.Schema.Type)
}
if spec.Type == "Array" {
if param.Schema.Items == nil {
spec.Items = "Any"
} else {
schref := param.Schema.Items
switch schref.Type {
case "string":
spec.Items = "String"
default:
spec.Items = "Any"
}
}
}
if spec.Type == "Struct" {
panic("Whoops, that can't be right")
}
if param.Schema != nil && param.Schema.Enum != nil {
for _, val := range param.Schema.Enum {
if s, ok := val.(string); ok {
spec.Values = append(spec.Values, s)
} else {
return nil, fmt.Errorf("String enum values are not strings: %v", param.Schema.Enum)
}
}
}
} else {
}
hact.Inputs = append(hact.Inputs, spec)
}
if len(queries) > 0 {
hact.Path = hact.Path + "?" + strings.Join(queries, "&")
}
if hact.Method == "POST" || hact.Method == "PUT" || hact.Method == "PATCH" {
if op.RequestBody != nil {
for contentType, mediadef := range op.RequestBody.Content {
if contentType == "application/json" { //hack
bodyType := oasTypeRef(mediadef.Schema)
if bodyType != "" {
spec := &sadl.HttpParamSpec{
StructFieldDef: sadl.StructFieldDef{
TypeSpec: sadl.TypeSpec{
Type: bodyType,
},
Comment: op.RequestBody.Description,
Name: "body",
Required: op.RequestBody.Required,
},
}
hact.Inputs = append(hact.Inputs, spec)
}
}
}
}
}
//expected: if 200 is in the list, use that
//else: if 201 is in the list, use that
//else: ? find a likely candidate.
var expectedStatus string = "default"
for status := range op.Responses {
if strings.HasPrefix(status, "2") || strings.HasPrefix(status, "3") {
expectedStatus = status
break
}
}
// if expectedStatus == "default" {
// expectedStatus = "200" //?
// }
if expectedStatus != "" {
eparam := op.Responses[expectedStatus]
if eparam == nil {
return nil, fmt.Errorf("no response entity type provided for operation %q", op.OperationId)
}
var err error
code := 200
if expectedStatus != "default" && !strings.Contains(expectedStatus, "X") {
code, err = strconv.Atoi(expectedStatus)
if err != nil {
return nil, err
}
}
ex := &sadl.HttpExpectedSpec{
Status: int32(code),
Comment: eparam.Description,
}
for header, def := range eparam.Headers {
param := &sadl.HttpParamSpec{}
param.Header = header
param.Comment = def.Description
s := param.Header
//most app-defined headers start with "x-" or "X-". Strip that off for a more reasonable variable name.
if strings.HasPrefix(param.Header, "x-") || strings.HasPrefix(param.Header, "X-") {
s = s[2:]
}
param.Name = makeIdentifier(s)
schref := def.Schema
if schref != nil {
if schref.Ref != "" {
param.Type = oasTypeRef(schref)
} else {
param.TypeSpec, err = convertOasType(hact.Name+".Expected."+param.Name, schref) //fix: example
}
ex.Outputs = append(ex.Outputs, param)
}
}
for contentType, mediadef := range eparam.Content {
if contentType == "application/json" { //hack
result := &sadl.HttpParamSpec{}
result.Name = "body"
schref := mediadef.Schema
if schref != nil {
if schref.Ref != "" {
result.Type = oasTypeRef(schref)
} else {
result.TypeSpec, err = convertOasType(hact.Name+".Expected.payload", schref) //fix: example
}
ex.Outputs = append(ex.Outputs, result)
} else {
fmt.Println("HTTP Action has no expected result type:", sadl.Pretty(eparam))
}
}
}
hact.Expected = ex
}
for status, param := range op.Responses {
if status != expectedStatus {
//the status can be "default", "4XX" (where 'X' is a wildcard), or a concrete code like "404"; the concrete code takes precedence.
//for SADL, not specifying the response is a bug, so "default" is mapped to status 0 here, and wildcard codes are rejected below.
if status == "default" {
status = "0"
} else if strings.Contains(status, "X") {
panic("wildcard response codes not supported")
}
code, err := strconv.Atoi(status)
if err != nil {
return nil, fmt.Errorf("Invalid status code: %q", status)
}
ex := &sadl.HttpExceptionSpec{
Status: int32(code),
Comment: param.Description,
}
//FIXME: sadl should allow response headers for exceptions, also.
for contentType, mediadef := range param.Content {
if contentType == "application/json" { //hack
schref := mediadef.Schema
if schref != nil {
if schref.Ref != "" {
ex.Type = oasTypeRef(schref)
} else {
panic("inline response types not yet supported")
}
break
}
}
}
hact.Exceptions = append(hact.Exceptions, ex)
}
}
//tags: add `x-tags="one,two"` annotation
return hact, nil
}
func getPathOperation(oasPathItem *PathItem, method string) *Operation {
switch method {
case "GET":
return oasPathItem.Get
case "PUT":
// fmt.Println("xxxxxxxxxxxxxxxx----!!!!", method, oasPathItem.OperationId)
// panic("here")
return oasPathItem.Put
case "POST":
return oasPathItem.Post
case "DELETE":
return oasPathItem.Delete
case "HEAD":
return oasPathItem.Head
/*
case "PATCH":
return oasPathItem.Patch
case "OPTIONS":
return oasPathItem.Options
case "TRACE":
return oasPathItem.Trace
case "CONNECT":
return oasPathItem.Connect
*/
}
return nil
}
func guessOperationName(op *Operation, method string) string {
defaultStatus := guessDefaultResponseCode(op)
switch method {
case "GET":
resp := op.Responses[defaultStatus]
if resp == nil {
resp = op.Responses["default"]
}
for contentType, mediadef := range resp.Content {
if contentType == "application/json" {
schref := mediadef.Schema
if schref != nil {
if schref.Ref != "" {
entityType := oasTypeRef(schref)
return entityType
} else {
entityType := sadlPrimitiveType(schref.Type)
if entityType == "Array" {
itemType := schref.Items
if itemType.Ref != "" {
itemTypeName := oasTypeRef(itemType)
entityType = "ArrayOf" + itemTypeName
}
}
return entityType
}
} else {
fmt.Println("HTTP Action has no expected result type:", sadl.Pretty(resp))
}
}
}
}
return ""
}
func sadlPrimitiveType(name string) string {
switch name {
case "string":
return "String"
case "number":
return "Decimal"
case "integer":
return "Int32"
case "array":
return "Array"
case "object":
return "Struct"
case "boolean":
return "Bool"
default:
fmt.Println("sadlPrimitiveType for:", name)
panic("what?")
}
}
func findTypeDef(schema *sadl.Schema, name string) *sadl.TypeDef {
for _, td := range schema.Types {
if td.Name == name {
return td
}
}
return nil
}
func guessDefaultResponseCode(op *Operation) string {
for status := range op.Responses {
if strings.HasPrefix(status, "2") || strings.HasPrefix(status, "3") {
//somewhat arbitrary: we return the first 2xx/3xx status we encounter, and map iteration order is random, too.
return status
}
}
return "200" //!
}
func responseTypeName(resp *Response) string {
for contentType, mediadef := range resp.Content {
if contentType == "application/json" { //hack
schref := mediadef.Schema
if schref != nil {
if schref.Ref != "" {
return oasTypeRef(schref)
} else {
ts, err := convertOasType("", schref)
if err == nil {
return ts.Type //fixme
}
}
}
}
}
return ""
}
| {
if len(paths) != 1 {
return nil, fmt.Errorf("Cannot merge multiple OpenAPI files")
}
path := paths[0]
name := path
n := strings.LastIndex(name, "/")
// format := ""
if n >= 0 {
name = name[n+1:]
}
n = strings.LastIndex(name, ".")
if n >= 0 {
// format = name[n+1:]
name = name[:n]
name = strings.Replace(name, ".", "_", -1)
}
oas3, err := Load(path)
if err != nil {
return nil, err
}
model, err := oas3.ToSadl(name)
if err != nil {
return nil, fmt.Errorf("Cannot convert to SADL: %v", err)
}
//err = model.ConvertInlineEnums()
return model, err
} | identifier_body |
ser.rs | #![allow(unused)]
#![warn(unused_must_use)]
use crate::{
ser::{Map, Seq, Serialize, ValueView},
Result,
};
use ::std::io::{self, Write as _};
/// Serialize any serializable type into a CBOR byte sequence.
///
/// ```rust
/// use miniserde_ditto::{cbor, Serialize};
///
/// #[derive(Serialize, Debug)]
/// struct Example {
/// code: u32,
/// message: String,
/// }
///
/// fn main() {
/// let example = Example {
/// code: 200,
/// message: "Reminiscent of Serde".to_owned(),
/// };
///
/// let bytes = &cbor::to_vec(&example).unwrap()[..];
/// println!("{:#x?}", bytes);
/// assert_eq!(bytes, &[
/// 0xa2, // 2-long map
///
/// 0x64, // 4-long str
/// b'c', b'o', b'd', b'e',
/// 0x18, // positive u8 > 24.
/// 0xc8, // 200 = 0xc8
///
/// 0x67, // 7-long str
/// b'm', b'e', b's', b's', b'a', b'g', b'e',
/// 0x74, // str of length: 0x14 = 20.
/// b'R', b'e', b'm', b'i', b'n', b'i', b's', b'c', b'e', b'n', b't',
/// b' ', b'o', b'f', b' ',
/// b'S', b'e', b'r', b'd', b'e',
/// ][..]);
/// }
/// ```
pub fn to_vec<T: Serialize>(ref value: T) -> Result<Vec<u8>> {
let mut v = vec![];
match to_writer(&mut v, &value) {
Ok(()) => Ok(v),
Err(None) => Err(crate::Error),
Err(Some(io_err)) => unreachable!("IO failure on a Vec: {}", io_err),
}
}
struct Serializer<'a> {
stack: Vec<Layer<'a>>,
}
| }
impl<'a> Drop for Serializer<'a> {
fn drop(&mut self) {
// Drop layers in reverse order.
while !self.stack.is_empty() {
self.stack.pop();
}
}
}
#[allow(nonstandard_style)]
struct write_u64 {
major: u8,
v: u64,
}
impl write_u64 {
fn into(self, out: &'_ mut (dyn io::Write)) -> io::Result<()> {
let Self { major, v: value } = self;
let mask = major << 5;
macro_rules! with_uNs {( $($uN:ident)<* ) => ({
mod c {
$(
pub mod $uN { pub const MAX: u64 = ::core::$uN::MAX as _; }
)*
pub mod u8 { pub const MAX: u64 = ::core::u8::MAX as _; }
}
const SMALL_U8_MAX: u64 = 0x17;
#[allow(nonstandard_style)]
enum MaskFor {
u8 = (SMALL_U8_MAX + 1) as _,
$($uN),*
}
match value {
0 ..= SMALL_U8_MAX => out.write_all(&[mask | (value as u8)]),
0 ..= c::u8::MAX => out.write_all(&[
mask | (MaskFor::u8 as u8),
value as u8,
]),
$(
0 ..= c::$uN::MAX => {
let value = value as $uN;
let ref mut buf = [0; 1 + ::core::mem::size_of::<$uN>()];
buf[0] = mask | (MaskFor::$uN as u8);
buf[1 ..].copy_from_slice(&value.to_be_bytes());
out.write_all(buf)
},
)*
_ => unreachable!(),
}
})}
with_uNs!(u16 < u32 < u64)
}
}
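// Worked examples of the header encoding above (hypothetical values, major 0 = unsigned int):
//   v = 10     -> one byte:    0x0a                     (value fits in the 5 low bits)
//   v = 200    -> two bytes:   0x18 0xc8                (u8 follows the type byte)
//   v = 300    -> three bytes: 0x19 0x01 0x2c           (big-endian u16 follows)
//   v = 70_000 -> five bytes:  0x1a 0x00 0x01 0x11 0x70 (big-endian u32 follows)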
/// Serialize any serializable type as a CBOR byte sequence into a
/// [`Write`][io::Write]able sink.
///
/// Returns:
/// - `Ok(())` on success.
/// - `Err(Some(io_error))` on I/O failure.
/// - `Err(None)` on serialization error (unrepresentable integer).
pub fn to_writer<'value>(
out: &'_ mut dyn io::Write,
value: &'value dyn Serialize,
) -> Result<(), Option<io::Error>> {
// Borrow-checker-friendly "closure"
#[cfg_attr(rustfmt, rustfmt::skip)]
macro_rules! write { ($bytes:expr) => ({
out.write_all($bytes).map_err(Some)
})}
// Use a manual stack to avoid (stack-allocated) recursion.
let mut stack: Vec<Layer<'value>> = vec![Layer::Single(value)];
// where:
enum Layer<'value> {
Seq(Box<dyn Seq<'value> + 'value>),
Map(Box<dyn Map<'value> + 'value>),
Single(&'value dyn Serialize),
}
while let Some(last) = stack.last_mut() {
let view: ValueView<'value> = match last {
&mut Layer::Single(value) => {
let view = value.view();
drop(stack.pop());
view
}
Layer::Seq(seq) => {
match seq.next() {
Some(value) => stack.push(Layer::Single(value)),
None => drop(stack.pop()),
}
continue;
}
Layer::Map(map) => {
match map.next() {
Some((key, value)) => {
stack.push(Layer::Single(value));
stack.push(Layer::Single(key));
}
None => drop(stack.pop()),
}
continue;
}
};
match view {
ValueView::Null => write!(&[0xf6])?,
ValueView::Bool(b) => write!(&[0xf4 | (b as u8)])?,
ValueView::Str(s) => {
write_u64 {
major: 3,
v: s.len() as u64,
}
.into(out)?;
write!(s.as_bytes())?;
}
ValueView::Bytes(bs) => {
write_u64 {
major: 2,
v: bs.len() as u64,
}
.into(out)?;
write!(&*bs)?;
}
ValueView::Int(i) => {
const MIN: i128 = -(1_i128 << 64);
const MAX: i128 = ::core::u64::MAX as _;
match i {
MIN..=-1 => write_u64 {
major: 1,
v: (-(i + 1)) as u64,
}
.into(out)?,
0..=MAX => write_u64 {
major: 0,
v: i as u64,
}
.into(out)?,
_ => err!("Cannot serialize integer {:?} as CBOR: out of range", i),
}
}
ValueView::F64(f) if f.is_infinite() => write!(if f.is_sign_positive() {
&[0xf9, 0x7c, 0x00]
} else {
&[0xf9, 0xfc, 0x00]
})?,
ValueView::F64(f) if f.is_nan() => {
write!(&[0xf9, 0x7e, 0x00])?;
}
ValueView::F64(f) => {
// Finite float.
let f_16;
let f_32;
match () {
_case
if {
f_16 = ::half::f16::from_f64(f);
f64::from(f_16) == f
} =>
{
let ref mut buf = [0xf9, 0, 0];
buf[1..].copy_from_slice(&f_16.to_bits().to_be_bytes());
write!(buf)?;
}
_case
if {
f_32 = f as f32;
f64::from(f_32) == f
} =>
{
let ref mut buf = [0xfa, 0, 0, 0, 0];
buf[1..].copy_from_slice(&f_32.to_bits().to_be_bytes());
write!(buf)?;
}
_default => {
let ref mut buf = [0xfb, 0, 0, 0, 0, 0, 0, 0, 0];
buf[1..].copy_from_slice(&f.to_bits().to_be_bytes());
write!(buf)?;
}
}
}
ValueView::Seq(mut seq) => {
let count = seq.remaining();
write_u64 {
major: 4,
v: count as _,
}
.into(out)?;
stack.push(Layer::Seq(seq));
}
ValueView::Map(mut map) => {
let count = map.remaining();
write_u64 {
major: 5,
v: count as _,
}
.into(out)?;
stack.push(Layer::Map(map));
}
}
}
Ok(())
}
#[cfg(test)]
mod tests {
//! Most of these tests have been taken from
//! https://github.com/pyfisch/cbor/blob/a218403a52e60c991313f429e4acc05cce81ce25/tests/ser.rs
use super::*;
use crate::{
cbor::{value::*, *},
Serialize,
};
#[cfg_attr(rustfmt, rustfmt::skip)]
macro_rules! assert_eq_hex {(
$left:expr,
$right:expr $(,)?
) => (
match (&$left[..], &$right[..]) {
(ref left, ref right) => {
if <[u8] as ::core::cmp::PartialEq>::ne(left, right) {
panic!(
"assertion failed: (`{}` == `{}`)\n{}]",
stringify!($left),
stringify!($right),
(0..left.len().max(right.len()))
.map(|i| match (left.get(i), right.get(i)) {
(Some(l), Some(r)) => format!(
" {:01}|{:02x} – {:01}|{:02x},\n",
l >> 5, l & 0x1f,
r >> 5, r & 0x1f
),
(Some(l), _) =>
format!(" {:01}|{:02x} - ____,\n", l >> 5, l & 0x1f),
(_, Some(r)) =>
format!("____ - {:01}|{:02x},\n", r >> 5, r & 0x1f),
_ => unreachable!(),
})
.collect::<String>(),
);
}
}
}
)}
#[test]
fn test_str() {
serialize_and_compare("foobar", b"ffoobar");
}
#[test]
fn test_list() {
serialize_and_compare(&[1, 2, 3][..], b"\x83\x01\x02\x03");
}
#[test]
fn test_float() {
serialize_and_compare(12.3f64, b"\xfb@(\x99\x99\x99\x99\x99\x9a");
}
#[test]
fn test_integer() {
// u8
serialize_and_compare(24, b"\x18\x18");
// i8
serialize_and_compare(-5, b"\x24");
// i16
serialize_and_compare(-300, b"\x39\x01\x2b");
// i32
serialize_and_compare(-23567997, b"\x3a\x01\x67\x9e\x7c");
// u64
serialize_and_compare(::core::u64::MAX, b"\x1b\xff\xff\xff\xff\xff\xff\xff\xff");
}
fn serialize_and_compare<T: Serialize>(value: T, expected: &[u8]) {
assert_eq_hex!(&to_vec(&value).unwrap()[..], expected,);
}
mod std {
use super::*;
use ::std::collections::BTreeMap;
#[test]
fn test_string() {
let value = "foobar".to_owned();
assert_eq_hex!(&to_vec(&value).unwrap()[..], b"ffoobar");
}
#[test]
fn test_list() {
let value = vec![1, 2, 3];
assert_eq_hex!(&to_vec(&value).unwrap()[..], b"\x83\x01\x02\x03");
}
#[test]
fn test_list_strings() {
let value = vec!["1", "2", "3"];
assert_eq_hex!(&to_vec(&value).unwrap()[..], b"\x83\x611\x612\x613");
}
#[test]
fn test_object() {
use ::std::collections::HashMap;
let mut object = HashMap::new();
object.insert("a".to_owned(), "A".to_owned());
object.insert("b".to_owned(), "B".to_owned());
object.insert("c".to_owned(), "C".to_owned());
object.insert("d".to_owned(), "D".to_owned());
object.insert("e".to_owned(), "E".to_owned());
let vec = to_vec(&object).unwrap();
let test_object = from_slice(&vec[..]).unwrap();
assert_eq!(object, test_object);
}
#[test]
fn test_object_list_keys() {
let mut object = BTreeMap::new();
object.insert(vec![0i64], ());
object.insert(vec![100i64], ());
object.insert(vec![-1i64], ());
object.insert(vec![-2i64], ());
object.insert(vec![0i64, 0i64], ());
object.insert(vec![0i64, -1i64], ());
let vec = to_vec(&to_value(&object).unwrap()).unwrap();
assert_eq_hex!(
vec![
166, 129, 0, 246, 129, 24, 100, 246, 129, 32, 246, 129, 33, 246, 130, 0, 0,
246, 130, 0, 32, 246
],
vec
);
let test_object = from_slice(&vec[..]).unwrap();
assert_eq!(object, test_object);
}
#[test]
fn test_object_object_keys() {
use ::std::iter::FromIterator;
let mut object = BTreeMap::new();
let keys = vec![
vec!["a"],
vec!["b"],
vec!["c"],
vec!["d"],
vec!["aa"],
vec!["a", "aa"],
]
.into_iter()
.map(|v| BTreeMap::from_iter(v.into_iter().map(|s| (s.to_owned(), ()))));
for key in keys {
object.insert(key, ());
}
let vec = to_vec(&to_value(&object).unwrap()).unwrap();
assert_eq_hex!(
vec![
166, 161, 97, 97, 246, 246, 161, 97, 98, 246, 246, 161, 97, 99, 246, 246, 161,
97, 100, 246, 246, 161, 98, 97, 97, 246, 246, 162, 97, 97, 246, 98, 97, 97,
246, 246
],
vec
);
let test_object = from_slice(&vec[..]).unwrap();
assert_eq!(object, test_object);
}
#[test]
fn test_float() {
let vec = to_vec(&12.3f64).unwrap();
assert_eq_hex!(vec, b"\xfb@(\x99\x99\x99\x99\x99\x9a");
}
#[test]
fn test_f32() {
let vec = to_vec(&4000.5f32).unwrap();
assert_eq_hex!(vec, b"\xfa\x45\x7a\x08\x00");
}
#[test]
fn test_infinity() {
let vec = to_vec(&::std::f64::INFINITY).unwrap();
assert_eq_hex!(vec, b"\xf9|\x00");
}
#[test]
fn test_neg_infinity() {
let vec = to_vec(&::std::f64::NEG_INFINITY).unwrap();
assert_eq_hex!(vec, b"\xf9\xfc\x00");
}
#[test]
fn test_nan() {
let vec = to_vec(&::std::f32::NAN).unwrap();
assert_eq_hex!(vec, b"\xf9\x7e\x00");
}
#[test]
fn test_integer() {
// u8
let vec = to_vec(&24).unwrap();
assert_eq_hex!(vec, b"\x18\x18");
// i8
let vec = to_vec(&-5).unwrap();
assert_eq_hex!(vec, b"\x24");
// i16
let vec = to_vec(&-300).unwrap();
assert_eq_hex!(vec, b"\x39\x01\x2b");
// i32
let vec = to_vec(&-23567997).unwrap();
assert_eq_hex!(vec, b"\x3a\x01\x67\x9e\x7c");
// u64
let vec = to_vec(&::std::u64::MAX).unwrap();
assert_eq_hex!(vec, b"\x1b\xff\xff\xff\xff\xff\xff\xff\xff");
}
// #[test]
// fn test_self_describing() {
// let mut vec = Vec::new();
// {
// let mut serializer = ser::Serializer::new(&mut vec);
// serializer.self_describe().unwrap();
// serializer.serialize_u64(9).unwrap();
// }
// assert_eq_hex!(vec, b"\xd9\xd9\xf7\x09");
// }
// #[test]
// fn test_ip_addr() {
// use ::std::net::Ipv4Addr;
// let addr = Ipv4Addr::new(8, 8, 8, 8);
// let vec = to_vec(&addr).unwrap();
// println!("{:?}", vec);
// assert_eq_hex!(vec.len(), 5);
// let test_addr: Ipv4Addr = from_slice(&vec).unwrap();
// assert_eq_hex!(addr, test_addr);
// }
/// Test all of CBOR's fixed-length byte string types
#[test]
fn test_byte_string() {
// Very short byte strings have 1-byte headers
let short = vec![0_u8, 1, 2, 255];
let short_s = to_vec(&short).unwrap();
assert_eq_hex!(&short_s[..], [0x44, 0, 1, 2, 255]);
// byte strings > 23 bytes have 2-byte headers
let medium = vec![
0u8, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22,
255,
];
let medium_s = to_vec(&medium).unwrap();
assert_eq_hex!(
&medium_s[..],
[
0x58, 24, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
20, 21, 22, 255
]
);
// byte strings ≥ 256 bytes have 3-byte headers
let long_vec = (0..256).map(|i| (i & 0xFF) as u8).collect::<Vec<_>>();
let long_s = to_vec(&long_vec).unwrap();
assert_eq_hex!(&long_s[0..3], [0x59, 1, 0]);
assert_eq_hex!(&long_s[3..], &long_vec[..]);
// byte strings ≥ 2^16 bytes have 5-byte headers
let very_long_vec = (0..65536).map(|i| (i & 0xFF) as u8).collect::<Vec<_>>();
let very_long_s = to_vec(&very_long_vec).unwrap();
assert_eq_hex!(&very_long_s[0..5], [0x5a, 0, 1, 0, 0]);
assert_eq_hex!(&very_long_s[5..], &very_long_vec[..]);
// byte strings ≥ 2^32 bytes have 9-byte headers,
// but they take too much RAM to test in most CI setups, such as Travis.
// Confident in our implementation of the serialization code (which
// only copies the byte slice contents provided the writer allows it),
// we `unsafe`-ly fake a gigantic slice by using a writer
// that will saturate right after the header has been written.
#[cfg(all(not(miri), target_pointer_width = "64"))] #[cfg_attr(rustfmt, rustfmt::skip)]
unsafe {
let fake_huge_byte_seq: &'_ [u8] = ::core::slice::from_raw_parts(
0x1 as _,
0x00_00_00_01_de_ad_be_ef,
);
let mut _9 = [0_u8; 9];
let _ = to_writer(&mut &mut _9[..], &fake_huge_byte_seq);
assert_eq_hex!(
&_9[..],
[
0x5b,
0x00, 0x00, 0x00, 0x01, 0xde, 0xad, 0xbe, 0xef,
],
);
}
}
#[test]
fn test_half() {
let vec = to_vec(&42.5f32).unwrap();
assert_eq_hex!(vec, b"\xF9\x51\x50");
assert_eq!(from_slice::<f32>(&vec[..]).unwrap(), 42.5f32);
}
}
} | enum Layer<'a> {
Seq(Box<dyn Seq<'a> + 'a>),
Map(Box<dyn Map<'a> + 'a>), | random_line_split |
ser.rs | #![allow(unused)]
#![warn(unused_must_use)]
use crate::{
ser::{Map, Seq, Serialize, ValueView},
Result,
};
use ::std::io::{self, Write as _};
/// Serialize any serializable type into a CBOR byte sequence.
///
/// ```rust
/// use miniserde_ditto::{cbor, Serialize};
///
/// #[derive(Serialize, Debug)]
/// struct Example {
/// code: u32,
/// message: String,
/// }
///
/// fn main() {
/// let example = Example {
/// code: 200,
/// message: "Reminiscent of Serde".to_owned(),
/// };
///
/// let bytes = &cbor::to_vec(&example).unwrap()[..];
/// println!("{:#x?}", bytes);
/// assert_eq!(bytes, &[
/// 0xa2, // 2-long map
///
/// 0x64, // 4-long str
/// b'c', b'o', b'd', b'e',
/// 0x18, // positive u8 > 24.
/// 0xc8, // 200 = 0xc8
///
/// 0x67, // 7-long str
/// b'm', b'e', b's', b's', b'a', b'g', b'e',
/// 0x74, // str of length: 0x14 = 20.
/// b'R', b'e', b'm', b'i', b'n', b'i', b's', b'c', b'e', b'n', b't',
/// b' ', b'o', b'f', b' ',
/// b'S', b'e', b'r', b'd', b'e',
/// ][..]);
/// }
/// ```
pub fn to_vec<T: Serialize>(ref value: T) -> Result<Vec<u8>> {
let mut v = vec![];
match to_writer(&mut v, &value) {
Ok(()) => Ok(v),
Err(None) => Err(crate::Error),
Err(Some(io_err)) => unreachable!("IO failure on a Vec: {}", io_err),
}
}
struct Serializer<'a> {
stack: Vec<Layer<'a>>,
}
enum Layer<'a> {
Seq(Box<dyn Seq<'a> + 'a>),
Map(Box<dyn Map<'a> + 'a>),
}
impl<'a> Drop for Serializer<'a> {
fn drop(&mut self) {
// Drop layers in reverse order.
while !self.stack.is_empty() {
self.stack.pop();
}
}
}
#[allow(nonstandard_style)]
struct write_u64 {
major: u8,
v: u64,
}
impl write_u64 {
fn | (self, out: &'_ mut (dyn io::Write)) -> io::Result<()> {
let Self { major, v: value } = self;
let mask = major << 5;
macro_rules! with_uNs {( $($uN:ident)<* ) => ({
mod c {
$(
pub mod $uN { pub const MAX: u64 = ::core::$uN::MAX as _; }
)*
pub mod u8 { pub const MAX: u64 = ::core::u8::MAX as _; }
}
const SMALL_U8_MAX: u64 = 0x17;
#[allow(nonstandard_style)]
enum MaskFor {
u8 = (SMALL_U8_MAX + 1) as _,
$($uN),*
}
match value {
0 ..= SMALL_U8_MAX => out.write_all(&[mask | (value as u8)]),
0 ..= c::u8::MAX => out.write_all(&[
mask | (MaskFor::u8 as u8),
value as u8,
]),
$(
0 ..= c::$uN::MAX => {
let value = value as $uN;
let ref mut buf = [0; 1 + ::core::mem::size_of::<$uN>()];
buf[0] = mask | (MaskFor::$uN as u8);
buf[1 ..].copy_from_slice(&value.to_be_bytes());
out.write_all(buf)
},
)*
_ => unreachable!(),
}
})}
with_uNs!(u16 < u32 < u64)
}
}
/// Serialize any serializable type as a CBOR byte sequence into a
/// [`Write`][io::Write]able sink.
///
/// Returns:
/// - `Ok(())` on success.
/// - `Err(Some(io_error))` on I/O failure.
/// - `Err(None)` on serialization error (unrepresentable integer).
pub fn to_writer<'value>(
out: &'_ mut dyn io::Write,
value: &'value dyn Serialize,
) -> Result<(), Option<io::Error>> {
// Borrow-checker-friendly "closure"
#[cfg_attr(rustfmt, rustfmt::skip)]
macro_rules! write { ($bytes:expr) => ({
out.write_all($bytes).map_err(Some)
})}
// Use a manual stack to avoid (stack-allocated) recursion.
let mut stack: Vec<Layer<'value>> = vec![Layer::Single(value)];
// where:
enum Layer<'value> {
Seq(Box<dyn Seq<'value> + 'value>),
Map(Box<dyn Map<'value> + 'value>),
Single(&'value dyn Serialize),
}
while let Some(last) = stack.last_mut() {
let view: ValueView<'value> = match last {
&mut Layer::Single(value) => {
let view = value.view();
drop(stack.pop());
view
}
Layer::Seq(seq) => {
match seq.next() {
Some(value) => stack.push(Layer::Single(value)),
None => drop(stack.pop()),
}
continue;
}
Layer::Map(map) => {
match map.next() {
Some((key, value)) => {
stack.push(Layer::Single(value));
stack.push(Layer::Single(key));
}
None => drop(stack.pop()),
}
continue;
}
};
match view {
ValueView::Null => write!(&[0xf6])?,
ValueView::Bool(b) => write!(&[0xf4 | (b as u8)])?,
ValueView::Str(s) => {
write_u64 {
major: 3,
v: s.len() as u64,
}
.into(out)?;
write!(s.as_bytes())?;
}
ValueView::Bytes(bs) => {
write_u64 {
major: 2,
v: bs.len() as u64,
}
.into(out)?;
write!(&*bs)?;
}
ValueView::Int(i) => {
const MIN: i128 = -(1_i128 << 64);
const MAX: i128 = ::core::u64::MAX as _;
match i {
MIN..=-1 => write_u64 {
major: 1,
v: (-(i + 1)) as u64,
}
.into(out)?,
0..=MAX => write_u64 {
major: 0,
v: i as u64,
}
.into(out)?,
_ => err!("Cannot serialize integer {:?} as CBOR: out of range", i),
}
}
ValueView::F64(f) if f.is_infinite() => write!(if f.is_sign_positive() {
&[0xf9, 0x7c, 0x00]
} else {
&[0xf9, 0xfc, 0x00]
})?,
ValueView::F64(f) if f.is_nan() => {
write!(&[0xf9, 0x7e, 0x00])?;
}
ValueView::F64(f) => {
// Finite float.
let f_16;
let f_32;
match () {
_case
if {
f_16 = ::half::f16::from_f64(f);
f64::from(f_16) == f
} =>
{
let ref mut buf = [0xf9, 0, 0];
buf[1..].copy_from_slice(&f_16.to_bits().to_be_bytes());
write!(buf)?;
}
_case
if {
f_32 = f as f32;
f64::from(f_32) == f
} =>
{
let ref mut buf = [0xfa, 0, 0, 0, 0];
buf[1..].copy_from_slice(&f_32.to_bits().to_be_bytes());
write!(buf)?;
}
_default => {
let ref mut buf = [0xfb, 0, 0, 0, 0, 0, 0, 0, 0];
buf[1..].copy_from_slice(&f.to_bits().to_be_bytes());
write!(buf)?;
}
}
}
ValueView::Seq(mut seq) => {
let count = seq.remaining();
write_u64 {
major: 4,
v: count as _,
}
.into(out)?;
stack.push(Layer::Seq(seq));
}
ValueView::Map(mut map) => {
let count = map.remaining();
write_u64 {
major: 5,
v: count as _,
}
.into(out)?;
stack.push(Layer::Map(map));
}
}
}
Ok(())
}
#[cfg(test)]
mod tests {
//! Most of these tests have been taken from
//! https://github.com/pyfisch/cbor/blob/a218403a52e60c991313f429e4acc05cce81ce25/tests/ser.rs
use super::*;
use crate::{
cbor::{value::*, *},
Serialize,
};
#[cfg_attr(rustfmt, rustfmt::skip)]
macro_rules! assert_eq_hex {(
$left:expr,
$right:expr $(,)?
) => (
match (&$left[..], &$right[..]) {
(ref left, ref right) => {
if <[u8] as ::core::cmp::PartialEq>::ne(left, right) {
panic!(
"assertion failed: (`{}` == `{}`)\n{}]",
stringify!($left),
stringify!($right),
(0..left.len().max(right.len()))
.map(|i| match (left.get(i), right.get(i)) {
(Some(l), Some(r)) => format!(
" {:01}|{:02x} – {:01}|{:02x},\n",
l >> 5, l & 0x1f,
r >> 5, r & 0x1f
),
(Some(l), _) =>
format!(" {:01}|{:02x} - ____,\n", l >> 5, l & 0x1f),
(_, Some(r)) =>
format!("____ - {:01}|{:02x},\n", r >> 5, r & 0x1f),
_ => unreachable!(),
})
.collect::<String>(),
);
}
}
}
)}
#[test]
fn test_str() {
serialize_and_compare("foobar", b"ffoobar");
}
#[test]
fn test_list() {
serialize_and_compare(&[1, 2, 3][..], b"\x83\x01\x02\x03");
}
#[test]
fn test_float() {
serialize_and_compare(12.3f64, b"\xfb@(\x99\x99\x99\x99\x99\x9a");
}
#[test]
fn test_integer() {
// u8
serialize_and_compare(24, b"\x18\x18");
// i8
serialize_and_compare(-5, b"\x24");
// i16
serialize_and_compare(-300, b"\x39\x01\x2b");
// i32
serialize_and_compare(-23567997, b"\x3a\x01\x67\x9e\x7c");
// u64
serialize_and_compare(::core::u64::MAX, b"\x1b\xff\xff\xff\xff\xff\xff\xff\xff");
}
fn serialize_and_compare<T: Serialize>(value: T, expected: &[u8]) {
assert_eq_hex!(&to_vec(&value).unwrap()[..], expected,);
}
mod std {
use super::*;
use ::std::collections::BTreeMap;
#[test]
fn test_string() {
let value = "foobar".to_owned();
assert_eq_hex!(&to_vec(&value).unwrap()[..], b"ffoobar");
}
#[test]
fn test_list() {
let value = vec![1, 2, 3];
assert_eq_hex!(&to_vec(&value).unwrap()[..], b"\x83\x01\x02\x03");
}
#[test]
fn test_list_strings() {
let value = vec!["1", "2", "3"];
assert_eq_hex!(&to_vec(&value).unwrap()[..], b"\x83\x611\x612\x613");
}
#[test]
fn test_object() {
use ::std::collections::HashMap;
let mut object = HashMap::new();
object.insert("a".to_owned(), "A".to_owned());
object.insert("b".to_owned(), "B".to_owned());
object.insert("c".to_owned(), "C".to_owned());
object.insert("d".to_owned(), "D".to_owned());
object.insert("e".to_owned(), "E".to_owned());
let vec = to_vec(&object).unwrap();
let test_object = from_slice(&vec[..]).unwrap();
assert_eq!(object, test_object);
}
#[test]
fn test_object_list_keys() {
let mut object = BTreeMap::new();
object.insert(vec![0i64], ());
object.insert(vec![100i64], ());
object.insert(vec![-1i64], ());
object.insert(vec![-2i64], ());
object.insert(vec![0i64, 0i64], ());
object.insert(vec![0i64, -1i64], ());
let vec = to_vec(&to_value(&object).unwrap()).unwrap();
assert_eq_hex!(
vec![
166, 129, 0, 246, 129, 24, 100, 246, 129, 32, 246, 129, 33, 246, 130, 0, 0,
246, 130, 0, 32, 246
],
vec
);
let test_object = from_slice(&vec[..]).unwrap();
assert_eq!(object, test_object);
}
#[test]
fn test_object_object_keys() {
use ::std::iter::FromIterator;
let mut object = BTreeMap::new();
let keys = vec![
vec!["a"],
vec!["b"],
vec!["c"],
vec!["d"],
vec!["aa"],
vec!["a", "aa"],
]
.into_iter()
.map(|v| BTreeMap::from_iter(v.into_iter().map(|s| (s.to_owned(), ()))));
for key in keys {
object.insert(key, ());
}
let vec = to_vec(&to_value(&object).unwrap()).unwrap();
assert_eq_hex!(
vec![
166, 161, 97, 97, 246, 246, 161, 97, 98, 246, 246, 161, 97, 99, 246, 246, 161,
97, 100, 246, 246, 161, 98, 97, 97, 246, 246, 162, 97, 97, 246, 98, 97, 97,
246, 246
],
vec
);
let test_object = from_slice(&vec[..]).unwrap();
assert_eq!(object, test_object);
}
#[test]
fn test_float() {
let vec = to_vec(&12.3f64).unwrap();
assert_eq_hex!(vec, b"\xfb@(\x99\x99\x99\x99\x99\x9a");
}
#[test]
fn test_f32() {
let vec = to_vec(&4000.5f32).unwrap();
assert_eq_hex!(vec, b"\xfa\x45\x7a\x08\x00");
}
#[test]
fn test_infinity() {
let vec = to_vec(&::std::f64::INFINITY).unwrap();
assert_eq_hex!(vec, b"\xf9|\x00");
}
#[test]
fn test_neg_infinity() {
let vec = to_vec(&::std::f64::NEG_INFINITY).unwrap();
assert_eq_hex!(vec, b"\xf9\xfc\x00");
}
#[test]
fn test_nan() {
let vec = to_vec(&::std::f32::NAN).unwrap();
assert_eq_hex!(vec, b"\xf9\x7e\x00");
}
#[test]
fn test_integer() {
// u8
let vec = to_vec(&24).unwrap();
assert_eq_hex!(vec, b"\x18\x18");
// i8
let vec = to_vec(&-5).unwrap();
assert_eq_hex!(vec, b"\x24");
// i16
let vec = to_vec(&-300).unwrap();
assert_eq_hex!(vec, b"\x39\x01\x2b");
// i32
let vec = to_vec(&-23567997).unwrap();
assert_eq_hex!(vec, b"\x3a\x01\x67\x9e\x7c");
// u64
let vec = to_vec(&::std::u64::MAX).unwrap();
assert_eq_hex!(vec, b"\x1b\xff\xff\xff\xff\xff\xff\xff\xff");
}
// #[test]
// fn test_self_describing() {
// let mut vec = Vec::new();
// {
// let mut serializer = ser::Serializer::new(&mut vec);
// serializer.self_describe().unwrap();
// serializer.serialize_u64(9).unwrap();
// }
// assert_eq_hex!(vec, b"\xd9\xd9\xf7\x09");
// }
// #[test]
// fn test_ip_addr() {
// use ::std::net::Ipv4Addr;
// let addr = Ipv4Addr::new(8, 8, 8, 8);
// let vec = to_vec(&addr).unwrap();
// println!("{:?}", vec);
// assert_eq_hex!(vec.len(), 5);
// let test_addr: Ipv4Addr = from_slice(&vec).unwrap();
// assert_eq_hex!(addr, test_addr);
// }
/// Test all of CBOR's fixed-length byte string types
#[test]
fn test_byte_string() {
// Very short byte strings have 1-byte headers
let short = vec![0_u8, 1, 2, 255];
let short_s = to_vec(&short).unwrap();
assert_eq_hex!(&short_s[..], [0x44, 0, 1, 2, 255]);
// byte strings > 23 bytes have 2-byte headers
let medium = vec![
0u8, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22,
255,
];
let medium_s = to_vec(&medium).unwrap();
assert_eq_hex!(
&medium_s[..],
[
0x58, 24, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
20, 21, 22, 255
]
);
// byte strings ≥ 256 bytes have 3-byte headers
let long_vec = (0..256).map(|i| (i & 0xFF) as u8).collect::<Vec<_>>();
let long_s = to_vec(&long_vec).unwrap();
assert_eq_hex!(&long_s[0..3], [0x59, 1, 0]);
assert_eq_hex!(&long_s[3..], &long_vec[..]);
// byte strings ≥ 2^16 bytes have 5-byte headers
let very_long_vec = (0..65536).map(|i| (i & 0xFF) as u8).collect::<Vec<_>>();
let very_long_s = to_vec(&very_long_vec).unwrap();
assert_eq_hex!(&very_long_s[0..5], [0x5a, 0, 1, 0, 0]);
assert_eq_hex!(&very_long_s[5..], &very_long_vec[..]);
// byte strings ≥ 2^32 bytes have 9-byte headers,
// but they take too much RAM to test in most CI setups, such as Travis.
// Confident in our implementation of the serialization code (which
// only copies the byte-slice contents, provided the writer accepts them),
// we `unsafe`-ly fake a gigantic slice by using a writer
// that will saturate right after the header has been written.
#[cfg(all(not(miri), target_pointer_width = "64"))] #[cfg_attr(rustfmt, rustfmt::skip)]
unsafe {
let fake_huge_byte_seq: &'_ [u8] = ::core::slice::from_raw_parts(
0x1 as _,
0x00_00_00_01_de_ad_be_ef,
);
let mut _9 = [0_u8; 9];
let _ = to_writer(&mut &mut _9[..], &fake_huge_byte_seq);
assert_eq_hex!(
&_9[..],
[
0x5b,
0x00, 0x00, 0x00, 0x01, 0xde, 0xad, 0xbe, 0xef,
],
);
}
}
#[test]
fn test_half() {
let vec = to_vec(&42.5f32).unwrap();
assert_eq_hex!(vec, b"\xF9\x51\x50");
assert_eq!(from_slice::<f32>(&vec[..]).unwrap(), 42.5f32);
}
}
}
| into | identifier_name |
ser.rs | #![allow(unused)]
#![warn(unused_must_use)]
use crate::{
ser::{Map, Seq, Serialize, ValueView},
Result,
};
use ::std::io::{self, Write as _};
/// Serialize any serializable type into a CBOR byte sequence.
///
/// ```rust
/// use miniserde_ditto::{cbor, Serialize};
///
/// #[derive(Serialize, Debug)]
/// struct Example {
/// code: u32,
/// message: String,
/// }
///
/// fn main() {
/// let example = Example {
/// code: 200,
/// message: "Reminiscent of Serde".to_owned(),
/// };
///
/// let bytes = &cbor::to_vec(&example).unwrap()[..];
/// println!("{:#x?}", bytes);
/// assert_eq!(bytes, &[
/// 0xa2, // 2-long map
///
/// 0x64, // 4-long str
/// b'c', b'o', b'd', b'e',
/// 0x18, // positive u8 ≥ 24 (one-byte uint follows).
/// 0xc8, // 200 = 0xc8
///
/// 0x67, // 7-long str
/// b'm', b'e', b's', b's', b'a', b'g', b'e',
/// 0x74, // str of length: 0x14 = 20.
/// b'R', b'e', b'm', b'i', b'n', b'i', b's', b'c', b'e', b'n', b't',
/// b' ', b'o', b'f', b' ',
/// b'S', b'e', b'r', b'd', b'e',
/// ][..]);
/// }
/// ```
pub fn to_vec<T: Serialize>(ref value: T) -> Result<Vec<u8>> {
let mut v = vec![];
match to_writer(&mut v, &value) {
Ok(()) => Ok(v),
Err(None) => Err(crate::Error),
Err(Some(io_err)) => unreachable!("IO failure on a Vec: {}", io_err),
}
}
struct Serializer<'a> {
stack: Vec<Layer<'a>>,
}
enum Layer<'a> {
Seq(Box<dyn Seq<'a> + 'a>),
Map(Box<dyn Map<'a> + 'a>),
}
impl<'a> Drop for Serializer<'a> {
fn drop(&mut self) {
// Drop layers in reverse order.
while !self.stack.is_empty() {
self.stack.pop();
}
}
}
#[allow(nonstandard_style)]
struct write_u64 {
major: u8,
v: u64,
}
impl write_u64 {
fn into(self, out: &'_ mut (dyn io::Write)) -> io::Result<()> {
let Self { major, v: value } = self;
let mask = major << 5;
macro_rules! with_uNs {( $($uN:ident)<* ) => ({
mod c {
$(
pub mod $uN { pub const MAX: u64 = ::core::$uN::MAX as _; }
)*
pub mod u8 { pub const MAX: u64 = ::core::u8::MAX as _; }
}
const SMALL_U8_MAX: u64 = 0x17;
#[allow(nonstandard_style)]
enum MaskFor {
u8 = (SMALL_U8_MAX + 1) as _,
$($uN),*
}
match value {
0 ..= SMALL_U8_MAX => out.write_all(&[mask | (value as u8)]),
0 ..= c::u8::MAX => out.write_all(&[
mask | (MaskFor::u8 as u8),
value as u8,
]),
$(
0 ..= c::$uN::MAX => {
let value = value as $uN;
let ref mut buf = [0; 1 + ::core::mem::size_of::<$uN>()];
buf[0] = mask | (MaskFor::$uN as u8);
buf[1 ..].copy_from_slice(&value.to_be_bytes());
out.write_all(buf)
},
)*
_ => unreachable!(),
}
})}
with_uNs!(u16 < u32 < u64)
}
}
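// Illustrative sketch (derived from the code above; not part of the original
// source): `major` fills the top 3 bits of the first byte, and the value is
// either inlined (<= 0x17) or appended big-endian after a width marker.
// For major 0 (unsigned int):
//   value 10  => [0x0a]
//   value 200 => [0x18, 0xc8]
//   value 300 => [0x19, 0x01, 0x2c]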
/// Serialize any serializable type as a CBOR byte sequence into a
/// [`Write`][io::Write]able sink.
///
/// Returns:
/// - `Ok(())` on success.
/// - `Err(Some(io_error))` on I/O failure.
/// - `Err(None)` on serialization error (unrepresentable integer).
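///
/// A minimal usage sketch (assuming `u32: Serialize` and the `cbor` module
/// path from the `to_vec` example):
///
/// ```rust,no_run
/// # use miniserde_ditto::cbor;
/// let mut buf = Vec::new();
/// cbor::to_writer(&mut buf, &200_u32).unwrap();
/// assert_eq!(buf, [0x18, 0xc8]); // major-0 header byte, then the u8 value
/// ```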
pub fn to_writer<'value>(
out: &'_ mut dyn io::Write,
value: &'value dyn Serialize,
) -> Result<(), Option<io::Error>> {
// Borrow-checker-friendly "closure"
#[cfg_attr(rustfmt, rustfmt::skip)]
macro_rules! write { ($bytes:expr) => ({
out.write_all($bytes).map_err(Some)
})}
// Use a manual stack to avoid (stack-allocated) recursion.
let mut stack: Vec<Layer<'value>> = vec![Layer::Single(value)];
// where:
enum Layer<'value> {
Seq(Box<dyn Seq<'value> + 'value>),
Map(Box<dyn Map<'value> + 'value>),
Single(&'value dyn Serialize),
}
while let Some(last) = stack.last_mut() {
let view: ValueView<'value> = match last {
&mut Layer::Single(value) => {
let view = value.view();
drop(stack.pop());
view
}
Layer::Seq(seq) => {
match seq.next() {
Some(value) => stack.push(Layer::Single(value)),
None => drop(stack.pop()),
}
continue;
}
Layer::Map(map) => {
match map.next() {
Some((key, value)) => {
stack.push(Layer::Single(value));
stack.push(Layer::Single(key));
}
None => drop(stack.pop()),
}
continue;
}
};
match view {
ValueView::Null => write!(&[0xf6])?,
ValueView::Bool(b) => write!(&[0xf4 | (b as u8)])?,
ValueView::Str(s) => {
write_u64 {
major: 3,
v: s.len() as u64,
}
.into(out)?;
write!(s.as_bytes())?;
}
ValueView::Bytes(bs) => {
write_u64 {
major: 2,
v: bs.len() as u64,
}
.into(out)?;
write!(&*bs)?;
}
ValueView::Int(i) => {
const MIN: i128 = -(1_i128 << 64);
const MAX: i128 = ::core::u64::MAX as _;
match i {
MIN..=-1 => write_u64 {
major: 1,
v: (-(i + 1)) as u64,
}
.into(out)?,
0..=MAX => write_u64 {
major: 0,
v: i as u64,
}
.into(out)?,
_ => err!("Cannot serialize integer {:?} as CBOR: out of range", i),
}
}
ValueView::F64(f) if f.is_infinite() => write!(if f.is_sign_positive() {
&[0xf9, 0x7c, 0x00]
} else {
&[0xf9, 0xfc, 0x00]
})?,
ValueView::F64(f) if f.is_nan() => {
write!(&[0xf9, 0x7e, 0x00])?;
}
ValueView::F64(f) => {
// Finite float.
let f_16;
let f_32;
match () {
_case
if {
f_16 = ::half::f16::from_f64(f);
f64::from(f_16) == f
} =>
{
let ref mut buf = [0xf9, 0, 0];
buf[1..].copy_from_slice(&f_16.to_bits().to_be_bytes());
write!(buf)?;
}
_case
if {
f_32 = f as f32;
f64::from(f_32) == f
} =>
{
let ref mut buf = [0xfa, 0, 0, 0, 0];
buf[1..].copy_from_slice(&f_32.to_bits().to_be_bytes());
write!(buf)?;
}
_default => {
let ref mut buf = [0xfb, 0, 0, 0, 0, 0, 0, 0, 0];
buf[1..].copy_from_slice(&f.to_bits().to_be_bytes());
write!(buf)?;
}
}
}
ValueView::Seq(mut seq) => {
let count = seq.remaining();
write_u64 {
major: 4,
v: count as _,
}
.into(out)?;
stack.push(Layer::Seq(seq));
}
ValueView::Map(mut map) => {
let count = map.remaining();
write_u64 {
major: 5,
v: count as _,
}
.into(out)?;
stack.push(Layer::Map(map));
}
}
}
Ok(())
}
#[cfg(test)]
mod tests {
//! Most of these tests have been taken from
//! https://github.com/pyfisch/cbor/blob/a218403a52e60c991313f429e4acc05cce81ce25/tests/ser.rs
use super::*;
use crate::{
cbor::{value::*, *},
Serialize,
};
#[cfg_attr(rustfmt, rustfmt::skip)]
macro_rules! assert_eq_hex {(
$left:expr,
$right:expr $(,)?
) => (
match (&$left[..], &$right[..]) {
(ref left, ref right) => {
if <[u8] as ::core::cmp::PartialEq>::ne(left, right) {
panic!(
"assertion failed: (`{}` == `{}`)\n{}]",
stringify!($left),
stringify!($right),
(0..left.len().max(right.len()))
.map(|i| match (left.get(i), right.get(i)) {
(Some(l), Some(r)) => format!(
" {:01}|{:02x} – {:01}|{:02x},\n",
l >> 5, l & 0x1f,
r >> 5, r & 0x1f
),
(Some(l), _) =>
format!(" {:01}|{:02x} - ____,\n", l >> 5, l & 0x1f),
(_, Some(r)) =>
format!("____ - {:01}|{:02x},\n", r >> 5, r & 0x1f),
_ => unreachable!(),
})
.collect::<String>(),
);
}
}
}
)}
#[test]
fn test_str() {
serialize_and_compare("foobar", b"ffoobar");
}
#[test]
fn test_list() {
serialize_and_compare(&[1, 2, 3][..], b"\x83\x01\x02\x03");
}
#[test]
fn test_float() {
serialize_and_compare(12.3f64, b"\xfb@(\x99\x99\x99\x99\x99\x9a");
}
#[test]
fn test_integer() {
// u8
serialize_and_compare(24, b"\x18\x18");
// i8
serialize_and_compare(-5, b"\x24");
// i16
serialize_and_compare(-300, b"\x39\x01\x2b");
// i32
serialize_and_compare(-23567997, b"\x3a\x01\x67\x9e\x7c");
// u64
serialize_and_compare(::core::u64::MAX, b"\x1b\xff\xff\xff\xff\xff\xff\xff\xff");
}
fn serialize_and_compare<T: Serialize>(value: T, expected: &[u8]) {
assert_eq_hex!(&to_vec(&value).unwrap()[..], expected,);
}
mod std {
use super::*;
use ::std::collections::BTreeMap;
#[test]
fn test_string() {
let value = "foobar".to_owned();
assert_eq_hex!(&to_vec(&value).unwrap()[..], b"ffoobar");
}
#[test]
fn test_list() {
let value = vec![1, 2, 3];
assert_eq_hex!(&to_vec(&value).unwrap()[..], b"\x83\x01\x02\x03");
}
#[test]
fn test_list_strings() {
let value = vec!["1", "2", "3"];
assert_eq_hex!(&to_vec(&value).unwrap()[..], b"\x83\x611\x612\x613");
}
#[test]
fn test_object() {
use ::std::collections::HashMap;
let mut object = HashMap::new();
object.insert("a".to_owned(), "A".to_owned());
object.insert("b".to_owned(), "B".to_owned());
object.insert("c".to_owned(), "C".to_owned());
object.insert("d".to_owned(), "D".to_owned());
object.insert("e".to_owned(), "E".to_owned());
let vec = to_vec(&object).unwrap();
let test_object = from_slice(&vec[..]).unwrap();
assert_eq!(object, test_object);
}
#[test]
fn test_object_list_keys() {
let mut object = BTreeMap::new();
object.insert(vec![0i64], ());
object.insert(vec![100i64], ());
object.insert(vec![-1i64], ());
object.insert(vec![-2i64], ());
object.insert(vec![0i64, 0i64], ());
object.insert(vec![0i64, -1i64], ());
let vec = to_vec(&to_value(&object).unwrap()).unwrap();
assert_eq_hex!(
vec![
166, 129, 0, 246, 129, 24, 100, 246, 129, 32, 246, 129, 33, 246, 130, 0, 0,
246, 130, 0, 32, 246
],
vec
);
let test_object = from_slice(&vec[..]).unwrap();
assert_eq!(object, test_object);
}
#[test]
fn test_object_object_keys() {
use ::std::iter::FromIterator;
let mut object = BTreeMap::new();
let keys = vec![
vec!["a"],
vec!["b"],
vec!["c"],
vec!["d"],
vec!["aa"],
vec!["a", "aa"],
]
.into_iter()
.map(|v| BTreeMap::from_iter(v.into_iter().map(|s| (s.to_owned(), ()))));
for key in keys {
object.insert(key, ());
}
let vec = to_vec(&to_value(&object).unwrap()).unwrap();
assert_eq_hex!(
vec![
166, 161, 97, 97, 246, 246, 161, 97, 98, 246, 246, 161, 97, 99, 246, 246, 161,
97, 100, 246, 246, 161, 98, 97, 97, 246, 246, 162, 97, 97, 246, 98, 97, 97,
246, 246
],
vec
);
let test_object = from_slice(&vec[..]).unwrap();
assert_eq!(object, test_object);
}
#[test]
fn test_float() {
let vec = to_vec(&12.3f64).unwrap();
assert_eq_hex!(vec, b"\xfb@(\x99\x99\x99\x99\x99\x9a");
}
#[test]
fn test_f32() {
let vec = to_vec(&4000.5f32).unwrap();
assert_eq_hex!(vec, b"\xfa\x45\x7a\x08\x00");
}
#[test]
fn test_infinity() {
let vec = to_vec(&::std::f64::INFINITY).unwrap();
assert_eq_hex!(vec, b"\xf9|\x00");
}
#[test]
fn test_neg_infinity() {
let vec = to_vec(&::std::f64::NEG_INFINITY).unwrap();
assert_eq_hex!(vec, b"\xf9\xfc\x00");
}
#[test]
fn test_nan() {
let vec = to_vec(&::std::f32::NAN).unwrap();
assert_eq_hex!(vec, b"\xf9\x7e\x00");
}
#[test]
fn test_integer() {
// u8
let vec = to_vec(&24).unwrap();
assert_eq_hex!(vec, b"\x18\x18");
// i8
let vec = to_vec(&-5).unwrap();
assert_eq_hex!(vec, b"\x24");
// i16
let vec = to_vec(&-300).unwrap();
assert_eq_hex!(vec, b"\x39\x01\x2b");
// i32
let vec = to_vec(&-23567997).unwrap();
assert_eq_hex!(vec, b"\x3a\x01\x67\x9e\x7c");
// u64
let vec = to_vec(&::std::u64::MAX).unwrap();
assert_eq_hex!(vec, b"\x1b\xff\xff\xff\xff\xff\xff\xff\xff");
}
// #[test]
// fn test_self_describing() {
// let mut vec = Vec::new();
// {
// let mut serializer = ser::Serializer::new(&mut vec);
// serializer.self_describe().unwrap();
// serializer.serialize_u64(9).unwrap();
// }
// assert_eq_hex!(vec, b"\xd9\xd9\xf7\x09");
// }
// #[test]
// fn test_ip_addr() {
// use ::std::net::Ipv4Addr;
// let addr = Ipv4Addr::new(8, 8, 8, 8);
// let vec = to_vec(&addr).unwrap();
// println!("{:?}", vec);
// assert_eq_hex!(vec.len(), 5);
// let test_addr: Ipv4Addr = from_slice(&vec).unwrap();
// assert_eq_hex!(addr, test_addr);
// }
/// Test all of CBOR's fixed-length byte string types
#[test]
fn test_byte_string() {
// Very short byte strings have 1-byte headers
let short = vec![0_u8, 1, 2, 255];
let short_s = to_vec(&short).unwrap();
assert_eq_hex!(&short_s[..], [0x44, 0, 1, 2, 255]);
// byte strings > 23 bytes have 2-byte headers
let medium = vec![
0u8, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22,
255,
];
let medium_s = to_vec(&medium).unwrap();
assert_eq_hex!(
&medium_s[..],
[
0x58, 24, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
20, 21, 22, 255
]
);
// byte strings ≥ 256 bytes have 3-byte headers
let long_vec = (0..256).map(|i| (i & 0xFF) as u8).collect::<Vec<_>>();
let long_s = to_vec(&long_vec).unwrap();
assert_eq_hex!(&long_s[0..3], [0x59, 1, 0]);
assert_eq_hex!(&long_s[3..], &long_vec[..]);
// byte strings ≥ 2^16 bytes have 5-byte headers
let very_long_vec = (0..65536).map(|i| (i & 0xFF) as u8).collect::<Vec<_>>();
let very_long_s = to_vec(&very_long_vec).unwrap();
assert_eq_hex!(&very_long_s[0..5], [0x5a, 0, 1, 0, 0]);
assert_eq_hex!(&very_long_s[5..], &very_long_vec[..]);
// byte strings ≥ 2^32 bytes have 9-byte headers,
// but they take too much RAM to test in most CI setups, such as Travis.
// Confident in our implementation of the serialization code (which
// only copies the byte-slice contents, provided the writer accepts them),
// we `unsafe`-ly fake a gigantic slice by using a writer
// that will saturate right after the header has been written.
#[cfg(all(not(miri), target_pointer_width = "64"))] #[cfg_attr(rustfmt, rustfmt::skip)]
unsafe {
let fake_huge_byte_seq: &'_ [u8] = ::core::slice::from_raw_parts(
0x1 as _,
0x00_00_00_01_de_ad_be_ef,
);
let mut _9 = [0_u8; 9];
let _ = to_writer(&mut &mut _9[..], &fake_huge_byte_seq);
assert_eq_hex!(
&_9[..],
[
0x5b,
0x00, 0x00, 0x00, 0x01, 0xde, 0xad, 0xbe, 0xef,
],
);
}
}
#[test]
fn test_half() {
| let vec = to_vec(&42.5f32).unwrap();
assert_eq_hex!(vec, b"\xF9\x51\x50");
assert_eq!(from_slice::<f32>(&vec[..]).unwrap(), 42.5f32);
}
}
} | identifier_body | |
result-info-plot-scatter-exec-time.py | #!/usr/bin/env python
# Copyright (c) 2017, Daniel Liew
# This file is covered by the license in LICENSE.txt
# vim: set sw=4 ts=4 softtabstop=4 expandtab:
"""
Read two result info files and generate a scatter plot of execution time
"""
from load_smtrunner import add_smtrunner_to_module_search_path
add_smtrunner_to_module_search_path()
from smtrunner import ResultInfo, DriverUtil, ResultInfoUtil, analysis, event_analysis
import smtrunner.util
import matplotlib.pyplot as plt
import argparse
import json
import logging
import math
import os
import pprint
import random
import re
import sys
import yaml
_logger = None
def strip(prefix, path):
if prefix == "":
return path
if path.startswith(prefix):
return path[len(prefix):]
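# e.g. strip("results/", "results/foo.smt2") == "foo.smt2". Note that when
# `path` does not start with a non-empty `prefix`, execution falls through
# and the function implicitly returns None.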
def main(args):
global _logger
global _fail_count
parser = argparse.ArgumentParser(description=__doc__)
DriverUtil.parserAddLoggerArg(parser)
parser.add_argument('first_result_info',
type=argparse.FileType('r'))
parser.add_argument('second_result_info',
type=argparse.FileType('r'))
parser.add_argument('--base', type=str, default="")
parser.add_argument('--point-size', type=float, default=25.0, dest='point_size')
parser.add_argument('--allow-merge-failures',
dest='allow_merge_failures',
default=False,
action='store_true',
)
parser.add_argument('--max-exec-time',
default=None,
type=float,
dest='max_exec_time',
)
parser.add_argument('--title',
default="{num_keys} benchmarks, {num_points} jointly SAT or timeout"
)
parser.add_argument("--xlabel",
type=str,
default=None,
)
parser.add_argument("--ylabel",
type=str,
default=None,
)
parser.add_argument("--axis-label-suffix",
type=str,
default=" execution time (s)",
dest="axis_label_suffix",
)
parser.add_argument("--axis-label-colour",
type=str,
default="black",
dest="axis_label_colour",
)
parser.add_argument("--annotate",
default=False,
action='store_true',
)
parser.add_argument("--annotate-use-legacy-values",
default=False,
action='store_true',
)
parser.add_argument("--output",
default=None,
type=argparse.FileType('wb'),
)
parser.add_argument("--error-bars",
default=False,
action='store_true',
)
parser.add_argument("--annotate-timeout-point",
dest='annotate_timeout_point',
default=False,
action='store_true',
)
parser.add_argument("--require-time-abs-diff",
dest="require_time_abs_diff",
default=0.0,
type=float
)
parser.add_argument('--true-type-fonts',
default=False,
action='store_true'
)
pargs = parser.parse_args(args)
DriverUtil.handleLoggerArgs(pargs, parser)
_logger = logging.getLogger(__name__)
if pargs.max_exec_time is None:
_logger.error('--max-exec-time must be specified')
return 1
if pargs.true_type_fonts:
smtrunner.util.set_true_type_font()
index_to_raw_result_infos = []
index_to_file_name = []
for index, result_infos_file in enumerate([pargs.first_result_info, pargs.second_result_info]):
try:
_logger.info('Loading "{}"'.format(result_infos_file.name))
result_infos = ResultInfo.loadRawResultInfos(result_infos_file)
index_to_raw_result_infos.append(result_infos)
index_to_file_name.append(result_infos_file.name)
except ResultInfo.ResultInfoValidationError as e:
_logger.error('Validation error:\n{}'.format(e))
return 1
_logger.info('Loading done')
result_infos = None
# Perform grouping by benchmark name
key_to_results_infos, rejected_result_infos = ResultInfoUtil.group_result_infos_by(
index_to_raw_result_infos)
if len(rejected_result_infos) > 0:
_logger.warning('There were rejected result infos')
num_merge_failures = 0
for index, l in enumerate(rejected_result_infos):
_logger.warning('Index {} had {} rejections'.format(index, len(l)))
num_merge_failures += len(l)
if num_merge_failures > 0:
if pargs.allow_merge_failures:
_logger.warning('Merge failures being allowed')
else:
_logger.error('Merge failures are not allowed')
return 1
# Generate scatter points
x_scatter_points = []
x_scatter_errors = [[], []]
y_scatter_points = []
y_scatter_errors = [[], []]
count_dual_timeout = 0
count_x_lt_y_not_dt = 0
count_x_gt_y_not_dt = 0
count_x_eq_y_not_dt = 0
# New counting vars
bounds_incomparable_keys = set()
x_gt_y_keys = set()
x_lt_y_keys = set()
x_eq_y_keys = set()
x_eq_y_and_is_timeout_keys = set()
for key, raw_result_info_list in sorted(key_to_results_infos.items(), key=lambda kv:kv[0]):
_logger.info('Ranking on "{}" : '.format(key))
indices_to_use = []
# Compute indices to use
modified_raw_result_info_list = [ ]
# Handle "unknown" results:
# only compare results that are SAT or timed out; anything else is skipped.
for index, ri in enumerate(raw_result_info_list):
if isinstance(ri['event_tag'], str):
# single result
event_tag = ri['event_tag']
else:
|
# Event must be sat or timeout
_logger.info('index {} is {}'.format(index, event_tag))
if event_tag not in { 'sat', 'timeout', 'soft_timeout'}:
# Skip this. We can't do a meaningful comparison here
continue
indices_to_use.append(index)
# Normalise timeouts to a fixed execution time (the --max-exec-time cap).
if event_tag in {'timeout', 'soft_timeout'}:
modified_ri = analysis.get_result_with_modified_time(
ri,
pargs.max_exec_time)
_logger.debug('modified_ri: {}'.format(
pprint.pformat(modified_ri)))
_logger.debug(
'Treating index {} for {} as having max exec time due to timeout'.format(
index,
key))
modified_raw_result_info_list.append(modified_ri)
else:
modified_raw_result_info_list.append(ri)
_logger.debug('used indices_to_use: {}'.format(indices_to_use))
if len(indices_to_use) != 2:
# Skip this one. One of the result infos can't be compared
# against.
continue
assert len(indices_to_use) == 2
# Get execution times
index_to_execution_time_bounds = analysis.get_index_to_execution_time_bounds(
modified_raw_result_info_list,
indices_to_use,
pargs.max_exec_time,
analysis.get_arithmetic_mean_and_99_confidence_intervals,
['dsoes_wallclock', 'wallclock'])
assert isinstance(index_to_execution_time_bounds, list)
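# Each entry is assumed to be a (lower, mean, upper) triple produced by the
# 99%-confidence-interval helper, hence the [0]/[1]/[2] indexing below.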
x_scatter_point_bounds = index_to_execution_time_bounds[0]
y_scatter_point_bounds = index_to_execution_time_bounds[1]
x_scatter_point = x_scatter_point_bounds[1] # mean
y_scatter_point = y_scatter_point_bounds[1] # mean
x_scatter_lower_error = x_scatter_point_bounds[1] - x_scatter_point_bounds[0]
assert x_scatter_lower_error >= 0
x_scatter_higher_error = x_scatter_point_bounds[2] - x_scatter_point_bounds[1]
assert x_scatter_higher_error >= 0
y_scatter_lower_error = y_scatter_point_bounds[1] - y_scatter_point_bounds[0]
assert y_scatter_lower_error >= 0
y_scatter_higher_error = y_scatter_point_bounds[2] - y_scatter_point_bounds[1]
assert y_scatter_higher_error >= 0
x_scatter_points.append(x_scatter_point)
y_scatter_points.append(y_scatter_point)
# Error bar points
#x_scatter_errors.append((x_scatter_lower_error, x_scatter_higher_error))
x_scatter_errors[0].append(x_scatter_lower_error)
x_scatter_errors[1].append(x_scatter_higher_error)
#y_scatter_errors.append((y_scatter_lower_error, y_scatter_higher_error))
y_scatter_errors[0].append(y_scatter_lower_error)
y_scatter_errors[1].append(y_scatter_higher_error)
# LEGACY: Now do some counting
if x_scatter_point == y_scatter_point:
if x_scatter_point == pargs.max_exec_time:
assert x_scatter_lower_error == 0
assert x_scatter_higher_error == 0
assert y_scatter_lower_error == 0
assert y_scatter_higher_error == 0
count_dual_timeout += 1
else:
_logger.info('Found count_x_eq_y_not_dt: x: {}, key: {}'.format(
x_scatter_point,
key))
count_x_eq_y_not_dt += 1
elif x_scatter_point > y_scatter_point:
count_x_gt_y_not_dt += 1
else:
assert x_scatter_point < y_scatter_point
count_x_lt_y_not_dt += 1
# SMARTER counting: uses error bounds
if analysis.bounds_overlap(x_scatter_point_bounds, y_scatter_point_bounds):
# Bounds overlap, so we can't compare the execution times meaningfully
bounds_incomparable_keys.add(key)
# However if both are timeouts we can note this
if x_scatter_point == pargs.max_exec_time:
x_eq_y_and_is_timeout_keys.add(key)
else:
# Compare the means
if x_scatter_point > y_scatter_point and abs(x_scatter_point - y_scatter_point) > pargs.require_time_abs_diff:
x_gt_y_keys.add(key)
elif x_scatter_point < y_scatter_point and abs(x_scatter_point - y_scatter_point) > pargs.require_time_abs_diff:
x_lt_y_keys.add(key)
else:
if pargs.require_time_abs_diff == 0.0:
assert x_scatter_point == y_scatter_point
x_eq_y_keys.add(key)
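# Illustrative example of the rule above: with 99% CIs x = (1.0, 1.2, 1.4)
# and y = (1.3, 1.5, 1.7) the intervals overlap, so the key is counted as
# incomparable; with y = (2.0, 2.2, 2.4) the means (1.2 vs 2.2) are compared
# instead, subject to --require-time-abs-diff.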
# Report counts
print("# of points : {}".format(len(x_scatter_points)))
print("LEGACY: count_dual_timeout: {}".format(count_dual_timeout))
print("LEGACY: count_x_eq_y_not_dt: {}".format(count_x_eq_y_not_dt))
print("LEGACY: count_x_gt_y_not_dt: {}".format(count_x_gt_y_not_dt))
print("LEGACY: count_x_lt_y_not_dt: {}".format(count_x_lt_y_not_dt))
print("")
print("# x > y and no bound overlap: {}".format(len(x_gt_y_keys)))
print("# x < y and no bound overlap: {}".format(len(x_lt_y_keys)))
print("# x = y and no bound overlap: {}".format(len(x_eq_y_keys)))
print("# incomparable: {}".format(len(bounds_incomparable_keys)))
print("# of x = y and is timeout: {}".format(len(x_eq_y_and_is_timeout_keys)))
# Now plot
extend = 100
tickFreq = 100
assert len(x_scatter_points) == len(y_scatter_points)
fig, ax = plt.subplots()
fig.patch.set_alpha(0.0) # Transparent
if pargs.error_bars:
splot = ax.errorbar(
x_scatter_points,
y_scatter_points,
xerr=x_scatter_errors,
yerr=y_scatter_errors,
fmt='o',
picker=5,
ms=pargs.point_size/2.0, # HACK
ecolor='black',
capsize=5,
#capthick=10,
)
else:
splot = ax.scatter(x_scatter_points, y_scatter_points, picker=5, s=pargs.point_size)
xlabel = index_to_file_name[0] if pargs.xlabel is None else pargs.xlabel
ylabel = index_to_file_name[1] if pargs.ylabel is None else pargs.ylabel
xlabel += pargs.axis_label_suffix
ylabel += pargs.axis_label_suffix
ax.xaxis.label.set_color(pargs.axis_label_colour)
ax.yaxis.label.set_color(pargs.axis_label_colour)
ax.tick_params(axis='x', colors=pargs.axis_label_colour)
ax.tick_params(axis='y', colors=pargs.axis_label_colour)
ax.set_xlabel(xlabel)
ax.set_ylabel(ylabel)
ax.set_xlim(0,pargs.max_exec_time + extend)
ax.set_ylim(0,pargs.max_exec_time + extend)
# +1 is just so the pargs.max_exec_time is included because range()'s end is not inclusive
ax.set_xticks(range(0, int(pargs.max_exec_time) + 1, tickFreq))
ax.set_yticks(range(0, int(pargs.max_exec_time) + 1, tickFreq))
# Construct title keyword args
title_kwargs = {
'num_points': len(x_scatter_points),
'xlabel': xlabel,
'ylabel': ylabel,
'num_keys': len(key_to_results_infos.keys()),
}
ax.set_title(pargs.title.format(**title_kwargs))
# Identity line
ax.plot([ 0 , pargs.max_exec_time + extend], [0, pargs.max_exec_time + extend], linewidth=1.0, color='black')
if pargs.annotate:
if pargs.annotate_use_legacy_values:
_logger.warning('Displaying legacy values')
x_lt_value_to_display = count_x_lt_y_not_dt
x_gt_value_to_display = count_x_gt_y_not_dt
else:
_logger.info('Displaying new values')
x_lt_value_to_display = len(x_lt_y_keys)
x_gt_value_to_display = len(x_gt_y_keys)
ax.annotate(
'{}'.format(x_lt_value_to_display),
xy=(200,550),
fontsize=40
)
ax.annotate(
'{}'.format(x_gt_value_to_display),
xy=(550,200),
fontsize=40
)
# timeout point annotation
if pargs.annotate_timeout_point:
num_dual_timeouts = len(x_eq_y_and_is_timeout_keys)
dual_timeout_txt = None
if num_dual_timeouts == 1:
dual_timeout_txt = '{} dual timeout'.format(num_dual_timeouts)
else:
dual_timeout_txt = '{} dual timeouts'.format(num_dual_timeouts)
ax.annotate(dual_timeout_txt,
# HACK: the -15.00 x-offset positions the arrow properly
xy=(pargs.max_exec_time - 15.00, pargs.max_exec_time), xycoords='data',
xytext=(-50, 0), textcoords='offset points',
arrowprops=dict(facecolor='black', shrink=0.05, width=1.5, headwidth=7.0),
horizontalalignment='right', verticalalignment='center',
bbox=dict(boxstyle='round',fc='None'),
fontsize=15)
# Finally show
if pargs.output is None:
plt.show()
else:
# For command line usage
fig.show()
fig.savefig(pargs.output, format='pdf')
return 0
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
| assert isinstance(ri['event_tag'], list)
event_tag, _ = event_analysis.merge_aggregate_events(
ri['event_tag']) | conditional_block |
result-info-plot-scatter-exec-time.py | #!/usr/bin/env python
# Copyright (c) 2017, Daniel Liew
# This file is covered by the license in LICENSE.txt
# vim: set sw=4 ts=4 softtabstop=4 expandtab:
"""
Read two result info files and generate a scatter plot of execution time
"""
from load_smtrunner import add_smtrunner_to_module_search_path
add_smtrunner_to_module_search_path()
from smtrunner import ResultInfo, DriverUtil, ResultInfoUtil, analysis, event_analysis
import smtrunner.util
import matplotlib.pyplot as plt
import argparse
import json
import logging
import math
import os
import pprint
import random
import re
import sys
import yaml
_logger = None
def strip(prefix, path):
|
def main(args):
global _logger
global _fail_count
parser = argparse.ArgumentParser(description=__doc__)
DriverUtil.parserAddLoggerArg(parser)
parser.add_argument('first_result_info',
type=argparse.FileType('r'))
parser.add_argument('second_result_info',
type=argparse.FileType('r'))
parser.add_argument('--base', type=str, default="")
parser.add_argument('--point-size', type=float, default=25.0, dest='point_size')
parser.add_argument('--allow-merge-failures',
dest='allow_merge_failures',
default=False,
action='store_true',
)
parser.add_argument('--max-exec-time',
default=None,
type=float,
dest='max_exec_time',
)
parser.add_argument('--title',
default="{num_keys} benchmarks, {num_points} jointly SAT or timeout"
)
parser.add_argument("--xlabel",
type=str,
default=None,
)
parser.add_argument("--ylabel",
type=str,
default=None,
)
parser.add_argument("--axis-label-suffix",
type=str,
default=" execution time (s)",
dest="axis_label_suffix",
)
parser.add_argument("--axis-label-colour",
type=str,
default="black",
dest="axis_label_colour",
)
parser.add_argument("--annotate",
default=False,
action='store_true',
)
parser.add_argument("--annotate-use-legacy-values",
default=False,
action='store_true',
)
parser.add_argument("--output",
default=None,
type=argparse.FileType('wb'),
)
parser.add_argument("--error-bars",
default=False,
action='store_true',
)
parser.add_argument("--annotate-timeout-point",
dest='annotate_timeout_point',
default=False,
action='store_true',
)
parser.add_argument("--require-time-abs-diff",
dest="require_time_abs_diff",
default=0.0,
type=float
)
parser.add_argument('--true-type-fonts',
default=False,
action='store_true'
)
pargs = parser.parse_args(args)
DriverUtil.handleLoggerArgs(pargs, parser)
_logger = logging.getLogger(__name__)
if pargs.max_exec_time is None:
_logger.error('--max-exec-time must be specified')
return 1
if pargs.true_type_fonts:
smtrunner.util.set_true_type_font()
index_to_raw_result_infos = []
index_to_file_name = []
for index, result_infos_file in enumerate([pargs.first_result_info, pargs.second_result_info]):
try:
_logger.info('Loading "{}"'.format(result_infos_file.name))
result_infos = ResultInfo.loadRawResultInfos(result_infos_file)
index_to_raw_result_infos.append(result_infos)
index_to_file_name.append(result_infos_file.name)
except ResultInfo.ResultInfoValidationError as e:
_logger.error('Validation error:\n{}'.format(e))
return 1
_logger.info('Loading done')
result_infos = None
# Perform grouping by benchmark name
key_to_results_infos, rejected_result_infos = ResultInfoUtil.group_result_infos_by(
index_to_raw_result_infos)
if len(rejected_result_infos) > 0:
_logger.warning('There were rejected result infos')
num_merge_failures = 0
for index, l in enumerate(rejected_result_infos):
_logger.warning('Index {} had {} rejections'.format(index, len(l)))
num_merge_failures += len(l)
if num_merge_failures > 0:
if pargs.allow_merge_failures:
_logger.warning('Merge failures being allowed')
else:
_logger.error('Merge failures are not allowed')
return 1
# Generate scatter points
x_scatter_points = []
x_scatter_errors = [[], []]
y_scatter_points = []
y_scatter_errors = [[], []]
count_dual_timeout = 0
count_x_lt_y_not_dt = 0
count_x_gt_y_not_dt = 0
count_x_eq_y_not_dt = 0
# New counting vars
bounds_incomparable_keys = set()
x_gt_y_keys = set()
x_lt_y_keys = set()
x_eq_y_keys = set()
x_eq_y_and_is_timeout_keys = set()
for key, raw_result_info_list in sorted(key_to_results_infos.items(), key=lambda kv:kv[0]):
_logger.info('Ranking on "{}" : '.format(key))
indices_to_use = []
# Compute indices to use
modified_raw_result_info_list = [ ]
# Handle "unknown" results:
# only compare results that are SAT or timed out; anything else is skipped.
for index, ri in enumerate(raw_result_info_list):
if isinstance(ri['event_tag'], str):
# single result
event_tag = ri['event_tag']
else:
assert isinstance(ri['event_tag'], list)
event_tag, _ = event_analysis.merge_aggregate_events(
ri['event_tag'])
# Event must be sat or timeout
_logger.info('index {} is {}'.format(index, event_tag))
if event_tag not in { 'sat', 'timeout', 'soft_timeout'}:
# Skip this. We can't do a meaningful comparison here
continue
indices_to_use.append(index)
# Normalise timeouts to a fixed execution time (the --max-exec-time cap).
if event_tag in {'timeout', 'soft_timeout'}:
modified_ri = analysis.get_result_with_modified_time(
ri,
pargs.max_exec_time)
_logger.debug('modified_ri: {}'.format(
pprint.pformat(modified_ri)))
_logger.debug(
'Treating index {} for {} as having max exec time due to timeout'.format(
index,
key))
modified_raw_result_info_list.append(modified_ri)
else:
modified_raw_result_info_list.append(ri)
_logger.debug('used indices_to_use: {}'.format(indices_to_use))
if len(indices_to_use) != 2:
# Skip this one. One of the result infos can't be compared
# against.
continue
assert len(indices_to_use) == 2
# Get execution times
index_to_execution_time_bounds = analysis.get_index_to_execution_time_bounds(
modified_raw_result_info_list,
indices_to_use,
pargs.max_exec_time,
analysis.get_arithmetic_mean_and_99_confidence_intervals,
['dsoes_wallclock', 'wallclock'])
assert isinstance(index_to_execution_time_bounds, list)
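# Each entry is assumed to be a (lower, mean, upper) triple produced by the
# 99%-confidence-interval helper, hence the [0]/[1]/[2] indexing below.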
x_scatter_point_bounds = index_to_execution_time_bounds[0]
y_scatter_point_bounds = index_to_execution_time_bounds[1]
x_scatter_point = x_scatter_point_bounds[1] # mean
y_scatter_point = y_scatter_point_bounds[1] # mean
x_scatter_lower_error = x_scatter_point_bounds[1] - x_scatter_point_bounds[0]
assert x_scatter_lower_error >= 0
x_scatter_higher_error = x_scatter_point_bounds[2] - x_scatter_point_bounds[1]
assert x_scatter_higher_error >= 0
y_scatter_lower_error = y_scatter_point_bounds[1] - y_scatter_point_bounds[0]
assert y_scatter_lower_error >= 0
y_scatter_higher_error = y_scatter_point_bounds[2] - y_scatter_point_bounds[1]
assert y_scatter_higher_error >= 0
x_scatter_points.append(x_scatter_point)
y_scatter_points.append(y_scatter_point)
# Error bar points
#x_scatter_errors.append((x_scatter_lower_error, x_scatter_higher_error))
x_scatter_errors[0].append(x_scatter_lower_error)
x_scatter_errors[1].append(x_scatter_higher_error)
#y_scatter_errors.append((y_scatter_lower_error, y_scatter_higher_error))
y_scatter_errors[0].append(y_scatter_lower_error)
y_scatter_errors[1].append(y_scatter_higher_error)
# LEGACY: Now do some counting
if x_scatter_point == y_scatter_point:
if x_scatter_point == pargs.max_exec_time:
assert x_scatter_lower_error == 0
assert x_scatter_higher_error == 0
assert y_scatter_lower_error == 0
assert y_scatter_higher_error == 0
count_dual_timeout += 1
else:
_logger.info('Found count_x_eq_y_not_dt: x: {}, key: {}'.format(
x_scatter_point,
key))
count_x_eq_y_not_dt += 1
elif x_scatter_point > y_scatter_point:
count_x_gt_y_not_dt += 1
else:
assert x_scatter_point < y_scatter_point
count_x_lt_y_not_dt += 1
# SMARTER counting: uses error bounds
if analysis.bounds_overlap(x_scatter_point_bounds, y_scatter_point_bounds):
# Bounds overlap, so we can't compare the execution times meaningfully
bounds_incomparable_keys.add(key)
# However if both are timeouts we can note this
if x_scatter_point == pargs.max_exec_time:
x_eq_y_and_is_timeout_keys.add(key)
else:
# Compare the means
if x_scatter_point > y_scatter_point and abs(x_scatter_point - y_scatter_point) > pargs.require_time_abs_diff:
x_gt_y_keys.add(key)
elif x_scatter_point < y_scatter_point and abs(x_scatter_point - y_scatter_point) > pargs.require_time_abs_diff:
x_lt_y_keys.add(key)
else:
if pargs.require_time_abs_diff == 0.0:
assert x_scatter_point == y_scatter_point
x_eq_y_keys.add(key)
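# Illustrative example of the rule above: with 99% CIs x = (1.0, 1.2, 1.4)
# and y = (1.3, 1.5, 1.7) the intervals overlap, so the key is counted as
# incomparable; with y = (2.0, 2.2, 2.4) the means (1.2 vs 2.2) are compared
# instead, subject to --require-time-abs-diff.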
# Report counts
print("# of points : {}".format(len(x_scatter_points)))
print("LEGACY: count_dual_timeout: {}".format(count_dual_timeout))
print("LEGACY: count_x_eq_y_not_dt: {}".format(count_x_eq_y_not_dt))
print("LEGACY: count_x_gt_y_not_dt: {}".format(count_x_gt_y_not_dt))
print("LEGACY: count_x_lt_y_not_dt: {}".format(count_x_lt_y_not_dt))
print("")
print("# x > y and no bound overlap: {}".format(len(x_gt_y_keys)))
print("# x < y and no bound overlap: {}".format(len(x_lt_y_keys)))
print("# x = y and no bound overlap: {}".format(len(x_eq_y_keys)))
print("# incomparable: {}".format(len(bounds_incomparable_keys)))
print("# of x = y and is timeout: {}".format(len(x_eq_y_and_is_timeout_keys)))
# Now plot
extend = 100
tickFreq = 100
assert len(x_scatter_points) == len(y_scatter_points)
fig, ax = plt.subplots()
fig.patch.set_alpha(0.0) # Transparent
if pargs.error_bars:
splot = ax.errorbar(
x_scatter_points,
y_scatter_points,
xerr=x_scatter_errors,
yerr=y_scatter_errors,
fmt='o',
picker=5,
ms=pargs.point_size/2.0, # HACK
ecolor='black',
capsize=5,
#capthick=10,
)
else:
splot = ax.scatter(x_scatter_points, y_scatter_points, picker=5, s=pargs.point_size)
xlabel = index_to_file_name[0] if pargs.xlabel is None else pargs.xlabel
ylabel = index_to_file_name[1] if pargs.ylabel is None else pargs.ylabel
xlabel += pargs.axis_label_suffix
ylabel += pargs.axis_label_suffix
ax.xaxis.label.set_color(pargs.axis_label_colour)
ax.yaxis.label.set_color(pargs.axis_label_colour)
ax.tick_params(axis='x', colors=pargs.axis_label_colour)
ax.tick_params(axis='y', colors=pargs.axis_label_colour)
ax.set_xlabel(xlabel)
ax.set_ylabel(ylabel)
ax.set_xlim(0,pargs.max_exec_time + extend)
ax.set_ylim(0,pargs.max_exec_time + extend)
# +1 is just so the pargs.max_exec_time is included because range()'s end is not inclusive
ax.set_xticks(range(0, int(pargs.max_exec_time) + 1, tickFreq))
ax.set_yticks(range(0, int(pargs.max_exec_time) + 1, tickFreq))
# Construct title keyword args
title_kwargs = {
'num_points': len(x_scatter_points),
'xlabel': xlabel,
'ylabel': ylabel,
'num_keys': len(key_to_results_infos.keys()),
}
ax.set_title(pargs.title.format(**title_kwargs))
# Identity line
ax.plot([ 0 , pargs.max_exec_time + extend], [0, pargs.max_exec_time + extend], linewidth=1.0, color='black')
if pargs.annotate:
if pargs.annotate_use_legacy_values:
_logger.warning('Displaying legacy values')
x_lt_value_to_display = count_x_lt_y_not_dt
x_gt_value_to_display = count_x_gt_y_not_dt
else:
_logger.info('Displaying new values')
x_lt_value_to_display = len(x_lt_y_keys)
x_gt_value_to_display = len(x_gt_y_keys)
ax.annotate(
'{}'.format(x_lt_value_to_display),
xy=(200,550),
fontsize=40
)
ax.annotate(
'{}'.format(x_gt_value_to_display),
xy=(550,200),
fontsize=40
)
# timeout point annotation
if pargs.annotate_timeout_point:
num_dual_timeouts = len(x_eq_y_and_is_timeout_keys)
dual_timeout_txt = None
if num_dual_timeouts == 1:
dual_timeout_txt = '{} dual timeout'.format(num_dual_timeouts)
else:
dual_timeout_txt = '{} dual timeouts'.format(num_dual_timeouts)
ax.annotate(dual_timeout_txt,
# HACK: the -15.00 x-offset positions the arrow properly
xy=(pargs.max_exec_time - 15.00, pargs.max_exec_time), xycoords='data',
xytext=(-50, 0), textcoords='offset points',
arrowprops=dict(facecolor='black', shrink=0.05, width=1.5, headwidth=7.0),
horizontalalignment='right', verticalalignment='center',
bbox=dict(boxstyle='round',fc='None'),
fontsize=15)
# Finally show
if pargs.output is None:
plt.show()
else:
# For command line usage
fig.show()
fig.savefig(pargs.output, format='pdf')
return 0
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
| if prefix == "":
return path
if path.startswith(prefix):
return path[len(prefix):] | identifier_body |
result-info-plot-scatter-exec-time.py | #!/usr/bin/env python
# Copyright (c) 2017, Daniel Liew
# This file is covered by the license in LICENSE.txt
# vim: set sw=4 ts=4 softtabstop=4 expandtab:
"""
Read two result info files and generate a scatter plot of execution time
"""
from load_smtrunner import add_smtrunner_to_module_search_path
add_smtrunner_to_module_search_path()
from smtrunner import ResultInfo, DriverUtil, ResultInfoUtil, analysis, event_analysis
import smtrunner.util
import matplotlib.pyplot as plt
import argparse
import json
import logging
import math
import os
import pprint
import random
import re
import sys
import yaml
_logger = None
def strip(prefix, path):
if prefix == "":
return path
if path.startswith(prefix):
return path[len(prefix):]
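# e.g. strip("results/", "results/foo.smt2") == "foo.smt2". Note that when
# `path` does not start with a non-empty `prefix`, execution falls through
# and the function implicitly returns None.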
def main(args):
global _logger
global _fail_count
parser = argparse.ArgumentParser(description=__doc__)
DriverUtil.parserAddLoggerArg(parser)
parser.add_argument('first_result_info',
type=argparse.FileType('r'))
parser.add_argument('second_result_info',
type=argparse.FileType('r'))
parser.add_argument('--base', type=str, default="")
parser.add_argument('--point-size', type=float, default=25.0, dest='point_size')
parser.add_argument('--allow-merge-failures',
dest='allow_merge_failures',
default=False,
action='store_true',
)
parser.add_argument('--max-exec-time',
default=None,
type=float,
dest='max_exec_time',
)
parser.add_argument('--title',
default="{num_keys} benchmarks, {num_points} jointly SAT or timeout"
)
parser.add_argument("--xlabel",
type=str,
default=None,
)
parser.add_argument("--ylabel",
type=str,
default=None,
)
parser.add_argument("--axis-label-suffix",
type=str,
default=" execution time (s)",
dest="axis_label_suffix",
)
parser.add_argument("--axis-label-colour",
type=str,
default="black",
dest="axis_label_colour",
)
parser.add_argument("--annotate",
default=False,
action='store_true',
)
parser.add_argument("--annotate-use-legacy-values",
default=False,
action='store_true',
)
parser.add_argument("--output",
default=None,
type=argparse.FileType('wb'),
)
parser.add_argument("--error-bars",
default=False,
action='store_true',
)
parser.add_argument("--annotate-timeout-point",
dest='annotate_timeout_point',
default=False,
action='store_true',
)
parser.add_argument("--require-time-abs-diff",
dest="require_time_abs_diff",
default=0.0,
type=float
)
parser.add_argument('--true-type-fonts',
default=False,
action='store_true'
)
pargs = parser.parse_args(args)
DriverUtil.handleLoggerArgs(pargs, parser)
_logger = logging.getLogger(__name__)
if pargs.max_exec_time is None:
_logger.error('--max-exec-time must be specified')
return 1
if pargs.true_type_fonts:
smtrunner.util.set_true_type_font()
index_to_raw_result_infos = []
index_to_file_name = []
for index, result_infos_file in enumerate([pargs.first_result_info, pargs.second_result_info]):
try:
_logger.info('Loading "{}"'.format(result_infos_file.name))
result_infos = ResultInfo.loadRawResultInfos(result_infos_file)
index_to_raw_result_infos.append(result_infos)
index_to_file_name.append(result_infos_file.name)
except ResultInfo.ResultInfoValidationError as e:
_logger.error('Validation error:\n{}'.format(e))
return 1
_logger.info('Loading done')
result_infos = None
# Perform grouping by benchmark name
key_to_results_infos, rejected_result_infos = ResultInfoUtil.group_result_infos_by(
index_to_raw_result_infos)
if len(rejected_result_infos) > 0:
_logger.warning('There were rejected result infos')
num_merge_failures = 0
for index, l in enumerate(rejected_result_infos):
_logger.warning('Index {} had {} rejections'.format(index, len(l)))
num_merge_failures += len(l)
if num_merge_failures > 0:
if pargs.allow_merge_failures:
_logger.warning('Merge failures being allowed')
else:
_logger.error('Merge failures are not allowed')
return 1
# Generate scatter points
x_scatter_points = []
x_scatter_errors = [[], []]
y_scatter_points = []
y_scatter_errors = [[], []]
count_dual_timeout = 0
count_x_lt_y_not_dt = 0
count_x_gt_y_not_dt = 0
count_x_eq_y_not_dt = 0
# New counting vars
bounds_incomparable_keys = set()
x_gt_y_keys = set()
x_lt_y_keys = set()
x_eq_y_keys = set()
x_eq_y_and_is_timeout_keys = set()
for key, raw_result_info_list in sorted(key_to_results_infos.items(), key=lambda kv:kv[0]):
_logger.info('Ranking on "{}" : '.format(key))
indices_to_use = []
# Compute indices to use
modified_raw_result_info_list = [ ]
# Handle "unknown" results:
# only compare results that are SAT or timed out; anything else is skipped.
for index, ri in enumerate(raw_result_info_list):
if isinstance(ri['event_tag'], str):
# single result
event_tag = ri['event_tag']
else:
assert isinstance(ri['event_tag'], list)
event_tag, _ = event_analysis.merge_aggregate_events(
ri['event_tag'])
# Event must be sat or timeout
_logger.info('index {} is {}'.format(index, event_tag))
if event_tag not in { 'sat', 'timeout', 'soft_timeout'}:
# Skip this. We can't do a meaningful comparison here
continue
indices_to_use.append(index)
# Normalise timeouts to a fixed execution time (the --max-exec-time cap).
if event_tag in {'timeout', 'soft_timeout'}:
modified_ri = analysis.get_result_with_modified_time(
ri,
pargs.max_exec_time)
_logger.debug('modified_ri: {}'.format(
pprint.pformat(modified_ri)))
_logger.debug(
'Treating index {} for {} as having max exec time due to timeout'.format(
index,
key))
modified_raw_result_info_list.append(modified_ri)
else:
modified_raw_result_info_list.append(ri)
_logger.debug('used indices_to_use: {}'.format(indices_to_use))
if len(indices_to_use) != 2:
# Skip this one. One of the result infos can't be compared
# against.
continue
assert len(indices_to_use) == 2
# Get execution times
index_to_execution_time_bounds = analysis.get_index_to_execution_time_bounds(
modified_raw_result_info_list,
indices_to_use,
pargs.max_exec_time,
analysis.get_arithmetic_mean_and_99_confidence_intervals,
['dsoes_wallclock', 'wallclock'])
assert isinstance(index_to_execution_time_bounds, list)
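# Each entry is assumed to be a (lower, mean, upper) triple produced by the
# 99%-confidence-interval helper, hence the [0]/[1]/[2] indexing below.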
x_scatter_point_bounds = index_to_execution_time_bounds[0]
y_scatter_point_bounds = index_to_execution_time_bounds[1]
x_scatter_point = x_scatter_point_bounds[1] # mean
y_scatter_point = y_scatter_point_bounds[1] # mean
x_scatter_lower_error = x_scatter_point_bounds[1] - x_scatter_point_bounds[0]
assert x_scatter_lower_error >= 0
x_scatter_higher_error = x_scatter_point_bounds[2] - x_scatter_point_bounds[1]
assert x_scatter_higher_error >= 0
y_scatter_lower_error = y_scatter_point_bounds[1] - y_scatter_point_bounds[0]
assert y_scatter_lower_error >= 0
y_scatter_higher_error = y_scatter_point_bounds[2] - y_scatter_point_bounds[1]
assert y_scatter_higher_error >= 0
x_scatter_points.append(x_scatter_point)
y_scatter_points.append(y_scatter_point)
# Error bar points
#x_scatter_errors.append((x_scatter_lower_error, x_scatter_higher_error))
x_scatter_errors[0].append(x_scatter_lower_error)
x_scatter_errors[1].append(x_scatter_higher_error)
#y_scatter_errors.append((y_scatter_lower_error, y_scatter_higher_error))
y_scatter_errors[0].append(y_scatter_lower_error)
y_scatter_errors[1].append(y_scatter_higher_error)
# LEGACY: Now do some counting
if x_scatter_point == y_scatter_point:
if x_scatter_point == pargs.max_exec_time:
assert x_scatter_lower_error == 0
assert x_scatter_higher_error == 0
assert y_scatter_lower_error == 0
assert y_scatter_higher_error == 0
count_dual_timeout += 1
else:
_logger.info('Found count_x_eq_y_not_dt: x: {}, key: {}'.format(
x_scatter_point,
key))
count_x_eq_y_not_dt += 1
elif x_scatter_point > y_scatter_point:
count_x_gt_y_not_dt += 1
else:
assert x_scatter_point < y_scatter_point
count_x_lt_y_not_dt += 1
# SMARTER counting: uses error bounds
if analysis.bounds_overlap(x_scatter_point_bounds, y_scatter_point_bounds):
# Bounds overlap, so we can't compare the execution times meaningfully
bounds_incomparable_keys.add(key)
# However if both are timeouts we can note this
if x_scatter_point == pargs.max_exec_time:
x_eq_y_and_is_timeout_keys.add(key)
else:
# Compare the means
if x_scatter_point > y_scatter_point and abs(x_scatter_point - y_scatter_point) > pargs.require_time_abs_diff:
x_gt_y_keys.add(key)
elif x_scatter_point < y_scatter_point and abs(x_scatter_point - y_scatter_point) > pargs.require_time_abs_diff:
x_lt_y_keys.add(key)
else:
if pargs.require_time_abs_diff == 0.0:
assert x_scatter_point == y_scatter_point
x_eq_y_keys.add(key)
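# Illustrative example of the rule above: with 99% CIs x = (1.0, 1.2, 1.4)
# and y = (1.3, 1.5, 1.7) the intervals overlap, so the key is counted as
# incomparable; with y = (2.0, 2.2, 2.4) the means (1.2 vs 2.2) are compared
# instead, subject to --require-time-abs-diff.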
# Report counts
print("# of points : {}".format(len(x_scatter_points)))
print("LEGACY: count_dual_timeout: {}".format(count_dual_timeout))
print("LEGACY: count_x_eq_y_not_dt: {}".format(count_x_eq_y_not_dt))
print("LEGACY: count_x_gt_y_not_dt: {}".format(count_x_gt_y_not_dt))
print("LEGACY: count_x_lt_y_not_dt: {}".format(count_x_lt_y_not_dt))
print("")
print("# x > y and no bound overlap: {}".format(len(x_gt_y_keys)))
print("# x < y and no bound overlap: {}".format(len(x_lt_y_keys)))
print("# x = y and no bound overlap: {}".format(len(x_eq_y_keys))) | tickFreq = 100
assert len(x_scatter_points) == len(y_scatter_points)
fig, ax = plt.subplots()
fig.patch.set_alpha(0.0) # Transparent
if pargs.error_bars:
splot = ax.errorbar(
x_scatter_points,
y_scatter_points,
xerr=x_scatter_errors,
yerr=y_scatter_errors,
fmt='o',
picker=5,
ms=pargs.point_size/2.0, # HACK
ecolor='black',
capsize=5,
#capthick=10,
)
else:
splot = ax.scatter(x_scatter_points, y_scatter_points, picker=5, s=pargs.point_size)
xlabel = index_to_file_name[0] if pargs.xlabel is None else pargs.xlabel
ylabel = index_to_file_name[1] if pargs.ylabel is None else pargs.ylabel
xlabel += pargs.axis_label_suffix
ylabel += pargs.axis_label_suffix
ax.xaxis.label.set_color(pargs.axis_label_colour)
ax.yaxis.label.set_color(pargs.axis_label_colour)
ax.tick_params(axis='x', colors=pargs.axis_label_colour)
ax.tick_params(axis='y', colors=pargs.axis_label_colour)
ax.set_xlabel(xlabel)
ax.set_ylabel(ylabel)
ax.set_xlim(0,pargs.max_exec_time + extend)
ax.set_ylim(0,pargs.max_exec_time + extend)
# +1 is just so the pargs.max_exec_time is included because range()'s end is not inclusive
ax.set_xticks(range(0, int(pargs.max_exec_time) + 1, tickFreq))
ax.set_yticks(range(0, int(pargs.max_exec_time) + 1, tickFreq))
# Construct title keyword args
title_kwargs = {
'num_points': len(x_scatter_points),
'xlabel': xlabel,
'ylabel': ylabel,
'num_keys': len(key_to_results_infos.keys()),
}
ax.set_title(pargs.title.format(**title_kwargs))
# Identity line
ax.plot([ 0 , pargs.max_exec_time + extend], [0, pargs.max_exec_time + extend], linewidth=1.0, color='black')
if pargs.annotate:
if pargs.annotate_use_legacy_values:
_logger.warning('Displaying legacy values')
x_lt_value_to_display = count_x_lt_y_not_dt
x_gt_value_to_display = count_x_gt_y_not_dt
else:
_logger.info('Displaying new values')
x_lt_value_to_display = len(x_lt_y_keys)
x_gt_value_to_display = len(x_gt_y_keys)
ax.annotate(
'{}'.format(x_lt_value_to_display),
xy=(200,550),
fontsize=40
)
ax.annotate(
'{}'.format(x_gt_value_to_display),
xy=(550,200),
fontsize=40
)
# timeout point annotation
if pargs.annotate_timeout_point:
num_dual_timeouts = len(x_eq_y_and_is_timeout_keys)
dual_timeout_txt = None
if num_dual_timeouts == 1:
dual_timeout_txt = '{} dual timeout'.format(num_dual_timeouts)
else:
dual_timeout_txt = '{} dual timeouts'.format(num_dual_timeouts)
ax.annotate(dual_timeout_txt,
# HACK: the -15.00 x-offset positions the arrow properly
xy=(pargs.max_exec_time - 15.00, pargs.max_exec_time), xycoords='data',
xytext=(-50, 0), textcoords='offset points',
arrowprops=dict(facecolor='black', shrink=0.05, width=1.5, headwidth=7.0),
horizontalalignment='right', verticalalignment='center',
bbox=dict(boxstyle='round',fc='None'),
fontsize=15)
# Finally show
if pargs.output is None:
plt.show()
else:
# For command line usage
fig.show()
fig.savefig(pargs.output, format='pdf')
return 0
if __name__ == '__main__':
sys.exit(main(sys.argv[1:])) | print("# incomparable: {}".format(len(bounds_incomparable_keys)))
print("# of x = y and is timeout: {}".format(len(x_eq_y_and_is_timeout_keys)))
# Now plot
extend = 100 | random_line_split |
result-info-plot-scatter-exec-time.py | #!/usr/bin/env python
# Copyright (c) 2017, Daniel Liew
# This file is covered by the license in LICENSE.txt
# vim: set sw=4 ts=4 softtabstop=4 expandtab:
"""
Read two result info files and generate a scatter plot of execution time
"""
from load_smtrunner import add_smtrunner_to_module_search_path
add_smtrunner_to_module_search_path()
from smtrunner import ResultInfo, DriverUtil, ResultInfoUtil, analysis, event_analysis
import smtrunner.util
import matplotlib.pyplot as plt
import argparse
import json
import logging
import math
import os
import pprint
import random
import re
import sys
import yaml
_logger = None
def | (prefix, path):
if prefix == "":
return path
if path.startswith(prefix):
    return path[len(prefix):]
# Prefix did not match: return the path unchanged instead of None
return path
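# Illustrative examples for the helper above (inputs are assumptions): with
# prefix="results/", "results/a.smt2" -> "a.smt2"; a path that does not
# start with the prefix comes back unchanged.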
def main(args):
global _logger
global _fail_count
parser = argparse.ArgumentParser(description=__doc__)
DriverUtil.parserAddLoggerArg(parser)
parser.add_argument('first_result_info',
type=argparse.FileType('r'))
parser.add_argument('second_result_info',
type=argparse.FileType('r'))
parser.add_argument('--base', type=str, default="")
parser.add_argument('--point-size', type=float, default=25.0, dest='point_size')
parser.add_argument('--allow-merge-failures',
dest='allow_merge_failures',
default=False,
action='store_true',
)
parser.add_argument('--max-exec-time',
default=None,
type=float,
dest='max_exec_time',
)
parser.add_argument('--title',
default="{num_keys} benchmarks, {num_points} jointly SAT or timeout"
)
parser.add_argument("--xlabel",
type=str,
default=None,
)
parser.add_argument("--ylabel",
type=str,
default=None,
)
parser.add_argument("--axis-label-suffix",
type=str,
default=" execution time (s)",
dest="axis_label_suffix",
)
parser.add_argument("--axis-label-colour",
type=str,
default="black",
dest="axis_label_colour",
)
parser.add_argument("--annotate",
default=False,
action='store_true',
)
parser.add_argument("--annotate-use-legacy-values",
default=False,
action='store_true',
)
parser.add_argument("--output",
default=None,
type=argparse.FileType('wb'),
)
parser.add_argument("--error-bars",
default=False,
action='store_true',
)
parser.add_argument("--annotate-timeout-point",
dest='annotate_timeout_point',
default=False,
action='store_true',
)
parser.add_argument("--require-time-abs-diff",
dest="require_time_abs_diff",
default=0.0,
type=float
)
parser.add_argument('--true-type-fonts',
default=False,
action='store_true'
)
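# Illustrative invocation (file names are assumptions):
#   result-info-plot-scatter-exec-time.py --max-exec-time 900 --annotate \
#       --output scatter.pdf first.yml second.yml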
pargs = parser.parse_args(args)
DriverUtil.handleLoggerArgs(pargs, parser)
_logger = logging.getLogger(__name__)
if pargs.max_exec_time is None:
_logger.error('--max-exec-time must be specified')
return 1
if pargs.true_type_fonts:
smtrunner.util.set_true_type_font()
index_to_raw_result_infos = []
index_to_file_name = []
for index, result_infos_file in enumerate([pargs.first_result_info, pargs.second_result_info]):
try:
_logger.info('Loading "{}"'.format(result_infos_file.name))
result_infos = ResultInfo.loadRawResultInfos(result_infos_file)
index_to_raw_result_infos.append(result_infos)
index_to_file_name.append(result_infos_file.name)
except ResultInfo.ResultInfoValidationError as e:
_logger.error('Validation error:\n{}'.format(e))
return 1
_logger.info('Loading done')
result_infos = None
# Perform grouping by benchmark name
key_to_results_infos, rejected_result_infos = ResultInfoUtil.group_result_infos_by(
index_to_raw_result_infos)
if len(rejected_result_infos) > 0:
_logger.warning('There were rejected result infos')
num_merge_failures = 0
for index, l in enumerate(rejected_result_infos):
_logger.warning('Index {} had {} rejections'.format(index, len(l)))
num_merge_failures += len(l)
if num_merge_failures > 0:
if pargs.allow_merge_failures:
_logger.warning('Merge failures being allowed')
else:
_logger.error('Merge failures are not allowed')
return 1
# Generate scatter points
x_scatter_points = []
x_scatter_errors = [[], []]
y_scatter_points = []
y_scatter_errors = [[], []]
count_dual_timeout = 0
count_x_lt_y_not_dt = 0
count_x_gt_y_not_dt = 0
count_x_eq_y_not_dt = 0
# New counting vars
bounds_incomparable_keys = set()
x_gt_y_keys = set()
x_lt_y_keys = set()
x_eq_y_keys = set()
x_eq_y_and_is_timeout_keys = set()
for key, raw_result_info_list in sorted(key_to_results_infos.items(), key=lambda kv:kv[0]):
_logger.info('Ranking on "{}" : '.format(key))
indices_to_use = []
# Compute indices to use
modified_raw_result_info_list = [ ]
# Handle "unknown"
# Only compare results that gave sat/unsat
for index, ri in enumerate(raw_result_info_list):
if isinstance(ri['event_tag'], str):
# single result
event_tag = ri['event_tag']
else:
assert isinstance(ri['event_tag'], list)
event_tag, _ = event_analysis.merge_aggregate_events(
ri['event_tag'])
# Event must be sat or timeout
_logger.info('index {} is {}'.format(index, event_tag))
if event_tag not in { 'sat', 'timeout', 'soft_timeout'}:
# Skip this. We can't do a meaningful comparison here
continue
indices_to_use.append(index)
# Normalise timeouts to have fixed values for the time.
if event_tag in {'timeout', 'soft_timeout'}:
modified_ri = analysis.get_result_with_modified_time(
ri,
pargs.max_exec_time)
_logger.debug('modified_ri: {}'.format(
pprint.pformat(modified_ri)))
_logger.debug(
'Treating index {} for {} due to unknown as having max-time'.format(
index,
key))
modified_raw_result_info_list.append(modified_ri)
else:
modified_raw_result_info_list.append(ri)
_logger.debug('used indices_to_use: {}'.format(indices_to_use))
if len(indices_to_use) != 2:
# Skip this one. One of the result infos can't be compared
# against.
continue
assert len(indices_to_use) == 2
# Get execution times
index_to_execution_time_bounds = analysis.get_index_to_execution_time_bounds(
modified_raw_result_info_list,
indices_to_use,
pargs.max_exec_time,
analysis.get_arithmetic_mean_and_99_confidence_intervals,
['dsoes_wallclock', 'wallclock'])
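# Descriptive note: each entry of index_to_execution_time_bounds is a
# (lower, mean, upper) triple in seconds, as produced by the confidence
# interval helper passed above.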
assert isinstance(index_to_execution_time_bounds, list)
x_scatter_point_bounds = index_to_execution_time_bounds[0]
y_scatter_point_bounds = index_to_execution_time_bounds[1]
x_scatter_point = x_scatter_point_bounds[1] # mean
y_scatter_point = y_scatter_point_bounds[1] # mean
x_scatter_lower_error = x_scatter_point_bounds[1] - x_scatter_point_bounds[0]
assert x_scatter_lower_error >= 0
x_scatter_higher_error = x_scatter_point_bounds[2] - x_scatter_point_bounds[1]
assert x_scatter_higher_error >= 0
y_scatter_lower_error = y_scatter_point_bounds[1] - y_scatter_point_bounds[0]
assert y_scatter_lower_error >= 0
y_scatter_higher_error = y_scatter_point_bounds[2] - y_scatter_point_bounds[1]
assert y_scatter_higher_error >= 0
x_scatter_points.append(x_scatter_point)
y_scatter_points.append(y_scatter_point)
# Error bar points
#x_scatter_errors.append((x_scatter_lower_error, x_scatter_higher_error))
x_scatter_errors[0].append(x_scatter_lower_error)
x_scatter_errors[1].append(x_scatter_higher_error)
#y_scatter_errors.append((y_scatter_lower_error, y_scatter_higher_error))
y_scatter_errors[0].append(y_scatter_lower_error)
y_scatter_errors[1].append(y_scatter_higher_error)
# LEGACY: Now do some counting
if x_scatter_point == y_scatter_point:
if x_scatter_point == pargs.max_exec_time:
assert x_scatter_lower_error == 0
assert x_scatter_higher_error == 0
assert y_scatter_lower_error == 0
assert y_scatter_higher_error == 0
count_dual_timeout += 1
else:
_logger.info('Found count_x_eq_y_not_dt: x: {}, key: {}'.format(
x_scatter_point,
key))
count_x_eq_y_not_dt += 1
elif x_scatter_point > y_scatter_point:
count_x_gt_y_not_dt += 1
else:
assert x_scatter_point < y_scatter_point
count_x_lt_y_not_dt += 1
# SMARTER counting: uses error bounds
if analysis.bounds_overlap(x_scatter_point_bounds, y_scatter_point_bounds):
# Bounds overlap, we can't compare the execution times in a meaningful way
bounds_incomparable_keys.add(key)
# However if both are timeouts we can note this
if x_scatter_point == pargs.max_exec_time:
x_eq_y_and_is_timeout_keys.add(key)
else:
# Compare the means
if x_scatter_point > y_scatter_point and abs(x_scatter_point - y_scatter_point) > pargs.require_time_abs_diff:
x_gt_y_keys.add(key)
elif x_scatter_point < y_scatter_point and abs(x_scatter_point - y_scatter_point) > pargs.require_time_abs_diff:
x_lt_y_keys.add(key)
else:
if pargs.require_time_abs_diff == 0.0:
assert x_scatter_point == y_scatter_point
x_eq_y_keys.add(key)
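# Descriptive note on the classification above: overlapping confidence
# bounds make a key incomparable (and a dual timeout when both means sit at
# max_exec_time); otherwise the means are compared, with
# require_time_abs_diff deciding whether a small difference still counts
# as x = y.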
# Report counts
print("# of points : {}".format(len(x_scatter_points)))
print("LEGACY: count_dual_timeout: {}".format(count_dual_timeout))
print("LEGACY: count_x_eq_y_not_dt: {}".format(count_x_eq_y_not_dt))
print("LEGACY: count_x_gt_y_not_dt: {}".format(count_x_gt_y_not_dt))
print("LEGACY: count_x_lt_y_not_dt: {}".format(count_x_lt_y_not_dt))
print("")
print("# x > y and no bound overlap: {}".format(len(x_gt_y_keys)))
print("# x < y and no bound overlap: {}".format(len(x_lt_y_keys)))
print("# x = y and no bound overlap: {}".format(len(x_eq_y_keys)))
print("# incomparable: {}".format(len(bounds_incomparable_keys)))
print("# of x = y and is timeout: {}".format(len(x_eq_y_and_is_timeout_keys)))
# Now plot
extend = 100
tickFreq = 100
assert len(x_scatter_points) == len(y_scatter_points)
fig, ax = plt.subplots()
fig.patch.set_alpha(0.0) # Transparent
if pargs.error_bars:
splot = ax.errorbar(
x_scatter_points,
y_scatter_points,
xerr=x_scatter_errors,
yerr=y_scatter_errors,
fmt='o',
picker=5,
ms=pargs.point_size/2.0, # HACK
ecolor='black',
capsize=5,
#capthick=10,
)
else:
splot = ax.scatter(x_scatter_points, y_scatter_points, picker=5, s=pargs.point_size)
xlabel = index_to_file_name[0] if pargs.xlabel is None else pargs.xlabel
ylabel = index_to_file_name[1] if pargs.ylabel is None else pargs.ylabel
xlabel += pargs.axis_label_suffix
ylabel += pargs.axis_label_suffix
ax.xaxis.label.set_color(pargs.axis_label_colour)
ax.yaxis.label.set_color(pargs.axis_label_colour)
ax.tick_params(axis='x', colors=pargs.axis_label_colour)
ax.tick_params(axis='y', colors=pargs.axis_label_colour)
ax.set_xlabel(xlabel)
ax.set_ylabel(ylabel)
ax.set_xlim(0,pargs.max_exec_time + extend)
ax.set_ylim(0,pargs.max_exec_time + extend)
# +1 is just so the pargs.max_exec_time is included because range()'s end is not inclusive
ax.set_xticks(range(0, int(pargs.max_exec_time) + 1, tickFreq))
ax.set_yticks(range(0, int(pargs.max_exec_time) + 1, tickFreq))
# Construct title keyword args
title_kwargs = {
'num_points': len(x_scatter_points),
'xlabel': xlabel,
'ylabel': ylabel,
'num_keys': len(key_to_results_infos.keys()),
}
ax.set_title(pargs.title.format(**title_kwargs))
# Identity line
ax.plot([ 0 , pargs.max_exec_time + extend], [0, pargs.max_exec_time + extend], linewidth=1.0, color='black')
if pargs.annotate:
if pargs.annotate_use_legacy_values:
_logger.warning('Displaying legacy values')
x_lt_value_to_display = count_x_lt_y_not_dt
x_gt_value_to_display = count_x_gt_y_not_dt
else:
_logger.info('Displaying new values')
x_lt_value_to_display = len(x_lt_y_keys)
x_gt_value_to_display = len(x_gt_y_keys)
ax.annotate(
'{}'.format(x_lt_value_to_display),
xy=(200,550),
fontsize=40
)
ax.annotate(
'{}'.format(x_gt_value_to_display),
xy=(550,200),
fontsize=40
)
# timeout point annotation
if pargs.annotate_timeout_point:
num_dual_timeouts = len(x_eq_y_and_is_timeout_keys)
dual_timeout_txt = None
if num_dual_timeouts == 1:
dual_timeout_txt = '{} dual timeout'.format(num_dual_timeouts)
else:
dual_timeout_txt = '{} dual timeouts'.format(num_dual_timeouts)
ax.annotate(dual_timeout_txt,
# HACK: the -15 x-offset anchors the arrow just inside the timeout point
xy=(pargs.max_exec_time - 15.00, pargs.max_exec_time), xycoords='data',
xytext=(-50, 0), textcoords='offset points',
arrowprops=dict(facecolor='black', shrink=0.05, width=1.5, headwidth=7.0),
horizontalalignment='right', verticalalignment='center',
bbox=dict(boxstyle='round',fc='None'),
fontsize=15)
# Finally show
if pargs.output is None:
plt.show()
else:
# For command line usage
fig.show()
fig.savefig(pargs.output, format='pdf')
return 0
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
| strip | identifier_name |
api.js | var express = require('express');
var router = express.Router();
var multer = require('multer');
var path = require('path');
var fs = require('fs');
var PDFParser = require('pdf2json');
var moment = require('moment');
const util = require('util');
var Nominatim = require('node-nominatim2');
var Perizia = require('../models/perizia');
//data needed by OpenStreetMap's Nominatim service
var options = {
useragent: 'MyApp',
referer: 'https://github.com/xbgmsharp/node-nominatim2',
timeout: 1000
};
var nominatim = new Nominatim(options);
var uploadDestination = 'uploads';
var SPACE = "\u00a0";
const destination = function (req, file, cb) {
cb(null, uploadDestination)
};
const filename = function (req, file, cb) {
var datetimestamp = Date.now();
cb(null, file.originalname);
//cb(null, file.fieldname + '-' + datetimestamp + '.' + file.originalname.split('.')[file.originalname.split('.').length -1]);
};
var storage = multer.diskStorage({ //multer's disk storage settings
destination, filename
});
var upload = multer({ //multer settings
storage: storage
}).single('file');
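// Illustrative usage of the upload endpoint defined below (host and mount
// point are assumptions):
//   curl -F "file=@perizia.pdf" http://localhost:3000/upload
// multer writes the PDF to uploads/ under its original file name.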
var ubicazioneLines = [{text:"Comune", jsonId:"Comune", skip:1},
{text:"Provincia", jsonId:"Provincia", skip:1},
{text:"CAP", jsonId:"CAP", skip:1},
{text:"Indirizzo", jsonId:"Indirizzo", skip:1},
{text:"N." + SPACE + "civico", jsonId:"N_civico", skip:1},
{text:"Interno", jsonId:"Interno", skip:1},
{text:"Scala", jsonId:"Scala", skip:1},
{text:"Piano", jsonId:"Piano", skip:1}
];
var relevantLines = [
{text:"Codice" + SPACE + "CRIF", jsonId:"_id", skip:1},
{text:"Data" + SPACE + "Evasione" + SPACE + "Perizia", jsonId:"Data_Evasione_Perizia", skip:1, date: true},
{text:"Descrizione" + SPACE + "unità" + SPACE + "di" + SPACE + "stima", jsonId:"Descrizione_unita_di_stima", skip:1},
{text:"Valore" + SPACE + "di" + SPACE + "mercato" + SPACE + "del" + SPACE + "lotto", jsonId:"Valore_di_mercato_del_lotto", skip:2, number:true},
{text:"Foglio" + SPACE + "di" + SPACE + "mappa", jsonId:"Foglio", skip:1},
{text:"Totale" + SPACE + "superficie" + SPACE + "principale", jsonId:"Totale_superficie_principale", skip:2, number:true},
{text:"Impianto" + SPACE + "elettrico", jsonId:"Impianto_elettrico_anni", skip:2},
{text:"Impianto" + SPACE + "idraulico", jsonId:"Impianto_idraulico_anni", skip:2},
{text:"Tipologia" + SPACE + "edilizia" , jsonId:"Tipologia_edilizia", skip:1},
{text:"COMMERCIALE", jsonId:"SUPERFICIE_COMMERCIALE_MQ", skip:2, number:true},
{text:"Particella", jsonId:"Particella", skip:7},
{text:"Categoria", jsonId:"Categoria", skip:7},
{text:"Consistenza", jsonId:"Consistenza", skip:7, number:true},
{text:"RC", jsonId:"RC", skip:7, number:true},
{text:"Anno" + SPACE + "di" + SPACE + "costruzione", jsonId:"Anno_di_costruzione", skip:1}
];
var superficiLines = [
{text:"Descrizione", jsonId:"Descrizione", skip:1},
{text:"Misura (mq)", jsonId:"Misura_mq", skip:1, number:true},
{text:"Rapporto mercantile", jsonId:"Rapporto_mercantile", skip:1, number:true},
{text:"Sup Rap", jsonId:"Sup_Rap", skip:1, number:true}
];
var parseJsonPdf = function(arr){
var str = '';
var jsonString = '{';
for (var i = 0; i < arr.length; i ++) {
for (var j = 0; j < arr[i].Texts.length; j ++) {
str = decodeURIComponent(arr[i].Texts[j].R[0].T);
if (str.lastIndexOf('COLLEGATE', 0) === 0){
jsonString = jsonString + '\"SUPERFICI_SECONDARIE_ANNESSE_E_COLLEGATE\":\n[';
j = j + 4;
for (var h = 1; h < 20; h ++){
j = j + 1
str = decodeURIComponent(arr[i].Texts[j].R[0].T);
if (str.lastIndexOf(h.toString() + '.', 0) === 0){
if (h != 1){
jsonString += ',';
}
jsonString = jsonString + '{';
for (var w = 0; w < superficiLines.length; w ++){
j = j + 1;
str = decodeURIComponent(arr[i].Texts[j].R[0].T);
if (superficiLines[w].number){
str = str.replace('.','').replace(',','.');
}
jsonString = jsonString + '\"' + superficiLines[w].jsonId + '\":\"' + str + '\"';
if (w != superficiLines.length - 1) {
jsonString += ',';
}
jsonString += '\n';
}
jsonString += '}\n';
//jsonString = jsonString + '},\n';
} else {
jsonString += '],\n';
h = 20;
}
}
} else if (str.lastIndexOf(SPACE + 'UNITA\'' + SPACE + 'IMMOBILIARE', 0 ) === 0){
j = j + 1
str = decodeURIComponent(arr[i].Texts[j].R[0].T);
if (str.lastIndexOf('UBICAZIONE', 0) === 0){
for (var w = 0; w < ubicazioneLines.length; w ++){
j = j + 1;
str = decodeURIComponent(arr[i].Texts[j].R[0].T);
if (str.lastIndexOf(ubicazioneLines[w].text, 0) === 0){
j = j + 1;
str = decodeURIComponent(arr[i].Texts[j].R[0].T);
jsonString += '\"' + ubicazioneLines[w].jsonId + '\":\"' + str + '\",\n';
}
}
}
} else {
for (var w = 0; w < relevantLines.length; w ++){
if (str.lastIndexOf(relevantLines[w].text, 0) === 0){
j = j + relevantLines[w].skip;
var text = decodeURIComponent(arr[i].Texts[j].R[0].T);
//converts a number in Italian format to a standard JSON number
//e.g. 1,23 -> 1.23
if (relevantLines[w].number){
text = text.replace('.','').replace(',','.');
} else if (relevantLines[w].date){
//converts a date in Italian format dd/mm/yyyy to a standard json format date
text = moment(text, 'DD/MM/YYYY').format('MM/DD/YYYY');
}
jsonString += '\"' + relevantLines[w].jsonId + '\":\"' + text + '\"';
j = j - relevantLines[w].skip;
if (w != relevantLines.length - 1) {
jsonString += ',';
}
jsonString += '\n';
}
}
}
}
}
jsonString += '}';
return jsonString;
}
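// Illustrative sketch of the normalisation done above (values are examples):
// number fields: "1.234,56" -> "1234.56"; date fields, via moment:
// "31/12/2020" -> "12/31/2020".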
var salvaPerizia = function (json){
console.log("salvo la perizia");
var uploadedPerizia = new Perizia(json);
uploadedPerizia.save(function(err) {
if (err) {
//console.log(err.code);
if (err.code === 11000){//duplicate key
console.log(err.message);
return;
} else {
console.log(err);
}
} else {
console.log('La perizia CRIF ' + json.Nome_File + ' è stata salvata in mongo');
}
});
}
router.post('/upload', function(req, res){
upload(req, res, function(err){
if (err){
res.json({error_code:1, err_desc:err});
return;
}
res.json({error_code:0, err_desc:null});
var pdfParser = new PDFParser(this, 1);
pdfParser.on("pdfParser_dataError", errData => console.error(errData.parserError) );
pdfParser.on("pdfParser_dataReady", pdfData => {
var jsonPdf = JSON.parse(JSON.stringify(pdfData, null, '\t')).formImage.Pages;
var jsonString = parseJsonPdf(jsonPdf);
fs.writeFile(uploadDestination + path.sep + req.file.filename + '.txt', jsonString, function(errf) {
if (errf) {
return console.log(errf);
}
jsonPdf = null;
});
var jsonMongo = JSON.parse(jsonString);
jsonMongo.Nome_File = req.file.filename;
//salvaPerizia(jsonMongo);
//use Nominatim to get the longitude and latitude for the appraisal's location
//then save the appraisal to mongo
var indirizzo = jsonMongo.Indirizzo + " " + jsonMongo.N_civico + ", " + jsonMongo.Comune + ", " + jsonMongo.Provincia + ", " + jsonMongo.CAP;
nominatim.search({q: indirizzo}, function (err, res, data) {
console.log(indirizzo);
if (err) {
throw err;
}
var loc = new Array();
if (data != undefined && data[0] != undefined){
loc = [data[0].lon, data[0].lat];
jsonMongo.loc = loc;
salvaPerizia(jsonMongo);
} else {
| });
});
pdfParser.loadPDF(uploadDestination + path.sep + req.file.filename);
})
});
router.get('/id/:_id', function (req, res) {
Perizia.findOne({ CRIF: req.params._id }, function (err, perizia) {
if (err || !perizia) {
res.render('error', {});
} else {
res.json(perizia);//get json data
console.log(perizia);
}
});
});
router.get('/file/:Nome_File', function (req, res) {
Perizia.findOne({ Nome_File: req.params.Nome_File }, function (err, perizia){
console.log("la perizia: " + perizia + ".");
if (err || !perizia || perizia == null) {
res.render('error', {});
} else {
res.json(perizia);//get json data
}
});
});
//needs req.params.DISTANZA, req.params.limite, req.params.DATA_MIN and req.params.indirizzo
router.get('/distanza/:DISTANZA/limite/:limite/data_min/:DATA_MIN/indirizzo/:indirizzo', function (req, res) {
console.log('distanza=' + req.params.DISTANZA + ' limite =' + req.params.limite + ' indirizzo=' + req.params.indirizzo);
// convert the distance to degrees for the $near query: one degree is
// roughly 111.2 km (the extra factor of 100 appears to scale the input,
// e.g. DISTANZA=5 -> 5 * 100 / 111.2 ≈ 4.5)
var maxDistance = req.params.DISTANZA * 100 / 111.2;
nominatim.search({q: req.params.indirizzo}, function (err, resp, data) {
console.log(req.params.indirizzo);
if (err) {
throw err;
}
console.log(data);
if (data != undefined && data[0] != undefined){
var loc = new Array();
loc = [data[0].lon, data[0].lat];
Perizia.find({ 'loc': { $near: loc, $maxDistance: maxDistance }, "Data_Evasione_Perizia": {'$gt': req.params.DATA_MIN }})
.limit(Number(req.params.limite))
.exec(function(err, perizie) {
if (err) {
return res.json(err);
}
console.log('perizie ' + perizie); console.log(perizie.length);
res.json(perizie);
});
}
});
})
module.exports = router;
| var indirizzo2 = jsonMongo.Indirizzo + " " + jsonMongo.N_civico + ", " + jsonMongo.Comune + ", " + jsonMongo.Provincia;
console.log(indirizzo2);
nominatim.search({q: indirizzo2}, function (err2, res2, data2) {
if (err2) {
throw err2;
}
if (data2 != undefined && data2[0] != undefined) {
loc = [data2[0].lon, data2[0].lat];
jsonMongo.loc = loc;
salvaPerizia(jsonMongo);
} else {
salvaPerizia(jsonMongo);
}
});
}
| conditional_block |
api.js | var express = require('express');
var router = express.Router();
var multer = require('multer');
var path = require('path');
var fs = require('fs');
var PDFParser = require('pdf2json');
var moment = require('moment');
const util = require('util');
var Nominatim = require('node-nominatim2');
var Perizia = require('../models/perizia');
//data needed by OpenStreetMap's Nominatim service
var options = {
useragent: 'MyApp',
referer: 'https://github.com/xbgmsharp/node-nominatim2',
timeout: 1000
};
var nominatim = new Nominatim(options);
var uploadDestination = 'uploads';
var SPACE = "\u00a0";
const destination = function (req, file, cb) {
cb(null, uploadDestination)
};
const filename = function (req, file, cb) {
var datetimestamp = Date.now();
cb(null, file.originalname);
//cb(null, file.fieldname + '-' + datetimestamp + '.' + file.originalname.split('.')[file.originalname.split('.').length -1]);
};
var storage = multer.diskStorage({ //multer's disk storage settings
destination, filename
});
var upload = multer({ //multer settings
storage: storage
}).single('file');
var ubicazioneLines = [{text:"Comune", jsonId:"Comune", skip:1},
{text:"Provincia", jsonId:"Provincia", skip:1},
{text:"CAP", jsonId:"CAP", skip:1},
{text:"Indirizzo", jsonId:"Indirizzo", skip:1},
{text:"N." + SPACE + "civico", jsonId:"N_civico", skip:1},
{text:"Interno", jsonId:"Interno", skip:1},
{text:"Scala", jsonId:"Scala", skip:1},
{text:"Piano", jsonId:"Piano", skip:1}
];
var relevantLines = [
{text:"Codice" + SPACE + "CRIF", jsonId:"_id", skip:1},
{text:"Data" + SPACE + "Evasione" + SPACE + "Perizia", jsonId:"Data_Evasione_Perizia", skip:1, date: true},
{text:"Descrizione" + SPACE + "unità" + SPACE + "di" + SPACE + "stima", jsonId:"Descrizione_unita_di_stima", skip:1},
{text:"Valore" + SPACE + "di" + SPACE + "mercato" + SPACE + "del" + SPACE + "lotto", jsonId:"Valore_di_mercato_del_lotto", skip:2, number:true},
{text:"Foglio" + SPACE + "di" + SPACE + "mappa", jsonId:"Foglio", skip:1},
{text:"Totale" + SPACE + "superficie" + SPACE + "principale", jsonId:"Totale_superficie_principale", skip:2, number:true},
{text:"Impianto" + SPACE + "elettrico", jsonId:"Impianto_elettrico_anni", skip:2},
{text:"Impianto" + SPACE + "idraulico", jsonId:"Impianto_idraulico_anni", skip:2},
{text:"Tipologia" + SPACE + "edilizia" , jsonId:"Tipologia_edilizia", skip:1},
{text:"COMMERCIALE", jsonId:"SUPERFICIE_COMMERCIALE_MQ", skip:2, number:true},
{text:"Particella", jsonId:"Particella", skip:7},
{text:"Categoria", jsonId:"Categoria", skip:7},
{text:"Consistenza", jsonId:"Consistenza", skip:7, number:true},
{text:"RC", jsonId:"RC", skip:7, number:true},
{text:"Anno" + SPACE + "di" + SPACE + "costruzione", jsonId:"Anno_di_costruzione", skip:1}
];
var superficiLines = [
{text:"Descrizione", jsonId:"Descrizione", skip:1},
{text:"Misura (mq)", jsonId:"Misura_mq", skip:1, number:true},
{text:"Rapporto mercantile", jsonId:"Rapporto_mercantile", skip:1, number:true},
{text:"Sup Rap", jsonId:"Sup_Rap", skip:1, number:true}
];
var parseJsonPdf = function(arr){
var str = '';
var jsonString = '{';
for (var i = 0; i < arr.length; i ++) {
for (var j = 0; j < arr[i].Texts.length; j ++) {
str = decodeURIComponent(arr[i].Texts[j].R[0].T);
if (str.lastIndexOf('COLLEGATE', 0) === 0){
jsonString = jsonString + '\"SUPERFICI_SECONDARIE_ANNESSE_E_COLLEGATE\":\n[';
j = j + 4;
for (var h = 1; h < 20; h ++){
j = j + 1
str = decodeURIComponent(arr[i].Texts[j].R[0].T);
if (str.lastIndexOf(h.toString() + '.', 0) === 0){
if (h != 1){
jsonString += ',';
}
jsonString = jsonString + '{';
for (var w = 0; w < superficiLines.length; w ++){
j = j + 1;
str = decodeURIComponent(arr[i].Texts[j].R[0].T);
if (superficiLines[w].number){
str = str.replace('.','').replace(',','.');
}
jsonString = jsonString + '\"' + superficiLines[w].jsonId + '\":\"' + str + '\"';
if (w != superficiLines.length - 1) {
jsonString += ',';
}
jsonString += '\n';
}
jsonString += '}\n';
//jsonString = jsonString + '},\n';
} else {
jsonString += '],\n';
h = 20;
}
}
} else if (str.lastIndexOf(SPACE + 'UNITA\'' + SPACE + 'IMMOBILIARE', 0 ) === 0){
j = j + 1
str = decodeURIComponent(arr[i].Texts[j].R[0].T);
if (str.lastIndexOf('UBICAZIONE', 0) === 0){
for (var w = 0; w < ubicazioneLines.length; w ++){
j = j + 1;
str = decodeURIComponent(arr[i].Texts[j].R[0].T);
if (str.lastIndexOf(ubicazioneLines[w].text, 0) === 0){
j = j + 1;
str = decodeURIComponent(arr[i].Texts[j].R[0].T);
jsonString += '\"' + ubicazioneLines[w].jsonId + '\":\"' + str + '\",\n';
}
}
}
} else {
for (var w = 0; w < relevantLines.length; w ++){
if (str.lastIndexOf(relevantLines[w].text, 0) === 0){
j = j + relevantLines[w].skip;
var text = decodeURIComponent(arr[i].Texts[j].R[0].T);
//converts a number in Italian format to a standard JSON number
//e.g. 1,23 -> 1.23
if (relevantLines[w].number){
text = text.replace('.','').replace(',','.');
} else if (relevantLines[w].date){
//converts a date in Italian format dd/mm/yyyy to a standard json format date
text = moment(text, 'DD/MM/YYYY').format('MM/DD/YYYY');
}
jsonString += '\"' + relevantLines[w].jsonId + '\":\"' + text + '\"';
j = j - relevantLines[w].skip;
if (w != relevantLines.length - 1) {
jsonString += ',';
}
jsonString += '\n';
}
}
}
}
}
jsonString += '}';
return jsonString;
}
var salvaPerizia = function (json){
console.log("salvo la perizia");
var uploadedPerizia = new Perizia(json);
uploadedPerizia.save(function(err) {
if (err) {
//console.log(err.code);
if (err.code === 11000){//duplicate key
console.log(err.message);
return; | console.log('La perizia CRIF ' + json.Nome_File + ' è stata salvata in mongo');
}
});
}
router.post('/upload', function(req, res){
upload(req, res, function(err){
if (err){
res.json({error_code:1, err_desc:err});
return;
}
res.json({error_code:0, err_desc:null});
var pdfParser = new PDFParser(this, 1);
pdfParser.on("pdfParser_dataError", errData => console.error(errData.parserError) );
pdfParser.on("pdfParser_dataReady", pdfData => {
var jsonPdf = JSON.parse(JSON.stringify(pdfData, null, '\t')).formImage.Pages;
var jsonString = parseJsonPdf(jsonPdf);
fs.writeFile(uploadDestination + path.sep + req.file.filename + '.txt', jsonString, function(errf) {
if (errf) {
return console.log(errf);
}
jsonPdf = null;
});
var jsonMongo = JSON.parse(jsonString);
jsonMongo.Nome_File = req.file.filename;
//salvaPerizia(jsonMongo);
//use Nominatim to get the longitude and latitude for the appraisal's location
//then save the appraisal to mongo
var indirizzo = jsonMongo.Indirizzo + " " + jsonMongo.N_civico + ", " + jsonMongo.Comune + ", " + jsonMongo.Provincia + ", " + jsonMongo.CAP;
nominatim.search({q: indirizzo}, function (err, res, data) {
console.log(indirizzo);
if (err) {
throw err;
}
var loc = new Array();
if (data != undefined && data[0] != undefined){
loc = [data[0].lon, data[0].lat];
jsonMongo.loc = loc;
salvaPerizia(jsonMongo);
} else {
var indirizzo2 = jsonMongo.Indirizzo + " " + jsonMongo.N_civico + ", " + jsonMongo.Comune + ", " + jsonMongo.Provincia;
console.log(indirizzo2);
nominatim.search({q: indirizzo2}, function (err2, res2, data2) {
if (err2) {
throw err2;
}
if (data2 != undefined && data2[0] != undefined) {
loc = [data2[0].lon, data2[0].lat];
jsonMongo.loc = loc;
salvaPerizia(jsonMongo);
} else {
salvaPerizia(jsonMongo);
}
});
}
});
});
pdfParser.loadPDF(uploadDestination + path.sep + req.file.filename);
})
});
router.get('/id/:_id', function (req, res) {
Perizia.findOne({ CRIF: req.params._id }, function (err, perizia) {
if (err || !perizia) {
res.render('error', {});
} else {
res.json(perizia);//get json data
console.log(perizia);
}
});
});
router.get('/file/:Nome_File', function (req, res) {
Perizia.findOne({ Nome_File: req.params.Nome_File }, function (err, perizia){
console.log("la perizia: " + perizia + ".");
if (err || !perizia || perizia == null) {
res.render('error', {});
} else {
res.json(perizia);//get json data
}
});
});
//needs req.params.DISTANZA, req.params.limite, req.params.DATA_MIN and req.params.indirizzo
router.get('/distanza/:DISTANZA/limite/:limite/data_min/:DATA_MIN/indirizzo/:indirizzo', function (req, res) {
console.log('distanza=' + req.params.DISTANZA + ' limite =' + req.params.limite + ' indirizzo=' + req.params.indirizzo);
// convert the distance to degrees for the $near query: one degree is
// roughly 111.2 km (the extra factor of 100 appears to scale the input,
// e.g. DISTANZA=5 -> 5 * 100 / 111.2 ≈ 4.5)
var maxDistance = req.params.DISTANZA * 100 / 111.2;
nominatim.search({q: req.params.indirizzo}, function (err, resp, data) {
console.log(req.params.indirizzo);
if (err) {
throw err;
}
console.log(data);
if (data != undefined && data[0] != undefined){
var loc = new Array();
loc = [data[0].lon, data[0].lat];
Perizia.find({ 'loc': { $near: loc, $maxDistance: maxDistance }, "Data_Evasione_Perizia": {'$gt': req.params.DATA_MIN }})
.limit(Number(req.params.limite))
.exec(function(err, perizie) {
if (err) {
return res.json(err);
}
console.log('perizie ' + perizie); console.log(perizie.length);
res.json(perizie);
});
}
});
})
module.exports = router; | } else {
console.log(err);
}
} else { | random_line_split |
apply.rs | use anyhow::{anyhow, Context, Result};
use rand::seq::SliceRandom;
use std::fs;
use std::io::{self, Read};
use std::path;
use std::process;
use std::str;
use std::thread;
use crate::config::Config;
use crate::find::find;
use crate::operations::build::build_template;
use crate::scheme::Scheme;
/// Picks a random path from the given vec
/// * `values` - Vec with paths
fn random(values: Vec<path::PathBuf>) -> Result<path::PathBuf> {
let chosen = values.choose(&mut rand::thread_rng()).ok_or_else(|| {
anyhow!(
"Scheme not found. Check if it exists, or run update schemes if you didn't already."
)
})?;
Ok(chosen.to_path_buf())
}
/// Runs hook commands
///
/// * `command` - Command string to execute
/// * `shell` - Shell invocation template containing a `{}` placeholder
/// * `verbose` - Should we be verbose?
fn run_hook(command: Option<String>, shell: &str, verbose: bool) -> Result<()> {
if let Some(command) = command {
let full_command = shell.replace("{}", &command);
if verbose {
println!("running {}", full_command);
}
let command_vec = shell_words::split(&full_command)?;
if command_vec.len() == 1 | else {
process::Command::new(&command_vec[0])
.args(&command_vec[1..])
.stdout(process::Stdio::null())
.stderr(process::Stdio::null())
.status()
.with_context(|| format!("Couldn't run hook '{}'", full_command))?;
}
}
Ok(())
}
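// Illustrative call (values are assumptions):
//   run_hook(Some("echo done".into()), "sh -c '{}'", true)
// expands to `sh -c 'echo done'` and runs it with stdout/stderr silenced.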
/// Replace with delimiter lines
///
/// In a string, removes everything from one line to another, and puts the built template in place
///
/// * `file_content` - String with lines to be replaced
/// * `start` - Where to start replacing
/// * `end` - Where to stop replacing
/// * `built_template` - Built template to be injected
fn replace_delimiter(
file_content: &str,
start: &str,
end: &str,
built_template: &str,
) -> Result<String> {
let mut changed_content = String::new();
let mut found_start = false;
let mut found_end = false;
let mut appended = false;
for line in file_content.lines() {
if found_start && !found_end {
if !appended {
changed_content.push_str(&built_template);
appended = true;
}
if line.trim().to_lowercase().eq(&end) {
changed_content.push_str(&format!("{}\n", line));
found_end = true;
}
} else {
changed_content.push_str(&format!("{}\n", line));
if line.trim().to_lowercase().eq(&start) {
found_start = true
}
}
}
if !found_start {
Err(anyhow!("Couldn't find starting string."))
} else if !found_end {
Err(anyhow!("Couldn't find ending string."))
} else {
Ok(changed_content)
}
}
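// Illustrative behaviour (content is an assumption): for
//   "pre\n# Start flavours\nold\n# End flavours\npost"
// with start "# start flavours" and end "# end flavours" (the function
// expects trimmed, lowercased delimiters), the lines between the delimiters
// are replaced by `built_template` while both delimiter lines are kept.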
/// Apply function
///
/// * `patterns` - Which patterns the user specified
/// * `base_dir` - Flavours base directory
/// * `config_path` - Flavours configuration path
/// * `light_mode` - Don't run hooks marked as non-lightweight
/// * `from_stdin` - Read scheme from stdin?
/// * `verbose` - Should we be verbose?
pub fn apply(
patterns: Vec<&str>,
base_dir: &path::Path,
config_path: &path::Path,
light_mode: bool,
from_stdin: bool,
verbose: bool,
) -> Result<()> {
let (scheme_contents, scheme_slug) = if from_stdin {
let mut buffer = String::new();
let stdin = io::stdin();
let mut handle = stdin.lock();
handle.read_to_string(&mut buffer)?;
(buffer, String::from("generated"))
} else {
//Find schemes that match given patterns
let mut schemes = Vec::new();
for pattern in patterns {
let found_schemes = find(pattern, &base_dir.join("base16").join("schemes"))?;
for found_scheme in found_schemes {
schemes.push(found_scheme);
}
}
//Sort and remove duplicates
schemes.sort();
schemes.dedup();
//Get random scheme
let scheme_file = random(schemes)?;
let scheme_slug: String = scheme_file
.file_stem()
.ok_or_else(|| anyhow!("Couldn't get scheme name."))?
.to_str()
.ok_or_else(|| anyhow!("Couldn't convert scheme file name."))?
.into();
//Read chosen scheme
(
fs::read_to_string(&scheme_file)
.with_context(|| format!("Couldn't read scheme file at {:?}.", scheme_file))?,
scheme_slug,
)
};
let scheme = Scheme::from_str(&scheme_contents, &scheme_slug)?;
if verbose {
println!(
"Using scheme: {} ({}), by {}",
scheme.name, scheme.slug, scheme.author
);
println!();
}
//Check if config file exists
if !config_path.exists() {
eprintln!("Config {:?} doesn't exist, creating", config_path);
let default_content = match fs::read_to_string(path::Path::new("/etc/flavours.conf")) {
Ok(content) => content,
Err(_) => String::from(""),
};
let config_path_parent = config_path
.parent()
.with_context(|| format!("Couldn't get parent directory of {:?}", config_path))?;
fs::create_dir_all(config_path_parent).with_context(|| {
format!(
"Couldn't create configuration file parent directory {:?}",
config_path_parent
)
})?;
fs::write(config_path, default_content)
.with_context(|| format!("Couldn't create configuration file at {:?}", config_path))?;
}
let config_contents = fs::read_to_string(config_path)
.with_context(|| format!("Couldn't read configuration file {:?}.", config_path))?;
let config = Config::from_str(&config_contents)?;
// Use the configured shell (default "sh -c '{}'") and check it contains the placeholder
let shell = config.shell.unwrap_or_else(|| "sh -c '{}'".into());
if !shell.contains("{}") {
return Err(anyhow!("The configured shell does not contain the required command placeholder '{}'. Check the default file or github for config examples."));
}
let mut hooks = Vec::new();
//Iterate configured entries (templates)
let items_legacy = config.item.unwrap_or_default();
let mut items = config.items.unwrap_or_default();
items.extend(items_legacy);
if items.is_empty() {
return Err(anyhow!("Couldn't get items from config file. Check the default file or github for config examples."));
}
for item in items.iter() {
//Template name
let template = &item.template;
//Subtemplate name
let subtemplate = match &item.subtemplate {
Some(value) => String::from(value),
None => String::from("default"),
};
//Is the hook lightweight?
let light = match &item.light {
Some(value) => *value,
None => true,
};
//Rewrite or replace
let rewrite = match &item.rewrite {
Some(value) => *value,
None => false,
};
//Replace start delimiter
let start = match &item.start {
Some(value) => String::from(value),
None => String::from("# Start flavours"),
}
.trim()
.to_lowercase();
//Replace end delimiter
let end = match &item.end {
Some(value) => String::from(value),
None => String::from("# End flavours"),
}
.trim()
.to_lowercase();
//(sub)template file path
let subtemplate_file = &base_dir
.join("base16")
.join("templates")
.join(&template)
.join("templates")
.join(format!("{}.mustache", subtemplate));
//Template content
let template_content = fs::read_to_string(subtemplate_file)
.with_context(||format!("Couldn't read template {}/{} at {:?}. Check if the correct template/subtemplate was specified, and run the update templates command if you didn't already.", template, subtemplate, subtemplate_file))?;
//Template with correct colors
let built_template = build_template(template_content, &scheme)
.context("Couldn't replace placeholders. Check if all colors on the specified scheme file are valid (don't include a leading '#').")?;
//File to write
let file = shellexpand::full(&item.file)?.to_string();
//Rewrite file with built template
if rewrite {
std::path::Path::new(&file).parent().and_then(|p| fs::create_dir_all(p).ok());
fs::write(&file, built_template)
.with_context(|| format!("Couldn't write to file {:?}.", file))?;
if verbose {
println!("Wrote {}/{} on: {:?}", template, subtemplate, file)
}
} else {
//Or replace with delimiters
let file_content = fs::read_to_string(&file)?;
match replace_delimiter(&file_content, &start, &end, &built_template) {
Ok(content) => fs::write(&file, content)
.with_context(|| format!("Couldn't write to file {:?}", file))?,
Err(error) => eprintln!("Couldn't replace lines in {:?}: {}", file, error),
}
if verbose {
println!("Wrote {}/{} on {:?}", template, subtemplate, file);
}
}
let command = item.hook.clone();
let shell = shell.clone();
// Only add hook to queue if either:
// - Not running on lightweight mode
// - Hook is set as lightweight
if !light_mode || light {
hooks.push(thread::spawn(move || run_hook(command, &shell, verbose)));
}
}
let last_scheme_file = &base_dir.join("lastscheme");
fs::write(&last_scheme_file, &scheme.slug)
.with_context(|| "Couldn't update applied scheme name")?;
while !hooks.is_empty() {
hooks
.pop()
.ok_or_else(|| anyhow!("Couldn't pop hooks."))?
.join()
.unwrap()?;
}
if verbose {
println!("Successfully applied {}", &scheme.slug);
}
Ok(())
}
| {
process::Command::new(&command_vec[0])
.stdout(process::Stdio::null())
.stderr(process::Stdio::null())
.status()
.with_context(|| format!("Couldn't run hook '{}'", full_command))?;
} | conditional_block |
apply.rs | use anyhow::{anyhow, Context, Result};
use rand::seq::SliceRandom;
use std::fs;
use std::io::{self, Read};
use std::path;
use std::process;
use std::str;
use std::thread;
use crate::config::Config;
use crate::find::find;
use crate::operations::build::build_template;
use crate::scheme::Scheme;
/// Picks a random path from the given vec
/// * `values` - Vec with paths
fn random(values: Vec<path::PathBuf>) -> Result<path::PathBuf> {
let chosen = values.choose(&mut rand::thread_rng()).ok_or_else(|| {
anyhow!(
"Scheme not found. Check if it exists, or run update schemes if you didn't already."
)
})?;
Ok(chosen.to_path_buf())
}
/// Runs hook commands
///
/// * `command` - Command string to execute
/// * `shell` - Shell invocation template containing a `{}` placeholder
/// * `verbose` - Should we be verbose?
fn run_hook(command: Option<String>, shell: &str, verbose: bool) -> Result<()> {
if let Some(command) = command {
let full_command = shell.replace("{}", &command);
if verbose {
println!("running {}", full_command);
}
let command_vec = shell_words::split(&full_command)?;
if command_vec.len() == 1 {
process::Command::new(&command_vec[0])
.stdout(process::Stdio::null())
.stderr(process::Stdio::null())
.status()
.with_context(|| format!("Couldn't run hook '{}'", full_command))?;
} else {
process::Command::new(&command_vec[0])
.args(&command_vec[1..])
.stdout(process::Stdio::null())
.stderr(process::Stdio::null())
.status()
.with_context(|| format!("Couldn't run hook '{}'", full_command))?;
}
}
Ok(())
}
/// Replace with delimiter lines
///
/// In a string, removes everything from one line to another, and puts the built template in place
///
/// * `file_content` - String with lines to be replaced
/// * `start` - Where to start replacing
/// * `end` - Where to stop replacing
/// * `built_template` - Built template to be injected
fn | (
file_content: &str,
start: &str,
end: &str,
built_template: &str,
) -> Result<String> {
let mut changed_content = String::new();
let mut found_start = false;
let mut found_end = false;
let mut appended = false;
for line in file_content.lines() {
if found_start && !found_end {
if !appended {
changed_content.push_str(&built_template);
appended = true;
}
if line.trim().to_lowercase().eq(&end) {
changed_content.push_str(&format!("{}\n", line));
found_end = true;
}
} else {
changed_content.push_str(&format!("{}\n", line));
if line.trim().to_lowercase().eq(&start) {
found_start = true
}
}
}
if !found_start {
Err(anyhow!("Couldn't find starting string."))
} else if !found_end {
Err(anyhow!("Couldn't find ending string."))
} else {
Ok(changed_content)
}
}
/// Apply function
///
/// * `patterns` - Which patterns the user specified
/// * `base_dir` - Flavours base directory
/// * `config_path` - Flavours configuration path
/// * `light_mode` - Don't run hooks marked as non-lightweight
/// * `from_stdin` - Read scheme from stdin?
/// * `verbose` - Should we be verbose?
pub fn apply(
patterns: Vec<&str>,
base_dir: &path::Path,
config_path: &path::Path,
light_mode: bool,
from_stdin: bool,
verbose: bool,
) -> Result<()> {
let (scheme_contents, scheme_slug) = if from_stdin {
let mut buffer = String::new();
let stdin = io::stdin();
let mut handle = stdin.lock();
handle.read_to_string(&mut buffer)?;
(buffer, String::from("generated"))
} else {
//Find schemes that match given patterns
let mut schemes = Vec::new();
for pattern in patterns {
let found_schemes = find(pattern, &base_dir.join("base16").join("schemes"))?;
for found_scheme in found_schemes {
schemes.push(found_scheme);
}
}
//Sort and remove duplicates
schemes.sort();
schemes.dedup();
//Get random scheme
let scheme_file = random(schemes)?;
let scheme_slug: String = scheme_file
.file_stem()
.ok_or_else(|| anyhow!("Couldn't get scheme name."))?
.to_str()
.ok_or_else(|| anyhow!("Couldn't convert scheme file name."))?
.into();
//Read chosen scheme
(
fs::read_to_string(&scheme_file)
.with_context(|| format!("Couldn't read scheme file at {:?}.", scheme_file))?,
scheme_slug,
)
};
let scheme = Scheme::from_str(&scheme_contents, &scheme_slug)?;
if verbose {
println!(
"Using scheme: {} ({}), by {}",
scheme.name, scheme.slug, scheme.author
);
println!();
}
//Check if config file exists
if !config_path.exists() {
eprintln!("Config {:?} doesn't exist, creating", config_path);
let default_content = match fs::read_to_string(path::Path::new("/etc/flavours.conf")) {
Ok(content) => content,
Err(_) => String::from(""),
};
let config_path_parent = config_path
.parent()
.with_context(|| format!("Couldn't get parent directory of {:?}", config_path))?;
fs::create_dir_all(config_path_parent).with_context(|| {
format!(
"Couldn't create configuration file parent directory {:?}",
config_path_parent
)
})?;
fs::write(config_path, default_content)
.with_context(|| format!("Couldn't create configuration file at {:?}", config_path))?;
}
let config_contents = fs::read_to_string(config_path)
.with_context(|| format!("Couldn't read configuration file {:?}.", config_path))?;
let config = Config::from_str(&config_contents)?;
// Use the configured shell (default "sh -c '{}'") and check it contains the placeholder
let shell = config.shell.unwrap_or_else(|| "sh -c '{}'".into());
if !shell.contains("{}") {
return Err(anyhow!("The configured shell does not contain the required command placeholder '{}'. Check the default file or github for config examples."));
}
let mut hooks = Vec::new();
//Iterate configured entries (templates)
let items_legacy = config.item.unwrap_or_default();
let mut items = config.items.unwrap_or_default();
items.extend(items_legacy);
if items.is_empty() {
return Err(anyhow!("Couldn't get items from config file. Check the default file or github for config examples."));
}
for item in items.iter() {
//Template name
let template = &item.template;
//Subtemplate name
let subtemplate = match &item.subtemplate {
Some(value) => String::from(value),
None => String::from("default"),
};
//Is the hook lightweight?
let light = match &item.light {
Some(value) => *value,
None => true,
};
//Rewrite or replace
let rewrite = match &item.rewrite {
Some(value) => *value,
None => false,
};
//Replace start delimiter
let start = match &item.start {
Some(value) => String::from(value),
None => String::from("# Start flavours"),
}
.trim()
.to_lowercase();
//Replace end delimiter
let end = match &item.end {
Some(value) => String::from(value),
None => String::from("# End flavours"),
}
.trim()
.to_lowercase();
//(sub)template file path
let subtemplate_file = &base_dir
.join("base16")
.join("templates")
.join(&template)
.join("templates")
.join(format!("{}.mustache", subtemplate));
//Template content
let template_content = fs::read_to_string(subtemplate_file)
.with_context(||format!("Couldn't read template {}/{} at {:?}. Check if the correct template/subtemplate was specified, and run the update templates command if you didn't already.", template, subtemplate, subtemplate_file))?;
//Template with correct colors
let built_template = build_template(template_content, &scheme)
.context("Couldn't replace placeholders. Check if all colors on the specified scheme file are valid (don't include a leading '#').")?;
//File to write
let file = shellexpand::full(&item.file)?.to_string();
//Rewrite file with built template
if rewrite {
std::path::Path::new(&file).parent().and_then(|p| fs::create_dir_all(p).ok());
fs::write(&file, built_template)
.with_context(|| format!("Couldn't write to file {:?}.", file))?;
if verbose {
println!("Wrote {}/{} on: {:?}", template, subtemplate, file)
}
} else {
//Or replace with delimiters
let file_content = fs::read_to_string(&file)?;
match replace_delimiter(&file_content, &start, &end, &built_template) {
Ok(content) => fs::write(&file, content)
.with_context(|| format!("Couldn't write to file {:?}", file))?,
Err(error) => eprintln!("Couldn't replace lines in {:?}: {}", file, error),
}
if verbose {
println!("Wrote {}/{} on {:?}", template, subtemplate, file);
}
}
let command = item.hook.clone();
let shell = shell.clone();
// Only add hook to queue if either:
// - Not running on lightweight mode
// - Hook is set as lightweight
if !light_mode || light {
hooks.push(thread::spawn(move || run_hook(command, &shell, verbose)));
}
}
let last_scheme_file = &base_dir.join("lastscheme");
fs::write(&last_scheme_file, &scheme.slug)
.with_context(|| "Couldn't update applied scheme name")?;
while !hooks.is_empty() {
hooks
.pop()
.ok_or_else(|| anyhow!("Couldn't pop hooks."))?
.join()
.unwrap()?;
}
if verbose {
println!("Successfully applied {}", &scheme.slug);
}
Ok(())
}
| replace_delimiter | identifier_name |
apply.rs | use anyhow::{anyhow, Context, Result};
use rand::seq::SliceRandom;
use std::fs;
use std::io::{self, Read};
use std::path;
use std::process;
use std::str;
use std::thread;
use crate::config::Config;
use crate::find::find;
use crate::operations::build::build_template;
use crate::scheme::Scheme;
/// Picks a random path from the given vec
/// * `values` - Vec with paths
fn random(values: Vec<path::PathBuf>) -> Result<path::PathBuf> {
let chosen = values.choose(&mut rand::thread_rng()).ok_or_else(|| {
anyhow!(
"Scheme not found. Check if it exists, or run update schemes if you didn't already."
)
})?;
Ok(chosen.to_path_buf())
}
/// Runs hook commands
///
/// * `command` - Command string to execute
/// * `shell` - Shell invocation template containing a `{}` placeholder
/// * `verbose` - Should we be verbose?
fn run_hook(command: Option<String>, shell: &str, verbose: bool) -> Result<()> {
if let Some(command) = command {
let full_command = shell.replace("{}", &command);
if verbose {
println!("running {}", full_command);
}
let command_vec = shell_words::split(&full_command)?;
if command_vec.len() == 1 {
process::Command::new(&command_vec[0])
.stdout(process::Stdio::null())
.stderr(process::Stdio::null())
.status()
.with_context(|| format!("Couldn't run hook '{}'", full_command))?;
} else {
process::Command::new(&command_vec[0])
.args(&command_vec[1..])
.stdout(process::Stdio::null())
.stderr(process::Stdio::null())
.status()
.with_context(|| format!("Couldn't run hook '{}'", full_command))?;
}
}
Ok(())
}
/// Replace with delimiter lines
///
/// In a string, removes everything from one line to another, and puts the built template in place
///
/// * `file_content` - String with lines to be replaced
/// * `start` - Where to start replacing
/// * `end` - Where to stop replacing
/// * `built_template` - Built template to be injected
fn replace_delimiter(
file_content: &str,
start: &str,
end: &str,
built_template: &str,
) -> Result<String> {
let mut changed_content = String::new();
let mut found_start = false;
let mut found_end = false;
let mut appended = false;
for line in file_content.lines() {
if found_start && !found_end {
if !appended {
changed_content.push_str(&built_template);
appended = true;
}
if line.trim().to_lowercase().eq(&end) {
changed_content.push_str(&format!("{}\n", line));
found_end = true;
}
} else {
changed_content.push_str(&format!("{}\n", line));
if line.trim().to_lowercase().eq(&start) {
found_start = true
}
}
}
if !found_start {
Err(anyhow!("Couldn't find starting string."))
} else if !found_end {
Err(anyhow!("Couldn't find ending string."))
} else {
Ok(changed_content)
}
}
/// Apply function
///
/// * `patterns` - Which patterns the user specified
/// * `base_dir` - Flavours base directory
/// * `config_path` - Flavours configuration path
/// * `light_mode` - Don't run hooks marked as non-lightweight
/// * `from_stdin` - Read scheme from stdin?
/// * `verbose` - Should we be verbose?
pub fn apply(
patterns: Vec<&str>,
base_dir: &path::Path,
config_path: &path::Path,
light_mode: bool,
from_stdin: bool,
verbose: bool,
) -> Result<()> {
let (scheme_contents, scheme_slug) = if from_stdin {
let mut buffer = String::new();
let stdin = io::stdin();
let mut handle = stdin.lock();
handle.read_to_string(&mut buffer)?;
(buffer, String::from("generated"))
} else {
//Find schemes that match given patterns
let mut schemes = Vec::new();
for pattern in patterns {
let found_schemes = find(pattern, &base_dir.join("base16").join("schemes"))?;
for found_scheme in found_schemes {
schemes.push(found_scheme);
}
}
//Sort and remove duplicates
schemes.sort();
schemes.dedup();
//Get random scheme
let scheme_file = random(schemes)?;
let scheme_slug: String = scheme_file
.file_stem()
.ok_or_else(|| anyhow!("Couldn't get scheme name."))?
.to_str()
.ok_or_else(|| anyhow!("Couldn't convert scheme file name."))?
.into();
//Read chosen scheme
(
fs::read_to_string(&scheme_file)
.with_context(|| format!("Couldn't read scheme file at {:?}.", scheme_file))?,
scheme_slug,
)
};
let scheme = Scheme::from_str(&scheme_contents, &scheme_slug)?;
if verbose {
println!(
"Using scheme: {} ({}), by {}",
scheme.name, scheme.slug, scheme.author
); | //Check if config file exists
if !config_path.exists() {
eprintln!("Config {:?} doesn't exist, creating", config_path);
let default_content = match fs::read_to_string(path::Path::new("/etc/flavours.conf")) {
Ok(content) => content,
Err(_) => String::from(""),
};
let config_path_parent = config_path
.parent()
.with_context(|| format!("Couldn't get parent directory of {:?}", config_path))?;
fs::create_dir_all(config_path_parent).with_context(|| {
format!(
"Couldn't create configuration file parent directory {:?}",
config_path_parent
)
})?;
fs::write(config_path, default_content)
.with_context(|| format!("Couldn't create configuration file at {:?}", config_path))?;
}
let config_contents = fs::read_to_string(config_path)
.with_context(|| format!("Couldn't read configuration file {:?}.", config_path))?;
let config = Config::from_str(&config_contents)?;
// Use the configured shell (default "sh -c '{}'") and check it contains the placeholder
let shell = config.shell.unwrap_or_else(|| "sh -c '{}'".into());
if !shell.contains("{}") {
return Err(anyhow!("The configured shell does not contain the required command placeholder '{}'. Check the default file or github for config examples."));
}
let mut hooks = Vec::new();
//Iterate configured entries (templates)
let items_legacy = config.item.unwrap_or_default();
let mut items = config.items.unwrap_or_default();
items.extend(items_legacy);
if items.is_empty() {
return Err(anyhow!("Couldn't get items from config file. Check the default file or github for config examples."));
}
for item in items.iter() {
//Template name
let template = &item.template;
//Subtemplate name
let subtemplate = match &item.subtemplate {
Some(value) => String::from(value),
None => String::from("default"),
};
//Is the hook lightweight?
let light = match &item.light {
Some(value) => *value,
None => true,
};
//Rewrite or replace
let rewrite = match &item.rewrite {
Some(value) => *value,
None => false,
};
//Replace start delimiter
let start = match &item.start {
Some(value) => String::from(value),
None => String::from("# Start flavours"),
}
.trim()
.to_lowercase();
//Replace end delimiter
let end = match &item.end {
Some(value) => String::from(value),
None => String::from("# End flavours"),
}
.trim()
.to_lowercase();
//(sub)template file path
let subtemplate_file = &base_dir
.join("base16")
.join("templates")
.join(&template)
.join("templates")
.join(format!("{}.mustache", subtemplate));
//Template content
let template_content = fs::read_to_string(subtemplate_file)
.with_context(||format!("Couldn't read template {}/{} at {:?}. Check if the correct template/subtemplate was specified, and run the update templates command if you didn't already.", template, subtemplate, subtemplate_file))?;
//Template with correct colors
let built_template = build_template(template_content, &scheme)
.context("Couldn't replace placeholders. Check if all colors on the specified scheme file are valid (don't include a leading '#').")?;
//File to write
let file = shellexpand::full(&item.file)?.to_string();
//Rewrite file with built template
if rewrite {
std::path::Path::new(&file).parent().and_then(|p| fs::create_dir_all(p).ok());
fs::write(&file, built_template)
.with_context(|| format!("Couldn't write to file {:?}.", file))?;
if verbose {
println!("Wrote {}/{} on: {:?}", template, subtemplate, file)
}
} else {
//Or replace with delimiters
let file_content = fs::read_to_string(&file)?;
match replace_delimiter(&file_content, &start, &end, &built_template) {
Ok(content) => fs::write(&file, content)
.with_context(|| format!("Couldn't write to file {:?}", file))?,
Err(error) => eprintln!("Couldn't replace lines in {:?}: {}", file, error),
}
if verbose {
println!("Wrote {}/{} on {:?}", template, subtemplate, file);
}
}
let command = item.hook.clone();
let shell = shell.clone();
// Only add hook to queue if either:
// - Not running on lightweight mode
// - Hook is set as lightweight
if !light_mode || light {
hooks.push(thread::spawn(move || run_hook(command, &shell, verbose)));
}
}
let last_scheme_file = &base_dir.join("lastscheme");
fs::write(&last_scheme_file, &scheme.slug)
.with_context(|| "Couldn't update applied scheme name")?;
while !hooks.is_empty() {
hooks
.pop()
.ok_or_else(|| anyhow!("Couldn't pop hooks."))?
.join()
.unwrap()?;
}
if verbose {
println!("Successfully applied {}", &scheme.slug);
}
Ok(())
} | println!();
}
| random_line_split |
apply.rs | use anyhow::{anyhow, Context, Result};
use rand::seq::SliceRandom;
use std::fs;
use std::io::{self, Read};
use std::path;
use std::process;
use std::str;
use std::thread;
use crate::config::Config;
use crate::find::find;
use crate::operations::build::build_template;
use crate::scheme::Scheme;
/// Picks a random path from the given vec
/// * `values` - Vec with paths
fn random(values: Vec<path::PathBuf>) -> Result<path::PathBuf> {
let chosen = values.choose(&mut rand::thread_rng()).ok_or_else(|| {
anyhow!(
"Scheme not found. Check if it exists, or run update schemes if you didn't already."
)
})?;
Ok(chosen.to_path_buf())
}
/// Runs hook commands
///
/// * `command` - Command string to execute
/// * `shell` - Shell invocation template containing a `{}` placeholder
/// * `verbose` - Should we be verbose?
fn run_hook(command: Option<String>, shell: &str, verbose: bool) -> Result<()> |
/// Replace with delimiter lines
///
/// In a string, removes everything from one line to another, and puts the built template in place
///
/// * `file_content` - String with lines to be replaced
/// * `start` - Where to start replacing
/// * `end` - Where to stop replacing
/// * `built_template` - Built template to be injected
fn replace_delimiter(
file_content: &str,
start: &str,
end: &str,
built_template: &str,
) -> Result<String> {
let mut changed_content = String::new();
let mut found_start = false;
let mut found_end = false;
let mut appended = false;
for line in file_content.lines() {
if found_start && !found_end {
if !appended {
changed_content.push_str(&built_template);
appended = true;
}
if line.trim().to_lowercase().eq(&end) {
changed_content.push_str(&format!("{}\n", line));
found_end = true;
}
} else {
changed_content.push_str(&format!("{}\n", line));
if line.trim().to_lowercase().eq(&start) {
found_start = true
}
}
}
if !found_start {
Err(anyhow!("Couldn't find starting string."))
} else if !found_end {
Err(anyhow!("Couldn't find ending string."))
} else {
Ok(changed_content)
}
}
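// Illustrative usage sketch (hypothetical input; `start`/`end` are assumed
// pre-trimmed and lowercased, as `apply` does below):
//
// let content = "# Start flavours\nold colors\n# End flavours\n";
// let result = replace_delimiter(content, "# start flavours", "# end flavours", "new colors\n")?;
// // `result` keeps both delimiter lines and swaps everything between them.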
/// Apply function
///
/// * `patterns` - Which patterns the user specified
/// * `base_dir` - Flavours base directory
/// * `config_path` - Flavours configuration path
/// * `light` - Don't run hooks marked as non-lightweight
/// * `from_stdin` - Read scheme from stdin?
/// * `verbose` - Should we be verbose?
pub fn apply(
patterns: Vec<&str>,
base_dir: &path::Path,
config_path: &path::Path,
light_mode: bool,
from_stdin: bool,
verbose: bool,
) -> Result<()> {
let (scheme_contents, scheme_slug) = if from_stdin {
let mut buffer = String::new();
let stdin = io::stdin();
let mut handle = stdin.lock();
handle.read_to_string(&mut buffer)?;
(buffer, String::from("generated"))
} else {
//Find schemes that match given patterns
let mut schemes = Vec::new();
for pattern in patterns {
let found_schemes = find(pattern, &base_dir.join("base16").join("schemes"))?;
for found_scheme in found_schemes {
schemes.push(found_scheme);
}
}
//Sort and remove duplicates
schemes.sort();
schemes.dedup();
//Get random scheme
let scheme_file = random(schemes)?;
let scheme_slug: String = scheme_file
.file_stem()
.ok_or_else(|| anyhow!("Couldn't get scheme name."))?
.to_str()
.ok_or_else(|| anyhow!("Couldn't convert scheme file name."))?
.into();
//Read chosen scheme
(
fs::read_to_string(&scheme_file)
.with_context(|| format!("Couldn't read scheme file at {:?}.", scheme_file))?,
scheme_slug,
)
};
let scheme = Scheme::from_str(&scheme_contents, &scheme_slug)?;
if verbose {
println!(
"Using scheme: {} ({}), by {}",
scheme.name, scheme.slug, scheme.author
);
println!();
}
//Check if config file exists
if !config_path.exists() {
eprintln!("Config {:?} doesn't exist, creating", config_path);
let default_content = match fs::read_to_string(path::Path::new("/etc/flavours.conf")) {
Ok(content) => content,
Err(_) => String::from(""),
};
let config_path_parent = config_path
.parent()
.with_context(|| format!("Couldn't get parent directory of {:?}", config_path))?;
fs::create_dir_all(config_path_parent).with_context(|| {
format!(
"Couldn't create configuration file parent directory {:?}",
config_path_parent
)
})?;
fs::write(config_path, default_content)
.with_context(|| format!("Couldn't create configuration file at {:?}", config_path))?;
}
let config_contents = fs::read_to_string(config_path)
.with_context(|| format!("Couldn't read configuration file {:?}.", config_path))?;
let config = Config::from_str(&config_contents)?;
// Resolve the shell command template (or use the default), and check that it contains the placeholder
let shell = config.shell.unwrap_or_else(|| "sh -c '{}'".into());
if !shell.contains("{}") {
return Err(anyhow!("The configured shell does not contain the required command placeholder '{{}}'. Check the default file or github for config examples."));
}
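// Illustrative: with the default template "sh -c '{}'", a hook command
// `echo done` is later executed by `run_hook` as `sh -c 'echo done'`,
// via `shell.replace("{}", &command)`.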
let mut hooks = Vec::new();
//Iterate configured entries (templates)
let items_legacy = config.item.unwrap_or_default();
let mut items = config.items.unwrap_or_default();
items.extend(items_legacy.into_iter());
if items.is_empty() {
return Err(anyhow!("Couldn't get items from config file. Check the default file or github for config examples."));
}
for item in items.iter() {
//Template name
let template = &item.template;
//Subtemplate name
let subtemplate = match &item.subtemplate {
Some(value) => String::from(value),
None => String::from("default"),
};
//Is the hook lightweight?
let light = match &item.light {
Some(value) => *value,
None => true,
};
//Rewrite or replace
let rewrite = match &item.rewrite {
Some(value) => *value,
None => false,
};
//Replace start delimiter
let start = match &item.start {
Some(value) => String::from(value),
None => String::from("# Start flavours"),
}
.trim()
.to_lowercase();
//Replace end delimiter
let end = match &item.end {
Some(value) => String::from(value),
None => String::from("# End flavours"),
}
.trim()
.to_lowercase();
//(sub)template file path
let subtemplate_file = &base_dir
.join("base16")
.join("templates")
.join(&template)
.join("templates")
.join(format!("{}.mustache", subtemplate));
//Template content
let template_content = fs::read_to_string(subtemplate_file)
.with_context(||format!("Couldn't read template {}/{} at {:?}. Check if the correct template/subtemplate was specified, and run the update templates command if you didn't already.", template, subtemplate, subtemplate_file))?;
//Template with correct colors
let built_template = build_template(template_content, &scheme)
.context("Couldn't replace placeholders. Check if all colors on the specified scheme file are valid (don't include a leading '#').")?;
//File to write
let file = shellexpand::full(&item.file)?.to_string();
//Rewrite file with built template
if rewrite {
std::path::Path::new(&file).parent().and_then(|p| fs::create_dir_all(p).ok());
fs::write(&file, built_template)
.with_context(|| format!("Couldn't write to file {:?}.", file))?;
if verbose {
println!("Wrote {}/{} on: {:?}", template, subtemplate, file)
}
} else {
//Or replace with delimiters
let file_content = fs::read_to_string(&file)?;
match replace_delimiter(&file_content, &start, &end, &built_template) {
Ok(content) => fs::write(&file, content)
.with_context(|| format!("Couldn't write to file {:?}", file))?,
Err(error) => eprintln!("Couldn't replace lines in {:?}: {}", file, error),
}
if verbose {
println!("Wrote {}/{} on {:?}", template, subtemplate, file);
}
}
let command = item.hook.clone();
let shell = shell.clone();
// Only add hook to queue if either:
// - Not running on lightweight mode
// - Hook is set as lightweight
if !light_mode || light {
hooks.push(thread::spawn(move || run_hook(command, &shell, verbose)));
}
}
let last_scheme_file = &base_dir.join("lastscheme");
fs::write(&last_scheme_file, &scheme.slug)
.with_context(|| "Couldn't update applied scheme name")?;
while !hooks.is_empty() {
hooks
.pop()
.ok_or_else(|| anyhow!("Couldn't pop hooks."))?
.join()
.unwrap()?;
}
if verbose {
println!("Successfully applied {}", &scheme.slug);
}
Ok(())
}
| {
if let Some(command) = command {
let full_command = shell.replace("{}", &command);
if verbose {
println!("running {}", full_command);
}
let command_vec = shell_words::split(&full_command)?;
if command_vec.len() == 1 {
process::Command::new(&command_vec[0])
.stdout(process::Stdio::null())
.stderr(process::Stdio::null())
.status()
.with_context(|| format!("Couldn't run hook '{}'", full_command))?;
} else {
process::Command::new(&command_vec[0])
.args(&command_vec[1..])
.stdout(process::Stdio::null())
.stderr(process::Stdio::null())
.status()
.with_context(|| format!("Couldn't run hook '{}'", full_command))?;
}
}
Ok(())
} | identifier_body |
constrained_attack.py | import sys
sys.path.append('../../../')
import keras
from keras.layers import Input, Dense, Activation
from keras.layers.merge import Maximum, Concatenate
from keras.models import Model
from keras.optimizers import Adam
from keras.utils import plot_model
# import required to load the attacked model
from autoencoder_BATADAL import load_AEED
from sklearn.preprocessing import MinMaxScaler
from sklearn.metrics import accuracy_score, f1_score, roc_curve, auc, precision_score
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import pickle
import os
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
def sort_temp_and_drop(row_index, temp):
"""
sort descending and drop values below theta.
given the current reconstruction errors, we sort them descending and drop the values lower than the theta threshold
Parameters
----------
row_index : int
temp : Pandas DataFrame
Autoencoder output vector
Returns
--------
reconstruction vector sorted descending, with values below theta dropped
"""
# print(temp)
temp = temp.sort_values(by=row_index, axis=1, ascending=False)
i = 0
for col in temp.columns:
if temp.loc[row_index, col] < theta:
break
i = i + 1
temp = temp.drop(columns=temp.columns[i:43])
return(temp.copy())
def scale_input_and_detect_single(index, X):
"""
given a row of the dataset, transform it with the scaler and check whether it is detected by the model.
Parameters
----------
index : int
row index
X : pandas DataFrame
DataFrame containing one row of sensor readings
Returns
--------
bool
detection outcome
float
average reconstruction error
pandas dataframe
reconstruction error vector for the considered sensor readings
"""
X_transformed = pd.DataFrame(
index=[index], columns=xset, data=scaler.transform(X))
Yhat, error, temp, _ = autoencoder.detect(
X_transformed, theta=theta, window=1, average=True)
return Yhat, error, temp
def scale_input_and_detect(index, X):
"""
given candidate rows of the dataset, transform them with the scaler and keep the one with the lowest reconstruction error.
Parameters
----------
index : int
row index
X : pandas DataFrame
DataFrame containing candidate rows of sensor readings
Returns
--------
pandas DataFrame
candidate row with the lowest reconstruction error
float
its average reconstruction error
"""
X_transformed = pd.DataFrame(
columns=xset, data=scaler.transform(X), index=X.index)
_, error, _, _ = autoencoder.detect(
X_transformed, theta=theta, window=1, average=True)
error_df = pd.DataFrame({'error': error})
X = pd.concat([X, error_df], axis=1)
X = X.iloc[X['error'].idxmin()]
# print(X)
error = X['error']
X = X.drop('error')
X = pd.DataFrame([X])
# print(X)
return X, error
def compute_mutation_factor(att_data, newBest):
"""
compute how many column values have been changed at the end of the transformation.
Parameters
----------
att_data : pandas DataFrame
original sensor readings
newBest : pandas DataFrame
concealed sensor readings
"""
X2 = pd.DataFrame(index=att_data.index,
columns=xset, data=att_data[xset])
frames = [X2, newBest]
merge = pd.concat(frames)
merge.loc['Diff'] = merge.iloc[0] - merge.iloc[1]
changed_columns[row_index] = merge.loc['Diff'].astype(bool).sum()
print('changed tuples: ' + str(len(changed_columns)))
def | (row_index, prev_col_name, changed_variables, max_concealable_variables):
"""
select the sensor value to be manipulated depending on the constraints
Parameters
----------
row_index : int
prev_col_name : string
changed_variables : dict
per-row list of variables that can be manipulated
max_concealable_variables : int
number of variables that can be manipulated
Returns
----------
string
the column that will be manipulated in the current iteration
"""
if prev_col_name is None:
return changed_variables[row_index][0]
return changed_variables[row_index][(changed_variables[row_index].index(prev_col_name)+1) % max_concealable_variables]
# this is the main algorithm; it actually transforms the input row trying to change its label.
# second attempt: after 5 changes on the same variable, update the ranking and optimize the top-ranked variable
def change_vector_label(row_index, att_data, solutions_found, changed_variables, variables):
"""
this is the main algorithm; it actually transforms the input row trying to change its predicted label.
after 5 changes on the same variable, it updates the ranking and optimizes the newly top-ranked variable
Parameters
----------
row_index : int
att_data : pandas DataFrame
original data to be concealed
solutions_found : int
counter of found solutions
Returns
--------
pandas dataframe
solution found
int
updated counter of solutions
"""
original_vector = att_data.copy()
changes = 0
found_solution = 0
_, error, temp = scale_input_and_detect_single(row_index, att_data)
previous_best_error = error[row_index]
temp = sort_temp_and_drop(row_index, temp)
prev_col_name = None
num_changes_without_optimizations = 0
last_optimization = 0
newBest = att_data.copy()
optimized = False
changed_variables[row_index] = variables[max_concealable_variables]
while changes < budget and (changes - last_optimization) < patience and not(found_solution):
col_name = choose_column(row_index, prev_col_name, changed_variables, max_concealable_variables)
prev_col_name = col_name
if debug:
print('______________________________')
print(col_name)
print('______________________________')
values = np.arange(
normal_op_ranges[col_name]['min'], normal_op_ranges[col_name]['max']+0.1, normal_op_ranges[col_name]['step'])
# print(values)
att_data = att_data.append(
[att_data] * (len(values)), ignore_index=True)
att_data = att_data[:-1] # delete excessive length
# substitute column values using the normal operating ranges
att_data[col_name] = values
att_data, error = scale_input_and_detect(row_index, att_data)
if error < previous_best_error:
if debug:
print(error, previous_best_error)
previous_best_error = error
newBest = att_data.copy()
last_optimization = changes
num_changes_without_optimizations = 0
optimized = True
try:
if col_name not in changed_variables[row_index]:
changed_variables[row_index].append(col_name)
except:
changed_variables[row_index] = [col_name]
else:
optimized = False
if error < theta:
solutions_found = solutions_found + 1
found_solution = 1
print('Found solution number: ' + str(solutions_found))
if optimized == False:
num_changes_without_optimizations = num_changes_without_optimizations + 1
att_data = newBest.copy()
_, error, temp = scale_input_and_detect_single(
row_index, att_data)
temp = sort_temp_and_drop(row_index, temp)
changes = changes + 1
if debug:
print(temp)
print('--__--__--')
print(changes)
print('--__--__--')
compute_mutation_factor(original_vector, att_data.copy())
return newBest.copy(), solutions_found
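# Loop-control sketch (illustrative, not part of the attack): with BATADAL's
# budget=200 and patience=15, the greedy search above stops as soon as 200
# candidate grids were tried, 15 consecutive grids brought no improvement,
# or the mean reconstruction error fell below theta (a concealment is found).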
"""
Select which dataset you are considering
(we are not allowed to publish WADI data; please request it from the iTrust Singapore website)
"""
dataset = 'BATADAL' #'WADI'
data_folder = '../../Data/'+dataset
if dataset == 'BATADAL':
attack_ids = range(1,15)
att_data = pd.read_csv(data_folder+'/attack_1_from_test_dataset.csv')
xset = [col for col in att_data.columns if col not in [
'Unnamed: 0', 'DATETIME', 'ATT_FLAG']]
budget = 200
patience = 15
if dataset == 'WADI':
attack_ids = [1, 2, 3, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]
att_data = pd.read_csv(data_folder+'/attack_1_from_test_dataset.csv')
xset = [col for col in att_data.columns if col not in [
'Row', 'DATETIME','ATT_FLAG', '2_MV_001_STATUS', '2_LT_001_PV', '2_MV_002_STATUS']]
budget = 300
patience = 40
yset = ['ATT_FLAG']
autoencoder = load_AEED("../../Attacked_Model/"+dataset+"/autoencoder.json", "../../Attacked_Model/"+dataset+"/autoencoder.h5")
scaler = pickle.load(open("../../Attacked_Model/"+dataset+"/scaler.p", "rb"))
with open("../../Attacked_Model/"+dataset+"/theta") as f:
theta = float(f.read())
normal_op_ranges = pickle.load(open('dict_'+dataset+'.p', 'rb'))
for att_number in attack_ids:
variables = {}
f = open("./constraints/"+dataset+"/constraint_variables_attack_"+str(att_number)+".txt", 'r').read()
variables = eval(f)
for max_concealable_variables in [2, 3, 4, 5, 6, 7, 8, 9, 10, 15, 20]:
debug = False
changed_columns = {}
changed_variables = {}
print('ATT NUMBER: '+str(att_number))
att_data = pd.read_csv(
data_folder+'/attack_'+str(att_number)+'_from_test_dataset.csv')
y_att = att_data[yset]
X = pd.DataFrame(index=att_data.index,
columns=xset, data=att_data[xset])
new_tuples = pd.DataFrame(columns=xset)
# main loop that iterates over every row in the dataset
changed_rows = 0
solutions_found = 0
max_spent_time = 0
sum_spent = 0
times = []
import time
for row_index, row in X.iterrows():
prov = pd.DataFrame(index=[row_index],
columns=xset, data=att_data[xset])
Yhat, original_error, temp = scale_input_and_detect_single(
row_index, prov)
if Yhat[row_index]:
start_time = time.time()
modified_row, solutions_found = change_vector_label(
row_index, prov, solutions_found, changed_variables, variables)
spent_time = time.time() - start_time
print("--- %s seconds ---" % spent_time)
sum_spent = sum_spent + spent_time
if max_spent_time < spent_time:
max_spent_time = spent_time
new_tuples = new_tuples.append(modified_row, ignore_index=True)
changed_rows = changed_rows + 1
times.append(spent_time)
else:
new_tuples = new_tuples.append(prov)
new_tuples['DATETIME'] = att_data['DATETIME']
new_tuples['ATT_FLAG'] = att_data['ATT_FLAG']
new_tuples.to_csv('./results/'+dataset+'/max_constraints_fixed/whitebox_attack_' +
str(att_number)+'_from_test_dataset_max_'+str(max_concealable_variables)+'.csv')
print('mean spent time: ' + str(sum_spent/changed_rows))
with open('./results/'+dataset+'/max_constraints_fixed/time_spent_new_sequential_v2.0_fixed_budget.txt', 'a') as f:
f.write('______attack: '+str(att_number)+'max: '+str(max_concealable_variables)+'______\n')
f.write('Mean: ' + str(np.mean(times)))
f.write('\n')
f.write('STD: ' + str(np.std(times)))
f.write('\n') | choose_column | identifier_name |
constrained_attack.py | import sys
sys.path.append('../../../')
import keras
from keras.layers import Input, Dense, Activation
from keras.layers.merge import Maximum, Concatenate
from keras.models import Model
from keras.optimizers import Adam
from keras.utils import plot_model
# import required to load the attacked model
from autoencoder_BATADAL import load_AEED
from sklearn.preprocessing import MinMaxScaler
from sklearn.metrics import accuracy_score, f1_score, roc_curve, auc, precision_score
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import pickle
import os
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
def sort_temp_and_drop(row_index, temp):
"""
sort descending and drop values below theta.
given the current reconstruction errors, we sort them descending and drop the values lower than the theta threshold
Parameters
----------
row_index : int
temp : Pandas DataFrame
Autoencoder output vector
Returns
--------
reconstruction vector sorted descending, with values below theta dropped
"""
# print(temp)
temp = temp.sort_values(by=row_index, axis=1, ascending=False)
i = 0
for col in temp.columns:
if temp.loc[row_index, col] < theta:
break
i = i + 1
temp = temp.drop(columns=temp.columns[i:43])
return(temp.copy())
def scale_input_and_detect_single(index, X):
"""
given a row of the dataset, transform it with the scaler and check whether it is detected by the model.
Parameters
----------
index : int
row index
X : pandas DataFrame
DataFrame containing one row of sensor readings
Returns
--------
bool
detection outcome
float
average reconstruction error
pandas dataframe
reconstruction error vector for the considered sensor readings
"""
X_transformed = pd.DataFrame(
index=[index], columns=xset, data=scaler.transform(X))
Yhat, error, temp, _ = autoencoder.detect(
X_transformed, theta=theta, window=1, average=True)
return Yhat, error, temp
def scale_input_and_detect(index, X):
"""
given candidate rows of the dataset, transform them with the scaler and keep the one with the lowest reconstruction error.
Parameters
----------
index : int
row index
X : pandas DataFrame
DataFrame containing candidate rows of sensor readings
Returns
--------
pandas DataFrame
candidate row with the lowest reconstruction error
float
its average reconstruction error
"""
X_transformed = pd.DataFrame(
columns=xset, data=scaler.transform(X), index=X.index)
_, error, _, _ = autoencoder.detect(
X_transformed, theta=theta, window=1, average=True)
error_df = pd.DataFrame({'error': error})
X = pd.concat([X, error_df], axis=1)
X = X.iloc[X['error'].idxmin()]
# print(X)
error = X['error']
X = X.drop('error')
X = pd.DataFrame([X])
# print(X)
return X, error
def compute_mutation_factor(att_data, newBest):
"""
compute how many column values have been changed at the end of the transformation.
Parameters
----------
att_data : pandas DataFrame
original sensor readings
newBest : pandas DataFrame
concealed sensor readings
"""
X2 = pd.DataFrame(index=att_data.index,
columns=xset, data=att_data[xset])
frames = [X2, newBest]
merge = pd.concat(frames)
merge.loc['Diff'] = merge.iloc[0] - merge.iloc[1]
changed_columns[row_index] = merge.loc['Diff'].astype(bool).sum()
print('changed tuples: ' + str(len(changed_columns)))
def choose_column(row_index, prev_col_name, changed_variables, max_concealable_variables):
"""
select the sensor value to be manipulated depending on the constraints
Parameters
----------
row_index : int
prev_col_name : string
changed_variables : dict
per-row list of variables that can be manipulated
max_concealable_variables : int
number of variables that can be manipulated
Returns
----------
string
the column that will be manipulated in the current iteration
"""
if prev_col_name is None:
return changed_variables[row_index][0]
return changed_variables[row_index][(changed_variables[row_index].index(prev_col_name)+1) % max_concealable_variables]
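# Illustrative round-robin behaviour (hypothetical values):
# choose_column(0, None, {0: ['a', 'b', 'c']}, 3)  # -> 'a' (first call)
# choose_column(0, 'a', {0: ['a', 'b', 'c']}, 3)   # -> 'b'
# choose_column(0, 'c', {0: ['a', 'b', 'c']}, 3)   # -> 'a' (wraps via modulo)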
# this is the main algorithm; it actually transforms the input row trying to change its label.
# second attempt: after 5 changes on the same variable, update the ranking and optimize the top-ranked variable
def change_vector_label(row_index, att_data, solutions_found, changed_variables, variables):
"""
this is the main algorithm; it actually transforms the input row trying to change its predicted label.
after 5 changes on the same variable, it updates the ranking and optimizes the newly top-ranked variable
Parameters
----------
row_index : int
att_data : pandas DataFrame
original data to be concealed
solutions_found : int
counter of found solutions
Returns
--------
pandas dataframe
solution found
int
updated counter of solutions
"""
original_vector = att_data.copy()
changes = 0
found_solution = 0
_, error, temp = scale_input_and_detect_single(row_index, att_data)
previous_best_error = error[row_index]
temp = sort_temp_and_drop(row_index, temp)
prev_col_name = None
num_changes_without_optimizations = 0
last_optimization = 0
newBest = att_data.copy()
optimized = False
changed_variables[row_index] = variables[max_concealable_variables]
while changes < budget and (changes - last_optimization) < patience and not(found_solution):
col_name = choose_column(row_index, prev_col_name, changed_variables, max_concealable_variables)
prev_col_name = col_name
if debug:
print('______________________________')
print(col_name)
print('______________________________')
values = np.arange(
normal_op_ranges[col_name]['min'], normal_op_ranges[col_name]['max']+0.1, normal_op_ranges[col_name]['step'])
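# e.g. a hypothetical range {'min': 0.0, 'max': 1.0, 'step': 0.5} yields
# array([0. , 0.5, 1. ]); the +0.1 slack keeps 'max' itself in the grid.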
# print(values)
att_data = att_data.append(
[att_data] * (len(values)), ignore_index=True)
att_data = att_data[:-1] # delete excessive length
# substitute column values using the normal operating ranges
att_data[col_name] = values
att_data, error = scale_input_and_detect(row_index, att_data)
if error < previous_best_error:
if debug:
|
previous_best_error = error
newBest = att_data.copy()
last_optimization = changes
num_changes_without_optimizations = 0
optimized = True
try:
if col_name not in changed_variables[row_index]:
changed_variables[row_index].append(col_name)
except:
changed_variables[row_index] = [col_name]
else:
optimized = False
if error < theta:
solutions_found = solutions_found + 1
found_solution = 1
print('Found solution number: ' + str(solutions_found))
if optimized == False:
num_changes_without_optimizations = num_changes_without_optimizations + 1
att_data = newBest.copy()
_, error, temp = scale_input_and_detect_single(
row_index, att_data)
temp = sort_temp_and_drop(row_index, temp)
changes = changes + 1
if debug:
print(temp)
print('--__--__--')
print(changes)
print('--__--__--')
compute_mutation_factor(original_vector, att_data.copy())
return newBest.copy(), solutions_found
"""
Select which dataset you are considering
(we are not allowed to publish WADI data; please request it from the iTrust Singapore website)
"""
dataset = 'BATADAL' #'WADI'
data_folder = '../../Data/'+dataset
if dataset == 'BATADAL':
attack_ids = range(1,15)
att_data = pd.read_csv(data_folder+'/attack_1_from_test_dataset.csv')
xset = [col for col in att_data.columns if col not in [
'Unnamed: 0', 'DATETIME', 'ATT_FLAG']]
budget = 200
patience = 15
if dataset == 'WADI':
attack_ids = [1, 2, 3, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]
att_data = pd.read_csv(data_folder+'/attack_1_from_test_dataset.csv')
xset = [col for col in att_data.columns if col not in [
'Row', 'DATETIME','ATT_FLAG', '2_MV_001_STATUS', '2_LT_001_PV', '2_MV_002_STATUS']]
budget = 300
patience = 40
yset = ['ATT_FLAG']
autoencoder = load_AEED("../../Attacked_Model/"+dataset+"/autoencoder.json", "../../Attacked_Model/"+dataset+"/autoencoder.h5")
scaler = pickle.load(open("../../Attacked_Model/"+dataset+"/scaler.p", "rb"))
with open("../../Attacked_Model/"+dataset+"/theta") as f:
theta = float(f.read())
normal_op_ranges = pickle.load(open('dict_'+dataset+'.p', 'rb'))
for att_number in attack_ids:
variables = {}
f = open("./constraints/"+dataset+"/constraint_variables_attack_"+str(att_number)+".txt", 'r').read()
variables = eval(f)
for max_concealable_variables in [2, 3, 4, 5, 6, 7, 8, 9, 10, 15, 20]:
debug = False
changed_columns = {}
changed_variables = {}
print('ATT NUMBER: '+str(att_number))
att_data = pd.read_csv(
data_folder+'/attack_'+str(att_number)+'_from_test_dataset.csv')
y_att = att_data[yset]
X = pd.DataFrame(index=att_data.index,
columns=xset, data=att_data[xset])
new_tuples = pd.DataFrame(columns=xset)
# main loop that iterates over every row in the dataset
changed_rows = 0
solutions_found = 0
max_spent_time = 0
sum_spent = 0
times = []
import time
for row_index, row in X.iterrows():
prov = pd.DataFrame(index=[row_index],
columns=xset, data=att_data[xset])
Yhat, original_error, temp = scale_input_and_detect_single(
row_index, prov)
if Yhat[row_index]:
start_time = time.time()
modified_row, solutions_found = change_vector_label(
row_index, prov, solutions_found, changed_variables, variables)
spent_time = time.time() - start_time
print("--- %s seconds ---" % spent_time)
sum_spent = sum_spent + spent_time
if max_spent_time < spent_time:
max_spent_time = spent_time
new_tuples = new_tuples.append(modified_row, ignore_index=True)
changed_rows = changed_rows + 1
times.append(spent_time)
else:
new_tuples = new_tuples.append(prov)
new_tuples['DATETIME'] = att_data['DATETIME']
new_tuples['ATT_FLAG'] = att_data['ATT_FLAG']
new_tuples.to_csv('./results/'+dataset+'/max_constraints_fixed/whitebox_attack_' +
str(att_number)+'_from_test_dataset_max_'+str(max_concealable_variables)+'.csv')
print('mean spent time: ' + str(sum_spent/changed_rows))
with open('./results/'+dataset+'/max_constraints_fixed/time_spent_new_sequential_v2.0_fixed_budget.txt', 'a') as f:
f.write('______attack: '+str(att_number)+'max: '+str(max_concealable_variables)+'______\n')
f.write('Mean: ' + str(np.mean(times)))
f.write('\n')
f.write('STD: ' + str(np.std(times)))
f.write('\n') | print(error, previous_best_error) | conditional_block |
constrained_attack.py | import sys
sys.path.append('../../../')
import keras
from keras.layers import Input, Dense, Activation
from keras.layers.merge import Maximum, Concatenate
from keras.models import Model
from keras.optimizers import Adam
from keras.utils import plot_model
# import required to load the attacked model
from autoencoder_BATADAL import load_AEED
from sklearn.preprocessing import MinMaxScaler
from sklearn.metrics import accuracy_score, f1_score, roc_curve, auc, precision_score
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import pickle
import os
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
def sort_temp_and_drop(row_index, temp):
"""
sort descending and drop values below theta.
given the current reconstruction errors, we sort them descending and drop the values lower than the theta threshold
Parameters
----------
row_index : int
temp : Pandas DataFrame
Autoencoder output vector
Returns
--------
reconstruction vector sorted descending, with values below theta dropped
"""
# print(temp)
temp = temp.sort_values(by=row_index, axis=1, ascending=False)
i = 0
for col in temp.columns:
if temp.loc[row_index, col] < theta:
break
i = i + 1
temp = temp.drop(columns=temp.columns[i:43])
return(temp.copy())
def scale_input_and_detect_single(index, X):
"""
given a row of the dataset, transform it with the scaler and check whether it is detected by the model.
Parameters
----------
index : int
row index
X : pandas DataFrame
DataFrame containing one row of sensor readings
Returns
--------
bool
detection outcome
float
average reconstruction error
pandas dataframe
reconstruction error vector for the considered sensor readings
"""
X_transformed = pd.DataFrame(
index=[index], columns=xset, data=scaler.transform(X))
Yhat, error, temp, _ = autoencoder.detect(
X_transformed, theta=theta, window=1, average=True)
return Yhat, error, temp
def scale_input_and_detect(index, X):
"""
given candidate rows of the dataset, transform them with the scaler and keep the one with the lowest reconstruction error.
Parameters
----------
index : int
row index
X : pandas DataFrame
DataFrame containing candidate rows of sensor readings
Returns
--------
pandas DataFrame
candidate row with the lowest reconstruction error
float
its average reconstruction error
"""
X_transformed = pd.DataFrame(
columns=xset, data=scaler.transform(X), index=X.index)
_, error, _, _ = autoencoder.detect(
X_transformed, theta=theta, window=1, average=True)
error_df = pd.DataFrame({'error': error})
X = pd.concat([X, error_df], axis=1)
X = X.iloc[X['error'].idxmin()]
# print(X)
error = X['error']
X = X.drop('error')
X = pd.DataFrame([X])
# print(X)
return X, error
def compute_mutation_factor(att_data, newBest):
"""
compute how many column values have been changed at the end of the transformation.
Parameters
----------
att_data : pandas DataFrame
original sensor readings
newBest : pandas DataFrame
concealed sensor readings
"""
X2 = pd.DataFrame(index=att_data.index,
columns=xset, data=att_data[xset])
frames = [X2, newBest]
merge = pd.concat(frames)
merge.loc['Diff'] = merge.iloc[0] - merge.iloc[1]
changed_columns[row_index] = merge.loc['Diff'].astype(bool).sum()
print('changed tuples: ' + str(len(changed_columns)))
def choose_column(row_index, prev_col_name, changed_variables, max_concealable_variables):
"""
select the sensor value to be manipulated depending on the constraints
Parameters
----------
row_index : int
prev_col_name : string
changed_variables : dict
per-row list of variables that can be manipulated
max_concealable_variables : int
number of variables that can be manipulated
Returns
----------
string
the column that will be manipulated in the current iteration
"""
if prev_col_name is None:
return changed_variables[row_index][0]
return changed_variables[row_index][(changed_variables[row_index].index(prev_col_name)+1) % max_concealable_variables]
# this is the main algorithm; it actually transforms the input row trying to change its label.
# second attempt: after 5 changes on the same variable, update the ranking and optimize the top-ranked variable
def change_vector_label(row_index, att_data, solutions_found, changed_variables, variables):
"""
this is the main algorithm; it actually transforms the input row trying to change its predicted label.
after 5 changes on the same variable, it updates the ranking and optimizes the newly top-ranked variable
Parameters
----------
row_index : int
att_data : pandas DataFrame
original data to be concealed
solutions_found : int
counter of found solutions
Returns
--------
pandas dataframe
solution found
int
updated counter of solutions
"""
original_vector = att_data.copy()
changes = 0
found_solution = 0
_, error, temp = scale_input_and_detect_single(row_index, att_data)
previous_best_error = error[row_index]
temp = sort_temp_and_drop(row_index, temp)
prev_col_name = None
num_changes_without_optimizations = 0
last_optimization = 0
newBest = att_data.copy()
optimized = False
changed_variables[row_index] = variables[max_concealable_variables]
while changes < budget and (changes - last_optimization) < patience and not(found_solution):
col_name = choose_column(row_index, prev_col_name, changed_variables, max_concealable_variables)
prev_col_name = col_name
if debug:
print('______________________________')
print(col_name)
print('______________________________')
values = np.arange(
normal_op_ranges[col_name]['min'], normal_op_ranges[col_name]['max']+0.1, normal_op_ranges[col_name]['step'])
# print(values)
att_data = att_data.append(
[att_data] * (len(values)), ignore_index=True)
att_data = att_data[:-1] # delete excessive length
# substitute column values using the normal operating ranges
att_data[col_name] = values
att_data, error = scale_input_and_detect(row_index, att_data)
if error < previous_best_error:
if debug:
print(error, previous_best_error)
previous_best_error = error
newBest = att_data.copy()
last_optimization = changes
num_changes_without_optimizations = 0
optimized = True
try:
if col_name not in changed_variables[row_index]:
changed_variables[row_index].append(col_name)
except:
changed_variables[row_index] = [col_name]
else:
optimized = False
if error < theta:
solutions_found = solutions_found + 1
found_solution = 1
print('Found solution number: ' + str(solutions_found))
if optimized == False:
num_changes_without_optimizations = num_changes_without_optimizations + 1
att_data = newBest.copy()
_, error, temp = scale_input_and_detect_single(
row_index, att_data)
temp = sort_temp_and_drop(row_index, temp)
changes = changes + 1
if debug:
print(temp)
print('--__--__--')
print(changes)
print('--__--__--')
compute_mutation_factor(original_vector, att_data.copy())
return newBest.copy(), solutions_found
"""
Select which dataset you are considering
(we are not allowed to publish WADI data; please request it from the iTrust Singapore website)
"""
dataset = 'BATADAL' #'WADI'
data_folder = '../../Data/'+dataset
if dataset == 'BATADAL':
attack_ids = range(1,15)
att_data = pd.read_csv(data_folder+'/attack_1_from_test_dataset.csv')
xset = [col for col in att_data.columns if col not in [
'Unnamed: 0', 'DATETIME', 'ATT_FLAG']]
budget = 200
patience = 15
if dataset == 'WADI':
attack_ids = [1, 2, 3, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]
att_data = pd.read_csv(data_folder+'/attack_1_from_test_dataset.csv')
xset = [col for col in att_data.columns if col not in [
'Row', 'DATETIME','ATT_FLAG', '2_MV_001_STATUS', '2_LT_001_PV', '2_MV_002_STATUS']]
budget = 300
patience = 40
yset = ['ATT_FLAG']
autoencoder = load_AEED("../../Attacked_Model/"+dataset+"/autoencoder.json", "../../Attacked_Model/"+dataset+"/autoencoder.h5")
scaler = pickle.load(open("../../Attacked_Model/"+dataset+"/scaler.p", "rb"))
with open("../../Attacked_Model/"+dataset+"/theta") as f:
theta = float(f.read())
normal_op_ranges = pickle.load(open('dict_'+dataset+'.p', 'rb'))
for att_number in attack_ids:
variables = {}
f = open("./constraints/"+dataset+"/constraint_variables_attack_"+str(att_number)+".txt", 'r').read()
variables = eval(f)
for max_concealable_variables in [2, 3, 4, 5, 6, 7, 8, 9, 10, 15, 20]:
debug = False
changed_columns = {}
changed_variables = {}
print('ATT NUMBER: '+str(att_number))
att_data = pd.read_csv(
data_folder+'/attack_'+str(att_number)+'_from_test_dataset.csv')
y_att = att_data[yset]
X = pd.DataFrame(index=att_data.index,
columns=xset, data=att_data[xset])
new_tuples = pd.DataFrame(columns=xset)
# main loop that iterates over every row in the dataset
changed_rows = 0
solutions_found = 0
max_spent_time = 0
sum_spent = 0
times = []
import time
for row_index, row in X.iterrows(): | if Yhat[row_index]:
start_time = time.time()
modified_row, solutions_found = change_vector_label(
row_index, prov, solutions_found, changed_variables, variables)
spent_time = time.time() - start_time
print("--- %s seconds ---" % spent_time)
sum_spent = sum_spent + spent_time
if max_spent_time < spent_time:
max_spent_time = spent_time
new_tuples = new_tuples.append(modified_row, ignore_index=True)
changed_rows = changed_rows + 1
times.append(spent_time)
else:
new_tuples = new_tuples.append(prov)
new_tuples['DATETIME'] = att_data['DATETIME']
new_tuples['ATT_FLAG'] = att_data['ATT_FLAG']
new_tuples.to_csv('./results/'+dataset+'/max_constraints_fixed/whitebox_attack_' +
str(att_number)+'_from_test_dataset_max_'+str(max_concealable_variables)+'.csv')
print('mean spent time: ' + str(sum_spent/changed_rows))
with open('./results/'+dataset+'/max_constraints_fixed/time_spent_new_sequential_v2.0_fixed_budget.txt', 'a') as f:
f.write('______attack: '+str(att_number)+'max: '+str(max_concealable_variables)+'______\n')
f.write('Mean: ' + str(np.mean(times)))
f.write('\n')
f.write('STD: ' + str(np.std(times)))
f.write('\n') | prov = pd.DataFrame(index=[row_index],
columns=xset, data=att_data[xset])
Yhat, original_error, temp = scale_input_and_detect_single(
row_index, prov) | random_line_split |
constrained_attack.py | import sys
sys.path.append('../../../')
import keras
from keras.layers import Input, Dense, Activation
from keras.layers.merge import Maximum, Concatenate
from keras.models import Model
from keras.optimizers import Adam
from keras.utils import plot_model
# import required to load the attacked model
from autoencoder_BATADAL import load_AEED
from sklearn.preprocessing import MinMaxScaler
from sklearn.metrics import accuracy_score, f1_score, roc_curve, auc, precision_score
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import pickle
import os
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
def sort_temp_and_drop(row_index, temp):
"""
sort descending and drop values below theta.
given the current reconstruction errors, we sort them descending and drop the values lower than the theta threshold
Parameters
----------
row_index : int
temp : Pandas DataFrame
Autoencoder output vector
Returns
--------
reconstruction vector sorted descending, with values below theta dropped
"""
# print(temp)
temp = temp.sort_values(by=row_index, axis=1, ascending=False)
i = 0
for col in temp.columns:
if temp.loc[row_index, col] < theta:
break
i = i + 1
temp = temp.drop(columns=temp.columns[i:43])
return(temp.copy())
def scale_input_and_detect_single(index, X):
"""
given a row of the dataset, transform it with the scaler and check whether it is detected by the model.
Parameters
----------
index : int
row index
X : pandas DataFrame
DataFrame containing one row of sensor readings
Returns
--------
bool
detection outcome
float
average reconstruction error
pandas dataframe
reconstruction error vector for the considered sensor readings
"""
X_transformed = pd.DataFrame(
index=[index], columns=xset, data=scaler.transform(X))
Yhat, error, temp, _ = autoencoder.detect(
X_transformed, theta=theta, window=1, average=True)
return Yhat, error, temp
def scale_input_and_detect(index, X):
"""
given candidate rows of the dataset, transform them with the scaler and keep the one with the lowest reconstruction error.
Parameters
----------
index : int
row index
X : pandas DataFrame
DataFrame containing candidate rows of sensor readings
Returns
--------
pandas DataFrame
candidate row with the lowest reconstruction error
float
its average reconstruction error
"""
X_transformed = pd.DataFrame(
columns=xset, data=scaler.transform(X), index=X.index)
_, error, _, _ = autoencoder.detect(
X_transformed, theta=theta, window=1, average=True)
error_df = pd.DataFrame({'error': error})
X = pd.concat([X, error_df], axis=1)
X = X.iloc[X['error'].idxmin()]
# print(X)
error = X['error']
X = X.drop('error')
X = pd.DataFrame([X])
# print(X)
return X, error
def compute_mutation_factor(att_data, newBest):
"""
compute how many column values have been changed at the end of the transformation.
Parameters
----------
att_data : pandas DataFrame
original sensor readings
newBest : pandas DataFrame
concealed sensor readings
"""
X2 = pd.DataFrame(index=att_data.index,
columns=xset, data=att_data[xset])
frames = [X2, newBest]
merge = pd.concat(frames)
merge.loc['Diff'] = merge.iloc[0] - merge.iloc[1]
changed_columns[row_index] = merge.loc['Diff'].astype(bool).sum()
print('changed tuples: ' + str(len(changed_columns)))
def choose_column(row_index, prev_col_name, changed_variables, max_concealable_variables):
"""
select the sensor value to be manipulated depending on the constraints
Parameters
----------
row_index : int
prev_col_name : string
changed_variables : dict
per-row list of variables that can be manipulated
max_concealable_variables : int
number of variables that can be manipulated
Returns
----------
string
the column that will be manipulated in the current iteration
"""
if prev_col_name is None:
return changed_variables[row_index][0]
return changed_variables[row_index][(changed_variables[row_index].index(prev_col_name)+1) % max_concealable_variables]
# this is the main algorithm; it actually transforms the input row trying to change its label.
# second attempt: after 5 changes on the same variable, update the ranking and optimize the top-ranked variable
def change_vector_label(row_index, att_data, solutions_found, changed_variables, variables):
|
"""
Select which dataset you are considering
(we are not allowed to publish WADI data; please request it from the iTrust Singapore website)
"""
dataset = 'BATADAL' #'WADI'
data_folder = '../../Data/'+dataset
if dataset == 'BATADAL':
attack_ids = range(1,15)
att_data = pd.read_csv(data_folder+'/attack_1_from_test_dataset.csv')
xset = [col for col in att_data.columns if col not in [
'Unnamed: 0', 'DATETIME', 'ATT_FLAG']]
budget = 200
patience = 15
if dataset == 'WADI':
attack_ids = [1, 2, 3, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]
att_data = pd.read_csv(data_folder+'/attack_1_from_test_dataset.csv')
xset = [col for col in att_data.columns if col not in [
'Row', 'DATETIME','ATT_FLAG', '2_MV_001_STATUS', '2_LT_001_PV', '2_MV_002_STATUS']]
budget = 300
patience = 40
yset = ['ATT_FLAG']
autoencoder = load_AEED("../../Attacked_Model/"+dataset+"/autoencoder.json", "../../Attacked_Model/"+dataset+"/autoencoder.h5")
scaler = pickle.load(open("../../Attacked_Model/"+dataset+"/scaler.p", "rb"))
with open("../../Attacked_Model/"+dataset+"/theta") as f:
theta = float(f.read())
normal_op_ranges = pickle.load(open('dict_'+dataset+'.p', 'rb'))
for att_number in attack_ids:
variables = {}
f = open("./constraints/"+dataset+"/constraint_variables_attack_"+str(att_number)+".txt", 'r').read()
variables = eval(f)
for max_concealable_variables in [2, 3, 4, 5, 6, 7, 8, 9, 10, 15, 20]:
debug = False
changed_columns = {}
changed_variables = {}
print('ATT NUMBER: '+str(att_number))
att_data = pd.read_csv(
data_folder+'/attack_'+str(att_number)+'_from_test_dataset.csv')
y_att = att_data[yset]
X = pd.DataFrame(index=att_data.index,
columns=xset, data=att_data[xset])
new_tuples = pd.DataFrame(columns=xset)
# main loop that iterates over every row in the dataset
changed_rows = 0
solutions_found = 0
max_spent_time = 0
sum_spent = 0
times = []
import time
for row_index, row in X.iterrows():
prov = pd.DataFrame(index=[row_index],
columns=xset, data=att_data[xset])
Yhat, original_error, temp = scale_input_and_detect_single(
row_index, prov)
if Yhat[row_index]:
start_time = time.time()
modified_row, solutions_found = change_vector_label(
row_index, prov, solutions_found, changed_variables, variables)
spent_time = time.time() - start_time
print("--- %s seconds ---" % spent_time)
sum_spent = sum_spent + spent_time
if max_spent_time < spent_time:
max_spent_time = spent_time
new_tuples = new_tuples.append(modified_row, ignore_index=True)
changed_rows = changed_rows + 1
times.append(spent_time)
else:
new_tuples = new_tuples.append(prov)
new_tuples['DATETIME'] = att_data['DATETIME']
new_tuples['ATT_FLAG'] = att_data['ATT_FLAG']
new_tuples.to_csv('./results/'+dataset+'/max_constraints_fixed/whitebox_attack_' +
str(att_number)+'_from_test_dataset_max_'+str(max_concealable_variables)+'.csv')
print('mean spent time: ' + str(sum_spent/changed_rows))
with open('./results/'+dataset+'/max_constraints_fixed/time_spent_new_sequential_v2.0_fixed_budget.txt', 'a') as f:
f.write('______attack: '+str(att_number)+'max: '+str(max_concealable_variables)+'______\n')
f.write('Mean: ' + str(np.mean(times)))
f.write('\n')
f.write('STD: ' + str(np.std(times)))
f.write('\n') | """
this is the main algorithm, it actually transforms the input row trying to change its predicted label.
updates after 5 changes on the same variable the ranking and optimizes the new ranked 1 variable
Parameters
----------
row_index : int
att_data : pandas DataFrame
original data to be concealed
solutions_found : int
counter of found solution
Returns
--------
pandas dataframe
solution found
int
updated counter of solutions
"""
original_vector = att_data.copy()
changes = 0
found_solution = 0
_, error, temp = scale_input_and_detect_single(row_index, att_data)
previous_best_error = error[row_index]
temp = sort_temp_and_drop(row_index, temp)
prev_col_name = None
num_changes_without_optimizations = 0
last_optimization = 0
newBest = att_data.copy()
optimized = False
changed_variables[row_index] = variables[max_concealable_variables]
while changes < budget and (changes - last_optimization) < patience and not(found_solution):
col_name = choose_column(row_index, prev_col_name, changed_variables, max_concealable_variables)
prev_col_name = col_name
if debug:
print('______________________________')
print(col_name)
print('______________________________')
values = np.arange(
normal_op_ranges[col_name]['min'], normal_op_ranges[col_name]['max']+0.1, normal_op_ranges[col_name]['step'])
# print(values)
att_data = att_data.append(
[att_data] * (len(values)), ignore_index=True)
att_data = att_data[:-1] # delete excessive length
# substitute column values using the normal operating ranges
att_data[col_name] = values
att_data, error = scale_input_and_detect(row_index, att_data)
if error < previous_best_error:
if debug:
print(error, previous_best_error)
previous_best_error = error
newBest = att_data.copy()
last_optimization = changes
num_changes_without_optimizations = 0
optimized = True
try:
if col_name not in changed_variables[row_index]:
changed_variables[row_index].append(col_name)
except:
changed_variables[row_index] = [col_name]
else:
optimized = False
if error < theta:
solutions_found = solutions_found + 1
found_solution = 1
print('Found solution number: ' + str(solutions_found))
if optimized == False:
num_changes_without_optimizations = num_changes_without_optimizations + 1
att_data = newBest.copy()
_, error, temp = scale_input_and_detect_single(
row_index, att_data)
temp = sort_temp_and_drop(row_index, temp)
changes = changes + 1
if debug:
print(temp)
print('--__--__--')
print(changes)
print('--__--__--')
compute_mutation_factor(original_vector, att_data.copy())
return newBest.copy(), solutions_found | identifier_body |
watchman-diag | #!/usr/bin/env python3
# Collect some FB specific watchman diagnostics
import glob
import json
import os
import re
import stat
import subprocess
import sys
import time
import pywatchman
def print_table(table):
col_width = [max(len(x) for x in col) for col in zip(*table)]
for line in table:
print(
" ".join("{0:{1}}".format(x, col_width[i]) for i, x in enumerate(line))
+ " "
)
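# e.g. print_table([("PID", "EXE"), ("123", "/usr/bin/watchman")]) pads each
# column to its widest cell so the rows print aligned.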
def print_status(msg):
print(msg)
# If they're piping into `arc paste`, they may get impatient
if not os.isatty(sys.stdout.fileno()):
sys.stderr.write(msg + "\n")
class ProcessInfo(object):
def __init__(self):
self.pid_to_name = {}
def procname(self, pid):
if pid in self.pid_to_name:
return self.pid_to_name[pid]
name = os.readlink("/proc/%s/exe" % pid)
try:
name = os.path.realpath(name)
except Exception as e:
pass
self.pid_to_name[pid] = name
return name
class InotifyInfo(object):
"""Extract information about users of inotify on the system"""
def __init__(self, procinfo):
self.procinfo = procinfo
def procname(self, pid):
return self.procinfo.procname(pid)
def read_fdinfo(self, path):
bufsize = 65536
blob = []
fd = os.open(path, os.O_RDONLY)
while True:
buf = os.read(fd, bufsize)
if len(buf) == 0:
break
blob.append(buf)
os.close(fd)
if len(blob) == 0:
return None
return "".join(blob)
def parsefdinfo(self, blob):
watches = 0
for line in blob.split("\n"):
if line.find("inotify wd") != -1:
watches = watches + 1
return watches
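# Each watch appears in /proc/<pid>/fdinfo/<fd> as a line starting with
# "inotify wd:<n> ino:<inode> sdev:<dev> mask:<mask> ...", so counting those
# lines counts the watches held by that inotify descriptor.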
def get_watches(self):
watches = [("PID", "EXE", "FD", "WATCHES")]
for fddir in glob.glob("/proc/*/fd"):
for fdnode in glob.glob(fddir + "/*"):
try:
l = os.readlink(fdnode)
if l != "anon_inode:inotify":
continue
_, _, pid, _, fdnum = fdnode.split("/")
info = self.read_fdinfo("/proc/%s/fdinfo/%s" % (pid, fdnum))
if info is None:
watches.append(
(pid, self.procname(pid), fdnum, "<unknown> (see t8692428)")
)
continue
watches.append(
(pid, self.procname(pid), fdnum, str(self.parsefdinfo(info)))
)
except Exception as e:
pass
return watches
def walk_root(root, case_sensitive, ignores):
"""Generate a map of file nodes for the given dir by looking
at the filesystem"""
# we can't use os.walk because it insists on stating and derefing
# dewey and gvfs symlinks (== slow)
results = {}
# the queue of dirs to analyze
dirs = [root]
while len(dirs) > 0:
dir = dirs.pop()
for ent in os.listdir(dir):
full = os.path.join(dir, ent)
rel = os.path.relpath(full, root)
if rel in ignores:
continue
st = os.lstat(full)
if stat.S_ISDIR(st.st_mode):
# add this child to our dir queue
dirs.append(full)
item = (rel, st)
if not case_sensitive:
rel = rel.lower()
results[rel] = item
return results
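# Shape sketch (illustrative): for a root containing only "a/b.txt",
# walk_root returns {"a": ("a", <lstat>), "a/b.txt": ("a/b.txt", <lstat>)};
# keys are lowercased when case_sensitive is False, while values keep the
# original-case relative path.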
def collect_watch_info(watchman, watch):
root_config = watchman.query("get-config", watch)["config"]
watchmanconfig_file = os.path.join(watch, ".watchmanconfig")
file_config = {}
if os.path.exists(watchmanconfig_file):
with open(watchmanconfig_file) as f:
file_config = json.load(f)
if file_config != root_config:
print("Watchman root %s is using this configuration: %s" % (watch, root_config))
print("%s has this configuration: %s" % (watchmanconfig_file, file_config))
print_status(
(
"** You should run: `watchman watch-del %s ; "
+ "watchman watch %s` to reload .watchmanconfig **\n"
)
% (watch, watch)
)
if not is_eden(watch):
# Eden mounts don't use the sparse extension, so skip this bit
print("\nSparse configuration for %s:" % watch)
passthru("cd %s && hg sparse" % watch, shell=True)
# Eden watcher is stateless and doesn't have this
print("\nContent hash cache stats for %s:" % watch)
passthru(["watchman", "debug-contenthash", watch])
print("\nSymlink target cache stats for %s:" % watch)
passthru(["watchman", "debug-symlink-target-cache", watch])
print("\nSubscriptions for %s:" % watch)
passthru(["watchman", "debug-get-subscriptions", watch])
print("\nAsserted states for %s:" % watch)
passthru(["watchman", "debug-get-asserted-states", watch])
def is_eden(dirpath):
if sys.platform == "win32":
return os.path.isfile(os.path.join(dirpath, ".eden", "config"))
return os.path.islink(os.path.join(dirpath, ".eden", "root"))
def cross_check_watch(watchman, watch, case_sensitive):
if is_eden(watch):
# We don't keep any state in watchman for eden mounts
# that is worth testing against an O(repo) crawl
print_status(
"\nSkipping filesystem sanity check for %s as it is an eden mount\n" % watch
)
return
print_status(
"\nSanity checking the filesystem at %s against watchman; this may take a couple of minutes."
% watch
)
root_config = watchman.query("get-config", watch)["config"]
ignores = []
if "ignore_dirs" in root_config:
ignores = root_config["ignore_dirs"]
ignores.append(".hg")
ignores.append(".git")
ignores.append(".svn")
print_status("Crawling %s..." % watch)
start = time.time()
fs = walk_root(watch, case_sensitive, ignores)
print_status("(took %ds)" % (time.time() - start))
start = time.time()
print_status("Interrogating watchman about %s..." % watch)
fields = ["name", "mode", "size", "mtime_f", "oclock"]
if os.name == "posix":
fields.append("ino")
files = watchman.query(
"query",
watch,
{
"expression": [
"allof",
[
"not",
[
"anyof",
["dirname", ".git"],
["dirname", ".hg"],
["dirname", ".svn"],
["name", ".git", "wholename"],
["name", ".svn", "wholename"],
["name", ".hg", "wholename"],
],
],
"exists",
],
"fields": fields,
},
)
print_status("(took %ds)" % (time.time() - start))
print_status("Comparing results...")
phantoms = []
bad_deletes = []
mismatched = []
missing = []
all_names_in_watchman = set()
def diff_item(w_item, fs_item):
diffs = []
if w_item["name"] != fs_item[0]:
diffs.append(
"watchman name is `%s` vs fs `%s" % (w_item["name"], fs_item[0])
)
st = fs_item[1]
if w_item["mode"] != st.st_mode:
diffs.append(
"watchman mode is 0%o vs fs 0%o" % (w_item["mode"], st.st_mode)
)
if w_item["size"] != st.st_size and not stat.S_ISDIR(st.st_mode):
diffs.append("watchman size is %d vs fs %d" % (w_item["size"], st.st_size))
if w_item["mtime_f"] != st.st_mtime:
diffs.append(
"watchman mtime is %s vs fs %s" % (w_item["mtime_f"], st.st_mtime)
)
if os.name == "posix" and (w_item["ino"] != st.st_ino):
diffs.append("watchman ino is %d vs fs %d" % (w_item["ino"], st.st_ino))
if len(diffs) > 0:
diffs.append(" oclock is %s" % w_item["oclock"])
return diffs
return None
for f in files["files"]:
key = f["name"]
if not case_sensitive:
key = key.lower()
all_names_in_watchman.add(key)
if key not in fs:
phantoms.append(f)
else:
diff = diff_item(f, fs[key])
if diff:
print("Conflicting information for %s:" % f["name"])
for d in diff:
print(d)
for key in fs:
if key not in all_names_in_watchman:
missing.append(fs[key])
print_status(
"There are %d items reported by watchman that do not exist on the fs:"
% len(phantoms)
)
if len(phantoms) > 0:
for item in phantoms:
print(item)
print_status(
"There are %d items on the filesystem not reported by watchman:" % len(missing)
)
if len(missing) > 0:
# Let's see if watchman had previously seen any of these
names = ["anyof"]
for item in missing:
name = item[0]
print(name, item[1])
names.append(["name", name, "wholename"])
files = watchman.query("query", watch, {"expression": names, "fields": fields})
print("This is what watchman knows about this set of files:")
for f in files["files"]:
print(f)
def passthru(*args, **kwargs):
sys.stdout.flush()
try:
subprocess.call(*args, **kwargs)
except Exception as e:
print("Error while running %s %s: %s" % (args, kwargs, e))
if not os.isatty(sys.stdout.fileno()):
sys.stderr.write(
"(most output is going to your pipe, some short summary will show here on stderr)\n" | )
# Print the basic system info
print("Platform: %s" % sys.platform)
if os.name == "posix":
uname = os.uname()
print_table([uname])
print("Running watchman-diag as uid %d" % os.getuid())
print()
if os.name == "posix":
print("RPM version: (rpm -q fb-watchman)")
passthru(["rpm", "-q", "fb-watchman"])
if sys.platform == "win32":
print("choco package version:")
passthru(["choco", "list", "--local-only", "watchman"])
print("CLI version: (watchman -v)")
passthru(["watchman", "--no-spawn", "-v"])
print()
watchman_env_vars = [v for v in os.environ.keys() if v.startswith("WATCHMAN_")]
if watchman_env_vars:
print()
print(
"!!!WARNING!!! The following watchman related environment variables are set",
"(this is unusual and may cause problems):",
)
for var in watchman_env_vars:
print("%s=%s" % (var, os.getenv(var)))
print()
procinfo = ProcessInfo()
case_sensitive = True
if sys.platform == "linux":
inotify = InotifyInfo(procinfo)
print("Inotify watch information:")
print_table(inotify.get_watches())
print()
if sys.platform == "darwin":
print("launchd info:")
passthru(["launchctl", "list", "com.github.facebook.watchman"])
case_sensitive = False
if sys.platform == "win32":
case_sensitive = False
def collect_state_info(path):
try:
print("State information from %s" % path)
print()
print("State file: %s/state" % path)
try:
with open(os.path.join(path, "state"), "r") as f:
print(f.read())
except FileNotFoundError:
pass
try:
with open(os.path.join(path, "log"), "r") as f:
lines = f.readlines()
tail = lines[-300:]
print("Log samples: %s/log" % path)
for line in tail:
print(line.rstrip())
except FileNotFoundError:
pass
except Exception as e:
print("# %s" % str(e))
pass
print()
for root in [
"/var/facebook/watchman",
"/opt/facebook/var/run/watchman",
"/opt/facebook/watchman/var/run/watchman",
os.environ.get("TEMP"),
os.environ.get("TMP"),
]:
if root is None or root == "":
continue
for path in glob.glob("%s/*-state" % root):
collect_state_info(path)
appdata = os.environ.get("LOCALAPPDATA")
if appdata:
watchman_appdata = os.path.join(appdata, "watchman")
if os.path.exists(watchman_appdata):
collect_state_info(watchman_appdata)
if os.name == "posix":
print("List of running watchman processes:")
passthru("ps -ef | grep watchman", shell=True)
print()
# We do these last, as they depend on watchman being able to respond:
if os.name != "posix" or (os.getuid() != 0 or not os.environ.get("SUDO_UID")):
# Safe to run watchman commands
print("Watchman service information:")
watchman = pywatchman.client(timeout=600)
print(watchman.query("version"))
print("\nStatus:\n")
passthru(["watchman", "--pretty", "debug-status"])
watches = watchman.query("watch-list")["roots"]
print("Watches:\n%s\n" % watches)
for watch in watches:
cross_check_watch(watchman, watch, case_sensitive)
collect_watch_info(watchman, watch) | random_line_split | |
SurfstoreClientUtils.go | package surfstore
import (
"bufio"
"fmt"
"io"
"io/ioutil"
"log"
"os"
"path/filepath"
"strconv"
"strings"
)
/*
Implement the logic for a client syncing with the server here.
*/
func ClientSync(client RPCClient) {
// ================================== create a map for old index.txt===============================
fileMetaMap := readIndexFile(client)
// =============================create map for local dir======================
fileMetaMap = updateFileMetaMapWithLocalFiles(client, fileMetaMap)
// PrintMetaMap(fileMetaMap)
// ============================ Now fileMetaMap is updated; try to compare with the server map ===============
dummyRPCParam := true
// the idea is: if we cannot update the server's copy, download its version and retry
retryMax := 3
for i := 0; i < retryMax; i++ {
// get server map
remoteFileMetaMap := make(map[string]FileMetaData)
err := client.GetFileInfoMap(&dummyRPCParam, &remoteFileMetaMap)
if err != nil {
log.Println("Failed to get remote file meta map", err)
continue
}
isUploadFailed := false
// working on existing files in server and local
for remoteFilename, remoteFileMeta := range remoteFileMetaMap {
// if server match local file
if localFileMeta, ok := fileMetaMap[remoteFilename]; ok {
// modify and upload newest file to server
if localFileMeta.Version > remoteFileMeta.Version {
isUploadFailed = isUploadFailed || !uploadFile(client, localFileMeta)
} else {
err := downloadFile(client, localFileMeta, &remoteFileMeta)
if err == nil {
*localFileMeta = remoteFileMeta
}
}
} else {
err := downloadFile(client, nil, &remoteFileMeta)
if err == nil {
localFileMeta := remoteFileMeta
fileMetaMap[remoteFilename] = &localFileMeta
}
}
}
// working on files only on local -> upload
for localFilename, localFileMeta := range fileMetaMap {
if _, ok := remoteFileMetaMap[localFilename]; !ok {
isUploadFailed = isUploadFailed || !uploadFile(client, localFileMeta)
}
}
if !isUploadFailed {
break
}
}
// ================================== Finally, write into an index file =============================
writeIndexFile(client, fileMetaMap)
}
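// Summary of the sync flow implemented above: read the old index, fold in
// local changes (bumping versions), then for up to retryMax rounds compare
// against the server's map -- uploading files where our version is newer,
// downloading otherwise -- and finally persist the merged index to index.txt.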
func uploadFile(client RPCClient, fileMeta *FileMetaData) bool {
// divide into blocks
filename := fileMeta.Filename
if fileMeta.IsTombstone() {
var latestVersion int
err := client.UpdateFile(fileMeta, &latestVersion)
if err != nil {
return false
}
return fileMeta.Version == latestVersion
}
file, err := os.Open(filepath.Join(client.BaseDir, filename))
if err != nil {
log.Println("uploadFile: Failed to open file", filename, err)
return false
}
blockSize := client.BlockSize
fileInfo, _ := file.Stat()
fileSize := fileInfo.Size()
numBlocks := fileSize / int64(blockSize)
if fileSize%int64(blockSize) != 0 {
numBlocks++
}
if numBlocks == 0 {
// for empty file
blockBuffer := make([]byte, 0)
block := Block{BlockData: blockBuffer, BlockSize: 0}
succ := false
err := client.PutBlock(block, &succ)
if !succ || err != nil {
log.Println("uploadFile: Failed to put empty block to the server")
return false
}
} else {
for i := int64(0); i < numBlocks; i++ {
currentBlockOffset := i * int64(blockSize)
var currentBlockSize int
if blockSize < int(fileSize-currentBlockOffset) {
currentBlockSize = blockSize
} else {
currentBlockSize = int(fileSize - currentBlockOffset)
}
block := NewBlock(currentBlockSize)
_, err := file.Read(block.BlockData)
if err != nil {
return false
}
// write the block to the server only if it is missing there:
// HasBlock tells us whether the server already stores this hash;
// if it does not, upload the block with PutBlock
succ := false
err = client.HasBlock(block.Hash(), &succ)
if err != nil {
return false
}
if !succ {
succ := false
err := client.PutBlock(block, &succ)
if !succ || err != nil {
log.Println("uploadFile: Failed to put block to server")
return false
}
}
}
}
latestVersion := -1
err = client.UpdateFile(fileMeta, &latestVersion)
if err != nil {
return false
}
return fileMeta.Version == latestVersion
}
func readIndexFile(client RPCClient) map[string]*FileMetaData {
// For read access.
indexFilename := filepath.Join(client.BaseDir, "index.txt")
indexFile, err := os.Open(indexFilename)
if err != nil {
// index.txt does not exist; create it
indexFile, err = os.Create(indexFilename)
if err != nil {
panic(err)
}
}
defer indexFile.Close()
fileMetaMap := make(map[string]*FileMetaData)
// read index file
reader := bufio.NewReader(indexFile)
isReaderEnded := false
for !isReaderEnded {
line, err := reader.ReadString('\n')
isReaderEnded = err == io.EOF
if err != nil && err != io.EOF {
panic(err)
}
if line == "" {
break
}
text := strings.TrimSuffix(line, "\n")
lineParts := strings.Split(text, ",")
if len(lineParts) == 3 {
filename := lineParts[0]
version, _ := strconv.Atoi(lineParts[1])
blockHashListString := lineParts[2]
blockHashList := strings.Split(blockHashListString, " ")
fileMeta := FileMetaData{
Filename: filename,
Version: version,
BlockHashList: blockHashList,
}
fileMetaMap[filename] = &fileMeta
} else {
panic("Invalid index.txt")
}
}
return fileMetaMap
}
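// Illustrative sketch (not part of the original client): parsing a single
// index line the same way readIndexFile does. The file name and hashes here
// are made up for demonstration.
func parseIndexLineExample() {
line := "report.txt,3,ab12 cd34 ef56"
lineParts := strings.Split(line, ",")
version, _ := strconv.Atoi(lineParts[1]) // "3" -> 3
blockHashes := strings.Split(lineParts[2], " ")
fmt.Println(lineParts[0], version, blockHashes) // report.txt 3 [ab12 cd34 ef56]
}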
func updateFileMetaMapWithLocalFiles(client RPCClient, fileMetaMap map[string]*FileMetaData) map[string]*FileMetaData {
localFileMap := getLocalFileHashBlockListMap(client)
// iterate over the file meta map and see if old file exists
for filename, fileMeta := range fileMetaMap {
if localBlockHashList, ok := localFileMap[filename]; ok {
// find the existing file
if len(localBlockHashList) != len(fileMeta.BlockHashList) {
fileMeta.BlockHashList = localBlockHashList
fileMeta.Version++
} else {
isFileUpdated := false
for i, blockHash := range localBlockHashList {
if blockHash != fileMeta.BlockHashList[i] {
fileMeta.BlockHashList[i] = blockHash
isFileUpdated = true
}
}
if isFileUpdated {
fileMeta.Version++
}
}
} else {
// file does not exist in the dir, so it should be deleted
// if the file is not already marked as deleted in the file meta, update it
if !fileMeta.IsTombstone() {
fileMeta.MarkTombstone()
fileMeta.Version++
}
}
}
// iterate over the local files and create new files
for filename, localBlockHashList := range localFileMap {
if _, ok := fileMetaMap[filename]; !ok {
fileMeta := FileMetaData{
Filename: filename,
Version: 1,
BlockHashList: localBlockHashList,
}
fileMetaMap[filename] = &fileMeta
}
}
return fileMetaMap
}
func | (client RPCClient) map[string][]string {
// open directory
localFileInfos, err := ioutil.ReadDir(client.BaseDir)
if err != nil {
panic(err)
}
localFileMap := make(map[string][]string)
// iterate over all the local files
for _, fileInfo := range localFileInfos {
if fileInfo.Name() == "index.txt" {
continue
}
// check if the file is modified
file, err := os.Open(filepath.Join(client.BaseDir, fileInfo.Name()))
if err != nil {
panic(err)
}
// divide into blocks
fileSize := fileInfo.Size()
blockSize := client.BlockSize
numBlocks := fileSize / int64(blockSize)
if fileSize%int64(blockSize) != 0 {
numBlocks++
}
var blockHashList []string
// for empty file
if numBlocks == 0 {
// write to hash
block := NewBlock(0)
blockHashList = append(blockHashList, block.Hash())
}
for i := int64(0); i < numBlocks; i++ {
currentBlockOffset := i * int64(blockSize)
var currentBlockSize int
if blockSize < int(fileSize-currentBlockOffset) {
currentBlockSize = blockSize
} else {
currentBlockSize = int(fileSize - currentBlockOffset)
}
block := NewBlock(currentBlockSize)
_, err := file.Read(block.BlockData)
if err != nil {
panic("Invalid file read")
}
blockHashList = append(blockHashList, block.Hash())
}
localFileMap[fileInfo.Name()] = blockHashList
}
return localFileMap
}
func writeIndexFile(client RPCClient, fileMetaMap map[string]*FileMetaData) {
// err := os.Truncate(filepath.Join(client.BaseDir, "index.txt"), 0)
file, err := os.OpenFile(filepath.Join(client.BaseDir, "index.txt"), os.O_RDWR|os.O_TRUNC, 0755)
if err != nil {
panic(err)
}
for _, fileMeta := range fileMetaMap {
line := fmt.Sprintf(
"%s,%d,%s",
fileMeta.Filename,
fileMeta.Version,
strings.Join(fileMeta.BlockHashList, " "),
)
line = strings.TrimSpace(line)
_, err := file.WriteString(line + "\n")
if err != nil {
panic(err)
}
}
err = file.Sync()
if err != nil {
panic(err)
}
}
func downloadFile(client RPCClient, localFileMeta *FileMetaData, remoteFileMeta *FileMetaData) error {
if remoteFileMeta == nil {
return nil
}
if localFileMeta != nil && len(localFileMeta.BlockHashList) == len(remoteFileMeta.BlockHashList) {
isHashListEqual := true
for i, hash := range localFileMeta.BlockHashList {
if hash != remoteFileMeta.BlockHashList[i] {
isHashListEqual = false
break
}
}
if isHashListEqual {
return nil
}
}
var fileBlocks []*Block
if !remoteFileMeta.IsTombstone() {
// get block map for remote
blockMap := make(map[string]*Block)
for _, hash := range remoteFileMeta.BlockHashList {
blockMap[hash] = nil
}
// update map with local blocks with existing files
if localFileMeta != nil && !localFileMeta.IsTombstone() {
var fileInfo os.FileInfo
file, err := os.Open(filepath.Join(client.BaseDir, localFileMeta.Filename))
if err == nil {
fileInfo, err = file.Stat()
}
if err == nil {
// successfully access local file
// divide into blocks
fileSize := fileInfo.Size()
blockSize := client.BlockSize
// for empty file
if len(localFileMeta.BlockHashList) == 0 {
// write to hash
localBlock := NewBlock(0)
blockHash := localBlock.Hash()
block, found := blockMap[blockHash]
if found && block == nil {
blockMap[blockHash] = &localBlock
}
}
for i, localBlockHash := range localFileMeta.BlockHashList {
block, found := blockMap[localBlockHash]
if found && block == nil {
currentBlockOffset := int64(i) * int64(blockSize)
var currentBlockSize int
if blockSize < int(fileSize-currentBlockOffset) {
currentBlockSize = blockSize
} else {
currentBlockSize = int(fileSize - currentBlockOffset)
}
localBlock := NewBlock(currentBlockSize)
readBlockSize, err := file.ReadAt(localBlock.BlockData, currentBlockOffset)
if readBlockSize != currentBlockSize || (err != nil && err != io.EOF) {
continue
}
blockMap[localBlockHash] = &localBlock
}
}
}
}
for _, blockHash := range remoteFileMeta.BlockHashList {
if blockMap[blockHash] != nil {
localBlock := blockMap[blockHash]
fileBlocks = append(fileBlocks, localBlock)
} else {
var block Block
err := client.GetBlock(blockHash, &block)
if err != nil {
panic(err)
}
fileBlocks = append(fileBlocks, &block)
blockMap[blockHash] = &block
}
}
}
return writeFile(client, remoteFileMeta, &fileBlocks)
}
func writeFile(client RPCClient, fileMeta *FileMetaData, blocks *[]*Block) error {
if fileMeta.IsTombstone() {
return os.Remove(filepath.Join(client.BaseDir, fileMeta.Filename))
}
file, err := os.Create(filepath.Join(client.BaseDir, fileMeta.Filename))
if err != nil {
log.Println("writeFile: Failed to open file:", fileMeta.Filename, err)
return err
}
defer file.Close()
for _, block := range *blocks {
_, err := file.Write(block.BlockData)
if err != nil {
log.Println("writeFile: Failed to write to file:", fileMeta.Filename, err)
return err
}
}
return file.Sync()
}
/*
Helper function to print the contents of the metadata map.
*/
func PrintMetaMap(metaMap map[string]*FileMetaData) {
fmt.Println("--------BEGIN PRINT MAP--------")
for _, filemeta := range metaMap {
fmt.Println("\t", filemeta.Filename, filemeta.Version, filemeta.BlockHashList)
}
fmt.Println("---------END PRINT MAP--------")
}
| getLocalFileHashBlockListMap | identifier_name |
SurfstoreClientUtils.go | package surfstore
import (
"bufio"
"fmt"
"io"
"io/ioutil"
"log"
"os"
"path/filepath"
"strconv"
"strings"
)
/*
Implement the logic for a client syncing with the server here.
*/
func ClientSync(client RPCClient) {
// ================================== create a map for old index.txt===============================
fileMetaMap := readIndexFile(client)
// =============================create map for local dir======================
fileMetaMap = updateFileMetaMapWithLocalFiles(client, fileMetaMap)
// PrintMetaMap(fileMetaMap)
// ============================ Now fileMetaMap is updated; try to compare with the server map ===============
dummyRPCParam := true
// the idea is: if we cannot update the server's copy, download its version and retry
retryMax := 3
for i := 0; i < retryMax; i++ {
// get server map
remoteFileMetaMap := make(map[string]FileMetaData)
err := client.GetFileInfoMap(&dummyRPCParam, &remoteFileMetaMap)
if err != nil {
log.Println("Failed to get remote file meta map", err)
continue
}
isUploadFailed := false
// working on existing files in server and local
for remoteFilename, remoteFileMeta := range remoteFileMetaMap {
// if server match local file
if localFileMeta, ok := fileMetaMap[remoteFilename]; ok {
// modify and upload newest file to server
if localFileMeta.Version > remoteFileMeta.Version {
isUploadFailed = isUploadFailed || !uploadFile(client, localFileMeta)
} else {
err := downloadFile(client, localFileMeta, &remoteFileMeta)
if err == nil {
*localFileMeta = remoteFileMeta
}
}
} else {
err := downloadFile(client, nil, &remoteFileMeta)
if err == nil {
localFileMeta := remoteFileMeta
fileMetaMap[remoteFilename] = &localFileMeta
}
}
}
// working on files only on local -> upload
for localFilename, localFileMeta := range fileMetaMap {
if _, ok := remoteFileMetaMap[localFilename]; !ok {
isUploadFailed = isUploadFailed || !uploadFile(client, localFileMeta)
}
}
if !isUploadFailed {
break
}
}
// ================================== Finally, write into an index file =============================
writeIndexFile(client, fileMetaMap)
}
func uploadFile(client RPCClient, fileMeta *FileMetaData) bool {
// divide into blocks
filename := fileMeta.Filename
if fileMeta.IsTombstone() {
var latestVersion int
err := client.UpdateFile(fileMeta, &latestVersion)
if err != nil {
return false
}
return fileMeta.Version == latestVersion
}
file, err := os.Open(filepath.Join(client.BaseDir, filename))
if err != nil {
log.Println("uploadFile: Failed to open file", filename, err)
return false
}
blockSize := client.BlockSize
fileInfo, _ := file.Stat()
fileSize := fileInfo.Size()
numBlocks := fileSize / int64(blockSize)
if fileSize%int64(blockSize) != 0 {
numBlocks++
}
if numBlocks == 0 {
// for empty file
blockBuffer := make([]byte, 0)
block := Block{BlockData: blockBuffer, BlockSize: 0}
succ := false
err := client.PutBlock(block, &succ)
if !succ || err != nil |
} else {
for i := int64(0); i < numBlocks; i++ {
currentBlockOffset := i * int64(blockSize)
var currentBlockSize int
if blockSize < int(fileSize-currentBlockOffset) {
currentBlockSize = blockSize
} else {
currentBlockSize = int(fileSize - currentBlockOffset)
}
block := NewBlock(currentBlockSize)
_, err := file.Read(block.BlockData)
if err != nil {
return false
}
// write the block to the server only if it is missing there:
// HasBlock tells us whether the server already stores this hash;
// if it does not, upload the block with PutBlock
succ := false
err = client.HasBlock(block.Hash(), &succ)
if err != nil {
return false
}
if !succ {
succ := false
err := client.PutBlock(block, &succ)
if !succ || err != nil {
log.Println("uploadFile: Failed to put block to server")
return false
}
}
}
}
latestVersion := -1
err = client.UpdateFile(fileMeta, &latestVersion)
if err != nil {
return false
}
return fileMeta.Version == latestVersion
}
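// Sketch of the ceiling division used above and in the other block loops
// (an assumed helper, not part of the original file). Example: fileSize = 10,
// blockSize = 4 -> 10/4 = 2, remainder 2 != 0, so 3 blocks (4 + 4 + 2 bytes).
func numBlocksNeeded(fileSize int64, blockSize int) int64 {
numBlocks := fileSize / int64(blockSize)
if fileSize%int64(blockSize) != 0 {
numBlocks++
}
return numBlocks
}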
func readIndexFile(client RPCClient) map[string]*FileMetaData {
// For read access.
indexFilename := filepath.Join(client.BaseDir, "index.txt")
indexFile, err := os.Open(indexFilename)
if err != nil {
// index.txt does not exist; create it
indexFile, err = os.Create(indexFilename)
if err != nil {
panic(err)
}
}
defer indexFile.Close()
fileMetaMap := make(map[string]*FileMetaData)
// read index file
reader := bufio.NewReader(indexFile)
isReaderEnded := false
for !isReaderEnded {
line, err := reader.ReadString('\n')
isReaderEnded = err == io.EOF
if err != nil && err != io.EOF {
panic(err)
}
if line == "" {
break
}
text := strings.TrimSuffix(line, "\n")
lineParts := strings.Split(text, ",")
if len(lineParts) == 3 {
filename := lineParts[0]
version, _ := strconv.Atoi(lineParts[1])
blockHashListString := lineParts[2]
blockHashList := strings.Split(blockHashListString, " ")
fileMeta := FileMetaData{
Filename: filename,
Version: version,
BlockHashList: blockHashList,
}
fileMetaMap[filename] = &fileMeta
} else {
panic("Invalid index.txt")
}
}
return fileMetaMap
}
func updateFileMetaMapWithLocalFiles(client RPCClient, fileMetaMap map[string]*FileMetaData) map[string]*FileMetaData {
localFileMap := getLocalFileHashBlockListMap(client)
// iterate over the file meta map and see if old file exists
for filename, fileMeta := range fileMetaMap {
if localBlockHashList, ok := localFileMap[filename]; ok {
// find the existing file
if len(localBlockHashList) != len(fileMeta.BlockHashList) {
fileMeta.BlockHashList = localBlockHashList
fileMeta.Version++
} else {
isFileUpdated := false
for i, blockHash := range localBlockHashList {
if blockHash != fileMeta.BlockHashList[i] {
fileMeta.BlockHashList[i] = blockHash
isFileUpdated = true
}
}
if isFileUpdated {
fileMeta.Version++
}
}
} else {
// file does not exist in the dir, so it should be deleted
// if the file is not already marked as deleted in the file meta, update it
if !fileMeta.IsTombstone() {
fileMeta.MarkTombstone()
fileMeta.Version++
}
}
}
// iterate over the local files and create new files
for filename, localBlockHashList := range localFileMap {
if _, ok := fileMetaMap[filename]; !ok {
fileMeta := FileMetaData{
Filename: filename,
Version: 1,
BlockHashList: localBlockHashList,
}
fileMetaMap[filename] = &fileMeta
}
}
return fileMetaMap
}
func getLocalFileHashBlockListMap(client RPCClient) map[string][]string {
// open directory
localFileInfos, err := ioutil.ReadDir(client.BaseDir)
if err != nil {
panic(err)
}
localFileMap := make(map[string][]string)
// iterate over all the local files
for _, fileInfo := range localFileInfos {
if fileInfo.Name() == "index.txt" {
continue
}
// check if the file is modified
file, err := os.Open(filepath.Join(client.BaseDir, fileInfo.Name()))
if err != nil {
panic(err)
}
// divide into blocks
fileSize := fileInfo.Size()
blockSize := client.BlockSize
numBlocks := fileSize / int64(blockSize)
if fileSize%int64(blockSize) != 0 {
numBlocks++
}
var blockHashList []string
// for empty file
if numBlocks == 0 {
// write to hash
block := NewBlock(0)
blockHashList = append(blockHashList, block.Hash())
}
for i := int64(0); i < numBlocks; i++ {
currentBlockOffset := i * int64(blockSize)
var currentBlockSize int
if blockSize < int(fileSize-currentBlockOffset) {
currentBlockSize = blockSize
} else {
currentBlockSize = int(fileSize - currentBlockOffset)
}
block := NewBlock(currentBlockSize)
_, err := file.Read(block.BlockData)
if err != nil {
panic("Invalid file read")
}
blockHashList = append(blockHashList, block.Hash())
}
localFileMap[fileInfo.Name()] = blockHashList
}
return localFileMap
}
func writeIndexFile(client RPCClient, fileMetaMap map[string]*FileMetaData) {
// err := os.Truncate(filepath.Join(client.BaseDir, "index.txt"), 0)
file, err := os.OpenFile(filepath.Join(client.BaseDir, "index.txt"), os.O_RDWR|os.O_TRUNC, 0755)
if err != nil {
panic(err)
}
for _, fileMeta := range fileMetaMap {
line := fmt.Sprintf(
"%s,%d,%s",
fileMeta.Filename,
fileMeta.Version,
strings.Join(fileMeta.BlockHashList, " "),
)
line = strings.TrimSpace(line)
_, err := file.WriteString(line + "\n")
if err != nil {
panic(err)
}
}
err = file.Sync()
if err != nil {
panic(err)
}
}
func downloadFile(client RPCClient, localFileMeta *FileMetaData, remoteFileMeta *FileMetaData) error {
if remoteFileMeta == nil {
return nil
}
if localFileMeta != nil && len(localFileMeta.BlockHashList) == len(remoteFileMeta.BlockHashList) {
isHashListEqual := true
for i, hash := range localFileMeta.BlockHashList {
if hash != remoteFileMeta.BlockHashList[i] {
isHashListEqual = false
break
}
}
if isHashListEqual {
return nil
}
}
var fileBlocks []*Block
if !remoteFileMeta.IsTombstone() {
// get block map for remote
blockMap := make(map[string]*Block)
for _, hash := range remoteFileMeta.BlockHashList {
blockMap[hash] = nil
}
// update map with local blocks with existing files
if localFileMeta != nil && !localFileMeta.IsTombstone() {
var fileInfo os.FileInfo
file, err := os.Open(filepath.Join(client.BaseDir, localFileMeta.Filename))
if err == nil {
fileInfo, err = file.Stat()
}
if err == nil {
// successfully access local file
// divide into blocks
fileSize := fileInfo.Size()
blockSize := client.BlockSize
// for empty file
if len(localFileMeta.BlockHashList) == 0 {
// write to hash
localBlock := NewBlock(0)
blockHash := localBlock.Hash()
block, found := blockMap[blockHash]
if found && block == nil {
blockMap[blockHash] = &localBlock
}
}
for i, localBlockHash := range localFileMeta.BlockHashList {
block, found := blockMap[localBlockHash]
if found && block == nil {
currentBlockOffset := int64(i) * int64(blockSize)
var currentBlockSize int
if blockSize < int(fileSize-currentBlockOffset) {
currentBlockSize = blockSize
} else {
currentBlockSize = int(fileSize - currentBlockOffset)
}
localBlock := NewBlock(currentBlockSize)
readBlockSize, err := file.ReadAt(localBlock.BlockData, currentBlockOffset)
if readBlockSize != currentBlockSize || (err != nil && err != io.EOF) {
continue
}
blockMap[localBlockHash] = &localBlock
}
}
}
}
for _, blockHash := range remoteFileMeta.BlockHashList {
if blockMap[blockHash] != nil {
localBlock := blockMap[blockHash]
fileBlocks = append(fileBlocks, localBlock)
} else {
var block Block
err := client.GetBlock(blockHash, &block)
if err != nil {
panic(err)
}
fileBlocks = append(fileBlocks, &block)
blockMap[blockHash] = &block
}
}
}
return writeFile(client, remoteFileMeta, &fileBlocks)
}
func writeFile(client RPCClient, fileMeta *FileMetaData, blocks *[]*Block) error {
if fileMeta.IsTombstone() {
return os.Remove(filepath.Join(client.BaseDir, fileMeta.Filename))
}
file, err := os.Create(filepath.Join(client.BaseDir, fileMeta.Filename))
if err != nil {
log.Println("writeFile: Failed to open file:", fileMeta.Filename, err)
return err
}
defer file.Close()
for _, block := range *blocks {
_, err := file.Write(block.BlockData)
if err != nil {
log.Println("writeFile: Failed to write to file:", fileMeta.Filename, err)
return err
}
}
return file.Sync()
}
/*
Helper function to print the contents of the metadata map.
*/
func PrintMetaMap(metaMap map[string]*FileMetaData) {
fmt.Println("--------BEGIN PRINT MAP--------")
for _, filemeta := range metaMap {
fmt.Println("\t", filemeta.Filename, filemeta.Version, filemeta.BlockHashList)
}
fmt.Println("---------END PRINT MAP--------")
}
| {
log.Println("uploadFile: Failed to put empty block to the server")
return false
} | conditional_block |
SurfstoreClientUtils.go | package surfstore
import (
"bufio"
"fmt"
"io"
"io/ioutil"
"log"
"os"
"path/filepath"
"strconv"
"strings"
)
/*
Implement the logic for a client syncing with the server here.
*/
func ClientSync(client RPCClient) {
// ================================== create a map for old index.txt===============================
fileMetaMap := readIndexFile(client)
// =============================create map for local dir======================
fileMetaMap = updateFileMetaMapWithLocalFiles(client, fileMetaMap)
// PrintMetaMap(fileMetaMap)
// ============================ Now fileMetaMap is updated; try to compare with the server map ===============
dummyRPCParam := true
// the idea is: if we cannot update the server's copy, download its version and retry
retryMax := 3
for i := 0; i < retryMax; i++ {
// get server map
remoteFileMetaMap := make(map[string]FileMetaData)
err := client.GetFileInfoMap(&dummyRPCParam, &remoteFileMetaMap)
if err != nil {
log.Println("Failed to get remote file meta map", err)
continue
}
isUploadFailed := false
// working on existing files in server and local
for remoteFilename, remoteFileMeta := range remoteFileMetaMap {
// if server match local file
if localFileMeta, ok := fileMetaMap[remoteFilename]; ok {
// modify and upload newest file to server
if localFileMeta.Version > remoteFileMeta.Version {
isUploadFailed = isUploadFailed || !uploadFile(client, localFileMeta)
} else {
err := downloadFile(client, localFileMeta, &remoteFileMeta)
if err == nil {
*localFileMeta = remoteFileMeta
}
}
} else {
err := downloadFile(client, nil, &remoteFileMeta)
if err == nil {
localFileMeta := remoteFileMeta
fileMetaMap[remoteFilename] = &localFileMeta
}
}
}
// working on files only on local -> upload
for localFilename, localFileMeta := range fileMetaMap {
if _, ok := remoteFileMetaMap[localFilename]; !ok {
isUploadFailed = isUploadFailed || !uploadFile(client, localFileMeta)
}
}
if !isUploadFailed {
break
}
}
// ================================== Finally, write into an index file =============================
writeIndexFile(client, fileMetaMap)
}
func uploadFile(client RPCClient, fileMeta *FileMetaData) bool {
// divide into blocks
filename := fileMeta.Filename
if fileMeta.IsTombstone() {
var latestVersion int
err := client.UpdateFile(fileMeta, &latestVersion)
if err != nil {
return false
}
return fileMeta.Version == latestVersion
}
file, err := os.Open(filepath.Join(client.BaseDir, filename))
if err != nil {
log.Println("uploadFile: Failed to open file", filename, err)
return false
}
blockSize := client.BlockSize
fileInfo, _ := file.Stat()
fileSize := fileInfo.Size()
numBlocks := fileSize / int64(blockSize)
if fileSize%int64(blockSize) != 0 {
numBlocks++
}
if numBlocks == 0 {
// for empty file
blockBuffer := make([]byte, 0)
block := Block{BlockData: blockBuffer, BlockSize: 0}
succ := false
err := client.PutBlock(block, &succ)
if !succ || err != nil {
log.Println("uploadFile: Failed to put empty block to the server")
return false
}
} else {
for i := int64(0); i < numBlocks; i++ {
currentBlockOffset := i * int64(blockSize)
var currentBlockSize int
if blockSize < int(fileSize-currentBlockOffset) {
currentBlockSize = blockSize
} else {
currentBlockSize = int(fileSize - currentBlockOffset)
}
block := NewBlock(currentBlockSize)
_, err := file.Read(block.BlockData)
if err != nil {
return false
}
// write the block to the server only if it is missing there:
// HasBlock tells us whether the server already stores this hash;
// if it does not, upload the block with PutBlock
succ := false
err = client.HasBlock(block.Hash(), &succ)
if err != nil {
return false
}
if !succ {
succ := false
err := client.PutBlock(block, &succ)
if !succ || err != nil {
log.Println("uploadFile: Failed to put block to server")
return false
}
}
}
}
latestVersion := -1
err = client.UpdateFile(fileMeta, &latestVersion)
if err != nil {
return false
}
return fileMeta.Version == latestVersion
}
func readIndexFile(client RPCClient) map[string]*FileMetaData {
// For read access.
indexFilename := filepath.Join(client.BaseDir, "index.txt")
indexFile, err := os.Open(indexFilename)
if err != nil {
// index.txt does not exist; create it
indexFile, err = os.Create(indexFilename)
if err != nil {
panic(err)
}
}
defer indexFile.Close()
fileMetaMap := make(map[string]*FileMetaData)
// read index file
reader := bufio.NewReader(indexFile)
isReaderEnded := false
for !isReaderEnded {
line, err := reader.ReadString('\n')
isReaderEnded = err == io.EOF
if err != nil && err != io.EOF {
panic(err)
}
if line == "" {
break
}
text := strings.TrimSuffix(line, "\n")
lineParts := strings.Split(text, ",")
if len(lineParts) == 3 {
filename := lineParts[0]
version, _ := strconv.Atoi(lineParts[1])
blockHashListString := lineParts[2]
blockHashList := strings.Split(blockHashListString, " ")
fileMeta := FileMetaData{
Filename: filename,
Version: version,
BlockHashList: blockHashList,
}
fileMetaMap[filename] = &fileMeta
} else {
panic("Invalid index.txt")
}
}
return fileMetaMap
}
func updateFileMetaMapWithLocalFiles(client RPCClient, fileMetaMap map[string]*FileMetaData) map[string]*FileMetaData {
localFileMap := getLocalFileHashBlockListMap(client)
// iterate over the file meta map and see if old file exists
for filename, fileMeta := range fileMetaMap {
if localBlockHashList, ok := localFileMap[filename]; ok {
// find the existing file
if len(localBlockHashList) != len(fileMeta.BlockHashList) {
fileMeta.BlockHashList = localBlockHashList
fileMeta.Version++
} else {
isFileUpdated := false
for i, blockHash := range localBlockHashList {
if blockHash != fileMeta.BlockHashList[i] {
fileMeta.BlockHashList[i] = blockHash
isFileUpdated = true
}
}
if isFileUpdated {
fileMeta.Version++
}
}
} else {
// file does not exist in the dir, so it should be deleted
// if the file is not already marked as deleted in the file meta, update it
if !fileMeta.IsTombstone() {
fileMeta.MarkTombstone()
fileMeta.Version++
}
}
}
// iterate over the local files and create new files
for filename, localBlockHashList := range localFileMap {
if _, ok := fileMetaMap[filename]; !ok {
fileMeta := FileMetaData{
Filename: filename,
Version: 1,
BlockHashList: localBlockHashList,
}
fileMetaMap[filename] = &fileMeta
}
}
return fileMetaMap
}
func getLocalFileHashBlockListMap(client RPCClient) map[string][]string {
// open directory
localFileInfos, err := ioutil.ReadDir(client.BaseDir)
if err != nil {
panic(err)
}
localFileMap := make(map[string][]string)
// iterate over all the local files
for _, fileInfo := range localFileInfos {
if fileInfo.Name() == "index.txt" {
continue
}
// check if the file is modified
file, err := os.Open(filepath.Join(client.BaseDir, fileInfo.Name()))
if err != nil {
panic(err)
}
// divide into blocks
fileSize := fileInfo.Size()
blockSize := client.BlockSize
numBlocks := fileSize / int64(blockSize)
if fileSize%int64(blockSize) != 0 {
numBlocks++
}
var blockHashList []string
// for empty file
if numBlocks == 0 {
// write to hash
block := NewBlock(0)
blockHashList = append(blockHashList, block.Hash())
}
for i := int64(0); i < numBlocks; i++ {
currentBlockOffset := i * int64(blockSize)
var currentBlockSize int
if blockSize < int(fileSize-currentBlockOffset) {
currentBlockSize = blockSize
} else {
currentBlockSize = int(fileSize - currentBlockOffset)
}
block := NewBlock(currentBlockSize)
_, err := file.Read(block.BlockData)
if err != nil {
panic("Invalid file read")
}
blockHashList = append(blockHashList, block.Hash())
}
localFileMap[fileInfo.Name()] = blockHashList
}
return localFileMap
}
func writeIndexFile(client RPCClient, fileMetaMap map[string]*FileMetaData) {
// err := os.Truncate(filepath.Join(client.BaseDir, "index.txt"), 0)
file, err := os.OpenFile(filepath.Join(client.BaseDir, "index.txt"), os.O_RDWR|os.O_TRUNC, 0755)
if err != nil {
panic(err)
}
for _, fileMeta := range fileMetaMap {
line := fmt.Sprintf(
"%s,%d,%s",
fileMeta.Filename,
fileMeta.Version,
strings.Join(fileMeta.BlockHashList, " "),
)
line = strings.TrimSpace(line)
_, err := file.WriteString(line + "\n")
if err != nil {
panic(err)
}
}
err = file.Sync()
if err != nil {
panic(err)
}
}
func downloadFile(client RPCClient, localFileMeta *FileMetaData, remoteFileMeta *FileMetaData) error {
if remoteFileMeta == nil {
return nil
}
if localFileMeta != nil && len(localFileMeta.BlockHashList) == len(remoteFileMeta.BlockHashList) {
isHashListEqual := true
for i, hash := range localFileMeta.BlockHashList {
if hash != remoteFileMeta.BlockHashList[i] {
isHashListEqual = false
break
}
}
if isHashListEqual {
return nil
}
}
var fileBlocks []*Block
if !remoteFileMeta.IsTombstone() {
// get block map for remote
blockMap := make(map[string]*Block)
for _, hash := range remoteFileMeta.BlockHashList {
blockMap[hash] = nil
}
// update map with local blocks with existing files
if localFileMeta != nil && !localFileMeta.IsTombstone() {
var fileInfo os.FileInfo
file, err := os.Open(filepath.Join(client.BaseDir, localFileMeta.Filename))
if err == nil {
fileInfo, err = file.Stat()
}
if err == nil {
// successfully access local file
// divide into blocks
fileSize := fileInfo.Size()
blockSize := client.BlockSize
// for empty file
if len(localFileMeta.BlockHashList) == 0 {
// write to hash
localBlock := NewBlock(0)
blockHash := localBlock.Hash()
block, found := blockMap[blockHash]
if found && block == nil {
blockMap[blockHash] = &localBlock
}
}
for i, localBlockHash := range localFileMeta.BlockHashList { | block, found := blockMap[localBlockHash]
if found && block == nil {
currentBlockOffset := int64(i) * int64(blockSize)
var currentBlockSize int
if blockSize < int(fileSize-currentBlockOffset) {
currentBlockSize = blockSize
} else {
currentBlockSize = int(fileSize - currentBlockOffset)
}
localBlock := NewBlock(currentBlockSize)
readBlockSize, err := file.ReadAt(localBlock.BlockData, currentBlockOffset)
if readBlockSize != currentBlockSize || (err != nil && err != io.EOF) {
continue
}
blockMap[localBlockHash] = &localBlock
}
}
}
}
for _, blockHash := range remoteFileMeta.BlockHashList {
if blockMap[blockHash] != nil {
localBlock := blockMap[blockHash]
fileBlocks = append(fileBlocks, localBlock)
} else {
var block Block
err := client.GetBlock(blockHash, &block)
if err != nil {
panic(err)
}
fileBlocks = append(fileBlocks, &block)
blockMap[blockHash] = &block
}
}
}
return writeFile(client, remoteFileMeta, &fileBlocks)
}
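// Illustrative walk-through of the reuse logic above (the hashes are made
// up): if the remote hash list is [h1, h2, h3] and the local file still
// contains blocks hashing to h1 and h3, only h2 is fetched with
// client.GetBlock; the other two are read back from disk via file.ReadAt.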
func writeFile(client RPCClient, fileMeta *FileMetaData, blocks *[]*Block) error {
if fileMeta.IsTombstone() {
return os.Remove(filepath.Join(client.BaseDir, fileMeta.Filename))
}
file, err := os.Create(filepath.Join(client.BaseDir, fileMeta.Filename))
if err != nil {
log.Println("writeFile: Failed to open file:", fileMeta.Filename, err)
return err
}
defer file.Close()
for _, block := range *blocks {
_, err := file.Write(block.BlockData)
if err != nil {
log.Println("writeFile: Failed to write to file:", fileMeta.Filename, err)
return err
}
}
return file.Sync()
}
/*
Helper function to print the contents of the metadata map.
*/
func PrintMetaMap(metaMap map[string]*FileMetaData) {
fmt.Println("--------BEGIN PRINT MAP--------")
for _, filemeta := range metaMap {
fmt.Println("\t", filemeta.Filename, filemeta.Version, filemeta.BlockHashList)
}
fmt.Println("---------END PRINT MAP--------")
} | random_line_split | |
SurfstoreClientUtils.go | package surfstore
import (
"bufio"
"fmt"
"io"
"io/ioutil"
"log"
"os"
"path/filepath"
"strconv"
"strings"
)
/*
Implement the logic for a client syncing with the server here.
*/
func ClientSync(client RPCClient) {
// ================================== create a map for old index.txt===============================
fileMetaMap := readIndexFile(client)
// =============================create map for local dir======================
fileMetaMap = updateFileMetaMapWithLocalFiles(client, fileMetaMap)
// PrintMetaMap(fileMetaMap)
// ============================ Now fileMetaMap is updated; try to compare with the server map ===============
dummyRPCParam := true
// the idea is: if we cannot update the server's copy, download its version and retry
retryMax := 3
for i := 0; i < retryMax; i++ {
// get server map
remoteFileMetaMap := make(map[string]FileMetaData)
err := client.GetFileInfoMap(&dummyRPCParam, &remoteFileMetaMap)
if err != nil {
log.Println("Failed to get remote file meta map", err)
continue
}
isUploadFailed := false
// working on existing files in server and local
for remoteFilename, remoteFileMeta := range remoteFileMetaMap {
// if server match local file
if localFileMeta, ok := fileMetaMap[remoteFilename]; ok {
// modify and upload newest file to server
if localFileMeta.Version > remoteFileMeta.Version {
isUploadFailed = isUploadFailed || !uploadFile(client, localFileMeta)
} else {
err := downloadFile(client, localFileMeta, &remoteFileMeta)
if err == nil {
*localFileMeta = remoteFileMeta
}
}
} else {
err := downloadFile(client, nil, &remoteFileMeta)
if err == nil {
localFileMeta := remoteFileMeta
fileMetaMap[remoteFilename] = &localFileMeta
}
}
}
// working on files only on local -> upload
for localFilename, localFileMeta := range fileMetaMap {
if _, ok := remoteFileMetaMap[localFilename]; !ok {
isUploadFailed = isUploadFailed || !uploadFile(client, localFileMeta)
}
}
if !isUploadFailed {
break
}
}
// ================================== Finally, write into an index file =============================
writeIndexFile(client, fileMetaMap)
}
func uploadFile(client RPCClient, fileMeta *FileMetaData) bool {
// divide into blocks
filename := fileMeta.Filename
if fileMeta.IsTombstone() {
var latestVersion int
err := client.UpdateFile(fileMeta, &latestVersion)
if err != nil {
return false
}
return fileMeta.Version == latestVersion
}
file, err := os.Open(filepath.Join(client.BaseDir, filename))
if err != nil {
log.Println("uploadFile: Failed to open file", filename, err)
return false
}
blockSize := client.BlockSize
fileInfo, _ := file.Stat()
fileSize := fileInfo.Size()
numBlocks := fileSize / int64(blockSize)
if fileSize%int64(blockSize) != 0 {
numBlocks++
}
if numBlocks == 0 {
// for empty file
blockBuffer := make([]byte, 0)
block := Block{BlockData: blockBuffer, BlockSize: 0}
succ := false
err := client.PutBlock(block, &succ)
if !succ || err != nil {
log.Println("uploadFile: Failed to put empty block to the server")
return false
}
} else {
for i := int64(0); i < numBlocks; i++ {
currentBlockOffset := i * int64(blockSize)
var currentBlockSize int
if blockSize < int(fileSize-currentBlockOffset) {
currentBlockSize = blockSize
} else {
currentBlockSize = int(fileSize - currentBlockOffset)
}
block := NewBlock(currentBlockSize)
_, err := file.Read(block.BlockData)
if err != nil {
return false
}
// write the block to the server only if it is missing there:
// HasBlock tells us whether the server already stores this hash;
// if it does not, upload the block with PutBlock
succ := false
err = client.HasBlock(block.Hash(), &succ)
if err != nil {
return false
}
if !succ {
succ := false
err := client.PutBlock(block, &succ)
if !succ || err != nil {
log.Println("uploadFile: Failed to put block to server")
return false
}
}
}
}
latestVersion := -1
err = client.UpdateFile(fileMeta, &latestVersion)
if err != nil {
return false
}
return fileMeta.Version == latestVersion
}
func readIndexFile(client RPCClient) map[string]*FileMetaData |
func updateFileMetaMapWithLocalFiles(client RPCClient, fileMetaMap map[string]*FileMetaData) map[string]*FileMetaData {
localFileMap := getLocalFileHashBlockListMap(client)
// iterate over the file meta map and see if old file exists
for filename, fileMeta := range fileMetaMap {
if localBlockHashList, ok := localFileMap[filename]; ok {
// find the existing file
if len(localBlockHashList) != len(fileMeta.BlockHashList) {
fileMeta.BlockHashList = localBlockHashList
fileMeta.Version++
} else {
isFileUpdated := false
for i, blockHash := range localBlockHashList {
if blockHash != fileMeta.BlockHashList[i] {
fileMeta.BlockHashList[i] = blockHash
isFileUpdated = true
}
}
if isFileUpdated {
fileMeta.Version++
}
}
} else {
// file does not exist in the dir, so it should be deleted
// if the file is not already marked as deleted in the file meta, update it
if !fileMeta.IsTombstone() {
fileMeta.MarkTombstone()
fileMeta.Version++
}
}
}
// iterate over the local files and create new files
for filename, localBlockHashList := range localFileMap {
if _, ok := fileMetaMap[filename]; !ok {
fileMeta := FileMetaData{
Filename: filename,
Version: 1,
BlockHashList: localBlockHashList,
}
fileMetaMap[filename] = &fileMeta
}
}
return fileMetaMap
}
func getLocalFileHashBlockListMap(client RPCClient) map[string][]string {
// open directory
localFileInfos, err := ioutil.ReadDir(client.BaseDir)
if err != nil {
panic(err)
}
localFileMap := make(map[string][]string)
// iterate over all the local files
for _, fileInfo := range localFileInfos {
if fileInfo.Name() == "index.txt" {
continue
}
// check if the file is modified
file, err := os.Open(filepath.Join(client.BaseDir, fileInfo.Name()))
if err != nil {
panic(err)
}
// divide into blocks
fileSize := fileInfo.Size()
blockSize := client.BlockSize
numBlocks := fileSize / int64(blockSize)
if fileSize%int64(blockSize) != 0 {
numBlocks++
}
var blockHashList []string
// for empty file
if numBlocks == 0 {
// write to hash
block := NewBlock(0)
blockHashList = append(blockHashList, block.Hash())
}
for i := int64(0); i < numBlocks; i++ {
currentBlockOffset := i * int64(blockSize)
var currentBlockSize int
if blockSize < int(fileSize-currentBlockOffset) {
currentBlockSize = blockSize
} else {
currentBlockSize = int(fileSize - currentBlockOffset)
}
block := NewBlock(currentBlockSize)
_, err := file.Read(block.BlockData)
if err != nil {
panic("Invalid file read")
}
blockHashList = append(blockHashList, block.Hash())
}
localFileMap[fileInfo.Name()] = blockHashList
}
return localFileMap
}
func writeIndexFile(client RPCClient, fileMetaMap map[string]*FileMetaData) {
// err := os.Truncate(filepath.Join(client.BaseDir, "index.txt"), 0)
file, err := os.OpenFile(filepath.Join(client.BaseDir, "index.txt"), os.O_RDWR|os.O_TRUNC, 0755)
if err != nil {
panic(err)
}
for _, fileMeta := range fileMetaMap {
line := fmt.Sprintf(
"%s,%d,%s",
fileMeta.Filename,
fileMeta.Version,
strings.Join(fileMeta.BlockHashList, " "),
)
line = strings.TrimSpace(line)
_, err := file.WriteString(line + "\n")
if err != nil {
panic(err)
}
}
err = file.Sync()
if err != nil {
panic(err)
}
}
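// Each line written above has the same shape readIndexFile expects back,
// e.g. "report.txt,3,ab12 cd34 ef56" (illustrative values): filename,
// version, then the space-separated block hash list.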
func downloadFile(client RPCClient, localFileMeta *FileMetaData, remoteFileMeta *FileMetaData) error {
if remoteFileMeta == nil {
return nil
}
if localFileMeta != nil && len(localFileMeta.BlockHashList) == len(remoteFileMeta.BlockHashList) {
isHashListEqual := true
for i, hash := range localFileMeta.BlockHashList {
if hash != remoteFileMeta.BlockHashList[i] {
isHashListEqual = false
break
}
}
if isHashListEqual {
return nil
}
}
var fileBlocks []*Block
if !remoteFileMeta.IsTombstone() {
// get block map for remote
blockMap := make(map[string]*Block)
for _, hash := range remoteFileMeta.BlockHashList {
blockMap[hash] = nil
}
// update map with local blocks with existing files
if localFileMeta != nil && !localFileMeta.IsTombstone() {
var fileInfo os.FileInfo
file, err := os.Open(filepath.Join(client.BaseDir, localFileMeta.Filename))
if err == nil {
fileInfo, err = file.Stat()
}
if err == nil {
// successfully access local file
// divide into blocks
fileSize := fileInfo.Size()
blockSize := client.BlockSize
// for empty file
if len(localFileMeta.BlockHashList) == 0 {
// write to hash
localBlock := NewBlock(0)
blockHash := localBlock.Hash()
block, found := blockMap[blockHash]
if found && block == nil {
blockMap[blockHash] = &localBlock
}
}
for i, localBlockHash := range localFileMeta.BlockHashList {
block, found := blockMap[localBlockHash]
if found && block == nil {
currentBlockOffset := int64(i) * int64(blockSize)
var currentBlockSize int
if blockSize < int(fileSize-currentBlockOffset) {
currentBlockSize = blockSize
} else {
currentBlockSize = int(fileSize - currentBlockOffset)
}
localBlock := NewBlock(currentBlockSize)
readBlockSize, err := file.ReadAt(localBlock.BlockData, currentBlockOffset)
if readBlockSize != currentBlockSize || (err != nil && err != io.EOF) {
continue
}
blockMap[localBlockHash] = &localBlock
}
}
}
}
for _, blockHash := range remoteFileMeta.BlockHashList {
if blockMap[blockHash] != nil {
localBlock := blockMap[blockHash]
fileBlocks = append(fileBlocks, localBlock)
} else {
var block Block
err := client.GetBlock(blockHash, &block)
if err != nil {
panic(err)
}
fileBlocks = append(fileBlocks, &block)
blockMap[blockHash] = &block
}
}
}
return writeFile(client, remoteFileMeta, &fileBlocks)
}
func writeFile(client RPCClient, fileMeta *FileMetaData, blocks *[]*Block) error {
if fileMeta.IsTombstone() {
return os.Remove(filepath.Join(client.BaseDir, fileMeta.Filename))
}
file, err := os.Create(filepath.Join(client.BaseDir, fileMeta.Filename))
if err != nil {
log.Println("writeFile: Failed to open file:", fileMeta.Filename, err)
return err
}
defer file.Close()
for _, block := range *blocks {
_, err := file.Write(block.BlockData)
if err != nil {
log.Println("writeFile: Failed to write to file:", fileMeta.Filename, err)
return err
}
}
return file.Sync()
}
/*
Helper function to print the contents of the metadata map.
*/
func PrintMetaMap(metaMap map[string]*FileMetaData) {
fmt.Println("--------BEGIN PRINT MAP--------")
for _, filemeta := range metaMap {
fmt.Println("\t", filemeta.Filename, filemeta.Version, filemeta.BlockHashList)
}
fmt.Println("---------END PRINT MAP--------")
}
| {
// For read access.
indexFilename := filepath.Join(client.BaseDir, "index.txt")
indexFile, err := os.Open(indexFilename)
if err != nil {
// index.txt does not exist; create it
indexFile, err = os.Create(indexFilename)
if err != nil {
panic(err)
}
}
defer indexFile.Close()
fileMetaMap := make(map[string]*FileMetaData)
// read index file
reader := bufio.NewReader(indexFile)
isReaderEnded := false
for !isReaderEnded {
line, err := reader.ReadString('\n')
isReaderEnded = err == io.EOF
if err != nil && err != io.EOF {
panic(err)
}
if line == "" {
break
}
text := strings.TrimSuffix(line, "\n")
lineParts := strings.Split(text, ",")
if len(lineParts) == 3 {
filename := lineParts[0]
version, _ := strconv.Atoi(lineParts[1])
blockHashListString := lineParts[2]
blockHashList := strings.Split(blockHashListString, " ")
fileMeta := FileMetaData{
Filename: filename,
Version: version,
BlockHashList: blockHashList,
}
fileMetaMap[filename] = &fileMeta
} else {
panic("Invalid index.txt")
}
}
return fileMetaMap
} | identifier_body |
player.rs | use std::collections::HashMap;
use crate::card::{Card, Colour};
use crate::game::{Action, VisibleGame};
use crate::power::Power;
use crate::power::ScienceItem;
use crate::resources::{ProducedResources, Resources};
use crate::wonder::{WonderBoard, WonderSide, WonderType};
use std::fmt::Debug;
use crate::algorithms::PlayingAlgorithm;
use std::mem;
#[derive(Debug)]
pub struct Player {
algorithm: Box<dyn PlayingAlgorithm>,
wonder: WonderBoard,
built_structures: Vec<Card>,
built_wonder_stages: Vec<Option<Card>>, // TODO: how to represent this?
coins: u32,
hand: Vec<Card>,
}
#[allow(dead_code)]
impl Player {
pub fn new(
wonder_type: WonderType,
wonder_side: WonderSide,
algorithm: Box<dyn PlayingAlgorithm>) -> Player {
Player {
algorithm,
wonder: WonderBoard { wonder_type, wonder_side },
built_structures: vec![],
built_wonder_stages: vec![],
coins: 3,
hand: vec![],
}
}
pub fn algorithm(&self) -> &dyn PlayingAlgorithm {
&*self.algorithm
}
pub fn wonder(&self) -> &WonderBoard {
&self.wonder
}
pub fn built_structures(&self) -> &Vec<Card> {
&self.built_structures
}
pub fn coins(&self) -> u32 {
self.coins
}
pub fn hand(&self) -> &Vec<Card> {
&self.hand
}
/// Performs the given [`Action`] on the current player, for example moving a card from the player's hand into the
/// player's built structures. Returns `true` if the action is legal, `false` otherwise (in which case this function
/// does nothing).
pub fn do_action(&mut self, action: &Action, visible_game: &VisibleGame, discard_pile: &mut Vec<Card>) -> bool {
// Removes and returns the given card from the player's hand.
fn remove_from_hand(hand: &mut Vec<Card>, card: &Card) -> Card {
let index = hand.iter().position(|c| c == card).unwrap();
hand.swap_remove(index)
}
if self.can_play(action, visible_game) {
match action {
Action::Build(card) => {
let card_from_hand = remove_from_hand(&mut self.hand, card);
self.built_structures.push(card_from_hand);
self.coins -= card_from_hand.cost().coins;
// TODO: deal with borrowed resources
}
Action::Wonder(_) => todo!(),
Action::Discard(card) => {
discard_pile.push(remove_from_hand(&mut self.hand, card));
self.coins += 3;
}
}
true
} else {
false
}
}
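// Usage sketch (hypothetical player state; mirrors the discard tests below):
// let mut discard_pile = vec![];
// let ok = player.do_action(&Action::Discard(LumberYard), &visible_game, &mut discard_pile);
// assert!(ok); // discarding is always legal and pays the player 3 coins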
/// Replaces this player's hand with the given cards, returning the hand the player had before the swap.
pub fn swap_hand(&mut self, new_hand: Vec<Card>) -> Vec<Card> {
mem::replace(&mut self.hand, new_hand)
}
fn evaluate_green(colour_cards: &[Card]) -> f32 {
let mut science_items_count: HashMap<ScienceItem, i32> = HashMap::new();
science_items_count.insert(ScienceItem::Compass, 0);
science_items_count.insert(ScienceItem::Cog, 0);
science_items_count.insert(ScienceItem::Tablet, 0);
for card in colour_cards.iter() {
if let Power::Science(science_items) = card.power() {
for science_item in science_items.iter() {
let count = science_items_count.entry(*science_item).or_insert(0);
*count += 1;
}
}
}
let score_for_sets_of_identical_symbols: f32 = science_items_count.iter()
.filter(|(_, count)| **count > 0)
.map(|(_, count)| {
(*count as f32).powf(2f32)
})
.sum();
let score_for_all_symbol_groups: f32 = 7f32 *
*science_items_count.iter().min_by_key(|(_, count)| *count).unwrap().1 as f32;
score_for_all_symbol_groups + score_for_sets_of_identical_symbols
}
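// Worked example (the rulebook case exercised in the tests below): with
// 3 compasses, 2 cogs and 1 tablet, the identical-symbol sets score
// 3^2 + 2^2 + 1^2 = 14 and the complete groups score 7 * min(3, 2, 1) = 7,
// for a total of 21.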
fn evaluate_colour(cards_of_given_colour: &[Card]) -> f32 {
let colour = cards_of_given_colour.get(0).unwrap().colour();
match colour {
Colour::Green => Self::evaluate_green(cards_of_given_colour),
_ => cards_of_given_colour.iter().map(|card| card.immediate_strength()).sum(),
}
}
fn strength_internal(cards: &[Card]) -> f32 {
let mut colour_to_structure = HashMap::new();
for structure in cards.iter() {
let colour_structures = colour_to_structure.entry(structure.colour()).or_insert_with(Vec::new);
colour_structures.push(*structure)
}
colour_to_structure.iter()
.map(|colour_entry| Self::evaluate_colour(colour_entry.1))
.sum()
}
/// Returns this player's "strength" -- a number where a higher value means the player is doing better than a lower
/// value.
pub fn strength(&self) -> f32 {
Self::strength_internal(&self.built_structures)
}
pub fn can_play(&self, action: &Action, visible_game: &VisibleGame) -> bool {
match action {
Action::Build(card) => self.can_play_card(card, visible_game),
Action::Wonder(_) => todo!(),
Action::Discard(_) => true,
}
}
/// Returns `true` if the user can afford to play the given card, given the resources the player
/// has access to.
///
/// TODO: doesn't currently deal with borrowing resources from neighbours.
fn can_play_card(&self, card: &Card, _visible_game: &VisibleGame) -> bool {
if !self.hand.iter().any(|c| c == card) {
return false;
}
// Initialise a Resources struct with the number of coins we have.
let mut available_resources = Resources::coins(self.coins);
// Add all the other resources we always have access to (ie. those that are not resource
// "choice" cards. At the same time, make a vector of resources choices available to us.
let mut choices = Vec::new();
for card in &self.built_structures {
match card.power() {
// TODO: can we write these four options more succinctly?
Power::PurchasableProducer(ProducedResources::Single(resources)) => {
available_resources += &resources;
}
Power::Producer(ProducedResources::Single(resources)) => {
available_resources += &resources;
}
Power::PurchasableProducer(ProducedResources::Choice(choice)) => {
choices.push(choice);
}
Power::Producer(ProducedResources::Choice(choice)) => {
choices.push(choice);
}
_ => {}
}
}
// Add Wonder starting resources.
available_resources += &self.wonder.starting_resource();
if available_resources.can_afford(&card.cost()) {
return true;
}
if !choices.is_empty() {
// Iterate through all possible combinations of the choices we have. Use the iteration
// index to work out which choice to make for each card.
let combinations: u32 = choices.iter()
.fold(1, |x, y| x * y.len() as u32);
for combination in 0..combinations {
let mut available_resources_option = available_resources.clone();
let mut combination = combination;
for choice in &choices {
let index = combination % choice.len() as u32;
available_resources_option += &choice[index as usize];
combination /= choice.len() as u32;
}
if available_resources_option.can_afford(&card.cost()) {
return true;
}
}
}
false
}
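// Worked example of the combination decoding above (hypothetical choices):
// with two choice cards offering [Wood or Clay] and [Stone or Ore] there are
// 2 * 2 = 4 combinations; combination = 3 picks index 3 % 2 = 1 (Clay) for
// the first choice, then combination /= 2 leaves 1, and 1 % 2 = 1 picks Ore.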
}
/// Represents the aspects of [`Player`] that are public knowledge (ie. visible on the table). Things like a player's
/// current hand are not included.
pub struct PublicPlayer {
pub wonder: WonderBoard,
pub built_structures: Vec<Card>,
pub coins: u32,
}
impl PublicPlayer {
/// Creates a [`PublicPlayer`] from a [`Player`], copy/cloning the values so the originals can be mutated later
/// without issue.
pub fn new(player: &Player) -> PublicPlayer {
PublicPlayer {
wonder: player.wonder,
built_structures: player.built_structures.clone(),
coins: player.coins,
}
}
}
#[cfg(test)]
mod tests {
use Card::*;
use super::*;
use crate::algorithms::random::Random;
#[test]
fn can_play_returns_true_when_player_can_afford_card() {
// TODO: @Before etc
let player = new_player(vec![LumberYard]);
assert_eq!(true, player.can_play(&Action::Build(LumberYard), &visible_game()));
}
#[test]
fn can_play_returns_true_after_player_builds_required_resources() {
let mut player = new_player(vec![StonePit, Quarry, Aqueduct]);
player.do_action(&Action::Build(StonePit), &visible_game(), &mut vec![]);
assert_eq!(false, player.can_play(&Action::Build(Aqueduct), &visible_game()));
assert_eq!(true, player.do_action(&Action::Build(Quarry), &visible_game(), &mut vec![]));
assert_eq!(true, player.can_play(&Action::Build(Aqueduct), &visible_game()));
}
#[test]
fn strength_returns_sum_of_card_strengths() {
assert_eq!(0.0, Player::strength_internal(&vec![StonePit]));
assert_eq!(5.0, Player::strength_internal(&vec![StonePit, Quarry, Aqueduct]));
assert_eq!(6.0, Player::strength_internal(&vec![StonePit, Quarry, Aqueduct, Loom1, Apothecary]));
}
#[test]
fn strength_returns_correct_strength_of_green_structures() {
assert_eq!(1.0, Player::strength_internal(&vec![Lodge]));
assert_eq!(4.0, Player::strength_internal(&vec![Lodge, Apothecary]));
assert_eq!(9.0, Player::strength_internal(&vec![Lodge, Apothecary, Dispensary]));
assert_eq!(10.0, Player::strength_internal(&vec![Lodge, Workshop, Library]));
assert_eq!(21.0, Player::strength_internal(&vec![Lodge, Apothecary, Dispensary, Laboratory, Workshop, Library])); // rulebook example
}
#[test]
fn can_play_returns_false_when_player_cannot_pay() {
let mut player = new_player(vec![]);
player.coins = 0; //TODO introduce a Bank type to allow for double-entry bookkeeping instead of this
assert_eq!(false, player.can_play(&Action::Build(TreeFarm), &visible_game()));
}
#[test]
fn can_play_returns_false_when_both_choice_resources_needed() |
#[test]
fn do_action_returns_false_if_action_not_playable() {
let mut player = new_player(vec![LumberYard]);
assert_eq!(false, player.do_action(&Action::Build(StonePit), &visible_game(), &mut vec![]));
}
#[test]
fn do_action_transfers_built_card_from_hand_to_built_structures() {
let mut player = new_player(vec![LumberYard]);
assert_eq!(0, player.built_structures.len());
assert_eq!(1, player.hand.len());
assert_eq!(true, player.do_action(&Action::Build(LumberYard), &visible_game(), &mut vec![]));
assert_eq!(1, player.built_structures.len());
assert_eq!(0, player.hand.len());
}
#[test]
fn do_action_decrements_cost_in_coins_when_building() {
let mut player = new_player(vec![TreeFarm]);
assert_eq!(3, player.coins);
assert_eq!(true, player.do_action(&Action::Build(TreeFarm), &visible_game(), &mut vec![]));
assert_eq!(2, player.coins);
}
#[test]
fn do_action_transfers_discarded_card_from_hand_to_discard_pile() {
let mut player = new_player(vec![LumberYard]);
let mut discard_pile = vec![];
assert_eq!(1, player.hand.len());
assert_eq!(true, player.do_action(&Action::Discard(LumberYard), &visible_game(), &mut discard_pile));
assert_eq!(1, discard_pile.len());
assert_eq!(0, player.hand.len());
}
#[test]
fn do_action_adds_three_coins_when_discarding() {
let mut player = new_player(vec![LumberYard]);
assert_eq!(3, player.coins);
assert_eq!(true, player.do_action(&Action::Discard(LumberYard), &visible_game(), &mut vec![]));
assert_eq!(6, player.coins);
}
#[test]
fn new_public_player() {
let player = new_player(vec![LumberYard]);
let public_player = PublicPlayer::new(&player);
assert_eq!(player.wonder, public_player.wonder);
assert_eq!(player.built_structures, public_player.built_structures);
assert_eq!(player.coins, public_player.coins);
}
fn new_player(hand: Vec<Card>) -> Player {
let mut player = Player::new(WonderType::ColossusOfRhodes, WonderSide::A, Box::new(Random {}));
player.swap_hand(hand);
player
}
fn visible_game() -> VisibleGame<'static> {
VisibleGame { players: &[], player_index: 0 }
}
}
| {
// TODO implement
} | identifier_body |
player.rs | use std::collections::HashMap;
use crate::card::{Card, Colour};
use crate::game::{Action, VisibleGame};
use crate::power::Power;
use crate::power::ScienceItem;
use crate::resources::{ProducedResources, Resources};
use crate::wonder::{WonderBoard, WonderSide, WonderType};
use std::fmt::Debug;
use crate::algorithms::PlayingAlgorithm;
use std::mem;
#[derive(Debug)]
pub struct Player {
algorithm: Box<dyn PlayingAlgorithm>,
wonder: WonderBoard,
built_structures: Vec<Card>,
built_wonder_stages: Vec<Option<Card>>, // TODO: how to represent this?
coins: u32,
hand: Vec<Card>,
}
#[allow(dead_code)]
impl Player {
pub fn new(
wonder_type: WonderType,
wonder_side: WonderSide,
algorithm: Box<dyn PlayingAlgorithm>) -> Player {
Player {
algorithm,
wonder: WonderBoard { wonder_type, wonder_side },
built_structures: vec![],
built_wonder_stages: vec![],
coins: 3,
hand: vec![],
}
}
pub fn algorithm(&self) -> &dyn PlayingAlgorithm {
&*self.algorithm
}
pub fn wonder(&self) -> &WonderBoard {
&self.wonder
}
pub fn built_structures(&self) -> &Vec<Card> {
&self.built_structures
}
pub fn coins(&self) -> u32 {
self.coins
}
pub fn hand(&self) -> &Vec<Card> {
&self.hand
}
/// Performs the given [`Action`] on the current player, for example moving a card from the player's hand into the
/// player's built structures. Returns `true` if the action is legal, `false` otherwise (in which case this
/// function does nothing).
pub fn do_action(&mut self, action: &Action, visible_game: &VisibleGame, discard_pile: &mut Vec<Card>) -> bool {
// Removes and returns the given card from the player's hand.
fn remove_from_hand(hand: &mut Vec<Card>, card: &Card) -> Card {
let index = hand.iter().position(|c| c == card).unwrap();
hand.swap_remove(index)
}
if self.can_play(action, visible_game) {
match action {
Action::Build(card) => {
let card_from_hand = remove_from_hand(&mut self.hand, card);
self.built_structures.push(card_from_hand);
self.coins -= card_from_hand.cost().coins;
// TODO: deal with borrowed resources
}
Action::Wonder(_) => todo!(),
Action::Discard(card) => {
discard_pile.push(remove_from_hand(&mut self.hand, card));
self.coins += 3;
}
}
true
} else {
false
}
}
/// Replaces this player's hand with the given cards, returning the hand the player had before the swap.
pub fn swap_hand(&mut self, new_hand: Vec<Card>) -> Vec<Card> {
mem::replace(&mut self.hand, new_hand)
}
fn evaluate_green(colour_cards: &[Card]) -> f32 {
let mut science_items_count: HashMap<ScienceItem, i32> = HashMap::new();
science_items_count.insert(ScienceItem::Compass, 0);
science_items_count.insert(ScienceItem::Cog, 0);
science_items_count.insert(ScienceItem::Tablet, 0);
for card in colour_cards.iter() {
if let Power::Science(science_items) = card.power() {
for science_item in science_items.iter() {
let count = science_items_count.entry(*science_item).or_insert(0);
*count += 1;
}
}
}
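// 7 Wonders science scoring: each symbol type scores count^2, and each
// complete set of all three different symbols scores a further 7 points; the
// number of complete sets equals the minimum count across the three symbols.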
let score_for_sets_of_identical_symbols: f32 = science_items_count.iter()
.filter(|(_, count)| **count > 0)
.map(|(_, count)| {
(*count as f32).powf(2f32)
})
.sum();
let score_for_all_symbol_groups: f32 = 7f32 *
*science_items_count.iter().min_by_key(|(_, count)| *count).unwrap().1 as f32;
score_for_all_symbol_groups + score_for_sets_of_identical_symbols
}
fn evaluate_colour(cards_of_given_colour: &[Card]) -> f32 {
let colour = cards_of_given_colour.get(0).unwrap().colour();
match colour {
Colour::Green => Self::evaluate_green(cards_of_given_colour),
_ => cards_of_given_colour.iter().map(|card| card.immediate_strength()).sum(),
}
}
fn strength_internal(cards: &[Card]) -> f32 {
let mut colour_to_structure = HashMap::new();
for structure in cards.iter() {
let colour_structures = colour_to_structure.entry(structure.colour()).or_insert_with(Vec::new);
colour_structures.push(*structure)
}
colour_to_structure.iter()
.map(|colour_entry| Self::evaluate_colour(colour_entry.1))
.sum()
}
/// Returns this player's "strength" -- a number where a higher value means the player is doing better than a lower
/// value.
pub fn strength(&self) -> f32 {
Self::strength_internal(&self.built_structures)
}
pub fn can_play(&self, action: &Action, visible_game: &VisibleGame) -> bool {
match action {
Action::Build(card) => self.can_play_card(card, visible_game),
Action::Wonder(_) => todo!(),
Action::Discard(_) => true,
}
}
/// Returns `true` if the player can afford to play the given card, given the resources the player
/// has access to.
///
/// TODO: doesn't currently deal with borrowing resources from neighbours.
fn can_play_card(&self, card: &Card, _visible_game: &VisibleGame) -> bool {
if !self.hand.iter().any(|c| c == card) {
return false;
}
// Initialise a Resources struct with the number of coins we have.
let mut available_resources = Resources::coins(self.coins);
// Add all the other resources we always have access to (ie. those that are not resource
// "choice" cards. At the same time, make a vector of resources choices available to us.
let mut choices = Vec::new();
for card in &self.built_structures {
match card.power() {
// TODO: can we write these four options more succinctly?
Power::PurchasableProducer(ProducedResources::Single(resources)) => {
available_resources += &resources;
}
Power::Producer(ProducedResources::Single(resources)) => {
available_resources += &resources;
}
Power::PurchasableProducer(ProducedResources::Choice(choice)) => {
choices.push(choice);
}
Power::Producer(ProducedResources::Choice(choice)) => {
choices.push(choice);
}
_ => {}
}
}
// Add Wonder starting resources.
available_resources += &self.wonder.starting_resource();
if available_resources.can_afford(&card.cost()) {
return true;
}
if !choices.is_empty() {
// Iterate through all possible combinations of the choices we have. Use the iteration
// index to work out which choice to make for each card.
let combinations: u32 = choices.iter()
.fold(1, |x, y| x * y.len() as u32);
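// Each index in 0..combinations is decoded as a mixed-radix number: digit i
// (base choices[i].len()) selects which resource to take from choice card i.
// E.g. two choice cards offering 2 and 3 resources give 2 * 3 = 6 combinations.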
for combination in 0..combinations {
let mut available_resources_option = available_resources.clone();
let mut combination = combination;
for choice in &choices {
let index = combination % choice.len() as u32;
available_resources_option += &choice[index as usize];
combination /= choice.len() as u32;
}
if available_resources_option.can_afford(&card.cost()) {
return true;
}
}
}
false
}
}
/// Represents the aspects of [`Player`] that are public knowledge (ie. visible on the table). Things like a player's
/// current hand are not included.
pub struct PublicPlayer {
pub wonder: WonderBoard,
pub built_structures: Vec<Card>,
pub coins: u32,
}
impl PublicPlayer {
/// Creates a [`PublicPlayer`] from a [`Player`], copy/cloning the values so the originals can be mutated later
/// without issue.
pub fn new(player: &Player) -> PublicPlayer {
PublicPlayer {
wonder: player.wonder,
built_structures: player.built_structures.clone(),
coins: player.coins,
}
}
}
#[cfg(test)]
mod tests {
use Card::*;
use super::*;
use crate::algorithms::random::Random;
#[test]
fn can_play_returns_true_when_player_can_afford_card() {
// TODO: @Before etc
let player = new_player(vec![LumberYard]);
assert_eq!(true, player.can_play(&Action::Build(LumberYard), &visible_game()));
}
#[test]
fn can_play_returns_true_after_player_builds_required_resources() {
let mut player = new_player(vec![StonePit, Quarry, Aqueduct]);
player.do_action(&Action::Build(StonePit), &visible_game(), &mut vec![]);
assert_eq!(false, player.can_play(&Action::Build(Aqueduct), &visible_game()));
assert_eq!(true, player.do_action(&Action::Build(Quarry), &visible_game(), &mut vec![]));
assert_eq!(true, player.can_play(&Action::Build(Aqueduct), &visible_game()));
}
#[test]
fn strength_returns_sum_of_card_strengths() {
assert_eq!(0.0, Player::strength_internal(&vec![StonePit]));
assert_eq!(5.0, Player::strength_internal(&vec![StonePit, Quarry, Aqueduct]));
assert_eq!(6.0, Player::strength_internal(&vec![StonePit, Quarry, Aqueduct, Loom1, Apothecary]));
}
#[test]
fn strength_returns_correct_strength_of_green_structures() {
assert_eq!(1.0, Player::strength_internal(&vec![Lodge]));
assert_eq!(4.0, Player::strength_internal(&vec![Lodge, Apothecary]));
assert_eq!(9.0, Player::strength_internal(&vec![Lodge, Apothecary, Dispensary]));
assert_eq!(10.0, Player::strength_internal(&vec![Lodge, Workshop, Library]));
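// Rulebook example: symbol counts (3, 2, 1) score 3^2 + 2^2 + 1^2 = 14, plus 7 * min(3, 2, 1) = 7, for 21 total.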
assert_eq!(21.0, Player::strength_internal(&vec![Lodge, Apothecary, Dispensary, Laboratory, Workshop, Library])); // rulebook example
}
#[test]
fn can_play_returns_false_when_player_cannot_pay() {
let mut player = new_player(vec![]);
player.coins = 0; //TODO introduce a Bank type to allow for double-entry bookkeeping instead of this
assert_eq!(false, player.can_play(&Action::Build(TreeFarm), &visible_game()));
}
#[test]
fn can_play_returns_false_when_both_choice_resources_needed() {
// TODO implement
}
#[test]
fn | () {
let mut player = new_player(vec![LumberYard]);
assert_eq!(false, player.do_action(&Action::Build(StonePit), &visible_game(), &mut vec![]));
}
#[test]
fn do_action_transfers_built_card_from_hand_to_built_structures() {
let mut player = new_player(vec![LumberYard]);
assert_eq!(0, player.built_structures.len());
assert_eq!(1, player.hand.len());
assert_eq!(true, player.do_action(&Action::Build(LumberYard), &visible_game(), &mut vec![]));
assert_eq!(1, player.built_structures.len());
assert_eq!(0, player.hand.len());
}
#[test]
fn do_action_decrements_cost_in_coins_when_building() {
let mut player = new_player(vec![TreeFarm]);
assert_eq!(3, player.coins);
assert_eq!(true, player.do_action(&Action::Build(TreeFarm), &visible_game(), &mut vec![]));
assert_eq!(2, player.coins);
}
#[test]
fn do_action_transfers_discarded_card_from_hand_to_discard_pile() {
let mut player = new_player(vec![LumberYard]);
let mut discard_pile = vec![];
assert_eq!(1, player.hand.len());
assert_eq!(true, player.do_action(&Action::Discard(LumberYard), &visible_game(), &mut discard_pile));
assert_eq!(1, discard_pile.len());
assert_eq!(0, player.hand.len());
}
#[test]
fn do_action_adds_three_coins_when_discarding() {
let mut player = new_player(vec![LumberYard]);
assert_eq!(3, player.coins);
assert_eq!(true, player.do_action(&Action::Discard(LumberYard), &visible_game(), &mut vec![]));
assert_eq!(6, player.coins);
}
#[test]
fn new_public_player() {
let player = new_player(vec![LumberYard]);
let public_player = PublicPlayer::new(&player);
assert_eq!(player.wonder, public_player.wonder);
assert_eq!(player.built_structures, public_player.built_structures);
assert_eq!(player.coins, public_player.coins);
}
fn new_player(hand: Vec<Card>) -> Player {
let mut player = Player::new(WonderType::ColossusOfRhodes, WonderSide::A, Box::new(Random {}));
player.swap_hand(hand);
player
}
fn visible_game() -> VisibleGame<'static> {
VisibleGame { players: &[], player_index: 0 }
}
}
| do_action_returns_false_if_action_not_playable | identifier_name |
player.rs | use std::collections::HashMap;
use crate::card::{Card, Colour};
use crate::game::{Action, VisibleGame};
use crate::power::Power;
use crate::power::ScienceItem;
use crate::resources::{ProducedResources, Resources};
use crate::wonder::{WonderBoard, WonderSide, WonderType};
use std::fmt::Debug;
use crate::algorithms::PlayingAlgorithm;
use std::mem;
#[derive(Debug)]
pub struct Player {
algorithm: Box<dyn PlayingAlgorithm>,
wonder: WonderBoard,
built_structures: Vec<Card>,
built_wonder_stages: Vec<Option<Card>>, // TODO: how to represent this?
coins: u32,
hand: Vec<Card>,
}
#[allow(dead_code)]
impl Player {
pub fn new(
wonder_type: WonderType,
wonder_side: WonderSide,
algorithm: Box<dyn PlayingAlgorithm>) -> Player {
Player {
algorithm,
wonder: WonderBoard { wonder_type, wonder_side },
built_structures: vec![],
built_wonder_stages: vec![],
coins: 3,
hand: vec![],
}
}
pub fn algorithm(&self) -> &dyn PlayingAlgorithm {
&*self.algorithm
}
pub fn wonder(&self) -> &WonderBoard {
&self.wonder
}
pub fn built_structures(&self) -> &Vec<Card> {
&self.built_structures
}
pub fn coins(&self) -> u32 {
self.coins
}
pub fn hand(&self) -> &Vec<Card> {
&self.hand
}
/// Performs the given [`Action`] on the current player, for example moving a card from the player's hand into the
/// player's built structures. Returns `true` if the action is legal, `false` otherwise (in which case this
/// function does nothing).
pub fn do_action(&mut self, action: &Action, visible_game: &VisibleGame, discard_pile: &mut Vec<Card>) -> bool {
// Removes and returns the given card from the player's hand.
fn remove_from_hand(hand: &mut Vec<Card>, card: &Card) -> Card {
let index = hand.iter().position(|c| c == card).unwrap();
hand.swap_remove(index)
}
if self.can_play(action, visible_game) {
match action {
Action::Build(card) => {
let card_from_hand = remove_from_hand(&mut self.hand, card);
self.built_structures.push(card_from_hand);
self.coins -= card_from_hand.cost().coins;
// TODO: deal with borrowed resources
}
Action::Wonder(_) => todo!(),
Action::Discard(card) => {
discard_pile.push(remove_from_hand(&mut self.hand, card));
self.coins += 3;
}
}
true
} else {
false
}
}
/// Replaces this player's hand with the given cards, returning the hand the player had before the swap.
pub fn swap_hand(&mut self, new_hand: Vec<Card>) -> Vec<Card> {
mem::replace(&mut self.hand, new_hand)
}
fn evaluate_green(colour_cards: &[Card]) -> f32 {
let mut science_items_count: HashMap<ScienceItem, i32> = HashMap::new();
science_items_count.insert(ScienceItem::Compass, 0);
science_items_count.insert(ScienceItem::Cog, 0);
science_items_count.insert(ScienceItem::Tablet, 0);
for card in colour_cards.iter() {
if let Power::Science(science_items) = card.power() {
for science_item in science_items.iter() {
let count = science_items_count.entry(*science_item).or_insert(0);
*count += 1;
}
}
}
let score_for_sets_of_identical_symbols: f32 = science_items_count.iter()
.filter(|(_, count)| **count > 0)
.map(|(_, count)| {
(*count as f32).powf(2f32)
})
.sum();
let score_for_all_symbol_groups: f32 = 7f32 *
*science_items_count.iter().min_by_key(|(_, count)| *count).unwrap().1 as f32;
score_for_all_symbol_groups + score_for_sets_of_identical_symbols
}
fn evaluate_colour(cards_of_given_colour: &[Card]) -> f32 {
let colour = cards_of_given_colour.get(0).unwrap().colour();
match colour {
Colour::Green => Self::evaluate_green(cards_of_given_colour),
_ => cards_of_given_colour.iter().map(|card| card.immediate_strength()).sum(),
}
}
fn strength_internal(cards: &[Card]) -> f32 {
let mut colour_to_structure = HashMap::new();
for structure in cards.iter() {
let colour_structures = colour_to_structure.entry(structure.colour()).or_insert_with(Vec::new);
colour_structures.push(*structure)
}
colour_to_structure.iter()
.map(|colour_entry| Self::evaluate_colour(colour_entry.1))
.sum()
}
/// Returns this player's "strength" -- a number where a higher value means the player is doing better than a lower
/// value.
pub fn strength(&self) -> f32 {
Self::strength_internal(&self.built_structures)
}
pub fn can_play(&self, action: &Action, visible_game: &VisibleGame) -> bool {
match action {
Action::Build(card) => self.can_play_card(card, visible_game),
Action::Wonder(_) => todo!(),
Action::Discard(_) => true,
}
}
/// Returns `true` if the player can afford to play the given card, given the resources the player
/// has access to.
///
/// TODO: doesn't currently deal with borrowing resources from neighbours.
fn can_play_card(&self, card: &Card, _visible_game: &VisibleGame) -> bool {
if !self.hand.iter().any(|c| c == card) {
return false;
}
| // "choice" cards. At the same time, make a vector of resources choices available to us.
let mut choices = Vec::new();
for card in &self.built_structures {
match card.power() {
// TODO: can we write these four options more succinctly?
Power::PurchasableProducer(ProducedResources::Single(resources)) => {
available_resources += &resources;
}
Power::Producer(ProducedResources::Single(resources)) => {
available_resources += &resources;
}
Power::PurchasableProducer(ProducedResources::Choice(choice)) => {
choices.push(choice);
}
Power::Producer(ProducedResources::Choice(choice)) => {
choices.push(choice);
}
_ => {}
}
}
// Add Wonder starting resources.
available_resources += &self.wonder.starting_resource();
if available_resources.can_afford(&card.cost()) {
return true;
}
if !choices.is_empty() {
// Iterate through all possible combinations of the choices we have. Use the iteration
// index to work out which choice to make for each card.
let combinations: u32 = choices.iter()
.fold(1, |x, y| x * y.len() as u32);
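// Treat each index as a mixed-radix number, one digit per choice card; digit i
// (base choices[i].len()) picks the resource taken from that card.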
for combination in 0..combinations {
let mut available_resources_option = available_resources.clone();
let mut combination = combination;
for choice in &choices {
let index = combination % choice.len() as u32;
available_resources_option += &choice[index as usize];
combination /= choice.len() as u32;
}
if available_resources_option.can_afford(&card.cost()) {
return true;
}
}
}
false
}
}
/// Represents the aspects of [`Player`] that are public knowledge (ie. visible on the table). Things like a player's
/// current hand are not included.
pub struct PublicPlayer {
pub wonder: WonderBoard,
pub built_structures: Vec<Card>,
pub coins: u32,
}
impl PublicPlayer {
/// Creates a [`PublicPlayer`] from a [`Player`], copy/cloning the values so the originals can be mutated later
/// without issue.
pub fn new(player: &Player) -> PublicPlayer {
PublicPlayer {
wonder: player.wonder,
built_structures: player.built_structures.clone(),
coins: player.coins,
}
}
}
#[cfg(test)]
mod tests {
use Card::*;
use super::*;
use crate::algorithms::random::Random;
#[test]
fn can_play_returns_true_when_player_can_afford_card() {
// TODO: @Before etc
let player = new_player(vec![LumberYard]);
assert_eq!(true, player.can_play(&Action::Build(LumberYard), &visible_game()));
}
#[test]
fn can_play_returns_true_after_player_builds_required_resources() {
let mut player = new_player(vec![StonePit, Quarry, Aqueduct]);
player.do_action(&Action::Build(StonePit), &visible_game(), &mut vec![]);
assert_eq!(false, player.can_play(&Action::Build(Aqueduct), &visible_game()));
assert_eq!(true, player.do_action(&Action::Build(Quarry), &visible_game(), &mut vec![]));
assert_eq!(true, player.can_play(&Action::Build(Aqueduct), &visible_game()));
}
#[test]
fn strength_returns_sum_of_card_strengths() {
assert_eq!(0.0, Player::strength_internal(&vec![StonePit]));
assert_eq!(5.0, Player::strength_internal(&vec![StonePit, Quarry, Aqueduct]));
assert_eq!(6.0, Player::strength_internal(&vec![StonePit, Quarry, Aqueduct, Loom1, Apothecary]));
}
#[test]
fn strength_returns_correct_strength_of_green_structures() {
assert_eq!(1.0, Player::strength_internal(&vec![Lodge]));
assert_eq!(4.0, Player::strength_internal(&vec![Lodge, Apothecary]));
assert_eq!(9.0, Player::strength_internal(&vec![Lodge, Apothecary, Dispensary]));
assert_eq!(10.0, Player::strength_internal(&vec![Lodge, Workshop, Library]));
assert_eq!(21.0, Player::strength_internal(&vec![Lodge, Apothecary, Dispensary, Laboratory, Workshop, Library])); // rulebook example
}
#[test]
fn can_play_returns_false_when_player_cannot_pay() {
let mut player = new_player(vec![]);
player.coins = 0; //TODO introduce a Bank type to allow for double-entry bookkeeping instead of this
assert_eq!(false, player.can_play(&Action::Build(TreeFarm), &visible_game()));
}
#[test]
fn can_play_returns_false_when_both_choice_resources_needed() {
// TODO implement
}
#[test]
fn do_action_returns_false_if_action_not_playable() {
let mut player = new_player(vec![LumberYard]);
assert_eq!(false, player.do_action(&Action::Build(StonePit), &visible_game(), &mut vec![]));
}
#[test]
fn do_action_transfers_built_card_from_hand_to_built_structures() {
let mut player = new_player(vec![LumberYard]);
assert_eq!(0, player.built_structures.len());
assert_eq!(1, player.hand.len());
assert_eq!(true, player.do_action(&Action::Build(LumberYard), &visible_game(), &mut vec![]));
assert_eq!(1, player.built_structures.len());
assert_eq!(0, player.hand.len());
}
#[test]
fn do_action_decrements_cost_in_coins_when_building() {
let mut player = new_player(vec![TreeFarm]);
assert_eq!(3, player.coins);
assert_eq!(true, player.do_action(&Action::Build(TreeFarm), &visible_game(), &mut vec![]));
assert_eq!(2, player.coins);
}
#[test]
fn do_action_transfers_discarded_card_from_hand_to_discard_pile() {
let mut player = new_player(vec![LumberYard]);
let mut discard_pile = vec![];
assert_eq!(1, player.hand.len());
assert_eq!(true, player.do_action(&Action::Discard(LumberYard), &visible_game(), &mut discard_pile));
assert_eq!(1, discard_pile.len());
assert_eq!(0, player.hand.len());
}
#[test]
fn do_action_adds_three_coins_when_discarding() {
let mut player = new_player(vec![LumberYard]);
assert_eq!(3, player.coins);
assert_eq!(true, player.do_action(&Action::Discard(LumberYard), &visible_game(), &mut vec![]));
assert_eq!(6, player.coins);
}
#[test]
fn new_public_player() {
let player = new_player(vec![LumberYard]);
let public_player = PublicPlayer::new(&player);
assert_eq!(player.wonder, public_player.wonder);
assert_eq!(player.built_structures, public_player.built_structures);
assert_eq!(player.coins, public_player.coins);
}
fn new_player(hand: Vec<Card>) -> Player {
let mut player = Player::new(WonderType::ColossusOfRhodes, WonderSide::A, Box::new(Random {}));
player.swap_hand(hand);
player
}
fn visible_game() -> VisibleGame<'static> {
VisibleGame { players: &[], player_index: 0 }
}
} | // Initialise a Resources struct with the number of coins we have.
let mut available_resources = Resources::coins(self.coins);
// Add all the other resources we always have access to (ie. those that are not resource | random_line_split |
runtime.rs | use crate::runtime::blocking::BlockingPool;
use crate::runtime::scheduler::CurrentThread;
use crate::runtime::{context, EnterGuard, Handle};
use crate::task::JoinHandle;
use std::future::Future;
use std::time::Duration;
cfg_rt_multi_thread! {
use crate::runtime::Builder;
use crate::runtime::scheduler::MultiThread;
cfg_unstable! {
use crate::runtime::scheduler::MultiThreadAlt;
}
}
/// The Tokio runtime.
///
/// The runtime provides an I/O driver, task scheduler, [timer], and
/// blocking pool, necessary for running asynchronous tasks.
///
/// Instances of `Runtime` can be created using [`new`], or [`Builder`].
/// However, most users will use the `#[tokio::main]` annotation on their
/// entry point instead.
///
/// See [module level][mod] documentation for more details.
///
/// # Shutdown
///
/// Shutting down the runtime is done by dropping the value, or calling
/// [`shutdown_background`] or [`shutdown_timeout`].
///
/// Tasks spawned through [`Runtime::spawn`] keep running until they yield.
/// Then they are dropped. They are not *guaranteed* to run to completion, but
/// *might* do so if they do not yield until completion.
///
/// Blocking functions spawned through [`Runtime::spawn_blocking`] keep running
/// until they return.
///
/// The thread initiating the shutdown blocks until all spawned work has been
/// stopped. This can take an indefinite amount of time. The `Drop`
/// implementation waits forever for this.
///
/// The [`shutdown_background`] and [`shutdown_timeout`] methods can be used if
/// waiting forever is undesired. When the timeout is reached, spawned work that
/// did not stop in time and threads running it are leaked. The work continues
/// to run until one of the stopping conditions is fulfilled, but the thread
/// initiating the shutdown is unblocked.
///
/// Once the runtime has been dropped, any outstanding I/O resources bound to
/// it will no longer function. Calling any method on them will result in an
/// error.
///
/// # Sharing
///
/// There are several ways to establish shared access to a Tokio runtime:
///
/// * Using an <code>[Arc]\<Runtime></code>.
/// * Using a [`Handle`].
/// * Entering the runtime context.
///
/// Using an <code>[Arc]\<Runtime></code> or [`Handle`] allows you to do various
/// things with the runtime such as spawning new tasks or entering the runtime
/// context. Both types can be cloned to create a new handle that allows access
/// to the same runtime. By passing clones into different tasks or threads, you
/// will be able to access the runtime from those tasks or threads.
///
/// The difference between <code>[Arc]\<Runtime></code> and [`Handle`] is that
/// an <code>[Arc]\<Runtime></code> will prevent the runtime from shutting down,
/// whereas a [`Handle`] does not prevent that. This is because shutdown of the
/// runtime happens when the destructor of the `Runtime` object runs.
///
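/// For example, sharing a runtime across threads with an `Arc` (a minimal
/// sketch):
///
/// ```
/// use std::sync::Arc;
/// use tokio::runtime::Runtime;
///
/// let rt = Arc::new(Runtime::new().unwrap());
/// let rt2 = Arc::clone(&rt);
/// std::thread::spawn(move || {
///     rt2.block_on(async { println!("hello from another thread"); });
/// }).join().unwrap();
/// ```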
/// Calls to [`shutdown_background`] and [`shutdown_timeout`] require exclusive
/// ownership of the `Runtime` type. When using an <code>[Arc]\<Runtime></code>,
/// this can be achieved via [`Arc::try_unwrap`] when only one strong count
/// reference is left over.
///
/// The runtime context is entered using the [`Runtime::enter`] or
/// [`Handle::enter`] methods, which use a thread-local variable to store the
/// current runtime. Whenever you are inside the runtime context, methods such
/// as [`tokio::spawn`] will use the runtime whose context you are inside.
///
/// [timer]: crate::time
/// [mod]: index.html
/// [`new`]: method@Self::new
/// [`Builder`]: struct@Builder
/// [`Handle`]: struct@Handle
/// [`tokio::spawn`]: crate::spawn
/// [`Arc::try_unwrap`]: std::sync::Arc::try_unwrap
/// [Arc]: std::sync::Arc
/// [`shutdown_background`]: method@Runtime::shutdown_background
/// [`shutdown_timeout`]: method@Runtime::shutdown_timeout
#[derive(Debug)]
pub struct Runtime {
/// Task scheduler
scheduler: Scheduler,
/// Handle to runtime, also contains driver handles
handle: Handle,
/// Blocking pool handle, used to signal shutdown
blocking_pool: BlockingPool,
}
/// The flavor of a `Runtime`.
///
/// This is the return type for [`Handle::runtime_flavor`](crate::runtime::Handle::runtime_flavor()).
#[derive(Debug, PartialEq, Eq)]
#[non_exhaustive]
pub enum RuntimeFlavor {
/// The flavor that executes all tasks on the current thread.
CurrentThread,
/// The flavor that executes tasks across multiple threads.
MultiThread,
/// The flavor that executes tasks across multiple threads.
#[cfg(tokio_unstable)]
MultiThreadAlt,
}
/// The runtime scheduler is either a multi-thread or a current-thread executor.
#[derive(Debug)]
pub(super) enum Scheduler {
/// Execute all tasks on the current-thread.
CurrentThread(CurrentThread),
/// Execute tasks across multiple threads.
#[cfg(all(feature = "rt-multi-thread", not(target_os = "wasi")))]
MultiThread(MultiThread),
/// Execute tasks across multiple threads.
#[cfg(all(tokio_unstable, feature = "rt-multi-thread", not(target_os = "wasi")))]
MultiThreadAlt(MultiThreadAlt),
}
impl Runtime {
pub(super) fn from_parts(
scheduler: Scheduler,
handle: Handle,
blocking_pool: BlockingPool,
) -> Runtime {
Runtime {
scheduler,
handle,
blocking_pool,
}
}
cfg_not_wasi! {
/// Creates a new runtime instance with default configuration values.
///
/// This results in the multi threaded scheduler, I/O driver, and time driver being
/// initialized.
///
/// Most applications will not need to call this function directly. Instead,
/// they will use the [`#[tokio::main]` attribute][main]. When a more complex
/// configuration is necessary, the [runtime builder] may be used.
///
/// See [module level][mod] documentation for more details.
///
/// # Examples
///
/// Creating a new `Runtime` with default configuration values.
///
/// ```
/// use tokio::runtime::Runtime;
///
/// let rt = Runtime::new()
/// .unwrap();
///
/// // Use the runtime...
/// ``` | /// [runtime builder]: crate::runtime::Builder
#[cfg(feature = "rt-multi-thread")]
#[cfg_attr(docsrs, doc(cfg(feature = "rt-multi-thread")))]
pub fn new() -> std::io::Result<Runtime> {
Builder::new_multi_thread().enable_all().build()
}
}
/// Returns a handle to the runtime's spawner.
///
/// The returned handle can be used to spawn tasks that run on this runtime, and can
/// be cloned to allow moving the `Handle` to other threads.
///
/// Calling [`Handle::block_on`] on a handle to a `current_thread` runtime is error-prone.
/// Refer to the documentation of [`Handle::block_on`] for more.
///
/// # Examples
///
/// ```
/// use tokio::runtime::Runtime;
///
/// let rt = Runtime::new()
/// .unwrap();
///
/// let handle = rt.handle();
///
/// // Use the handle...
/// ```
pub fn handle(&self) -> &Handle {
&self.handle
}
/// Spawns a future onto the Tokio runtime.
///
/// This spawns the given future onto the runtime's executor, usually a
/// thread pool. The thread pool is then responsible for polling the future
/// until it completes.
///
/// The provided future will start running in the background immediately
/// when `spawn` is called, even if you don't await the returned
/// `JoinHandle`.
///
/// See [module level][mod] documentation for more details.
///
/// [mod]: index.html
///
/// # Examples
///
/// ```
/// use tokio::runtime::Runtime;
///
/// # fn dox() {
/// // Create the runtime
/// let rt = Runtime::new().unwrap();
///
/// // Spawn a future onto the runtime
/// rt.spawn(async {
/// println!("now running on a worker thread");
/// });
/// # }
/// ```
#[track_caller]
pub fn spawn<F>(&self, future: F) -> JoinHandle<F::Output>
where
F: Future + Send + 'static,
F::Output: Send + 'static,
{
self.handle.spawn(future)
}
/// Runs the provided function on an executor dedicated to blocking operations.
///
/// # Examples
///
/// ```
/// use tokio::runtime::Runtime;
///
/// # fn dox() {
/// // Create the runtime
/// let rt = Runtime::new().unwrap();
///
/// // Spawn a blocking function onto the runtime
/// rt.spawn_blocking(|| {
/// println!("now running on a worker thread");
/// });
/// # }
/// ```
#[track_caller]
pub fn spawn_blocking<F, R>(&self, func: F) -> JoinHandle<R>
where
F: FnOnce() -> R + Send + 'static,
R: Send + 'static,
{
self.handle.spawn_blocking(func)
}
/// Runs a future to completion on the Tokio runtime. This is the
/// runtime's entry point.
///
/// This runs the given future on the current thread, blocking until it is
/// complete, and yielding its resolved result. Any tasks or timers
/// which the future spawns internally will be executed on the runtime.
///
/// # Non-worker future
///
/// Note that the future required by this function does not run as a
/// worker. The expectation is that other tasks are spawned by the future here.
/// Awaiting on other futures from the future provided here will not
/// perform as fast as those spawned as workers.
///
/// # Multi thread scheduler
///
/// When the multi thread scheduler is used this will allow futures
/// to run within the I/O driver and timer context of the overall runtime.
///
/// Any spawned tasks will continue running after `block_on` returns.
///
/// # Current thread scheduler
///
/// When the current thread scheduler is enabled `block_on`
/// can be called concurrently from multiple threads. The first call
/// will take ownership of the I/O and timer drivers. This means
/// other threads which do not own the drivers will hook into that one.
/// When the first `block_on` completes, other threads will be able to
/// "steal" the driver to allow continued execution of their futures.
///
/// Any spawned tasks will be suspended after `block_on` returns. Calling
/// `block_on` again will resume previously spawned tasks.
///
/// # Panics
///
/// This function panics if the provided future panics, or if called within an
/// asynchronous execution context.
///
/// # Examples
///
/// ```no_run
/// use tokio::runtime::Runtime;
///
/// // Create the runtime
/// let rt = Runtime::new().unwrap();
///
/// // Execute the future, blocking the current thread until completion
/// rt.block_on(async {
/// println!("hello");
/// });
/// ```
///
/// [handle]: fn@Handle::block_on
#[track_caller]
pub fn block_on<F: Future>(&self, future: F) -> F::Output {
#[cfg(all(
tokio_unstable,
tokio_taskdump,
feature = "rt",
target_os = "linux",
any(target_arch = "aarch64", target_arch = "x86", target_arch = "x86_64")
))]
let future = super::task::trace::Trace::root(future);
#[cfg(all(tokio_unstable, feature = "tracing"))]
let future = crate::util::trace::task(
future,
"block_on",
None,
crate::runtime::task::Id::next().as_u64(),
);
let _enter = self.enter();
match &self.scheduler {
Scheduler::CurrentThread(exec) => exec.block_on(&self.handle.inner, future),
#[cfg(all(feature = "rt-multi-thread", not(target_os = "wasi")))]
Scheduler::MultiThread(exec) => exec.block_on(&self.handle.inner, future),
#[cfg(all(tokio_unstable, feature = "rt-multi-thread", not(target_os = "wasi")))]
Scheduler::MultiThreadAlt(exec) => exec.block_on(&self.handle.inner, future),
}
}
/// Enters the runtime context.
///
/// This allows you to construct types that must have an executor
/// available on creation such as [`Sleep`] or [`TcpStream`]. It will
/// also allow you to call methods such as [`tokio::spawn`].
///
/// [`Sleep`]: struct@crate::time::Sleep
/// [`TcpStream`]: struct@crate::net::TcpStream
/// [`tokio::spawn`]: fn@crate::spawn
///
/// # Example
///
/// ```
/// use tokio::runtime::Runtime;
///
/// fn function_that_spawns(msg: String) {
/// // Had we not used `rt.enter` below, this would panic.
/// tokio::spawn(async move {
/// println!("{}", msg);
/// });
/// }
///
/// fn main() {
/// let rt = Runtime::new().unwrap();
///
/// let s = "Hello World!".to_string();
///
/// // By entering the context, we tie `tokio::spawn` to this executor.
/// let _guard = rt.enter();
/// function_that_spawns(s);
/// }
/// ```
pub fn enter(&self) -> EnterGuard<'_> {
self.handle.enter()
}
/// Shuts down the runtime, waiting for at most `duration` for all spawned
/// work to stop.
///
/// See the [struct level documentation](Runtime#shutdown) for more details.
///
/// # Examples
///
/// ```
/// use tokio::runtime::Runtime;
/// use tokio::task;
///
/// use std::thread;
/// use std::time::Duration;
///
/// fn main() {
/// let runtime = Runtime::new().unwrap();
///
/// runtime.block_on(async move {
/// task::spawn_blocking(move || {
/// thread::sleep(Duration::from_secs(10_000));
/// });
/// });
///
/// runtime.shutdown_timeout(Duration::from_millis(100));
/// }
/// ```
pub fn shutdown_timeout(mut self, duration: Duration) {
// Wakeup and shutdown all the worker threads
self.handle.inner.shutdown();
self.blocking_pool.shutdown(Some(duration));
}
/// Shuts down the runtime, without waiting for any spawned work to stop.
///
/// This can be useful if you want to drop a runtime from within another runtime.
/// Normally, dropping a runtime will block indefinitely for spawned blocking tasks
/// to complete, which would normally not be permitted within an asynchronous context.
/// By calling `shutdown_background()`, you can drop the runtime from such a context.
///
/// Note, however, that because we do not wait for any blocking tasks to complete, this
/// may result in a resource leak (in that any blocking tasks are still running until they
/// return).
///
/// See the [struct level documentation](Runtime#shutdown) for more details.
///
/// This function is equivalent to calling `shutdown_timeout(Duration::from_nanos(0))`.
///
/// ```
/// use tokio::runtime::Runtime;
///
/// fn main() {
/// let runtime = Runtime::new().unwrap();
///
/// runtime.block_on(async move {
/// let inner_runtime = Runtime::new().unwrap();
/// // ...
/// inner_runtime.shutdown_background();
/// });
/// }
/// ```
pub fn shutdown_background(self) {
self.shutdown_timeout(Duration::from_nanos(0))
}
}
#[allow(clippy::single_match)] // there are comments in the error branch, so we don't want if-let
impl Drop for Runtime {
fn drop(&mut self) {
match &mut self.scheduler {
Scheduler::CurrentThread(current_thread) => {
// This ensures that tasks spawned on the current-thread
// runtime are dropped inside the runtime's context.
let _guard = context::try_set_current(&self.handle.inner);
current_thread.shutdown(&self.handle.inner);
}
#[cfg(all(feature = "rt-multi-thread", not(target_os = "wasi")))]
Scheduler::MultiThread(multi_thread) => {
// The threaded scheduler drops its tasks on its worker threads, which is
// already in the runtime's context.
multi_thread.shutdown(&self.handle.inner);
}
#[cfg(all(tokio_unstable, feature = "rt-multi-thread", not(target_os = "wasi")))]
Scheduler::MultiThreadAlt(multi_thread) => {
// The threaded scheduler drops its tasks on its worker threads, which is
// already in the runtime's context.
multi_thread.shutdown(&self.handle.inner);
}
}
}
}
cfg_metrics! {
impl Runtime {
/// TODO
pub fn metrics(&self) -> crate::runtime::RuntimeMetrics {
self.handle.metrics()
}
}
} | ///
/// [mod]: index.html
/// [main]: ../attr.main.html
/// [threaded scheduler]: index.html#threaded-scheduler | random_line_split |
runtime.rs | use crate::runtime::blocking::BlockingPool;
use crate::runtime::scheduler::CurrentThread;
use crate::runtime::{context, EnterGuard, Handle};
use crate::task::JoinHandle;
use std::future::Future;
use std::time::Duration;
cfg_rt_multi_thread! {
use crate::runtime::Builder;
use crate::runtime::scheduler::MultiThread;
cfg_unstable! {
use crate::runtime::scheduler::MultiThreadAlt;
}
}
/// The Tokio runtime.
///
/// The runtime provides an I/O driver, task scheduler, [timer], and
/// blocking pool, necessary for running asynchronous tasks.
///
/// Instances of `Runtime` can be created using [`new`], or [`Builder`].
/// However, most users will use the `#[tokio::main]` annotation on their
/// entry point instead.
///
/// See [module level][mod] documentation for more details.
///
/// # Shutdown
///
/// Shutting down the runtime is done by dropping the value, or calling
/// [`shutdown_background`] or [`shutdown_timeout`].
///
/// Tasks spawned through [`Runtime::spawn`] keep running until they yield.
/// Then they are dropped. They are not *guaranteed* to run to completion, but
/// *might* do so if they do not yield until completion.
///
/// Blocking functions spawned through [`Runtime::spawn_blocking`] keep running
/// until they return.
///
/// The thread initiating the shutdown blocks until all spawned work has been
/// stopped. This can take an indefinite amount of time. The `Drop`
/// implementation waits forever for this.
///
/// The [`shutdown_background`] and [`shutdown_timeout`] methods can be used if
/// waiting forever is undesired. When the timeout is reached, spawned work that
/// did not stop in time and threads running it are leaked. The work continues
/// to run until one of the stopping conditions is fulfilled, but the thread
/// initiating the shutdown is unblocked.
///
/// Once the runtime has been dropped, any outstanding I/O resources bound to
/// it will no longer function. Calling any method on them will result in an
/// error.
///
/// # Sharing
///
/// There are several ways to establish shared access to a Tokio runtime:
///
/// * Using an <code>[Arc]\<Runtime></code>.
/// * Using a [`Handle`].
/// * Entering the runtime context.
///
/// Using an <code>[Arc]\<Runtime></code> or [`Handle`] allows you to do various
/// things with the runtime such as spawning new tasks or entering the runtime
/// context. Both types can be cloned to create a new handle that allows access
/// to the same runtime. By passing clones into different tasks or threads, you
/// will be able to access the runtime from those tasks or threads.
///
/// The difference between <code>[Arc]\<Runtime></code> and [`Handle`] is that
/// an <code>[Arc]\<Runtime></code> will prevent the runtime from shutting down,
/// whereas a [`Handle`] does not prevent that. This is because shutdown of the
/// runtime happens when the destructor of the `Runtime` object runs.
///
/// Calls to [`shutdown_background`] and [`shutdown_timeout`] require exclusive
/// ownership of the `Runtime` type. When using an <code>[Arc]\<Runtime></code>,
/// this can be achieved via [`Arc::try_unwrap`] when only one strong count
/// reference is left over.
///
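/// For example, reclaiming exclusive ownership before shutdown (a minimal
/// sketch):
///
/// ```
/// use std::sync::Arc;
/// use tokio::runtime::Runtime;
///
/// let rt = Arc::new(Runtime::new().unwrap());
/// // ... once every other clone has been dropped ...
/// if let Ok(rt) = Arc::try_unwrap(rt) {
///     rt.shutdown_background();
/// }
/// ```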
/// The runtime context is entered using the [`Runtime::enter`] or
/// [`Handle::enter`] methods, which use a thread-local variable to store the
/// current runtime. Whenever you are inside the runtime context, methods such
/// as [`tokio::spawn`] will use the runtime whose context you are inside.
///
/// [timer]: crate::time
/// [mod]: index.html
/// [`new`]: method@Self::new
/// [`Builder`]: struct@Builder
/// [`Handle`]: struct@Handle
/// [`tokio::spawn`]: crate::spawn
/// [`Arc::try_unwrap`]: std::sync::Arc::try_unwrap
/// [Arc]: std::sync::Arc
/// [`shutdown_background`]: method@Runtime::shutdown_background
/// [`shutdown_timeout`]: method@Runtime::shutdown_timeout
#[derive(Debug)]
pub struct Runtime {
/// Task scheduler
scheduler: Scheduler,
/// Handle to runtime, also contains driver handles
handle: Handle,
/// Blocking pool handle, used to signal shutdown
blocking_pool: BlockingPool,
}
/// The flavor of a `Runtime`.
///
/// This is the return type for [`Handle::runtime_flavor`](crate::runtime::Handle::runtime_flavor()).
#[derive(Debug, PartialEq, Eq)]
#[non_exhaustive]
pub enum RuntimeFlavor {
/// The flavor that executes all tasks on the current thread.
CurrentThread,
/// The flavor that executes tasks across multiple threads.
MultiThread,
/// The flavor that executes tasks across multiple threads.
#[cfg(tokio_unstable)]
MultiThreadAlt,
}
/// The runtime scheduler is either a multi-thread or a current-thread executor.
#[derive(Debug)]
pub(super) enum Scheduler {
/// Execute all tasks on the current-thread.
CurrentThread(CurrentThread),
/// Execute tasks across multiple threads.
#[cfg(all(feature = "rt-multi-thread", not(target_os = "wasi")))]
MultiThread(MultiThread),
/// Execute tasks across multiple threads.
#[cfg(all(tokio_unstable, feature = "rt-multi-thread", not(target_os = "wasi")))]
MultiThreadAlt(MultiThreadAlt),
}
impl Runtime {
pub(super) fn | (
scheduler: Scheduler,
handle: Handle,
blocking_pool: BlockingPool,
) -> Runtime {
Runtime {
scheduler,
handle,
blocking_pool,
}
}
cfg_not_wasi! {
/// Creates a new runtime instance with default configuration values.
///
/// This results in the multi threaded scheduler, I/O driver, and time driver being
/// initialized.
///
/// Most applications will not need to call this function directly. Instead,
/// they will use the [`#[tokio::main]` attribute][main]. When a more complex
/// configuration is necessary, the [runtime builder] may be used.
///
/// See [module level][mod] documentation for more details.
///
/// # Examples
///
/// Creating a new `Runtime` with default configuration values.
///
/// ```
/// use tokio::runtime::Runtime;
///
/// let rt = Runtime::new()
/// .unwrap();
///
/// // Use the runtime...
/// ```
///
/// [mod]: index.html
/// [main]: ../attr.main.html
/// [threaded scheduler]: index.html#threaded-scheduler
/// [runtime builder]: crate::runtime::Builder
#[cfg(feature = "rt-multi-thread")]
#[cfg_attr(docsrs, doc(cfg(feature = "rt-multi-thread")))]
pub fn new() -> std::io::Result<Runtime> {
Builder::new_multi_thread().enable_all().build()
}
}
/// Returns a handle to the runtime's spawner.
///
/// The returned handle can be used to spawn tasks that run on this runtime, and can
/// be cloned to allow moving the `Handle` to other threads.
///
/// Calling [`Handle::block_on`] on a handle to a `current_thread` runtime is error-prone.
/// Refer to the documentation of [`Handle::block_on`] for more.
///
/// # Examples
///
/// ```
/// use tokio::runtime::Runtime;
///
/// let rt = Runtime::new()
/// .unwrap();
///
/// let handle = rt.handle();
///
/// // Use the handle...
/// ```
pub fn handle(&self) -> &Handle {
&self.handle
}
/// Spawns a future onto the Tokio runtime.
///
/// This spawns the given future onto the runtime's executor, usually a
/// thread pool. The thread pool is then responsible for polling the future
/// until it completes.
///
/// The provided future will start running in the background immediately
/// when `spawn` is called, even if you don't await the returned
/// `JoinHandle`.
///
/// See [module level][mod] documentation for more details.
///
/// [mod]: index.html
///
/// # Examples
///
/// ```
/// use tokio::runtime::Runtime;
///
/// # fn dox() {
/// // Create the runtime
/// let rt = Runtime::new().unwrap();
///
/// // Spawn a future onto the runtime
/// rt.spawn(async {
/// println!("now running on a worker thread");
/// });
/// # }
/// ```
#[track_caller]
pub fn spawn<F>(&self, future: F) -> JoinHandle<F::Output>
where
F: Future + Send + 'static,
F::Output: Send + 'static,
{
self.handle.spawn(future)
}
/// Runs the provided function on an executor dedicated to blocking operations.
///
/// # Examples
///
/// ```
/// use tokio::runtime::Runtime;
///
/// # fn dox() {
/// // Create the runtime
/// let rt = Runtime::new().unwrap();
///
/// // Spawn a blocking function onto the runtime
/// rt.spawn_blocking(|| {
/// println!("now running on a worker thread");
/// });
/// # }
/// ```
#[track_caller]
pub fn spawn_blocking<F, R>(&self, func: F) -> JoinHandle<R>
where
F: FnOnce() -> R + Send + 'static,
R: Send + 'static,
{
self.handle.spawn_blocking(func)
}
/// Runs a future to completion on the Tokio runtime. This is the
/// runtime's entry point.
///
/// This runs the given future on the current thread, blocking until it is
/// complete, and yielding its resolved result. Any tasks or timers
/// which the future spawns internally will be executed on the runtime.
///
/// # Non-worker future
///
/// Note that the future required by this function does not run as a
/// worker. The expectation is that other tasks are spawned by the future here.
/// Awaiting on other futures from the future provided here will not
/// perform as fast as those spawned as workers.
///
/// # Multi thread scheduler
///
/// When the multi thread scheduler is used this will allow futures
/// to run within the I/O driver and timer context of the overall runtime.
///
/// Any spawned tasks will continue running after `block_on` returns.
///
/// # Current thread scheduler
///
/// When the current thread scheduler is enabled `block_on`
/// can be called concurrently from multiple threads. The first call
/// will take ownership of the I/O and timer drivers. This means
/// other threads which do not own the drivers will hook into that one.
/// When the first `block_on` completes, other threads will be able to
/// "steal" the driver to allow continued execution of their futures.
///
/// Any spawned tasks will be suspended after `block_on` returns. Calling
/// `block_on` again will resume previously spawned tasks.
///
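/// For example, two threads calling `block_on` on a shared `current_thread`
/// runtime (a minimal sketch):
///
/// ```
/// use std::sync::Arc;
/// use tokio::runtime::Builder;
///
/// let rt = Arc::new(Builder::new_current_thread().build().unwrap());
/// let rt2 = Arc::clone(&rt);
/// let thread = std::thread::spawn(move || rt2.block_on(async { 2 }));
/// assert_eq!(1, rt.block_on(async { 1 }));
/// assert_eq!(2, thread.join().unwrap());
/// ```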
/// # Panics
///
/// This function panics if the provided future panics, or if called within an
/// asynchronous execution context.
///
/// # Examples
///
/// ```no_run
/// use tokio::runtime::Runtime;
///
/// // Create the runtime
/// let rt = Runtime::new().unwrap();
///
/// // Execute the future, blocking the current thread until completion
/// rt.block_on(async {
/// println!("hello");
/// });
/// ```
///
/// [handle]: fn@Handle::block_on
#[track_caller]
pub fn block_on<F: Future>(&self, future: F) -> F::Output {
#[cfg(all(
tokio_unstable,
tokio_taskdump,
feature = "rt",
target_os = "linux",
any(target_arch = "aarch64", target_arch = "x86", target_arch = "x86_64")
))]
let future = super::task::trace::Trace::root(future);
#[cfg(all(tokio_unstable, feature = "tracing"))]
let future = crate::util::trace::task(
future,
"block_on",
None,
crate::runtime::task::Id::next().as_u64(),
);
let _enter = self.enter();
match &self.scheduler {
Scheduler::CurrentThread(exec) => exec.block_on(&self.handle.inner, future),
#[cfg(all(feature = "rt-multi-thread", not(target_os = "wasi")))]
Scheduler::MultiThread(exec) => exec.block_on(&self.handle.inner, future),
#[cfg(all(tokio_unstable, feature = "rt-multi-thread", not(target_os = "wasi")))]
Scheduler::MultiThreadAlt(exec) => exec.block_on(&self.handle.inner, future),
}
}
/// Enters the runtime context.
///
/// This allows you to construct types that must have an executor
/// available on creation such as [`Sleep`] or [`TcpStream`]. It will
/// also allow you to call methods such as [`tokio::spawn`].
///
/// [`Sleep`]: struct@crate::time::Sleep
/// [`TcpStream`]: struct@crate::net::TcpStream
/// [`tokio::spawn`]: fn@crate::spawn
///
/// # Example
///
/// ```
/// use tokio::runtime::Runtime;
///
/// fn function_that_spawns(msg: String) {
/// // Had we not used `rt.enter` below, this would panic.
/// tokio::spawn(async move {
/// println!("{}", msg);
/// });
/// }
///
/// fn main() {
/// let rt = Runtime::new().unwrap();
///
/// let s = "Hello World!".to_string();
///
/// // By entering the context, we tie `tokio::spawn` to this executor.
/// let _guard = rt.enter();
/// function_that_spawns(s);
/// }
/// ```
pub fn enter(&self) -> EnterGuard<'_> {
self.handle.enter()
}
/// Shuts down the runtime, waiting for at most `duration` for all spawned
/// work to stop.
///
/// See the [struct level documentation](Runtime#shutdown) for more details.
///
/// # Examples
///
/// ```
/// use tokio::runtime::Runtime;
/// use tokio::task;
///
/// use std::thread;
/// use std::time::Duration;
///
/// fn main() {
/// let runtime = Runtime::new().unwrap();
///
/// runtime.block_on(async move {
/// task::spawn_blocking(move || {
/// thread::sleep(Duration::from_secs(10_000));
/// });
/// });
///
/// runtime.shutdown_timeout(Duration::from_millis(100));
/// }
/// ```
pub fn shutdown_timeout(mut self, duration: Duration) {
// Wakeup and shutdown all the worker threads
self.handle.inner.shutdown();
self.blocking_pool.shutdown(Some(duration));
}
/// Shuts down the runtime, without waiting for any spawned work to stop.
///
/// This can be useful if you want to drop a runtime from within another runtime.
/// Normally, dropping a runtime will block indefinitely for spawned blocking tasks
/// to complete, which would normally not be permitted within an asynchronous context.
/// By calling `shutdown_background()`, you can drop the runtime from such a context.
///
/// Note, however, that because we do not wait for any blocking tasks to complete, this
/// may result in a resource leak (in that any blocking tasks are still running until they
/// return).
///
/// See the [struct level documentation](Runtime#shutdown) for more details.
///
/// This function is equivalent to calling `shutdown_timeout(Duration::from_nanos(0))`.
///
/// ```
/// use tokio::runtime::Runtime;
///
/// fn main() {
/// let runtime = Runtime::new().unwrap();
///
/// runtime.block_on(async move {
/// let inner_runtime = Runtime::new().unwrap();
/// // ...
/// inner_runtime.shutdown_background();
/// });
/// }
/// ```
pub fn shutdown_background(self) {
self.shutdown_timeout(Duration::from_nanos(0))
}
}
#[allow(clippy::single_match)] // there are comments in the error branch, so we don't want if-let
impl Drop for Runtime {
fn drop(&mut self) {
match &mut self.scheduler {
Scheduler::CurrentThread(current_thread) => {
// This ensures that tasks spawned on the current-thread
// runtime are dropped inside the runtime's context.
let _guard = context::try_set_current(&self.handle.inner);
current_thread.shutdown(&self.handle.inner);
}
#[cfg(all(feature = "rt-multi-thread", not(target_os = "wasi")))]
Scheduler::MultiThread(multi_thread) => {
// The threaded scheduler drops its tasks on its worker threads, which is
// already in the runtime's context.
multi_thread.shutdown(&self.handle.inner);
}
#[cfg(all(tokio_unstable, feature = "rt-multi-thread", not(target_os = "wasi")))]
Scheduler::MultiThreadAlt(multi_thread) => {
// The threaded scheduler drops its tasks on its worker threads, which is
// already in the runtime's context.
multi_thread.shutdown(&self.handle.inner);
}
}
}
}
cfg_metrics! {
impl Runtime {
/// TODO
pub fn metrics(&self) -> crate::runtime::RuntimeMetrics {
self.handle.metrics()
}
}
}
| from_parts | identifier_name |
runtime.rs | use crate::runtime::blocking::BlockingPool;
use crate::runtime::scheduler::CurrentThread;
use crate::runtime::{context, EnterGuard, Handle};
use crate::task::JoinHandle;
use std::future::Future;
use std::time::Duration;
cfg_rt_multi_thread! {
use crate::runtime::Builder;
use crate::runtime::scheduler::MultiThread;
cfg_unstable! {
use crate::runtime::scheduler::MultiThreadAlt;
}
}
/// The Tokio runtime.
///
/// The runtime provides an I/O driver, task scheduler, [timer], and
/// blocking pool, necessary for running asynchronous tasks.
///
/// Instances of `Runtime` can be created using [`new`], or [`Builder`].
/// However, most users will use the `#[tokio::main]` annotation on their
/// entry point instead.
///
/// See [module level][mod] documentation for more details.
///
/// # Shutdown
///
/// Shutting down the runtime is done by dropping the value, or calling
/// [`shutdown_background`] or [`shutdown_timeout`].
///
/// Tasks spawned through [`Runtime::spawn`] keep running until they yield.
/// Then they are dropped. They are not *guaranteed* to run to completion, but
/// *might* do so if they do not yield until completion.
///
/// Blocking functions spawned through [`Runtime::spawn_blocking`] keep running
/// until they return.
///
/// The thread initiating the shutdown blocks until all spawned work has been
/// stopped. This can take an indefinite amount of time. The `Drop`
/// implementation waits forever for this.
///
/// The [`shutdown_background`] and [`shutdown_timeout`] methods can be used if
/// waiting forever is undesired. When the timeout is reached, spawned work that
/// did not stop in time and threads running it are leaked. The work continues
/// to run until one of the stopping conditions is fulfilled, but the thread
/// initiating the shutdown is unblocked.
///
/// Once the runtime has been dropped, any outstanding I/O resources bound to
/// it will no longer function. Calling any method on them will result in an
/// error.
///
/// # Sharing
///
/// There are several ways to establish shared access to a Tokio runtime:
///
/// * Using an <code>[Arc]\<Runtime></code>.
/// * Using a [`Handle`].
/// * Entering the runtime context.
///
/// Using an <code>[Arc]\<Runtime></code> or [`Handle`] allows you to do various
/// things with the runtime such as spawning new tasks or entering the runtime
/// context. Both types can be cloned to create a new handle that allows access
/// to the same runtime. By passing clones into different tasks or threads, you
/// will be able to access the runtime from those tasks or threads.
///
/// The difference between <code>[Arc]\<Runtime></code> and [`Handle`] is that
/// an <code>[Arc]\<Runtime></code> will prevent the runtime from shutting down,
/// whereas a [`Handle`] does not prevent that. This is because shutdown of the
/// runtime happens when the destructor of the `Runtime` object runs.
///
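/// For example, a [`Handle`] can be cloned and moved to another thread while
/// the `Runtime` itself stays put (a minimal sketch):
///
/// ```
/// use tokio::runtime::Runtime;
///
/// let rt = Runtime::new().unwrap();
/// let handle = rt.handle().clone();
/// std::thread::spawn(move || {
///     handle.spawn(async { /* runs on the runtime's workers */ });
/// }).join().unwrap();
/// ```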
/// Calls to [`shutdown_background`] and [`shutdown_timeout`] require exclusive
/// ownership of the `Runtime` type. When using an <code>[Arc]\<Runtime></code>,
/// this can be achieved via [`Arc::try_unwrap`] when only one strong count
/// reference is left over.
///
/// The runtime context is entered using the [`Runtime::enter`] or
/// [`Handle::enter`] methods, which use a thread-local variable to store the
/// current runtime. Whenever you are inside the runtime context, methods such
/// as [`tokio::spawn`] will use the runtime whose context you are inside.
///
/// [timer]: crate::time
/// [mod]: index.html
/// [`new`]: method@Self::new
/// [`Builder`]: struct@Builder
/// [`Handle`]: struct@Handle
/// [`tokio::spawn`]: crate::spawn
/// [`Arc::try_unwrap`]: std::sync::Arc::try_unwrap
/// [Arc]: std::sync::Arc
/// [`shutdown_background`]: method@Runtime::shutdown_background
/// [`shutdown_timeout`]: method@Runtime::shutdown_timeout
#[derive(Debug)]
pub struct Runtime {
/// Task scheduler
scheduler: Scheduler,
/// Handle to runtime, also contains driver handles
handle: Handle,
/// Blocking pool handle, used to signal shutdown
blocking_pool: BlockingPool,
}
/// The flavor of a `Runtime`.
///
/// This is the return type for [`Handle::runtime_flavor`](crate::runtime::Handle::runtime_flavor()).
#[derive(Debug, PartialEq, Eq)]
#[non_exhaustive]
pub enum RuntimeFlavor {
/// The flavor that executes all tasks on the current thread.
CurrentThread,
/// The flavor that executes tasks across multiple threads.
MultiThread,
/// The flavor that executes tasks across multiple threads.
#[cfg(tokio_unstable)]
MultiThreadAlt,
}
/// The runtime scheduler is either a multi-thread or a current-thread executor.
#[derive(Debug)]
pub(super) enum Scheduler {
/// Execute all tasks on the current-thread.
CurrentThread(CurrentThread),
/// Execute tasks across multiple threads.
#[cfg(all(feature = "rt-multi-thread", not(target_os = "wasi")))]
MultiThread(MultiThread),
/// Execute tasks across multiple threads.
#[cfg(all(tokio_unstable, feature = "rt-multi-thread", not(target_os = "wasi")))]
MultiThreadAlt(MultiThreadAlt),
}
impl Runtime {
pub(super) fn from_parts(
scheduler: Scheduler,
handle: Handle,
blocking_pool: BlockingPool,
) -> Runtime {
Runtime {
scheduler,
handle,
blocking_pool,
}
}
cfg_not_wasi! {
/// Creates a new runtime instance with default configuration values.
///
/// This results in the multi threaded scheduler, I/O driver, and time driver being
/// initialized.
///
/// Most applications will not need to call this function directly. Instead,
/// they will use the [`#[tokio::main]` attribute][main]. When a more complex
/// configuration is necessary, the [runtime builder] may be used.
///
/// See [module level][mod] documentation for more details.
///
/// # Examples
///
/// Creating a new `Runtime` with default configuration values.
///
/// ```
/// use tokio::runtime::Runtime;
///
/// let rt = Runtime::new()
/// .unwrap();
///
/// // Use the runtime...
/// ```
///
/// [mod]: index.html
/// [main]: ../attr.main.html
/// [threaded scheduler]: index.html#threaded-scheduler
/// [runtime builder]: crate::runtime::Builder
#[cfg(feature = "rt-multi-thread")]
#[cfg_attr(docsrs, doc(cfg(feature = "rt-multi-thread")))]
pub fn new() -> std::io::Result<Runtime> {
Builder::new_multi_thread().enable_all().build()
}
}
/// Returns a handle to the runtime's spawner.
///
/// The returned handle can be used to spawn tasks that run on this runtime, and can
/// be cloned to allow moving the `Handle` to other threads.
///
/// Calling [`Handle::block_on`] on a handle to a `current_thread` runtime is error-prone.
/// Refer to the documentation of [`Handle::block_on`] for more.
///
/// # Examples
///
/// ```
/// use tokio::runtime::Runtime;
///
/// let rt = Runtime::new()
/// .unwrap();
///
/// let handle = rt.handle();
///
/// // Use the handle...
/// ```
pub fn handle(&self) -> &Handle {
&self.handle
}
/// Spawns a future onto the Tokio runtime.
///
/// This spawns the given future onto the runtime's executor, usually a
/// thread pool. The thread pool is then responsible for polling the future
/// until it completes.
///
/// The provided future will start running in the background immediately
/// when `spawn` is called, even if you don't await the returned
/// `JoinHandle`.
///
/// See [module level][mod] documentation for more details.
///
/// [mod]: index.html
///
/// # Examples
///
/// ```
/// use tokio::runtime::Runtime;
///
/// # fn dox() {
/// // Create the runtime
/// let rt = Runtime::new().unwrap();
///
/// // Spawn a future onto the runtime
/// rt.spawn(async {
/// println!("now running on a worker thread");
/// });
/// # }
/// ```
#[track_caller]
pub fn spawn<F>(&self, future: F) -> JoinHandle<F::Output>
where
F: Future + Send + 'static,
F::Output: Send + 'static,
{
self.handle.spawn(future)
}
/// Runs the provided function on an executor dedicated to blocking operations.
///
/// # Examples
///
/// ```
/// use tokio::runtime::Runtime;
///
/// # fn dox() {
/// // Create the runtime
/// let rt = Runtime::new().unwrap();
///
/// // Spawn a blocking function onto the runtime
/// rt.spawn_blocking(|| {
/// println!("now running on a worker thread");
/// });
/// # }
/// ```
#[track_caller]
pub fn spawn_blocking<F, R>(&self, func: F) -> JoinHandle<R>
where
F: FnOnce() -> R + Send + 'static,
R: Send + 'static,
{
self.handle.spawn_blocking(func)
}
/// Runs a future to completion on the Tokio runtime. This is the
/// runtime's entry point.
///
/// This runs the given future on the current thread, blocking until it is
/// complete, and yielding its resolved result. Any tasks or timers
/// which the future spawns internally will be executed on the runtime.
///
/// # Non-worker future
///
/// Note that the future required by this function does not run as a
/// worker. The expectation is that other tasks are spawned by the future here.
/// Awaiting on other futures from the future provided here will not
/// perform as fast as those spawned as workers.
///
/// # Multi thread scheduler
///
/// When the multi thread scheduler is used this will allow futures
/// to run within the io driver and timer context of the overall runtime.
///
/// Any spawned tasks will continue running after `block_on` returns.
///
/// # Current thread scheduler
///
/// When the current thread scheduler is enabled `block_on`
/// can be called concurrently from multiple threads. The first call
/// will take ownership of the io and timer drivers. This means
/// other threads which do not own the drivers will hook into that one.
/// When the first `block_on` completes, other threads will be able to
/// "steal" the driver to allow continued execution of their futures.
///
/// Any spawned tasks will be suspended after `block_on` returns. Calling
/// `block_on` again will resume previously spawned tasks.
///
/// # Panics
///
/// This function panics if the provided future panics, or if called within an
/// asynchronous execution context.
///
/// # Examples
///
/// ```no_run
/// use tokio::runtime::Runtime;
///
/// // Create the runtime
/// let rt = Runtime::new().unwrap();
///
/// // Execute the future, blocking the current thread until completion
/// rt.block_on(async {
/// println!("hello");
/// });
/// ```
///
/// [handle]: fn@Handle::block_on
#[track_caller]
pub fn block_on<F: Future>(&self, future: F) -> F::Output {
#[cfg(all(
tokio_unstable,
tokio_taskdump,
feature = "rt",
target_os = "linux",
any(target_arch = "aarch64", target_arch = "x86", target_arch = "x86_64")
))]
let future = super::task::trace::Trace::root(future);
#[cfg(all(tokio_unstable, feature = "tracing"))]
let future = crate::util::trace::task(
future,
"block_on",
None,
crate::runtime::task::Id::next().as_u64(),
);
let _enter = self.enter();
match &self.scheduler {
Scheduler::CurrentThread(exec) => exec.block_on(&self.handle.inner, future),
#[cfg(all(feature = "rt-multi-thread", not(target_os = "wasi")))]
Scheduler::MultiThread(exec) => exec.block_on(&self.handle.inner, future),
#[cfg(all(tokio_unstable, feature = "rt-multi-thread", not(target_os = "wasi")))]
Scheduler::MultiThreadAlt(exec) => exec.block_on(&self.handle.inner, future),
}
}
/// Enters the runtime context.
///
/// This allows you to construct types that must have an executor
/// available on creation such as [`Sleep`] or [`TcpStream`]. It will
/// also allow you to call methods such as [`tokio::spawn`].
///
/// [`Sleep`]: struct@crate::time::Sleep
/// [`TcpStream`]: struct@crate::net::TcpStream
/// [`tokio::spawn`]: fn@crate::spawn
///
/// # Example
///
/// ```
/// use tokio::runtime::Runtime;
///
/// fn function_that_spawns(msg: String) {
/// // Had we not used `rt.enter` below, this would panic.
/// tokio::spawn(async move {
/// println!("{}", msg);
/// });
/// }
///
/// fn main() {
/// let rt = Runtime::new().unwrap();
///
/// let s = "Hello World!".to_string();
///
/// // By entering the context, we tie `tokio::spawn` to this executor.
/// let _guard = rt.enter();
/// function_that_spawns(s);
/// }
/// ```
pub fn enter(&self) -> EnterGuard<'_> {
self.handle.enter()
}
/// Shuts down the runtime, waiting for at most `duration` for all spawned
/// work to stop.
///
/// See the [struct level documentation](Runtime#shutdown) for more details.
///
/// # Examples
///
/// ```
/// use tokio::runtime::Runtime;
/// use tokio::task;
///
/// use std::thread;
/// use std::time::Duration;
///
/// fn main() {
/// let runtime = Runtime::new().unwrap();
///
/// runtime.block_on(async move {
/// task::spawn_blocking(move || {
/// thread::sleep(Duration::from_secs(10_000));
/// });
/// });
///
/// runtime.shutdown_timeout(Duration::from_millis(100));
/// }
/// ```
pub fn shutdown_timeout(mut self, duration: Duration) {
// Wakeup and shutdown all the worker threads
self.handle.inner.shutdown();
self.blocking_pool.shutdown(Some(duration));
}
/// Shuts down the runtime, without waiting for any spawned work to stop.
///
/// This can be useful if you want to drop a runtime from within another runtime.
    /// Normally, dropping a runtime will block indefinitely, waiting for spawned blocking tasks
    /// to complete, which would normally not be permitted within an asynchronous context.
/// By calling `shutdown_background()`, you can drop the runtime from such a context.
///
    /// Note, however, that because we do not wait for any blocking tasks to complete,
    /// this may result in a resource leak (in that any blocking tasks are still running
    /// until they return).
///
/// See the [struct level documentation](Runtime#shutdown) for more details.
///
/// This function is equivalent to calling `shutdown_timeout(Duration::from_nanos(0))`.
///
/// ```
/// use tokio::runtime::Runtime;
///
/// fn main() {
/// let runtime = Runtime::new().unwrap();
///
/// runtime.block_on(async move {
/// let inner_runtime = Runtime::new().unwrap();
/// // ...
/// inner_runtime.shutdown_background();
/// });
/// }
/// ```
pub fn shutdown_background(self) {
self.shutdown_timeout(Duration::from_nanos(0))
}
}
#[allow(clippy::single_match)] // there are comments in the error branch, so we don't want if-let
impl Drop for Runtime {
fn drop(&mut self) |
}
cfg_metrics! {
impl Runtime {
        /// Returns a view that lets you get information about how the runtime is performing.
pub fn metrics(&self) -> crate::runtime::RuntimeMetrics {
self.handle.metrics()
}
}
}
| {
match &mut self.scheduler {
Scheduler::CurrentThread(current_thread) => {
// This ensures that tasks spawned on the current-thread
// runtime are dropped inside the runtime's context.
let _guard = context::try_set_current(&self.handle.inner);
current_thread.shutdown(&self.handle.inner);
}
#[cfg(all(feature = "rt-multi-thread", not(target_os = "wasi")))]
Scheduler::MultiThread(multi_thread) => {
// The threaded scheduler drops its tasks on its worker threads, which is
// already in the runtime's context.
multi_thread.shutdown(&self.handle.inner);
}
#[cfg(all(tokio_unstable, feature = "rt-multi-thread", not(target_os = "wasi")))]
Scheduler::MultiThreadAlt(multi_thread) => {
// The threaded scheduler drops its tasks on its worker threads, which is
// already in the runtime's context.
multi_thread.shutdown(&self.handle.inner);
}
}
} | identifier_body |
multiuser.go | package minidb
import (
"bytes"
"crypto/rand"
"fmt"
"os"
"path/filepath"
"regexp"
"time"
"github.com/rasteric/packdir"
"golang.org/x/crypto/argon2"
"golang.org/x/crypto/blake2b"
)
// Params contains all the parameters that are used by a multiuser database.
type Params struct {
Argon2Memory uint32
Argon2Iterations uint32
Argon2Parallelism uint8
KeyLength uint32
InternalSaltLength uint32
ExternalSaltLength uint32
}
// DefaultParams returns parameters with reasonable default values that are safe to use.
// Be aware that default parameters may change from release to release to reflect
// updates and changes in security requirements.
func DefaultParams() *Params {
p := Params{
KeyLength: 512,
InternalSaltLength: 256,
ExternalSaltLength: 256,
Argon2Memory: 64 * 1024,
Argon2Iterations: 3,
Argon2Parallelism: 4}
return &p
}
func (p *Params) validate() bool {
if p.KeyLength >= 64 && p.InternalSaltLength >= 32 &&
p.ExternalSaltLength >= 32 && p.Argon2Memory >= 16*1024 && p.Argon2Iterations >= 2 {
return true
}
return false
}
// User represents a user.
type User struct {
name string
id Item
}
// Name returns the name of the user.
func (u *User) Name() string {
return u.name
}
// ID returns the ID of the user.
func (u *User) ID() Item {
return u.id
}
// MultiDB contains all information needed to manage multiple user DBs, except for the parameters
// and context-specific information such as passwords.
type MultiDB struct {
basepath string
driver string
system *MDB
userdbs map[Item]*MDB
}
// NewMultiDB returns a new multi user database.
func NewMultiDB(basedir string, driver string) (*MultiDB, error) {
d := filepath.Clean(basedir)
if !validDir(d) {
return nil, Fail(`the base directory "%s" does not exist or has incorrect permissions`, d)
}
db := MultiDB{basepath: basedir}
thedb := &db
sys, err := Open(driver, thedb.systemDBFile())
if err != nil {
return nil, err
}
thedb.system = sys
thedb.driver = driver
thedb.userdbs = make(map[Item]*MDB)
err = sys.AddTable("User",
[]Field{Field{Name: "Username", Sort: DBString},
Field{Name: "Email", Sort: DBString},
Field{Name: "Key", Sort: DBBlob},
Field{Name: "ExternalSalt", Sort: DBBlob},
Field{Name: "InternalSalt", Sort: DBBlob},
Field{Name: "Created", Sort: DBDate},
Field{Name: "Modified", Sort: DBDate}})
if err != nil {
return nil, Fail(`could not create user table: %s`, err)
}
return thedb, nil
}
// Begin a transaction.
func (m *MultiDB) Begin() (*Tx, error) {
return m.system.Begin()
}
// UserDir returns the given user's directory where the user database is stored.
func (m *MultiDB) UserDir(user *User) string {
return filepath.Join(m.basepath, user.name)
}
// BaseDir returns the base directory of the multiuser database. This directory contains databases
// for all users.
func (m *MultiDB) BaseDir() string {
return m.basepath
}
func (m *MultiDB) userFile(user *User, file string) string {
return filepath.Join(m.UserDir(user), file)
}
func (m *MultiDB) userDBFile(user *User) string {
return m.userFile(user, "data.sqlite")
}
func (m *MultiDB) systemDBFile() string {
return filepath.Join(m.BaseDir(), "system.sqlite")
}
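// validUserName reports whether name is an acceptable user name. For
// illustration (an assumption, not part of the original comments): the pattern
// accepts names such as "alice", "Bob_2", or "müller42", and rejects names
// such as "2fast", "a-b", or "".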
func validUserName(name string) bool {
var validUser = regexp.MustCompile(`^\p{L}+[_0-9\p{L}]*$`)
return validUser.MatchString(name)
}
func validDir(dir string) bool {
if _, err := os.Stat(dir); os.IsNotExist(err) {
return false
}
return true
}
// CreateDirIfNotExist creates a directory, including all parent directories needed,
// or returns an error.
func CreateDirIfNotExist(dir string) error {
if _, err := os.Stat(dir); os.IsNotExist(err) {
err = os.MkdirAll(dir, 0755)
if err != nil {
return err
}
}
return nil
}
func validateUser(name string, basedir string) error {
if !validUserName(name) {
return Fail(`invalid user name "%s"`, name)
}
if !validDir(basedir) {
return Fail(`the base directory for user "%s" does not exist: %s`, name, basedir)
}
src, err := os.Stat(basedir)
if err != nil {
return err
}
if !src.IsDir() {
return Fail(`not a directory: %s`, basedir)
}
return nil
}
// ErrCode values represent errors as plain numeric codes instead of error structures.
type ErrCode int
// Error codes returned by the functions.
const (
ErrAuthenticationFailed ErrCode = iota + 1 // User authentication has failed (wrong password).
OK // No error has occurred.
ErrUsernameInUse // The user name is already being used.
ErrEmailInUse // The email is already being used.
ErrCryptoRandFailure // The random number generator has failed.
ErrInvalidParams // One or more parameters were invalid.
ErrUnknownUser // The user is not known.
ErrNotEnoughSalt // Insufficiently long salt has been supplied.
ErrInvalidUser // The user name or email is invalid.
ErrDBClosed // The internal housekeeping DB is locked, corrupted, or closed.
ErrDBFail // A database operation has failed.
ErrFileSystem // A directory or file could not be created.
ErrNoHome // The user's DB home directory does not exist.
ErrCloseFailed // Could not close the user database.
ErrOpenFailed // Could not open the user database.
ErrPackFail // Compressing user data failed.
ErrInvalidKey // A given salted key is invalid (either nil, or other problems).
ErrTransactionFail // Could not perform op because of a failed transaction.
)
func (m *MultiDB) isExisting(field, query string) bool {
q, _ := ParseQuery(fmt.Sprintf("User %s=%s", field, query))
results, err := m.system.Find(q, 1)
if err != nil || len(results) < 1 {
return false
}
return true
}
func (m *MultiDB) | (username string) Item {
q, err := ParseQuery(fmt.Sprintf("User Username=%s", username))
if err != nil {
return 0
}
results, err := m.system.Find(q, 1)
if err != nil || len(results) != 1 {
return 0
}
return results[0]
}
// ExistingUser returns true if a user with the given user name exists, false otherwise.
func (m *MultiDB) ExistingUser(username string) bool {
result := m.isExisting("Username", username)
return result
}
// ExistingEmail returns true if a user with this email address exists, false otherwise.
func (m *MultiDB) ExistingEmail(email string) bool {
result := m.isExisting("Email", email)
return result
}
// NewUser creates a new user with the given username, email, and salted key (derived from the
// password). Based on a strong salt that is only used internally and the Argon2 algorithm with
// the given parameters, an internal key is created and stored in the internal database. The
// user and OK are returned unless an error has occurred. The ErrCode returned makes it easier
// to distinguish certain cases: ErrEmailInUse - the email has already been registered;
// ErrUsernameInUse - a user with the same user name has already been registered. Both emails
// and usernames must be unique and cannot be registered twice.
func (m *MultiDB) NewUser(username, email string, key *saltedKey) (*User, ErrCode, error) {
// validate inputs
if err := validateUser(username, m.BaseDir()); err != nil {
return nil, ErrInvalidUser, err
}
reply, err := key.validate()
if err != nil || reply != OK {
return nil, reply, err
}
user := User{name: username}
if m.system == nil {
return nil, ErrDBClosed, Fail(`internal DB is nil`)
}
// check if user and email exist
if m.ExistingUser(username) {
return nil, ErrUsernameInUse, Fail(`user "%s" already exists!`, username)
}
if m.ExistingEmail(email) {
return nil, ErrEmailInUse, Fail(`email "%s" is already in use!`, email)
}
// now start adding the user
user.id, err = m.system.NewItem("User")
if err != nil {
return nil, ErrDBFail, err
}
tx, err := m.Begin()
if err != nil {
return nil, ErrTransactionFail, err
}
if err := tx.Set("User", user.id, "Username", []Value{NewString(username)}); err != nil {
return nil, ErrDBFail, err
}
if err := tx.Set("User", user.id, "Email", []Value{NewString(email)}); err != nil {
return nil, ErrDBFail, err
}
salt := make([]byte, key.p.InternalSaltLength)
n, err := rand.Read(salt)
if uint32(n) != key.p.InternalSaltLength || err != nil {
return nil, ErrCryptoRandFailure, Fail(`random number generator failed to generate salt`)
}
if err := tx.Set("User", user.id, "InternalSalt", []Value{NewBytes(salt)}); err != nil {
return nil, ErrDBFail, Fail(`could not store salt in multiuser database: %s`, err)
}
realkey := argon2.IDKey(key.pwd,
salt, key.p.Argon2Iterations, key.p.Argon2Memory,
key.p.Argon2Parallelism, key.p.KeyLength)
if err := tx.Set("User", user.id, "Key", []Value{NewBytes(realkey)}); err != nil {
return nil, ErrDBFail, Fail(`could not store key in multiuser database: %s`, err)
}
if err := tx.Set("User", user.id, "ExternalSalt", []Value{NewBytes(key.sel)}); err != nil {
return nil, ErrDBFail, Fail(`could not store the external salt in multiuser database: %s`, err)
}
now := NewDate(time.Now())
if err := tx.Set("User", user.id, "Created", []Value{now}); err != nil {
return nil, ErrDBFail, err
}
if err := tx.Set("User", user.id, "Modified", []Value{now}); err != nil {
return nil, ErrDBFail, err
}
if err := tx.Commit(); err != nil {
return nil, ErrDBFail, Fail(`multiuser database error: %s`, err)
}
dirpath := m.UserDir(&user)
err = CreateDirIfNotExist(dirpath)
if err != nil {
return nil, ErrFileSystem, err
}
return &user, OK, nil
}
// ExternalSalt is the salt associated with a user. It is stored in the database and may
// be used for hashing the password prior to authentication. The external salt is not used
// for internal key derivation.
func (m *MultiDB) ExternalSalt(username string) ([]byte, ErrCode, error) {
if !m.ExistingUser(username) {
return nil, ErrUnknownUser, Fail(`unknown user "%s"`, username)
}
tx, err := m.Begin()
if err != nil {
return nil, ErrTransactionFail, Fail(`transaction failed: %s`, err)
}
defer tx.Rollback()
id := m.userID(username)
if id == 0 {
return nil, ErrUnknownUser, Fail(`unknown user "%s"`, username)
}
result, err := m.system.Get("User", id, "ExternalSalt")
if err != nil || len(result) != 1 {
return nil, ErrNotEnoughSalt, Fail(`user "%s" salt not found, the user database might be corrupted`, username)
}
err = tx.Commit()
if err != nil {
return nil, ErrTransactionFail, Fail(`transaction commit failed: %s`, err)
}
return result[0].Bytes(), OK, nil
}
// GenerateExternalSalt returns some new external salt of the length specified in params.
// This salt should be passed to NewUser and can be used for passphrase hashing prior to
// calling NewUser. It is stored in the user database and can be retrieved as ExternalSalt.
func GenerateExternalSalt(params *Params) []byte {
salt := make([]byte, params.ExternalSaltLength)
n, err := rand.Read(salt)
if err != nil || uint32(n) < params.ExternalSaltLength {
return nil
}
return salt
}
type saltedKey struct {
pwd []byte
sel []byte
p *Params
}
func (key *saltedKey) validate() (ErrCode, error) {
if key == nil {
return ErrInvalidKey, Fail(`key is nil`)
}
if key.p == nil {
return ErrInvalidParams, Fail(`key parameters are nil`)
}
if key.pwd == nil {
return ErrInvalidKey, Fail(`key password is empty`)
}
if key.sel == nil {
return ErrNotEnoughSalt, Fail(`key salt is nil`)
}
if !key.p.validate() {
return ErrInvalidParams, Fail(`invalid parameters`)
}
if uint32(len(key.sel)) < key.p.ExternalSaltLength {
return ErrNotEnoughSalt, Fail(`external key salt length is less than required by key params`)
}
return OK, nil
}
// GenerateKey takes a password and some salt, and generates a salted key based on a 64-byte
// Blake2b-512 digest of the password. Use the ExternalSalt as salt and the original,
// unaltered password.
func GenerateKey(password string, salt []byte, params *Params) *saltedKey {
unsalted := blake2b.Sum512([]byte(password))
salted := saltedKey{pwd: append(salt, unsalted[:]...), sel: salt, p: params}
return &salted
}
// Authenticate a user by given name and salted password.
// Returns the user and OK if successful, otherwise nil, a numeric error code and the error.
// Notice that the external salt is not passed to this function. Instead, the password string
// should have been prepared (securely hashed, whitened, etc.) before calling this function
// on the basis of the user's ExternalSalt.
func (m *MultiDB) Authenticate(username string, key *saltedKey) (*User, ErrCode, error) {
if err := validateUser(username, m.BaseDir()); err != nil {
return nil, ErrInvalidUser, err
}
if !m.ExistingUser(username) {
return nil, ErrUnknownUser, Fail(`user "%s" does not exist`, username)
}
reply, err := key.validate()
if err != nil || reply != OK {
return nil, reply, err
}
user := User{name: username}
dirpath := m.UserDir(&user)
if _, err := os.Stat(dirpath); os.IsNotExist(err) {
return nil, ErrNoHome, Fail(`user "%s" home directory does not exist: %s`, username, dirpath)
}
user.id = m.userID(username)
if user.id == 0 {
return nil, ErrUnknownUser, Fail(`user "%s" does not exist`, username)
}
// get the strong salt and hash with it using argon2, compare to stored key
result, err := m.system.Get("User", user.id, "InternalSalt")
if err != nil || len(result) != 1 {
return nil, ErrNotEnoughSalt,
Fail(`user "%s"'s internal salt was not found, the user database might be corrupted`, username)
}
salt := result[0].Bytes()
if len(salt) != int(key.p.InternalSaltLength) {
return nil, ErrInvalidParams,
Fail(`invalid params, user "%s"'s internal salt length does not match internal salt length in params, given %d, expected %d`, username, len(salt), key.p.InternalSaltLength)
}
keyA := argon2.IDKey(key.pwd,
salt, key.p.Argon2Iterations, key.p.Argon2Memory,
key.p.Argon2Parallelism, key.p.KeyLength)
keyresult, err := m.system.Get("User", user.id, "Key")
if err != nil || len(keyresult) != 1 {
return nil, ErrAuthenticationFailed,
Fail(`user "%s" key was not found in user database, the database might be corrupted`, username)
}
keyB := keyresult[0].Bytes()
if !bytes.Equal(keyA, keyB) {
return nil, ErrAuthenticationFailed, Fail(`authentication failure`)
}
return &user, OK, nil
}
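// The following is a minimal usage sketch (illustrative, not part of the
// original API): it ties GenerateExternalSalt, GenerateKey, NewUser, and
// Authenticate together. The helper name exampleSignupAndLogin and the
// "sqlite3" driver string are assumptions and may differ in a real deployment.
func exampleSignupAndLogin(basedir, username, email, password string) error {
	mdb, err := NewMultiDB(basedir, "sqlite3")
	if err != nil {
		return err
	}
	defer mdb.Close()
	params := DefaultParams()
	// Signup: create fresh external salt and derive the salted key from it.
	salt := GenerateExternalSalt(params)
	if salt == nil {
		return Fail(`could not generate external salt`)
	}
	key := GenerateKey(password, salt, params)
	if _, code, err := mdb.NewUser(username, email, key); err != nil {
		return Fail(`signup failed (code %d): %s`, code, err)
	}
	// Login: fetch the stored external salt and re-derive the key.
	storedSalt, _, err := mdb.ExternalSalt(username)
	if err != nil {
		return err
	}
	loginKey := GenerateKey(password, storedSalt, params)
	user, _, err := mdb.Authenticate(username, loginKey)
	if err != nil {
		return err
	}
	fmt.Printf("authenticated %s (id %d)\n", user.Name(), int64(user.ID()))
	return nil
}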
// Close the MultiDB, closing the internal housekeeping and all open user databases.
func (m *MultiDB) Close() (ErrCode, error) {
errcount := 0
s := ""
for _, v := range m.userdbs {
if v != nil {
err := v.Close()
if err != nil {
s = fmt.Sprintf("%s, %s", s, err.Error())
errcount++
}
}
}
for k := range m.userdbs {
delete(m.userdbs, k)
}
if err := m.system.Close(); err != nil {
s = fmt.Sprintf("%s, %s", s, err.Error())
errcount++
}
if errcount > 0 {
return ErrCloseFailed, Fail(`errors closing multi user DB: %s`, s)
}
return OK, nil
}
// UserDB returns the database of the given user.
func (m *MultiDB) UserDB(user *User) (*MDB, ErrCode, error) {
var err error
if user.id == 0 {
return nil, ErrUnknownUser, Fail(`user "%s" does not exist`, user.name)
}
db := m.userdbs[user.id]
if db == nil {
db, err = Open(m.driver, m.userDBFile(user))
if err != nil {
return nil, ErrOpenFailed, err
}
}
return db, OK, nil
}
// DeleteUserContent deletes a user's content in the multiuser database, i.e., all the user data.
// This action cannot be undone.
func (m *MultiDB) DeleteUserContent(user *User) (ErrCode, error) {
db, _, _ := m.UserDB(user)
db.Close()
if err := removeContents(m.UserDir(user)); err != nil {
return ErrFileSystem, err
}
return OK, nil
}
// DeleteUser deletes a user and all associated user content from a multiuser database.
func (m *MultiDB) DeleteUser(user *User) (ErrCode, error) {
tx, err := m.Begin()
if err != nil {
return ErrTransactionFail, err
}
defer tx.Rollback()
if err := tx.RemoveItem("User", user.ID()); err != nil {
return ErrDBFail, err
}
errcode, err := m.DeleteUserContent(user)
delete(m.userdbs, user.ID())
if err != nil {
return errcode, err
}
err = tx.Commit()
if err != nil {
return ErrTransactionFail, err
}
return OK, nil
}
func removeContents(dir string) error {
d, err := os.Open(dir)
if err != nil {
return err
}
defer d.Close()
names, err := d.Readdirnames(-1)
if err != nil {
return err
}
for _, name := range names {
err = os.RemoveAll(filepath.Join(dir, name))
if err != nil {
return err
}
}
return nil
}
// Delete deletes the whole multiuser db including all housekeeping information and directories.
// This action cannot be undone.
func (m *MultiDB) Delete() (ErrCode, error) {
m.Close()
m.system.Close()
if err := removeContents(m.BaseDir()); err != nil {
return ErrFileSystem, err
}
return OK, nil
}
// ArchiveUser stores the user data in a packed zip file; it closes the user's database but
// does not remove the user. This can be used for backups or for archiving.
func (m *MultiDB) ArchiveUser(user *User, archivedir string) (ErrCode, error) {
db, reply, err := m.UserDB(user)
if err != nil {
return reply, err
}
if err := db.Close(); err != nil {
return ErrCloseFailed, err
}
source := m.UserDir(user)
filename := fmt.Sprintf("%s-%d_%s.multidb", user.Name(), int64(user.ID()), time.Now().UTC().Format(time.RFC3339))
result, err := packdir.Pack(source, filename, archivedir, packdir.GOOD_COMPRESSION, 0)
if err != nil {
return ErrPackFail, err
}
if result.ScanErrNum > 0 {
return ErrFileSystem,
Fail(`archiving failed, unable to pack %d files (insufficient permissions?)`, result.ScanErrNum)
}
if result.ArchiveErrNum > 0 {
return ErrPackFail,
Fail(`archiving failed, %d files were not properly archived (insufficient permissions?)`,
result.ArchiveErrNum)
}
delete(m.userdbs, user.ID())
return OK, nil
}
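// UserEmail returns the email address stored for the given user.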
func (m *MultiDB) UserEmail(user *User) (string, ErrCode, error) {
if user == nil || !m.ExistingUser(user.name) {
return "", ErrUnknownUser, Fail(`user "%s" does not exist`, user.name)
}
result, err := m.system.Get("User", user.ID(), "Email")
if err != nil {
return "", ErrUnknownUser, Fail(`user "%s" does not exist`, user.name)
}
if len(result) != 1 {
return "", ErrDBFail, Fail(`non unique result for user "%s" email`, user.name)
}
return result[0].String(), OK, nil
}
| userID | identifier_name |
multiuser.go | package minidb
import (
"bytes"
"crypto/rand"
"fmt"
"os"
"path/filepath"
"regexp"
"time"
"github.com/rasteric/packdir"
"golang.org/x/crypto/argon2"
"golang.org/x/crypto/blake2b"
)
// Params contains all the parameters that are used by a multiuser database.
type Params struct {
Argon2Memory uint32
Argon2Iterations uint32
Argon2Parallelism uint8
KeyLength uint32
InternalSaltLength uint32
ExternalSaltLength uint32
}
// DefaultParams returns parameters with reasonable default values that are safe to use.
// Be aware that default parameters may change from release to release to reflect
// updates and changes in security requirements.
func DefaultParams() *Params {
p := Params{
KeyLength: 512,
InternalSaltLength: 256,
ExternalSaltLength: 256,
Argon2Memory: 64 * 1024,
Argon2Iterations: 3,
Argon2Parallelism: 4}
return &p
}
func (p *Params) validate() bool {
if p.KeyLength >= 64 && p.InternalSaltLength >= 32 &&
p.ExternalSaltLength >= 32 && p.Argon2Memory >= 16*1024 && p.Argon2Iterations >= 2 {
return true
}
return false
}
// User represents a user.
type User struct {
name string
id Item
}
// Name returns the name of the user.
func (u *User) Name() string {
return u.name
}
// ID returns the ID of the user.
func (u *User) ID() Item {
return u.id
}
// MultiDB contains all information needed to manage multiple user DBs, except for the parameters
// and context-specific information such as passwords.
type MultiDB struct {
basepath string
driver string
system *MDB
userdbs map[Item]*MDB
}
// NewMultiDB returns a new multi user database.
func NewMultiDB(basedir string, driver string) (*MultiDB, error) {
d := filepath.Clean(basedir)
if !validDir(d) {
return nil, Fail(`the base directory "%s" does not exist or has incorrect permissions`, d)
}
db := MultiDB{basepath: basedir}
thedb := &db
sys, err := Open(driver, thedb.systemDBFile())
if err != nil {
return nil, err
}
thedb.system = sys
thedb.driver = driver
thedb.userdbs = make(map[Item]*MDB)
err = sys.AddTable("User",
[]Field{Field{Name: "Username", Sort: DBString},
Field{Name: "Email", Sort: DBString},
Field{Name: "Key", Sort: DBBlob},
Field{Name: "ExternalSalt", Sort: DBBlob},
Field{Name: "InternalSalt", Sort: DBBlob},
Field{Name: "Created", Sort: DBDate},
Field{Name: "Modified", Sort: DBDate}})
if err != nil {
return nil, Fail(`could not create user table: %s`, err)
}
return thedb, nil
}
// Begin a transaction.
func (m *MultiDB) Begin() (*Tx, error) {
return m.system.Begin()
}
// UserDir returns the given user's directory where the user database is stored.
func (m *MultiDB) UserDir(user *User) string {
return filepath.Join(m.basepath, user.name)
}
// BaseDir returns the base directory of the multiuser database. This directory contains databases
// for all users.
func (m *MultiDB) BaseDir() string {
return m.basepath
}
func (m *MultiDB) userFile(user *User, file string) string {
return filepath.Join(m.UserDir(user), file)
}
func (m *MultiDB) userDBFile(user *User) string {
return m.userFile(user, "data.sqlite")
}
func (m *MultiDB) systemDBFile() string {
return filepath.Join(m.BaseDir(), "system.sqlite")
}
func validUserName(name string) bool {
var validUser = regexp.MustCompile(`^\p{L}+[_0-9\p{L}]*$`)
return validUser.MatchString(name)
}
func validDir(dir string) bool {
if _, err := os.Stat(dir); os.IsNotExist(err) {
return false
}
return true
}
// CreateDirIfNotExist creates a directory, including all parent directories needed,
// or returns an error.
func CreateDirIfNotExist(dir string) error {
if _, err := os.Stat(dir); os.IsNotExist(err) {
err = os.MkdirAll(dir, 0755)
if err != nil {
return err
}
}
return nil
}
func validateUser(name string, basedir string) error {
if !validUserName(name) {
return Fail(`invalid user name "%s"`, name)
}
if !validDir(basedir) {
return Fail(`the base directory for user "%s" does not exist: %s`, name, basedir)
}
src, err := os.Stat(basedir)
if err != nil {
return err
}
if !src.IsDir() {
return Fail(`not a directory: %s`, basedir)
}
return nil
}
// ErrCode values represent errors as plain numeric codes instead of error structures.
type ErrCode int
// Error codes returned by the functions.
const (
ErrAuthenticationFailed ErrCode = iota + 1 // User authentication has failed (wrong password).
OK // No error has occurred.
ErrUsernameInUse // The user name is already being used.
ErrEmailInUse // The email is already being used.
ErrCryptoRandFailure // The random number generator has failed.
ErrInvalidParams // One or more parameters were invalid.
ErrUnknownUser // The user is not known.
ErrNotEnoughSalt // Insufficiently long salt has been supplied.
ErrInvalidUser // The user name or email is invalid.
ErrDBClosed // The internal housekeeping DB is locked, corrupted, or closed.
ErrDBFail // A database operation has failed.
ErrFileSystem // A directory or file could not be created.
ErrNoHome // The user's DB home directory does not exist.
ErrCloseFailed // Could not close the user database.
ErrOpenFailed // Could not open the user database.
ErrPackFail // Compressing user data failed.
ErrInvalidKey // A given salted key is invalid (either nil, or other problems).
ErrTransactionFail // Could not perform op because of a failed transaction.
)
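// A minimal sketch (illustrative, not part of the original code) of how a
// caller might branch on the ErrCode returned alongside the error; the helper
// name describeAuthResult is an assumption.
func describeAuthResult(code ErrCode, err error) string {
	switch code {
	case OK:
		return "signed in"
	case ErrAuthenticationFailed:
		return "wrong password"
	case ErrUnknownUser:
		return "no such user"
	default:
		return fmt.Sprintf("login failed: %v", err)
	}
}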
func (m *MultiDB) isExisting(field, query string) bool {
q, _ := ParseQuery(fmt.Sprintf("User %s=%s", field, query))
results, err := m.system.Find(q, 1)
if err != nil || len(results) < 1 {
return false
}
return true
}
func (m *MultiDB) userID(username string) Item {
q, err := ParseQuery(fmt.Sprintf("User Username=%s", username))
if err != nil {
return 0
}
results, err := m.system.Find(q, 1)
if err != nil || len(results) != 1 {
return 0
}
return results[0]
}
// ExistingUser returns true if a user with the given user name exists, false otherwise.
func (m *MultiDB) ExistingUser(username string) bool {
result := m.isExisting("Username", username)
return result
}
// ExistingEmail returns true if a user with this email address exists, false otherwise.
func (m *MultiDB) ExistingEmail(email string) bool {
result := m.isExisting("Email", email)
return result
}
// NewUser creates a new user with the given username, email, and salted key (derived from the
// password). Based on a strong salt that is only used internally and the Argon2 algorithm with
// the given parameters, an internal key is created and stored in the internal database. The
// user and OK are returned unless an error has occurred. The ErrCode returned makes it easier
// to distinguish certain cases: ErrEmailInUse - the email has already been registered;
// ErrUsernameInUse - a user with the same user name has already been registered. Both emails
// and usernames must be unique and cannot be registered twice.
func (m *MultiDB) NewUser(username, email string, key *saltedKey) (*User, ErrCode, error) {
// validate inputs
if err := validateUser(username, m.BaseDir()); err != nil {
return nil, ErrInvalidUser, err
}
reply, err := key.validate()
if err != nil || reply != OK {
return nil, reply, err
}
user := User{name: username}
if m.system == nil {
return nil, ErrDBClosed, Fail(`internal DB is nil`)
}
// check if user and email exist
if m.ExistingUser(username) {
return nil, ErrUsernameInUse, Fail(`user "%s" already exists!`, username)
}
if m.ExistingEmail(email) {
return nil, ErrEmailInUse, Fail(`email "%s" is already in use!`, email)
}
// now start adding the user
user.id, err = m.system.NewItem("User")
if err != nil {
return nil, ErrDBFail, err
}
tx, err := m.Begin()
if err != nil {
return nil, ErrTransactionFail, err
}
if err := tx.Set("User", user.id, "Username", []Value{NewString(username)}); err != nil {
return nil, ErrDBFail, err
}
if err := tx.Set("User", user.id, "Email", []Value{NewString(email)}); err != nil {
return nil, ErrDBFail, err
}
salt := make([]byte, key.p.InternalSaltLength)
n, err := rand.Read(salt)
if uint32(n) != key.p.InternalSaltLength || err != nil {
return nil, ErrCryptoRandFailure, Fail(`random number generator failed to generate salt`)
}
if err := tx.Set("User", user.id, "InternalSalt", []Value{NewBytes(salt)}); err != nil {
return nil, ErrDBFail, Fail(`could not store salt in multiuser database: %s`, err)
}
realkey := argon2.IDKey(key.pwd,
salt, key.p.Argon2Iterations, key.p.Argon2Memory,
key.p.Argon2Parallelism, key.p.KeyLength)
if err := tx.Set("User", user.id, "Key", []Value{NewBytes(realkey)}); err != nil {
return nil, ErrDBFail, Fail(`could not store key in multiuser database: %s`, err)
}
if err := tx.Set("User", user.id, "ExternalSalt", []Value{NewBytes(key.sel)}); err != nil {
return nil, ErrDBFail, Fail(`could not store the external salt in multiuser database: %s`, err)
}
now := NewDate(time.Now())
if err := tx.Set("User", user.id, "Created", []Value{now}); err != nil {
return nil, ErrDBFail, err
}
if err := tx.Set("User", user.id, "Modified", []Value{now}); err != nil {
return nil, ErrDBFail, err
}
if err := tx.Commit(); err != nil {
return nil, ErrDBFail, Fail(`multiuser database error: %s`, err)
}
dirpath := m.UserDir(&user)
err = CreateDirIfNotExist(dirpath)
if err != nil {
return nil, ErrFileSystem, err
}
return &user, OK, nil
}
// ExternalSalt is the salt associated with a user. It is stored in the database and may
// be used for hashing the password prior to authentication. The external salt is not used
// for internal key derivation.
func (m *MultiDB) ExternalSalt(username string) ([]byte, ErrCode, error) {
if !m.ExistingUser(username) {
return nil, ErrUnknownUser, Fail(`unknown user "%s"`, username)
}
tx, err := m.Begin()
if err != nil {
return nil, ErrTransactionFail, Fail(`transaction failed: %s`, err)
}
defer tx.Rollback()
id := m.userID(username)
if id == 0 {
return nil, ErrUnknownUser, Fail(`unknown user "%s"`, username)
}
result, err := m.system.Get("User", id, "ExternalSalt")
if err != nil || len(result) != 1 {
return nil, ErrNotEnoughSalt, Fail(`user "%s" salt not found, the user database might be corrupted`, username)
}
err = tx.Commit()
if err != nil {
return nil, ErrTransactionFail, Fail(`transaction commit failed: %s`, err)
}
return result[0].Bytes(), OK, nil
}
// GenerateExternalSalt returns some new external salt of the length specified in params.
// This salt should be passed to NewUser and can be used for passphrase hashing prior to
// calling NewUser. It is stored in the user database and can be retrieved as ExternalSalt.
func GenerateExternalSalt(params *Params) []byte {
salt := make([]byte, params.ExternalSaltLength)
n, err := rand.Read(salt)
if err != nil || uint32(n) < params.ExternalSaltLength {
return nil
}
return salt
}
type saltedKey struct {
pwd []byte
sel []byte
p *Params
}
func (key *saltedKey) validate() (ErrCode, error) {
if key == nil {
return ErrInvalidKey, Fail(`key is nil`)
}
if key.p == nil {
return ErrInvalidParams, Fail(`key parameters are nil`)
}
if key.pwd == nil {
return ErrInvalidKey, Fail(`key password is empty`)
}
if key.sel == nil {
return ErrNotEnoughSalt, Fail(`key salt is nil`)
}
if !key.p.validate() {
return ErrInvalidParams, Fail(`invalid parameters`)
}
if uint32(len(key.sel)) < key.p.ExternalSaltLength {
return ErrNotEnoughSalt, Fail(`external key salt length is less than required by key params`)
}
return OK, nil
}
// GenerateKey takes a password and some salt, and generates a salted key based on a 64-byte
// Blake2b-512 digest of the password. Use the ExternalSalt as salt and the original,
// unaltered password.
func GenerateKey(password string, salt []byte, params *Params) *saltedKey {
unsalted := blake2b.Sum512([]byte(password))
salted := saltedKey{pwd: append(salt, unsalted[:]...), sel: salt, p: params}
return &salted
}
// Authenticate a user by given name and salted password.
// Returns the user and OK if successful, otherwise nil, a numeric error code and the error.
// Notice that the external salt is not passed to this function. Instead, the password string
// should have been prepared (securely hashed, whitened, etc.) before calling this function
// on the basis of the user's ExternalSalt.
func (m *MultiDB) Authenticate(username string, key *saltedKey) (*User, ErrCode, error) {
if err := validateUser(username, m.BaseDir()); err != nil {
return nil, ErrInvalidUser, err
}
if !m.ExistingUser(username) {
return nil, ErrUnknownUser, Fail(`user "%s" does not exist`, username)
}
reply, err := key.validate()
if err != nil || reply != OK {
return nil, reply, err
}
user := User{name: username}
dirpath := m.UserDir(&user)
if _, err := os.Stat(dirpath); os.IsNotExist(err) {
return nil, ErrNoHome, Fail(`user "%s" home directory does not exist: %s`, username, dirpath)
}
user.id = m.userID(username)
if user.id == 0 {
return nil, ErrUnknownUser, Fail(`user "%s" does not exist`, username)
}
// get the strong salt and hash with it using argon2, compare to stored key
result, err := m.system.Get("User", user.id, "InternalSalt")
if err != nil || len(result) != 1 {
return nil, ErrNotEnoughSalt,
Fail(`user "%s"'s internal salt was not found, the user database might be corrupted`, username)
}
salt := result[0].Bytes()
if len(salt) != int(key.p.InternalSaltLength) {
return nil, ErrInvalidParams,
Fail(`invalid params, user "%s"'s internal salt length does not match internal salt length in params, given %d, expected %d`, username, len(salt), key.p.InternalSaltLength)
}
keyA := argon2.IDKey(key.pwd,
salt, key.p.Argon2Iterations, key.p.Argon2Memory,
key.p.Argon2Parallelism, key.p.KeyLength)
keyresult, err := m.system.Get("User", user.id, "Key")
if err != nil || len(keyresult) != 1 {
return nil, ErrAuthenticationFailed,
Fail(`user "%s" key was not found in user database, the database might be corrupted`, username)
}
keyB := keyresult[0].Bytes()
if !bytes.Equal(keyA, keyB) {
return nil, ErrAuthenticationFailed, Fail(`authentication failure`)
}
return &user, OK, nil
}
// Close the MultiDB, closing the internal housekeeping and all open user databases.
func (m *MultiDB) Close() (ErrCode, error) {
errcount := 0
s := ""
for _, v := range m.userdbs {
if v != nil {
err := v.Close()
if err != nil {
s = fmt.Sprintf("%s, %s", s, err.Error())
errcount++
}
}
}
for k := range m.userdbs {
delete(m.userdbs, k)
}
if err := m.system.Close(); err != nil {
s = fmt.Sprintf("%s, %s", s, err.Error())
errcount++
}
if errcount > 0 {
return ErrCloseFailed, Fail(`errors closing multi user DB: %s`, s)
}
return OK, nil
}
// UserDB returns the database of the given user.
func (m *MultiDB) UserDB(user *User) (*MDB, ErrCode, error) {
var err error
if user.id == 0 {
return nil, ErrUnknownUser, Fail(`user "%s" does not exist`, user.name)
}
db := m.userdbs[user.id]
if db == nil {
db, err = Open(m.driver, m.userDBFile(user))
if err != nil {
return nil, ErrOpenFailed, err
}
}
return db, OK, nil
}
// DeleteUserContent deletes a user's content in the multiuser database, i.e., all the user data.
// This action cannot be undone.
func (m *MultiDB) DeleteUserContent(user *User) (ErrCode, error) {
db, _, _ := m.UserDB(user)
db.Close()
if err := removeContents(m.UserDir(user)); err != nil {
return ErrFileSystem, err
}
return OK, nil
}
// DeleteUser deletes a user and all associated user content from a multiuser database.
func (m *MultiDB) DeleteUser(user *User) (ErrCode, error) {
tx, err := m.Begin()
if err != nil {
return ErrTransactionFail, err
}
defer tx.Rollback()
if err := tx.RemoveItem("User", user.ID()); err != nil {
return ErrDBFail, err
}
errcode, err := m.DeleteUserContent(user)
delete(m.userdbs, user.ID())
if err != nil {
return errcode, err
}
err = tx.Commit()
if err != nil {
return ErrTransactionFail, err
}
return OK, nil
}
func removeContents(dir string) error {
d, err := os.Open(dir)
if err != nil {
return err
}
defer d.Close()
names, err := d.Readdirnames(-1)
if err != nil {
return err
}
for _, name := range names |
return nil
}
// Delete deletes the whole multiuser db including all housekeeping information and directories.
// This action cannot be undone.
func (m *MultiDB) Delete() (ErrCode, error) {
m.Close()
m.system.Close()
if err := removeContents(m.BaseDir()); err != nil {
return ErrFileSystem, err
}
return OK, nil
}
// ArchiveUser stores the user data in a packed zip file; it closes the user's database but
// does not remove the user. This can be used for backups or for archiving.
func (m *MultiDB) ArchiveUser(user *User, archivedir string) (ErrCode, error) {
db, reply, err := m.UserDB(user)
if err != nil {
return reply, err
}
if err := db.Close(); err != nil {
return ErrCloseFailed, err
}
source := m.UserDir(user)
filename := fmt.Sprintf("%s-%d_%s.multidb", user.Name(), int64(user.ID()), time.Now().UTC().Format(time.RFC3339))
result, err := packdir.Pack(source, filename, archivedir, packdir.GOOD_COMPRESSION, 0)
if err != nil {
return ErrPackFail, err
}
if result.ScanErrNum > 0 {
return ErrFileSystem,
Fail(`archiving failed, unable to pack %d files (insufficient permissions?)`, result.ScanErrNum)
}
if result.ArchiveErrNum > 0 {
return ErrPackFail,
Fail(`archiving failed, %d files were not properly archived (insufficient permissions?)`,
result.ArchiveErrNum)
}
delete(m.userdbs, user.ID())
return OK, nil
}
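// UserEmail returns the email address stored for the given user.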
func (m *MultiDB) UserEmail(user *User) (string, ErrCode, error) {
if user == nil || !m.ExistingUser(user.name) {
return "", ErrUnknownUser, Fail(`user "%s" does not exist`, user.name)
}
result, err := m.system.Get("User", user.ID(), "Email")
if err != nil {
return "", ErrUnknownUser, Fail(`user "%s" does not exist`, user.name)
}
if len(result) != 1 {
return "", ErrDBFail, Fail(`non unique result for user "%s" email`, user.name)
}
return result[0].String(), OK, nil
}
| {
err = os.RemoveAll(filepath.Join(dir, name))
if err != nil {
return err
}
} | conditional_block |
multiuser.go | package minidb
import (
"bytes"
"crypto/rand"
"fmt"
"os"
"path/filepath"
"regexp"
"time"
"github.com/rasteric/packdir"
"golang.org/x/crypto/argon2"
"golang.org/x/crypto/blake2b"
)
// Params contains all the parameters that are used by a multiuser database.
type Params struct {
Argon2Memory uint32
Argon2Iterations uint32
Argon2Parallelism uint8
KeyLength uint32
InternalSaltLength uint32
ExternalSaltLength uint32
}
// DefaultParams returns parameters with reasonable default values that are safe to use.
// Be aware that default parameters may change from release to release to reflect
// updates and changes in security requirements.
func DefaultParams() *Params {
p := Params{
KeyLength: 512,
InternalSaltLength: 256,
ExternalSaltLength: 256,
Argon2Memory: 64 * 1024,
Argon2Iterations: 3,
Argon2Parallelism: 4}
return &p
}
func (p *Params) validate() bool {
if p.KeyLength >= 64 && p.InternalSaltLength >= 32 &&
p.ExternalSaltLength >= 32 && p.Argon2Memory >= 16*1024 && p.Argon2Iterations >= 2 {
return true
}
return false
}
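// A minimal sketch (illustrative, not part of the original code) of hardening
// the defaults by raising the Argon2 cost; the helper name hardenedParams and
// the concrete numbers are assumptions, chosen to still pass validate.
func hardenedParams() *Params {
	p := DefaultParams()
	p.Argon2Memory = 128 * 1024 // 128 MiB instead of the default 64 MiB
	p.Argon2Iterations = 4
	if !p.validate() {
		return nil // unreachable with these values, kept as a guard
	}
	return p
}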
// User represents a user.
type User struct {
name string
id Item
}
// Name returns the name of the user.
func (u *User) Name() string {
return u.name
}
// ID returns the ID of the user.
func (u *User) ID() Item {
return u.id
}
// MultiDB contains all information needed to manage multiple user DBs, except for the parameters
// and context-specific information such as passwords.
type MultiDB struct {
basepath string
driver string
system *MDB
userdbs map[Item]*MDB
}
// NewMultiDB returns a new multi user database.
func NewMultiDB(basedir string, driver string) (*MultiDB, error) {
d := filepath.Clean(basedir)
if !validDir(d) {
return nil, Fail(`the base directory "%s" does not exist or has incorrect permissions`, d)
}
db := MultiDB{basepath: basedir}
thedb := &db
sys, err := Open(driver, thedb.systemDBFile())
if err != nil {
return nil, err
}
thedb.system = sys
thedb.driver = driver
thedb.userdbs = make(map[Item]*MDB)
err = sys.AddTable("User",
[]Field{Field{Name: "Username", Sort: DBString},
Field{Name: "Email", Sort: DBString},
Field{Name: "Key", Sort: DBBlob},
Field{Name: "ExternalSalt", Sort: DBBlob},
Field{Name: "InternalSalt", Sort: DBBlob},
Field{Name: "Created", Sort: DBDate},
Field{Name: "Modified", Sort: DBDate}})
if err != nil {
return nil, Fail(`could not create user table: %s`, err)
}
return thedb, nil
}
// Begin a transaction.
func (m *MultiDB) Begin() (*Tx, error) {
return m.system.Begin()
}
// UserDir returns the given user's directory where the user database is stored.
func (m *MultiDB) UserDir(user *User) string |
// BaseDir returns the base directory of the multiuser database. This directory contains databases
// for all users.
func (m *MultiDB) BaseDir() string {
return m.basepath
}
func (m *MultiDB) userFile(user *User, file string) string {
return filepath.Join(m.UserDir(user), file)
}
func (m *MultiDB) userDBFile(user *User) string {
return m.userFile(user, "data.sqlite")
}
func (m *MultiDB) systemDBFile() string {
return filepath.Join(m.BaseDir(), "system.sqlite")
}
func validUserName(name string) bool {
var validUser = regexp.MustCompile(`^\p{L}+[_0-9\p{L}]*$`)
return validUser.MatchString(name)
}
func validDir(dir string) bool {
if _, err := os.Stat(dir); os.IsNotExist(err) {
return false
}
return true
}
// CreateDirIfNotExist creates a directory, including all parent directories needed,
// or returns an error.
func CreateDirIfNotExist(dir string) error {
if _, err := os.Stat(dir); os.IsNotExist(err) {
err = os.MkdirAll(dir, 0755)
if err != nil {
return err
}
}
return nil
}
func validateUser(name string, basedir string) error {
if !validUserName(name) {
return Fail(`invalid user name "%s"`, name)
}
if !validDir(basedir) {
return Fail(`the base directory for user "%s" does not exist: %s`, name, basedir)
}
src, err := os.Stat(basedir)
if err != nil {
return err
}
if !src.IsDir() {
return Fail(`not a directory: %s`, basedir)
}
return nil
}
// ErrCode values represent errors as plain numeric codes instead of error structures.
type ErrCode int
// Error codes returned by the functions.
const (
ErrAuthenticationFailed ErrCode = iota + 1 // User authentication has failed (wrong password).
OK // No error has occurred.
ErrUsernameInUse // The user name is already being used.
ErrEmailInUse // The email is already being used.
ErrCryptoRandFailure // The random number generator has failed.
ErrInvalidParams // One or more parameters were invalid.
ErrUnknownUser // The user is not known.
ErrNotEnoughSalt // Insufficiently long salt has been supplied.
ErrInvalidUser // The user name or email is invalid.
ErrDBClosed // The internal housekeeping DB is locked, corrupted, or closed.
ErrDBFail // A database operation has failed.
ErrFileSystem // A directory or file could not be created.
ErrNoHome // The user's DB home directory does not exist.
ErrCloseFailed // Could not close the user database.
ErrOpenFailed // Could not open the user database.
ErrPackFail // Compressing user data failed.
ErrInvalidKey // A given salted key is invalid (either nil, or other problems).
ErrTransactionFail // Could not perform op because of a failed transaction.
)
func (m *MultiDB) isExisting(field, query string) bool {
q, _ := ParseQuery(fmt.Sprintf("User %s=%s", field, query))
results, err := m.system.Find(q, 1)
if err != nil || len(results) < 1 {
return false
}
return true
}
func (m *MultiDB) userID(username string) Item {
q, err := ParseQuery(fmt.Sprintf("User Username=%s", username))
if err != nil {
return 0
}
results, err := m.system.Find(q, 1)
if err != nil || len(results) != 1 {
return 0
}
return results[0]
}
// ExistingUser returns true if a user with the given user name exists, false otherwise.
func (m *MultiDB) ExistingUser(username string) bool {
result := m.isExisting("Username", username)
return result
}
// ExistingEmail returns true if a user with this email address exists, false otherwise.
func (m *MultiDB) ExistingEmail(email string) bool {
result := m.isExisting("Email", email)
return result
}
// NewUser creates a new user with the given username, email, and salted key (derived from the
// password). Based on a strong salt that is only used internally and the Argon2 algorithm with
// the given parameters, an internal key is created and stored in the internal database. The
// user and OK are returned unless an error has occurred. The ErrCode returned makes it easier
// to distinguish certain cases: ErrEmailInUse - the email has already been registered;
// ErrUsernameInUse - a user with the same user name has already been registered. Both emails
// and usernames must be unique and cannot be registered twice.
func (m *MultiDB) NewUser(username, email string, key *saltedKey) (*User, ErrCode, error) {
// validate inputs
if err := validateUser(username, m.BaseDir()); err != nil {
return nil, ErrInvalidUser, err
}
reply, err := key.validate()
if err != nil || reply != OK {
return nil, reply, err
}
user := User{name: username}
if m.system == nil {
return nil, ErrDBClosed, Fail(`internal DB is nil`)
}
// check if user and email exist
if m.ExistingUser(username) {
return nil, ErrUsernameInUse, Fail(`user "%s" already exists!`, username)
}
if m.ExistingEmail(email) {
return nil, ErrEmailInUse, Fail(`email "%s" is already in use!`, email)
}
// now start adding the user
user.id, err = m.system.NewItem("User")
if err != nil {
return nil, ErrDBFail, err
}
tx, err := m.Begin()
if err != nil {
return nil, ErrTransactionFail, err
}
if err := tx.Set("User", user.id, "Username", []Value{NewString(username)}); err != nil {
return nil, ErrDBFail, err
}
if err := tx.Set("User", user.id, "Email", []Value{NewString(email)}); err != nil {
return nil, ErrDBFail, err
}
salt := make([]byte, key.p.InternalSaltLength)
n, err := rand.Read(salt)
if uint32(n) != key.p.InternalSaltLength || err != nil {
return nil, ErrCryptoRandFailure, Fail(`random number generator failed to generate salt`)
}
if err := tx.Set("User", user.id, "InternalSalt", []Value{NewBytes(salt)}); err != nil {
return nil, ErrDBFail, Fail(`could not store salt in multiuser database: %s`, err)
}
realkey := argon2.IDKey(key.pwd,
salt, key.p.Argon2Iterations, key.p.Argon2Memory,
key.p.Argon2Parallelism, key.p.KeyLength)
if err := tx.Set("User", user.id, "Key", []Value{NewBytes(realkey)}); err != nil {
return nil, ErrDBFail, Fail(`could not store key in multiuser database: %s`, err)
}
if err := tx.Set("User", user.id, "ExternalSalt", []Value{NewBytes(key.sel)}); err != nil {
return nil, ErrDBFail, Fail(`could not store the external salt in multiuser database: %s`, err)
}
now := NewDate(time.Now())
if err := tx.Set("User", user.id, "Created", []Value{now}); err != nil {
return nil, ErrDBFail, err
}
if err := tx.Set("User", user.id, "Modified", []Value{now}); err != nil {
return nil, ErrDBFail, err
}
if err := tx.Commit(); err != nil {
return nil, ErrDBFail, Fail(`multiuser database error: %s`, err)
}
dirpath := m.UserDir(&user)
err = CreateDirIfNotExist(dirpath)
if err != nil {
return nil, ErrFileSystem, err
}
return &user, OK, nil
}
// ExternalSalt is the salt associated with a user. It is stored in the database and may
// be used for hashing the password prior to authentication. The external salt is not used
// for internal key derivation.
func (m *MultiDB) ExternalSalt(username string) ([]byte, ErrCode, error) {
if !m.ExistingUser(username) {
return nil, ErrUnknownUser, Fail(`unknown user "%s"`, username)
}
tx, err := m.Begin()
if err != nil {
return nil, ErrTransactionFail, Fail(`transaction failed: %s`, err)
}
defer tx.Rollback()
id := m.userID(username)
if id == 0 {
return nil, ErrUnknownUser, Fail(`unknown user "%s"`, username)
}
result, err := m.system.Get("User", id, "ExternalSalt")
if err != nil || len(result) != 1 {
return nil, ErrNotEnoughSalt, Fail(`user "%s" salt not found, the user database might be corrupted`, username)
}
err = tx.Commit()
if err != nil {
return nil, ErrTransactionFail, Fail(`transaction commit failed: %s`, err)
}
return result[0].Bytes(), OK, nil
}
// GenerateExternalSalt returns some new external salt of the length specified in params.
// This salt should be passed to NewUser and can be used for passphrase hashing prior to
// calling NewUser. It is stored in the user database and can be retrieved as ExternalSalt.
func GenerateExternalSalt(params *Params) []byte {
salt := make([]byte, params.ExternalSaltLength)
n, err := rand.Read(salt)
if err != nil || uint32(n) < params.ExternalSaltLength {
return nil
}
return salt
}
type saltedKey struct {
pwd []byte
sel []byte
p *Params
}
func (key *saltedKey) validate() (ErrCode, error) {
if key == nil {
return ErrInvalidKey, Fail(`key is nil`)
}
if key.p == nil {
return ErrInvalidParams, Fail(`key parameters are nil`)
}
if key.pwd == nil {
return ErrInvalidKey, Fail(`key password is empty`)
}
if key.sel == nil {
return ErrNotEnoughSalt, Fail(`key salt is nil`)
}
if !key.p.validate() {
return ErrInvalidParams, Fail(`invalid parameters`)
}
if uint32(len(key.sel)) < key.p.ExternalSaltLength {
return ErrNotEnoughSalt, Fail(`external key salt length is less than required by key params`)
}
return OK, nil
}
// GenerateKey takes a password and some salt, and generates a salted key based on a 64-byte
// Blake2b-512 digest of the password. Use the ExternalSalt as salt and the original,
// unaltered password.
func GenerateKey(password string, salt []byte, params *Params) *saltedKey {
unsalted := blake2b.Sum512([]byte(password))
salted := saltedKey{pwd: append(salt, unsalted[:]...), sel: salt, p: params}
return &salted
}
// Authenticate a user by given name and salted password.
// Returns the user and OK if successful, otherwise nil, a numeric error code and the error.
// Notice that the external salt is not passed to this function. Instead, the password string
// should have been prepared (securely hashed, whitened, etc.) before calling this function
// on the basis of the user's ExternalSalt.
func (m *MultiDB) Authenticate(username string, key *saltedKey) (*User, ErrCode, error) {
if err := validateUser(username, m.BaseDir()); err != nil {
return nil, ErrInvalidUser, err
}
if !m.ExistingUser(username) {
return nil, ErrUnknownUser, Fail(`user "%s" does not exist`, username)
}
reply, err := key.validate()
if err != nil || reply != OK {
return nil, reply, err
}
user := User{name: username}
dirpath := m.UserDir(&user)
if _, err := os.Stat(dirpath); os.IsNotExist(err) {
return nil, ErrNoHome, Fail(`user "%s" home directory does not exist: %s`, username, dirpath)
}
user.id = m.userID(username)
if user.id == 0 {
return nil, ErrUnknownUser, Fail(`user "%s" does not exist`, username)
}
// get the strong salt and hash with it using argon2, compare to stored key
result, err := m.system.Get("User", user.id, "InternalSalt")
if err != nil || len(result) != 1 {
return nil, ErrNotEnoughSalt,
Fail(`user "%s"'s internal salt was not found, the user database might be corrupted`, username)
}
salt := result[0].Bytes()
if len(salt) != int(key.p.InternalSaltLength) {
return nil, ErrInvalidParams,
Fail(`invalid params, user "%s"'s internal salt length does not match internal salt length in params, given %d, expected %d`, username, len(salt), key.p.InternalSaltLength)
}
keyA := argon2.IDKey(key.pwd,
salt, key.p.Argon2Iterations, key.p.Argon2Memory,
key.p.Argon2Parallelism, key.p.KeyLength)
keyresult, err := m.system.Get("User", user.id, "Key")
if err != nil || len(keyresult) != 1 {
return nil, ErrAuthenticationFailed,
Fail(`user "%s" key was not found in user database, the database might be corrupted`, username)
}
keyB := keyresult[0].Bytes()
if !bytes.Equal(keyA, keyB) {
return nil, ErrAuthenticationFailed, Fail(`authentication failure`)
}
return &user, OK, nil
}
// Close the MultiDB, closing the internal housekeeping and all open user databases.
func (m *MultiDB) Close() (ErrCode, error) {
errcount := 0
s := ""
for _, v := range m.userdbs {
if v != nil {
err := v.Close()
if err != nil {
s = fmt.Sprintf("%s, %s", s, err.Error())
errcount++
}
}
}
for k := range m.userdbs {
delete(m.userdbs, k)
}
if err := m.system.Close(); err != nil {
s = fmt.Sprintf("%s, %s", s, err.Error())
errcount++
}
if errcount > 0 {
return ErrCloseFailed, Fail(`errors closing multi user DB: %s`, s)
}
return OK, nil
}
// UserDB returns the database of the given user.
func (m *MultiDB) UserDB(user *User) (*MDB, ErrCode, error) {
var err error
if user.id == 0 {
return nil, ErrUnknownUser, Fail(`user "%s" does not exist`, user.name)
}
db := m.userdbs[user.id]
if db == nil {
db, err = Open(m.driver, m.userDBFile(user))
if err != nil {
return nil, ErrOpenFailed, err
}
}
return db, OK, nil
}
// DeleteUserContent deletes a user's content in the multiuser database, i.e., all the user data.
// This action cannot be undone.
func (m *MultiDB) DeleteUserContent(user *User) (ErrCode, error) {
db, _, _ := m.UserDB(user)
if db != nil {
db.Close()
}
if err := removeContents(m.UserDir(user)); err != nil {
return ErrFileSystem, err
}
return OK, nil
}
// DeleteUser deletes a user and all associated user content from a multiuser database.
func (m *MultiDB) DeleteUser(user *User) (ErrCode, error) {
tx, err := m.Begin()
if err != nil {
return ErrTransactionFail, err
}
defer tx.Rollback()
if err := tx.RemoveItem("User", user.ID()); err != nil {
return ErrDBFail, err
}
errcode, err := m.DeleteUserContent(user)
delete(m.userdbs, user.ID())
if err != nil {
return errcode, err
}
err = tx.Commit()
if err != nil {
return ErrTransactionFail, err
}
return OK, nil
}
func removeContents(dir string) error {
d, err := os.Open(dir)
if err != nil {
return err
}
defer d.Close()
names, err := d.Readdirnames(-1)
if err != nil {
return err
}
for _, name := range names {
err = os.RemoveAll(filepath.Join(dir, name))
if err != nil {
return err
}
}
return nil
}
// Delete deletes the whole multiuser db including all housekeeping information and directories.
// This action cannot be undone.
func (m *MultiDB) Delete() (ErrCode, error) {
m.Close() // also closes the internal system DB and all open user DBs
if err := removeContents(m.BaseDir()); err != nil {
return ErrFileSystem, err
}
return OK, nil
}
// ArchiveUser stores the user data in a packed zip file but does not close or remove the user.
// This can be used for backups or for archiving.
func (m *MultiDB) ArchiveUser(user *User, archivedir string) (ErrCode, error) {
db, reply, err := m.UserDB(user)
if err != nil {
return reply, err
}
if err := db.Close(); err != nil {
return ErrCloseFailed, err
}
source := m.UserDir(user)
filename := fmt.Sprintf("%s-%d_%s.multidb", user.Name(), int64(user.ID()), time.Now().UTC().Format(time.RFC3339))
result, err := packdir.Pack(source, filename, archivedir, packdir.GOOD_COMPRESSION, 0)
if err != nil {
return ErrPackFail, err
}
if result.ScanErrNum > 0 {
return ErrFileSystem,
Fail(`archiving failed, unable to pack %d files (insufficient permissions?)`, result.ScanErrNum)
}
if result.ArchiveErrNum > 0 {
return ErrPackFail,
Fail(`archiving failed, %d files were not properly archived (insufficient permissions?)`,
result.ArchiveErrNum)
}
delete(m.userdbs, user.ID())
return OK, nil
}
// UserEmail returns the email address of the given user.
func (m *MultiDB) UserEmail(user *User) (string, ErrCode, error) {
if user == nil {
return "", ErrUnknownUser, Fail(`user is nil`)
}
if !m.ExistingUser(user.name) {
return "", ErrUnknownUser, Fail(`user "%s" does not exist`, user.name)
}
}
result, err := m.system.Get("User", user.ID(), "Email")
if err != nil {
return "", ErrUnknownUser, Fail(`user "%s" does not exist`, user.name)
}
if len(result) != 1 {
return "", ErrDBFail, Fail(`non unique result for user "%s" email`, user.name)
}
return result[0].String(), OK, nil
}
| {
return filepath.Join(m.basepath, user.name)
} | identifier_body |
multiuser.go | package minidb
import (
"bytes"
"crypto/rand"
"fmt"
"os"
"path/filepath"
"regexp"
"time"
"github.com/rasteric/packdir"
"golang.org/x/crypto/argon2"
"golang.org/x/crypto/blake2b"
)
// Params contain all the parameters that are used by a multiuser database.
type Params struct {
Argon2Memory uint32
Argon2Iterations uint32
Argon2Parallelism uint8
KeyLength uint32
InternalSaltLength uint32
ExternalSaltLength uint32
}
// DefaultParams returns parameters with reasonable default values that are safe to use.
// Be aware that default parameters may change from release to release to reflect
// updates and changes in security requirements.
func DefaultParams() *Params {
p := Params{
KeyLength: 512,
InternalSaltLength: 256,
ExternalSaltLength: 256,
Argon2Memory: 64 * 1024,
Argon2Iterations: 3,
Argon2Parallelism: 4}
return &p
}
func (p *Params) validate() bool {
return p.KeyLength >= 64 && p.InternalSaltLength >= 32 &&
p.ExternalSaltLength >= 32 && p.Argon2Memory >= 16*1024 && p.Argon2Iterations >= 2
}
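// To illustrate the thresholds above, a minimal sketch (the lowered value is
// hypothetical): DefaultParams passes validation, and weakening one field fails it.
//
//   p := DefaultParams()
//   p.validate() // true
//   p.Argon2Iterations = 1
//   p.validate() // false: fewer than 2 iterations is rejected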
// User represents a user.
type User struct {
name string
id Item
}
// Name returns the name of the user.
func (u *User) Name() string {
return u.name
}
// ID returns the ID of the user.
func (u *User) ID() Item {
return u.id
}
// MultiDB contains all information needed for housekeeping multiple DBs, except for the parameters
// and context-specific information like passwords.
type MultiDB struct {
basepath string
driver string
system *MDB
userdbs map[Item]*MDB
}
// NewMultiDB returns a new multi user database.
func NewMultiDB(basedir string, driver string) (*MultiDB, error) {
d := filepath.Clean(basedir)
if !validDir(d) {
return nil, Fail(`the base directory "%s" does not exist or has incorrect permissions`, d)
}
db := MultiDB{basepath: basedir}
thedb := &db
sys, err := Open(driver, thedb.systemDBFile())
if err != nil {
return nil, err
}
thedb.system = sys
thedb.driver = driver
thedb.userdbs = make(map[Item]*MDB)
err = sys.AddTable("User",
[]Field{Field{Name: "Username", Sort: DBString},
Field{Name: "Email", Sort: DBString},
Field{Name: "Key", Sort: DBBlob},
Field{Name: "ExternalSalt", Sort: DBBlob},
Field{Name: "InternalSalt", Sort: DBBlob},
Field{Name: "Created", Sort: DBDate},
Field{Name: "Modified", Sort: DBDate}})
if err != nil {
return nil, Fail(`could not create user table: %s`, err)
}
return thedb, nil
}
// Begin a transaction.
func (m *MultiDB) Begin() (*Tx, error) {
return m.system.Begin()
}
// UserDir returns the given user's directory where the user database is stored.
func (m *MultiDB) UserDir(user *User) string {
return filepath.Join(m.basepath, user.name)
}
// BaseDir returns the base directory of the multiuser database. This directory contains databases
// for all users.
func (m *MultiDB) BaseDir() string {
return m.basepath
}
func (m *MultiDB) userFile(user *User, file string) string {
return filepath.Join(m.UserDir(user), file)
}
func (m *MultiDB) userDBFile(user *User) string {
return m.userFile(user, "data.sqlite")
}
func (m *MultiDB) systemDBFile() string {
return filepath.Join(m.BaseDir(), "system.sqlite")
}
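// For orientation, the path helpers above produce this on-disk layout (the user
// name "alice" is a hypothetical example):
//
//   <basepath>/system.sqlite      -- shared housekeeping database
//   <basepath>/alice/data.sqlite  -- the user's own database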
func validUserName(name string) bool {
var validUser = regexp.MustCompile(`^\p{L}+[_0-9\p{L}]*$`)
return validUser.MatchString(name)
}
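// Under this pattern, names such as "alice", "Bob_42" and "héloïse" are accepted
// (a name must start with a letter), while "42bob", "_x" and "a-b" are rejected.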
func validDir(dir string) bool {
if _, err := os.Stat(dir); os.IsNotExist(err) {
return false
}
return true
}
// CreateDirIfNotExist creates a directory, including all parent directories needed,
// or returns an error.
func CreateDirIfNotExist(dir string) error {
if _, err := os.Stat(dir); os.IsNotExist(err) {
err = os.MkdirAll(dir, 0755)
if err != nil {
return err
}
}
return nil
}
func validateUser(name string, basedir string) error {
if !validUserName(name) {
return Fail(`invalid user name "%s"`, name)
}
if !validDir(basedir) {
return Fail(`the base directory for user "%s" does not exist: %s`, name, basedir)
}
src, err := os.Stat(basedir)
if err != nil {
return err
}
if !src.IsDir() {
return Fail(`not a directory: %s`, basedir)
}
return nil
}
// ErrCode values represent error conditions as numeric codes instead of error structures.
type ErrCode int
// Error codes returned by the functions.
const (
ErrAuthenticationFailed ErrCode = iota + 1 // User authentication has failed (wrong password).
OK // No error has occurred.
ErrUsernameInUse // The user name is already being used.
ErrEmailInUse // The email is already being used.
ErrCryptoRandFailure // The random number generator has failed.
ErrInvalidParams // One or more parameters were invalid.
ErrUnknownUser // The user is not known.
ErrNotEnoughSalt // Insufficiently long salt has been supplied.
ErrInvalidUser // The user name or email is invalid.
ErrDBClosed // The internal housekeeping DB is locked, corrupted, or closed.
ErrDBFail // A database operation has failed.
ErrFileSystem // A directory or file could not be created.
ErrNoHome // The user's DB home directory does not exist.
ErrCloseFailed // Could not close the user database.
ErrOpenFailed // Could not open the user database.
ErrPackFail // Compressing user data failed.
ErrInvalidKey // A given salted key is invalid (either nil, or other problems).
ErrTransactionFail // Could not perform op because of a failed transaction.
)
func (m *MultiDB) isExisting(field, query string) bool {
q, _ := ParseQuery(fmt.Sprintf("User %s=%s", field, query))
results, err := m.system.Find(q, 1)
if err != nil || len(results) < 1 {
return false
}
return true
}
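// NOTE: field and query are interpolated verbatim into the parsed query string,
// so values containing query syntax could change the meaning of the lookup;
// callers are presumably expected to pass trusted values only.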
func (m *MultiDB) userID(username string) Item {
q, err := ParseQuery(fmt.Sprintf("User Username=%s", username))
if err != nil {
return 0
}
results, err := m.system.Find(q, 1)
if err != nil || len(results) != 1 {
return 0
}
return results[0]
}
// ExistingUser returns true if a user with the given user name exists, false otherwise.
func (m *MultiDB) ExistingUser(username string) bool {
result := m.isExisting("Username", username)
return result
}
// ExistingEmail returns true if a user with this email address exists, false otherwise.
func (m *MultiDB) ExistingEmail(email string) bool {
result := m.isExisting("Email", email)
return result
}
// NewUser creates a new user with given username, email, and password. Based on a strong
// salt that is only used internally and the Argon2 algorithm with the given parameters,
// an internal key is created and stored in an internal database. The user and OK are returned
// unless an error has occurred. The ErrCode returned makes it easier to distinguish
// certain cases: ErrEmailInUse - the email has already been registered, ErrUsernameInUse - a
// user with the same user name has already been registered. Both emails and usernames must
// be unique and cannot be registered twice.
func (m *MultiDB) NewUser(username, email string, key *saltedKey) (*User, ErrCode, error) {
// validate inputs
if err := validateUser(username, m.BaseDir()); err != nil {
return nil, ErrInvalidUser, err
}
reply, err := key.validate()
if err != nil || reply != OK {
return nil, reply, err
}
user := User{name: username}
if m.system == nil {
return nil, ErrDBClosed, Fail(`internal DB is nil`)
}
// check if user and email exist
if m.ExistingUser(username) {
return nil, ErrUsernameInUse, Fail(`user "%s" already exists!`, username)
}
if m.ExistingEmail(email) {
return nil, ErrEmailInUse, Fail(`email "%s" is already in use!`, email)
}
// now start adding the user
user.id, err = m.system.NewItem("User")
if err != nil {
return nil, ErrDBFail, err
}
tx, err := m.Begin()
if err != nil {
return nil, ErrTransactionFail, err
}
// roll back on any early return; rolling back after a successful commit is a no-op
defer tx.Rollback()
if err := tx.Set("User", user.id, "Username", []Value{NewString(username)}); err != nil {
return nil, ErrDBFail, err
}
if err := tx.Set("User", user.id, "Email", []Value{NewString(email)}); err != nil {
return nil, ErrDBFail, err
}
salt := make([]byte, key.p.InternalSaltLength)
n, err := rand.Read(salt)
if uint32(n) != key.p.InternalSaltLength || err != nil {
return nil, ErrCryptoRandFailure, Fail(`random number generator failed to generate salt`)
}
if err := tx.Set("User", user.id, "InternalSalt", []Value{NewBytes(salt)}); err != nil {
return nil, ErrDBFail, Fail(`could not store salt in multiuser database: %s`, err)
}
realkey := argon2.IDKey(key.pwd,
salt, key.p.Argon2Iterations, key.p.Argon2Memory,
key.p.Argon2Parallelism, key.p.KeyLength)
if err := tx.Set("User", user.id, "Key", []Value{NewBytes(realkey)}); err != nil {
return nil, ErrDBFail, Fail(`could not store key in multiuser database: %s`, err)
}
if err := tx.Set("User", user.id, "ExternalSalt", []Value{NewBytes(key.sel)}); err != nil {
return nil, ErrDBFail, Fail(`could not store the external salt in multiuser database: %s`, err)
}
now := NewDate(time.Now())
if err := tx.Set("User", user.id, "Created", []Value{now}); err != nil {
return nil, ErrDBFail, err
}
if err := tx.Set("User", user.id, "Modified", []Value{now}); err != nil {
return nil, ErrDBFail, err
}
if err := tx.Commit(); err != nil {
return nil, ErrDBFail, Fail(`multiuser database error: %s`, err)
}
dirpath := m.UserDir(&user)
err = CreateDirIfNotExist(dirpath)
if err != nil {
return nil, ErrFileSystem, err
}
return &user, OK, nil
}
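// For illustration, a minimal signup sketch tying GenerateExternalSalt, GenerateKey,
// and NewUser together (the user name, email, and passphrase are hypothetical;
// real callers would take them as input). It is not called anywhere in this file.
func exampleSignup(m *MultiDB) (*User, ErrCode, error) {
params := DefaultParams()
salt := GenerateExternalSalt(params) // retrievable later via ExternalSalt()
if salt == nil {
return nil, ErrCryptoRandFailure, Fail(`could not generate external salt`)
}
key := GenerateKey("correct horse battery staple", salt, params)
return m.NewUser("alice", "alice@example.com", key)
}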
// ExternalSalt is the salt associated with a user. It is stored in the database and may
// be used for hashing the password prior to authentication. The external salt is not used
// for internal key derivation.
func (m *MultiDB) ExternalSalt(username string) ([]byte, ErrCode, error) {
if !m.ExistingUser(username) {
return nil, ErrUnknownUser, Fail(`unknown user "%s"`, username)
}
tx, err := m.Begin()
if err != nil {
return nil, ErrTransactionFail, Fail(`transaction failed: %s`, err)
}
defer tx.Rollback()
id := m.userID(username)
if id == 0 {
return nil, ErrUnknownUser, Fail(`unknown user "%s"`, username)
}
result, err := m.system.Get("User", id, "ExternalSalt")
if err != nil || len(result) != 1 {
return nil, ErrNotEnoughSalt, Fail(`user "%s" salt not found, the user database might be corrupted`, username)
}
err = tx.Commit()
if err != nil {
return nil, ErrTransactionFail, Fail(`transaction commit failed: %s`, err)
}
return result[0].Bytes(), OK, nil
}
// GenerateExternalSalt returns a new external salt of the length specified in params.
// This salt should be passed to NewUser and can be used for passphrase hashing prior to
// calling NewUser. It is stored in the user database and can be retrieved as ExternalSalt.
func GenerateExternalSalt(params *Params) []byte {
salt := make([]byte, params.ExternalSaltLength)
n, err := rand.Read(salt)
if err != nil || uint32(n) < params.ExternalSaltLength {
return nil
}
return salt
}
type saltedKey struct {
pwd []byte
sel []byte
p *Params
}
func (key *saltedKey) validate() (ErrCode, error) {
if key == nil {
return ErrInvalidKey, Fail(`key is nil`)
}
if key.p == nil {
return ErrInvalidParams, Fail(`key parameters are nil`)
}
if key.pwd == nil {
return ErrInvalidKey, Fail(`key password is empty`)
}
if key.sel == nil {
return ErrNotEnoughSalt, Fail(`key salt is nil`)
}
if !key.p.validate() {
return ErrInvalidParams, Fail(`invalid parameters`)
}
if uint32(len(key.sel)) < key.p.ExternalSaltLength {
return ErrNotEnoughSalt, Fail(`external key salt length is less than required by key params`)
}
return OK, nil
}
// GenerateKey takes a password and some salt, and generates a salted key consisting
// of the salt prepended to the 64-byte Blake2b-512 digest of the password.
// Use the ExternalSalt as salt and the original, unaltered password.
func GenerateKey(password string, salt []byte, params *Params) *saltedKey {
unsalted := blake2b.Sum512([]byte(password))
salted := saltedKey{pwd: append(salt, unsalted[:]...), sel: salt, p: params}
return &salted
}
// Authenticate a user by given name and salted password.
// Returns the user and OK if successful, otherwise nil, a numeric error code and the error.
// Notice that the external salt is not passed to this function. Instead, the password string
// should have been prepared (securely hashed, whitened, etc.) before calling this function
// on the basis of the user's ExternalSalt.
func (m *MultiDB) Authenticate(username string, key *saltedKey) (*User, ErrCode, error) {
if err := validateUser(username, m.BaseDir()); err != nil {
return nil, ErrInvalidUser, err
}
if !m.ExistingUser(username) {
return nil, ErrUnknownUser, Fail(`user "%s" does not exist`, username)
}
reply, err := key.validate()
if err != nil || reply != OK {
return nil, reply, err
}
user := User{name: username}
dirpath := m.UserDir(&user)
if _, err := os.Stat(dirpath); os.IsNotExist(err) {
return nil, ErrNoHome, Fail(`user "%s" home directory does not exist: %s`, username, dirpath)
}
user.id = m.userID(username)
if user.id == 0 {
return nil, ErrUnknownUser, Fail(`user "%s" does not exist`, username)
}
// get the strong salt and hash with it using argon2, compare to stored key
result, err := m.system.Get("User", user.id, "InternalSalt")
if err != nil || len(result) != 1 {
return nil, ErrNotEnoughSalt,
Fail(`user "%s"'s internal salt was not found, the user database might be corrupted`, username)
}
salt := result[0].Bytes()
if len(salt) != int(key.p.InternalSaltLength) {
return nil, ErrInvalidParams,
Fail(`invalid params, user "%s"'s internal salt length does not match internal salt length in params, given %d, expected %d`, username, len(salt), key.p.InternalSaltLength)
}
keyA := argon2.IDKey(key.pwd,
salt, key.p.Argon2Iterations, key.p.Argon2Memory,
key.p.Argon2Parallelism, key.p.KeyLength)
keyresult, err := m.system.Get("User", user.id, "Key")
if err != nil || len(keyresult) != 1 {
return nil, ErrAuthenticationFailed,
Fail(`user "%s" key was not found in user database, the database might be corrupted`, username)
}
keyB := keyresult[0].Bytes()
if !bytes.Equal(keyA, keyB) {
return nil, ErrAuthenticationFailed, Fail(`authentication failure`)
}
return &user, OK, nil
}
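// A matching login sketch (it assumes the user was created with DefaultParams,
// since the key parameters must match the stored salt lengths):
func exampleLogin(m *MultiDB, username, passphrase string) (*User, ErrCode, error) {
salt, code, err := m.ExternalSalt(username)
if err != nil {
return nil, code, err
}
key := GenerateKey(passphrase, salt, DefaultParams())
return m.Authenticate(username, key)
}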
// Close the MultiDB, closing the internal housekeeping and all open user databases.
func (m *MultiDB) Close() (ErrCode, error) {
errcount := 0
s := ""
for _, v := range m.userdbs {
if v != nil {
err := v.Close()
if err != nil {
s = fmt.Sprintf("%s, %s", s, err.Error())
errcount++
}
}
}
for k := range m.userdbs {
delete(m.userdbs, k)
}
if err := m.system.Close(); err != nil {
s = fmt.Sprintf("%s, %s", s, err.Error())
errcount++
}
if errcount > 0 {
return ErrCloseFailed, Fail(`errors closing multi user DB: %s`, s)
}
return OK, nil
}
// UserDB returns the database of the given user.
func (m *MultiDB) UserDB(user *User) (*MDB, ErrCode, error) {
var err error
if user.id == 0 {
return nil, ErrUnknownUser, Fail(`user "%s" does not exist`, user.name)
}
db := m.userdbs[user.id]
if db == nil {
db, err = Open(m.driver, m.userDBFile(user))
if err != nil {
return nil, ErrOpenFailed, err
}
}
return db, OK, nil
}
// DeleteUserContent deletes a user's content in the multiuser database, i.e., all the user data.
// This action cannot be undone.
func (m *MultiDB) DeleteUserContent(user *User) (ErrCode, error) {
db, _, _ := m.UserDB(user)
if db != nil {
db.Close()
}
if err := removeContents(m.UserDir(user)); err != nil {
return ErrFileSystem, err
}
return OK, nil
}
// DeleteUser deletes a user and all associated user content from a multiuser database.
func (m *MultiDB) DeleteUser(user *User) (ErrCode, error) {
tx, err := m.Begin()
if err != nil {
return ErrTransactionFail, err
}
defer tx.Rollback()
if err := tx.RemoveItem("User", user.ID()); err != nil {
return ErrDBFail, err
}
errcode, err := m.DeleteUserContent(user)
delete(m.userdbs, user.ID())
if err != nil {
return errcode, err
}
err = tx.Commit()
if err != nil {
return ErrTransactionFail, err
}
return OK, nil
}
func removeContents(dir string) error {
d, err := os.Open(dir)
if err != nil {
return err
}
defer d.Close()
names, err := d.Readdirnames(-1) | }
for _, name := range names {
err = os.RemoveAll(filepath.Join(dir, name))
if err != nil {
return err
}
}
return nil
}
// Delete deletes the whole multiuser db including all housekeeping information and directories.
// This action cannot be undone.
func (m *MultiDB) Delete() (ErrCode, error) {
m.Close() // also closes the internal system DB and all open user DBs
if err := removeContents(m.BaseDir()); err != nil {
return ErrFileSystem, err
}
return OK, nil
}
// ArchiveUser stores the user data in a packed zip file but does not close or remove the user.
// This can be used for backups or for archiving.
func (m *MultiDB) ArchiveUser(user *User, archivedir string) (ErrCode, error) {
db, reply, err := m.UserDB(user)
if err != nil {
return reply, err
}
if err := db.Close(); err != nil {
return ErrCloseFailed, err
}
source := m.UserDir(user)
filename := fmt.Sprintf("%s-%d_%s.multidb", user.Name(), int64(user.ID()), time.Now().UTC().Format(time.RFC3339))
result, err := packdir.Pack(source, filename, archivedir, packdir.GOOD_COMPRESSION, 0)
if err != nil {
return ErrPackFail, err
}
if result.ScanErrNum > 0 {
return ErrFileSystem,
Fail(`archiving failed, unable to pack %d files (insufficient permissions?)`, result.ScanErrNum)
}
if result.ArchiveErrNum > 0 {
return ErrPackFail,
Fail(`archiving failed, %d files were not properly archived (insufficient permissions?)`,
result.ArchiveErrNum)
}
delete(m.userdbs, user.ID())
return OK, nil
}
// UserEmail returns the email address of the given user.
func (m *MultiDB) UserEmail(user *User) (string, ErrCode, error) {
if user == nil {
return "", ErrUnknownUser, Fail(`user is nil`)
}
if !m.ExistingUser(user.name) {
return "", ErrUnknownUser, Fail(`user "%s" does not exist`, user.name)
}
}
result, err := m.system.Get("User", user.ID(), "Email")
if err != nil {
return "", ErrUnknownUser, Fail(`user "%s" does not exist`, user.name)
}
if len(result) != 1 {
return "", ErrDBFail, Fail(`non unique result for user "%s" email`, user.name)
}
return result[0].String(), OK, nil
} | if err != nil {
return err | random_line_split |
B-Server.go | package main
import (
"bufio"
"bytes"
"encoding/gob"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"log"
"net"
"net/http"
"os"
"os/exec"
"strings"
"sync"
"time"
"github.com/getsentry/raven-go"
"github.com/gorilla/websocket"
"github.com/streadway/amqp"
)
// Mappings between clients and their attributes;
var nameClients = make(map[*websocket.Conn]string)
var chanClients = make(map[*websocket.Conn]uint32)
// Control channels;
var newConnection = make(chan *websocket.Conn)
var deadConnection = make(chan *websocket.Conn)
// 1.3. Message channels;
var sysMessageGlobal = make(chan Chat)
var messageQueue = make(chan Chat)
var Message = make(chan Chat)
var commandQueue = make(chan Command)
var upgrader = websocket.Upgrader{
ReadBufferSize: 1024,
WriteBufferSize: 1024,
CheckOrigin: func(r *http.Request) bool { return true },
}
func handler(writer http.ResponseWriter, request *http.Request) {
socket, err := upgrader.Upgrade(writer, request, nil)
if err != nil {
fmt.Println(err)
return
}
_, msg, err := socket.ReadMessage()
if err != nil {
return
}
nameClients[socket] = string(msg) // msg contains the user name.
newConnection <- socket
for {
// Receiving messages from this client indefinitely:
_, msg, err := socket.ReadMessage()
if err != nil {
break
}
message := string(msg)
if message == "" {
continue
}
if message[0:1] != "/" {
// ID 0 marks a regular chat message.
messageQueue <- Chat{0, nil, chanClients[socket], message, nameClients[socket]}
} else {
message = strings.TrimLeft(message, "/")
sub := strings.Split(message, " ")
// The ID carries no meaning for commands.
commandQueue <- Command{0, socket, sub[0], sub[1:]}
}
}
// If the loop breaks, the client has disconnected;
deadConnection <- socket
}
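// As used throughout this file, the first Chat field distinguishes payloads:
// 0 marks a regular chat message, 1 carries channel attributes (consumed from
// setlog below), and 2 marks a system notice.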
func PanicOnError(err error) {
if err != nil {
raven.CaptureErrorAndWait(err, nil)
log.Panic(err)
}
}
func PrintOnError(err error) {
if err != nil {
raven.CaptureError(err, nil)
log.Println(err)
}
}
func serialize(message Chat) ([]byte, error) {
var b bytes.Buffer
encoder := gob.NewEncoder(&b)
err := encoder.Encode(message)
return b.Bytes(), err
}
func deserialize(b []byte) (Chat, error) {
var msg Chat
buf := bytes.NewBuffer(b)
decoder := gob.NewDecoder(buf)
err := decoder.Decode(&msg)
return msg, err
}
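// A quick round-trip sanity check (a sketch; the positional Chat fields follow
// the literals used elsewhere in this file, and the concrete values are made up):
//
//   in := Chat{0, nil, 0, "hello", "alice"}
//   b, _ := serialize(in)
//   out, _ := deserialize(b)
//   fmt.Println(out.Message1) // "hello"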
func get(reader io.Reader) (message string, err error) {
message, err = bufio.NewReader(reader).ReadString('\n')
message = strings.TrimRight(message, "\r\n")
return
}
func reconnect(config Properties) (serverConn net.Conn, ip string) {
fmt.Println("Trying to connect to Master Server...")
fmt.Println("It will not possible to know other new servers until this moment.")
fmt.Println()
time.Sleep(1000 * time.Millisecond)
for {
sc, err := net.Dial("tcp", config.MasterIP+":"+config.MasterPort)
serverConn = sc
sW := bufio.NewWriter(serverConn)
if err != nil {
time.Sleep(5000 * time.Millisecond)
continue
} else {
fmt.Println("Connected to Master Server!")
fmt.Println("Now, new global servers can be known.")
fmt.Println()
if !config.PublicServer {
ip = "127.0.0.1"
} else {
var buf bytes.Buffer
resp, err := http.Get("http://myexternalip.com/raw")
if err == nil {
io.Copy(&buf, resp.Body)
resp.Body.Close()
}
ip = buf.String()
}
sW.WriteString(config.ThisServerName + ":" + ip + ":" +
config.PortServers + ":" + config.PortClients + "\n")
sW.Flush()
break
}
}
return
}
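// The registration line written above has the form
// "<name>:<ip>:<server port>:<client port>\n"; the Master Server is presumably
// expected to split it on ":" in that order.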
func head() {
fmt.Println(" _____________________________")
fmt.Println("| |")
fmt.Println("| XIAOMI Chat System |")
fmt.Println("| Developed by Saulo Pinedo |")
fmt.Println("| --- |")
fmt.Println("| Broadcast Program |")
fmt.Println("|_____________________________|\n")
}
func main() {
// 0. Initializing additional services;
raven.SetDSN("https://277886f557384520a086cfedea9930cf@sentry.io/1831452")
// 0.1. Sentry reporting options;
raven.SetDefaultLoggerName("saulopinedo")
raven.SetDebug(true)
raven.SetEnvironment("staging")
raven.SetRelease("Xiaomi")
raven.SetSampleRate(1.0)
// 1. Pre-defining the fundamental variables;
clientCount := 0
var serverConn net.Conn
var ip string
var mutex sync.Mutex
// 1.2. Mappings between message channels and their attributes;
nameChannels := make(map[uint32]string)
passChannels := make(map[uint32]string)
// 1.2. Server address stores;
chanAddresses := make(map[uint32][]string) | PanicOnError(err)
defer jsonFile.Close()
byteValueJSON, _ := ioutil.ReadAll(jsonFile)
config := Properties{}
json.Unmarshal(byteValueJSON, &config)
channelCount := config.IDchanBegin
nameChannels[0] = "Global"
// 3. Preparing the header;
cmd := exec.Command("cmd", "/c", "cls")
cmd.Stdout = os.Stdout
cmd.Run()
head()
// 4. Preparing the client connections;
http.HandleFunc("/echo", handler)
http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
http.ServeFile(w, r, "websockets.html")
})
go func() {
http.ListenAndServe("127.0.0.1:"+config.PortClients, nil) // TODO: handle this error!
}()
// 4.1. Establishing the connection to the RabbitMQ service;
rabbit, err := amqp.Dial("amqp://guest:guest@localhost:5672/")
PanicOnError(err)
defer rabbit.Close()
// 4.2. Dedicating a RabbitMQ service channel;
ch, err := rabbit.Channel()
PanicOnError(err)
defer ch.Close()
// 4.3. Declaring the exchanges;
err = ch.ExchangeDeclare("syslog", "fanout", true, false, false, false, nil)
PanicOnError(err)
err = ch.ExchangeDeclare("msglog", "fanout", true, false, false, false, nil)
PanicOnError(err)
err = ch.ExchangeDeclare("setlog", "fanout", true, false, false, false, nil)
PanicOnError(err)
// 4.4. Declaring the message queues;
sysqueue, err := ch.QueueDeclare("", false, false, true, false, nil)
PanicOnError(err)
msgqueue, err := ch.QueueDeclare("", false, false, true, false, nil)
PanicOnError(err)
setqueue, err := ch.QueueDeclare("", false, false, true, false, nil)
PanicOnError(err)
// 4.5. Creating the queue bindings;
err = ch.QueueBind(sysqueue.Name, "", "syslog", false, nil)
PanicOnError(err)
err = ch.QueueBind(msgqueue.Name, "", "msglog", false, nil)
PanicOnError(err)
err = ch.QueueBind(setqueue.Name, "", "setlog", false, nil)
PanicOnError(err)
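// Taken together: syslog fans out global system notices, msglog fans out client
// chat messages, and setlog fans out channel-attribute updates between servers.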
// 5. Connecting and receiving the structured server list from the Master Server;
go func() {
mutex.Lock()
serverConn, ip = reconnect(config)
mutex.Unlock()
for {
var list List
dec := gob.NewDecoder(serverConn)
err := dec.Decode(&list)
if err != nil && err.Error() != "gob: unknown type id or corrupted data" {
fmt.Println("The Master Server is offline now.")
mutex.Lock()
serverConn, ip = reconnect(config)
mutex.Unlock()
continue
} else if list.Name == nil {
continue
}
for name := range serverAddresses {
delete(serverAddresses, name)
}
for i := 0; i < len(list.Name); i++ {
serverAddresses[list.Name[i]] = list.AddrS[i]
}
delete(serverAddresses, config.ThisServerName)
chanAddresses[0] = list.AddrS
}
}()
// 7. Receiving structured messages from servers;
msgClient, err := ch.Consume(msgqueue.Name, "", true, false, false, false, nil)
PanicOnError(err)
msgSystem, err := ch.Consume(sysqueue.Name, "", true, false, false, false, nil)
PanicOnError(err)
msgConfig, err := ch.Consume(setqueue.Name, "", true, false, false, false, nil)
PanicOnError(err)
// If it is a client message, proceed:
go func() {
for d := range msgClient {
msg, _ := deserialize(d.Body)
Message <- msg
}
}()
// If it is a global system message:
go func() {
for d := range msgSystem {
msg, _ := deserialize(d.Body)
for c := range nameClients {
if msg.Message2 != nameClients[c] {
err := c.WriteMessage(1, []byte(fmt.Sprint(msg.Message1+"\n")))
if err != nil {
deadConnection <- c
}
}
}
}
}()
// If they are channel attributes, proceed:
go func() {
for d := range msgConfig {
msg, _ := deserialize(d.Body)
// the original if/else branches were identical, so both cases reduce to these updates
chanAddresses[msg.RoomID] = append(chanAddresses[msg.RoomID], msg.Message2)
str2 := strings.Split(msg.Message1, ":")
nameChannels[msg.RoomID] = str2[0]
passChannels[msg.RoomID] = str2[1]
fmt.Println(msg)
}
}()
// 8. Preparing to receive new client connections;
//clientListener, err := net.Listen("tcp", ":"+config.PortClients)
//PanicOnError(err)
// 9. Receiving new clients;
// go func(cl net.Listener) {
// for {
// conn, err := cl.Accept()
// if err != nil {
// raven.CaptureError(err, nil)
// log.Println(err)
// continue
// }
// clientName, _ := get(conn)
// nameClients[conn] = clientName
// newConnection <- conn
// }
// }(clientListener)
// 10. Which branch to take?
go func() {
for {
select {
// 10.1. If a new client connects, proceed:
case conn := <-newConnection:
fmt.Println("A new client has arrived: " + nameClients[conn])
sysMessageGlobal <- Chat{2, nil, 0, fmt.Sprint("{System} " +
nameClients[conn] + " is online now."), nameClients[conn]}
chanClients[conn] = 0
clientCount++
time.Sleep(100 * time.Millisecond)
// 9.1.2. Receiving messages from this client indefinitely;
// go func(conn net.Conn) {
// for {
// message, err := get(conn)
// if err != nil {
// break
// }
// if message[0:1] != "/" {
// // ID 0 marks a regular chat message.
// messageQueue <- Chat{0, nil, chanClients[conn], message, nameClients[conn]}
// } else {
// message = strings.TrimLeft(message, "/")
// sub := strings.Split(message, " ")
// // The ID carries no meaning for commands.
// commandQueue <- Command{0, conn, sub[0], sub[1:]}
// }
// }
// // 9.1.2.1. If the loop breaks, the client has disconnected;
// deadConnection <- conn
// }(conn)
// 10.2. If there is a message for the local clients, proceed:
case msg := <-Message:
for conn := range chanClients {
if msg.RoomID == chanClients[conn] && msg.Message2 != nameClients[conn] {
go func(msg Chat, conn *websocket.Conn) {
if msg.ID == 0 {
err := conn.WriteMessage(1, []byte(fmt.Sprint(msg.Message2+": "+msg.Message1+"\n")))
if err != nil {
deadConnection <- conn
}
} else if msg.ID == 2 {
err := conn.WriteMessage(1, []byte(fmt.Sprint(msg.Message1+"\n")))
if err != nil {
deadConnection <- conn
}
}
}(msg, conn)
}
}
}
}
}()
go func() {
Selection1:
for {
select {
// 10.3. If the system sends a global message, proceed:
case message := <-sysMessageGlobal:
msg, err := serialize(message) // TODO: handle serialization errors!
err = ch.Publish("syslog", "", false, false, amqp.Publishing{
ContentType: "text/plain",
Body: msg,
})
PrintOnError(err)
// for c := range nameClients {
// if message.Message2 != nameClients[c] {
// err := c.WriteMessage(1, []byte(fmt.Sprint(message.Message1+"\n")))
// if err != nil {
// deadConnection <- c
// }
// }
// }
// 10.4. If there is a command to be processed, proceed:
case cmd := <-commandQueue:
fmt.Printf("%s sent a command.\n", nameClients[cmd.FromUser])
switch cmd.Command {
case "show":
// Checking whether the command has more arguments than necessary:
if len(cmd.Parameters) > 0 {
err := cmd.FromUser.WriteMessage(1, []byte(fmt.Sprint("{System} You entered too many arguments!\n")))
if err != nil {
deadConnection <- cmd.FromUser
}
continue Selection1
}
// Checking whether there are channels to display:
if len(nameChannels) == 0 {
err := cmd.FromUser.WriteMessage(1, []byte(fmt.Sprint("{System} There are no channels available!\n")))
if err != nil {
deadConnection <- cmd.FromUser
}
continue Selection1
}
// Sending the available channels to the client:
var b bytes.Buffer
for i := range nameChannels {
b.WriteString(nameChannels[i] + " ")
}
names := b.String()
names = strings.TrimRight(names, " ")
err := cmd.FromUser.WriteMessage(1, []byte(fmt.Sprint("Available rooms: "+names+"\n")))
if err != nil {
deadConnection <- cmd.FromUser
}
case "join":
// Checking whether the command has its required arguments:
if len(cmd.Parameters) == 0 {
err := cmd.FromUser.WriteMessage(1, []byte(fmt.Sprint("{System} This command requires some arguments.\n")))
if err != nil {
deadConnection <- cmd.FromUser
}
continue Selection1
}
// Checking whether the command has more arguments than necessary:
if len(cmd.Parameters) > 2 {
err := cmd.FromUser.WriteMessage(1, []byte(fmt.Sprint("{System} You entered too many arguments!\n")))
if err != nil {
deadConnection <- cmd.FromUser
}
continue Selection1
}
// Checking whether the client is already subscribed to the channel:
if cmd.Parameters[0] == nameChannels[chanClients[cmd.FromUser]] {
err := cmd.FromUser.WriteMessage(1, []byte(fmt.Sprint("{System} You're already subscribed to this channel!\n")))
if err != nil {
deadConnection <- cmd.FromUser
}
continue Selection1
}
// Assigning the channel to the client:
for i := range nameChannels {
if cmd.Parameters[0] == nameChannels[i] {
if passChannels[i] == "" || cmd.Parameters[1] == passChannels[i] {
// Send to the old channel:
messageQueue <- Chat{2, nil, chanClients[cmd.FromUser],
fmt.Sprint("{System} " + nameClients[cmd.FromUser] +
" left your channel."), nameClients[cmd.FromUser]}
chanClients[cmd.FromUser] = i
// Send to the new channel:
messageQueue <- Chat{2, nil, chanClients[cmd.FromUser],
fmt.Sprint("{System} " + nameClients[cmd.FromUser] +
" joined your channel."), nameClients[cmd.FromUser]}
// Send to the command sender:
err := cmd.FromUser.WriteMessage(1, []byte(fmt.Sprint("{System} All done!\n")))
if err != nil {
deadConnection <- cmd.FromUser
}
continue Selection1
}
}
}
// If the previous cases failed, the channel does not exist:
err := cmd.FromUser.WriteMessage(1, []byte(fmt.Sprint("{System} This channel does not exist!\n")))
if err != nil {
deadConnection <- cmd.FromUser
}
case "create":
// Checking whether the command has its required arguments:
if len(cmd.Parameters) == 0 {
err := cmd.FromUser.WriteMessage(1, []byte(fmt.Sprint("{System} This command requires some arguments.\n")))
if err != nil {
deadConnection <- cmd.FromUser
}
continue Selection1
}
// Checking whether the command has more arguments than necessary:
if len(cmd.Parameters) > 2 {
err := cmd.FromUser.WriteMessage(1, []byte(fmt.Sprint("{System} You entered too many arguments!\n")))
if err != nil {
deadConnection <- cmd.FromUser
}
continue Selection1
}
// Checking whether the room already exists:
for i := range nameChannels {
if cmd.Parameters[0] == nameChannels[i] {
err := cmd.FromUser.WriteMessage(1, []byte(fmt.Sprint("{System} This channel already exists!\n")))
if err != nil {
deadConnection <- cmd.FromUser
}
continue Selection1
}
}
// Creating a new room on this server:
nameChannels[channelCount] = cmd.Parameters[0]
if len(cmd.Parameters) == 2 {
passChannels[channelCount] = cmd.Parameters[1]
} else {
passChannels[channelCount] = ""
}
chanClients[cmd.FromUser] = channelCount
// Notifying the client that the operation succeeded:
err := cmd.FromUser.WriteMessage(1, []byte(fmt.Sprint("{System} All done! Now you are on your own channel.\n")))
if err != nil {
deadConnection <- cmd.FromUser
}
// Informing the other servers about the newly created room:
msgByte, _ := serialize(Chat{1, nil, channelCount, nameChannels[channelCount] + ":" + passChannels[channelCount], ip + ":" + config.PortServers}) // TODO: handle serialization errors!
err = ch.Publish("setlog", "", false, false, amqp.Publishing{
ContentType: "text/plain",
Body: msgByte,
})
PrintOnError(err)
channelCount++
case "nick":
// Checking whether the command has its required argument:
if len(cmd.Parameters) == 0 {
err := cmd.FromUser.WriteMessage(1, []byte(fmt.Sprint("{System} This command requires an argument!\n")))
if err != nil {
deadConnection <- cmd.FromUser
}
continue Selection1
}
// Checking whether the command has more arguments than necessary:
if len(cmd.Parameters) > 1 {
err := cmd.FromUser.WriteMessage(1, []byte(fmt.Sprint("{System} You entered too many arguments!\n")))
if err != nil {
deadConnection <- cmd.FromUser
}
continue Selection1
}
// Checking whether this name is valid:
if cmd.Parameters[0] == nameClients[cmd.FromUser] {
err := cmd.FromUser.WriteMessage(1, []byte(fmt.Sprint("{System} This is your name already!\n")))
if err != nil {
deadConnection <- cmd.FromUser
}
continue Selection1
}
// Assigning the new nick to the client:
messageQueue <- Chat{2, nil, chanClients[cmd.FromUser],
fmt.Sprint("{System} " + nameClients[cmd.FromUser] +
" now is " + cmd.Parameters[0]), cmd.Parameters[0]}
nameClients[cmd.FromUser] = cmd.Parameters[0]
err := cmd.FromUser.WriteMessage(1, []byte(fmt.Sprint("{System} All done!\n")))
if err != nil {
deadConnection <- cmd.FromUser
}
default:
err := cmd.FromUser.WriteMessage(1, []byte(fmt.Sprint("{System} Sorry, but this command does not exist!\n")))
if err != nil {
deadConnection <- cmd.FromUser
}
}
}
}
}()
for {
select {
// 10.5. If a client disconnects, proceed:
case conn := <-deadConnection:
fmt.Println(nameClients[conn] + " is gone.")
sysMessageGlobal <- Chat{2, nil, 0, fmt.Sprint("{System} " +
nameClients[conn] + " is offline now."), ""}
delete(nameClients, conn)
delete(chanClients, conn)
conn.Close()
clientCount--
// 10.6. If there is a message to deliver, proceed:
case msg := <-messageQueue:
if msg.ID == 0 {
fmt.Printf("%s sent a message on room [%s].\n", msg.Message2,
nameChannels[msg.RoomID])
}
//Message <- msg
msgByte, _ := serialize(msg) // TODO: handle serialization errors!
err := ch.Publish("msglog", "", false, false, amqp.Publishing{
ContentType: "text/plain",
Body: msgByte,
})
PrintOnError(err)
}
}
} | serverAddresses := make(map[string]string)
// 2. Reading the configuration;
jsonFile, err := os.Open(`b-properties.json`) | random_line_split |
B-Server.go | package main
import (
"bufio"
"bytes"
"encoding/gob"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"log"
"net"
"net/http"
"os"
"os/exec"
"strings"
"sync"
"time"
"github.com/getsentry/raven-go"
"github.com/gorilla/websocket"
"github.com/streadway/amqp"
)
// Mappings between clients and their attributes;
var nameClients = make(map[*websocket.Conn]string)
var chanClients = make(map[*websocket.Conn]uint32)
// Control channels;
var newConnection = make(chan *websocket.Conn)
var deadConnection = make(chan *websocket.Conn)
// 1.3. Message channels;
var sysMessageGlobal = make(chan Chat)
var messageQueue = make(chan Chat)
var Message = make(chan Chat)
var commandQueue = make(chan Command)
var upgrader = websocket.Upgrader{
ReadBufferSize: 1024,
WriteBufferSize: 1024,
CheckOrigin: func(r *http.Request) bool { return true },
}
func handler(writer http.ResponseWriter, request *http.Request) {
socket, err := upgrader.Upgrade(writer, request, nil)
if err != nil {
fmt.Println(err)
return
}
_, msg, err := socket.ReadMessage()
if err != nil {
return
}
nameClients[socket] = string(msg) // msg contains the user name.
newConnection <- socket
for {
// Receiving messages from this client indefinitely:
_, msg, err := socket.ReadMessage()
if err != nil {
break
}
message := string(msg)
if message[0:1] != "/" | else {
message = strings.TrimLeft(message, "/")
sub := strings.Split(message, " ")
// The ID carries no meaning for commands.
commandQueue <- Command{0, socket, sub[0], sub[1:]}
}
}
// If the loop breaks, the client has disconnected;
deadConnection <- socket
}
func PanicOnError(err error) {
if err != nil {
raven.CaptureErrorAndWait(err, nil)
log.Panic(err)
}
}
func PrintOnError(err error) {
if err != nil {
raven.CaptureError(err, nil)
log.Println(err)
}
}
func serialize(message Chat) ([]byte, error) {
var b bytes.Buffer
encoder := gob.NewEncoder(&b)
err := encoder.Encode(message)
return b.Bytes(), err
}
func deserialize(b []byte) (Chat, error) {
var msg Chat
buf := bytes.NewBuffer(b)
decoder := gob.NewDecoder(buf)
err := decoder.Decode(&msg)
return msg, err
}
func get(reader io.Reader) (message string, err error) {
message, err = bufio.NewReader(reader).ReadString('\n')
message = strings.TrimRight(message, "\r\n")
return
}
func reconnect(config Properties) (serverConn net.Conn, ip string) {
fmt.Println("Trying to connect to Master Server...")
fmt.Println("It will not possible to know other new servers until this moment.")
fmt.Println()
time.Sleep(1000 * time.Millisecond)
for {
sc, err := net.Dial("tcp", config.MasterIP+":"+config.MasterPort)
serverConn = sc
sW := bufio.NewWriter(serverConn)
if err != nil {
time.Sleep(5000 * time.Millisecond)
continue
} else {
fmt.Println("Connected to Master Server!")
fmt.Println("Now, new global servers can be known.")
fmt.Println()
if !config.PublicServer {
ip = "127.0.0.1"
} else {
var buf bytes.Buffer
resp, err := http.Get("http://myexternalip.com/raw")
if err == nil {
io.Copy(&buf, resp.Body)
resp.Body.Close()
}
ip = buf.String()
}
sW.WriteString(config.ThisServerName + ":" + ip + ":" +
config.PortServers + ":" + config.PortClients + "\n")
sW.Flush()
break
}
}
return
}
func head() {
fmt.Println(" _____________________________")
fmt.Println("| |")
fmt.Println("| XIAOMI Chat System |")
fmt.Println("| Developed by Saulo Pinedo |")
fmt.Println("| --- |")
fmt.Println("| Broadcast Program |")
fmt.Println("|_____________________________|\n")
}
func main() {
// 0. Initializing additional services;
raven.SetDSN("https://277886f557384520a086cfedea9930cf@sentry.io/1831452")
// 0.1. Sentry reporting options;
raven.SetDefaultLoggerName("saulopinedo")
raven.SetDebug(true)
raven.SetEnvironment("staging")
raven.SetRelease("Xiaomi")
raven.SetSampleRate(1.0)
// 1. Pre-defining the fundamental variables;
clientCount := 0
var serverConn net.Conn
var ip string
var mutex sync.Mutex
// 1.2. Mappings between message channels and their attributes;
nameChannels := make(map[uint32]string)
passChannels := make(map[uint32]string)
// 1.2. Server address stores;
chanAddresses := make(map[uint32][]string)
serverAddresses := make(map[string]string)
// 2. Reading the configuration;
jsonFile, err := os.Open(`b-properties.json`)
PanicOnError(err)
defer jsonFile.Close()
byteValueJSON, _ := ioutil.ReadAll(jsonFile)
config := Properties{}
json.Unmarshal(byteValueJSON, &config)
channelCount := config.IDchanBegin
nameChannels[0] = "Global"
// 3. Preparing the header;
cmd := exec.Command("cmd", "/c", "cls")
cmd.Stdout = os.Stdout
cmd.Run()
head()
// 4. Preparing the client connections;
http.HandleFunc("/echo", handler)
http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
http.ServeFile(w, r, "websockets.html")
})
go func() {
http.ListenAndServe("127.0.0.1:"+config.PortClients, nil) // TODO: handle this error!
}()
// 4.1. Establishing the connection to the RabbitMQ service;
rabbit, err := amqp.Dial("amqp://guest:guest@localhost:5672/")
PanicOnError(err)
defer rabbit.Close()
// 4.2. Dedicating a RabbitMQ service channel;
ch, err := rabbit.Channel()
PanicOnError(err)
defer ch.Close()
// 4.3. Declaring the exchanges;
err = ch.ExchangeDeclare("syslog", "fanout", true, false, false, false, nil)
PanicOnError(err)
err = ch.ExchangeDeclare("msglog", "fanout", true, false, false, false, nil)
PanicOnError(err)
err = ch.ExchangeDeclare("setlog", "fanout", true, false, false, false, nil)
PanicOnError(err)
// 4.4. Declaring the message queues;
sysqueue, err := ch.QueueDeclare("", false, false, true, false, nil)
PanicOnError(err)
msgqueue, err := ch.QueueDeclare("", false, false, true, false, nil)
PanicOnError(err)
setqueue, err := ch.QueueDeclare("", false, false, true, false, nil)
PanicOnError(err)
// 4.5. Creating the queue bindings;
err = ch.QueueBind(sysqueue.Name, "", "syslog", false, nil)
PanicOnError(err)
err = ch.QueueBind(msgqueue.Name, "", "msglog", false, nil)
PanicOnError(err)
err = ch.QueueBind(setqueue.Name, "", "setlog", false, nil)
PanicOnError(err)
// 5. Connecting and receiving the structured server list from the Master Server;
go func() {
mutex.Lock()
serverConn, ip = reconnect(config)
mutex.Unlock()
for {
var list List
dec := gob.NewDecoder(serverConn)
err := dec.Decode(&list)
if err != nil && err.Error() != "gob: unknown type id or corrupted data" {
fmt.Println("The Master Server is offline now.")
mutex.Lock()
serverConn, ip = reconnect(config)
mutex.Unlock()
continue
} else if list.Name == nil {
continue
}
for name := range serverAddresses {
delete(serverAddresses, name)
}
for i := 0; i < len(list.Name); i++ {
serverAddresses[list.Name[i]] = list.AddrS[i]
}
delete(serverAddresses, config.ThisServerName)
chanAddresses[0] = list.AddrS
}
}()
// 7. Receiving structured messages from servers;
msgClient, err := ch.Consume(msgqueue.Name, "", true, false, false, false, nil)
PanicOnError(err)
msgSystem, err := ch.Consume(sysqueue.Name, "", true, false, false, false, nil)
PanicOnError(err)
msgConfig, err := ch.Consume(setqueue.Name, "", true, false, false, false, nil)
PanicOnError(err)
// If it is a client message, proceed:
go func() {
for d := range msgClient {
msg, _ := deserialize(d.Body)
Message <- msg
}
}()
// If it is a global system message:
go func() {
for d := range msgSystem {
msg, _ := deserialize(d.Body)
for c := range nameClients {
if msg.Message2 != nameClients[c] {
err := c.WriteMessage(1, []byte(fmt.Sprint(msg.Message1+"\n")))
if err != nil {
deadConnection <- c
}
}
}
}
}()
// If they are channel attributes, proceed:
go func() {
for d := range msgConfig {
msg, _ := deserialize(d.Body)
// the original if/else branches were identical, so both cases reduce to these updates
chanAddresses[msg.RoomID] = append(chanAddresses[msg.RoomID], msg.Message2)
str2 := strings.Split(msg.Message1, ":")
nameChannels[msg.RoomID] = str2[0]
passChannels[msg.RoomID] = str2[1]
fmt.Println(msg)
}
}()
// 8. Preparing to receive new client connections;
//clientListener, err := net.Listen("tcp", ":"+config.PortClients)
//PanicOnError(err)
// 9. Receiving new clients;
// go func(cl net.Listener) {
// for {
// conn, err := cl.Accept()
// if err != nil {
// raven.CaptureError(err, nil)
// log.Println(err)
// continue
// }
// clientName, _ := get(conn)
// nameClients[conn] = clientName
// newConnection <- conn
// }
// }(clientListener)
// 10. Which branch to take?
go func() {
for {
select {
// 10.1. If a new client connects, proceed:
case conn := <-newConnection:
fmt.Println("A new client has arrived: " + nameClients[conn])
sysMessageGlobal <- Chat{2, nil, 0, fmt.Sprint("{System} " +
nameClients[conn] + " is online now."), nameClients[conn]}
chanClients[conn] = 0
clientCount++
time.Sleep(100 * time.Millisecond)
// 9.1.2. Receiving messages from this client indefinitely;
// go func(conn net.Conn) {
// for {
// message, err := get(conn)
// if err != nil {
// break
// }
// if message[0:1] != "/" {
// // ID 0 marks a regular chat message.
// messageQueue <- Chat{0, nil, chanClients[conn], message, nameClients[conn]}
// } else {
// message = strings.TrimLeft(message, "/")
// sub := strings.Split(message, " ")
// // The ID carries no meaning for commands.
// commandQueue <- Command{0, conn, sub[0], sub[1:]}
// }
// }
// // 9.1.2.1. If the loop breaks, the client has disconnected;
// deadConnection <- conn
// }(conn)
// 10.2. If there is a message for the local clients, proceed:
case msg := <-Message:
for conn := range chanClients {
if msg.RoomID == chanClients[conn] && msg.Message2 != nameClients[conn] {
go func(msg Chat, conn *websocket.Conn) {
if msg.ID == 0 {
err := conn.WriteMessage(1, []byte(fmt.Sprint(msg.Message2+": "+msg.Message1+"\n")))
if err != nil {
deadConnection <- conn
}
} else if msg.ID == 2 {
err := conn.WriteMessage(1, []byte(fmt.Sprint(msg.Message1+"\n")))
if err != nil {
deadConnection <- conn
}
}
}(msg, conn)
}
}
}
}
}()
go func() {
Selection1:
for {
select {
// 10.3. If the system sends a global message, proceed:
case message := <-sysMessageGlobal:
msg, err := serialize(message) // TODO: handle serialization errors!
err = ch.Publish("syslog", "", false, false, amqp.Publishing{
ContentType: "text/plain",
Body: msg,
})
PrintOnError(err)
// for c := range nameClients {
// if message.Message2 != nameClients[c] {
// err := c.WriteMessage(1, []byte(fmt.Sprint(message.Message1+"\n")))
// if err != nil {
// deadConnection <- c
// }
// }
// }
// 10.4. If there is a command to be processed, proceed:
case cmd := <-commandQueue:
fmt.Printf("%s sent a command.\n", nameClients[cmd.FromUser])
switch cmd.Command {
case "show":
// Checking whether the command has more arguments than necessary:
if len(cmd.Parameters) > 0 {
err := cmd.FromUser.WriteMessage(1, []byte(fmt.Sprint("{System} You entered too many arguments!\n")))
if err != nil {
deadConnection <- cmd.FromUser
}
continue Selection1
}
// Checking whether there are channels to display:
if len(nameChannels) == 0 {
err := cmd.FromUser.WriteMessage(1, []byte(fmt.Sprint("{System} There are no channels available!\n")))
if err != nil {
deadConnection <- cmd.FromUser
}
continue Selection1
}
// Sending the available channels to the client:
var b bytes.Buffer
for i := range nameChannels {
b.WriteString(nameChannels[i] + " ")
}
names := b.String()
names = strings.TrimRight(names, " ")
err := cmd.FromUser.WriteMessage(1, []byte(fmt.Sprint("Available rooms: "+names+"\n")))
if err != nil {
deadConnection <- cmd.FromUser
}
case "join":
// Checking whether the command has its required arguments:
if len(cmd.Parameters) == 0 {
err := cmd.FromUser.WriteMessage(1, []byte(fmt.Sprint("{System} This command requires some arguments.\n")))
if err != nil {
deadConnection <- cmd.FromUser
}
continue Selection1
}
// Checking whether the command has more arguments than necessary:
if len(cmd.Parameters) > 2 {
err := cmd.FromUser.WriteMessage(1, []byte(fmt.Sprint("{System} You entered too many arguments!\n")))
if err != nil {
deadConnection <- cmd.FromUser
}
continue Selection1
}
// Checking whether the client is already subscribed to the channel:
if cmd.Parameters[0] == nameChannels[chanClients[cmd.FromUser]] {
err := cmd.FromUser.WriteMessage(1, []byte(fmt.Sprint("{System} You're already subscribed to this channel!\n")))
if err != nil {
deadConnection <- cmd.FromUser
}
continue Selection1
}
// Assigning the channel to the client:
for i := range nameChannels {
if cmd.Parameters[0] == nameChannels[i] {
if passChannels[i] == "" || cmd.Parameters[1] == passChannels[i] {
// Send to the old channel:
messageQueue <- Chat{2, nil, chanClients[cmd.FromUser],
fmt.Sprint("{System} " + nameClients[cmd.FromUser] +
" left your channel."), nameClients[cmd.FromUser]}
chanClients[cmd.FromUser] = i
// Send to the new channel:
messageQueue <- Chat{2, nil, chanClients[cmd.FromUser],
fmt.Sprint("{System} " + nameClients[cmd.FromUser] +
" joined your channel."), nameClients[cmd.FromUser]}
// Send to the command sender:
err := cmd.FromUser.WriteMessage(1, []byte(fmt.Sprint("{System} All done!\n")))
if err != nil {
deadConnection <- cmd.FromUser
}
continue Selection1
}
}
}
// If the previous cases failed, the channel does not exist:
err := cmd.FromUser.WriteMessage(1, []byte(fmt.Sprint("{System} This channel does not exist!\n")))
if err != nil {
deadConnection <- cmd.FromUser
}
case "create":
// Checking whether the command has its required arguments:
if len(cmd.Parameters) == 0 {
err := cmd.FromUser.WriteMessage(1, []byte(fmt.Sprint("{System} This command requires some arguments.\n")))
if err != nil {
deadConnection <- cmd.FromUser
}
continue Selection1
}
// Checking whether the command has more arguments than necessary:
if len(cmd.Parameters) > 2 {
err := cmd.FromUser.WriteMessage(1, []byte(fmt.Sprint("{System} You entered too many arguments!\n")))
if err != nil {
deadConnection <- cmd.FromUser
}
continue Selection1
}
// Checking whether the room already exists:
for i := range nameChannels {
if cmd.Parameters[0] == nameChannels[i] {
err := cmd.FromUser.WriteMessage(1, []byte(fmt.Sprint("{System} This channel already exists!\n")))
if err != nil {
deadConnection <- cmd.FromUser
}
continue Selection1
}
}
// Creating a new room on this server:
nameChannels[channelCount] = cmd.Parameters[0]
if len(cmd.Parameters) == 2 {
passChannels[channelCount] = cmd.Parameters[1]
} else {
passChannels[channelCount] = ""
}
chanClients[cmd.FromUser] = channelCount
// Notifying the client that the operation succeeded:
err := cmd.FromUser.WriteMessage(1, []byte(fmt.Sprint("{System} All done! Now you are on your own channel.\n")))
if err != nil {
deadConnection <- cmd.FromUser
}
// Informing the other servers about the newly created room:
msgByte, _ := serialize(Chat{1, nil, channelCount, nameChannels[channelCount] + ":" + passChannels[channelCount], ip + ":" + config.PortServers}) // TODO: handle serialization errors!
err = ch.Publish("setlog", "", false, false, amqp.Publishing{
ContentType: "text/plain",
Body: msgByte,
})
PrintOnError(err)
channelCount++
case "nick":
// Checking whether the command has its required argument:
if len(cmd.Parameters) == 0 {
err := cmd.FromUser.WriteMessage(1, []byte(fmt.Sprint("{System} This command requires an argument!\n")))
if err != nil {
deadConnection <- cmd.FromUser
}
continue Selection1
}
// Checking whether the command has more arguments than necessary:
if len(cmd.Parameters) > 1 {
err := cmd.FromUser.WriteMessage(1, []byte(fmt.Sprint("{System} You entered too many arguments!\n")))
if err != nil {
deadConnection <- cmd.FromUser
}
continue Selection1
}
// Checking whether this name is valid:
if cmd.Parameters[0] == nameClients[cmd.FromUser] {
err := cmd.FromUser.WriteMessage(1, []byte(fmt.Sprint("{System} This is your name already!\n")))
if err != nil {
deadConnection <- cmd.FromUser
}
continue Selection1
}
// Assigning the new nick to the client:
messageQueue <- Chat{2, nil, chanClients[cmd.FromUser],
fmt.Sprint("{System} " + nameClients[cmd.FromUser] +
" now is " + cmd.Parameters[0]), cmd.Parameters[0]}
nameClients[cmd.FromUser] = cmd.Parameters[0]
err := cmd.FromUser.WriteMessage(1, []byte(fmt.Sprint("{System} All done!\n")))
if err != nil {
deadConnection <- cmd.FromUser
}
default:
err := cmd.FromUser.WriteMessage(1, []byte(fmt.Sprint("{System} Sorry, but this command does not exist!\n")))
if err != nil {
deadConnection <- cmd.FromUser
}
}
}
}
}()
for {
select {
// 10.5. If a client disconnects, proceed:
case conn := <-deadConnection:
fmt.Println(nameClients[conn] + " is gone.")
sysMessageGlobal <- Chat{2, nil, 0, fmt.Sprint("{System} " +
nameClients[conn] + " is offline now."), ""}
delete(nameClients, conn)
delete(chanClients, conn)
conn.Close()
clientCount--
// 10.6. If there is a message to deliver, proceed:
case msg := <-messageQueue:
if msg.ID == 0 {
fmt.Printf("%s sent a message on room [%s].\n", msg.Message2,
nameChannels[msg.RoomID])
}
//Message <- msg
msgByte, _ := serialize(msg) // TODO: handle serialization errors!
err := ch.Publish("msglog", "", false, false, amqp.Publishing{
ContentType: "text/plain",
Body: msgByte,
})
PrintOnError(err)
}
}
}
| {
// ID 0 denotes a plain chat message.
messageQueue <- Chat{0, nil, chanClients[socket], message, nameClients[socket]}
} | conditional_block |
B-Server.go | package main
import (
"bufio"
"bytes"
"encoding/gob"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"log"
"net"
"net/http"
"os"
"os/exec"
"strings"
"sync"
"time"
"github.com/getsentry/raven-go"
"github.com/gorilla/websocket"
"github.com/streadway/amqp"
)
// Mappings between clients and their attributes;
var nameClients = make(map[*websocket.Conn]string)
var chanClients = make(map[*websocket.Conn]uint32)
// Control channels;
var newConnection = make(chan *websocket.Conn)
var deadConnection = make(chan *websocket.Conn)
// 1.3. Message channels;
var sysMessageGlobal = make(chan Chat)
var messageQueue = make(chan Chat)
var Message = make(chan Chat)
var commandQueue = make(chan Command)
var upgrader = websocket.Upgrader{
ReadBufferSize: 1024,
WriteBufferSize: 1024,
CheckOrigin: func(r *http.Request) bool { return true },
}
func | (writer http.ResponseWriter, request *http.Request) {
socket, err := upgrader.Upgrade(writer, request, nil)
if err != nil {
fmt.Println(err)
}
_, msg, err := socket.ReadMessage()
nameClients[socket] = string(msg) // msg contains the user's name.
newConnection <- socket
for {
// Receiving messages from this client indefinitely:
_, msg, err := socket.ReadMessage()
if err != nil {
break
}
message := string(msg)
if message[0:1] != "/" {
// ID 0 denotes a plain chat message.
messageQueue <- Chat{0, nil, chanClients[socket], message, nameClients[socket]}
} else {
message = strings.TrimLeft(message, "/")
sub := strings.Split(message, " ")
// The ID field has no meaning for commands.
commandQueue <- Command{0, socket, sub[0], sub[1:]}
}
}
// If the loop breaks, the client has disconnected;
deadConnection <- socket
}
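// Note: the literal 1 passed to WriteMessage throughout this file is
// websocket.TextMessage in gorilla/websocket; the named constant would
// make these calls self-documenting.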
func PanicOnError(err error) {
if err != nil {
raven.CaptureErrorAndWait(err, nil)
log.Panic(err)
}
}
func PrintOnError(err error) {
if err != nil {
raven.CaptureError(err, nil)
log.Println(err)
}
}
func serialize(message Chat) ([]byte, error) {
var b bytes.Buffer
encoder := gob.NewEncoder(&b)
err := encoder.Encode(message)
return b.Bytes(), err
}
func deserialize(b []byte) (Chat, error) {
var msg Chat
buf := bytes.NewBuffer(b)
decoder := gob.NewDecoder(buf)
err := decoder.Decode(&msg)
return msg, err
}
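// A minimal round-trip sketch of the two gob helpers above. The Chat field
// values are illustrative only and the function is never called; it exists
// purely to document the expected usage:
func exampleGobRoundTrip() {
original := Chat{0, nil, 0, "hello", "alice"}
raw, err := serialize(original)
PrintOnError(err)
restored, err := deserialize(raw)
PrintOnError(err)
fmt.Println(restored.Message1) // prints "hello"
}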
func get(reader io.Reader) (message string, err error) {
message, err = bufio.NewReader(reader).ReadString('\n')
message = strings.TrimRight(message, "\r\n")
return
}
func reconnect(config Properties) (serverConn net.Conn, ip string) {
fmt.Println("Trying to connect to Master Server...")
fmt.Println("It will not possible to know other new servers until this moment.")
fmt.Println()
time.Sleep(1000 * time.Millisecond)
for {
sc, err := net.Dial("tcp", config.MasterIP+":"+config.MasterPort)
serverConn = sc
sW := bufio.NewWriter(serverConn)
if err != nil {
time.Sleep(5000 * time.Millisecond)
continue
} else {
fmt.Println("Connected to Master Server!")
fmt.Println("Now, new global servers can be known.")
fmt.Println()
if config.PublicServer == false {
ip = "127.0.0.1"
sW.WriteString(config.ThisServerName + ":" + ip + ":" +
config.PortServers + ":" + config.PortClients + "\n")
sW.Flush()
} else if config.PublicServer == true {
var buf bytes.Buffer
resp, _ := http.Get("http://myexternalip.com/raw")
io.Copy(&buf, resp.Body)
resp.Body.Close()
ip = buf.String()
sW.WriteString(config.ThisServerName + ":" + ip + ":" +
config.PortServers + ":" + config.PortClients + "\n")
sW.Flush()
}
break
}
}
return
}
func head() {
fmt.Println(" _____________________________")
fmt.Println("| |")
fmt.Println("| XIAOMI Chat System |")
fmt.Println("| Developed by Saulo Pinedo |")
fmt.Println("| --- |")
fmt.Println("| Broadcast Program |")
fmt.Println("|_____________________________|\n")
}
func main() {
// 0. Initializing additional services;
raven.SetDSN("https://277886f557384520a086cfedea9930cf@sentry.io/1831452")
// 0.1. Sentry logger configuration;
raven.SetDefaultLoggerName("saulopinedo")
raven.SetDebug(true)
raven.SetEnvironment("staging")
raven.SetRelease("Xiaomi")
raven.SetSampleRate(1.0)
// 1. Pre-defining the fundamental variables;
clientCount := 0
var serverConn net.Conn
var ip string
var mutex sync.Mutex
// 1.2. Mappings between message channels and their attributes;
nameChannels := make(map[uint32]string)
passChannels := make(map[uint32]string)
// 1.3. Server address registry;
chanAddresses := make(map[uint32][]string)
serverAddresses := make(map[string]string)
// 2. Reading the configuration;
jsonFile, err := os.Open(`b-properties.json`)
PanicOnError(err)
defer jsonFile.Close()
byteValueJSON, _ := ioutil.ReadAll(jsonFile)
config := Properties{}
json.Unmarshal(byteValueJSON, &config)
channelCount := config.IDchanBegin
nameChannels[0] = "Global"
// 3. Preparing the header;
cmd := exec.Command("cmd", "/c", "cls")
cmd.Stdout = os.Stdout
cmd.Run()
head()
// 4. Preparing client connections;
http.HandleFunc("/echo", handler)
http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
http.ServeFile(w, r, "websockets.html")
})
go func() {
http.ListenAndServe("127.0.0.1:"+config.PortClients, nil) // TODO: handle this error!
}()
// 4.1. Establishing the connection to the RabbitMQ service;
rabbit, err := amqp.Dial("amqp://guest:guest@localhost:5672/")
PanicOnError(err)
defer rabbit.Close()
// 4.2. Dedicating a RabbitMQ service channel;
ch, err := rabbit.Channel()
PanicOnError(err)
defer ch.Close()
// 4.3. Declaring the exchanges;
err = ch.ExchangeDeclare("syslog", "fanout", true, false, false, false, nil)
PanicOnError(err)
err = ch.ExchangeDeclare("msglog", "fanout", true, false, false, false, nil)
PanicOnError(err)
err = ch.ExchangeDeclare("setlog", "fanout", true, false, false, false, nil)
PanicOnError(err)
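// A fanout exchange ignores routing keys and copies every message to all
// bound queues, which is what lets every broadcast server see every event.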
// 4.4. Declaring the message queues;
sysqueue, err := ch.QueueDeclare("", false, false, true, false, nil)
PanicOnError(err)
msgqueue, err := ch.QueueDeclare("", false, false, true, false, nil)
PanicOnError(err)
setqueue, err := ch.QueueDeclare("", false, false, true, false, nil)
PanicOnError(err)
// 4.5. Creating the bindings;
err = ch.QueueBind(sysqueue.Name, "", "syslog", false, nil)
PanicOnError(err)
err = ch.QueueBind(msgqueue.Name, "", "msglog", false, nil)
PanicOnError(err)
err = ch.QueueBind(setqueue.Name, "", "setlog", false, nil)
PanicOnError(err)
// 5. Connecting to the Master Server and receiving its structured server list;
go func() {
mutex.Lock()
serverConn, ip = reconnect(config)
mutex.Unlock()
for {
var list List
dec := gob.NewDecoder(serverConn)
err := dec.Decode(&list)
if err != nil && err.Error() != "gob: unknown type id or corrupted data" {
fmt.Println("The Master Server is offline now.")
mutex.Lock()
serverConn, ip = reconnect(config)
mutex.Unlock()
continue
} else if list.Name == nil {
continue
}
for name := range serverAddresses {
delete(serverAddresses, name)
}
for i := 0; i < len(list.Name); i++ {
serverAddresses[list.Name[i]] = list.AddrS[i]
}
delete(serverAddresses, config.ThisServerName)
chanAddresses[0] = list.AddrS
}
}()
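// The mutex above only guards the reassignment of serverConn/ip during a
// reconnect; the client maps (nameClients, chanClients) are still shared
// across goroutines without any locking.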
// 7. Receiving structured messages from other servers;
msgClient, err := ch.Consume(msgqueue.Name, "", true, false, false, false, nil)
PanicOnError(err)
msgSystem, err := ch.Consume(sysqueue.Name, "", true, false, false, false, nil)
PanicOnError(err)
msgConfig, err := ch.Consume(setqueue.Name, "", true, false, false, false, nil)
PanicOnError(err)
// If it is a client message, proceed:
go func() {
for d := range msgClient {
msg, _ := deserialize(d.Body)
Message <- msg
}
}()
// If it is a global system message:
go func() {
for d := range msgSystem {
msg, _ := deserialize(d.Body)
for c := range nameClients {
if msg.Message2 != nameClients[c] {
err := c.WriteMessage(1, []byte(fmt.Sprint(msg.Message1+"\n")))
if err != nil {
deadConnection <- c
}
}
}
}
}()
// If they are channel attributes, proceed:
go func() {
for d := range msgConfig {
msg, _ := deserialize(d.Body)
if str, ok := chanAddresses[msg.RoomID]; ok {
str = append(str, msg.Message2)
chanAddresses[msg.RoomID] = str
str2 := strings.Split(msg.Message1, ":")
nameChannels[msg.RoomID] = str2[0]
passChannels[msg.RoomID] = str2[1]
fmt.Println(msg) //
} else {
str = append(str, msg.Message2)
chanAddresses[msg.RoomID] = str
str2 := strings.Split(msg.Message1, ":")
nameChannels[msg.RoomID] = str2[0]
passChannels[msg.RoomID] = str2[1]
fmt.Println(msg) //
}
}
}()
// 8. Preparing to accept new client connections;
//clientListener, err := net.Listen("tcp", ":"+config.PortClients)
//PanicOnError(err)
// 9. Accepting new clients;
// go func(cl net.Listener) {
// for {
// conn, err := cl.Accept()
// if err != nil {
// raven.CaptureError(err, nil)
// log.Println(err)
// continue
// }
// clientName, _ := get(conn)
// nameClients[conn] = clientName
// newConnection <- conn
// }
// }(clientListener)
// 10. Which alternative to take?
go func() {
for {
select {
// 10.1. If a new client connects, proceed:
case conn := <-newConnection:
fmt.Println("A new client has arrived: " + nameClients[conn])
sysMessageGlobal <- Chat{2, nil, 0, fmt.Sprint("{System} " +
nameClients[conn] + " is online now."), nameClients[conn]}
chanClients[conn] = 0
clientCount++
time.Sleep(100 * time.Millisecond)
// 9.1.2. Receiving messages from this client indefinitely;
// go func(conn net.Conn) {
// for {
// message, err := get(conn)
// if err != nil {
// break
// }
// if message[0:1] != "/" {
// // ID 0 denotes a plain chat message.
// messageQueue <- Chat{0, nil, chanClients[conn], message, nameClients[conn]}
// } else {
// message = strings.TrimLeft(message, "/")
// sub := strings.Split(message, " ")
// // The ID field has no meaning for commands.
// commandQueue <- Command{0, conn, sub[0], sub[1:]}
// }
// }
// // 9.1.2.1. If the loop breaks, the client has disconnected;
// deadConnection <- conn
// }(conn)
// 10.2. If there is a message for the local clients, proceed:
case msg := <-Message:
fmt.Println("TESTE2")
for conn := range chanClients {
if msg.RoomID == chanClients[conn] && msg.Message2 != nameClients[conn] {
go func(msg Chat, conn *websocket.Conn) {
if msg.ID == 0 {
err := conn.WriteMessage(1, []byte(fmt.Sprint(msg.Message2+": "+msg.Message1+"\n")))
if err != nil {
deadConnection <- conn
}
} else if msg.ID == 2 {
err := conn.WriteMessage(1, []byte(fmt.Sprint(msg.Message1+"\n")))
if err != nil {
deadConnection <- conn
}
}
}(msg, conn)
}
}
}
}
}()
go func() {
Selection1:
for {
select {
// 10.3. If the system sends a global message, proceed:
case message := <-sysMessageGlobal:
msg, err := serialize(message) // TODO: implement error handling!
err = ch.Publish("syslog", "", false, false, amqp.Publishing{
ContentType: "text/plain",
Body: msg,
})
PrintOnError(err)
// for c := range nameClients {
// if message.Message2 != nameClients[c] {
// err := c.WriteMessage(1, []byte(fmt.Sprint(message.Message1+"\n")))
// if err != nil {
// deadConnection <- c
// }
// }
// }
// 10.4. If there is a command to process, proceed:
case cmd := <-commandQueue:
fmt.Printf("%s sent a command.\n", nameClients[cmd.FromUser])
switch cmd.Command {
case "show":
// Checking whether the command has more arguments than necessary:
if len(cmd.Parameters) > 0 {
err := cmd.FromUser.WriteMessage(1, []byte(fmt.Sprint("{System} You entered too many arguments!\n")))
if err != nil {
deadConnection <- cmd.FromUser
}
continue Selection1
}
// Checking whether there are channels to display:
if len(nameChannels) == 0 {
err := cmd.FromUser.WriteMessage(1, []byte(fmt.Sprint("{System} There are no channels available!\n")))
if err != nil {
deadConnection <- cmd.FromUser
}
continue Selection1
}
// Sending the available channels to the client:
var b bytes.Buffer
for i := range nameChannels {
b.WriteString(nameChannels[i] + " ")
}
names := b.String()
names = strings.TrimRight(names, " ")
err := cmd.FromUser.WriteMessage(1, []byte(fmt.Sprint("Salas disponíveis: "+names+"\n")))
if err != nil {
deadConnection <- cmd.FromUser
}
case "join":
// Checking whether the command has its required arguments:
if len(cmd.Parameters) == 0 {
err := cmd.FromUser.WriteMessage(1, []byte(fmt.Sprint("{System} This command requires some arguments.\n")))
if err != nil {
deadConnection <- cmd.FromUser
}
continue Selection1
}
// Checking whether the command has more arguments than necessary:
if len(cmd.Parameters) > 2 {
err := cmd.FromUser.WriteMessage(1, []byte(fmt.Sprint("{System} You entered too many arguments!\n")))
if err != nil {
deadConnection <- cmd.FromUser
}
continue Selection1
}
// Checking whether the client is already subscribed to this channel:
if cmd.Parameters[0] == nameChannels[chanClients[cmd.FromUser]] {
err := cmd.FromUser.WriteMessage(1, []byte(fmt.Sprint("{System} You're already subscribed to this channel!\n")))
if err != nil {
deadConnection <- cmd.FromUser
}
continue Selection1
}
// Assigning the channel to the client:
for i := range nameChannels {
if cmd.Parameters[0] == nameChannels[i] {
if passChannels[i] == "" || cmd.Parameters[1] == passChannels[i] {
// Send to the old channel:
messageQueue <- Chat{2, nil, chanClients[cmd.FromUser],
fmt.Sprint("{System} " + nameClients[cmd.FromUser] +
" left your channel."), nameClients[cmd.FromUser]}
chanClients[cmd.FromUser] = i
// Send to the new channel:
messageQueue <- Chat{2, nil, chanClients[cmd.FromUser],
fmt.Sprint("{System} " + nameClients[cmd.FromUser] +
" joined your channel."), nameClients[cmd.FromUser]}
// Send to the sender of the command:
err := cmd.FromUser.WriteMessage(1, []byte(fmt.Sprint("{System} All done!\n")))
if err != nil {
deadConnection <- cmd.FromUser
}
continue Selection1
}
}
}
// If the previous cases failed, the channel does not exist:
err := cmd.FromUser.WriteMessage(1, []byte(fmt.Sprint("{System} This channel does not exist!\n")))
if err != nil {
deadConnection <- cmd.FromUser
}
case "create":
// Checking whether the command has its required arguments:
if len(cmd.Parameters) == 0 {
err := cmd.FromUser.WriteMessage(1, []byte(fmt.Sprint("{System} This command requires some arguments.\n")))
if err != nil {
deadConnection <- cmd.FromUser
}
continue Selection1
}
// Checking whether the command has more arguments than necessary:
if len(cmd.Parameters) > 2 {
err := cmd.FromUser.WriteMessage(1, []byte(fmt.Sprint("{System} You entered too many arguments!\n")))
if err != nil {
deadConnection <- cmd.FromUser
}
continue Selection1
}
// Checking whether the room already exists:
for i := range nameChannels {
if cmd.Parameters[0] == nameChannels[i] {
err := cmd.FromUser.WriteMessage(1, []byte(fmt.Sprint("{System} This channel already exists!\n")))
if err != nil {
deadConnection <- cmd.FromUser
}
continue Selection1
}
}
// Creating a new room on this server:
nameChannels[channelCount] = cmd.Parameters[0]
if len(cmd.Parameters) == 2 {
passChannels[channelCount] = cmd.Parameters[1]
} else {
passChannels[channelCount] = ""
}
chanClients[cmd.FromUser] = channelCount
// Notifying the client that the operation succeeded:
err := cmd.FromUser.WriteMessage(1, []byte(fmt.Sprint("{System} All done! Now you are on your own channel.\n")))
if err != nil {
deadConnection <- cmd.FromUser
}
// Informing the other servers that the newly created room exists:
msgByte, _ := serialize(Chat{1, nil, channelCount, nameChannels[channelCount] + ":" + passChannels[channelCount], ip + ":" + config.PortServers}) // TODO: implement error handling!
err = ch.Publish("setlog", "", false, false, amqp.Publishing{
ContentType: "text/plain",
Body: msgByte,
})
PrintOnError(err)
channelCount++
case "nick":
// Checking whether the command has its required argument:
if len(cmd.Parameters) == 0 {
err := cmd.FromUser.WriteMessage(1, []byte(fmt.Sprint("{System} This command requires an argument!\n")))
if err != nil {
deadConnection <- cmd.FromUser
}
continue Selection1
}
// Checking whether the command has more arguments than necessary:
if len(cmd.Parameters) > 1 {
err := cmd.FromUser.WriteMessage(1, []byte(fmt.Sprint("{System} You entered too many arguments!\n")))
if err != nil {
deadConnection <- cmd.FromUser
}
continue Selection1
}
// Checking whether this name is valid:
if cmd.Parameters[0] == nameClients[cmd.FromUser] {
err := cmd.FromUser.WriteMessage(1, []byte(fmt.Sprint("{System} This is your name already!\n")))
if err != nil {
deadConnection <- cmd.FromUser
}
continue Selection1
}
// Assigning the new nick to the client:
messageQueue <- Chat{2, nil, chanClients[cmd.FromUser],
fmt.Sprint("{System} " + nameClients[cmd.FromUser] +
" is now " + cmd.Parameters[0]), cmd.Parameters[0]}
nameClients[cmd.FromUser] = cmd.Parameters[0]
err := cmd.FromUser.WriteMessage(1, []byte(fmt.Sprint("{System} All done!\n")))
if err != nil {
deadConnection <- cmd.FromUser
}
default:
err := cmd.FromUser.WriteMessage(1, []byte(fmt.Sprint("{System} Sorry, but this command does not exist!\n")))
if err != nil {
deadConnection <- cmd.FromUser
}
}
}
}
}()
for {
select {
// 10.5. If a client disconnects, proceed:
case conn := <-deadConnection:
fmt.Println(nameClients[conn] + " is gone.")
sysMessageGlobal <- Chat{2, nil, 0, fmt.Sprint("{System} " +
nameClients[conn] + " is offline now."), ""}
delete(nameClients, conn)
delete(chanClients, conn)
conn.Close()
clientCount--
// 10.6. If there is a message to deliver, proceed:
case msg := <-messageQueue:
if msg.ID == 0 {
fmt.Printf("%s sent a message on room [%s].\n", msg.Message2,
nameChannels[msg.RoomID])
}
//Message <- msg
msgByte, _ := serialize(msg) // TODO: implement error handling!
err := ch.Publish("msglog", "", false, false, amqp.Publishing{
ContentType: "text/plain",
Body: msgByte,
})
PrintOnError(err)
}
}
}
| handler | identifier_name |
B-Server.go | package main
import (
"bufio"
"bytes"
"encoding/gob"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"log"
"net"
"net/http"
"os"
"os/exec"
"strings"
"sync"
"time"
"github.com/getsentry/raven-go"
"github.com/gorilla/websocket"
"github.com/streadway/amqp"
)
// Mappings between clients and their attributes;
var nameClients = make(map[*websocket.Conn]string)
var chanClients = make(map[*websocket.Conn]uint32)
// Control channels;
var newConnection = make(chan *websocket.Conn)
var deadConnection = make(chan *websocket.Conn)
// 1.3. Message channels;
var sysMessageGlobal = make(chan Chat)
var messageQueue = make(chan Chat)
var Message = make(chan Chat)
var commandQueue = make(chan Command)
var upgrader = websocket.Upgrader{
ReadBufferSize: 1024,
WriteBufferSize: 1024,
CheckOrigin: func(r *http.Request) bool { return true },
}
func handler(writer http.ResponseWriter, request *http.Request) {
socket, err := upgrader.Upgrade(writer, request, nil)
if err != nil {
fmt.Println(err)
}
_, msg, err := socket.ReadMessage()
nameClients[socket] = string(msg) // msg contains the user's name.
newConnection <- socket
for {
// Receiving messages from this client indefinitely:
_, msg, err := socket.ReadMessage()
if err != nil {
break
}
message := string(msg)
if message[0:1] != "/" {
// ID 0 denotes a plain chat message.
messageQueue <- Chat{0, nil, chanClients[socket], message, nameClients[socket]}
} else {
message = strings.TrimLeft(message, "/")
sub := strings.Split(message, " ")
// The ID field has no meaning for commands.
commandQueue <- Command{0, socket, sub[0], sub[1:]}
}
}
// If the loop breaks, the client has disconnected;
deadConnection <- socket
}
func PanicOnError(err error) |
func PrintOnError(err error) {
if err != nil {
raven.CaptureError(err, nil)
log.Println(err)
}
}
func serialize(message Chat) ([]byte, error) {
var b bytes.Buffer
encoder := gob.NewEncoder(&b)
err := encoder.Encode(message)
return b.Bytes(), err
}
func deserialize(b []byte) (Chat, error) {
var msg Chat
buf := bytes.NewBuffer(b)
decoder := gob.NewDecoder(buf)
err := decoder.Decode(&msg)
return msg, err
}
func get(reader io.Reader) (message string, err error) {
message, err = bufio.NewReader(reader).ReadString('\n')
message = strings.TrimRight(message, "\r\n")
return
}
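// Caveat: get allocates a fresh bufio.Reader on every call, so any bytes
// buffered past the first newline are discarded; acceptable for one-shot
// reads, but not for repeated reads on the same connection.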
func reconnect(config Properties) (serverConn net.Conn, ip string) {
fmt.Println("Trying to connect to Master Server...")
fmt.Println("It will not possible to know other new servers until this moment.")
fmt.Println()
time.Sleep(1000 * time.Millisecond)
for {
sc, err := net.Dial("tcp", config.MasterIP+":"+config.MasterPort)
serverConn = sc
sW := bufio.NewWriter(serverConn)
if err != nil {
time.Sleep(5000 * time.Millisecond)
continue
} else {
fmt.Println("Connected to Master Server!")
fmt.Println("Now, new global servers can be known.")
fmt.Println()
if config.PublicServer == false {
ip = "127.0.0.1"
sW.WriteString(config.ThisServerName + ":" + ip + ":" +
config.PortServers + ":" + config.PortClients + "\n")
sW.Flush()
} else if config.PublicServer == true {
var buf bytes.Buffer
resp, _ := http.Get("http://myexternalip.com/raw")
io.Copy(&buf, resp.Body)
resp.Body.Close()
ip = buf.String()
sW.WriteString(config.ThisServerName + ":" + ip + ":" +
config.PortServers + ":" + config.PortClients + "\n")
sW.Flush()
}
break
}
}
return
}
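// The loop above retries at a fixed 5s interval. A hypothetical exponential
// backoff helper (not wired into reconnect as written) could look like:
func backoffDelay(attempt int) time.Duration {
d := time.Duration(1<<uint(attempt)) * time.Second
if d > 30*time.Second {
d = 30 * time.Second
}
return d
}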
func head() {
fmt.Println(" _____________________________")
fmt.Println("| |")
fmt.Println("| XIAOMI Chat System |")
fmt.Println("| Developed by Saulo Pinedo |")
fmt.Println("| --- |")
fmt.Println("| Broadcast Program |")
fmt.Println("|_____________________________|\n")
}
func main() {
// 0. Initializing additional services;
raven.SetDSN("https://277886f557384520a086cfedea9930cf@sentry.io/1831452")
// 0.1. Sentry logger configuration;
raven.SetDefaultLoggerName("saulopinedo")
raven.SetDebug(true)
raven.SetEnvironment("staging")
raven.SetRelease("Xiaomi")
raven.SetSampleRate(1.0)
// 1. Pre-defining the fundamental variables;
clientCount := 0
var serverConn net.Conn
var ip string
var mutex sync.Mutex
// 1.2. Mappings between message channels and their attributes;
nameChannels := make(map[uint32]string)
passChannels := make(map[uint32]string)
// 1.3. Server address registry;
chanAddresses := make(map[uint32][]string)
serverAddresses := make(map[string]string)
// 2. Reading the configuration;
jsonFile, err := os.Open(`b-properties.json`)
PanicOnError(err)
defer jsonFile.Close()
byteValueJSON, _ := ioutil.ReadAll(jsonFile)
config := Properties{}
json.Unmarshal(byteValueJSON, &config)
channelCount := config.IDchanBegin
nameChannels[0] = "Global"
// 3. Preparing the header;
cmd := exec.Command("cmd", "/c", "cls")
cmd.Stdout = os.Stdout
cmd.Run()
head()
// 4. Preparing client connections;
http.HandleFunc("/echo", handler)
http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
http.ServeFile(w, r, "websockets.html")
})
go func() {
http.ListenAndServe("127.0.0.1:"+config.PortClients, nil) // TODO: handle this error!
}()
// 4.1. Establishing the connection to the RabbitMQ service;
rabbit, err := amqp.Dial("amqp://guest:guest@localhost:5672/")
PanicOnError(err)
defer rabbit.Close()
// 4.2. Dedicating a RabbitMQ service channel;
ch, err := rabbit.Channel()
PanicOnError(err)
defer ch.Close()
// 4.3. Declaring the exchanges;
err = ch.ExchangeDeclare("syslog", "fanout", true, false, false, false, nil)
PanicOnError(err)
err = ch.ExchangeDeclare("msglog", "fanout", true, false, false, false, nil)
PanicOnError(err)
err = ch.ExchangeDeclare("setlog", "fanout", true, false, false, false, nil)
PanicOnError(err)
// 4.4. Declaring the message queues;
sysqueue, err := ch.QueueDeclare("", false, false, true, false, nil)
PanicOnError(err)
msgqueue, err := ch.QueueDeclare("", false, false, true, false, nil)
PanicOnError(err)
setqueue, err := ch.QueueDeclare("", false, false, true, false, nil)
PanicOnError(err)
// 4.5. Creating the bindings;
err = ch.QueueBind(sysqueue.Name, "", "syslog", false, nil)
PanicOnError(err)
err = ch.QueueBind(msgqueue.Name, "", "msglog", false, nil)
PanicOnError(err)
err = ch.QueueBind(setqueue.Name, "", "setlog", false, nil)
PanicOnError(err)
// 5. Connecting to the Master Server and receiving its structured server list;
go func() {
mutex.Lock()
serverConn, ip = reconnect(config)
mutex.Unlock()
for {
var list List
dec := gob.NewDecoder(serverConn)
err := dec.Decode(&list)
if err != nil && err.Error() != "gob: unknown type id or corrupted data" {
fmt.Println("The Master Server is offline now.")
mutex.Lock()
serverConn, ip = reconnect(config)
mutex.Unlock()
continue
} else if list.Name == nil {
continue
}
for name := range serverAddresses {
delete(serverAddresses, name)
}
for i := 0; i < len(list.Name); i++ {
serverAddresses[list.Name[i]] = list.AddrS[i]
}
delete(serverAddresses, config.ThisServerName)
chanAddresses[0] = list.AddrS
}
}()
// 7. Receiving structured messages from other servers;
msgClient, err := ch.Consume(msgqueue.Name, "", true, false, false, false, nil)
PanicOnError(err)
msgSystem, err := ch.Consume(sysqueue.Name, "", true, false, false, false, nil)
PanicOnError(err)
msgConfig, err := ch.Consume(setqueue.Name, "", true, false, false, false, nil)
PanicOnError(err)
// If it is a client message, proceed:
go func() {
for d := range msgClient {
msg, _ := deserialize(d.Body)
Message <- msg
}
}()
// If it is a global system message:
go func() {
for d := range msgSystem {
msg, _ := deserialize(d.Body)
for c := range nameClients {
if msg.Message2 != nameClients[c] {
err := c.WriteMessage(1, []byte(fmt.Sprint(msg.Message1+"\n")))
if err != nil {
deadConnection <- c
}
}
}
}
}()
// If they are channel attributes, proceed:
go func() {
for d := range msgConfig {
msg, _ := deserialize(d.Body)
if str, ok := chanAddresses[msg.RoomID]; ok {
str = append(str, msg.Message2)
chanAddresses[msg.RoomID] = str
str2 := strings.Split(msg.Message1, ":")
nameChannels[msg.RoomID] = str2[0]
passChannels[msg.RoomID] = str2[1]
fmt.Println(msg) //
} else {
str = append(str, msg.Message2)
chanAddresses[msg.RoomID] = str
str2 := strings.Split(msg.Message1, ":")
nameChannels[msg.RoomID] = str2[0]
passChannels[msg.RoomID] = str2[1]
fmt.Println(msg) //
}
}
}()
// 8. Preparing to accept new client connections;
//clientListener, err := net.Listen("tcp", ":"+config.PortClients)
//PanicOnError(err)
// 9. Accepting new clients;
// go func(cl net.Listener) {
// for {
// conn, err := cl.Accept()
// if err != nil {
// raven.CaptureError(err, nil)
// log.Println(err)
// continue
// }
// clientName, _ := get(conn)
// nameClients[conn] = clientName
// newConnection <- conn
// }
// }(clientListener)
// 10. Which alternative to take?
go func() {
for {
select {
// 10.1. If a new client connects, proceed:
case conn := <-newConnection:
fmt.Println("A new client has arrived: " + nameClients[conn])
sysMessageGlobal <- Chat{2, nil, 0, fmt.Sprint("{System} " +
nameClients[conn] + " is online now."), nameClients[conn]}
chanClients[conn] = 0
clientCount++
time.Sleep(100 * time.Millisecond)
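// The 100ms pause presumably gives the publish/consume round trip time to
// deliver the "is online" notice before further traffic; it is a heuristic,
// not a synchronization guarantee.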
// 9.1.2. Receiving messages from this client indefinitely;
// go func(conn net.Conn) {
// for {
// message, err := get(conn)
// if err != nil {
// break
// }
// if message[0:1] != "/" {
// // ID 0 denotes a plain chat message.
// messageQueue <- Chat{0, nil, chanClients[conn], message, nameClients[conn]}
// } else {
// message = strings.TrimLeft(message, "/")
// sub := strings.Split(message, " ")
// // The ID field has no meaning for commands.
// commandQueue <- Command{0, conn, sub[0], sub[1:]}
// }
// }
// // 9.1.2.1. If the loop breaks, the client has disconnected;
// deadConnection <- conn
// }(conn)
// 10.2. If there is a message for the local clients, proceed:
case msg := <-Message:
fmt.Println("TESTE2")
for conn := range chanClients {
if msg.RoomID == chanClients[conn] && msg.Message2 != nameClients[conn] {
go func(msg Chat, conn *websocket.Conn) {
if msg.ID == 0 {
err := conn.WriteMessage(1, []byte(fmt.Sprint(msg.Message2+": "+msg.Message1+"\n")))
if err != nil {
deadConnection <- conn
}
} else if msg.ID == 2 {
err := conn.WriteMessage(1, []byte(fmt.Sprint(msg.Message1+"\n")))
if err != nil {
deadConnection <- conn
}
}
}(msg, conn)
}
}
}
}
}()
go func() {
Selection1:
for {
select {
// 10.3. If the system sends a global message, proceed:
case message := <-sysMessageGlobal:
msg, err := serialize(message) // TODO: implement error handling!
err = ch.Publish("syslog", "", false, false, amqp.Publishing{
ContentType: "text/plain",
Body: msg,
})
PrintOnError(err)
// for c := range nameClients {
// if message.Message2 != nameClients[c] {
// err := c.WriteMessage(1, []byte(fmt.Sprint(message.Message1+"\n")))
// if err != nil {
// deadConnection <- c
// }
// }
// }
// 10.4. If there is a command to process, proceed:
case cmd := <-commandQueue:
fmt.Printf("%s sent a command.\n", nameClients[cmd.FromUser])
switch cmd.Command {
case "show":
// Checking whether the command has more arguments than necessary:
if len(cmd.Parameters) > 0 {
err := cmd.FromUser.WriteMessage(1, []byte(fmt.Sprint("{System} You entered too many arguments!\n")))
if err != nil {
deadConnection <- cmd.FromUser
}
continue Selection1
}
// Checking whether there are channels to display:
if len(nameChannels) == 0 {
err := cmd.FromUser.WriteMessage(1, []byte(fmt.Sprint("{System} There are no channels available!\n")))
if err != nil {
deadConnection <- cmd.FromUser
}
continue Selection1
}
// Sending the available channels to the client:
var b bytes.Buffer
for i := range nameChannels {
b.WriteString(nameChannels[i] + " ")
}
names := b.String()
names = strings.TrimRight(names, " ")
err := cmd.FromUser.WriteMessage(1, []byte(fmt.Sprint("Salas disponíveis: "+names+"\n")))
if err != nil {
deadConnection <- cmd.FromUser
}
case "join":
// Checking whether the command has its required arguments:
if len(cmd.Parameters) == 0 {
err := cmd.FromUser.WriteMessage(1, []byte(fmt.Sprint("{System} This command requires some arguments.\n")))
if err != nil {
deadConnection <- cmd.FromUser
}
continue Selection1
}
// Checking whether the command has more arguments than necessary:
if len(cmd.Parameters) > 2 {
err := cmd.FromUser.WriteMessage(1, []byte(fmt.Sprint("{System} You entered too many arguments!\n")))
if err != nil {
deadConnection <- cmd.FromUser
}
continue Selection1
}
// Checking whether the client is already subscribed to this channel:
if cmd.Parameters[0] == nameChannels[chanClients[cmd.FromUser]] {
err := cmd.FromUser.WriteMessage(1, []byte(fmt.Sprint("{System} You're already subscribed to this channel!\n")))
if err != nil {
deadConnection <- cmd.FromUser
}
continue Selection1
}
// Assigning the channel to the client:
for i := range nameChannels {
if cmd.Parameters[0] == nameChannels[i] {
if passChannels[i] == "" || cmd.Parameters[1] == passChannels[i] {
// Send to the old channel:
messageQueue <- Chat{2, nil, chanClients[cmd.FromUser],
fmt.Sprint("{System} " + nameClients[cmd.FromUser] +
" left your channel."), nameClients[cmd.FromUser]}
chanClients[cmd.FromUser] = i
// Send to the new channel:
messageQueue <- Chat{2, nil, chanClients[cmd.FromUser],
fmt.Sprint("{System} " + nameClients[cmd.FromUser] +
" joined your channel."), nameClients[cmd.FromUser]}
// Send to the sender of the command:
err := cmd.FromUser.WriteMessage(1, []byte(fmt.Sprint("{System} All done!\n")))
if err != nil {
deadConnection <- cmd.FromUser
}
continue Selection1
}
}
}
// If the previous cases failed, the channel does not exist:
err := cmd.FromUser.WriteMessage(1, []byte(fmt.Sprint("{System} This channel does not exist!\n")))
if err != nil {
deadConnection <- cmd.FromUser
}
case "create":
// Checking whether the command has its required arguments:
if len(cmd.Parameters) == 0 {
err := cmd.FromUser.WriteMessage(1, []byte(fmt.Sprint("{System} This command requires some arguments.\n")))
if err != nil {
deadConnection <- cmd.FromUser
}
continue Selection1
}
// Checking whether the command has more arguments than necessary:
if len(cmd.Parameters) > 2 {
err := cmd.FromUser.WriteMessage(1, []byte(fmt.Sprint("{System} You entered too many arguments!\n")))
if err != nil {
deadConnection <- cmd.FromUser
}
continue Selection1
}
// Checking whether the room already exists:
for i := range nameChannels {
if cmd.Parameters[0] == nameChannels[i] {
err := cmd.FromUser.WriteMessage(1, []byte(fmt.Sprint("{System} This channel already exists!\n")))
if err != nil {
deadConnection <- cmd.FromUser
}
continue Selection1
}
}
// Creating a new room on this server:
nameChannels[channelCount] = cmd.Parameters[0]
if len(cmd.Parameters) == 2 {
passChannels[channelCount] = cmd.Parameters[1]
} else {
passChannels[channelCount] = ""
}
chanClients[cmd.FromUser] = channelCount
// Notifying the client that the operation succeeded:
err := cmd.FromUser.WriteMessage(1, []byte(fmt.Sprint("{System} All done! Now you are on your own channel.\n")))
if err != nil {
deadConnection <- cmd.FromUser
}
// Informing the other servers that the newly created room exists:
msgByte, _ := serialize(Chat{1, nil, channelCount, nameChannels[channelCount] + ":" + passChannels[channelCount], ip + ":" + config.PortServers}) // TODO: implement error handling!
err = ch.Publish("setlog", "", false, false, amqp.Publishing{
ContentType: "text/plain",
Body: msgByte,
})
PrintOnError(err)
channelCount++
case "nick":
// Checking whether the command has its required argument:
if len(cmd.Parameters) == 0 {
err := cmd.FromUser.WriteMessage(1, []byte(fmt.Sprint("{System} This command requires an argument!\n")))
if err != nil {
deadConnection <- cmd.FromUser
}
continue Selection1
}
// Checking whether the command has more arguments than necessary:
if len(cmd.Parameters) > 1 {
err := cmd.FromUser.WriteMessage(1, []byte(fmt.Sprint("{System} You entered too many arguments!\n")))
if err != nil {
deadConnection <- cmd.FromUser
}
continue Selection1
}
// Checking whether this name is valid:
if cmd.Parameters[0] == nameClients[cmd.FromUser] {
err := cmd.FromUser.WriteMessage(1, []byte(fmt.Sprint("{System} This is your name already!\n")))
if err != nil {
deadConnection <- cmd.FromUser
}
continue Selection1
}
// Assigning the new nick to the client:
messageQueue <- Chat{2, nil, chanClients[cmd.FromUser],
fmt.Sprint("{System} " + nameClients[cmd.FromUser] +
" is now " + cmd.Parameters[0]), cmd.Parameters[0]}
nameClients[cmd.FromUser] = cmd.Parameters[0]
err := cmd.FromUser.WriteMessage(1, []byte(fmt.Sprint("{System} All done!\n")))
if err != nil {
deadConnection <- cmd.FromUser
}
default:
err := cmd.FromUser.WriteMessage(1, []byte(fmt.Sprint("{System} Sorry, but this command does not exist!\n")))
if err != nil {
deadConnection <- cmd.FromUser
}
}
}
}
}()
for {
select {
// 10.5. If a client disconnects, proceed:
case conn := <-deadConnection:
fmt.Println(nameClients[conn] + " is gone.")
sysMessageGlobal <- Chat{2, nil, 0, fmt.Sprint("{System} " +
nameClients[conn] + " is offline now."), ""}
delete(nameClients, conn)
delete(chanClients, conn)
conn.Close()
clientCount--
// 10.6. If there is a message to deliver, proceed:
case msg := <-messageQueue:
if msg.ID == 0 {
fmt.Printf("%s sent a message on room [%s].\n", msg.Message2,
nameChannels[msg.RoomID])
}
//Message <- msg
msgByte, _ := serialize(msg) // TODO: implement error handling!
err := ch.Publish("msglog", "", false, false, amqp.Publishing{
ContentType: "text/plain",
Body: msgByte,
})
PrintOnError(err)
}
}
}
| {
if err != nil {
raven.CaptureErrorAndWait(err, nil)
log.Panic(err)
}
} | identifier_body |
train_fineall.py | # -*- coding: utf-8 -*-
import os
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import pickle
from PIL import Image
from sklearn.neural_network import MLPClassifier
import argparse
import torch
import torch.optim as optim
from tqdm import *
from termcolor import *
from torch.autograd import Variable
import torch.nn.functional as F
import torch.nn as nn
import torch.utils.data
import torchvision.transforms as transforms
import torchvision.datasets as datasets
import torchvision.models as models
import torchvision
# from package.models import MyModel # you may import your own model in the package
# from package import preprocess_methods # you may import your own preprocess method in the package
parser=argparse.ArgumentParser(description='uniqlo network')
parser.add_argument('--bh',default='1',
help='choose the network')
parser.add_argument('--no-cuda', action='store_true', default=False,
help='disables CUDA training')
parser.add_argument('--seed', type=int, default=1, metavar='S',
help='random seed (default: 1)')
parser.add_argument('--epochs', type=int, default=600, metavar='N',
help='number of epochs to train (default: 600)')
parser.add_argument('--batchsize', type=int, default=128, metavar='N',
help='input batch size for training (default: 128)')
parser.add_argument('--lr', type=float, default=0.1, metavar='LR',
help='learning rate (default: 0.1)')
parser.add_argument('--momentum', type=float, default=0.9, metavar='M',
help='SGD momentum (default: 0.9)')
parser.add_argument('--weight_decay', type=float, default=5e-4,
help='SGD weight_decay (default: 5e-4)')
parser.add_argument('--automonous_stopping', type=int,default=0,
help='stop early when the training loss stops improving (1 to enable)')
parser.add_argument('--data',default='data',metavar='NT',
help='the data directory')
parser.add_argument('--modelpos',default='models/resnet18_pretrained.t7',metavar='NT',
help='path to the pretrained model checkpoint')
args=parser.parse_args()
args.cuda = not args.no_cuda and torch.cuda.is_available()
torch.manual_seed(args.seed)
if args.cuda:
torch.cuda.manual_seed(args.seed)
PATH_TO_MODEL = os.path.join('models', 'color_models')
category_num=24
history=[0.01]*1000
historyMax=0.01
Hloss=[0.01]*1000
lossMin=1000000.0
hash2=[0, 1, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 2, 20, 21, 22, 23, 3, 4, 5, 6, 7, 8, 9]
colorMax=[0.0001]*24
Misspre=np.array([[0.01 for col in range(0,24)] for row in range(0,24)])
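# Misspre[i][j] will hold, in percent, how often true color i is predicted
# as color j -- a row-normalized confusion matrix filled in by test().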
# loading pre-trained model
print(colored('initializing the model ...',"blue"))
print(colored('load model at '+args.modelpos,"blue"))
#model = models.resnet18(pretrained=True)#at least 224*224
model=torch.load(args.modelpos)
model.fc=nn.Linear(512,24)
# NOTE: the attribute is `requires_grad`; the original `requires_grads`
# assignments (including those on whole layers) were silent no-ops, so in
# practice every parameter stays trainable -- consistent with the file name
# train_fineall.py. Make that intent explicit:
for param in model.parameters():
param.requires_grad = True
if args.cuda:
model.cuda()
print(colored('model ==> ',"green"))
print(model)
print(colored('initializing done.',"blue"))
# Data loading code
traindir = os.path.join(args.data, 'train')
valdir = os.path.join(args.data, 'val')
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
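# The mean/std above are the standard ImageNet statistics expected by
# torchvision's pretrained ResNet weights.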
traindir='data/train'
valdir='data/val'
#print(os.listdir(traindir))
train_loader1= torch.utils.data.DataLoader(
datasets.ImageFolder(traindir, transforms.Compose([
transforms.RandomSizedCrop(224),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
normalize,
])),
batch_size=args.batchsize, shuffle=True,
num_workers=4, pin_memory=True)
train_loader2= torch.utils.data.DataLoader(
datasets.ImageFolder(traindir, transforms.Compose([
transforms.Scale(224),
#transforms.CenterCrop(224),
transforms.ToTensor(),
normalize,
])),
batch_size=args.batchsize, shuffle=True,
num_workers=4, pin_memory=True)
train_loader3= torch.utils.data.DataLoader(
datasets.ImageFolder(traindir, transforms.Compose([
transforms.RandomSizedCrop(224),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
normalize,
])),
batch_size=args.batchsize, shuffle=True,
num_workers=4, pin_memory=True)
val_loader = torch.utils.data.DataLoader(
datasets.ImageFolder(valdir, transforms.Compose([
transforms.Scale(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
normalize,
])),
batch_size=args.batchsize, shuffle=False,
num_workers=4, pin_memory=True)
def imshow(img):
|
#dataiter = iter(train_loader)
#print(len(train_loader))
#print(len(val_loader))
#images,labels= dataiter.next()
#print(images)
#print(labels)
# print images
#imshow(torchvision.utils.make_grid(images))
# define loss function (criterion) and optimizer
criterion = nn.CrossEntropyLoss().cuda()
optimizer = optim.SGD(model.parameters(), lr=args.lr, momentum=args.momentum,weight_decay=args.weight_decay)
def save_model(model, name):
print('saving the model ...')
if not os.path.exists(PATH_TO_MODEL):
os.mkdir(PATH_TO_MODEL)
torch.save(model,PATH_TO_MODEL+'/'+str(historyMax)+'.t7')
print('done.')
def visualize(data):
for i in range(0,24):
print('color '+str(i)+ '!')
for j in range(0,24):
if(data[i][j]>5):
print('[%d][%d] = %.2f ' % (i, j, data[i][j]), end='')
print()
for i in range(0,24):
print(colored('color %d precision: %.2f !' %(i,data[i][i]),'green'))
def save_color_model(model, color):
print('saving the color model for %d ...' %color)
visualize(Misspre)
PATH_TO_COLOR=PATH_TO_MODEL+str(color)
if not os.path.exists(PATH_TO_COLOR):
os.mkdir(PATH_TO_COLOR)
torch.save(model,PATH_TO_COLOR+'/'+str(colorMax[color])+'.t7')
print('done.')
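# Reloading either kind of checkpoint later mirrors the saves above; the
# exact filename depends on the accuracy reached, so this path is only
# illustrative:
#   model = torch.load('models/color_models/70.0.t7')
#   model.eval()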
def train(epoch):
model.train()
print(colored('training epoch '+ str(epoch) + ' !','blue'))
print(colored('loading data!','green'))
if epoch%3==0:
train_loader=train_loader1
elif epoch%3==1:
train_loader=torch.utils.data.DataLoader(
datasets.ImageFolder(traindir, transforms.Compose([
transforms.Scale(224),
#transforms.CenterCrop(224),
transforms.ToTensor(),
normalize,
])),
batch_size=args.batchsize, shuffle=True,
num_workers=4, pin_memory=True)
else:
train_loader=train_loader3
print(colored('done!','green'))
tot_loss=0.0
num=0
right=0
for i, (inputs, targets) in tqdm(enumerate(train_loader)):
if args.cuda:
inputs=inputs.cuda(async=True)
targets=targets.cuda(async=True)
inputs_var, targets_var = Variable(inputs), Variable(targets)
optimizer.zero_grad()
outputs = model(inputs_var)
loss=criterion(outputs, targets_var)/(args.batchsize*1.0)
loss.backward()
optimizer.step()
tot_loss=tot_loss+loss.data[0]
num=num+inputs.size(0)
_,indices=torch.max(outputs.data,1)
indices=indices.view(inputs.size(0))
right=right+sum(indices==targets)
#print(output.data)
#averageloss=2.3
#averageloss=(tot_loss*1.0)/(num*1.00)
print(colored("totloss: %.8f ! " %tot_loss,'red'))
precision=2.3
precision=(right*100.0)/(num*1.00)
print(colored("precision: %.2f%c ! " %(precision,'%'),'red'))
global Hloss,lossMin
Hloss[epoch]=tot_loss
if epoch==1:
lossMin=tot_loss
else:
lossMin=min(lossMin,tot_loss)
#print(colored("right: %d ! " %right,'red'))
def test(epoch):
Miss=[[0 for col in range(0,24)] for row in range(0,24)]# suppose i to be j
model.eval()
print(colored('Testing!','blue'))
tot_loss=0.0
num=0
right=0
for i, (inputs, targets) in tqdm(enumerate(val_loader)):
if args.cuda:
inputs=inputs.cuda(async=True)
targets=targets.cuda(async=True)
inputs_var, targets_var = Variable(inputs), Variable(targets)
#optimizer.zero_grad()
outputs = model(inputs_var)
loss=criterion(outputs, targets_var)/(args.batchsize*1.0)
#loss.backward()
#optimizer.step()
tot_loss=tot_loss+loss.data[0]
num=num+inputs.size(0)
_,indices=torch.max(outputs.data,1)
indices=indices.view(inputs.size(0))
right=right+sum(indices==targets)
for j in range(0,inputs.size(0)):
#if targets[j]>24 or indices[j]>24:
#print(targets[j],indices[j])
Miss[hash2[int(targets[j])]][hash2[int(indices[j])]]+=1
#print(output.data)
#averageloss=2.3
#averageloss=(tot_loss*1.0)/(num*1.00)
print(colored("totloss: %.8f ! " %tot_loss,'red'))
precision=2.3
precision=(right*100.0)/(num*1.00)
print(colored("precision: %.2f%c ! " %(precision,'%'),'red'))
global historyMax,history
history[epoch]=precision
historyMax=max(historyMax,precision)
global Misspre
for i in range(0,24):
for j in range(0,24):
Misspre[hash2[i]][hash2[j]]=(100*Miss[hash2[i]][hash2[j]]*1.000)/(sum(Miss[hash2[i]])*1.000)
#torch.save(Misspre,'visualize/visualize.t7')
#visualize(Misspre)
for i in range(0,24):
if(Misspre[hash2[i]][hash2[i]]>colorMax[hash2[i]]):
colorMax[hash2[i]]=Misspre[hash2[i]][hash2[i]]
if((Misspre[hash2[i]][hash2[i]]>85.00) and (precision>65)):
save_color_model(model,hash2[i])
if __name__ == '__main__':
#save_model(model, PATH_TO_MODEL)
#print(np.shape(X))
for epoch in range(1, args.epochs + 1):
train(epoch)
test(epoch)
if historyMax==history[epoch]:
save_model(model, PATH_TO_MODEL)
if args.automonous_stopping==1:
haha=0
if(epoch>10):
for i in range(epoch-10+1,epoch+1):
if(Hloss[i]==lossMin):
haha=1
if haha==0:
break
| img = img / 2 + 0.5 # unnormalize
npimg = img.numpy()
plt.imshow(np.transpose(npimg, (1,2,0)))
plt.show() | identifier_body |
train_fineall.py | # -*- coding: utf-8 -*-
import os
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import pickle
from PIL import Image
from sklearn.neural_network import MLPClassifier
import argparse
import torch
import torch.optim as optim
from tqdm import *
from termcolor import *
from torch.autograd import Variable
import torch.nn.functional as F
import torch.nn as nn
import torch.utils.data
import torchvision.transforms as transforms
import torchvision.datasets as datasets
import torchvision.models as models
import torchvision
# from package.models import MyModel # you may import your own model in the package
# from package import preprocess_methods # you may import your own preprocess method in the package
parser=argparse.ArgumentParser(description='uniqlo network')
parser.add_argument('--bh',default='1',
help='choose the network')
parser.add_argument('--no-cuda', action='store_true', default=False,
help='disables CUDA training')
parser.add_argument('--seed', type=int, default=1, metavar='S',
help='random seed (default: 1)')
parser.add_argument('--epochs', type=int, default=600, metavar='N',
help='number of epochs to train (default: 600)')
parser.add_argument('--batchsize', type=int, default=128, metavar='N',
help='input batch size for training (default: 128)')
parser.add_argument('--lr', type=float, default=0.1, metavar='LR',
help='learning rate (default: 0.1)')
parser.add_argument('--momentum', type=float, default=0.9, metavar='M',
help='SGD momentum (default: 0.9)')
parser.add_argument('--weight_decay', type=float, default=5e-4,
help='SGD weight_decay (default: 5e-4)')
parser.add_argument('--automonous_stopping', type=int,default=0,
help='stop early when the training loss stops improving (1 to enable)')
parser.add_argument('--data',default='data',metavar='NT',
help='the data directory')
parser.add_argument('--modelpos',default='models/resnet18_pretrained.t7',metavar='NT',
help='path to the pretrained model checkpoint')
args=parser.parse_args()
args.cuda = not args.no_cuda and torch.cuda.is_available()
torch.manual_seed(args.seed)
if args.cuda:
torch.cuda.manual_seed(args.seed)
PATH_TO_MODEL = os.path.join('models', 'color_models')
category_num=24
history=[0.01]*1000
historyMax=0.01
Hloss=[0.01]*1000
lossMin=1000000.0
hash2=[0, 1, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 2, 20, 21, 22, 23, 3, 4, 5, 6, 7, 8, 9]
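# hash2 is the lexicographic order of the strings "0".."23"; ImageFolder
# sorts class directories alphabetically, so hash2[k] recovers the numeric
# color id behind ImageFolder's k-th class index.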
colorMax=[0.0001]*24
Misspre=np.array([[0.01 for col in range(0,24)] for row in range(0,24)])
# loading pre-trained model
print(colored('initializing the model ...',"blue"))
print(colored('load model at '+args.modelpos,"blue"))
#model = models.resnet18(pretrained=True)#at least 224*224
model=torch.load(args.modelpos)
model.fc=nn.Linear(512,24)
# NOTE: the attribute is `requires_grad`; the original `requires_grads`
# assignments (including those on whole layers) were silent no-ops, so in
# practice every parameter stays trainable -- consistent with the file name
# train_fineall.py. Make that intent explicit:
for param in model.parameters():
param.requires_grad = True
if args.cuda:
model.cuda()
print(colored('model ==> ',"green"))
print(model)
print(colored('initializing done.',"blue"))
# Data loading code
traindir = os.path.join(args.data, 'train')
valdir = os.path.join(args.data, 'val')
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
traindir='data/train'
valdir='data/val'
#print(os.listdir(traindir))
train_loader1= torch.utils.data.DataLoader(
datasets.ImageFolder(traindir, transforms.Compose([
transforms.RandomSizedCrop(224),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
normalize,
])),
batch_size=args.batchsize, shuffle=True,
num_workers=4, pin_memory=True)
train_loader2= torch.utils.data.DataLoader(
datasets.ImageFolder(traindir, transforms.Compose([
transforms.Scale(224),
#transforms.CenterCrop(224),
transforms.ToTensor(),
normalize,
])),
batch_size=args.batchsize, shuffle=True,
num_workers=4, pin_memory=True)
train_loader3= torch.utils.data.DataLoader(
datasets.ImageFolder(traindir, transforms.Compose([
transforms.RandomSizedCrop(224),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
normalize,
])),
batch_size=args.batchsize, shuffle=True,
num_workers=4, pin_memory=True)
val_loader = torch.utils.data.DataLoader(
datasets.ImageFolder(valdir, transforms.Compose([
transforms.Scale(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
normalize,
])),
batch_size=args.batchsize, shuffle=False,
num_workers=4, pin_memory=True)
def imshow(img):
img = img / 2 + 0.5 # unnormalize
npimg = img.numpy()
plt.imshow(np.transpose(npimg, (1,2,0)))
plt.show()
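# Note: img / 2 + 0.5 only undoes a mean=std=0.5 normalization; the loaders
# use ImageNet statistics instead, so colors shown here will be slightly
# off -- good enough for eyeballing batches.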
#dataiter = iter(train_loader)
#print(len(train_loader))
#print(len(val_loader))
#images,labels= dataiter.next()
#print(images)
#print(labels)
# print images
#imshow(torchvision.utils.make_grid(images))
# define loss function (criterion) and optimizer
criterion = nn.CrossEntropyLoss().cuda()
optimizer = optim.SGD(model.parameters(), lr=args.lr, momentum=args.momentum,weight_decay=args.weight_decay)
def | (model, name):
print('saving the model ...')
if not os.path.exists(PATH_TO_MODEL):
os.mkdir(PATH_TO_MODEL)
torch.save(model,PATH_TO_MODEL+'/'+str(historyMax)+'.t7')
print('done.')
def visualize(data):
for i in range(0,24):
print('color '+str(i)+ '!')
for j in range(0,24):
if(data[i][j]>5):
print('[%d][%d] = %.2f ' % (i, j, data[i][j]), end='')
print()
for i in range(0,24):
print(colored('color %d precision: %.2f !' %(i,data[i][i]),'green'))
def save_color_model(model, color):
print('saving the color model for %d ...' %color)
visualize(Misspre)
PATH_TO_COLOR=PATH_TO_MODEL+str(color)
if not os.path.exists(PATH_TO_COLOR):
os.mkdir(PATH_TO_COLOR)
torch.save(model,PATH_TO_COLOR+'/'+str(colorMax[color])+'.t7')
print('done.')
def train(epoch):
model.train()
print(colored('training epoch '+ str(epoch) + ' !','blue'))
print(colored('loading data!','green'))
if epoch%3==0:
train_loader=train_loader1
elif epoch%3==1:
train_loader=torch.utils.data.DataLoader(
datasets.ImageFolder(traindir, transforms.Compose([
transforms.Scale(224),
#transforms.CenterCrop(224),
transforms.ToTensor(),
normalize,
])),
batch_size=args.batchsize, shuffle=True,
num_workers=4, pin_memory=True)
else:
train_loader=train_loader3
print(colored('done!','green'))
tot_loss=0.0
num=0
right=0
for i, (inputs, targets) in tqdm(enumerate(train_loader)):
if args.cuda:
inputs=inputs.cuda(async=True)
targets=targets.cuda(async=True)
inputs_var, targets_var = Variable(inputs), Variable(targets)
optimizer.zero_grad()
outputs = model(inputs_var)
loss=criterion(outputs, targets_var)/(args.batchsize*1.0)
loss.backward()
optimizer.step()
tot_loss=tot_loss+loss.data[0]
num=num+inputs.size(0)
_,indices=torch.max(outputs.data,1)
indices=indices.view(inputs.size(0))
right=right+sum(indices==targets)
#print(output.data)
#averageloss=2.3
#averageloss=(tot_loss*1.0)/(num*1.00)
print(colored("totloss: %.8f ! " %tot_loss,'red'))
precision=2.3
precision=(right*100.0)/(num*1.00)
print(colored("precision: %.2f%c ! " %(precision,'%'),'red'))
global Hloss,lossMin
Hloss[epoch]=tot_loss
if epoch==1:
lossMin=tot_loss
else:
lossMin=min(lossMin,tot_loss)
#print(colored("right: %d ! " %right,'red'))
def test(epoch):
Miss=[[0 for col in range(0,24)] for row in range(0,24)]# suppose i to be j
model.eval()
print(colored('Testing!','blue'))
tot_loss=0.0
num=0
right=0
for i, (inputs, targets) in tqdm(enumerate(val_loader)):
if args.cuda:
inputs=inputs.cuda(async=True)
targets=targets.cuda(async=True)
inputs_var, targets_var = Variable(inputs), Variable(targets)
#optimizer.zero_grad()
outputs = model(inputs_var)
loss=criterion(outputs, targets_var)/(args.batchsize*1.0)
#loss.backward()
#optimizer.step()
tot_loss=tot_loss+loss.data[0]
num=num+inputs.size(0)
_,indices=torch.max(outputs.data,1)
indices=indices.view(inputs.size(0))
right=right+sum(indices==targets)
for j in range(0,inputs.size(0)):
#if targets[j]>24 or indices[j]>24:
#print(targets[j],indices[j])
Miss[hash2[int(targets[j])]][hash2[int(indices[j])]]+=1
#print(output.data)
#averageloss=2.3
#averageloss=(tot_loss*1.0)/(num*1.00)
print(colored("totloss: %.8f ! " %tot_loss,'red'))
precision=2.3
precision=(right*100.0)/(num*1.00)
print(colored("precision: %.2f%c ! " %(precision,'%'),'red'))
global historyMax,history
history[epoch]=precision
historyMax=max(historyMax,precision)
global Misspre
for i in range(0,24):
for j in range(0,24):
Misspre[hash2[i]][hash2[j]]=(100*Miss[hash2[i]][hash2[j]]*1.000)/(sum(Miss[hash2[i]])*1.000)
#torch.save(Misspre,'visualize/visualize.t7')
#visualize(Misspre)
for i in range(0,24):
if(Misspre[hash2[i]][hash2[i]]>colorMax[hash2[i]]):
colorMax[hash2[i]]=Misspre[hash2[i]][hash2[i]]
if((Misspre[hash2[i]][hash2[i]]>85.00) and (precision>65)):
save_color_model(model,hash2[i])
if __name__ == '__main__':
#save_model(model, PATH_TO_MODEL)
#print(np.shape(X))
for epoch in range(1, args.epochs + 1):
train(epoch)
test(epoch)
if historyMax==history[epoch]:
save_model(model, PATH_TO_MODEL)
if args.automonous_stopping==1:
haha=0
if(epoch>10):
for i in range(epoch-10+1,epoch+1):
if(Hloss[i]==lossMin):
haha=1
if haha==0:
break
| save_model | identifier_name |
train_fineall.py | # -*- coding: utf-8 -*-
import os
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import pickle
from PIL import Image
from sklearn.neural_network import MLPClassifier
import argparse
import torch
import torch.optim as optim
from tqdm import *
from termcolor import *
from torch.autograd import Variable
import torch.nn.functional as F
import torch.nn as nn
import torch.utils.data
import torchvision.transforms as transforms
import torchvision.datasets as datasets
import torchvision.models as models
import torchvision
# from package.models import MyModel # you may import your own model in the package
# from package import preprocess_methods # you may import your own preprocess method in the package
parser=argparse.ArgumentParser(description='uniqlo network')
parser.add_argument('--bh',default='1',
help='choose the network')
parser.add_argument('--no-cuda', action='store_true', default=False,
help='disables CUDA training')
parser.add_argument('--seed', type=int, default=1, metavar='S',
help='random seed (default: 1)')
parser.add_argument('--epochs', type=int, default=600, metavar='N',
help='number of epochs to train (default: 600)')
parser.add_argument('--batchsize', type=int, default=128, metavar='N',
help='input batch size for training (default: 128)')
parser.add_argument('--lr', type=float, default=0.1, metavar='LR',
help='learning rate (default: 0.1)')
parser.add_argument('--momentum', type=float, default=0.9, metavar='M',
help='SGD momentum (default: 0.9)')
parser.add_argument('--weight_decay', type=float, default=5e-4,
help='SGD weight_decay (default: 5e-4)')
parser.add_argument('--automonous_stopping', type=int,default=0,
help='stop early when the training loss stops improving (1 to enable)')
parser.add_argument('--data',default='data',metavar='NT',
help='the data directory')
parser.add_argument('--modelpos',default='models/resnet18_pretrained.t7',metavar='NT',
help='path to the pretrained model checkpoint')
args=parser.parse_args()
args.cuda = not args.no_cuda and torch.cuda.is_available()
torch.manual_seed(args.seed)
if args.cuda:
torch.cuda.manual_seed(args.seed)
PATH_TO_MODEL = os.path.join('models', 'color_models')
category_num=24
history=[0.01]*1000
historyMax=0.01
Hloss=[0.01]*1000
lossMin=1000000.0
hash2=[0, 1, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 2, 20, 21, 22, 23, 3, 4, 5, 6, 7, 8, 9]
colorMax=[0.0001]*24
Misspre=np.array([[0.01 for col in range(0,24)] for row in range(0,24)])
# loading pre-trained model
print(colored('initializing the model ...',"blue"))
print(colored('load model at '+args.modelpos,"blue"))
#model = models.resnet18(pretrained=True)#at least 224*224
model=torch.load(args.modelpos)
model.fc=nn.Linear(512,24)
# NOTE: the attribute is `requires_grad`; the original `requires_grads`
# assignments (including those on whole layers) were silent no-ops, so in
# practice every parameter stays trainable -- consistent with the file name
# train_fineall.py. Make that intent explicit:
for param in model.parameters():
param.requires_grad = True
if args.cuda:
model.cuda()
print(colored('model ==> ',"green"))
print(model)
print(colored('initializing done.',"blue"))
# Data loading code
traindir = os.path.join(args.data, 'train')
valdir = os.path.join(args.data, 'val')
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
# (the hard-coded 'data/train'/'data/val' overrides are gone -- they defeated --data)
#print(os.listdir(traindir))
train_loader1 = torch.utils.data.DataLoader(
datasets.ImageFolder(traindir, transforms.Compose([
transforms.RandomResizedCrop(224),  # RandomSizedCrop was renamed in torchvision
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
normalize,
])),
batch_size=args.batchsize, shuffle=True,
num_workers=4, pin_memory=True)
train_loader2 = torch.utils.data.DataLoader(
datasets.ImageFolder(traindir, transforms.Compose([
transforms.Resize((224, 224)),  # Scale was renamed; square resize keeps batches collatable
transforms.ToTensor(),
normalize,
])),
batch_size=args.batchsize, shuffle=True,
num_workers=4, pin_memory=True)
train_loader3 = torch.utils.data.DataLoader(
datasets.ImageFolder(traindir, transforms.Compose([
transforms.RandomResizedCrop(224),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
normalize,
])),
batch_size=args.batchsize, shuffle=True,
num_workers=4, pin_memory=True)
val_loader = torch.utils.data.DataLoader(
datasets.ImageFolder(valdir, transforms.Compose([
transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
normalize,
])),
batch_size=args.batchsize, shuffle=False,
num_workers=4, pin_memory=True)
def imshow(img):
img = img / 2 + 0.5 # unnormalize
npimg = img.numpy()
plt.imshow(np.transpose(npimg, (1,2,0)))
plt.show()
#dataiter = iter(train_loader)
#print(len(train_loader))
#print(len(val_loader))
#images,labels= dataiter.next()
#print(images)
#print(labels)
# print images
#imshow(torchvision.utils.make_grid(images))
# define loss function (criterion) and optimizer
criterion = nn.CrossEntropyLoss()
if args.cuda:
criterion = criterion.cuda()
# only hand the optimizer the parameters that were left trainable
optimizer = optim.SGD(filter(lambda p: p.requires_grad, model.parameters()), lr=args.lr, momentum=args.momentum, weight_decay=args.weight_decay)
def save_model(model, name):
print('saving the model ...')
if not os.path.exists(PATH_TO_MODEL):
os.mkdir(PATH_TO_MODEL)
torch.save(model,PATH_TO_MODEL+'/'+str(historyMax)+'.t7')
print('done.')
def visualize(data):
for i in range(0,24):
print('color '+str(i)+ '!')
for j in range(0,24):
if(data[i][j]>5):
print('[%d][%d] = %.2f ' % (i, j, data[i][j]), end='')  # stay on one row per color
print()
for i in range(0,24):
print(colored('color %d precision: %.2f !' %(i,data[i][i]),'green'))
def save_color_model(model, color):
print('saving the color model for %d ...' %color)
visualize(Misspre)
PATH_TO_COLOR=PATH_TO_MODEL+str(color)
if not os.path.exists(PATH_TO_COLOR):
os.mkdir(PATH_TO_COLOR)
torch.save(model,PATH_TO_COLOR+'/'+str(colorMax[color])+'.t7')
print('done.')
def train(epoch):
model.train()
print(colored('training epoch '+ str(epoch) + ' !','blue'))
print(colored('loading data!','green'))
if epoch%3==0:
train_loader=train_loader1
elif epoch%3==1:
train_loader=train_loader2  # identical transform; no need to rebuild the loader each epoch
else:
train_loader=train_loader3
print(colored('done!','green'))
tot_loss=0.0
num=0
right=0
for i, (inputs, targets) in tqdm(enumerate(train_loader)):
if args.cuda:
inputs=inputs.cuda(non_blocking=True)  # `async` is a reserved word in Python 3
targets=targets.cuda(non_blocking=True)
inputs_var, targets_var = Variable(inputs), Variable(targets)
optimizer.zero_grad()
outputs = model(inputs_var)
loss=criterion(outputs, targets_var)/(args.batchsize*1.0)
loss.backward()
optimizer.step()
tot_loss=tot_loss+loss.item()  # loss.data[0] breaks on 0-dim tensors
num=num+inputs.size(0)
_,indices=torch.max(outputs.data,1)
indices=indices.view(inputs.size(0))
right=right+(indices==targets).sum().item()
#print(output.data)
#averageloss=2.3
#averageloss=(tot_loss*1.0)/(num*1.00)
print(colored("totloss: %.8f ! " %tot_loss,'red'))
precision=2.3
precision=(right*100.0)/(num*1.00)
print(colored("precision: %.2f%c ! " %(precision,'%'),'red'))
global Hloss,lossMin
Hloss[epoch]=tot_loss
if epoch==1:
lossMin=tot_loss
else:
lossMin=min(lossMin,tot_loss)
#print(colored("right: %d ! " %right,'red'))
def test(epoch):
Miss=[[0 for col in range(0,24)] for row in range(0,24)]# suppose i to be j
model.eval()
print(colored('Testing!','blue'))
tot_loss=0.0
num=0
right=0
for i, (inputs, targets) in tqdm(enumerate(val_loader)):
if args.cuda:
inputs=inputs.cuda(non_blocking=True)
targets=targets.cuda(non_blocking=True)
inputs_var, targets_var = Variable(inputs), Variable(targets)
outputs = model(inputs_var)
loss=criterion(outputs, targets_var)/(args.batchsize*1.0)
tot_loss=tot_loss+loss.item()
num=num+inputs.size(0)
_,indices=torch.max(outputs.data,1)
indices=indices.view(inputs.size(0))
right=right+(indices==targets).sum().item()
for j in range(0,inputs.size(0)):
#if targets[j]>24 or indices[j]>24:
#print(targets[j],indices[j])
Miss[hash2[int(targets[j])]][hash2[int(indices[j])]]+=1
#print(output.data)
#averageloss=2.3
#averageloss=(tot_loss*1.0)/(num*1.00)
print(colored("totloss: %.8f ! " %tot_loss,'red'))
precision=2.3
precision=(right*100.0)/(num*1.00)
print(colored("precision: %.2f%c ! " %(precision,'%'),'red'))
global historyMax,history
history[epoch]=precision
historyMax=max(historyMax,precision)
global Misspre
for i in range(0,24):
for j in range(0,24):
Misspre[hash2[i]][hash2[j]]=(100*Miss[hash2[i]][hash2[j]]*1.000)/(sum(Miss[hash2[i]])*1.000)
#torch.save(Misspre,'visualize/visualize.t7')
#visualize(Misspre)
for i in range(0,24):
if(Misspre[hash2[i]][hash2[i]]>colorMax[hash2[i]]):
colorMax[hash2[i]]=Misspre[hash2[i]][hash2[i]]
if((Misspre[hash2[i]][hash2[i]]>85.00) and (precision>65)):
save_color_model(model,hash2[i])
if __name__ == '__main__':
#save_model(model, PATH_TO_MODEL)
#print(np.shape(X))
for epoch in range(1, args.epochs + 1):
train(epoch)
test(epoch)
if historyMax==history[epoch]:
save_model(model, PATH_TO_MODEL)
if args.autonomous_stopping==1:
| haha=0
if(epoch>10):
for i in range(epoch-10+1,epoch+1):
if(Hloss[i]==lossMin):
haha=1
if haha==0:
break | conditional_block | |
train_fineall.py | # -*- coding: utf-8 -*-
import os
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import pickle
from PIL import Image
from sklearn.neural_network import MLPClassifier
import argparse
import torch
import torch.optim as optim
from tqdm import *
from termcolor import *
from torch.autograd import Variable
import torch.nn.functional as F
import torch.nn as nn
import torch.utils.data
import torchvision.transforms as transforms
import torchvision.datasets as datasets
import torchvision.models as models
import torchvision
# from package.models import MyModel # you may import your own model in the package
# from package import preprocess_methods # you may import your own preprocess method in the package
parser=argparse.ArgumentParser(description='uniqlo network')
parser.add_argument('--bh',default='1',
help='choose the network')
parser.add_argument('--no-cuda', action='store_true', default=False,
help='disables CUDA training')
parser.add_argument('--seed', type=int, default=1, metavar='S',
help='random seed (default: 1)')
parser.add_argument('--epochs', type=int, default=600, metavar='N',
help='number of epochs to train (default: 600)')
parser.add_argument('--batchsize', type=int, default=128, metavar='N',
help='input batch size for training (default: 128)')
parser.add_argument('--lr', type=float, default=0.1, metavar='LR',
help='learning rate (default: 0.1)')
parser.add_argument('--momentum', type=float, default=0.9, metavar='M',
help='SGD momentum (default: 0.9)')
parser.add_argument('--weight_decay', type=float, default=5e-4,
help='SGD weight_decay (default: 5e-4)')
parser.add_argument('--autonomous_stopping', type=int, default=0,
help='stop early once the training loss stops improving (1 to enable)')
parser.add_argument('--data',default='data',metavar='NT',
help='the data directory')
parser.add_argument('--modelpos',default='models/resnet18_pretrained.t7',metavar='NT',
help='path to the pre-trained model checkpoint')
args=parser.parse_args()
args.cuda = not args.no_cuda and torch.cuda.is_available()
torch.manual_seed(args.seed)
if args.cuda:
torch.cuda.manual_seed(args.seed)
PATH_TO_MODEL = os.path.join('models', 'color_models')
category_num=24
history=[0.01]*1000
historyMax=0.01
Hloss=[0.01]*1000
lossMin=1000000.0
hash2=[0, 1, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 2, 20, 21, 22, 23, 3, 4, 5, 6, 7, 8, 9]
colorMax=[0.0001]*24
Misspre=np.array([[0.01 for col in range(0,24)] for row in range(0,24)])
# loading pre-trained model
print(colored('initializing the model ...',"blue"))
print(colored('load model at '+args.modelpos,"blue"))
#model = models.resnet18(pretrained=True)#at least 224*224
model=torch.load(args.modelpos)
model.fc=nn.Linear(512,24)
# the attribute is `requires_grad` (no trailing s) and it lives on parameters,
# not on modules -- the original assignments silently did nothing
for param in model.parameters():
param.requires_grad = False
for layer in (model.fc, model.layer4, model.layer3, model.layer2, model.layer1):
for param in layer.parameters():
param.requires_grad = True
if args.cuda:
model.cuda()
print(colored('model ==> ',"green"))
print(model)
|
traindir = os.path.join(args.data, 'train')
valdir = os.path.join(args.data, 'val')
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
# (the hard-coded 'data/train'/'data/val' overrides are gone -- they defeated --data)
#print(os.listdir(traindir))
train_loader1 = torch.utils.data.DataLoader(
datasets.ImageFolder(traindir, transforms.Compose([
transforms.RandomResizedCrop(224),  # RandomSizedCrop was renamed in torchvision
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
normalize,
])),
batch_size=args.batchsize, shuffle=True,
num_workers=4, pin_memory=True)
train_loader2 = torch.utils.data.DataLoader(
datasets.ImageFolder(traindir, transforms.Compose([
transforms.Resize((224, 224)),  # Scale was renamed; square resize keeps batches collatable
transforms.ToTensor(),
normalize,
])),
batch_size=args.batchsize, shuffle=True,
num_workers=4, pin_memory=True)
train_loader3 = torch.utils.data.DataLoader(
datasets.ImageFolder(traindir, transforms.Compose([
transforms.RandomResizedCrop(224),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
normalize,
])),
batch_size=args.batchsize, shuffle=True,
num_workers=4, pin_memory=True)
val_loader = torch.utils.data.DataLoader(
datasets.ImageFolder(valdir, transforms.Compose([
transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
normalize,
])),
batch_size=args.batchsize, shuffle=False,
num_workers=4, pin_memory=True)
def imshow(img):
img = img / 2 + 0.5 # unnormalize
npimg = img.numpy()
plt.imshow(np.transpose(npimg, (1,2,0)))
plt.show()
#dataiter = iter(train_loader)
#print(len(train_loader))
#print(len(val_loader))
#images,labels= dataiter.next()
#print(images)
#print(labels)
# print images
#imshow(torchvision.utils.make_grid(images))
# define loss function (criterion) and optimizer
criterion = nn.CrossEntropyLoss()
if args.cuda:
criterion = criterion.cuda()
# only hand the optimizer the parameters that were left trainable
optimizer = optim.SGD(filter(lambda p: p.requires_grad, model.parameters()), lr=args.lr, momentum=args.momentum, weight_decay=args.weight_decay)
def save_model(model, name):
print('saving the model ...')
if not os.path.exists(PATH_TO_MODEL):
os.mkdir(PATH_TO_MODEL)
torch.save(model,PATH_TO_MODEL+'/'+str(historyMax)+'.t7')
print('done.')
def visualize(data):
for i in range(0,24):
print('color '+str(i)+ '!')
for j in range(0,24):
if(data[i][j]>5):
print('[%d][%d] = %.2f ' % (i, j, data[i][j]), end='')  # stay on one row per color
print()
for i in range(0,24):
print(colored('color %d precision: %.2f !' %(i,data[i][i]),'green'))
def save_color_model(model, color):
print('saving the color model for %d ...' %color)
visualize(Misspre)
PATH_TO_COLOR=PATH_TO_MODEL+str(color)
if not os.path.exists(PATH_TO_COLOR):
os.mkdir(PATH_TO_COLOR)
torch.save(model,PATH_TO_COLOR+'/'+str(colorMax[color])+'.t7')
print('done.')
def train(epoch):
model.train()
print(colored('training epoch '+ str(epoch) + ' !','blue'))
print(colored('loading data!','green'))
if epoch%3==0:
train_loader=train_loader1
elif epoch%3==1:
train_loader=train_loader2  # identical transform; no need to rebuild the loader each epoch
else:
train_loader=train_loader3
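# The epoch%3 schedule appears to alternate augmentation: random crops on two
# epochs out of three, a plain resize on the third.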
print(colored('done!','green'))
tot_loss=0.0
num=0
right=0
for i, (inputs, targets) in tqdm(enumerate(train_loader)):
if args.cuda:
inputs=inputs.cuda(non_blocking=True)  # `async` is a reserved word in Python 3
targets=targets.cuda(non_blocking=True)
inputs_var, targets_var = Variable(inputs), Variable(targets)
optimizer.zero_grad()
outputs = model(inputs_var)
loss=criterion(outputs, targets_var)/(args.batchsize*1.0)
loss.backward()
optimizer.step()
tot_loss=tot_loss+loss.item()  # loss.data[0] breaks on 0-dim tensors
num=num+inputs.size(0)
_,indices=torch.max(outputs.data,1)
indices=indices.view(inputs.size(0))
right=right+(indices==targets).sum().item()
#print(output.data)
#averageloss=2.3
#averageloss=(tot_loss*1.0)/(num*1.00)
print(colored("totloss: %.8f ! " %tot_loss,'red'))
precision=2.3
precision=(right*100.0)/(num*1.00)
print(colored("precision: %.2f%c ! " %(precision,'%'),'red'))
global Hloss,lossMin
Hloss[epoch]=tot_loss
if epoch==1:
lossMin=tot_loss
else:
lossMin=min(lossMin,tot_loss)
#print(colored("right: %d ! " %right,'red'))
def test(epoch):
Miss=[[0 for col in range(0,24)] for row in range(0,24)]# suppose i to be j
model.eval()
print(colored('Testing!','blue'))
tot_loss=0.0
num=0
right=0
for i, (inputs, targets) in tqdm(enumerate(val_loader)):
if args.cuda:
inputs=inputs.cuda(non_blocking=True)
targets=targets.cuda(non_blocking=True)
inputs_var, targets_var = Variable(inputs), Variable(targets)
outputs = model(inputs_var)
loss=criterion(outputs, targets_var)/(args.batchsize*1.0)
tot_loss=tot_loss+loss.item()
num=num+inputs.size(0)
_,indices=torch.max(outputs.data,1)
indices=indices.view(inputs.size(0))
right=right+(indices==targets).sum().item()
for j in range(0,inputs.size(0)):
#if targets[j]>24 or indices[j]>24:
#print(targets[j],indices[j])
Miss[hash2[int(targets[j])]][hash2[int(indices[j])]]+=1
#print(output.data)
#averageloss=2.3
#averageloss=(tot_loss*1.0)/(num*1.00)
print(colored("totloss: %.8f ! " %tot_loss,'red'))
precision=2.3
precision=(right*100.0)/(num*1.00)
print(colored("precision: %.2f%c ! " %(precision,'%'),'red'))
global historyMax,history
history[epoch]=precision
historyMax=max(historyMax,precision)
global Misspre
for i in range(0,24):
for j in range(0,24):
Misspre[hash2[i]][hash2[j]]=(100*Miss[hash2[i]][hash2[j]]*1.000)/(sum(Miss[hash2[i]])*1.000)
#torch.save(Misspre,'visualize/visualize.t7')
#visualize(Misspre)
for i in range(0,24):
if(Misspre[hash2[i]][hash2[i]]>colorMax[hash2[i]]):
colorMax[hash2[i]]=Misspre[hash2[i]][hash2[i]]
if((Misspre[hash2[i]][hash2[i]]>85.00) and (precision>65)):
save_color_model(model,hash2[i])
if __name__ == '__main__':
#save_model(model, PATH_TO_MODEL)
#print(np.shape(X))
for epoch in range(1, args.epochs + 1):
train(epoch)
test(epoch)
if historyMax==history[epoch]:
save_model(model, PATH_TO_MODEL)
if args.autonomous_stopping==1:
# stop when the minimum loss was not hit in any of the last 10 epochs
haha=0
if(epoch>10):
for i in range(epoch-10+1,epoch+1):
if(Hloss[i]==lossMin):
haha=1
if haha==0:
break | print(colored('initializing done.',"blue"))
# Data loading code | random_line_split |
lines.py | __author__ = 'hooda'
def intersection(line1, line2):
[line1, line2] = sorted([line1, line2])
if line1[0] == line2[0]:
print("INVALID")
m1, c1, m2, c2 = line1[0], line1[1], line2[0], line2[1]
x = (c2 - c1) / (m1 - m2)
y = (m2 * c1 - m1 * c2) / (m2 - m1)
print('intersection', line1, line2, x, y)
return [x, y]
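# For instance (assuming the [m, c, id] line format used below):
# intersection([1, 0, 1], [-1, 2, 2]) returns [1.0, 1.0] -- y=x meets y=2-x at (1, 1).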
def visible(lines):
# print(lines)
# print("visible check", lines)
if len(lines) == 1:
return [[float("-inf"), float("inf"), lines[0]]]
if len(lines) == 2:
line1 = lines[0]
line2 = lines[1]
point = intersection(line1, line2)
return [[float("-inf"), point[0], line1], [point[0], float("inf"), line2]]
mid = len(lines) // 2  # floor division, so the index stays an int on Python 3
struct1 = visible(lines[0:mid])
struct2 = visible(lines[mid:])
struct = combine(struct1, struct2)
# print("visibleD ", struct)
return struct
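# A "struct" here is the upper envelope encoded as [x_left, x_right, line] spans:
# on each x-interval, the recorded line is the one visible from above.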
def combine(struct1, struct2):
# print("combining''''''''''''''''''''''''''''''''''''''")
# print(struct1)
# print(struct2)
# IDEA : We assume that struct1 is from the lower slope half, struct2 from the higher slope half.
# Now, we merge the intersection points in struct1 and struct2. We get something like:
# p11, p12, p21, p13, p22 etc. in some random order. The insight is that the point of intersection we're looking for
# must lie between consecutive p's (or straight up outside the range).
# So we sort of pick each interleaving, find the corresponding lines
# in that region, and check if their intersection is also in that region.
# Another approach is to notice that as we approach from -infinity, struct2 must be lower,
# and as we approach +infinity, struct2 must be higher.
# The point of intersection is where this flip happens. This is also a reasonable approach,
# but the corner cases etc. need to be considered.
# Unsaid here is the assumption that there is one and only one point of intersection.
# I can't come up with a definite proof, but it seems reasonable nonetheless.
# The flippy approach.
# Struct1 is required by intergalactic law to be low-slope struct.
# if the infinity lines intersect at x < x10 and x20, we are done. Similarly for x > x1n and x2n.
infx = intersection(struct1[0][2], struct2[0][2])[0]
# print("infx", infx)
inf2x = intersection(struct1[-1][2], struct2[-1][2])[0]
# print("inf2x", inf2x)
if infx <= min(struct1[0][1], struct2[0][1]):
final = [[float("-inf"), infx, struct1[0][2]], [infx, struct2[0][1], struct2[0][2]]] + struct2[1:]
elif inf2x >= max(struct1[-1][0], struct2[-1][0]):
final = struct1[0:-1] + [[struct1[-1][0], inf2x, struct1[-1][2]], [inf2x, float("inf"), struct2[-1][2]]]
# Otherwise we truncate the structs to finite lengths. Find the intersection using flipping.
else:
minx = min(struct1[0][1], struct2[0][1])
maxx = max(struct1[-1][0], struct2[-1][0])
struct1a = confine(struct1, minx, maxx)
struct2a = confine(struct2, minx, maxx)
intersectionx = struct_intersection(struct1a, struct2a)
pos1 = getindex(intersectionx, struct1)
pos2 = getindex(intersectionx, struct2)
final1 = struct1[0:pos1] + [[struct1[pos1][0], intersectionx, struct1[pos1][2]]]
final2 = [[intersectionx, struct2[pos2][1], struct2[pos2][2]]] + struct2[pos2 + 1:]
final = final1 + final2
flag = False
if flag:
print("=1=1=1=11=1=1=1=1=1=1=1=1=1=1=1=1=1=1=1")
print(struct1, struct2)
print("seem to have combined into")
print(final)
print("@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@")
return final
def confine(struct, x1, x2):
# print("confinig", struct, x1, x2)
newstruct = struct[0:]
if newstruct[0][1] > x1:
newstruct[0] = [x1, newstruct[0][1], newstruct[0][2]]
elif newstruct[0][1] == x1:
newstruct = newstruct[1:]
if newstruct[-1][0] < x2:
newstruct[-1] = [newstruct[-1][0], x2, newstruct[-1][2]]
elif newstruct[-1][0] == x2:
newstruct = newstruct[:-1]
# print("CONNFFFIIIINNNNNEEEEEEDDDDDDD", newstruct)
return newstruct
def struct_intersection(struct1, struct2):
pos1 = binary_flip_search(struct1, struct2)
pos2 = binary_flip_search(struct2, struct1)
intersectionx = intersection(struct1[pos1][2], struct2[pos2][2])[0]
return intersectionx
def binary_flip_search(struct, cand):
# print("-----------------------------")
# print("binary flip search", struct, cand)
if len(struct) == 1:
if higher(struct[0], cand) == 0:  # compare with ==, not `is` (an int-interning accident)
return 0
else:
print("ERROR. Flipping didn't happen in: ", struct, cand)
mid = len(struct) // 2  # floor division, so the index stays an int on Python 3
higher1 = higher(struct[0], cand)
highern = higher(struct[-1], cand)
higher_mid = higher(struct[mid], cand)
if higher1 == 0:
return 0
if highern == 0:
return len(struct) - 1
if higher_mid == 0:
return mid
if higher1 == higher_mid:
# print("in call case0|||||||||||||||||||||||")
return mid + 1 + binary_flip_search(struct[mid + 1:-1], cand)
else:
# print("in call case1||||||||||||||||||||||||||")
|
def higher(region, cand):
point1 = [region[0], gety(region[0], region[2])]
point2 = [region[1], gety(region[1], region[2])]
high1 = high(point1, cand)
high2 = high(point2, cand)
if high1 and high2:
return 1
elif not (high1 or high2):
return -1
else:
return 0
def high(point, struct):
# print("HIGHHIGHHIG", point, struct)
line = struct[getindex(point[0], struct)][2]
y = gety(point[0], line)
# print("Results for :", point, struct, line, y)
if point[1] >= y:
return True
else:
return False
def getindex(x, struct):
if len(struct) == 1:
if struct[0][0] <= x <= struct[0][1]:
return 0
else:
return "Out of range of struct."
else:
mid = len(struct) // 2
if struct[mid][0] <= x <= struct[mid][1]:
return mid
elif x < struct[mid][0]:
return getindex(x, struct[0:mid])
elif x > struct[mid][1]:
return mid + 1 + getindex(x, struct[mid + 1:])
def gety(x, line):
return line[0] * x + line[1]
def reader(infile):
linelist = []
infile = open(infile)
lines = infile.readlines()
for i in range(1, int(lines[0]) + 1):
line = lines[i].split(":")
linelist += [[float(line[0]), float(line[1]), i]]
return linelist
def writer(outfile, struct):
outfile = open(outfile, "w")
visibles = []
for i in range(0, len(struct)):
visibles += [struct[i][2][2]]
visibles = sorted(list(set(visibles)))
s = str(visibles)
s = s[1:-1]
s = s.replace("'", "").replace(' ', '')
# print(s)
outfile.write(s)
outfile.close()
return s
def clean(lines):
if len(lines) < 2:
return lines
i = 1
while i < len(lines):
now = lines[i][0]
prv = lines[i - 1][0]
if now == prv:
# print(len(lines))
# print("hahaha. lele fuckru")
lines = lines[0:i - 1] + lines[i:]
# i += 1
# print(len(lines))
else:
i += 1
return lines
def runner(inf, outf):
lines = reader(inf)
lines.sort()
lines = clean(lines)
# sure = superbrute(lines)
struct = visible(lines)
s = writer(outf, struct)
# surelines = []
# for line in sure:
# surelines += [line[2]]
# s = str((sorted(surelines)))
# s = s[1:-1].replace(' ', '')
print(s)
return s
infile = "input.txt"
outfile = "output.txt"
def superbrute(lines):
visibles = []
for line in lines:
if brute(lines, line):
visibles += [line]
print(visibles)
return visibles
def brute(lines, mine):
# print(len(lines))
intersections = []
for line in lines:
if not mine == line:
intersections += [intersection(line, mine)[0]]
# intersections.sort()
ivisible = False
print(intersections)
for x in intersections:
my = gety(x, mine)
print('my',x,my)
high = True
for line in lines:
if not mine == line:
print('ot',x,gety(x, line))
if gety(x, line) > my:
print('other was higher')
high = False
if high:
ivisible = True
# print(mine)
return ivisible
return ivisible
import random
def generate(n):
mylines = []
for i in range(1, n + 1):
m = float(random.uniform(-100000, 100000))
c = float(random.uniform(-100000, 100000))
mylines += [[m, c, i]]
f = open('input.txt', 'w')
f.write(str(n) + '\n')
for line in mylines:
f.write(str(line[0]) + ':' + str(line[1]) + '\n')
return mylines
def supertest(n):
# lines = generate(n)
# lines.sort()
# lines = clean(lines)
# for line in lines:
# print(line)
# print("Doing Brute Forces")
# sure = superbrute(lines)
print("doing ninja speed mode")
maybe = visible(lines)
writer(outfile, maybe)
surelines = []
for line in sure:
surelines += [line[2]]
s = str((sorted(surelines)))
s = s[1:-1].replace(' ', '')
print(s)
def infitest():
# print('lol')
# return
while True:
i = int(input('What now?'))  # raw_input is Python 2-only
lines = generate(i)
print(sorted(lines))
maybe = runner('input.txt', 'output.txt')
sure = superbrute(lines)
surelines = []
for line in sure:
surelines += [line[2]]
s = str((sorted(surelines)))
s = s[1:-1].replace(' ', '')
print('sure',s)
print('maybe',maybe)
# runner('input.txt','output.txt')
# infitest()
# TODO make etc. files for script based checking.
| return 1 + binary_flip_search(struct[1:mid], cand) | conditional_block |
lines.py | __author__ = 'hooda'
def intersection(line1, line2):
[line1, line2] = sorted([line1, line2])
if line1[0] == line2[0]:
print("INVALID")
m1, c1, m2, c2 = line1[0], line1[1], line2[0], line2[1]
x = (c2 - c1) / (m1 - m2)
y = (m2 * c1 - m1 * c2) / (m2 - m1)
print('intersection', line1, line2, x, y)
return [x, y]
def visible(lines):
# print(lines)
# print("visible check", lines)
if len(lines) == 1:
return [[float("-inf"), float("inf"), lines[0]]]
if len(lines) == 2:
line1 = lines[0]
line2 = lines[1]
point = intersection(line1, line2)
return [[float("-inf"), point[0], line1], [point[0], float("inf"), line2]]
mid = len(lines) // 2  # floor division, so the index stays an int on Python 3
struct1 = visible(lines[0:mid])
struct2 = visible(lines[mid:])
struct = combine(struct1, struct2)
# print("visibleD ", struct)
return struct
def combine(struct1, struct2):
# print("combining''''''''''''''''''''''''''''''''''''''")
# print(struct1)
# print(struct2)
# IDEA : We assume that struct1 is from the lower slope half, struct2 from the higher slope half.
# Now, we merge the intersection points in struct1 and struct2. We get something like:
# p11, p12, p21, p13, p22 etc. in some random order. The insight is that the point of intersection we're looking for
# must lie between consecutive p's (or straight up outside the range).
# So we sort of pick each interleaving, find the corresponding lines
# in that region, and check if their intersection is also in that region.
# Another approach is to notice that as we approach from -infinity, struct2 must be lower,
# and as we approach +infinity, struct2 must be higher.
# The point of intersection is where this flip happens. This is also a reasonable approach,
# but the corner cases etc. need to be considered.
# Unsaid here is the assumption that there is one and only one point of intersection.
# I can't come up with a definite proof, but it seems reasonable nonetheless.
# The flippy approach.
# Struct1 is required by intergalactic law to be low-slope struct.
# if the infinity lines intersect at x < x10 and x20, we are done. Similarly for x > x1n and x2n.
infx = intersection(struct1[0][2], struct2[0][2])[0]
# print("infx", infx)
inf2x = intersection(struct1[-1][2], struct2[-1][2])[0]
# print("inf2x", inf2x)
if infx <= min(struct1[0][1], struct2[0][1]):
final = [[float("-inf"), infx, struct1[0][2]], [infx, struct2[0][1], struct2[0][2]]] + struct2[1:]
elif inf2x >= max(struct1[-1][0], struct2[-1][0]):
final = struct1[0:-1] + [[struct1[-1][0], inf2x, struct1[-1][2]], [inf2x, float("inf"), struct2[-1][2]]]
# Otherwise we truncate the structs to finite lengths. Find the intersection using flipping.
else:
minx = min(struct1[0][1], struct2[0][1])
maxx = max(struct1[-1][0], struct2[-1][0])
struct1a = confine(struct1, minx, maxx)
struct2a = confine(struct2, minx, maxx)
intersectionx = struct_intersection(struct1a, struct2a)
pos1 = getindex(intersectionx, struct1)
pos2 = getindex(intersectionx, struct2)
final1 = struct1[0:pos1] + [[struct1[pos1][0], intersectionx, struct1[pos1][2]]]
final2 = [[intersectionx, struct2[pos2][1], struct2[pos2][2]]] + struct2[pos2 + 1:]
final = final1 + final2
flag = False
if flag:
print("=1=1=1=11=1=1=1=1=1=1=1=1=1=1=1=1=1=1=1")
print(struct1, struct2)
print("seem to have combined into")
print(final)
print("@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@")
return final
def confine(struct, x1, x2):
# print("confinig", struct, x1, x2)
newstruct = struct[0:]
if newstruct[0][1] > x1:
newstruct[0] = [x1, newstruct[0][1], newstruct[0][2]]
elif newstruct[0][1] == x1:
newstruct = newstruct[1:]
if newstruct[-1][0] < x2:
newstruct[-1] = [newstruct[-1][0], x2, newstruct[-1][2]]
elif newstruct[-1][0] == x2:
newstruct = newstruct[:-1]
# print("CONNFFFIIIINNNNNEEEEEEDDDDDDD", newstruct)
return newstruct
def struct_intersection(struct1, struct2):
pos1 = binary_flip_search(struct1, struct2)
pos2 = binary_flip_search(struct2, struct1)
intersectionx = intersection(struct1[pos1][2], struct2[pos2][2])[0]
return intersectionx
def binary_flip_search(struct, cand):
# print("-----------------------------")
# print("binary flip search", struct, cand)
if len(struct) == 1:
if higher(struct[0], cand) == 0:  # compare with ==, not `is` (an int-interning accident)
return 0
else:
print("ERROR. Flipping didn't happen in: ", struct, cand)
mid = len(struct) // 2  # floor division, so the index stays an int on Python 3
higher1 = higher(struct[0], cand)
highern = higher(struct[-1], cand)
higher_mid = higher(struct[mid], cand)
if higher1 == 0:
return 0
if highern == 0:
return len(struct) - 1
if higher_mid == 0:
return mid
if higher1 == higher_mid:
# print("in call case0|||||||||||||||||||||||")
return mid + 1 + binary_flip_search(struct[mid + 1:-1], cand)
else:
# print("in call case1||||||||||||||||||||||||||")
return 1 + binary_flip_search(struct[1:mid], cand)
def higher(region, cand):
point1 = [region[0], gety(region[0], region[2])]
point2 = [region[1], gety(region[1], region[2])]
high1 = high(point1, cand)
high2 = high(point2, cand)
if high1 and high2:
return 1
elif not (high1 or high2):
return -1
else:
return 0
def high(point, struct):
# print("HIGHHIGHHIG", point, struct)
line = struct[getindex(point[0], struct)][2]
y = gety(point[0], line)
# print("Results for :", point, struct, line, y)
if point[1] >= y:
return True
else:
return False
def getindex(x, struct):
if len(struct) == 1:
if struct[0][0] <= x <= struct[0][1]:
return 0
else:
return "Out of range of struct."
else:
mid = len(struct) // 2
if struct[mid][0] <= x <= struct[mid][1]:
return mid
elif x < struct[mid][0]:
return getindex(x, struct[0:mid])
elif x > struct[mid][1]:
return mid + 1 + getindex(x, struct[mid + 1:])
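# Sketch of the lookup, under the span encoding above: for
# struct = [[-inf, 1, l0], [1, 3, l1], [3, inf, l2]], getindex(1.5, struct) is 1,
# since 1 <= 1.5 <= 3 lands in the middle span.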
def gety(x, line):
return line[0] * x + line[1]
def reader(infile):
linelist = []
infile = open(infile)
lines = infile.readlines()
for i in range(1, int(lines[0]) + 1):
line = lines[i].split(":")
linelist += [[float(line[0]), float(line[1]), i]]
return linelist
def writer(outfile, struct):
outfile = open(outfile, "w")
visibles = []
for i in range(0, len(struct)):
visibles += [struct[i][2][2]]
visibles = sorted(list(set(visibles)))
s = str(visibles)
s = s[1:-1]
s = s.replace("'", "").replace(' ', '')
# print(s)
outfile.write(s)
outfile.close()
return s
def clean(lines):
if len(lines) < 2:
return lines
i = 1
while i < len(lines):
now = lines[i][0]
prv = lines[i - 1][0]
if now == prv:
# print(len(lines))
# print("hahaha. lele fuckru")
lines = lines[0:i - 1] + lines[i:]
# i += 1
# print(len(lines))
else:
i += 1
return lines
def runner(inf, outf):
lines = reader(inf)
lines.sort()
lines = clean(lines)
# sure = superbrute(lines)
struct = visible(lines)
s = writer(outf, struct)
# surelines = []
# for line in sure:
# surelines += [line[2]]
# s = str((sorted(surelines)))
# s = s[1:-1].replace(' ', '')
print(s)
return s
infile = "input.txt"
outfile = "output.txt"
def superbrute(lines):
visibles = []
for line in lines:
if brute(lines, line):
visibles += [line]
print(visibles)
return visibles
def brute(lines, mine):
# print(len(lines))
intersections = []
for line in lines:
if not mine == line:
intersections += [intersection(line, mine)[0]]
# intersections.sort()
ivisible = False
print(intersections)
for x in intersections:
my = gety(x, mine)
print('my',x,my)
high = True
for line in lines:
if not mine == line:
print('ot',x,gety(x, line))
if gety(x, line) > my:
print('other was higher')
high = False
if high:
ivisible = True
# print(mine)
return ivisible
return ivisible
import random
def generate(n):
|
def supertest(n):
# re-enabled: the commented-out setup left `lines` and `sure` undefined
lines = generate(n)
lines.sort()
lines = clean(lines)
print("Doing Brute Forces")
sure = superbrute(lines)
print("doing ninja speed mode")
maybe = visible(lines)
writer(outfile, maybe)
surelines = []
for line in sure:
surelines += [line[2]]
s = str((sorted(surelines)))
s = s[1:-1].replace(' ', '')
print(s)
def infitest():
# print('lol')
# return
while True:
i = int(input('What now?'))  # raw_input is Python 2-only
lines = generate(i)
print(sorted(lines))
maybe = runner('input.txt', 'output.txt')
sure = superbrute(lines)
surelines = []
for line in sure:
surelines += [line[2]]
s = str((sorted(surelines)))
s = s[1:-1].replace(' ', '')
print('sure',s)
print('maybe',maybe)
# runner('input.txt','output.txt')
# infitest()
# TODO make etc. files for script based checking.
| mylines = []
for i in range(1, n + 1):
m = float(random.uniform(-100000, 100000))
c = float(random.uniform(-100000, 100000))
mylines += [[m, c, i]]
f = open('input.txt', 'w')
f.write(str(n) + '\n')
for line in mylines:
f.write(str(line[0]) + ':' + str(line[1]) + '\n')
return mylines | identifier_body |
lines.py | __author__ = 'hooda'
def intersection(line1, line2):
[line1, line2] = sorted([line1, line2])
if line1[0] == line2[0]:
print("INVALID")
m1, c1, m2, c2 = line1[0], line1[1], line2[0], line2[1]
x = (c2 - c1) / (m1 - m2)
y = (m2 * c1 - m1 * c2) / (m2 - m1)
print('intersection', line1, line2, x, y)
return [x, y]
def visible(lines):
# print(lines)
# print("visible check", lines)
if len(lines) == 1:
return [[float("-inf"), float("inf"), lines[0]]]
if len(lines) == 2:
line1 = lines[0]
line2 = lines[1]
point = intersection(line1, line2)
return [[float("-inf"), point[0], line1], [point[0], float("inf"), line2]]
mid = len(lines) // 2  # floor division, so the index stays an int on Python 3
struct1 = visible(lines[0:mid])
struct2 = visible(lines[mid:])
struct = combine(struct1, struct2)
# print("visibleD ", struct)
return struct
def combine(struct1, struct2):
# print("combining''''''''''''''''''''''''''''''''''''''")
# print(struct1)
# print(struct2)
# IDEA : We assume that struct1 is from the lower slope half, struct2 from the higher slope half.
# Now, we merge the intersection points in struct1 and struct2. We get something like:
# p11, p12, p21, p13, p22 etc. in some random order. The insight is that the point of intersection we're looking for
# must lie between consecutive p's (or straight up outside the range).
# So we sort of pick each interleaving, find the corresponding lines
# in that region, and check if their intersection is also in that region.
# Another approach is to notice that as we approach from -infinity, struct2 must be lower,
# and as we approach +infinity, struct2 must be higher.
# The point of intersection is where this flip happens. This is also a reasonable approach,
# but the corner cases etc. need to be considered.
# Unsaid here is the assumption that there is one and only one point of intersection.
# I can't come up with a definite proof, but it seems reasonable nonetheless.
# The flippy approach.
# Struct1 is required by intergalactic law to be low-slope struct.
# if the infinity lines intersect at x < x10 and x20, we are done. Similarly for x > x1n and x2n.
infx = intersection(struct1[0][2], struct2[0][2])[0]
# print("infx", infx)
inf2x = intersection(struct1[-1][2], struct2[-1][2])[0]
# print("inf2x", inf2x)
if infx <= min(struct1[0][1], struct2[0][1]):
final = [[float("-inf"), infx, struct1[0][2]], [infx, struct2[0][1], struct2[0][2]]] + struct2[1:]
elif inf2x >= max(struct1[-1][0], struct2[-1][0]):
final = struct1[0:-1] + [[struct1[-1][0], inf2x, struct1[-1][2]], [inf2x, float("inf"), struct2[-1][2]]]
# Otherwise we truncate the structs to finite lengths. Find the intersection using flipping.
else:
minx = min(struct1[0][1], struct2[0][1])
maxx = max(struct1[-1][0], struct2[-1][0])
struct1a = confine(struct1, minx, maxx)
struct2a = confine(struct2, minx, maxx)
intersectionx = struct_intersection(struct1a, struct2a)
pos1 = getindex(intersectionx, struct1)
pos2 = getindex(intersectionx, struct2)
final1 = struct1[0:pos1] + [[struct1[pos1][0], intersectionx, struct1[pos1][2]]]
final2 = [[intersectionx, struct2[pos2][1], struct2[pos2][2]]] + struct2[pos2 + 1:]
final = final1 + final2
flag = False
if flag:
print("=1=1=1=11=1=1=1=1=1=1=1=1=1=1=1=1=1=1=1")
print(struct1, struct2)
print("seem to have combined into")
print(final)
print("@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@")
return final
def confine(struct, x1, x2):
# print("confinig", struct, x1, x2)
newstruct = struct[0:]
if newstruct[0][1] > x1:
newstruct[0] = [x1, newstruct[0][1], newstruct[0][2]]
elif newstruct[0][1] == x1:
newstruct = newstruct[1:]
if newstruct[-1][0] < x2:
newstruct[-1] = [newstruct[-1][0], x2, newstruct[-1][2]]
elif newstruct[-1][0] == x2:
newstruct = newstruct[:-1]
# print("CONNFFFIIIINNNNNEEEEEEDDDDDDD", newstruct)
return newstruct
def struct_intersection(struct1, struct2):
pos1 = binary_flip_search(struct1, struct2)
pos2 = binary_flip_search(struct2, struct1)
intersectionx = intersection(struct1[pos1][2], struct2[pos2][2])[0]
return intersectionx
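# The two flip searches bracket the crossover: each finds the span of one
# envelope on which the other changes from below to above, and the returned x
# is where those two spans' lines meet.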
def binary_flip_search(struct, cand):
# print("-----------------------------")
# print("binary flip search", struct, cand)
if len(struct) == 1:
if higher(struct[0], cand) == 0:  # compare with ==, not `is` (an int-interning accident)
return 0
else:
print("ERROR. Flipping didn't happen in: ", struct, cand)
mid = len(struct) // 2  # floor division, so the index stays an int on Python 3
higher1 = higher(struct[0], cand)
highern = higher(struct[-1], cand)
higher_mid = higher(struct[mid], cand)
if higher1 == 0:
return 0
if highern == 0:
return len(struct) - 1 | return mid
if higher1 == higher_mid:
# print("in call case0|||||||||||||||||||||||")
return mid + 1 + binary_flip_search(struct[mid + 1:-1], cand)
else:
# print("in call case1||||||||||||||||||||||||||")
return 1 + binary_flip_search(struct[1:mid], cand)
def higher(region, cand):
point1 = [region[0], gety(region[0], region[2])]
point2 = [region[1], gety(region[1], region[2])]
high1 = high(point1, cand)
high2 = high(point2, cand)
if high1 and high2:
return 1
elif not (high1 or high2):
return -1
else:
return 0
def high(point, struct):
# print("HIGHHIGHHIG", point, struct)
line = struct[getindex(point[0], struct)][2]
y = gety(point[0], line)
# print("Results for :", point, struct, line, y)
if point[1] >= y:
return True
else:
return False
def getindex(x, struct):
if len(struct) == 1:
if struct[0][0] <= x <= struct[0][1]:
return 0
else:
return "Out of range of struct."
else:
mid = len(struct) // 2
if struct[mid][0] <= x <= struct[mid][1]:
return mid
elif x < struct[mid][0]:
return getindex(x, struct[0:mid])
elif x > struct[mid][1]:
return mid + 1 + getindex(x, struct[mid + 1:])
def gety(x, line):
return line[0] * x + line[1]
def reader(infile):
linelist = []
infile = open(infile)
lines = infile.readlines()
for i in range(1, int(lines[0]) + 1):
line = lines[i].split(":")
linelist += [[float(line[0]), float(line[1]), i]]
return linelist
def writer(outfile, struct):
outfile = open(outfile, "w")
visibles = []
for i in range(0, len(struct)):
visibles += [struct[i][2][2]]
visibles = sorted(list(set(visibles)))
s = str(visibles)
s = s[1:-1]
s = s.replace("'", "").replace(' ', '')
# print(s)
outfile.write(s)
outfile.close()
return s
def clean(lines):
if len(lines) < 2:
return lines
i = 1
while i < len(lines):
now = lines[i][0]
prv = lines[i - 1][0]
if now == prv:
# print(len(lines))
# print("hahaha. lele fuckru")
lines = lines[0:i - 1] + lines[i:]
# i += 1
# print(len(lines))
else:
i += 1
return lines
def runner(inf, outf):
lines = reader(inf)
lines.sort()
lines = clean(lines)
# sure = superbrute(lines)
struct = visible(lines)
s = writer(outf, struct)
# surelines = []
# for line in sure:
# surelines += [line[2]]
# s = str((sorted(surelines)))
# s = s[1:-1].replace(' ', '')
print(s)
return s
infile = "input.txt"
outfile = "output.txt"
def superbrute(lines):
visibles = []
for line in lines:
if brute(lines, line):
visibles += [line]
print(visibles)
return visibles
def brute(lines, mine):
# print(len(lines))
intersections = []
for line in lines:
if not mine == line:
intersections += [intersection(line, mine)[0]]
# intersections.sort()
ivisible = False
print(intersections)
for x in intersections:
my = gety(x, mine)
print('my',x,my)
high = True
for line in lines:
if not mine == line:
print('ot',x,gety(x, line))
if gety(x, line) > my:
print('other was higher')
high = False
if high:
ivisible = True
# print(mine)
return ivisible
return ivisible
import random
def generate(n):
mylines = []
for i in range(1, n + 1):
m = float(random.uniform(-100000, 100000))
c = float(random.uniform(-100000, 100000))
mylines += [[m, c, i]]
f = open('input.txt', 'w')
f.write(str(n) + '\n')
for line in mylines:
f.write(str(line[0]) + ':' + str(line[1]) + '\n')
return mylines
def supertest(n):
# re-enabled: the commented-out setup left `lines` and `sure` undefined
lines = generate(n)
lines.sort()
lines = clean(lines)
print("Doing Brute Forces")
sure = superbrute(lines)
print("doing ninja speed mode")
maybe = visible(lines)
writer(outfile, maybe)
surelines = []
for line in sure:
surelines += [line[2]]
s = str((sorted(surelines)))
s = s[1:-1].replace(' ', '')
print(s)
def infitest():
# print('lol')
# return
while True:
i = int(input('What now?'))  # raw_input is Python 2-only
lines = generate(i)
print(sorted(lines))
maybe = runner('input.txt', 'output.txt')
sure = superbrute(lines)
surelines = []
for line in sure:
surelines += [line[2]]
s = str((sorted(surelines)))
s = s[1:-1].replace(' ', '')
print('sure',s)
print('maybe',maybe)
# runner('input.txt','output.txt')
# infitest()
# TODO make etc. files for script based checking. | if higher_mid is 0: | random_line_split |
lines.py | __author__ = 'hooda'
def intersection(line1, line2):
[line1, line2] = sorted([line1, line2])
if line1[0] == line2[0]:
print("INVALID")
m1, c1, m2, c2 = line1[0], line1[1], line2[0], line2[1]
x = (c2 - c1) / (m1 - m2)
y = (m2 * c1 - m1 * c2) / (m2 - m1)
print('intersection', line1, line2, x, y)
return [x, y]
def visible(lines):
# print(lines)
# print("visible check", lines)
if len(lines) == 1:
return [[float("-inf"), float("inf"), lines[0]]]
if len(lines) == 2:
line1 = lines[0]
line2 = lines[1]
point = intersection(line1, line2)
return [[float("-inf"), point[0], line1], [point[0], float("inf"), line2]]
mid = len(lines) // 2  # floor division, so the index stays an int on Python 3
struct1 = visible(lines[0:mid])
struct2 = visible(lines[mid:])
struct = combine(struct1, struct2)
# print("visibleD ", struct)
return struct
def combine(struct1, struct2):
# print("combining''''''''''''''''''''''''''''''''''''''")
# print(struct1)
# print(struct2)
# IDEA : We assume that struct1 is from the lower slope half, struct2 from the higher slope half.
# Now, we merge the intersection points in struct1 and struct2. We get something like:
# p11, p12, p21, p13, p22 etc. in some random order. The insight is that the point of intersection we're looking for
# must lie between consecutive p's (or straight up outside the range).
# So we sort of pick each interleaving, find the corresponding lines
# in that region, and check if their intersection is also in that region.
# Another approach is to notice that as we approach from -infinity, struct2 must be lower,
# and as we approach +infinity, struct2 must be higher.
# The point of intersection is where this flip happens. This is also a reasonable approach,
# but the corner cases etc. need to be considered.
# Unsaid here is the assumption that there is one and only one point of intersection.
# I can't come up with a definite proof, but it seems reasonable nonetheless.
# The flippy approach.
# Struct1 is required by intergalactic law to be low-slope struct.
# if the infinity lines intersect at x < x10 and x20, we are done. Similarly for x > x1n and x2n.
infx = intersection(struct1[0][2], struct2[0][2])[0]
# print("infx", infx)
inf2x = intersection(struct1[-1][2], struct2[-1][2])[0]
# print("inf2x", inf2x)
if infx <= min(struct1[0][1], struct2[0][1]):
final = [[float("-inf"), infx, struct1[0][2]], [infx, struct2[0][1], struct2[0][2]]] + struct2[1:]
elif inf2x >= max(struct1[-1][0], struct2[-1][0]):
final = struct1[0:-1] + [[struct1[-1][0], inf2x, struct1[-1][2]], [inf2x, float("inf"), struct2[-1][2]]]
# Otherwise we truncate the structs to finite lengths. Find the intersection using flipping.
else:
minx = min(struct1[0][1], struct2[0][1])
maxx = max(struct1[-1][0], struct2[-1][0])
struct1a = confine(struct1, minx, maxx)
struct2a = confine(struct2, minx, maxx)
intersectionx = struct_intersection(struct1a, struct2a)
pos1 = getindex(intersectionx, struct1)
pos2 = getindex(intersectionx, struct2)
final1 = struct1[0:pos1] + [[struct1[pos1][0], intersectionx, struct1[pos1][2]]]
final2 = [[intersectionx, struct2[pos2][1], struct2[pos2][2]]] + struct2[pos2 + 1:]
final = final1 + final2
flag = False
if flag:
print("=1=1=1=11=1=1=1=1=1=1=1=1=1=1=1=1=1=1=1")
print(struct1, struct2)
print("seem to have combined into")
print(final)
print("@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@")
return final
def confine(struct, x1, x2):
# print("confinig", struct, x1, x2)
newstruct = struct[0:]
if newstruct[0][1] > x1:
newstruct[0] = [x1, newstruct[0][1], newstruct[0][2]]
elif newstruct[0][1] == x1:
newstruct = newstruct[1:]
if newstruct[-1][0] < x2:
newstruct[-1] = [newstruct[-1][0], x2, newstruct[-1][2]]
elif newstruct[-1][0] == x2:
newstruct = newstruct[:-1]
# print("CONNFFFIIIINNNNNEEEEEEDDDDDDD", newstruct)
return newstruct
def struct_intersection(struct1, struct2):
pos1 = binary_flip_search(struct1, struct2)
pos2 = binary_flip_search(struct2, struct1)
intersectionx = intersection(struct1[pos1][2], struct2[pos2][2])[0]
return intersectionx
def binary_flip_search(struct, cand):
# print("-----------------------------")
# print("binary flip search", struct, cand)
if len(struct) == 1:
if higher(struct[0], cand) == 0:  # compare with ==, not `is` (an int-interning accident)
return 0
else:
print("ERROR. Flipping didn't happen in: ", struct, cand)
mid = len(struct) // 2  # floor division, so the index stays an int on Python 3
higher1 = higher(struct[0], cand)
highern = higher(struct[-1], cand)
higher_mid = higher(struct[mid], cand)
if higher1 == 0:
return 0
if highern == 0:
return len(struct) - 1
if higher_mid == 0:
return mid
if higher1 == higher_mid:
# print("in call case0|||||||||||||||||||||||")
return mid + 1 + binary_flip_search(struct[mid + 1:-1], cand)
else:
# print("in call case1||||||||||||||||||||||||||")
return 1 + binary_flip_search(struct[1:mid], cand)
def higher(region, cand):
point1 = [region[0], gety(region[0], region[2])]
point2 = [region[1], gety(region[1], region[2])]
high1 = high(point1, cand)
high2 = high(point2, cand)
if high1 and high2:
return 1
elif not (high1 or high2):
return -1
else:
return 0
def high(point, struct):
# print("HIGHHIGHHIG", point, struct)
line = struct[getindex(point[0], struct)][2]
y = gety(point[0], line)
# print("Results for :", point, struct, line, y)
if point[1] >= y:
return True
else:
return False
def getindex(x, struct):
if len(struct) == 1:
if struct[0][0] <= x <= struct[0][1]:
return 0
else:
return "Out of range of struct."
else:
mid = len(struct) // 2
if struct[mid][0] <= x <= struct[mid][1]:
return mid
elif x < struct[mid][0]:
return getindex(x, struct[0:mid])
elif x > struct[mid][1]:
return mid + 1 + getindex(x, struct[mid + 1:])
def gety(x, line):
return line[0] * x + line[1]
def reader(infile):
linelist = []
infile = open(infile)
lines = infile.readlines()
for i in range(1, int(lines[0]) + 1):
line = lines[i].split(":")
linelist += [[float(line[0]), float(line[1]), i]]
return linelist
def writer(outfile, struct):
outfile = open(outfile, "w")
visibles = []
for i in range(0, len(struct)):
visibles += [struct[i][2][2]]
visibles = sorted(list(set(visibles)))
s = str(visibles)
s = s[1:-1]
s = s.replace("'", "").replace(' ', '')
# print(s)
outfile.write(s)
outfile.close()
return s
def clean(lines):
if len(lines) < 2:
return lines
i = 1
while i < len(lines):
now = lines[i][0]
prv = lines[i - 1][0]
if now == prv:
# print(len(lines))
# print("hahaha. lele fuckru")
lines = lines[0:i - 1] + lines[i:]
# i += 1
# print(len(lines))
else:
i += 1
return lines
def runner(inf, outf):
lines = reader(inf)
lines.sort()
lines = clean(lines)
# sure = superbrute(lines)
struct = visible(lines)
s = writer(outf, struct)
# surelines = []
# for line in sure:
# surelines += [line[2]]
# s = str((sorted(surelines)))
# s = s[1:-1].replace(' ', '')
print(s)
return s
infile = "input.txt"
outfile = "output.txt"
def superbrute(lines):
visibles = []
for line in lines:
if brute(lines, line):
visibles += [line]
print(visibles)
return visibles
def brute(lines, mine):
# print(len(lines))
intersections = []
for line in lines:
if not mine == line:
intersections += [intersection(line, mine)[0]]
# intersections.sort()
ivisible = False
print(intersections)
for x in intersections:
my = gety(x, mine)
print('my',x,my)
high = True
for line in lines:
if not mine == line:
print('ot',x,gety(x, line))
if gety(x, line) > my:
print('other was higher')
high = False
if high:
ivisible = True
# print(mine)
return ivisible
return ivisible
import random
def | (n):
mylines = []
for i in range(1, n + 1):
m = float(random.uniform(-100000, 100000))
c = float(random.uniform(-100000, 100000))
mylines += [[m, c, i]]
f = open('input.txt', 'w')
f.write(str(n) + '\n')
for line in mylines:
f.write(str(line[0]) + ':' + str(line[1]) + '\n')
return mylines
def supertest(n):
# re-enabled: the commented-out setup left `lines` and `sure` undefined
lines = generate(n)
lines.sort()
lines = clean(lines)
print("Doing Brute Forces")
sure = superbrute(lines)
print("doing ninja speed mode")
maybe = visible(lines)
writer(outfile, maybe)
surelines = []
for line in sure:
surelines += [line[2]]
s = str((sorted(surelines)))
s = s[1:-1].replace(' ', '')
print(s)
def infitest():
# print('lol')
# return
while True:
i = int(input('What now?'))  # raw_input is Python 2-only
lines = generate(i)
print(sorted(lines))
maybe = runner('input.txt', 'output.txt')
sure = superbrute(lines)
surelines = []
for line in sure:
surelines += [line[2]]
s = str((sorted(surelines)))
s = s[1:-1].replace(' ', '')
print('sure',s)
print('maybe',maybe)
# runner('input.txt','output.txt')
# infitest()
# TODO make etc. files for script based checking.
| generate | identifier_name |
tower-of-power.py | # This is a bit of an experiment, really: a way to generate "box tower"
# dependency diagrams in the style of the inside cover of Matt Parker's book,
# "Things to Make and Do in the Fourth Dimension." Parker's blocks represent
# prerequisite-knowledge dependencies between his book chapters, but surely the
# same idea could apply to all sorts of dependencies: software modules and
# college course prereqs come to mind as examples.
# After struggling for a few hours with trying to generate such diagrams
# directly, I eventually gave up and wrote a solver-aided program. Hence, this
# program depends on the z3 solver and its Python interface (neither of which
# is hard to install).
import z3
# ...but I'm getting ahead of myself. Really, the story of "how do I draw
# dependency diagrams" begins with the question, "what are dependencies?"
# I think of dependencies as directed acyclic graphs, or DAGs. Each
# chapter/module/course is a node, and an edge from A to B indicates that "A
# depends on B." The reason the graph is acyclic should then be clear: A and B
# can't depend on each other!
# There's actually a lot of literature on drawing diagrams of directed acyclic
# graphs, some of which can be found in the `graphviz' documentation (the
# working title to this program was `Sugiyama's Last Stand').
# For now, though, here's a small DAG class in Python to get started.
class DAGException(Exception):
pass
class dag():
# "root" is a placeholder node: anything with no dependencies secretly depends
# on "root". Why? Because all the blocks at the base of the tower need to sit
# on the same ground level. "root" is the floor.
ROOT = '( root )'
def __init__(self):
self.deps = {} # maps a node to a list of its dependencies
self.clss = {dag.ROOT: 'base'} # maps a node to its CSS class
self.text = {dag.ROOT: ''} # maps a node to its label text
self.dep_cache = {} # this speeds up some algorithms below
self.insert(dag.ROOT, [])
# The following incomprehensible regular expressions define the BOX file format
# which I invented to make it easier to specify DAGs for this project.
def load_line(self, line):
import re
wsop_re = r'\s*'
name_re = r'([\w-]+)'
deps_re = r'\((\s*(?:[\w-]+(?:\s*,\s*[\w-]+)*)?\s*)\)'
clss_re = r'(?:\.([\w-]+))?'
text_re = r':\s*(.*)?'
line_re = re.compile(
wsop_re + name_re +
wsop_re + deps_re +
wsop_re + clss_re +
wsop_re + text_re
)
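# A hypothetical BOX line these regexes accept (name, deps, optional .class, text):
#   calculus (algebra, trig).blue: Calculus\Chapter 3
# The backslash in the text is the manual line-break marker used by render().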
match = line_re.match(line)
if match:
name = match.group(1)
deps = match.group(2).replace(',', ' ').split()
clss = match.group(3) or 'box-generic'
text = match.group(4)
if len(deps) == 0:
deps = [dag.ROOT]
self.insert(name, deps)
self.clss[name] = clss
self.text[name] = text
elif len(line.split()) == 0 or line[0] == '#':
pass
else:
raise DAGException("Syntax error on line: `" + line + "`")
def load_file(self, text):
lines = text.split('\n')
for line in lines:
self.load_line(line)
# A lot of the invariants on this DAG are maintained by complaining loudly as
# soon as they are violated. So, you have to insert nodes in the correct order.
# Other than that, this insertion routine is unexciting.
def insert(self, name, deps):
if name in self.get_nodes():
raise DAGException("Already added this name to DAG.")
for d in deps:
if d not in self.deps:
raise DAGException("Unknown dependency: %s (for %s)"%(d, name))
self.deps[name] = list(deps)
# This is also unexciting.
def get_nodes(self):
return self.deps.keys()
# This, however, is kind of interesting. Notice that Parker's tower doesn't
# distinguish between "direct dependencies" and "transitive dependencies": if
# A depends on B and C, and B itself depends on C, then the box for A doesn't
# need to sit on the boxes for both B and C (in fact, it can't!). The correct
# drawing is "A on B, B on C", with the understanding that A "of course"
# depends on C as well.
#
# The following predicates sort out this mess, by giving me a way to check if a
# dependency is "direct" or "transitive" in that sense. Transitive dependencies
# can be ignored.
def get_dependencies(self, node):
return self.deps[node]
def is_dependency(self, node, dep):
if (node, dep) in self.dep_cache:
return self.dep_cache[(node, dep)]
shallow_deps = self.get_dependencies(node)
if dep in shallow_deps:
self.dep_cache[(node, dep)] = True
return True
for sd in shallow_deps:
if self.is_dependency(sd, dep):
self.dep_cache[(node, dep)] = True
return True
self.dep_cache[(node, dep)] = False
return False
def is_transitive_dependency(self, node, dep):
for sd in self.get_dependencies(node):
if self.is_dependency(sd, dep):
return True
return False
def is_direct_dependency(self, node, dep):
return self.is_dependency(node, dep) and\
not self.is_transitive_dependency(node, dep)
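# For example, if A lists deps (B, C) and B lists (C), then C is a dependency
# of A but not a *direct* one -- it is reachable through B, so A's box only
# needs to rest on B's.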
# Okay, okay, fine, I'll start talking about the solver now.
def solve(self):
# The way it works is, each box is represented by four symbolic integers,
# representing the X/Y coordinates of its top-left and bottom-right vertices.
# (Note, however, that because computers are silly, the Y coordinates DECREASE
# as you go UP the tower. Just something to keep in mind. Otherwise we get
# upside-down stalactite-towers.)
svs = {}
solver = z3.Solver()
for node in self.get_nodes():
svs[node] = (
(z3.Int(node+'_x0'), z3.Int(node+'_y0')),
(z3.Int(node+'_x1'), z3.Int(node+'_y1'))
)
# Almost immediately, we need to make some sanity assertions. We want the
# top-left corner to actually be "to the left" and "on top of" the bottom-right
# corner, so we have to tell the solver that.
solver.add(svs[node][0][0] < svs[node][1][0])
solver.add(svs[node][0][1] < svs[node][1][1])
# There's also a bit of logic here to automatically make boxes taller if they
# have a lot of text, so that text doesn't overflow awkwardly. This is janky,
# but it works!
solver.add(
svs[node][1][1] - svs[node][0][1] >=\
(len(self.text[node].split('\\')) - len(self.text[node].split('\\')) // 2)
)
# And finally, we enforce that everything happens in the first quadrant.
solver.add(svs[node][0][0] >= 0)
# Now we can put root (recall, the "ground") literally on the ground!
solver.add(svs[dag.ROOT][0][0] == 0)
solver.add(svs[dag.ROOT][0][1] == 0)
# Up next, we enforce that no boxes intersect. This is done by checking if the
# X and Y ranges are disjoint (at least one needs to be -- but not necessarily
# both!).
def ranges_disjoint(x0min, x0max, x1min, x1max):
return z3.Or(x0min >= x1max, x0max <= x1min)
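# e.g. the ranges [0,2] and [2,5] count as disjoint (boxes may share an edge),
# while [0,3] and [2,5] overlap, so neither disjunct holds.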
for node1 in self.get_nodes():
for node2 in self.get_nodes():
if node1 != node2:
solver.add(
z3.Or(
ranges_disjoint(
svs[node1][0][0],
svs[node1][1][0],
svs[node2][0][0],
svs[node2][1][0]
),
ranges_disjoint(
svs[node1][0][1],
svs[node1][1][1],
svs[node2][0][1],
svs[node2][1][1]
)
)
)
# This is the hard one: for each pair of nodes, it creates an "A is on top of
# B" assertion, and then asserts either it or its negation, depending on
# whether or not B is a direct dependency of A.
for node in self.get_nodes():
for dep in self.get_nodes():
on_top = z3.And(
# When is "A" on top of "B"? There are two conditions:
# First, A's box's floor is directly on B's box's ceiling.
svs[node][1][1] == svs[dep][0][1],
# Second, the boxes have intersecting X ranges.
z3.Not(
ranges_disjoint(
svs[node][0][0], svs[node][1][0],
svs[dep] [0][0], svs[dep] [1][0]
)
)
)
if self.is_direct_dependency(node, dep):
solver.add(on_top)
else:
solver.add(z3.Not(on_top))
# Finally, for the sake of ~aesthetics~, there's a bit of logic to
# automatically minimize the total perimeter of all the blocks. (Why not area,
# you ask? Because area is nonlinear and the solver takes *much* longer to work
# with such constraints!)
def perimeter(node):
return (svs[node][1][0] - svs[node][0][0]) + (svs[node][1][1] - svs[node][0][1])
total_perim = sum([perimeter(node) for node in self.get_nodes()])
# (That's what the loop is for: it keeps asking the solver to "do better" until
# the solver can't do any better and gives up. It may or may not be a metaphor
# for life.)
rects = None
perim_tgt = len(self.get_nodes()) * 4 * 3
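# (Presumably a loose starting bound: 12 units of width-plus-height per box,
# which the loop below keeps tightening until the solver says "unsat".)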
while True:
perim_tgt -= 1
solver.add(total_perim < perim_tgt)
check = solver.check()
if check == z3.sat:
model = solver.model()
rects = []
# I translate the solver output into SVG coordinates using some hardcoded
# scaling factors and randomized fudge factors.
for node in self.get_nodes():
|
# This is the "solver gives up" case
else:
return rects
# This is perhaps the least exciting bit of the whole program: just some silly
# SVG generation routines. There's some logic to get the text wrapping to work,
# but other than that, it's pretty simple (and by "simple" I mean
# "extensible"!).
def render(self, rects, css):
min_x = min([x0 for node, x0, y0, x1, y1 in rects]) - 5
max_x = max([x1 for node, x0, y0, x1, y1 in rects]) + 5
min_y = min([y0 for node, x0, y0, x1, y1 in rects]) - 5
max_y = max([y1 for node, x0, y0, x1, y1 in rects]) + 5
width = max_x - min_x
height = max_y - min_y
out = ""
out += """<svg viewBox="%d %d %d %d" xmlns="http://www.w3.org/2000/svg">""" % (min_x, min_y, width, height)
out += """
<style>
rect {
stroke: hsl(0, 100%, 80%);
fill: hsl(0, 100%, 90%);
}
rect.red {
stroke: hsl(0, 100%, 80%);
fill: hsl(0, 100%, 90%);
}
rect.yellow {
stroke: hsl(60, 100%, 80%);
fill: hsl(60, 100%, 90%);
}
rect.green {
stroke: hsl(120, 100%, 80%);
fill: hsl(120, 100%, 90%);
}
rect.blue {
stroke: hsl(180, 100%, 80%);
fill: hsl(180, 100%, 90%);
}
rect.purple {
stroke: hsl(240, 100%, 80%);
fill: hsl(240, 100%, 90%);
}
rect.pink {
stroke: hsl(300, 100%, 80%);
fill: hsl(300, 100%, 90%);
}
rect.base {
stroke: hsl(60, 100%, 80%);
fill: hsl(60, 100%, 90%);
}
text {
font-family: Garamond, sans-serif;
font-size: 14pt;
}
""" + css + """
</style>
"""
for (node, x0, y0, x1, y1) in rects:
out += """ <rect fill="white" stroke="gray" x="%d" y="%d" width="%d" height="%d" rx="4" ry="4" class="%s"></rect>"""%(
x0, y0,
x1 - x0, y1 - y0 - 1,
self.clss.get(node, 'box-generic')
)
for (node, x0, y0, x1, y1) in rects:
out += """ <text x="%d" y="%d" width="100" class="%s">%s</text>"""%(
x0 + 4, y0,
self.clss.get(node, 'box-generic'),
''.join(["""<tspan x="%d" dy="%d">%s</tspan>"""%(x0 + 4, 20, text) for i, text in enumerate(self.text.get(node, node).split('\\'))])
)
out += """</svg>"""
return out
# And that's it! The rest is just a tiny command-line interface that reads a
# BOX file (with optional CSS) and outputs the SVG rendering.
import sys
d = dag()
d.load_file(open(sys.argv[1]).read())
css = ''
if len(sys.argv) == 3:
with open(sys.argv[2], 'r') as cssf:
css = cssf.read()
rects = d.solve()
if rects is not None:
print d.render(rects, css)
else:
print "You seem to be in dependency hell."
| x0 = model.eval(svs[node][0][0])
y0 = model.eval(svs[node][0][1])
x1 = model.eval(svs[node][1][0])
y1 = model.eval(svs[node][1][1])
import random
x0 = int(str(x0)) * 160 + 10 + random.choice(range(10))
y0 = int(str(y0)) * 50
x1 = int(str(x1)) * 160 - 10 + random.choice(range(10))
y1 = int(str(y1)) * 50
rects.append((node, x0, y0, x1, y1)) | conditional_block |
tower-of-power.py | # This is a bit of an experiment, really: a way to generate "box tower"
# dependency diagrams in the style of the inside cover of Matt Parker's book,
# "Things to Make and Do in the Fourth Dimension." Parker's blocks represent
# prerequisite-knowledge dependencies between his book chapters, but surely the
# same idea could apply to all sorts of dependencies: software modules and
# college course prereqs come to mind as examples.
# After struggling for a few hours with trying to generate such diagrams
# directly, I eventually gave up and wrote a solver-aided program. Hence, this
# program depends on the z3 solver and its Python interface (neither of which
# is hard to install).
import z3
# ...but I'm getting ahead of myself. Really, the story of "how do I draw
# dependency diagrams" begins with the question, "what are dependencies?"
# I think of dependencies as directed acyclic graphs, or DAGs. Each
# chapter/module/course is a node, and an edge from A to B indicates that "A
# depends on B." The reason the graph is acyclic should then be clear: A and B
# can't depend on each other!
# There's actually a lot of literature on drawing diagrams of directed acyclic
# graphs, some of which can be found in the `graphviz' documentation (the
# working title of this program was `Sugiyama's Last Stand').
# For now, though, here's a small DAG class in Python to get started.
class DAGException(Exception):
pass
class dag():
# "root" is a placeholder node: anything with no dependencies secretly depends
# on "root". Why? Because all the blocks at the base of the tower need to sit
# on the same ground level. "root" is the floor.
ROOT = '( root )'
def __init__(self):
self.deps = {} # maps a node to a list of its dependencies
self.clss = {dag.ROOT: 'base'} # maps a node to its CSS class
self.text = {dag.ROOT: ''} # maps a node to its label text
self.dep_cache = {} # this speeds up some algorithms below
self.insert(dag.ROOT, [])
# The following incomprehensible regular expressions define the BOX file format
# which I invented to make it easier to specify DAGs for this project.
def load_line(self, line):
import re
wsop_re = r'\s*'
name_re = r'([\w-]+)'
deps_re = r'\((\s*(?:[\w-]+(?:\s*,\s*[\w-]+)*)?\s*)\)'
clss_re = r'(?:\.([\w-]+))?'
text_re = r':\s*(.*)?'
line_re = re.compile(
wsop_re + name_re +
wsop_re + deps_re +
wsop_re + clss_re +
wsop_re + text_re
)
match = line_re.match(line)
if match:
name = match.group(1)
deps = match.group(2).replace(',', ' ').split()
clss = match.group(3) or 'box-generic'
text = match.group(4)
if len(deps) == 0:
deps = [dag.ROOT]
self.insert(name, deps)
self.clss[name] = clss
self.text[name] = text
elif len(line.split()) == 0 or line[0] == '#':
pass
else:
raise DAGException("Syntax error on line: `" + line + "`")
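# (A worked example of the format, with made-up node names: the line
#   calculus (algebra, geometry) .blue : Intro to\Calculus
# parses as name='calculus', deps=['algebra', 'geometry'], clss='blue', and
# text='Intro to\Calculus', where the backslash later splits the label into
# two lines of text. Note the colon is mandatory; a line with empty parens,
# like `axioms () :`, gets [dag.ROOT] as its dependency list.)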
def load_file(self, text):
lines = text.split('\n')
for line in lines:
self.load_line(line)
# A lot of the invariants on this DAG are maintained by complaining loudly as
# soon as they are violated. So, you have to insert nodes in the correct order.
# Other than that, this insertion routine is unexciting.
def insert(self, name, deps):
if name in self.get_nodes():
raise DAGException("Already added this name to DAG.")
for d in deps:
if d not in self.deps:
raise DAGException("Unknown dependency: %s (for %s)"%(d, name))
self.deps[name] = list(deps)
# This is also unexciting.
def get_nodes(self):
return self.deps.keys()
# This, however, is kind of interesting. Notice that Parker's tower doesn't
# distinguish between "direct dependencies" and "transitive dependencies": if
# A depends on B and C, and B itself depends on C, then the box for A doesn't
# need to sit on the boxes for both B and C (in fact, it can't!). The correct
# drawing is "A on B, B on C", with the understanding that A "of course"
# depends on C as well.
#
# The following predicates sort out this mess, by giving me a way to check if a
# dependency is "direct" or "transitive" in that sense. Transitive dependencies
# can be ignored.
def get_dependencies(self, node):
return self.deps[node]
def is_dependency(self, node, dep):
if (node, dep) in self.dep_cache:
return self.dep_cache[(node, dep)]
shallow_deps = self.get_dependencies(node)
if dep in shallow_deps:
self.dep_cache[(node, dep)] = True
return True
for sd in shallow_deps:
if self.is_dependency(sd, dep):
self.dep_cache[(node, dep)] = True
return True
self.dep_cache[(node, dep)] = False
return False
def is_transitive_dependency(self, node, dep):
|
def is_direct_dependency(self, node, dep):
return self.is_dependency(node, dep) and\
not self.is_transitive_dependency(node, dep)
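# (A worked example with a hypothetical three-node DAG, A -> [B, C] and
# B -> [C]: C is reachable from A through B, so A's dependency on C is
# transitive, and only B counts as direct -- A's box will sit on B's alone.)
# d = dag()
# d.insert('C', []); d.insert('B', ['C']); d.insert('A', ['B', 'C'])
# assert d.is_direct_dependency('A', 'B')
# assert d.is_dependency('A', 'C') and not d.is_direct_dependency('A', 'C')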
# Okay, okay, fine, I'll start talking about the solver now.
def solve(self):
# The way it works is, each box is represented by four symbolic integers,
# representing the X/Y coordinates of its top-left and bottom-right vertices.
# (Note, however, that because computers are silly, the Y coordinates DECREASE
# as you go UP the tower. Just something to keep in mind. Otherwise we get
# upside-down stalactite-towers.)
svs = {}
solver = z3.Solver()
for node in self.get_nodes():
svs[node] = (
(z3.Int(node+'_x0'), z3.Int(node+'_y0')),
(z3.Int(node+'_x1'), z3.Int(node+'_y1'))
)
# Almost immediately, we need to make some sanity assertions. We want the
# top-left corner to actually be "to the left" and "on top of" the bottom-right
# corner, so we have to tell the solver that.
solver.add(svs[node][0][0] < svs[node][1][0])
solver.add(svs[node][0][1] < svs[node][1][1])
# There's also a bit of logic here to automatically make boxes taller if they
# have a lot of text, so that text doesn't overflow awkwardly. This is janky,
# but it works!
solver.add(
svs[node][1][1] - svs[node][0][1] >=\
(len(self.text[node].split('\\')) - len(self.text[node].split('\\')) / 2)
)
# And finally, we enforce that everything happens in the first quadrant.
solver.add(svs[node][0][0] >= 0)
# Now we can put root (recall, the "ground") literally on the ground!
solver.add(svs[dag.ROOT][0][0] == 0)
solver.add(svs[dag.ROOT][0][1] == 0)
# Up next, we enforce that no boxes intersect. This is done by checking if the
# X and Y ranges are disjoint (at least one needs to be -- but not necessarily
# both!).
def ranges_disjoint(x0min, x0max, x1min, x1max):
return z3.Or(x0min >= x1max, x0max <= x1min)
for node1 in self.get_nodes():
for node2 in self.get_nodes():
if node1 != node2:
solver.add(
z3.Or(
ranges_disjoint(
svs[node1][0][0],
svs[node1][1][0],
svs[node2][0][0],
svs[node2][1][0]
),
ranges_disjoint(
svs[node1][0][1],
svs[node1][1][1],
svs[node2][0][1],
svs[node2][1][1]
)
)
)
# This is the hard one: for each pair of nodes, it creates an "A is on top of
# B" assertion, and then asserts either it or its negation, depending on
# whether or not B is a direct dependency of A.
for node in self.get_nodes():
for dep in self.get_nodes():
on_top = z3.And(
# When is "A" on top of "B"? There are two conditions:
# First, A's box's floor is directly on B's box's ceiling.
svs[node][1][1] == svs[dep][0][1],
# Second, the boxes have intersecting X ranges.
z3.Not(
ranges_disjoint(
svs[node][0][0], svs[node][1][0],
svs[dep] [0][0], svs[dep] [1][0]
)
)
)
if self.is_direct_dependency(node, dep):
solver.add(on_top)
else:
solver.add(z3.Not(on_top))
# Finally, for the sake of ~aesthetics~, there's a bit of logic to
# automatically minimize the total perimeter of all the blocks. (Why not area,
# you ask? Because area is nonlinear and the solver takes *much* longer to work
# with such constraints!)
def perimeter(node):
return (svs[node][1][0] - svs[node][0][0]) + (svs[node][1][1] - svs[node][0][1])
total_perim = sum([perimeter(node) for node in self.get_nodes()])
# (That's what the loop is for: it keeps asking the solver to "do better" until
# the solver can't do any better and gives up. It may or may not be a metaphor
# for life.)
rects = None
perim_tgt = len(self.get_nodes()) * 4 * 3
while True:
perim_tgt -= 1
solver.add(total_perim < perim_tgt)
check = solver.check()
if check == z3.sat:
model = solver.model()
rects = []
# I translate the solver output into SVG coordinates using some hardcoded
# scaling factors and randomized fudge factors.
for node in self.get_nodes():
x0 = model.eval(svs[node][0][0])
y0 = model.eval(svs[node][0][1])
x1 = model.eval(svs[node][1][0])
y1 = model.eval(svs[node][1][1])
import random
x0 = int(str(x0)) * 160 + 10 + random.choice(range(10))
y0 = int(str(y0)) * 50
x1 = int(str(x1)) * 160 - 10 + random.choice(range(10))
y1 = int(str(y1)) * 50
rects.append((node, x0, y0, x1, y1))
# This is the "solver gives up" case
else:
return rects
# This is perhaps the least exciting bit of the whole program: just some silly
# SVG generation routines. There's some logic to get the text wrapping to work,
# but other than that, it's pretty simple (and by "simple" I mean
# "extensible"!).
def render(self, rects, css):
min_x = min([x0 for node, x0, y0, x1, y1 in rects]) - 5
max_x = max([x1 for node, x0, y0, x1, y1 in rects]) + 5
min_y = min([y0 for node, x0, y0, x1, y1 in rects]) - 5
max_y = max([y1 for node, x0, y0, x1, y1 in rects]) + 5
width = max_x - min_x
height = max_y - min_y
out = ""
out += """<svg viewBox="%d %d %d %d" xmlns="http://www.w3.org/2000/svg">""" % (min_x, min_y, width, height)
out += """
<style>
rect {
stroke: hsl(0, 100%, 80%);
fill: hsl(0, 100%, 90%);
}
rect.red {
stroke: hsl(0, 100%, 80%);
fill: hsl(0, 100%, 90%);
}
rect.yellow {
stroke: hsl(60, 100%, 80%);
fill: hsl(60, 100%, 90%);
}
rect.green {
stroke: hsl(120, 100%, 80%);
fill: hsl(120, 100%, 90%);
}
rect.blue {
stroke: hsl(180, 100%, 80%);
fill: hsl(180, 100%, 90%);
}
rect.purple {
stroke: hsl(240, 100%, 80%);
fill: hsl(240, 100%, 90%);
}
rect.pink {
stroke: hsl(300, 100%, 80%);
fill: hsl(300, 100%, 90%);
}
rect.base {
stroke: hsl(60, 100%, 80%);
fill: hsl(60, 100%, 90%);
}
text {
font-family: Garamond, sans-serif;
font-size: 14pt;
}
""" + css + """
</style>
"""
for (node, x0, y0, x1, y1) in rects:
out += """ <rect fill="white" stroke="gray" x="%d" y="%d" width="%d" height="%d" rx="4" ry="4" class="%s"></rect>"""%(
x0, y0,
x1 - x0, y1 - y0 - 1,
self.clss.get(node, 'box-generic')
)
for (node, x0, y0, x1, y1) in rects:
out += """ <text x="%d" y="%d" width="100" class="%s">%s</text>"""%(
x0 + 4, y0,
self.clss.get(node, 'box-generic'),
''.join(["""<tspan x="%d" dy="%d">%s</tspan>"""%(x0 + 4, 20, text) for i, text in enumerate(self.text.get(node, node).split('\\'))])
)
out += """</svg>"""
return out
# And that's it! The rest is just a tiny command-line interface that reads a
# BOX file (with optional CSS) and outputs the SVG rendering.
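# (Usage sketch, with hypothetical file names: given a BOX file deps.box
# containing, say,
#   algebra ( ) .green : Algebra
#   calculus (algebra) .blue : Calculus
# the invocation `python tower-of-power.py deps.box style.css > tower.svg`
# writes the rendered tower to tower.svg; the CSS argument is optional.)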
import sys
d = dag()
d.load_file(open(sys.argv[1]).read())
css = ''
if len(sys.argv) == 3:
with open(sys.argv[2], 'r') as cssf:
css = cssf.read()
rects = d.solve()
if rects is not None:
print d.render(rects, css)
else:
print "You seem to be in dependency hell."
| for sd in self.get_dependencies(node):
if self.is_dependency(sd, dep):
return True
return False | identifier_body |
tower-of-power.py | # This is a bit of an experiment, really: a way to generate "box tower"
# dependency diagrams in the style of the inside cover of Matt Parker's book,
# "Things to Make and Do in the Fourth Dimension." Parker's blocks represent
# prerequisite-knowledge dependencies between his book chapters, but surely the
# same idea could apply to all sorts of dependencies: software modules and
# college course prereqs come to mind as examples.
# After struggling for a few hours with trying to generate such diagrams
# directly, I eventually gave up and wrote a solver-aided program. Hence, this
# program depends on the z3 solver and its Python interface (neither of which
# is hard to install).
import z3
# ...but I'm getting ahead of myself. Really, the story of "how do I draw
# dependency diagrams" begins with the question, "what are dependencies?"
# I think of dependencies as directed acyclic graphs, or DAGs. Each
# chapter/module/course is a node, and an edge from A to B indicates that "A
# depends on B." The reason the graph is acyclic should then be clear: A and B
# can't depend on each other!
# There's actually a lot of literature on drawing diagrams of directed acyclic
# graphs, some of which can be found in the `graphviz' documentation (the
# working title of this program was `Sugiyama's Last Stand').
# For now, though, here's a small DAG class in Python to get started.
class DAGException(Exception):
pass
class dag():
# "root" is a placeholder node: anything with no dependencies secretly depends
# on "root". Why? Because all the blocks at the base of the tower need to sit
# on the same ground level. "root" is the floor.
ROOT = '( root )'
def __init__(self):
self.deps = {} # maps a node to a list of its dependencies
self.clss = {dag.ROOT: 'base'} # maps a node to its CSS class
self.text = {dag.ROOT: ''} # maps a node to its label text
self.dep_cache = {} # this speeds up some algorithms below
self.insert(dag.ROOT, [])
# The following incomprehensible regular expressions define the BOX file format
# which I invented to make it easier to specify DAGs for this project.
def load_line(self, line):
import re
wsop_re = r'\s*'
name_re = r'([\w-]+)'
deps_re = r'\((\s*(?:[\w-]+(?:\s*,\s*[\w-]+)*)?\s*)\)'
clss_re = r'(?:\.([\w-]+))?'
text_re = r':\s*(.*)?'
line_re = re.compile(
wsop_re + name_re +
wsop_re + deps_re +
wsop_re + clss_re +
wsop_re + text_re
)
match = line_re.match(line)
if match:
name = match.group(1)
deps = match.group(2).replace(',', ' ').split()
clss = match.group(3) or 'box-generic'
text = match.group(4)
if len(deps) == 0:
deps = [dag.ROOT]
self.insert(name, deps)
self.clss[name] = clss
self.text[name] = text
elif len(line.split()) == 0 or line[0] == '#':
pass
else:
raise DAGException("Syntax error on line: `" + line + "`")
def load_file(self, text):
lines = text.split('\n')
for line in lines:
self.load_line(line)
# A lot of the invariants on this DAG are maintained by complaining loudly as
# soon as they are violated. So, you have to insert nodes in the correct order.
# Other than that, this insertion routine is unexciting.
def insert(self, name, deps):
if name in self.get_nodes():
raise DAGException("Already added this name to DAG.")
for d in deps:
if d not in self.deps:
raise DAGException("Unknown dependency: %s (for %s)"%(d, name))
self.deps[name] = list(deps)
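# (Failure-mode sketch, illustration only: because dependencies must already
# exist, inserting a node ahead of its dependency raises immediately.)
# d = dag()
# d.insert('A', ['B']) # DAGException: Unknown dependency: B (for A)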
# This is also unexciting.
def get_nodes(self):
return self.deps.keys()
# This, however, is kind of interesting. Notice that Parker's tower doesn't
# distinguish between "direct dependencies" and "transitive dependencies": if
# A depends on B and C, and B itself depends on C, then the box for A doesn't
# need to sit on the boxes for both B and C (in fact, it can't!). The correct
# drawing is "A on B, B on C", with the understanding that A "of course"
# depends on C as well.
#
# The following predicates sort out this mess, by giving me a way to check if a
# dependency is "direct" or "transitive" in that sense. Transitive dependencies
# can be ignored.
def get_dependencies(self, node):
return self.deps[node]
def is_dependency(self, node, dep):
if (node, dep) in self.dep_cache:
return self.dep_cache[(node, dep)]
shallow_deps = self.get_dependencies(node)
if dep in shallow_deps:
self.dep_cache[(node, dep)] = True
return True
for sd in shallow_deps:
if self.is_dependency(sd, dep):
self.dep_cache[(node, dep)] = True
return True
self.dep_cache[(node, dep)] = False
return False
def is_transitive_dependency(self, node, dep):
for sd in self.get_dependencies(node):
if self.is_dependency(sd, dep):
return True
return False
def is_direct_dependency(self, node, dep):
return self.is_dependency(node, dep) and\
not self.is_transitive_dependency(node, dep)
# Okay, okay, fine, I'll start talking about the solver now.
def solve(self):
# The way it works is, each box is represented by four symbolic integers,
# representing the X/Y coordinates of its top-left and bottom-right vertices.
# (Note, however, that because computers are silly, the Y coordinates DECREASE
# as you go UP the tower. Just something to keep in mind. Otherwise we get
# upside-down stalactite-towers.)
svs = {}
solver = z3.Solver()
for node in self.get_nodes():
svs[node] = (
(z3.Int(node+'_x0'), z3.Int(node+'_y0')),
(z3.Int(node+'_x1'), z3.Int(node+'_y1'))
)
# Almost immediately, we need to make some sanity assertions. We want the
# top-left corner to actually be "to the left" and "on top of" the bottom-right
# corner, so we have to tell the solver that.
solver.add(svs[node][0][0] < svs[node][1][0])
solver.add(svs[node][0][1] < svs[node][1][1])
# There's also a bit of logic here to automatically make boxes taller if they
# have a lot of text, so that text doesn't overflow awkwardly. This is janky,
# but it works!
solver.add(
svs[node][1][1] - svs[node][0][1] >=\
(len(self.text[node].split('\\')) - len(self.text[node].split('\\')) / 2)
)
# And finally, we enforce that everything happens in the first quadrant.
solver.add(svs[node][0][0] >= 0)
# Now we can put root (recall, the "ground") literally on the ground!
solver.add(svs[dag.ROOT][0][0] == 0)
solver.add(svs[dag.ROOT][0][1] == 0)
# Up next, we enforce that no boxes intersect. This is done by checking if the
# X and Y ranges are disjoint (at least one needs to be -- but not necessarily
# both!).
def | (x0min, x0max, x1min, x1max):
return z3.Or(x0min >= x1max, x0max <= x1min)
for node1 in self.get_nodes():
for node2 in self.get_nodes():
if node1 != node2:
solver.add(
z3.Or(
ranges_disjoint(
svs[node1][0][0],
svs[node1][1][0],
svs[node2][0][0],
svs[node2][1][0]
),
ranges_disjoint(
svs[node1][0][1],
svs[node1][1][1],
svs[node2][0][1],
svs[node2][1][1]
)
)
)
# This is the hard one: for each pair of nodes, it creates an "A is on top of
# B" assertion, and then asserts either it or its negation, depending on
# whether or not B is a direct dependency of A.
for node in self.get_nodes():
for dep in self.get_nodes():
on_top = z3.And(
# When is "A" on top of "B"? There are two conditions:
# First, A's box's floor is directly on B's box's ceiling.
svs[node][1][1] == svs[dep][0][1],
# Second, the boxes have intersecting X ranges.
z3.Not(
ranges_disjoint(
svs[node][0][0], svs[node][1][0],
svs[dep] [0][0], svs[dep] [1][0]
)
)
)
if self.is_direct_dependency(node, dep):
solver.add(on_top)
else:
solver.add(z3.Not(on_top))
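# (A concrete reading of the constraint above, with made-up coordinates: take
# node's box as corners (0, 1)-(2, 3) and dep's box as (0, 3)-(2, 5). Node's
# floor y = 3 equals dep's ceiling y = 3, and the X ranges both span [0, 2),
# so "node is on top of dep" holds -- remember Y grows downward, so the
# smaller-y box is the one drawn higher up.)
# assert 3 == 3 and not (0 >= 2 or 2 <= 0) # floors meet, X ranges intersect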
# Finally, for the sake of ~aesthetics~, there's a bit of logic to
# automatically minimize the total perimeter of all the blocks. (Why not area,
# you ask? Because area is nonlinear and the solver takes *much* longer to work
# with such constraints!)
def perimeter(node):
return (svs[node][1][0] - svs[node][0][0]) + (svs[node][1][1] - svs[node][0][1])
total_perim = sum([perimeter(node) for node in self.get_nodes()])
# (That's what the loop is for: it keeps asking the solver to "do better" until
# the solver can't do any better and gives up. It may or may not be a metaphor
# for life.)
rects = None
perim_tgt = len(self.get_nodes()) * 4 * 3
while True:
perim_tgt -= 1
solver.add(total_perim < perim_tgt)
check = solver.check()
if check == z3.sat:
model = solver.model()
rects = []
# I translate the solver output into SVG coordinates using some hardcoded
# scaling factors and randomized fudge factors.
for node in self.get_nodes():
x0 = model.eval(svs[node][0][0])
y0 = model.eval(svs[node][0][1])
x1 = model.eval(svs[node][1][0])
y1 = model.eval(svs[node][1][1])
import random
x0 = int(str(x0)) * 160 + 10 + random.choice(range(10))
y0 = int(str(y0)) * 50
x1 = int(str(x1)) * 160 - 10 + random.choice(range(10))
y1 = int(str(y1)) * 50
rects.append((node, x0, y0, x1, y1))
# This is the "solver gives up" case
else:
return rects
# This is perhaps the least exciting bit of the whole program: just some silly
# SVG generation routines. There's some logic to get the text wrapping to work,
# but other than that, it's pretty simple (and by "simple" I mean
# "extensible"!).
def render(self, rects, css):
min_x = min([x0 for node, x0, y0, x1, y1 in rects]) - 5
max_x = max([x1 for node, x0, y0, x1, y1 in rects]) + 5
min_y = min([y0 for node, x0, y0, x1, y1 in rects]) - 5
max_y = max([y1 for node, x0, y0, x1, y1 in rects]) + 5
width = max_x - min_x
height = max_y - min_y
out = ""
out += """<svg viewBox="%d %d %d %d" xmlns="http://www.w3.org/2000/svg">""" % (min_x, min_y, width, height)
out += """
<style>
rect {
stroke: hsl(0, 100%, 80%);
fill: hsl(0, 100%, 90%);
}
rect.red {
stroke: hsl(0, 100%, 80%);
fill: hsl(0, 100%, 90%);
}
rect.yellow {
stroke: hsl(60, 100%, 80%);
fill: hsl(60, 100%, 90%);
}
rect.green {
stroke: hsl(120, 100%, 80%);
fill: hsl(120, 100%, 90%);
}
rect.blue {
stroke: hsl(180, 100%, 80%);
fill: hsl(180, 100%, 90%);
}
rect.purple {
stroke: hsl(240, 100%, 80%);
fill: hsl(240, 100%, 90%);
}
rect.pink {
stroke: hsl(300, 100%, 80%);
fill: hsl(300, 100%, 90%);
}
rect.base {
stroke: hsl(60, 100%, 80%);
fill: hsl(60, 100%, 90%);
}
text {
font-family: Garamond, sans-serif;
font-size: 14pt;
}
""" + css + """
</style>
"""
for (node, x0, y0, x1, y1) in rects:
out += """ <rect fill="white" stroke="gray" x="%d" y="%d" width="%d" height="%d" rx="4" ry="4" class="%s"></rect>"""%(
x0, y0,
x1 - x0, y1 - y0 - 1,
self.clss.get(node, 'box-generic')
)
for (node, x0, y0, x1, y1) in rects:
out += """ <text x="%d" y="%d" width="100" class="%s">%s</text>"""%(
x0 + 4, y0,
self.clss.get(node, 'box-generic'),
''.join(["""<tspan x="%d" dy="%d">%s</tspan>"""%(x0 + 4, 20, text) for i, text in enumerate(self.text.get(node, node).split('\\'))])
)
out += """</svg>"""
return out
# And that's it! The rest is just a tiny command-line interface that reads a
# BOX file (with optional CSS) and outputs the SVG rendering.
import sys
d = dag()
d.load_file(open(sys.argv[1]).read())
css = ''
if len(sys.argv) == 3:
with open(sys.argv[2], 'r') as cssf:
css = cssf.read()
rects = d.solve()
if rects is not None:
print d.render(rects, css)
else:
print "You seem to be in dependency hell."
| ranges_disjoint | identifier_name |
tower-of-power.py | # This is a bit of an experiment, really: a way to generate "box tower"
# dependency diagrams in the style of the inside cover of Matt Parker's book,
# "Things to Make and Do in the Fourth Dimension." Parker's blocks represent
# prerequisite-knowledge dependencies between his book chapters, but surely the
# same idea could apply to all sorts of dependencies: software modules and
# college course prereqs come to mind as examples.
# After struggling for a few hours with trying to generate such diagrams
# directly, I eventually gave up and wrote a solver-aided program. Hence, this
# program depends on the z3 solver and its Python interface (neither of which
# is hard to install).
import z3
# ...but I'm getting ahead of myself. Really, the story of "how do I draw
# dependency diagrams" begins with the question, "what are dependencies?"
# I think of dependencies as directed acyclic graphs, or DAGs. Each
# chapter/module/course is a node, and an edge from A to B indicates that "A
# depends on B." The reason the graph is acyclic should then be clear: A and B
# can't depend on each other!
# There's actually a lot of literature on drawing diagrams of directed acyclic
# graphs, some of which can be found in the `graphviz' documentation (the
# working title of this program was `Sugiyama's Last Stand').
# For now, though, here's a small DAG class in Python to get started.
class DAGException(Exception):
pass
class dag():
# "root" is a placeholder node: anything with no dependencies secretly depends
# on "root". Why? Because all the blocks at the base of the tower need to sit
# on the same ground level. "root" is the floor.
ROOT = '( root )'
def __init__(self):
self.deps = {} # maps a node to a list of its dependencies
self.clss = {dag.ROOT: 'base'} # maps a node to its CSS class
self.text = {dag.ROOT: ''} # maps a node to its label text
self.dep_cache = {} # this speeds up some algorithms below
self.insert(dag.ROOT, [])
# The following incomprehensible regular expressions define the BOX file format
# which I invented to make it easier to specify DAGs for this project.
def load_line(self, line):
import re
wsop_re = r'\s*'
name_re = r'([\w-]+)'
deps_re = r'\((\s*(?:[\w-]+(?:\s*,\s*[\w-]+)*)?\s*)\)'
clss_re = r'(?:\.([\w-]+))?'
text_re = r':\s*(.*)?'
line_re = re.compile(
wsop_re + name_re +
wsop_re + deps_re +
wsop_re + clss_re +
wsop_re + text_re
)
match = line_re.match(line)
if match:
name = match.group(1)
deps = match.group(2).replace(',', ' ').split()
clss = match.group(3) or 'box-generic'
text = match.group(4)
if len(deps) == 0:
deps = [dag.ROOT]
self.insert(name, deps)
self.clss[name] = clss
self.text[name] = text
elif len(line.split()) == 0 or line[0] == '#':
pass
else:
raise DAGException("Syntax error on line: `" + line + "`")
def load_file(self, text):
lines = text.split('\n')
for line in lines:
self.load_line(line)
# A lot of the invariants on this DAG are maintained by complaining loudly as
# soon as they are violated. So, you have to insert nodes in the correct order.
# Other than that, this insertion routine is unexciting.
def insert(self, name, deps):
if name in self.get_nodes():
raise DAGException("Already added this name to DAG.")
for d in deps:
if d not in self.deps:
raise DAGException("Unknown dependency: %s (for %s)"%(d, name))
self.deps[name] = list(deps)
# This is also unexciting.
def get_nodes(self):
return self.deps.keys()
# This, however, is kind of interesting. Notice that Parker's tower doesn't
# distinguish between "direct dependencies" and "transitive dependencies": if
# A depends on B and C, and B itself depends on C, then the box for A doesn't
# need to sit on the boxes for both B and C (in fact, it can't!). The correct
# drawing is "A on B, B on C", with the understanding that A "of course"
# depends on C as well.
#
# The following predicates sort out this mess, by giving me a way to check if a
# dependency is "direct" or "transitive" in that sense. Transitive dependencies
# can be ignored.
def get_dependencies(self, node):
return self.deps[node]
def is_dependency(self, node, dep):
if (node, dep) in self.dep_cache:
return self.dep_cache[(node, dep)]
shallow_deps = self.get_dependencies(node)
if dep in shallow_deps:
self.dep_cache[(node, dep)] = True
return True
for sd in shallow_deps:
if self.is_dependency(sd, dep):
self.dep_cache[(node, dep)] = True
return True
self.dep_cache[(node, dep)] = False
return False
def is_transitive_dependency(self, node, dep):
for sd in self.get_dependencies(node):
if self.is_dependency(sd, dep):
return True
return False
def is_direct_dependency(self, node, dep):
return self.is_dependency(node, dep) and\
not self.is_transitive_dependency(node, dep)
# Okay, okay, fine, I'll start talking about the solver now.
def solve(self):
# The way it works is, each box is represented by four symbolic integers,
# representing the X/Y coordinates of its top-left and bottom-right vertices.
# (Note, however, that because computers are silly, the Y coordinates DECREASE
# as you go UP the tower. Just something to keep in mind. Otherwise we get |
svs = {}
solver = z3.Solver()
for node in self.get_nodes():
svs[node] = (
(z3.Int(node+'_x0'), z3.Int(node+'_y0')),
(z3.Int(node+'_x1'), z3.Int(node+'_y1'))
)
# Almost immediately, we need to make some sanity assertions. We want the
# top-left corner to actually be "to the left" and "on top of" the bottom-right
# corner, so we have to tell the solver that.
solver.add(svs[node][0][0] < svs[node][1][0])
solver.add(svs[node][0][1] < svs[node][1][1])
# There's also a bit of logic here to automatically make boxes taller if they
# have a lot of text, so that text doesn't overflow awkwardly. This is janky,
# but it works!
solver.add(
svs[node][1][1] - svs[node][0][1] >=\
(len(self.text[node].split('\\')) - len(self.text[node].split('\\')) / 2)
)
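# (Side arithmetic, as a sketch: with Python 2 integer division, n - n / 2
# equals ceil(n / 2), so a label with three backslash-separated lines forces
# a box at least 2 units tall.)
# for n in range(1, 6):
#     assert n - n / 2 == (n + 1) // 2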
# And finally, we enforce that everything happens in the first quadrant.
solver.add(svs[node][0][0] >= 0)
# Now we can put root (recall, the "ground") literally on the ground!
solver.add(svs[dag.ROOT][0][0] == 0)
solver.add(svs[dag.ROOT][0][1] == 0)
# Up next, we enforce that no boxes intersect. This is done by checking if the
# X and Y ranges are disjoint (at least one needs to be -- but not necessarily
# both!).
def ranges_disjoint(x0min, x0max, x1min, x1max):
return z3.Or(x0min >= x1max, x0max <= x1min)
for node1 in self.get_nodes():
for node2 in self.get_nodes():
if node1 != node2:
solver.add(
z3.Or(
ranges_disjoint(
svs[node1][0][0],
svs[node1][1][0],
svs[node2][0][0],
svs[node2][1][0]
),
ranges_disjoint(
svs[node1][0][1],
svs[node1][1][1],
svs[node2][0][1],
svs[node2][1][1]
)
)
)
# This is the hard one: for each pair of nodes, it creates an "A is on top of
# B" assertion, and then asserts either it or its negation, depending on
# whether or not B is a direct dependency of A.
for node in self.get_nodes():
for dep in self.get_nodes():
on_top = z3.And(
# When is "A" on top of "B"? There are two conditions:
# First, A's box's floor is directly on B's box's ceiling.
svs[node][1][1] == svs[dep][0][1],
# Second, the boxes have intersecting X ranges.
z3.Not(
ranges_disjoint(
svs[node][0][0], svs[node][1][0],
svs[dep] [0][0], svs[dep] [1][0]
)
)
)
if self.is_direct_dependency(node, dep):
solver.add(on_top)
else:
solver.add(z3.Not(on_top))
# Finally, for the sake of ~aesthetics~, there's a bit of logic to
# automatically minimize the total perimeter of all the blocks. (Why not area,
# you ask? Because area is nonlinear and the solver takes *much* longer to work
# with such constraints!)
def perimeter(node):
return (svs[node][1][0] - svs[node][0][0]) + (svs[node][1][1] - svs[node][0][1])
total_perim = sum([perimeter(node) for node in self.get_nodes()])
# (That's what the loop is for: it keeps asking the solver to "do better" until
# the solver can't do any better and gives up. It may or may not be a metaphor
# for life.)
rects = None
perim_tgt = len(self.get_nodes()) * 4 * 3
while True:
perim_tgt -= 1
solver.add(total_perim < perim_tgt)
check = solver.check()
if check == z3.sat:
model = solver.model()
rects = []
# I translate the solver output into SVG coordinates using some hardcoded
# scaling factors and randomized fudge factors.
for node in self.get_nodes():
x0 = model.eval(svs[node][0][0])
y0 = model.eval(svs[node][0][1])
x1 = model.eval(svs[node][1][0])
y1 = model.eval(svs[node][1][1])
import random
x0 = int(str(x0)) * 160 + 10 + random.choice(range(10))
y0 = int(str(y0)) * 50
x1 = int(str(x1)) * 160 - 10 + random.choice(range(10))
y1 = int(str(y1)) * 50
rects.append((node, x0, y0, x1, y1))
# This is the "solver gives up" case
else:
return rects
# This is perhaps the least exciting bit of the whole program: just some silly
# SVG generation routines. There's some logic to get the text wrapping to work,
# but other than that, it's pretty simple (and by "simple" I mean
# "extensible"!).
def render(self, rects, css):
min_x = min([x0 for node, x0, y0, x1, y1 in rects]) - 5
max_x = max([x1 for node, x0, y0, x1, y1 in rects]) + 5
min_y = min([y0 for node, x0, y0, x1, y1 in rects]) - 5
max_y = max([y1 for node, x0, y0, x1, y1 in rects]) + 5
width = max_x - min_x
height = max_y - min_y
out = ""
out += """<svg viewBox="%d %d %d %d" xmlns="http://www.w3.org/2000/svg">""" % (min_x, min_y, width, height)
out += """
<style>
rect {
stroke: hsl(0, 100%, 80%);
fill: hsl(0, 100%, 90%);
}
rect.red {
stroke: hsl(0, 100%, 80%);
fill: hsl(0, 100%, 90%);
}
rect.yellow {
stroke: hsl(60, 100%, 80%);
fill: hsl(60, 100%, 90%);
}
rect.green {
stroke: hsl(120, 100%, 80%);
fill: hsl(120, 100%, 90%);
}
rect.blue {
stroke: hsl(180, 100%, 80%);
fill: hsl(180, 100%, 90%);
}
rect.purple {
stroke: hsl(240, 100%, 80%);
fill: hsl(240, 100%, 90%);
}
rect.pink {
stroke: hsl(300, 100%, 80%);
fill: hsl(300, 100%, 90%);
}
rect.base {
stroke: hsl(60, 100%, 80%);
fill: hsl(60, 100%, 90%);
}
text {
font-family: Garamond, sans-serif;
font-size: 14pt;
}
""" + css + """
</style>
"""
for (node, x0, y0, x1, y1) in rects:
out += """ <rect fill="white" stroke="gray" x="%d" y="%d" width="%d" height="%d" rx="4" ry="4" class="%s"></rect>"""%(
x0, y0,
x1 - x0, y1 - y0 - 1,
self.clss.get(node, 'box-generic')
)
for (node, x0, y0, x1, y1) in rects:
out += """ <text x="%d" y="%d" width="100" class="%s">%s</text>"""%(
x0 + 4, y0,
self.clss.get(node, 'box-generic'),
''.join(["""<tspan x="%d" dy="%d">%s</tspan>"""%(x0 + 4, 20, text) for i, text in enumerate(self.text.get(node, node).split('\\'))])
)
out += """</svg>"""
return out
# And that's it! The rest is just a tiny command-line interface that reads a
# BOX file (with optional CSS) and outputs the SVG rendering.
import sys
d = dag()
d.load_file(open(sys.argv[1]).read())
css = ''
if len(sys.argv) == 3:
with open(sys.argv[2], 'r') as cssf:
css = cssf.read()
rects = d.solve()
if rects is not None:
print d.render(rects, css)
else:
print "You seem to be in dependency hell." | # upside-down stalactite-towers.) | random_line_split |
pool.rs | //! `LoggedPool` structure for logging raw tasks events.
#![macro_use]
// we can now use performance counters to tag subgraphs
#[cfg(feature = "perf")]
use perfcnt::linux::PerfCounterBuilderLinux;
#[cfg(feature = "perf")]
use perfcnt::linux::{CacheId, CacheOpId, CacheOpResultId, HardwareEventType, SoftwareEventType};
#[cfg(feature = "perf")]
use perfcnt::{AbstractPerfCounter, PerfCounter};
use crate::log::RunLog;
use crate::raw_events::{now, RayonEvent, TaskId};
use crate::storage::Storage;
use crate::Comparator;
use crate::{scope, scope_fifo, Scope, ScopeFifo};
use rayon;
use rayon::FnContext;
use std::cell::RefCell;
use std::sync::atomic::{AtomicUsize, Ordering};
use std::sync::{Arc, Mutex};
/// We use an atomic usize to generate unique ids for tasks.
pub(crate) static NEXT_TASK_ID: AtomicUsize = AtomicUsize::new(0);
/// We use an atomic usize to generate unique ids for iterators.
pub(crate) static NEXT_ITERATOR_ID: AtomicUsize = AtomicUsize::new(0);
/// get an id for a new task and increment global tasks counter.
pub fn next_task_id() -> TaskId {
NEXT_TASK_ID.fetch_add(1, Ordering::SeqCst)
}
/// get an id for a new iterator and increment global iterators counter.
pub fn next_iterator_id() -> usize {
NEXT_ITERATOR_ID.fetch_add(1, Ordering::SeqCst)
}
thread_local!(pub(crate) static LOGS: RefCell<Arc<Storage<RayonEvent>>> = RefCell::new(Arc::new(Storage::new())));
/// Add given event to logs of current thread.
pub(crate) fn log(event: RayonEvent) {
LOGS.with(|l| l.borrow().push(event))
}
/// Logs several events at once (with decreased cost).
macro_rules! logs {
($($x:expr ), +) => {
$crate::pool::LOGS.with(|l| {let thread_logs = l.borrow();
$(
thread_logs.push($x);
)*
})
}
}
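// (Reading the macro above: a call like `logs!(ev_a, ev_b)` borrows the
// thread-local storage once and pushes both events under that single borrow,
// which is the "decreased cost" compared with two separate `log` calls.
// Sketch of the expansion, for illustration:)
//
// crate::pool::LOGS.with(|l| {
//     let thread_logs = l.borrow();
//     thread_logs.push(ev_a);
//     thread_logs.push(ev_b);
// });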
/// We tag all the tasks that op makes as one subgraph.
///
/// `work_type` is a str tag and `work_amount` an integer specifying the expected algorithmic cost
/// (should not be zero).
/// As we know the work and execution time we can compute an execution speed for each subgraph.
/// When different graphs are tagged with the same tag we can then compare their speeds.
/// Slow graphs will see their displayed colors darkened.
/// You can also hover on tasks to display their speeds.
///
/// Example:
///
/// ```
/// use rayon_logs::{join, subgraph, ThreadPoolBuilder};
///
/// fn manual_max(slice: &[u32]) -> u32 {
/// if slice.len() < 200_000 {
/// subgraph("max", slice.len(), || slice.iter().max().cloned().unwrap())
/// } else {
/// let middle = slice.len() / 2;
/// let (left, right) = slice.split_at(middle);
/// let (mleft, mright) = join(|| manual_max(left), || manual_max(right));
/// std::cmp::max(mleft, mright)
/// }
/// }
///
/// let v: Vec<u32> = (0..2_000_000).collect();
/// let pool = ThreadPoolBuilder::new()
/// .num_threads(2)
/// .build()
/// .expect("building pool failed");
/// let max = pool.install(|| manual_max(&v));
/// assert_eq!(max, v.last().cloned().unwrap());
/// ```
///
/// <div>
/// <img
/// src="http://www-id.imag.fr/Laboratoire/Membres/Wagner_Frederic/images/downgraded_manual_max.svg"/>
/// </div>
///
/// Using it we obtain the graph below.
/// On the real file you can hover over tasks and use javascript to toggle the
/// display of the different tags, but that is disabled with rustdoc, so I
/// downgraded the file for this display.
pub fn subgraph<OP, R>(work_type: &'static str, work_amount: usize, op: OP) -> R
where
OP: FnOnce() -> R,
{
custom_subgraph(work_type, || (), |_| work_amount, op)
}
/// Same as the subgraph function, but we can log a hardware event
///
/// (from: https://github.com/gz/rust-perfcnt)
///
/// Events:
///
/// * ```HardwareEventType::CPUCycles```
///
/// * ```HardwareEventType::Instructions```
///
/// * ```HardwareEventType::CacheReferences```
///
/// * ```HardwareEventType::CacheMisses```
///
/// * ```HardwareEventType::BranchInstructions```
///
/// * ```HardwareEventType::BranchMisses```
///
/// * ```HardwareEventType::BusCycles```
///
/// * ```HardwareEventType::StalledCyclesFrontend```
///
/// * ```HardwareEventType::StalledCyclesBackend```
///
/// * ```HardwareEventType::RefCPUCycles```
///
/// You will have to import the events from rayon_logs
/// and to use the nightly version of the compiler.
/// Note that it is **freaking slow**: 1 full second to set up the counter.
#[cfg(feature = "perf")]
pub fn subgraph_hardware_event<OP, R>(tag: &'static str, event: HardwareEventType, op: OP) -> R
where
OP: FnOnce() -> R,
{
custom_subgraph(
tag,
|| {
let pc: PerfCounter = PerfCounterBuilderLinux::from_hardware_event(event)
.exclude_idle()
.exclude_kernel()
.finish()
.expect("Could not create counter");
pc.start().expect("Can not start the counter");
pc
},
|mut pc| {
pc.stop().expect("Can not stop the counter");
let counted_value = pc.read().unwrap() as usize;
pc.reset().expect("Can not reset the counter");
counted_value
},
op,
)
}
/// Same as the subgraph function, but we can log a software event
///
/// (from: https://github.com/gz/rust-perfcnt)
///
/// Events:
///
/// * ```SoftwareEventType::CpuClock```
///
/// * ```SoftwareEventType::TaskClock```
///
/// * ```SoftwareEventType::PageFaults```
///
/// * ```SoftwareEventType::CacheMisses```
///
/// * ```SoftwareEventType::ContextSwitches```
///
/// * ```SoftwareEventType::CpuMigrations```
///
/// * ```SoftwareEventType::PageFaultsMin```
///
/// * ```SoftwareEventType::PageFaultsMaj```
///
/// * ```SoftwareEventType::AlignmentFaults```
///
/// * ```SoftwareEventType::EmulationFaults```
///
/// You will have to import the events from rayon_logs
/// and to use the nightly version of the compiler
#[cfg(feature = "perf")]
pub fn subgraph_software_event<OP, R>(tag: &'static str, event: SoftwareEventType, op: OP) -> R
where
OP: FnOnce() -> R,
{
//TODO: avoid code duplication by abstracting over events
custom_subgraph(
tag,
|| {
let pc: PerfCounter = PerfCounterBuilderLinux::from_software_event(event)
.exclude_idle()
.exclude_kernel()
.finish()
.expect("Could not create counter");
pc.start().expect("Can not start the counter");
pc
},
|mut pc| {
pc.stop().expect("Can not stop the counter");
let counted_value = pc.read().unwrap() as usize;
pc.reset().expect("Can not reset the counter");
counted_value
},
op,
)
}
/// Same as the subgraph function, but we can log a cache event
///
/// (from: https://github.com/gz/rust-perfcnt)
///
/// CacheId:
///
/// * ```CacheId::L1D```
///
/// * ```CacheId::L1I```
///
/// * ```CacheId::LL```
///
/// * ```CacheId::DTLB```
///
/// * ```CacheId::ITLB```
///
/// * ```CacheId::BPU```
///
/// * ```CacheId::Node```
///
/// CacheOpId:
///
/// * ```CacheOpId::Read```
///
/// * ```CacheOpId::Write```
///
/// * ```CacheOpId::Prefetch```
///
/// CacheOpResultId:
///
/// * ```CacheOpResultId::Access```
///
/// * ```CacheOpResultId::Miss```
///
///
/// You will have to import the events from rayon_logs
/// and to use the nightly version of the compiler
///
#[cfg(feature = "perf")]
pub fn subgraph_cache_event<OP, R>(
tag: &'static str,
cache_id: CacheId,
cache_op_id: CacheOpId,
cache_op_result_id: CacheOpResultId,
op: OP,
) -> R
where
OP: FnOnce() -> R,
{
//TODO: avoid code duplication by abstracting over events
custom_subgraph(
tag,
|| {
let pc: PerfCounter = PerfCounterBuilderLinux::from_cache_event(
cache_id,
cache_op_id,
cache_op_result_id,
)
.exclude_idle()
.exclude_kernel()
.finish()
.expect("Could not create counter");
pc.start().expect("Can not start the counter");
pc
},
|mut pc| {
pc.stop().expect("Can not stop the counter");
let counted_value = pc.read().unwrap() as usize;
pc.reset().expect("Can not reset the counter");
counted_value
},
op,
)
}
/// Tag a subgraph with a custom value.
/// The start function is called just before running the graph and produces an S.
/// The end function is called just after running the graph, receives this S, and
/// produces a usize which will then be stored for display.
pub fn custom_subgraph<OP, R, START, END, S>(tag: &'static str, start: START, end: END, op: OP) -> R
where
OP: FnOnce() -> R,
START: FnOnce() -> S,
END: FnOnce(S) -> usize,
{
let s = start();
start_subgraph(tag);
let r = op();
let measured_value = end(s);
end_subgraph(tag, measured_value);
r
}
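// (A usage sketch, not taken from the crate's documentation: `custom_subgraph`
// with a plain wall-clock measurement, so the stored usize is the elapsed time
// in microseconds. Only std is assumed; the tag and closure are made up.)
//
// use std::time::Instant;
// let sum = custom_subgraph(
//     "timed_sum",
//     Instant::now,
//     |start| start.elapsed().as_micros() as usize,
//     || (0u64..1_000).sum::<u64>(),
// );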
/// Stop current task (virtually) and start a subgraph.
/// You most likely don't need to call this function directly but `subgraph` instead.
pub fn start_subgraph(tag: &'static str) {
let subgraph_start_task_id = next_task_id();
logs!(
// log child's work and dependencies.
RayonEvent::Child(subgraph_start_task_id),
// end current task
RayonEvent::TaskEnd(now()),
// execute full sequential task
RayonEvent::TaskStart(subgraph_start_task_id, now()),
RayonEvent::SubgraphStart(tag)
);
}
/// Stop current task (virtually) and end a subgraph.
/// You most likely don't need to call this function directly but `subgraph` instead.
pub fn end_subgraph(tag: &'static str, measured_value: usize) {
let continuation_task_id = next_task_id();
logs!(
RayonEvent::SubgraphEnd(tag, measured_value),
RayonEvent::Child(continuation_task_id),
RayonEvent::TaskEnd(now()),
// start continuation task
RayonEvent::TaskStart(continuation_task_id, now())
);
}
/// Identical to `join`, except that the closures have a parameter
/// that provides context for the way the closure has been called,
/// especially indicating whether they're executing on a different
/// thread than where `join_context` was called. This will occur if
/// the second job is stolen by a different thread, or if
/// `join_context` was called from outside the thread pool to begin
/// with.
pub fn join_context<A, B, RA, RB>(oper_a: A, oper_b: B) -> (RA, RB)
where
A: FnOnce(FnContext) -> RA + Send,
B: FnOnce(FnContext) -> RB + Send,
RA: Send,
RB: Send,
{
let id_c = next_task_id();
let id_a = next_task_id();
let ca = |c| {
log(RayonEvent::TaskStart(id_a, now()));
let result = oper_a(c);
logs!(RayonEvent::Child(id_c), RayonEvent::TaskEnd(now()));
result
};
let id_b = next_task_id();
let cb = |c| {
log(RayonEvent::TaskStart(id_b, now()));
let result = oper_b(c);
logs!(RayonEvent::Child(id_c), RayonEvent::TaskEnd(now()));
result
};
logs!(
RayonEvent::Child(id_a),
RayonEvent::Child(id_b),
RayonEvent::TaskEnd(now())
);
let r = rayon::join_context(ca, cb);
log(RayonEvent::TaskStart(id_c, now()));
r
}
/// Takes two closures and *potentially* runs them in parallel. It
/// returns a pair of the results from those closures.
///
/// Conceptually, calling `join()` is similar to spawning two threads,
/// one executing each of the two closures. However, the
/// implementation is quite different and incurs very low
/// overhead. The underlying technique is called "work stealing": the
/// Rayon runtime uses a fixed pool of worker threads and attempts to
/// only execute code in parallel when there are idle CPUs to handle
/// it.
///
/// When `join` is called from outside the thread pool, the calling
/// thread will block while the closures execute in the pool. When
/// `join` is called within the pool, the calling thread still actively
/// participates in the thread pool. It will begin by executing closure
/// A (on the current thread). While it is doing that, it will advertise
/// closure B as being available for other threads to execute. Once closure A
/// has completed, the current thread will try to execute closure B;
/// if however closure B has been stolen, then it will look for other work
/// while waiting for the thief to fully execute closure B. (This is the
/// typical work-stealing strategy).
///
/// # Examples
///
/// This example uses join to perform a quick-sort (note this is not a
/// particularly optimized implementation: if you **actually** want to
/// sort for real, you should prefer [the `par_sort` method] offered
/// by Rayon).
///
/// [the `par_sort` method]: ../rayon/slice/trait.ParallelSliceMut.html#method.par_sort
///
/// ```rust
/// let mut v = vec![5, 1, 8, 22, 0, 44];
/// quick_sort(&mut v);
/// assert_eq!(v, vec![0, 1, 5, 8, 22, 44]);
///
/// fn quick_sort<T:PartialOrd+Send>(v: &mut [T]) {
/// if v.len() > 1 {
/// let mid = partition(v);
/// let (lo, hi) = v.split_at_mut(mid);
/// rayon::join(|| quick_sort(lo),
/// || quick_sort(hi));
/// }
/// }
///
/// // Partition rearranges all items `<=` to the pivot
/// // item (arbitrary selected to be the last item in the slice)
/// // to the first half of the slice. It then returns the
/// // "dividing point" where the pivot is placed.
/// fn partition<T:PartialOrd+Send>(v: &mut [T]) -> usize {
/// let pivot = v.len() - 1;
/// let mut i = 0;
/// for j in 0..pivot {
/// if v[j] <= v[pivot] {
/// v.swap(i, j);
/// i += 1;
/// }
/// }
/// v.swap(i, pivot);
/// i
/// }
/// ```
///
/// # Warning about blocking I/O
///
/// The assumption is that the closures given to `join()` are
/// CPU-bound tasks that do not perform I/O or other blocking
/// operations. If you do perform I/O, and that I/O should block
/// (e.g., waiting for a network request), the overall performance may
/// be poor. Moreover, if you cause one closure to be blocked waiting
/// on another (for example, using a channel), that could lead to a
/// deadlock.
///
/// # Panics
///
/// No matter what happens, both closures will always be executed. If
/// a single closure panics, whether it be the first or second
/// closure, that panic will be propagated and hence `join()` will
/// panic with the same panic value. If both closures panic, `join()`
/// will panic with the panic value from the first closure.
pub fn join<A, B, RA, RB>(oper_a: A, oper_b: B) -> (RA, RB)
where
A: FnOnce() -> RA + Send,
B: FnOnce() -> RB + Send,
RA: Send,
RB: Send,
{
let id_c = next_task_id();
let id_a = next_task_id();
let ca = || {
log(RayonEvent::TaskStart(id_a, now()));
let result = oper_a();
logs!(RayonEvent::Child(id_c), RayonEvent::TaskEnd(now()));
result
};
let id_b = next_task_id();
let cb = || {
log(RayonEvent::TaskStart(id_b, now()));
let result = oper_b();
logs!(RayonEvent::Child(id_c), RayonEvent::TaskEnd(now()));
result
};
logs!(
RayonEvent::Child(id_a),
RayonEvent::Child(id_b),
RayonEvent::TaskEnd(now())
);
let r = rayon::join(ca, cb);
log(RayonEvent::TaskStart(id_c, now()));
r
}
// small global counter to increment file names
static INSTALL_COUNT: AtomicUsize = AtomicUsize::new(0);
/// We wrap rayon's pool into our own struct to overload the install method.
pub struct ThreadPool {
pub(crate) logs: Arc<Mutex<Vec<Arc<Storage<RayonEvent>>>>>,
pub(crate) pool: rayon::ThreadPool,
}
impl ThreadPool {
/// Reset all logs and counters to initial condition.
fn reset(&self) {
NEXT_TASK_ID.store(0, Ordering::SeqCst);
NEXT_ITERATOR_ID.store(0, Ordering::SeqCst);
let logs = &*self.logs.lock().unwrap(); // oh yeah baby
for log in logs {
log.clear();
}
}
/// Execute given closure in the thread pool, logging its task as the initial one. | where
OP: FnOnce() -> R + Send,
R: Send,
{
self.reset();
let id = next_task_id();
let c = || {
log(RayonEvent::TaskStart(id, now()));
let result = op();
log(RayonEvent::TaskEnd(now()));
result
};
let start = now();
let r = self.pool.install(c);
let log = RunLog::new(
NEXT_TASK_ID.load(Ordering::Relaxed),
NEXT_ITERATOR_ID.load(Ordering::Relaxed),
&*self.logs.lock().unwrap(),
start,
);
(r, log)
}
/// Creates a scope that executes within this thread-pool.
/// Equivalent to `self.install(|| scope(...))`.
///
/// See also: [the `scope()` function][scope].
///
/// [scope]: fn.scope.html
pub fn scope<'scope, OP, R>(&self, op: OP) -> R
where
OP: for<'s> FnOnce(&'s Scope<'scope>) -> R + 'scope + Send,
R: Send,
{
self.install(|| scope(op))
}
/// Like `scope` but fifo.
pub fn scope_fifo<'scope, OP, R>(&self, op: OP) -> R
where
OP: for<'s> FnOnce(&'s ScopeFifo<'scope>) -> R + 'scope + Send,
R: Send,
{
self.install(|| scope_fifo(op))
}
/// Execute given closure in the thread pool, logging its task as the initial one.
/// After running, we save a json file with filename being an incremental counter.
pub fn install<OP, R>(&self, op: OP) -> R
where
OP: FnOnce() -> R + Send,
R: Send,
{
let (r, log) = self.logging_install(op);
log.save(format!(
"log_{}.json",
INSTALL_COUNT.fetch_add(1, Ordering::SeqCst)
))
.expect("saving json failed");
r
}
/// This function simply returns a comparator that allows us to add algorithms for comparison.
pub fn compare(&self) -> Comparator {
Comparator::new(self)
}
} | /// After running, we post-process the logs and return a `RunLog` together with the closure's
/// result.
pub fn logging_install<OP, R>(&self, op: OP) -> (R, RunLog) | random_line_split |
pool.rs | //! `LoggedPool` structure for logging raw tasks events.
#![macro_use]
// we can now use performance counters to tag subgraphs
#[cfg(feature = "perf")]
use perfcnt::linux::PerfCounterBuilderLinux;
#[cfg(feature = "perf")]
use perfcnt::linux::{CacheId, CacheOpId, CacheOpResultId, HardwareEventType, SoftwareEventType};
#[cfg(feature = "perf")]
use perfcnt::{AbstractPerfCounter, PerfCounter};
use crate::log::RunLog;
use crate::raw_events::{now, RayonEvent, TaskId};
use crate::storage::Storage;
use crate::Comparator;
use crate::{scope, scope_fifo, Scope, ScopeFifo};
use rayon;
use rayon::FnContext;
use std::cell::RefCell;
use std::sync::atomic::{AtomicUsize, Ordering};
use std::sync::{Arc, Mutex};
/// We use an atomic usize to generate unique ids for tasks.
pub(crate) static NEXT_TASK_ID: AtomicUsize = AtomicUsize::new(0);
/// We use an atomic usize to generate unique ids for iterators.
pub(crate) static NEXT_ITERATOR_ID: AtomicUsize = AtomicUsize::new(0);
/// get an id for a new task and increment global tasks counter.
pub fn next_task_id() -> TaskId {
NEXT_TASK_ID.fetch_add(1, Ordering::SeqCst)
}
/// get an id for a new iterator and increment global iterators counter.
pub fn next_iterator_id() -> usize {
NEXT_ITERATOR_ID.fetch_add(1, Ordering::SeqCst)
}
thread_local!(pub(crate) static LOGS: RefCell<Arc<Storage<RayonEvent>>> = RefCell::new(Arc::new(Storage::new())));
/// Add given event to logs of current thread.
pub(crate) fn log(event: RayonEvent) {
LOGS.with(|l| l.borrow().push(event))
}
/// Logs several events at once (with decreased cost).
macro_rules! logs {
($($x:expr ), +) => {
$crate::pool::LOGS.with(|l| {let thread_logs = l.borrow();
$(
thread_logs.push($x);
)*
})
}
}
/// We tag all the tasks that op makes as one subgraph.
///
/// `work_type` is a str tag and `work_amount` an integer specifying the expected algorithmic cost
/// (should not be zero).
/// As we know the work and execution time we can compute an execution speed for each subgraph.
/// When different graphs are tagged with the same tag we can then compare their speeds.
/// Slow graphs will see their displayed colors darkened.
/// You can also hover on tasks to display their speeds.
///
/// Example:
///
/// ```
/// use rayon_logs::{join, subgraph, ThreadPoolBuilder};
///
/// fn manual_max(slice: &[u32]) -> u32 {
/// if slice.len() < 200_000 {
/// subgraph("max", slice.len(), || slice.iter().max().cloned().unwrap())
/// } else {
/// let middle = slice.len() / 2;
/// let (left, right) = slice.split_at(middle);
/// let (mleft, mright) = join(|| manual_max(left), || manual_max(right));
/// std::cmp::max(mleft, mright)
/// }
/// }
///
/// let v: Vec<u32> = (0..2_000_000).collect();
/// let pool = ThreadPoolBuilder::new()
/// .num_threads(2)
/// .build()
/// .expect("building pool failed");
/// let max = pool.install(|| manual_max(&v));
/// assert_eq!(max, v.last().cloned().unwrap());
/// ```
///
/// <div>
/// <img
/// src="http://www-id.imag.fr/Laboratoire/Membres/Wagner_Frederic/images/downgraded_manual_max.svg"/>
/// </div>
///
/// Using it we obtain the graph below.
/// On the real file you can hover over tasks and use javascript to toggle the
/// display of the different tags, but that is disabled with rustdoc, so I
/// downgraded the file for this display.
pub fn subgraph<OP, R>(work_type: &'static str, work_amount: usize, op: OP) -> R
where
OP: FnOnce() -> R,
{
custom_subgraph(work_type, || (), |_| work_amount, op)
}
/// Same as the subgraph function, but we can log a hardware event
///
/// (from: https://github.com/gz/rust-perfcnt)
///
/// Events:
///
/// * ```HardwareEventType::CPUCycles```
///
/// * ```HardwareEventType::Instructions```
///
/// * ```HardwareEventType::CacheReferences```
///
/// * ```HardwareEventType::CacheMisses```
///
/// * ```HardwareEventType::BranchInstructions```
///
/// * ```HardwareEventType::BranchMisses```
///
/// * ```HardwareEventType::BusCycles```
///
/// * ```HardwareEventType::StalledCyclesFrontend```
///
/// * ```HardwareEventType::StalledCyclesBackend```
///
/// * ```HardwareEventType::RefCPUCycles```
///
/// You will have to import the events from rayon_logs
/// and to use the nightly version of the compiler.
/// Note that it is **freaking slow**: 1 full second to set up the counter.
#[cfg(feature = "perf")]
pub fn subgraph_hardware_event<OP, R>(tag: &'static str, event: HardwareEventType, op: OP) -> R
where
OP: FnOnce() -> R,
{
custom_subgraph(
tag,
|| {
let pc: PerfCounter = PerfCounterBuilderLinux::from_hardware_event(event)
.exclude_idle()
.exclude_kernel()
.finish()
.expect("Could not create counter");
pc.start().expect("Can not start the counter");
pc
},
|mut pc| {
pc.stop().expect("Can not stop the counter");
let counted_value = pc.read().unwrap() as usize;
pc.reset().expect("Can not reset the counter");
counted_value
},
op,
)
}
/// Same as the subgraph function, but we can log a software event
///
/// (from: https://github.com/gz/rust-perfcnt)
///
/// Events:
///
/// * ```SoftwareEventType::CpuClock```
///
/// * ```SoftwareEventType::TaskClock```
///
/// * ```SoftwareEventType::PageFaults```
///
/// * ```SoftwareEventType::CacheMisses```
///
/// * ```SoftwareEventType::ContextSwitches```
///
/// * ```SoftwareEventType::CpuMigrations```
///
/// * ```SoftwareEventType::PageFaultsMin```
///
/// * ```SoftwareEventType::PageFaultsMaj```
///
/// * ```SoftwareEventType::AlignmentFaults```
///
/// * ```SoftwareEventType::EmulationFaults```
///
/// You will have to import the events from rayon_logs
/// and to use the nightly version of the compiler
#[cfg(feature = "perf")]
pub fn subgraph_software_event<OP, R>(tag: &'static str, event: SoftwareEventType, op: OP) -> R
where
OP: FnOnce() -> R,
{
//TODO: avoid code duplication by abstracting over events
custom_subgraph(
tag,
|| {
let pc: PerfCounter = PerfCounterBuilderLinux::from_software_event(event)
.exclude_idle()
.exclude_kernel()
.finish()
.expect("Could not create counter");
pc.start().expect("Can not start the counter");
pc
},
|mut pc| {
pc.stop().expect("Can not stop the counter");
let counted_value = pc.read().unwrap() as usize;
pc.reset().expect("Can not reset the counter");
counted_value
},
op,
)
}
/// Same as the subgraph function, but we can log a cache event
///
/// (from: https://github.com/gz/rust-perfcnt)
///
/// CacheId:
///
/// * ```CacheId::L1D```
///
/// * ```CacheId::L1I```
///
/// * ```CacheId::LL```
///
/// * ```CacheId::DTLB```
///
/// * ```CacheId::ITLB```
///
/// * ```CacheId::BPU```
///
/// * ```CacheId::Node```
///
/// CacheOpId:
///
/// * ```CacheOpId::Read```
///
/// * ```CacheOpId::Write```
///
/// * ```CacheOpId::Prefetch```
///
/// CacheOpResultId:
///
/// * ```CacheOpResultId::Access```
///
/// * ```CacheOpResultId::Miss```
///
///
/// You will have to import the events from rayon_logs
/// and to use the nightly version of the compiler
///
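/// Example (an illustrative sketch, not from the original docs; requires the
/// `perf` feature, a nightly compiler and a Linux kernel with perf support):
///
/// ```ignore
/// use rayon_logs::{subgraph_cache_event, CacheId, CacheOpId, CacheOpResultId};
///
/// // Tag the subgraph with the number of L1 data-cache read misses.
/// let total: u64 = subgraph_cache_event(
///     "sum_l1d_misses",
///     CacheId::L1D,
///     CacheOpId::Read,
///     CacheOpResultId::Miss,
///     || (0..1_000_000u64).sum(),
/// );
/// ```
///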
#[cfg(feature = "perf")]
pub fn subgraph_cache_event<OP, R>(
tag: &'static str,
cache_id: CacheId,
cache_op_id: CacheOpId,
cache_op_result_id: CacheOpResultId,
op: OP,
) -> R
where
OP: FnOnce() -> R,
{
//TODO: avoid code duplication by abstracting over events
custom_subgraph(
tag,
|| {
let pc: PerfCounter = PerfCounterBuilderLinux::from_cache_event(
cache_id,
cache_op_id,
cache_op_result_id,
)
.exclude_idle()
.exclude_kernel()
.finish()
.expect("Could not create counter");
pc.start().expect("Can not start the counter");
pc
},
|mut pc| {
pc.stop().expect("Can not stop the counter");
let counted_value = pc.read().unwrap() as usize;
pc.reset().expect("Can not reset the counter");
counted_value
},
op,
)
}
/// Tag a subgraph with a custom value.
/// The start function will be called just before running the graph and produce an S.
/// The end function will be called just after running the graph on this S and produce a usize
/// which will then be stored for display.
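///
/// Example (an illustrative sketch; the tag and closures are hypothetical, and it
/// assumes `custom_subgraph` is re-exported at the crate root like `subgraph`):
///
/// ```
/// use rayon_logs::custom_subgraph;
/// use std::time::Instant;
///
/// // Store the elapsed microseconds of the region as the displayed value.
/// let total: u64 = custom_subgraph(
///     "timed_sum",
///     Instant::now,
///     |start| start.elapsed().as_micros() as usize,
///     || (0..1_000u64).sum(),
/// );
/// assert_eq!(total, 499_500);
/// ```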
pub fn custom_subgraph<OP, R, START, END, S>(tag: &'static str, start: START, end: END, op: OP) -> R
where
OP: FnOnce() -> R,
START: FnOnce() -> S,
END: FnOnce(S) -> usize,
{
let s = start();
start_subgraph(tag);
let r = op();
let measured_value = end(s);
end_subgraph(tag, measured_value);
r
}
/// Stop current task (virtually) and start a subgraph.
/// You most likely don't need to call this function directly but `subgraph` instead.
pub fn start_subgraph(tag: &'static str) {
let subgraph_start_task_id = next_task_id();
logs!(
// log child's work and dependencies.
RayonEvent::Child(subgraph_start_task_id),
// end current task
RayonEvent::TaskEnd(now()),
// execute full sequential task
RayonEvent::TaskStart(subgraph_start_task_id, now()),
RayonEvent::SubgraphStart(tag)
);
}
/// Stop current task (virtually) and end a subgraph.
/// You most likely don't need to call this function directly but `subgraph` instead.
pub fn end_subgraph(tag: &'static str, measured_value: usize) |
/// Identical to `join`, except that the closures have a parameter
/// that provides context for the way the closure has been called,
/// especially indicating whether they're executing on a different
/// thread than where `join_context` was called. This will occur if
/// the second job is stolen by a different thread, or if
/// `join_context` was called from outside the thread pool to begin
/// with.
pub fn join_context<A, B, RA, RB>(oper_a: A, oper_b: B) -> (RA, RB)
where
A: FnOnce(FnContext) -> RA + Send,
B: FnOnce(FnContext) -> RB + Send,
RA: Send,
RB: Send,
{
let id_c = next_task_id();
let id_a = next_task_id();
let ca = |c| {
log(RayonEvent::TaskStart(id_a, now()));
let result = oper_a(c);
logs!(RayonEvent::Child(id_c), RayonEvent::TaskEnd(now()));
result
};
let id_b = next_task_id();
let cb = |c| {
log(RayonEvent::TaskStart(id_b, now()));
let result = oper_b(c);
logs!(RayonEvent::Child(id_c), RayonEvent::TaskEnd(now()));
result
};
logs!(
RayonEvent::Child(id_a),
RayonEvent::Child(id_b),
RayonEvent::TaskEnd(now())
);
let r = rayon::join_context(ca, cb);
log(RayonEvent::TaskStart(id_c, now()));
r
}
/// Takes two closures and *potentially* runs them in parallel. It
/// returns a pair of the results from those closures.
///
/// Conceptually, calling `join()` is similar to spawning two threads,
/// one executing each of the two closures. However, the
/// implementation is quite different and incurs very low
/// overhead. The underlying technique is called "work stealing": the
/// Rayon runtime uses a fixed pool of worker threads and attempts to
/// only execute code in parallel when there are idle CPUs to handle
/// it.
///
/// When `join` is called from outside the thread pool, the calling
/// thread will block while the closures execute in the pool. When
/// `join` is called within the pool, the calling thread still actively
/// participates in the thread pool. It will begin by executing closure
/// A (on the current thread). While it is doing that, it will advertise
/// closure B as being available for other threads to execute. Once closure A
/// has completed, the current thread will try to execute closure B;
/// if however closure B has been stolen, then it will look for other work
/// while waiting for the thief to fully execute closure B. (This is the
/// typical work-stealing strategy).
///
/// # Examples
///
/// This example uses join to perform a quick-sort (note this is not a
/// particularly optimized implementation: if you **actually** want to
/// sort for real, you should prefer [the `par_sort` method] offered
/// by Rayon).
///
/// [the `par_sort` method]: ../rayon/slice/trait.ParallelSliceMut.html#method.par_sort
///
/// ```rust
/// let mut v = vec![5, 1, 8, 22, 0, 44];
/// quick_sort(&mut v);
/// assert_eq!(v, vec![0, 1, 5, 8, 22, 44]);
///
/// fn quick_sort<T:PartialOrd+Send>(v: &mut [T]) {
/// if v.len() > 1 {
/// let mid = partition(v);
/// let (lo, hi) = v.split_at_mut(mid);
/// rayon::join(|| quick_sort(lo),
/// || quick_sort(hi));
/// }
/// }
///
/// // Partition rearranges all items `<=` to the pivot
/// // item (arbitrarily selected to be the last item in the slice)
/// // to the first half of the slice. It then returns the
/// // "dividing point" where the pivot is placed.
/// fn partition<T:PartialOrd+Send>(v: &mut [T]) -> usize {
/// let pivot = v.len() - 1;
/// let mut i = 0;
/// for j in 0..pivot {
/// if v[j] <= v[pivot] {
/// v.swap(i, j);
/// i += 1;
/// }
/// }
/// v.swap(i, pivot);
/// i
/// }
/// ```
///
/// # Warning about blocking I/O
///
/// The assumption is that the closures given to `join()` are
/// CPU-bound tasks that do not perform I/O or other blocking
/// operations. If you do perform I/O, and that I/O should block
/// (e.g., waiting for a network request), the overall performance may
/// be poor. Moreover, if you cause one closure to be blocked waiting
/// on another (for example, using a channel), that could lead to a
/// deadlock.
///
/// # Panics
///
/// No matter what happens, both closures will always be executed. If
/// a single closure panics, whether it be the first or second
/// closure, that panic will be propagated and hence `join()` will
/// panic with the same panic value. If both closures panic, `join()`
/// will panic with the panic value from the first closure.
pub fn join<A, B, RA, RB>(oper_a: A, oper_b: B) -> (RA, RB)
where
A: FnOnce() -> RA + Send,
B: FnOnce() -> RB + Send,
RA: Send,
RB: Send,
{
let id_c = next_task_id();
let id_a = next_task_id();
let ca = || {
log(RayonEvent::TaskStart(id_a, now()));
let result = oper_a();
logs!(RayonEvent::Child(id_c), RayonEvent::TaskEnd(now()));
result
};
let id_b = next_task_id();
let cb = || {
log(RayonEvent::TaskStart(id_b, now()));
let result = oper_b();
logs!(RayonEvent::Child(id_c), RayonEvent::TaskEnd(now()));
result
};
logs!(
RayonEvent::Child(id_a),
RayonEvent::Child(id_b),
RayonEvent::TaskEnd(now())
);
let r = rayon::join(ca, cb);
log(RayonEvent::TaskStart(id_c, now()));
r
}
// small global counter to increment file names
static INSTALL_COUNT: AtomicUsize = AtomicUsize::new(0);
/// We wrap rayon's pool into our own struct to overload the install method.
pub struct ThreadPool {
pub(crate) logs: Arc<Mutex<Vec<Arc<Storage<RayonEvent>>>>>,
pub(crate) pool: rayon::ThreadPool,
}
impl ThreadPool {
/// Reset all logs and counters to initial condition.
fn reset(&self) {
NEXT_TASK_ID.store(0, Ordering::SeqCst);
NEXT_ITERATOR_ID.store(0, Ordering::SeqCst);
let logs = &*self.logs.lock().unwrap(); // oh yeah baby
for log in logs {
log.clear();
}
}
    /// Execute given closure in the thread pool, logging its task as the initial one.
/// After running, we post-process the logs and return a `RunLog` together with the closure's
/// result.
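    ///
    /// Example (a sketch; `ThreadPoolBuilder` is the crate's re-export used in the
    /// docs above, and the file name is arbitrary):
    ///
    /// ```no_run
    /// use rayon_logs::ThreadPoolBuilder;
    ///
    /// let pool = ThreadPoolBuilder::new()
    ///     .num_threads(2)
    ///     .build()
    ///     .expect("building pool failed");
    /// let (sum, log) = pool.logging_install(|| (0..100u32).sum::<u32>());
    /// assert_eq!(sum, 4950);
    /// log.save(String::from("sum_run.json")).expect("saving json failed");
    /// ```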
pub fn logging_install<OP, R>(&self, op: OP) -> (R, RunLog)
where
OP: FnOnce() -> R + Send,
R: Send,
{
self.reset();
let id = next_task_id();
let c = || {
log(RayonEvent::TaskStart(id, now()));
let result = op();
log(RayonEvent::TaskEnd(now()));
result
};
let start = now();
let r = self.pool.install(c);
let log = RunLog::new(
NEXT_TASK_ID.load(Ordering::Relaxed),
NEXT_ITERATOR_ID.load(Ordering::Relaxed),
&*self.logs.lock().unwrap(),
start,
);
(r, log)
}
/// Creates a scope that executes within this thread-pool.
/// Equivalent to `self.install(|| scope(...))`.
///
/// See also: [the `scope()` function][scope].
///
/// [scope]: fn.scope.html
pub fn scope<'scope, OP, R>(&self, op: OP) -> R
where
OP: for<'s> FnOnce(&'s Scope<'scope>) -> R + 'scope + Send,
R: Send,
{
self.install(|| scope(op))
}
/// Like `scope` but fifo.
pub fn scope_fifo<'scope, OP, R>(&self, op: OP) -> R
where
OP: for<'s> FnOnce(&'s ScopeFifo<'scope>) -> R + 'scope + Send,
R: Send,
{
self.install(|| scope_fifo(op))
}
    /// Execute given closure in the thread pool, logging its task as the initial one.
/// After running, we save a json file with filename being an incremental counter.
pub fn install<OP, R>(&self, op: OP) -> R
where
OP: FnOnce() -> R + Send,
R: Send,
{
let (r, log) = self.logging_install(op);
log.save(format!(
"log_{}.json",
INSTALL_COUNT.fetch_add(1, Ordering::SeqCst)
))
.expect("saving json failed");
r
}
    /// This function simply returns a comparator that allows us to add algorithms for comparison.
pub fn compare(&self) -> Comparator {
Comparator::new(self)
}
}
| {
let continuation_task_id = next_task_id();
logs!(
RayonEvent::SubgraphEnd(tag, measured_value),
RayonEvent::Child(continuation_task_id),
RayonEvent::TaskEnd(now()),
// start continuation task
RayonEvent::TaskStart(continuation_task_id, now())
);
} | identifier_body |
pool.rs | //! `LoggedPool` structure for logging raw tasks events.
#![macro_use]
// we can now use performance counters to tag subgraphs
#[cfg(feature = "perf")]
use perfcnt::linux::PerfCounterBuilderLinux;
#[cfg(feature = "perf")]
use perfcnt::linux::{CacheId, CacheOpId, CacheOpResultId, HardwareEventType, SoftwareEventType};
#[cfg(feature = "perf")]
use perfcnt::{AbstractPerfCounter, PerfCounter};
use crate::log::RunLog;
use crate::raw_events::{now, RayonEvent, TaskId};
use crate::storage::Storage;
use crate::Comparator;
use crate::{scope, scope_fifo, Scope, ScopeFifo};
use rayon;
use rayon::FnContext;
use std::cell::RefCell;
use std::sync::atomic::{AtomicUsize, Ordering};
use std::sync::{Arc, Mutex};
/// We use an atomic usize to generate unique ids for tasks.
pub(crate) static NEXT_TASK_ID: AtomicUsize = AtomicUsize::new(0);
/// We use an atomic usize to generate unique ids for iterators.
pub(crate) static NEXT_ITERATOR_ID: AtomicUsize = AtomicUsize::new(0);
/// get an id for a new task and increment global tasks counter.
pub fn next_task_id() -> TaskId {
NEXT_TASK_ID.fetch_add(1, Ordering::SeqCst)
}
/// get an id for a new iterator and increment global iterators counter.
pub fn next_iterator_id() -> usize {
NEXT_ITERATOR_ID.fetch_add(1, Ordering::SeqCst)
}
thread_local!(pub(crate) static LOGS: RefCell<Arc<Storage<RayonEvent>>> = RefCell::new(Arc::new(Storage::new())));
/// Add given event to logs of current thread.
pub(crate) fn log(event: RayonEvent) {
LOGS.with(|l| l.borrow().push(event))
}
/// Logs several events at once (with decreased cost).
macro_rules! logs {
($($x:expr ), +) => {
$crate::pool::LOGS.with(|l| {let thread_logs = l.borrow();
$(
thread_logs.push($x);
)*
})
}
}
/// We tag all the tasks that op makes as one subgraph.
///
/// `work_type` is a str tag and `work_amount` an integer specifying the expected algorithmic cost
/// (should not be zero).
/// As we know the work and execution time we can compute an execution speed for each subgraph.
/// When different graphs are tagged with the same tag we can then compare their speeds.
/// Slow graphs will see their displayed colors darkened.
/// You can also hover on tasks to display their speeds.
///
/// Example:
///
/// ```
/// use rayon_logs::{join, subgraph, ThreadPoolBuilder};
///
/// fn manual_max(slice: &[u32]) -> u32 {
/// if slice.len() < 200_000 {
/// subgraph("max", slice.len(), || slice.iter().max().cloned().unwrap())
/// } else {
/// let middle = slice.len() / 2;
/// let (left, right) = slice.split_at(middle);
/// let (mleft, mright) = join(|| manual_max(left), || manual_max(right));
/// std::cmp::max(mleft, mright)
/// }
/// }
///
/// let v: Vec<u32> = (0..2_000_000).collect();
/// let pool = ThreadPoolBuilder::new()
/// .num_threads(2)
/// .build()
/// .expect("building pool failed");
/// let max = pool.install(|| manual_max(&v));
/// assert_eq!(max, v.last().cloned().unwrap());
/// ```
///
/// <div>
/// <img
/// src="http://www-id.imag.fr/Laboratoire/Membres/Wagner_Frederic/images/downgraded_manual_max.svg"/>
/// </div>
///
/// Using it we obtain the graph above.
/// On the real file you can hover and use javascript to toggle the display of the
/// different tags, but this is disabled with rustdoc so I downgraded the file for this display.
pub fn subgraph<OP, R>(work_type: &'static str, work_amount: usize, op: OP) -> R
where
OP: FnOnce() -> R,
{
custom_subgraph(work_type, || (), |_| work_amount, op)
}
/// Same as the subgraph function, but we can log a hardware event
///
/// (from: https://github.com/gz/rust-perfcnt)
///
/// Events:
///
/// * ```HardwareEventType::CPUCycles```
///
/// * ```HardwareEventType::Instructions```
///
/// * ```HardwareEventType::CacheReferences```
///
/// * ```HardwareEventType::CacheMisses```
///
/// * ```HardwareEventType::BranchInstructions```
///
/// * ```HardwareEventType::BranchMisses```
///
/// * ```HardwareEventType::BusCycles```
///
/// * ```HardwareEventType::StalledCyclesFrontend```
///
/// * ```HardwareEventType::StalledCyclesBackend```
///
/// * ```HardwareEventType::RefCPUCycles```
///
/// You will have to import the events from rayon_logs
/// and to use the nightly version of the compiler.
/// Note that it is **freaking slow**: 1 full second to set up the counter.
#[cfg(feature = "perf")]
pub fn subgraph_hardware_event<OP, R>(tag: &'static str, event: HardwareEventType, op: OP) -> R
where
OP: FnOnce() -> R,
{
custom_subgraph(
tag,
|| {
let pc: PerfCounter = PerfCounterBuilderLinux::from_hardware_event(event)
.exclude_idle()
.exclude_kernel()
.finish()
.expect("Could not create counter");
pc.start().expect("Can not start the counter");
pc
},
|mut pc| {
pc.stop().expect("Can not stop the counter");
let counted_value = pc.read().unwrap() as usize;
pc.reset().expect("Can not reset the counter");
counted_value
},
op,
)
}
/// Same as the subgraph function, but we can log a software event
///
/// (from: https://github.com/gz/rust-perfcnt)
///
/// Events:
///
/// * ```SoftwareEventType::CpuClock```
///
/// * ```SoftwareEventType::TaskClock```
///
/// * ```SoftwareEventType::PageFaults```
///
/// * ```SoftwareEventType::CacheMisses```
///
/// * ```SoftwareEventType::ContextSwitches```
///
/// * ```SoftwareEventType::CpuMigrations```
///
/// * ```SoftwareEventType::PageFaultsMin```
///
/// * ```SoftwareEventType::PageFaultsMaj```
///
/// * ```SoftwareEventType::AlignmentFaults```
///
/// * ```SoftwareEventType::EmulationFaults```
///
/// You will have to import the events from rayon_logs
/// and to use the nightly version of the compiler
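///
/// Example (an illustrative sketch, not from the original docs; requires the
/// `perf` feature, a nightly compiler and a Linux kernel with perf support):
///
/// ```ignore
/// use rayon_logs::{subgraph_software_event, SoftwareEventType};
///
/// // Tag the subgraph with the number of page faults it triggered.
/// let total: u64 = subgraph_software_event(
///     "sum_faults",
///     SoftwareEventType::PageFaults,
///     || (0..1_000_000u64).sum(),
/// );
/// ```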
#[cfg(feature = "perf")]
pub fn subgraph_software_event<OP, R>(tag: &'static str, event: SoftwareEventType, op: OP) -> R
where
OP: FnOnce() -> R,
{
//TODO: avoid code duplication by abstracting over events
custom_subgraph(
tag,
|| {
let pc: PerfCounter = PerfCounterBuilderLinux::from_software_event(event)
.exclude_idle()
.exclude_kernel()
.finish()
.expect("Could not create counter");
pc.start().expect("Can not start the counter");
pc
},
|mut pc| {
pc.stop().expect("Can not stop the counter");
let counted_value = pc.read().unwrap() as usize;
pc.reset().expect("Can not reset the counter");
counted_value
},
op,
)
}
/// Same as the subgraph function, but we can log a cache event
///
/// (from: https://github.com/gz/rust-perfcnt)
///
/// CacheId:
///
/// * ```CacheId::L1D```
///
/// * ```CacheId::L1I```
///
/// * ```CacheId::LL```
///
/// * ```CacheId::DTLB```
///
/// * ```CacheId::ITLB```
///
/// * ```CacheId::BPU```
///
/// * ```CacheId::Node```
///
/// CacheOpId:
///
/// * ```CacheOpId::Read```
///
/// * ```CacheOpId::Write```
///
/// * ```CacheOpId::Prefetch```
///
/// CacheOpResultId:
///
/// * ```CacheOpResultId::Access```
///
/// * ```CacheOpResultId::Miss```
///
///
/// You will have to import the events from rayon_logs
/// and to use the nightly version of the compiler
///
#[cfg(feature = "perf")]
pub fn subgraph_cache_event<OP, R>(
tag: &'static str,
cache_id: CacheId,
cache_op_id: CacheOpId,
cache_op_result_id: CacheOpResultId,
op: OP,
) -> R
where
OP: FnOnce() -> R,
{
//TODO: avoid code duplication by abstracting over events
custom_subgraph(
tag,
|| {
let pc: PerfCounter = PerfCounterBuilderLinux::from_cache_event(
cache_id,
cache_op_id,
cache_op_result_id,
)
.exclude_idle()
.exclude_kernel()
.finish()
.expect("Could not create counter");
pc.start().expect("Can not start the counter");
pc
},
|mut pc| {
pc.stop().expect("Can not stop the counter");
let counted_value = pc.read().unwrap() as usize;
pc.reset().expect("Can not reset the counter");
counted_value
},
op,
)
}
/// Tag a subgraph with a custom value.
/// The start function will be called just before running the graph and produce an S.
/// The end function will be called just after running the graph on this S and produce a usize
/// which will then be stored for display.
pub fn custom_subgraph<OP, R, START, END, S>(tag: &'static str, start: START, end: END, op: OP) -> R
where
OP: FnOnce() -> R,
START: FnOnce() -> S,
END: FnOnce(S) -> usize,
{
let s = start();
start_subgraph(tag);
let r = op();
let measured_value = end(s);
end_subgraph(tag, measured_value);
r
}
/// Stop current task (virtually) and start a subgraph.
/// You most likely don't need to call this function directly but `subgraph` instead.
pub fn start_subgraph(tag: &'static str) {
let subgraph_start_task_id = next_task_id();
logs!(
// log child's work and dependencies.
RayonEvent::Child(subgraph_start_task_id),
// end current task
RayonEvent::TaskEnd(now()),
// execute full sequential task
RayonEvent::TaskStart(subgraph_start_task_id, now()),
RayonEvent::SubgraphStart(tag)
);
}
/// Stop current task (virtually) and end a subgraph.
/// You most likely don't need to call this function directly but `subgraph` instead.
pub fn end_subgraph(tag: &'static str, measured_value: usize) {
let continuation_task_id = next_task_id();
logs!(
RayonEvent::SubgraphEnd(tag, measured_value),
RayonEvent::Child(continuation_task_id),
RayonEvent::TaskEnd(now()),
// start continuation task
RayonEvent::TaskStart(continuation_task_id, now())
);
}
/// Identical to `join`, except that the closures have a parameter
/// that provides context for the way the closure has been called,
/// especially indicating whether they're executing on a different
/// thread than where `join_context` was called. This will occur if
/// the second job is stolen by a different thread, or if
/// `join_context` was called from outside the thread pool to begin
/// with.
pub fn join_context<A, B, RA, RB>(oper_a: A, oper_b: B) -> (RA, RB)
where
A: FnOnce(FnContext) -> RA + Send,
B: FnOnce(FnContext) -> RB + Send,
RA: Send,
RB: Send,
{
let id_c = next_task_id();
let id_a = next_task_id();
let ca = |c| {
log(RayonEvent::TaskStart(id_a, now()));
let result = oper_a(c);
logs!(RayonEvent::Child(id_c), RayonEvent::TaskEnd(now()));
result
};
let id_b = next_task_id();
let cb = |c| {
log(RayonEvent::TaskStart(id_b, now()));
let result = oper_b(c);
logs!(RayonEvent::Child(id_c), RayonEvent::TaskEnd(now()));
result
};
logs!(
RayonEvent::Child(id_a),
RayonEvent::Child(id_b),
RayonEvent::TaskEnd(now())
);
let r = rayon::join_context(ca, cb);
log(RayonEvent::TaskStart(id_c, now()));
r
}
/// Takes two closures and *potentially* runs them in parallel. It
/// returns a pair of the results from those closures.
///
/// Conceptually, calling `join()` is similar to spawning two threads,
/// one executing each of the two closures. However, the
/// implementation is quite different and incurs very low
/// overhead. The underlying technique is called "work stealing": the
/// Rayon runtime uses a fixed pool of worker threads and attempts to
/// only execute code in parallel when there are idle CPUs to handle
/// it.
///
/// When `join` is called from outside the thread pool, the calling
/// thread will block while the closures execute in the pool. When
/// `join` is called within the pool, the calling thread still actively
/// participates in the thread pool. It will begin by executing closure
/// A (on the current thread). While it is doing that, it will advertise
/// closure B as being available for other threads to execute. Once closure A
/// has completed, the current thread will try to execute closure B;
/// if however closure B has been stolen, then it will look for other work
/// while waiting for the thief to fully execute closure B. (This is the
/// typical work-stealing strategy).
///
/// # Examples
///
/// This example uses join to perform a quick-sort (note this is not a
/// particularly optimized implementation: if you **actually** want to
/// sort for real, you should prefer [the `par_sort` method] offered
/// by Rayon).
///
/// [the `par_sort` method]: ../rayon/slice/trait.ParallelSliceMut.html#method.par_sort
///
/// ```rust
/// let mut v = vec![5, 1, 8, 22, 0, 44];
/// quick_sort(&mut v);
/// assert_eq!(v, vec![0, 1, 5, 8, 22, 44]);
///
/// fn quick_sort<T:PartialOrd+Send>(v: &mut [T]) {
/// if v.len() > 1 {
/// let mid = partition(v);
/// let (lo, hi) = v.split_at_mut(mid);
/// rayon::join(|| quick_sort(lo),
/// || quick_sort(hi));
/// }
/// }
///
/// // Partition rearranges all items `<=` to the pivot
/// // item (arbitrarily selected to be the last item in the slice)
/// // to the first half of the slice. It then returns the
/// // "dividing point" where the pivot is placed.
/// fn partition<T:PartialOrd+Send>(v: &mut [T]) -> usize {
/// let pivot = v.len() - 1;
/// let mut i = 0;
/// for j in 0..pivot {
/// if v[j] <= v[pivot] {
/// v.swap(i, j);
/// i += 1;
/// }
/// }
/// v.swap(i, pivot);
/// i
/// }
/// ```
///
/// # Warning about blocking I/O
///
/// The assumption is that the closures given to `join()` are
/// CPU-bound tasks that do not perform I/O or other blocking
/// operations. If you do perform I/O, and that I/O should block
/// (e.g., waiting for a network request), the overall performance may
/// be poor. Moreover, if you cause one closure to be blocked waiting
/// on another (for example, using a channel), that could lead to a
/// deadlock.
///
/// # Panics
///
/// No matter what happens, both closures will always be executed. If
/// a single closure panics, whether it be the first or second
/// closure, that panic will be propagated and hence `join()` will
/// panic with the same panic value. If both closures panic, `join()`
/// will panic with the panic value from the first closure.
pub fn join<A, B, RA, RB>(oper_a: A, oper_b: B) -> (RA, RB)
where
A: FnOnce() -> RA + Send,
B: FnOnce() -> RB + Send,
RA: Send,
RB: Send,
{
let id_c = next_task_id();
let id_a = next_task_id();
let ca = || {
log(RayonEvent::TaskStart(id_a, now()));
let result = oper_a();
logs!(RayonEvent::Child(id_c), RayonEvent::TaskEnd(now()));
result
};
let id_b = next_task_id();
let cb = || {
log(RayonEvent::TaskStart(id_b, now()));
let result = oper_b();
logs!(RayonEvent::Child(id_c), RayonEvent::TaskEnd(now()));
result
};
logs!(
RayonEvent::Child(id_a),
RayonEvent::Child(id_b),
RayonEvent::TaskEnd(now())
);
let r = rayon::join(ca, cb);
log(RayonEvent::TaskStart(id_c, now()));
r
}
// small global counter to increment file names
static INSTALL_COUNT: AtomicUsize = AtomicUsize::new(0);
/// We wrap rayon's pool into our own struct to overload the install method.
pub struct ThreadPool {
pub(crate) logs: Arc<Mutex<Vec<Arc<Storage<RayonEvent>>>>>,
pub(crate) pool: rayon::ThreadPool,
}
impl ThreadPool {
/// Reset all logs and counters to initial condition.
fn reset(&self) {
NEXT_TASK_ID.store(0, Ordering::SeqCst);
NEXT_ITERATOR_ID.store(0, Ordering::SeqCst);
let logs = &*self.logs.lock().unwrap(); // oh yeah baby
for log in logs {
log.clear();
}
}
    /// Execute given closure in the thread pool, logging its task as the initial one.
/// After running, we post-process the logs and return a `RunLog` together with the closure's
/// result.
pub fn | <OP, R>(&self, op: OP) -> (R, RunLog)
where
OP: FnOnce() -> R + Send,
R: Send,
{
self.reset();
let id = next_task_id();
let c = || {
log(RayonEvent::TaskStart(id, now()));
let result = op();
log(RayonEvent::TaskEnd(now()));
result
};
let start = now();
let r = self.pool.install(c);
let log = RunLog::new(
NEXT_TASK_ID.load(Ordering::Relaxed),
NEXT_ITERATOR_ID.load(Ordering::Relaxed),
&*self.logs.lock().unwrap(),
start,
);
(r, log)
}
/// Creates a scope that executes within this thread-pool.
/// Equivalent to `self.install(|| scope(...))`.
///
/// See also: [the `scope()` function][scope].
///
/// [scope]: fn.scope.html
pub fn scope<'scope, OP, R>(&self, op: OP) -> R
where
OP: for<'s> FnOnce(&'s Scope<'scope>) -> R + 'scope + Send,
R: Send,
{
self.install(|| scope(op))
}
/// Like `scope` but fifo.
pub fn scope_fifo<'scope, OP, R>(&self, op: OP) -> R
where
OP: for<'s> FnOnce(&'s ScopeFifo<'scope>) -> R + 'scope + Send,
R: Send,
{
self.install(|| scope_fifo(op))
}
    /// Execute given closure in the thread pool, logging its task as the initial one.
/// After running, we save a json file with filename being an incremental counter.
pub fn install<OP, R>(&self, op: OP) -> R
where
OP: FnOnce() -> R + Send,
R: Send,
{
let (r, log) = self.logging_install(op);
log.save(format!(
"log_{}.json",
INSTALL_COUNT.fetch_add(1, Ordering::SeqCst)
))
.expect("saving json failed");
r
}
    /// This function simply returns a comparator that allows us to add algorithms for comparison.
pub fn compare(&self) -> Comparator {
Comparator::new(self)
}
}
| logging_install | identifier_name |
slide-load.js | (function($){
/**
* The pushLoad object creates and manages the AJAX transitions between
* all the pages and subpages on the site.
*
* This object is in charge of loading the new content as well as the
* animated transition between the old content and the new content. It also
* contains logic to determine if the new content should completely replace the
	 * target element, or be placed inside the target element. In most cases it is
* placed inside the target element, but in certain special cases it can be used
* to replace the invoking element.
*
* @todo Add method for adding custom transitions.
* @todo Make all events trigger on the called element.
*
* @type {Object}
*/
var pushLoad = {
$el: null,
offsets: {},
farCoords: {},
closeCoords: {},
transiton: {},
defaults: {
transition: "bottom",
ajax: {
url: "/",
dataType: 'html',
type: 'POST',
data: {},
},
},
/**
* Initializes the creation of the loader and sets its options.
* @param {Object} config A configuration object for the loader.
*/
_init: function( config, el ) {
config = config || {};
$("#pl-loader").remove();
this.$el = this._proxy_check(el);
this.settings = $.extend(true, {}, this.defaults, config);
this.$loader = $('<div id="pl-loader" class="loading-screen" />');
this._load();
},
/**
* Clones the invoking element and positions the clone directly on top of it.
*/
_create_loader: function() {
var $content = this.$el.clone();
this.$loader.html($content);
this._set_loader_style();
$(document).trigger("pl.clone", [this.$el]);
this.$loader.appendTo('.faux-body');
},
/**
* Gets two sets of coordinates for use in animating the loader.
*/
_set_coords: function() {
this.offsets = this.$el.offset();
this.farCoords = this._get_far_coords(this.$el);
this.closeCoords = this._get_close_coords();
},
/**
* Gets the close coordinates of the element.
*
* The close coordinates are the distances from each side of the document
* to that same side of the element. So distance from the left side
* of the document to the left side of the element.
*
	 * @return {Object} An object containing all four coordinates.
*/
_get_close_coords: function() {
var coords = {};
coords.bottom = $(".faux-body").outerHeight() - this.farCoords.bottom;
coords.right = $(".faux-body").outerWidth() - this.farCoords.right;
coords.top = this.offsets.top; |
/**
* Gets the far coordinates of the element.
*
* The far coordinates are the distances from each side of the document
* to the opposite side of the element. So distance from the left side
* of the document to the right side of the element.
*
	 * @param {jQuery} $cntr The jQuery object to get the coordinates of.
	 * @return {Object} An object containing all four coordinates.
*/
_get_far_coords: function($cntr) {
coords = {};
coords.top = $(".faux-body").outerHeight() - this.offsets.top;
coords.left = $(".faux-body").outerWidth() - this.offsets.left;
coords.bottom = $cntr.outerHeight() + this.offsets.top;
coords.right = $cntr.outerWidth() + this.offsets.left;
return coords;
},
/**
* This sets additional css properties of the loader clone.
*/
_set_loader_style: function() {
this.$loader.css({
height: this.$el.height(),
width: this.$el.width(),
});
},
/**
* This runs the actual AJAX call to get new content, then calls the callback
* function.
*/
_load: function() {
var self = this;
$.ajax(this.settings.ajax).done(function(data) {
self._show_content.call(self, data);
});
},
/**
* This runs all of the necessary functions to add the new content to the
* container element, and then reveal it by animating the loader clone.
*
* @param {String} html The new HTML to insert into the container element.
*/
_show_content: function(html) {
var self = this;
this._set_coords();
this._create_loader();
this._prepare_cntr(html);
this.transitions[ this.settings.transition ].call(this);
this.$el.imagesLoaded( function() {
self.$el.trigger("pl.animate", [self.$el]);
var fade = self.$el.fadeTo({
duration: 750,
queue: false
}, 1).animateAuto("height", {
duration: 500,
queue: false
})
var slide = self.$loader.animate(
self.transiton,
{
queue: false,
duration: 500,
}
);
$.when(fade, slide).done(function () {
self.$el.removeAttr('style');
self._exit();
});
});
},
/**
* Inserts the new content into the container element.
*
* This sets a few style properties to keep things from showing up before
* they're ready. It sets the height to match the height of the previous content
* so that the page doesn't expand in length before we're ready for it. It also
* sets the opacity to 0, which is there primarily to aid the eventual transition,
* but has the added bonus of providing a fallback to make sure no new content is
* shown behind the loading screen. In some instances, the new content is meant
* to replace the container, instead of being placed inside of it. (Think team
* member to team member navigation.) This function will determine if that
* replacement needs to happen.
*
* @param {String} html The HTML content to load into the element.
*/
_prepare_cntr: function(html) {
var $new = $(html)
, replace = this.$el.data("load-replace")
		$new = replace ? this._find_replacement($new) : $new;
this.$el.css({
height: this.$el.height(),
opacity: 0,
});
this.$el.html($new);
},
/**
* Checks for a load proxy on the container element.
*
* A load proxy allows the content to be loaded into an element that is not
	 * the container element. If the container element has a data-load-proxy="" property
	 * set, then this function attempts to find the new container element. The
	 * data-load-proxy property can be any valid jQuery selector. If there is a
	 * data-load-proxy property set, but the jQuery selector returns an empty result, the initial
* element is returned.
*
* @param {jQuery} $cntr A jQuery object containing the container element.
*
* @return {jQuery} A jQuery object containing the new element if found,
* otherwise the initial object is returned.
*/
_proxy_check: function($cntr) {
var proxy = $cntr.data("load-proxy");
return $(proxy).length ? $(proxy) : $cntr;
},
/**
* This function looks for a replacement container in the new content.
*
* In order to make sure that the new content is placed correctly, the replacement
* is dependent on finding an element at the root of the new HTML that has the
* data-load-replacement property. If an element with that property is found,
* then the replacement is called, which copies attributes/properties of the replacement
	 * onto the existing container. Then those properties/attributes are stripped from
* the replacement container. This is done because if we were to wholesale replace
* the existing container, all the events we have bound to it, and triggers in this
* plugin would cease to exist, and therefore would fail.
*
* @param {jQuery} $new A jQuery object containing the new HTML object.
*
* @return {jQuery} A jQuery object containing the new HTML with the replacement made.
*/
	_find_replacement: function($new) {
var self = this
, $copy = $new
$copy.each(function(i, val) {
if ($(this).data("load-replacement")) {
self._replace_container($(this));
contents = $(this).contents().toArray();
$new.splice.apply($new, [i, 1].concat(contents))
}
});
return $new;
},
/**
* This function copies attributes from one jQuery element to the container element.
*
* This function iterates over the properties of the jQuery element provided, and then
* copies them onto the Container jQuery element.
*
* @param {jQuery} $new A jQuery element to copy the properties of.
*/
_replace_container: function($new) {
		var attr = $new.prop("attributes")
		  , self = this
$.each(attr, function() {
self.$el.attr(this.name, this.value);
});
},
/**
* Finishes execution of the loader.
*
* Removes the loading screen element from the page and then triggers the
* completion event on the document.
*/
_exit: function() {
this.$loader.remove();
$(document).trigger("pl.complete", [this.$el]);
},
transitions: {
top: function() {
this.$loader.css({
top: this.closeCoords.top,
});
this.transiton = {
height: 0,
}
},
bottom: function() {
this.$loader.css({
top: this.closeCoords.top,
bottom: this.closeCoords.bottom,
});
this.transiton = {
top: this.farCoords.bottom,
height: 0,
}
},
left: function() {
this.$loader.css({
top: this.closeCoords.top,
left: this.closeCoords.left,
});
this.transiton = {
left: "-100%",
}
},
right: function() {
this.$loader.css({
top: this.closeCoords.top,
right: this.closeCoords.right,
});
this.transiton = {
right: "-100%",
}
},
},
};
$.fn.pushLoad = function() {
// The second string is a pushLoad transition method
if ( pushLoad.transitions[ arguments[1] ] ) {
config = {
transition: arguments[1],
ajax: {
url: arguments[0],
},
}
pushLoad._init.call( pushLoad, config, this );
} else if ( typeof arguments[0] === 'object' ) {
pushLoad._init.call( pushLoad, arguments[0], this );
} else {
$.error( 'pushLoad has no method '+arguments[0] );
}
return this;
};
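	// Example usage (an illustrative sketch; the selector, URL and data below are
	// hypothetical, not part of this plugin):
	//
	//     // Shorthand form: a URL plus one of the built-in transitions.
	//     $("#main-content").pushLoad("/about", "left");
	//
	//     // Full configuration object form.
	//     $("#main-content").pushLoad({
	//         transition: "bottom",
	//         ajax: { url: "/team/jane", data: { partial: 1 } },
	//     });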
})( jQuery ); | coords.left = this.offsets.left;
// $.extend(coords, this.offsets);
return coords;
}, | random_line_split |
slide-load.js | (function($){
/**
* The pushLoad object creates and manages the AJAX transitions between
* all the pages and subpages on the site.
*
* This object is in charge of loading the new content as well as the
* animated transition between the old content and the new content. It also
* contains logic to determine if the new content should completely replace the
	 * target element, or be placed inside the target element. In most cases it is
* placed inside the target element, but in certain special cases it can be used
* to replace the invoking element.
*
* @todo Add method for adding custom transitions.
* @todo Make all events trigger on the called element.
*
* @type {Object}
*/
var pushLoad = {
$el: null,
offsets: {},
farCoords: {},
closeCoords: {},
transiton: {},
defaults: {
transition: "bottom",
ajax: {
url: "/",
dataType: 'html',
type: 'POST',
data: {},
},
},
/**
* Initializes the creation of the loader and sets its options.
* @param {Object} config A configuration object for the loader.
*/
_init: function( config, el ) {
config = config || {};
$("#pl-loader").remove();
this.$el = this._proxy_check(el);
this.settings = $.extend(true, {}, this.defaults, config);
this.$loader = $('<div id="pl-loader" class="loading-screen" />');
this._load();
},
/**
* Clones the invoking element and positions the clone directly on top of it.
*/
_create_loader: function() {
var $content = this.$el.clone();
this.$loader.html($content);
this._set_loader_style();
$(document).trigger("pl.clone", [this.$el]);
this.$loader.appendTo('.faux-body');
},
/**
* Gets two sets of coordinates for use in animating the loader.
*/
_set_coords: function() {
this.offsets = this.$el.offset();
this.farCoords = this._get_far_coords(this.$el);
this.closeCoords = this._get_close_coords();
},
/**
* Gets the close coordinates of the element.
*
* The close coordinates are the distances from each side of the document
* to that same side of the element. So distance from the left side
* of the document to the left side of the element.
*
	 * @return {Object} An object containing all four coordinates.
*/
_get_close_coords: function() {
var coords = {};
coords.bottom = $(".faux-body").outerHeight() - this.farCoords.bottom;
coords.right = $(".faux-body").outerWidth() - this.farCoords.right;
coords.top = this.offsets.top;
coords.left = this.offsets.left;
// $.extend(coords, this.offsets);
return coords;
},
/**
* Gets the far coordinates of the element.
*
* The far coordinates are the distances from each side of the document
* to the opposite side of the element. So distance from the left side
* of the document to the right side of the element.
*
	 * @param {jQuery} $cntr The jQuery object to get the coordinates of.
	 * @return {Object} An object containing all four coordinates.
*/
_get_far_coords: function($cntr) {
coords = {};
coords.top = $(".faux-body").outerHeight() - this.offsets.top;
coords.left = $(".faux-body").outerWidth() - this.offsets.left;
coords.bottom = $cntr.outerHeight() + this.offsets.top;
coords.right = $cntr.outerWidth() + this.offsets.left;
return coords;
},
/**
* This sets additional css properties of the loader clone.
*/
_set_loader_style: function() {
this.$loader.css({
height: this.$el.height(),
width: this.$el.width(),
});
},
/**
* This runs the actual AJAX call to get new content, then calls the callback
* function.
*/
_load: function() {
var self = this;
$.ajax(this.settings.ajax).done(function(data) {
self._show_content.call(self, data);
});
},
/**
* This runs all of the necessary functions to add the new content to the
* container element, and then reveal it by animating the loader clone.
*
* @param {String} html The new HTML to insert into the container element.
*/
_show_content: function(html) {
var self = this;
this._set_coords();
this._create_loader();
this._prepare_cntr(html);
this.transitions[ this.settings.transition ].call(this);
this.$el.imagesLoaded( function() {
self.$el.trigger("pl.animate", [self.$el]);
var fade = self.$el.fadeTo({
duration: 750,
queue: false
}, 1).animateAuto("height", {
duration: 500,
queue: false
})
var slide = self.$loader.animate(
self.transiton,
{
queue: false,
duration: 500,
}
);
$.when(fade, slide).done(function () {
self.$el.removeAttr('style');
self._exit();
});
});
},
/**
* Inserts the new content into the container element.
*
* This sets a few style properties to keep things from showing up before
* they're ready. It sets the height to match the height of the previous content
* so that the page doesn't expand in length before we're ready for it. It also
* sets the opacity to 0, which is there primarily to aid the eventual transition,
* but has the added bonus of providing a fallback to make sure no new content is
* shown behind the loading screen. In some instances, the new content is meant
* to replace the container, instead of being placed inside of it. (Think team
* member to team member navigation.) This function will determine if that
* replacement needs to happen.
*
* @param {String} html The HTML content to load into the element.
*/
_prepare_cntr: function(html) {
var $new = $(html)
, replace = this.$el.data("load-replace")
		$new = replace ? this._find_replacement($new) : $new;
this.$el.css({
height: this.$el.height(),
opacity: 0,
});
this.$el.html($new);
},
/**
* Checks for a load proxy on the container element.
*
* A load proxy allows the content to be loaded into an element that is not
	 * the container element. If the container element has a data-load-proxy="" property
	 * set, then this function attempts to find the new container element. The
	 * data-load-proxy property can be any valid jQuery selector. If there is a
	 * data-load-proxy property set, but the jQuery selector returns an empty result, the initial
* element is returned.
*
* @param {jQuery} $cntr A jQuery object containing the container element.
*
* @return {jQuery} A jQuery object containing the new element if found,
* otherwise the initial object is returned.
*/
_proxy_check: function($cntr) {
var proxy = $cntr.data("load-proxy");
return $(proxy).length ? $(proxy) : $cntr;
},
/**
* This function looks for a replacement container in the new content.
*
* In order to make sure that the new content is placed correctly, the replacement
* is dependent on finding an element at the root of the new HTML that has the
* data-load-replacement property. If an element with that property is found,
* then the replacement is called, which copies attributes/properties of the replacement
	 * onto the existing container. Then those properties/attributes are stripped from
* the replacement container. This is done because if we were to wholesale replace
* the existing container, all the events we have bound to it, and triggers in this
* plugin would cease to exist, and therefore would fail.
*
* @param {jQuery} $new A jQuery object containing the new HTML object.
*
* @return {jQuery} A jQuery object containing the new HTML with the replacement made.
*/
	_find_replacement: function($new) {
var self = this
, $copy = $new
$copy.each(function(i, val) {
if ($(this).data("load-replacement")) {
self._replace_container($(this));
contents = $(this).contents().toArray();
$new.splice.apply($new, [i, 1].concat(contents))
}
});
return $new;
},
/**
* This function copies attributes from one jQuery element to the container element.
*
* This function iterates over the properties of the jQuery element provided, and then
* copies them onto the Container jQuery element.
*
* @param {jQuery} $new A jQuery element to copy the properties of.
*/
_replace_container: function($new) {
		var attr = $new.prop("attributes")
		  , self = this
$.each(attr, function() {
self.$el.attr(this.name, this.value);
});
},
/**
* Finishes execution of the loader.
*
* Removes the loading screen element from the page and then triggers the
* completion event on the document.
*/
_exit: function() {
this.$loader.remove();
$(document).trigger("pl.complete", [this.$el]);
},
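	// Example (an illustrative sketch): consumers can react to the lifecycle
	// events this plugin triggers on the document ("pl.clone", "pl.complete")
	// or on the container ("pl.animate"); the tooltip call below is a
	// hypothetical re-initialization hook, not part of this plugin.
	//
	//     $(document).on("pl.complete", function(event, $container) {
	//         $container.find("[data-tooltip]").tooltip();
	//     });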
transitions: {
top: function() {
this.$loader.css({
top: this.closeCoords.top,
});
this.transiton = {
height: 0,
}
},
bottom: function() {
this.$loader.css({
top: this.closeCoords.top,
bottom: this.closeCoords.bottom,
});
this.transiton = {
top: this.farCoords.bottom,
height: 0,
}
},
left: function() {
this.$loader.css({
top: this.closeCoords.top,
left: this.closeCoords.left,
});
this.transiton = {
left: "-100%",
}
},
right: function() {
this.$loader.css({
top: this.closeCoords.top,
right: this.closeCoords.right,
});
this.transiton = {
right: "-100%",
}
},
},
};
$.fn.pushLoad = function() {
// The second string is a pushLoad transition method
if ( pushLoad.transitions[ arguments[1] ] ) {
config = {
transition: arguments[1],
ajax: {
url: arguments[0],
},
}
pushLoad._init.call( pushLoad, config, this );
} else if ( typeof arguments[0] === 'object' ) {
pushLoad._init.call( pushLoad, arguments[0], this );
} else |
return this;
};
})( jQuery ); | {
$.error( 'pushLoad has no method '+arguments[0] );
} | conditional_block |
main.rs | fn main() {
    //Rust deals with stacks and heaps for memory management, no gc or direct memory management
    //The stack memory is a first in, last out structure
//Stack data must take up a known and fixed size
//In rust the heap is used for when we don't know the size of the vector at compile time
//or if the memory to be allocated is dynamic
    //Heap memory is not really organized; data just kinda gets thrown where the os has space for it
//Therefore, the program has to jump around to get data which can slow things down.
//Function local variables get pushed onto the stack and then popped off when
//it's done
    //A value is assigned a variable which is its owner. Only one owner can exist at a time
//When the owner is out of scope the value disappears
//Examples to go over variable scope
let s = "hello";
{
let s = "hello2";
println!("s: {}", s);
}
println!("Previous s is out of scope but the one defined earlier isn't");
println!("s: {}", s);
//Onto the next example which goes over the rules of ownership
//It's going to be using the String type aka StringBuffers
let mut s = String::from("hello");
s.push_str(", world!");// s must be mutable for this to work
println!("{}", s);
//Note: In C++, this pattern of deallocating resources at the end of an item’s lifetime is sometimes
//called Resource Acquisition Is Initialization (RAII). The drop function in Rust will be familiar
//to you if you’ve used RAII patterns.
let x = 5;
let y = x;// y is just a copy of x since they are simple types and have a fixed size
let s1 = String::from("hello");
let s2 = s1; // s2 is a copy of the pointer to the data that s1 points to
    // this errors out because s1 does not have a copy trait which means
//we made a shallow copy instead of a deep copy. Rust does not like this
// if we tried to use s1. If we use s2 we are fine since s1 is invalidated
//after we assign s2 to s1 values. This operation is called a move.
// println!("{}", s2);
let s1 = String::from("hello");
    let s2 = s1.clone(); // This creates a deep copy of s1. We can now use s1 in other places without
// it being invalid
// println!("{}",s1);
    //Info about which things make a deep copy when you do let x = something; let y = x;
// Rust has a special annotation called the Copy trait that we can place on types like integers that are
// stored on the stack (we’ll talk more about traits in Chapter 10). If a type has the Copy trait, an older
// variable is still usable after assignment. Rust won’t let us annotate a type with the Copy trait if the
// type, or any of its parts, has implemented the Drop trait. If the type needs something special to happen
// when the value goes out of scope and we add the Copy annotation to that type, we’ll get a compile time error.
// To learn about how to add the Copy annotation to your type, see Appendix C on Derivable Traits.
// So what types are Copy? You can check the documentation for the given type to be sure, but as a general rule,
// any group of simple scalar values can be Copy, and nothing that requires allocation or is some form of resource
// is Copy. Here are some of the types that are Copy:
// All the integer types, like u32.
// The boolean type, bool, with values true and false.
// All the floating point types, like f64.
// Tuples, but only if they contain types that are also Copy. (i32, i32) is Copy, but (i32, String) is not.
let s = String::from("hello"); // s comes into scope.
//So in rust if we pass a variable into a function it loses it's ownership to the
//function. Then once the function is over that variable no longer exists
//because it is now out of scope.
takes_ownership(s); // s's value moves into the function...
// ... and so is no longer valid here.
let x = 5; // x comes into scope.
//If a variable has the copy trait then only a copy is made to the function and
//we can still use the variable afterwards even though all the variables in the
//function are now out of scope.
makes_copy(x); // x would move into the function,
// but i32 is Copy, so it’s okay to still
// use x afterward.
//we can give ownership of a variable from a function by having an expression at the end.
//We could pass in a variable and then take back its ownership by doing this. However, I think this
//is kinda of a pain. The people at Rust feel the same.
let s1 = gives_ownership();
    //Rust also lets us return variables as tuples, which we can then deconstruct when
//we get the returned values.
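    //A minimal sketch (take_and_return is a hypothetical helper mirroring the
    //book's tuple-return pattern):
    // fn take_and_return(s: String) -> (String, usize) {
    //     let len = s.len();
    //     (s, len)
    // }
    // let (s2, len) = take_and_return(s1);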
//Now it's time to go over references and borrowing!
let s1 = String::from("hello");
//The & creates a reference to a variable. They can be thought of a pointer to the original data.
//By doing this we do not pass ownership of the variable to the function
//Therefore when we go out of scope of the function we still have ownership of the variable
//where the function call was made.
//References as function parameters is called borrowing.
let len = calculate_length(&s1);
println!("The length of '{}' is {}.", s1, len);
//We can not modify a borrowed variable.
//change(&s1);
let mut s1 = String::from("hello");
//We can fix this by making a mutable reference
//We also need to make sure that our variable we're passing in is also mutable.
change(&mut s1);
println!("{}", s1);
//You are only allowed one mutable reference to a particular piece of data in a particular scope.
    //This ensures that we don't have any aliasing with our references referring to the same data.
//The benefit of having this restriction is that Rust can prevent data races at compile time.
//From the rust book
//Whew! We also cannot have a mutable reference while we have an immutable one.
//Users of an immutable reference don’t expect the values to suddenly change out from under them!
//However, multiple immutable references are okay because no one who is just reading the data has
//the ability to affect anyone else’s reading of the data.
//let mut s = String::from("Hello");
//let r1 = &s; //Immutable reference
//let r2 = &s; //Immutable reference
//let r3 = &s; //Mutable reference -- big no no
    //The compiler does not allow dangling pointers/references. It therefore will error out on us.
    // let reference_to_nothing = dangle();
//We are now going to go over slices.
//From the rust book: Another data type that does not have ownership is the slice.
//Slices let you reference a contiguous sequence of elements in a collection rather than the whole collection.
// let mut s = String::from("hello world");
// let word = first_word(&s); // word will get the value 5.
// s.clear(); // This empties the String, making it equal to "".
// word still has the value 5 here, but there's no more string that
// we could meaningfully use the value 5 with. word is now totally invalid!
//The index we got is now completely out of sync with our original string.
//If we end up having more indices we could get even more out of sync with our data.
//For strings we can take advantage of a built in feature called string slices.
//They create a reference to portions of a string.
let s = String::from("hello world");
//Slicing is similar to slicing in python where you have a starting index and then
//the ending value is +1 of the data you actually care about.
let hello = &s[0..5];
// let hello = &s[..5]; //Equivalent to the above
let world = &s[6..11];
// let world = &s[6..]; //Equivalent to the above
let len = s.len();
let slice = &s[0..len];
// let slice = &s[..]; //Equivalent to the above
// We now have a straightforward API that’s much harder to mess up, since the compiler will
//ensure the references into the String remain valid. Remember the bug in the program in Listing 4-11,
//when we got the index to the end of the first word but then cleared the string so our index was invalid?
//That code was logically incorrect but didn’t show any immediate errors. The problems would show up later
//if we kept trying to use the first word index with an emptied string. Slices make this bug impossible
main.rs | fn main() {
    //Rust manages memory with a stack and a heap; there is no GC or direct manual memory management
    //Stack memory is a last in, first out structure
    //Stack data must take up a known and fixed size
    //In Rust the heap is used when we don't know the size of the data at compile time
    //or if the memory to be allocated is dynamic
    //Heap memory is not really organized; data just gets placed wherever the OS has space for it
    //Therefore, the program has to jump around to fetch data, which can slow things down.
    //Function local variables get pushed onto the stack and then popped off when
    //the function is done
    //Each value has a variable that is its owner, and only one owner can exist at a time
    //When the owner goes out of scope the value is dropped
//Examples to go over variable scope
let s = "hello";
{
let s = "hello2";
println!("s: {}", s);
}
println!("Previous s is out of scope but the one defined earlier isn't");
println!("s: {}", s);
//Onto the next example which goes over the rules of ownership
    //It's going to be using the String type, a growable heap-allocated string (akin to a string buffer)
let mut s = String::from("hello");
s.push_str(", world!");// s must be mutable for this to work
println!("{}", s);
//Note: In C++, this pattern of deallocating resources at the end of an item’s lifetime is sometimes
//called Resource Acquisition Is Initialization (RAII). The drop function in Rust will be familiar
//to you if you’ve used RAII patterns.
let x = 5;
let y = x;// y is just a copy of x since they are simple types and have a fixed size
let s1 = String::from("hello");
let s2 = s1; // s2 is a copy of the pointer to the data that s1 points to
    // using s1 after this errors out because String does not implement the Copy trait, which means
    //we made a shallow copy instead of a deep copy. Rust does not allow this,
    //so s1 is invalidated after we assign its value to s2; only s2 is usable
    //from here on. This operation is called a move.
// println!("{}", s2);
let s1 = String::from("hello");
    let s2 = s1.clone(); // This creates a deep copy of s1. We can now use s1 in other places without
                         // it being invalidated
// println!("{}",s1);
    //Info about which types are copied rather than moved when you do let x = something; let y = x;
// Rust has a special annotation called the Copy trait that we can place on types like integers that are
// stored on the stack (we’ll talk more about traits in Chapter 10). If a type has the Copy trait, an older
// variable is still usable after assignment. Rust won’t let us annotate a type with the Copy trait if the
// type, or any of its parts, has implemented the Drop trait. If the type needs something special to happen
// when the value goes out of scope and we add the Copy annotation to that type, we’ll get a compile time error.
// To learn about how to add the Copy annotation to your type, see Appendix C on Derivable Traits.
// So what types are Copy? You can check the documentation for the given type to be sure, but as a general rule,
// any group of simple scalar values can be Copy, and nothing that requires allocation or is some form of resource
// is Copy. Here are some of the types that are Copy:
// All the integer types, like u32.
// The boolean type, bool, with values true and false.
// All the floating point types, like f64.
// Tuples, but only if they contain types that are also Copy. (i32, i32) is Copy, but (i32, String) is not.
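    //For instance (an illustrative aside): a tuple of two i32 values is Copy, so the
    //original binding stays usable after assignment, unlike the String move above.
    let pair = (1, 2); // (i32, i32) is Copy
    let pair_copy = pair;
    println!("both usable: {:?} and {:?}", pair, pair_copy);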
let s = String::from("hello"); // s comes into scope.
    //So in Rust if we pass a variable into a function, it loses its ownership to the
//function. Then once the function is over that variable no longer exists
//because it is now out of scope.
takes_ownership(s); // s's value moves into the function...
// ... and so is no longer valid here.
let x = 5; // x comes into scope.
//If a variable has the copy trait then only a copy is made to the function and
//we can still use the variable afterwards even though all the variables in the
//function are now out of scope.
makes_copy(x); // x would move into the function,
// but i32 is Copy, so it’s okay to still
// use x afterward.
    //We can give ownership of a variable back from a function by having an expression at the end.
    //We could pass in a variable and then take back its ownership by doing this. However, I think this
    //is kind of a pain. The people at Rust feel the same.
let s1 = gives_ownership();
    //Rust also lets us return multiple values as a tuple, which we can then destructure when
    //we get the returned values.
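    //A small illustrative sketch of that pattern (this helper is local to the example,
    //not something defined elsewhere in this file): return the value plus some derived
    //data as a tuple, then destructure the tuple at the call site.
    fn take_and_give_back(some_string: String) -> (String, usize) {
        let length = some_string.len();
        (some_string, length) // ownership of the String moves back to the caller
    }
    let (returned, length) = take_and_give_back(String::from("hello"));
    println!("got back '{}' with length {}", returned, length);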
//Now it's time to go over references and borrowing!
let s1 = String::from("hello");
    //The & creates a reference to a variable. A reference can be thought of as a pointer to the original data.
//By doing this we do not pass ownership of the variable to the function
//Therefore when we go out of scope of the function we still have ownership of the variable
//where the function call was made.
//References as function parameters is called borrowing.
let len = calculate_length(&s1);
println!("The length of '{}' is {}.", s1, len);
    //We cannot modify an immutably borrowed variable.
//change(&s1);
let mut s1 = String::from("hello");
//We can fix this by making a mutable reference
//We also need to make sure that our variable we're passing in is also mutable.
change(&mut s1);
println!("{}", s1);
//You are only allowed one mutable reference to a particular piece of data in a particular scope.
    //This ensures that we don't have aliasing, with multiple references referring to the same data while it is being mutated.
//The benefit of having this restriction is that Rust can prevent data races at compile time.
//From the rust book
//Whew! We also cannot have a mutable reference while we have an immutable one.
//Users of an immutable reference don’t expect the values to suddenly change out from under them!
//However, multiple immutable references are okay because no one who is just reading the data has
//the ability to affect anyone else’s reading of the data.
//let mut s = String::from("Hello");
//let r1 = &s; //Immutable reference
//let r2 = &s; //Immutable reference
//let r3 = &s; //Mutable reference -- big no no
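    //One illustrative way to satisfy these rules (a sketch): let the immutable
    //borrows end in an inner scope before taking the mutable one.
    {
        let mut s = String::from("Hello");
        {
            let r1 = &s; //Immutable reference
            let r2 = &s; //Another immutable reference is fine
            println!("{} and {}", r1, r2);
        } //r1 and r2 go out of scope here
        let r3 = &mut s; //Now a mutable reference is allowed
        r3.push_str(", world");
    }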
    //The compiler does not allow dangling pointers/references. It will therefore error out on us.
    // let reference_to_nothing = dangle();
//We are now going to go over slices.
//From the rust book: Another data type that does not have ownership is the slice.
//Slices let you reference a contiguous sequence of elements in a collection rather than the whole collection.
// let mut s = String::from("hello world");
// let word = first_word(&s); // word will get the value 5.
// s.clear(); // This empties the String, making it equal to "".
// word still has the value 5 here, but there's no more string that
// we could meaningfully use the value 5 with. word is now totally invalid!
//The index we got is now completely out of sync with our original string.
//If we end up having more indices we could get even more out of sync with our data.
//For strings we can take advantage of a built in feature called string slices.
//They create a reference to portions of a string.
let s = String::from("hello world");
    //Slicing is similar to slicing in Python: you give a starting index, and
    //the ending index is one past the last element you actually care about.
let hello = &s[0..5];
// let hello = &s[..5]; //Equivalent to the above
let world = &s[6..11];
// let world = &s[6..]; //Equivalent to the above
let len = s.len();
let slice = &s[0..len];
// let slice = &s[..]; //Equivalent to the above
// We now have a straightforward API that’s much harder to mess up, since the compiler will
//ensure the references into the String remain valid. Remember the bug in the program in Listing 4-11,
//when we got the index to the end of the first word but then cleared the string so our index was invalid?
//That code was logically incorrect but didn’t show any immediate errors. The problems would show up later
//if we kept trying to use the first word index with an emptied string. Slices make this bug impossible
//and let us know we have a problem with our code much sooner. Using the slice version of first_word
//will throw a compile time error:
// let mut s = String::from("hello world");
// let word = first_word(&s);
// s.clear(); // Error!
// Recall from the borrowing rules that if we have an immutable reference to something, we cannot also
// take a mutable reference. Because clear needs to truncate the String, it tries to take a mutable reference,
// which fails. Not only has Rust made our API easier to use, but it has also eliminated an entire class of errors
// at compile time!
let s = "Hello, world!";
// The type of s here is &str: it’s a slice pointing to that specific point of the binary. This is also why string
// literals are immutable; &str is an immutable reference.
let my_string = String::from("hello world");
// first_word works on slices of `String`s
let word = first_word(&my_string[..]);
let my_string_literal = "hello world";
// first_word works on slices of string literals
let word = first_word(&my_string_literal[..]);
// since string literals *are* string slices already,
// this works too, without the slice syntax!
let word = first_word(my_string_literal);
let a = [1, 2, 3, 4, 5];
let slice = &a[1..3];
// This slice has the type &[i32]. It works the same way as string slices do, by storing a reference to the
// first element and a length. You’ll use this kind of slice for all sorts of other collections. We’ll discuss
// these collections in detail when we talk about vectors in Chapter 8
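    //A quick illustrative use of such a slice: iterator methods work on &[i32] too.
    let total: i32 = slice.iter().sum();
    println!("sum of {:?} is {}", slice, total);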
} // Here, x goes out of scope, then s. But since s's value was moved, nothing
// special happens.
fn takes_ownership(some_string: String) { // some_string comes into scope.
println!("{}", some_string);
} // Here, some_string goes out of scope and `drop` is called. The backing
// memory is freed.
fn makes_copy(some_integer: i32) { // some_integer comes into scope.
println!("{}", some_integer);
} // Here, some_integer goes out of scope. Nothing special happens.
//Tell what type the function will return
fn gives_ownership() -> String { // gives_ownership will move its
// return value into the function
// that calls it.
let some_string = String::from("hello"); // some_string comes into scope.
some_string // some_string is returned and
// moves out to the calling
// function.
}
fn calculate_length(s: &String) -> usize {
s.len()
}
//This function will error on us since we are trying to
//modify a borrowed variable. We will always get an
//error for this function even if we never call it.
// fn change(some_string: &String) {
// some_string.push_str(", world");
// }
//This fixes the above code by making a mutable reference that we can now modify.
fn change(some_string: &mut String) {
some_string.push_str(", world");
}
//The below code creates a dangling pointer/reference.
//So when the data goes out of scope at the end of the function
//our reference now points to memory that has been freed.
//The compiler catches this and errors out on us.
// fn dangle() -> &String {
// let s = String::from("hello");
// &s
// }
    //This version doesn't create slices of the data, so the returned index can get out of sync with the data
//We are going to rewrite it with a new version
// fn first_word(s: &String) -> usize {
    // //We are converting our string into a slice of bytes
// let bytes = s.as_bytes();
// //We now iterate through the string using iter.
// //the enumerate function packages up each part of the
// //iterator as a tuple with an index and a reference to the value
// for (i, &item) in bytes.iter().enumerate() {
// //We check to see if the byte literal of the space is
// //equal to our item.
// //If it is then we return that index.
// if item == b' ' {
// return i;
// }
// }
// //If we don't run across a space at all then we return the length of the string.
// s.len()
// }
//We can change the following to the current function signature
// fn first_word(s: &String) -> &str {
//The new signature now allows us to operate on both Strings and str types
fn first_word(s: &str) -> &str {
let bytes = s.as_bytes();
for (i, &item) in bytes.iter().enumerate() {
if item == b' ' {
    return &s[0..i];
}
}
&s[..]
    }
mod.rs | // Copyright 2015-2019 Parity Technologies (UK) Ltd.
// This file is part of Parity Ethereum.
// Parity Ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity Ethereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity Ethereum. If not, see <http://www.gnu.org/licenses/>.
//! Utilities and helpers for transaction dispatch.
pub(crate) mod light;
mod full;
mod prospective_signer;
#[cfg(any(test, feature = "accounts"))]
mod signing;
#[cfg(not(any(test, feature = "accounts")))]
mod signing {
use super::*;
use v1::helpers::errors;
/// Dummy signer implementation
#[derive(Debug, Clone)]
pub struct Signer;
impl Signer {
/// Create new instance of dummy signer (accept any AccountProvider)
pub fn new<T>(_ap: T) -> Self {
Signer
}
}
impl super::Accounts for Signer {
fn sign_transaction(&self, _filled: FilledTransactionRequest, _chain_id: Option<u64>, _nonce: U256, _password: SignWith) -> Result<WithToken<SignedTransaction>> {
Err(errors::account("Signing unsupported", "See #9997"))
}
fn sign_message(&self, _address: Address, _password: SignWith, _hash: SignMessage) -> Result<WithToken<Signature>> {
Err(errors::account("Signing unsupported", "See #9997"))
}
fn decrypt(&self, _address: Address, _password: SignWith, _data: Bytes) -> Result<WithToken<Bytes>> {
Err(errors::account("Signing unsupported", "See #9997"))
}
fn supports_prospective_signing(&self, _address: &Address, _password: &SignWith) -> bool {
false
}
fn default_account(&self) -> Address {
Default::default()
}
fn is_unlocked(&self, _address: &Address) -> bool {
false
}
}
}
pub use self::light::LightDispatcher;
pub use self::full::FullDispatcher;
pub use self::signing::Signer;
pub use v1::helpers::nonce::Reservations;
use std::fmt::Debug;
use std::ops::Deref;
use std::sync::Arc;
use bytes::Bytes;
use client_traits::BlockChainClient;
use ethcore::miner::MinerService;
use ethereum_types::{H520, H256, U256, Address};
use ethkey::{Password, Signature};
use hash::keccak;
use types::transaction::{SignedTransaction, PendingTransaction};
use jsonrpc_core::{BoxFuture, Result, Error};
use jsonrpc_core::futures::{future, Future, IntoFuture};
use v1::helpers::{TransactionRequest, FilledTransactionRequest, ConfirmationPayload};
use v1::types::{
Bytes as RpcBytes,
RichRawTransaction as RpcRichRawTransaction,
ConfirmationPayload as RpcConfirmationPayload,
ConfirmationResponse,
EthSignRequest as RpcEthSignRequest,
EIP191SignRequest as RpcSignRequest,
DecryptRequest as RpcDecryptRequest,
};
/// Has the capability to dispatch, sign, and decrypt.
///
/// Requires a clone implementation, with the implication that it be cheap;
/// usually just bumping a reference count or two.
pub trait Dispatcher: Send + Sync + Clone {
// TODO: when ATC exist, use zero-cost
// type Out<T>: IntoFuture<T, Error>
/// Fill optional fields of a transaction request, fetching gas price but not nonce.
fn fill_optional_fields(&self, request: TransactionRequest, default_sender: Address, force_nonce: bool)
-> BoxFuture<FilledTransactionRequest>;
/// Sign the given transaction request without dispatching, fetching appropriate nonce.
fn sign<P>(
&self,
filled: FilledTransactionRequest,
signer: &Arc<dyn Accounts>,
password: SignWith,
post_sign: P,
) -> BoxFuture<P::Item> where
P: PostSign + 'static,
<P::Out as futures::future::IntoFuture>::Future: Send;
/// Converts a `SignedTransaction` into `RichRawTransaction`
fn enrich(&self, SignedTransaction) -> RpcRichRawTransaction;
/// "Dispatch" a local transaction.
fn dispatch_transaction(&self, signed_transaction: PendingTransaction) -> Result<H256>;
}
/// Payload to sign
pub enum SignMessage {
/// Eth-sign kind data (requires prefixing)
Data(Bytes),
/// Prefixed data hash
Hash(H256),
}
/// Abstract transaction signer.
///
/// NOTE This signer is semi-correct, it's a temporary measure to avoid moving too much code.
/// If accounts are ultimately removed all password-dealing endpoints will be wiped out.
pub trait Accounts: Send + Sync {
/// Sign given filled transaction request for the specified chain_id.
fn sign_transaction(&self, filled: FilledTransactionRequest, chain_id: Option<u64>, nonce: U256, password: SignWith) -> Result<WithToken<SignedTransaction>>;
/// Sign given message.
fn sign_message(&self, address: Address, password: SignWith, hash: SignMessage) -> Result<WithToken<Signature>>;
/// Decrypt given message.
fn decrypt(&self, address: Address, password: SignWith, data: Bytes) -> Result<WithToken<Bytes>>;
/// Returns `true` if the accounts can sign multiple times.
fn supports_prospective_signing(&self, address: &Address, password: &SignWith) -> bool;
/// Returns default account.
fn default_account(&self) -> Address;
/// Returns true if account is unlocked (i.e. can sign without a password)
fn is_unlocked(&self, address: &Address) -> bool;
}
/// Action to execute after signing,
/// e.g. importing a transaction into the chain
pub trait PostSign: Send {
/// item that this PostSign returns
type Item: Send;
/// In case you need to perform async PostSign actions
type Out: IntoFuture<Item = Self::Item, Error = Error> + Send;
/// perform an action with the signed transaction
fn execute(self, signer: WithToken<SignedTransaction>) -> Self::Out;
}
impl PostSign for () {
type Item = WithToken<SignedTransaction>;
type Out = Result<Self::Item>;
fn execute(self, signed: WithToken<SignedTransaction>) -> Self::Out {
Ok(signed)
}
}
impl<F: Send, T: Send> PostSign for F
where F: FnOnce(WithToken<SignedTransaction>) -> Result<T>
{
type Item = T;
type Out = Result<Self::Item>;
fn execute(self, signed: WithToken<SignedTransaction>) -> Self::Out {
(self)(signed)
}
}
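// Illustrative note (the closure below is hypothetical, not used by this module):
// because of the blanket impl above, any closure of the right shape satisfies
// `PostSign`; `execute` further down relies on exactly this, e.g.
//
//     let post_sign = |signed: WithToken<SignedTransaction>| Ok(signed.into_value());
//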
/// Single-use account token.
pub type AccountToken = Password;
/// Values used to unlock accounts for signing.
#[derive(Clone, PartialEq)]
pub enum SignWith {
/// Nothing -- implies the account is already unlocked.
Nothing,
/// Unlock with password.
Password(Password),
/// Unlock with single-use token.
Token(AccountToken),
}
impl SignWith {
#[cfg(any(test, feature = "accounts"))]
fn is_password(&self) -> bool {
if let SignWith::Password(_) = *self {
true
} else {
false
}
}
}
/// A value, potentially accompanied by a signing token.
pub enum WithToken<T> {
/// No token.
No(T),
/// With token.
Yes(T, AccountToken),
}
impl<T: Debug> Deref for WithToken<T> {
type Target = T;
fn deref(&self) -> &Self::Target {
match *self {
WithToken::No(ref v) => v,
WithToken::Yes(ref v, _) => v,
}
}
}
impl<T: Debug> WithToken<T> {
/// Map the value with the given closure, preserving the token.
pub fn map<S, F>(self, f: F) -> WithToken<S> where
S: Debug,
F: FnOnce(T) -> S,
{
match self {
WithToken::No(v) => WithToken::No(f(v)),
WithToken::Yes(v, token) => WithToken::Yes(f(v), token),
}
}
/// Convert into inner value, ignoring possible token.
pub fn into_value(self) -> T {
match self {
WithToken::No(v) => v,
WithToken::Yes(v, _) => v,
}
}
/// Convert the `WithToken` into a tuple.
pub fn into_tuple(self) -> (T, Option<AccountToken>) {
match self {
WithToken::No(v) => (v, None),
WithToken::Yes(v, token) => (v, Some(token))
}
}
}
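// Illustrative usage (hypothetical bindings): transform the inner value while
// preserving any token, then split the pair apart — the same pattern `execute`
// uses below when wrapping a dispatched hash:
//
//     let response = with_token_hash.map(ConfirmationResponse::SendTransaction);
//     let (response, token) = response.into_tuple();
//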
impl<T: Debug> From<(T, AccountToken)> for WithToken<T> {
fn from(tuple: (T, AccountToken)) -> Self {
WithToken::Yes(tuple.0, tuple.1)
}
}
impl<T: Debug> From<(T, Option<AccountToken>)> for WithToken<T> {
fn from(tuple: (T, Option<AccountToken>)) -> Self {
match tuple.1 {
Some(token) => WithToken::Yes(tuple.0, token),
None => WithToken::No(tuple.0),
}
}
}
/// Execute a confirmation payload.
pub fn execute<D: Dispatcher + 'static>(
dispatcher: D,
signer: &Arc<dyn Accounts>,
payload: ConfirmationPayload,
pass: SignWith
) -> BoxFuture<WithToken<ConfirmationResponse>> {
match payload {
ConfirmationPayload::SendTransaction(request) => {
let condition = request.condition.clone().map(Into::into);
let cloned_dispatcher = dispatcher.clone();
let post_sign = move |with_token_signed: WithToken<SignedTransaction>| {
let (signed, token) = with_token_signed.into_tuple();
let signed_transaction = PendingTransaction::new(signed, condition);
cloned_dispatcher.dispatch_transaction(signed_transaction)
.map(|hash| (hash, token))
};
Box::new(
dispatcher.sign(request, &signer, pass, post_sign).map(|(hash, token)| {
WithToken::from((ConfirmationResponse::SendTransaction(hash), token))
})
)
},
ConfirmationPayload::SignTransaction(request) => {
Box::new(dispatcher.sign(request, &signer, pass, ())
.map(move |result| result
.map(move |tx| dispatcher.enrich(tx))
.map(ConfirmationResponse::SignTransaction)
))
},
ConfirmationPayload::EthSignMessage(address, data) => {
let res = signer.sign_message(address, pass, SignMessage::Data(data))
.map(|result| result
.map(|s| H520(s.into_electrum()))
.map(ConfirmationResponse::Signature)
);
Box::new(future::done(res))
},
ConfirmationPayload::SignMessage(address, data) => {
let res = signer.sign_message(address, pass, SignMessage::Hash(data))
.map(|result| result
.map(|rsv| H520(rsv.into_electrum()))
.map(ConfirmationResponse::Signature)
);
Box::new(future::done(res))
},
ConfirmationPayload::Decrypt(address, data) => {
let res = signer.decrypt(address, pass, data)
.map(|result| result
.map(RpcBytes)
.map(ConfirmationResponse::Decrypt)
);
Box::new(future::done(res))
},
}
}
/// Returns an eth_sign-compatible hash of data to sign.
/// The data is prepended with a special message to prevent
/// malicious DApps from using the function to sign forged transactions.
pub fn eth_data_hash(mut data: Bytes) -> H256 {
let mut message_data =
format!("\x19Ethereum Signed Message:\n{}", data.len())
.into_bytes();
message_data.append(&mut data);
keccak(message_data)
}
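// For example, for `data` = b"hello" (len 5), the hashed preimage is the byte
// string "\x19Ethereum Signed Message:\n5hello".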
/// Extract the default gas price from a client and miner.
pub fn default_gas_price<C, M>(client: &C, miner: &M, percentile: usize) -> U256 where
C: BlockChainClient,
M: MinerService,
{
client.gas_price_corpus(100).percentile(percentile).cloned().unwrap_or_else(|| miner.sensible_gas_price())
}
/// Convert RPC confirmation payload to signer confirmation payload.
/// May need to resolve in the future to fetch things like gas price.
pub fn from_rpc<D>(payload: RpcConfirmationPayload, default_account: Address, dispatcher: &D) -> BoxFuture<ConfirmationPayload>
where D: Dispatcher
{
match payload {
RpcConfirmationPayload::SendTransaction(request) => {
Box::new(dispatcher.fill_optional_fields(request.into(), default_account, false)
.map(ConfirmationPayload::SendTransaction))
},
RpcConfirmationPayload::SignTransaction(request) => {
Box::new(dispatcher.fill_optional_fields(request.into(), default_account, false)
.map(ConfirmationPayload::SignTransaction))
},
RpcConfirmationPayload::Decrypt(RpcDecryptRequest { address, msg }) => {
Box::new(future::ok(ConfirmationPayload::Decrypt(address, msg.into())))
},
RpcConfirmationPayload::EthSignMessage(RpcEthSignRequest { address, data }) => {
Box::new(future::ok(ConfirmationPayload::EthSignMessage(address, data.into())))
},
RpcConfirmationPayload::EIP191SignMessage(RpcSignRequest { address, data }) => {
Box::new(future::ok(ConfirmationPayload::SignMessage(address, data)))
},
}
}
mod.rs | // Copyright 2015-2019 Parity Technologies (UK) Ltd.
// This file is part of Parity Ethereum.
// Parity Ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity Ethereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity Ethereum. If not, see <http://www.gnu.org/licenses/>.
//! Utilities and helpers for transaction dispatch.
pub(crate) mod light;
mod full;
mod prospective_signer;
#[cfg(any(test, feature = "accounts"))]
mod signing;
#[cfg(not(any(test, feature = "accounts")))]
mod signing {
use super::*;
use v1::helpers::errors;
/// Dummy signer implementation
#[derive(Debug, Clone)]
pub struct Signer;
impl Signer {
/// Create new instance of dummy signer (accept any AccountProvider)
pub fn new<T>(_ap: T) -> Self {
Signer
}
}
impl super::Accounts for Signer {
fn sign_transaction(&self, _filled: FilledTransactionRequest, _chain_id: Option<u64>, _nonce: U256, _password: SignWith) -> Result<WithToken<SignedTransaction>> {
Err(errors::account("Signing unsupported", "See #9997"))
}
fn sign_message(&self, _address: Address, _password: SignWith, _hash: SignMessage) -> Result<WithToken<Signature>> {
Err(errors::account("Signing unsupported", "See #9997"))
}
fn decrypt(&self, _address: Address, _password: SignWith, _data: Bytes) -> Result<WithToken<Bytes>> {
Err(errors::account("Signing unsupported", "See #9997"))
}
fn supports_prospective_signing(&self, _address: &Address, _password: &SignWith) -> bool {
false
}
fn default_account(&self) -> Address {
Default::default()
}
fn is_unlocked(&self, _address: &Address) -> bool {
false
}
}
}
pub use self::light::LightDispatcher;
pub use self::full::FullDispatcher;
pub use self::signing::Signer;
pub use v1::helpers::nonce::Reservations;
use std::fmt::Debug;
use std::ops::Deref;
use std::sync::Arc;
use bytes::Bytes;
use client_traits::BlockChainClient;
use ethcore::miner::MinerService;
use ethereum_types::{H520, H256, U256, Address};
use ethkey::{Password, Signature};
use hash::keccak;
use types::transaction::{SignedTransaction, PendingTransaction};
use jsonrpc_core::{BoxFuture, Result, Error};
use jsonrpc_core::futures::{future, Future, IntoFuture};
use v1::helpers::{TransactionRequest, FilledTransactionRequest, ConfirmationPayload};
use v1::types::{
Bytes as RpcBytes,
RichRawTransaction as RpcRichRawTransaction,
ConfirmationPayload as RpcConfirmationPayload,
ConfirmationResponse,
EthSignRequest as RpcEthSignRequest,
EIP191SignRequest as RpcSignRequest,
DecryptRequest as RpcDecryptRequest,
};
/// Has the capability to dispatch, sign, and decrypt.
///
/// Requires a clone implementation, with the implication that it be cheap;
/// usually just bumping a reference count or two.
pub trait Dispatcher: Send + Sync + Clone {
// TODO: when ATC exist, use zero-cost
// type Out<T>: IntoFuture<T, Error>
/// Fill optional fields of a transaction request, fetching gas price but not nonce.
fn fill_optional_fields(&self, request: TransactionRequest, default_sender: Address, force_nonce: bool)
-> BoxFuture<FilledTransactionRequest>;
/// Sign the given transaction request without dispatching, fetching appropriate nonce.
fn sign<P>(
&self,
filled: FilledTransactionRequest,
signer: &Arc<dyn Accounts>,
password: SignWith,
post_sign: P,
) -> BoxFuture<P::Item> where
P: PostSign + 'static,
<P::Out as futures::future::IntoFuture>::Future: Send;
/// Converts a `SignedTransaction` into `RichRawTransaction`
fn enrich(&self, SignedTransaction) -> RpcRichRawTransaction;
/// "Dispatch" a local transaction.
fn dispatch_transaction(&self, signed_transaction: PendingTransaction) -> Result<H256>;
}
/// Payload to sign
pub enum SignMessage {
/// Eth-sign kind data (requires prefixing)
Data(Bytes),
/// Prefixed data hash
Hash(H256),
}
/// Abstract transaction signer.
///
/// NOTE This signer is semi-correct, it's a temporary measure to avoid moving too much code.
/// If accounts are ultimately removed all password-dealing endpoints will be wiped out.
pub trait Accounts: Send + Sync {
/// Sign given filled transaction request for the specified chain_id.
fn sign_transaction(&self, filled: FilledTransactionRequest, chain_id: Option<u64>, nonce: U256, password: SignWith) -> Result<WithToken<SignedTransaction>>;
/// Sign given message.
fn sign_message(&self, address: Address, password: SignWith, hash: SignMessage) -> Result<WithToken<Signature>>;
/// Decrypt given message.
fn decrypt(&self, address: Address, password: SignWith, data: Bytes) -> Result<WithToken<Bytes>>;
/// Returns `true` if the accounts can sign multiple times.
fn supports_prospective_signing(&self, address: &Address, password: &SignWith) -> bool;
/// Returns default account.
fn default_account(&self) -> Address;
/// Returns true if account is unlocked (i.e. can sign without a password)
fn is_unlocked(&self, address: &Address) -> bool;
}
/// action to execute after signing
/// e.g importing a transaction into the chain
pub trait PostSign: Send {
/// item that this PostSign returns
type Item: Send;
/// incase you need to perform async PostSign actions
type Out: IntoFuture<Item = Self::Item, Error = Error> + Send;
/// perform an action with the signed transaction
fn execute(self, signer: WithToken<SignedTransaction>) -> Self::Out;
}
impl PostSign for () {
type Item = WithToken<SignedTransaction>;
type Out = Result<Self::Item>;
fn execute(self, signed: WithToken<SignedTransaction>) -> Self::Out {
Ok(signed)
}
}
impl<F: Send, T: Send> PostSign for F
where F: FnOnce(WithToken<SignedTransaction>) -> Result<T>
{
type Item = T;
type Out = Result<Self::Item>;
fn execute(self, signed: WithToken<SignedTransaction>) -> Self::Out {
(self)(signed)
}
}
/// Single-use account token.
pub type AccountToken = Password;
/// Values used to unlock accounts for signing.
#[derive(Clone, PartialEq)]
pub enum SignWith {
/// Nothing -- implies the account is already unlocked.
Nothing,
/// Unlock with password.
Password(Password),
/// Unlock with single-use token.
Token(AccountToken),
}
impl SignWith {
#[cfg(any(test, feature = "accounts"))]
fn is_password(&self) -> bool {
if let SignWith::Password(_) = *self {
true
} else {
false
}
}
}
/// A value, potentially accompanied by a signing token.
pub enum WithToken<T> {
/// No token.
No(T),
/// With token.
Yes(T, AccountToken),
}
impl<T: Debug> Deref for WithToken<T> {
type Target = T;
fn | (&self) -> &Self::Target {
match *self {
WithToken::No(ref v) => v,
WithToken::Yes(ref v, _) => v,
}
}
}
impl<T: Debug> WithToken<T> {
/// Map the value with the given closure, preserving the token.
pub fn map<S, F>(self, f: F) -> WithToken<S> where
S: Debug,
F: FnOnce(T) -> S,
{
match self {
WithToken::No(v) => WithToken::No(f(v)),
WithToken::Yes(v, token) => WithToken::Yes(f(v), token),
}
}
/// Convert into inner value, ignoring possible token.
pub fn into_value(self) -> T {
match self {
WithToken::No(v) => v,
WithToken::Yes(v, _) => v,
}
}
/// Convert the `WithToken` into a tuple.
pub fn into_tuple(self) -> (T, Option<AccountToken>) {
match self {
WithToken::No(v) => (v, None),
WithToken::Yes(v, token) => (v, Some(token))
}
}
}
impl<T: Debug> From<(T, AccountToken)> for WithToken<T> {
fn from(tuple: (T, AccountToken)) -> Self {
WithToken::Yes(tuple.0, tuple.1)
}
}
impl<T: Debug> From<(T, Option<AccountToken>)> for WithToken<T> {
fn from(tuple: (T, Option<AccountToken>)) -> Self {
match tuple.1 {
Some(token) => WithToken::Yes(tuple.0, token),
None => WithToken::No(tuple.0),
}
}
}
/// Execute a confirmation payload.
pub fn execute<D: Dispatcher + 'static>(
dispatcher: D,
signer: &Arc<dyn Accounts>,
payload: ConfirmationPayload,
pass: SignWith
) -> BoxFuture<WithToken<ConfirmationResponse>> {
match payload {
ConfirmationPayload::SendTransaction(request) => {
let condition = request.condition.clone().map(Into::into);
let cloned_dispatcher = dispatcher.clone();
let post_sign = move |with_token_signed: WithToken<SignedTransaction>| {
let (signed, token) = with_token_signed.into_tuple();
let signed_transaction = PendingTransaction::new(signed, condition);
cloned_dispatcher.dispatch_transaction(signed_transaction)
.map(|hash| (hash, token))
};
Box::new(
dispatcher.sign(request, &signer, pass, post_sign).map(|(hash, token)| {
WithToken::from((ConfirmationResponse::SendTransaction(hash), token))
})
)
},
ConfirmationPayload::SignTransaction(request) => {
Box::new(dispatcher.sign(request, &signer, pass, ())
.map(move |result| result
.map(move |tx| dispatcher.enrich(tx))
.map(ConfirmationResponse::SignTransaction)
))
},
ConfirmationPayload::EthSignMessage(address, data) => {
let res = signer.sign_message(address, pass, SignMessage::Data(data))
.map(|result| result
.map(|s| H520(s.into_electrum()))
.map(ConfirmationResponse::Signature)
);
Box::new(future::done(res))
},
ConfirmationPayload::SignMessage(address, data) => {
let res = signer.sign_message(address, pass, SignMessage::Hash(data))
.map(|result| result
.map(|rsv| H520(rsv.into_electrum()))
.map(ConfirmationResponse::Signature)
);
Box::new(future::done(res))
},
ConfirmationPayload::Decrypt(address, data) => {
let res = signer.decrypt(address, pass, data)
.map(|result| result
.map(RpcBytes)
.map(ConfirmationResponse::Decrypt)
);
Box::new(future::done(res))
},
}
}
/// Returns an eth_sign-compatible hash of data to sign.
/// The data is prepended with a special message to prevent
/// malicious DApps from using the function to sign forged transactions.
pub fn eth_data_hash(mut data: Bytes) -> H256 {
let mut message_data =
format!("\x19Ethereum Signed Message:\n{}", data.len())
.into_bytes();
message_data.append(&mut data);
keccak(message_data)
}
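// An illustrative sketch (not part of the original code) of the scheme above:
// for the 5-byte message b"hello" the keccak preimage is the prefix, the
// decimal length, and the raw data concatenated together, so
//
//     eth_data_hash(b"hello".to_vec())
//
// equals keccak(b"\x19Ethereum Signed Message:\n5hello").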
/// Extract the default gas price from a client and miner.
pub fn default_gas_price<C, M>(client: &C, miner: &M, percentile: usize) -> U256 where
C: BlockChainClient,
M: MinerService,
{
client.gas_price_corpus(100).percentile(percentile).cloned().unwrap_or_else(|| miner.sensible_gas_price())
}
/// Convert RPC confirmation payload to signer confirmation payload.
/// May need to resolve in the future to fetch things like gas price.
pub fn from_rpc<D>(payload: RpcConfirmationPayload, default_account: Address, dispatcher: &D) -> BoxFuture<ConfirmationPayload>
where D: Dispatcher
{
match payload {
RpcConfirmationPayload::SendTransaction(request) => {
Box::new(dispatcher.fill_optional_fields(request.into(), default_account, false)
.map(ConfirmationPayload::SendTransaction))
},
RpcConfirmationPayload::SignTransaction(request) => {
Box::new(dispatcher.fill_optional_fields(request.into(), default_account, false)
.map(ConfirmationPayload::SignTransaction))
},
RpcConfirmationPayload::Decrypt(RpcDecryptRequest { address, msg }) => {
Box::new(future::ok(ConfirmationPayload::Decrypt(address, msg.into())))
},
RpcConfirmationPayload::EthSignMessage(RpcEthSignRequest { address, data }) => {
Box::new(future::ok(ConfirmationPayload::EthSignMessage(address, data.into())))
},
RpcConfirmationPayload::EIP191SignMessage(RpcSignRequest { address, data }) => {
Box::new(future::ok(ConfirmationPayload::SignMessage(address, data)))
},
}
}
environment.py | """Module with code to be run before and after certain events during testing."""
import json
import datetime
import subprocess
import os.path
import contextlib
from behave.log_capture import capture
import docker
import requests
import time
from src.s3interface import S3Interface
import logging
logging.getLogger("requests").setLevel(logging.WARNING)
logging.getLogger("urllib3").setLevel(logging.WARNING)
logger = logging.getLogger(__file__)
_THIS_DIR = os.path.dirname(os.path.abspath(__file__))
_REPO_DIR = os.path.dirname(os.path.dirname(_THIS_DIR))
# The following API endpoint is used to check if the system is started
_API_ENDPOINT = 'api/v1'
# The following endpoint is used to get the access token from OSIO AUTH service
_AUTH_ENDPOINT = "/api/token/refresh"
# Ports used by various services
_FABRIC8_ANALYTICS_SERVER = 32000
_FABRIC8_ANALYTICS_JOBS = 34000
_FABRIC8_GREMLIN_SERVICE = 80
_FABRIC8_LICENSE_SERVICE = 80
# Endpoint for jobs debug API
_JOBS_DEBUG_API = _API_ENDPOINT + "/debug"
# Default timeout values for the stack analysis and component analysis endpoints
_DEFAULT_STACK_ANALYSIS_TIMEOUT = 1200
_DEFAULT_COMPONENT_ANALYSIS_TIMEOUT = 1200
def _make_compose_name(suffix='.yml'):
return os.path.join(_REPO_DIR, 'docker-compose' + suffix)
def _set_default_compose_path(context):
base_compose = _make_compose_name()
test_specific_compose = _make_compose_name(".integration-tests.yml")
# Extra containers are added as needed by integration setup commands
context.docker_compose_path = [base_compose, test_specific_compose]
# WARNING: make sure behave uses pytest improved asserts
# Behave runner uses behave.runner.exec_file function to read, compile
# and exec code of environment file and step files *in this order*.
# Therefore we provide a new implementation here, which uses pytest's
# _pytest.assertion.rewrite to rewrite the bytecode with pytest's
# improved asserts.
# This means that when behave tries to load steps, it will use our exec_file.
# => SUCCESS
# Don't ask how long it took me to figure this out.
import behave.runner
def exec_file(filename, globals=None, locals=None):
"""Execute the specified file, optionaly setup its context by using globals and locals."""
if globals is None:
globals = {}
if locals is None:
locals = globals
locals['__file__'] = filename
from py import path
from _pytest import config
from _pytest.assertion import rewrite
f = path.local(filename)
config = config._prepareconfig([], [])
source_stat, code = rewrite._rewrite_test(config, f)
logger.debug('filename: {} source_stat: {} code: {}'.format(filename, source_stat, code))
exec(code, globals, locals)
behave.runner.exec_file = exec_file
# *** end this madness
def _make_compose_command(context, *args):
cmd = ['docker-compose']
for compose_file in context.docker_compose_path:
cmd.append('-f')
cmd.append(compose_file)
cmd.extend(args)
logger.info(cmd)
return cmd
def _start_system(context):
if context.docker_compose_path:
cmd = _make_compose_command(context, 'up', '--no-build', '-d')
else:
cmd = ['kubectl', 'create', '-f', context.kubernetes_dir_path]
subprocess.check_output(cmd, stderr=subprocess.STDOUT)
def _make_compose_teardown_callback(context, services):
cmds = []
cmds.append(_make_compose_command(context, 'kill', *services))
cmds.append(_make_compose_command(context, 'rm', '-fv', *services))
def teardown_services():
for cmd in cmds:
subprocess.check_output(cmd, stderr=subprocess.STDOUT)
return teardown_services
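# A usage sketch (hypothetical service names, not from the original code):
# register the teardown callback with the per-scenario ExitStack created in
# before_scenario(), so the listed services are killed and removed even when
# a scenario fails:
#
#     teardown = _make_compose_teardown_callback(context, ['jobs', 'worker'])
#     context.resource_manager.callback(teardown)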
def _run_command_in_service(context, service, command):
"""Start the specified service.
Service is started via `docker-compose run`;
command is list of strs
"""
if context.docker_compose_path:
cmd = _make_compose_command(context, 'run', '--rm', '-d', service)
cmd.extend(command)
else:
raise Exception("not implemented")
try:
# universal_newlines decodes output on Python 3.x
output = subprocess.check_output(cmd, universal_newlines=True).strip()
logger.info(output)
return output
except subprocess.CalledProcessError as ex:
logger.exception(ex.output)
raise
def _exec_command_in_container(client, container, command):
"""Run the specified command in container.
equiv of `docker exec`, command is str
"""
exec_id = client.exec_create(container, command)
output = client.exec_start(exec_id).decode('utf-8')
logger.info(output)
return output
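# A usage sketch (hypothetical container id): with the low-level docker-py
# client created in before_all() for local runs, execute a command inside an
# already running container and capture its decoded output:
#
#     client = docker.AutoVersionClient()
#     output = _exec_command_in_container(client, container_id, 'ps aux')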
def _get_k8s_volumes_to_delete():
# universal_newlines decodes output on Python 3.x
out = subprocess.check_output(['kubectl', 'get', 'pods', '-o', 'json'], universal_newlines=True)
j = json.loads(out)
volumes = []
for pod in j['items']:
pod_vols = pod['spec'].get('volumes', [])
for pod_vol in pod_vols:
if 'hostPath' in pod_vol:
volumes.append(pod_vol['hostPath']['path'])
return volumes
def _dump_server_logs(context, tail=None):
if context.docker_compose_path:
cmd = _make_compose_command(context, 'logs')
if tail is not None:
cmd.append('--tail={:d}'.format(tail))
subprocess.check_call(cmd, stderr=subprocess.STDOUT)
else:
pass # No current support for dumping logs under k8s
def _teardown_system(context):
cmds = []
if context.docker_compose_path:
cmds.append(_make_compose_command(context, 'kill'))
cmds.append(_make_compose_command(context, 'rm', '-fv'))
if hasattr(context, "container"):
cmds.append(['docker', "kill", context.container])
cmds.append(['docker', "rm", "-fv", "--rm-all", context.container])
_set_default_compose_path(context)
else:
cmds.append(['kubectl', 'delete', '--ignore-not-found', '-f', context.kubernetes_dir_path])
volumes = _get_k8s_volumes_to_delete()
for volume in volumes:
# TODO: the sudo thing is not very nice, but...
cmds.append(['sudo', 'rm', '-rf', volume])
cmds.append(['sudo', 'mkdir', volume])
for cmd in cmds:
subprocess.check_output(cmd, stderr=subprocess.STDOUT)
def _post_startup(context, started_all, wait_for_server):
"""Post startup actions."""
if started_all:
# let's give the whole system a while to breathe
time.sleep(float(context.config.userdata.get('breath_time', 5)))
else:
raise Exception('Server failed to start within {s} seconds'.
format(s=wait_for_server))
def _wait_for_system(context, wait_for_server=60):
start = datetime.datetime.utcnow()
wait_till = start + datetime.timedelta(seconds=wait_for_server)
# try to wait for server to start for some time
while datetime.datetime.utcnow() < wait_till:
time.sleep(1)
started_all = False
if context.kubernetes_dir_path:
res = json.loads(subprocess.check_output(['kubectl', 'get', 'pods', '-o', 'json']))
for pod in res['items']:
status = pod.get('status', {})
conditions = status.get('conditions', [])
phase = status.get('phase', '')
if status == {}:
continue
if phase != 'Running':
continue
# skip pods that report a Ready condition other than 'True'
if any(condition['type'] == 'Ready' and condition['status'] != 'True'
for condition in conditions):
continue
# if we got here, then the pod is running and ready
started_all = True
break
else:
if _is_running(context):
started_all = True
break
_post_startup(context, started_all, wait_for_server)
def _wait_for_api(context, wait_for_service, check_function):
for _ in range(wait_for_service):
if check_function(context):
break
time.sleep(1)
else:
raise Exception('Timeout waiting for the API service')
def _wait_for_jobs_debug_api_service(context, wait_for_service=60):
_wait_for_api(context, wait_for_service, _is_jobs_debug_api_running)
def _wait_for_component_search_service(context, wait_for_service=60):
_wait_for_api(context, wait_for_service, _is_component_search_service_running)
def _wait_for_master_tag_list_service(context, wait_for_service=60):
_wait_for_api(context, wait_for_service, _is_master_tag_list_service_running)
def _wait_for_get_untagged_component_service(context, wait_for_service=60):
_wait_for_api(context, wait_for_service, _is_get_untagged_component_service_running)
def _restart_system(context, wait_for_server=60):
# NOTE: it does make sense to restart the local system only
if context.running_locally:
try:
_teardown_system(context)
_start_system(context)
_wait_for_system(context, wait_for_server)
except subprocess.CalledProcessError as e:
raise Exception('Failed to restart system. Command "{c}" failed:\n{o}'.
format(c=' '.join(e.cmd), o=e.output))
def _is_api_running(url, accepted_codes=None):
accepted_codes = accepted_codes or {200, 401}
try:
res = requests.get(url)
if res.status_code in accepted_codes:
return True
except requests.exceptions.ConnectionError:
pass
return False
def _is_3scale_staging_running(threescale_url, accepted_codes=None):
accepted_codes = accepted_codes or {200, 401}
try:
res = requests.post(threescale_url)
if res.status_code in accepted_codes:
return True
except requests.exceptions.ConnectionError:
pass
return False
def _is_3scale_preview_running(context, accepted_codes=None):
accepted_codes = accepted_codes or {200, 403, 401}
try:
res = requests.post(context.threescale_preview_url)
if res.status_code in accepted_codes:
return True
except requests.exceptions.ConnectionError:
pass
return False
def _is_backbone_api_running(backbone_api_url, accepted_codes=None):
accepted_codes = accepted_codes or {200}
try:
url = '%s/api/v1/readiness' % backbone_api_url
res = requests.get(url)
if res.status_code in accepted_codes:
return True
except requests.exceptions.ConnectionError:
pass
return False
def _is_gemini_api_running(gemini_api_url, accepted_codes=None):
accepted_codes = accepted_codes or {200}
try:
url = '%s/api/v1/readiness' % gemini_api_url
res = requests.get(url)
if res.status_code in accepted_codes:
return True
except requests.exceptions.ConnectionError:
pass
return False
def _is_api_running_post(url):
try:
res = requests.post(url)
if res.status_code in {200, 401}:
return True
except requests.exceptions.ConnectionError:
pass
return False
def _is_running(context):
return _is_api_running(context.coreapi_url + _API_ENDPOINT) and \
_is_api_running(context.jobs_api_url + _API_ENDPOINT) and \
_is_api_running(context.gremlin_url, {400})
def _is_jobs_debug_api_running(context):
return _is_api_running(context.jobs_api_url + _JOBS_DEBUG_API +
"/analyses-report?ecosystem=maven")
def _is_component_search_service_running(context):
return _is_api_running(context.coreapi_url + _API_ENDPOINT +
"/component-search/any-component")
def _is_master_tag_list_service_running(context):
return _is_api_running(context.coreapi_url + _API_ENDPOINT +
"/master-tags/maven")
def _is_get_untagged_component_service_running(context):
return _is_api_running_post(context.coreapi_url + _API_ENDPOINT +
"/get-next-component/maven")
def _read_boolean_setting(context, setting_name):
setting = context.config.userdata.get(setting_name, '').lower()
if setting in ('1', 'yes', 'true', 'on'):
return True
if setting in ('', '0', 'no', 'false', 'off'):
return False
msg = '{!r} is not a valid option for boolean setting {!r}'
raise ValueError(msg.format(setting, setting_name))
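# Example: boolean settings come from behave userdata, so running
# `behave -D dump_logs=yes` makes _read_boolean_setting(context, 'dump_logs')
# return True, a missing setting defaults to False, and any other value
# (e.g. `-D dump_logs=maybe`) raises ValueError.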
def _add_slash(url):
if url and not url.endswith('/'):
url += '/'
return url
def _get_api_url(context, attribute, port):
return _add_slash(context.config.userdata.get(attribute,
'http://localhost:{port}/'.format(port=port)))
def _send_json_file(endpoint, filename, custom_headers=None):
"""Send the JSON file to the selected API endpoint.
The optional custom header is used (given it is provided).
"""
headers = {'Content-Type': 'application/json',
'Accept': 'application/json'}
if custom_headers is not None:
headers.update(custom_headers)
with open(filename) as json_data:
response = requests.post(endpoint, data=json_data, headers=headers)
return response
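# A usage sketch (hypothetical endpoint, file name, and token): post a
# prepared JSON payload with an authorization header and check the status:
#
#     response = _send_json_file(
#         context.coreapi_url + _API_ENDPOINT + '/stack-analyses',
#         'data/maven_request.json',
#         {'Authorization': 'Bearer {t}'.format(t=token)})
#     assert response.status_code == 200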
def _check_env_for_remote_tests(env_var_name):
if os.environ.get(env_var_name):
logger.info("Note: {e} environment variable is specified, but tests are "
"still run locally\n"
"Check other values required to run tests against existing "
"deployent".format(e=env_var_name))
def _missing_api_token_warning(env_var_name):
if os.environ.get(env_var_name):
logger.info("OK: {name} environment is set and will be used as "
"authorization token".format(name=env_var_name))
else:
logger.info("Warning: the {name} environment variable is not"
" set.\n"
"Most tests that require authorization will probably fail".format(
name=env_var_name))
def _check_api_tokens_presence():
# we need RECOMMENDER_API_TOKEN or RECOMMENDER_REFRESH_TOKEN to be set
if not os.environ.get("RECOMMENDER_REFRESH_TOKEN"):
_missing_api_token_warning("RECOMMENDER_API_TOKEN")
else:
_missing_api_token_warning("RECOMMENDER_REFRESH_TOKEN")
_missing_api_token_warning("JOB_API_TOKEN")
def _check_env_var_presence_s3_db(env_var_name):
"""Check if given environment variable exist.
Check the existence of environment variable needed to connect to the
AWS S3 database.
"""
if os.environ.get(env_var_name) is None:
logger.info("Warning: the {name} environment variable is not set.\n"
"All tests that access AWS S3 database will fail\n".format(
name=env_var_name))
def _parse_int_env_var(env_var_name):
val = os.environ.get(env_var_name)
try:
return int(val)
except (TypeError, ValueError):
return None
def _read_url_from_env_var(env_var_name):
return _add_slash(os.environ.get(env_var_name, None))
def check_test_environment(context, coreapi_url):
"""Check the test environent - whether tests are run locally or in Docker."""
if context.running_locally:
logger.info("Note: integration tests are running localy via docker-compose")
if coreapi_url:
_check_env_for_remote_tests("F8A_API_URL")
_check_env_for_remote_tests("F8A_JOB_API_URL")
_check_env_for_remote_tests("F8A_GEMINI_API_URL")
else:
logger.info("Note: integration tests are running against existing deployment")
_check_api_tokens_presence()
def _running_locally(coreapi_url, jobs_api_url):
"""Check if tests are running locally."""
return not (coreapi_url and jobs_api_url)
def _get_url(context, actual, attribute_name, port):
"""Get the URL + port for the selected service."""
return actual or _get_api_url(context, attribute_name, port)
def check_token_structure(data):
"""Check the basic structure of response with access token."""
assert "token" in data
token_structure = data["token"]
assert "access_token" in token_structure
assert "token_type" in token_structure
assert "expires_in" in token_structure
def before_all(context):
"""Perform the setup before the first event."""
context.config.setup_logging()
context.start_system = _start_system
context.teardown_system = _teardown_system
context.restart_system = _restart_system
context.run_command_in_service = _run_command_in_service
context.exec_command_in_container = _exec_command_in_container
context.is_running = _is_running
context.is_jobs_debug_api_running = _is_jobs_debug_api_running
context.is_component_search_service_running = _is_component_search_service_running
context.is_master_tag_list_service_running = _is_master_tag_list_service_running
context.wait_for_master_tag_list_service = _wait_for_master_tag_list_service
context.is_get_untagged_component_service_running = _is_get_untagged_component_service_running
context.wait_for_get_untagged_component_service = _wait_for_get_untagged_component_service
context.send_json_file = _send_json_file
context.wait_for_jobs_debug_api_service = _wait_for_jobs_debug_api_service
context.wait_for_component_search_service = _wait_for_component_search_service
context.is_3scale_staging_running = _is_3scale_staging_running
context.is_3scale_preview_running = _is_3scale_preview_running
context.is_backbone_api_running = _is_backbone_api_running
context.is_gemini_api_running = _is_gemini_api_running
# Configure container logging
context.dump_logs = _read_boolean_setting(context, 'dump_logs')
tail_logs = int(context.config.userdata.get('tail_logs', 0))
dump_errors = _read_boolean_setting(context, 'dump_errors')
if tail_logs:
dump_errors = True
else:
tail_logs = 50
context.dump_errors = dump_errors
context.tail_logs = tail_logs
# Configure system under test
context.kubernetes_dir_path = context.config.userdata.get('kubernetes_dir', None)
if context.kubernetes_dir_path is not None:
context.docker_compose_path = None
else:
# If we're not running Kubernetes, use the local Docker Compose setup
_set_default_compose_path(context)
# for now, we just assume we know what the compose file looks like (i.e. which services need which images)
context.images = {}
context.images['bayesian/bayesian-api'] = context.config.userdata.get(
'coreapi_server_image',
'registry.devshift.net/bayesian/bayesian-api')
context.images['bayesian/cucos-worker'] = context.config.userdata.get(
'coreapi_worker_image',
'registry.devshift.net/bayesian/cucos-worker')
core_v2_api_url = _read_url_from_env_var('F8A_API_V2_URL')
coreapi_url = _read_url_from_env_var('F8A_API_URL')
valid_synk_token = os.environ.get("SNYK_TOKEN")
uuid = os.environ.get("REGISTERED_USER_UUID")
jobs_api_url = _read_url_from_env_var('F8A_JOB_API_URL')
gremlin_url = _read_url_from_env_var('F8A_GREMLIN_URL')
threescale_url = _read_url_from_env_var('F8A_3SCALE_URL')
threescale_preview_url = _read_url_from_env_var('F8A_THREE_SCALE_PREVIEW_URL')
backbone_api_url = _read_url_from_env_var('F8A_BACKBONE_API_URL')
service_id = _read_url_from_env_var('F8A_SERVICE_ID')
gemini_api_url = _read_url_from_env_var('F8A_GEMINI_API_URL')
license_service_url = _read_url_from_env_var('F8A_LICENSE_SERVICE_URL')
context.running_locally = _running_locally(coreapi_url, jobs_api_url)
check_test_environment(context, coreapi_url)
context.coreapi_url = _get_url(context, coreapi_url, 'coreapi_url', _FABRIC8_ANALYTICS_SERVER)
context.core_v2_api_url = core_v2_api_url
context.jobs_api_url = _get_url(context, jobs_api_url, 'jobs_api_url', _FABRIC8_ANALYTICS_JOBS)
context.gremlin_url = _get_url(context, gremlin_url, "gremlin_url", _FABRIC8_GREMLIN_SERVICE)
context.license_service_url = _get_url(context, license_service_url, 'license_service_url',
_FABRIC8_LICENSE_SERVICE)
context.threescale_url = threescale_url
context.valid_synk_token = valid_synk_token
context.uuid = uuid
context.threescale_preview_url = threescale_preview_url
context.backbone_api_url = backbone_api_url
context.service_id = service_id
context.gemini_api_url = gemini_api_url
# we can retrieve the access token by using the refresh/offline token
# information needed to access the AWS S3 database from tests
_check_env_var_presence_s3_db('AWS_ACCESS_KEY_ID')
_check_env_var_presence_s3_db('AWS_SECRET_ACCESS_KEY')
_check_env_var_presence_s3_db('S3_REGION_NAME')
aws_access_key_id = os.environ.get('AWS_ACCESS_KEY_ID')
aws_secret_access_key = os.environ.get('AWS_SECRET_ACCESS_KEY')
s3_region_name = os.environ.get('S3_REGION_NAME')
deployment_prefix = os.environ.get('DEPLOYMENT_PREFIX', 'STAGE')
context.reports_bucket = os.environ.get('DEVELOPER_ANALYTICS_REPORTS_BUCKET')
context.s3interface = S3Interface(aws_access_key_id, aws_secret_access_key,
s3_region_name, deployment_prefix)
context.client = None
# timeout values can be overwritten by environment variables
stack_analysis_timeout = _parse_int_env_var('F8A_STACK_ANALYSIS_TIMEOUT')
component_analysis_timeout = _parse_int_env_var('F8A_COMPONENT_ANALYSIS_TIMEOUT')
context.stack_analysis_timeout = stack_analysis_timeout or _DEFAULT_STACK_ANALYSIS_TIMEOUT
context.component_analysis_timeout = component_analysis_timeout \
or _DEFAULT_COMPONENT_ANALYSIS_TIMEOUT
if context.running_locally:
context.client = docker.AutoVersionClient()
for desired, actual in context.images.items():
desired = 'registry.devshift.net/' + desired
if desired != actual:
context.client.tag(actual, desired, force=True)
# Specify the analyses checked for when looking for "complete" results
def _get_expected_component_analyses(ecosystem):
common = context.EXPECTED_COMPONENT_ANALYSES
specific = context.ECOSYSTEM_DEPENDENT_ANALYSES.get(ecosystem, set())
return common | specific
context.get_expected_component_analyses = _get_expected_component_analyses
def _compare_analysis_sets(actual, expected):
unreliable = context.UNRELIABLE_ANALYSES
missing = expected - actual - unreliable
unexpected = actual - expected - unreliable
return missing, unexpected
context.compare_analysis_sets = _compare_analysis_sets
context.EXPECTED_COMPONENT_ANALYSES = {
'metadata', 'source_licenses', 'digests',
'dependency_snapshot', 'code_metrics'
# The following workers are currently disabled by default:
# 'static_analysis', 'binary_data', 'languages', 'crypto_algorithms'
}
# Analyses that are only executed for particular language ecosystems
context.ECOSYSTEM_DEPENDENT_ANALYSES = dict()
# Results that use a nonstandard format, so we don't check for the
# standard "status", "summary", and "details" keys
context.NONSTANDARD_ANALYSIS_FORMATS = set()
# Analyses that are just plain unreliable and so need to be excluded from
# consideration when determining whether or not an analysis is complete
context.UNRELIABLE_ANALYSES = {
'github_details', # if no github api token provided
'security_issues' # needs Snyk vulndb in S3
}
@capture
def before_scenario(context, scenario):
"""Perform the setup before each scenario is run."""
context.resource_manager = contextlib.ExitStack()
@capture
def after_scenario(context, scenario):
"""Perform the cleanup after each scenario is run."""
if context.running_locally:
if context.dump_logs or (context.dump_errors and scenario.status == "failed"):
try:
_dump_server_logs(context, int(context.tail_logs))
except subprocess.CalledProcessError as e:
raise Exception('Failed to dump server logs. Command "{c}" failed:\n{o}'.
format(c=' '.join(e.cmd), o=e.output))
# Clean up resources (which may destroy some container logs)
context.resource_manager.close()
@capture
def after_all(context):
"""Perform the cleanup after the last event."""
if context.running_locally:
try:
_teardown_system(context)
except subprocess.CalledProcessError as e:
raise Exception('Failed to teardown system. Command "{c}" failed:\n{o}'.
format(c=' '.join(e.cmd), o=e.output))