| file_name (large_string, lengths 4–140) | prefix (large_string, lengths 0–39k) | suffix (large_string, lengths 0–36.1k) | middle (large_string, lengths 0–29.4k) | fim_type (large_string, 4 classes) |
|---|---|---|---|---|
sql_utils.rs | //! Module for SQL Utility functions
use diesel::prelude::*;
use std::{
borrow::Cow,
fs::File,
io::BufReader,
path::Path,
};
use crate::error::IOErrorToError;
use super::archive::import::{
detect_archive_type,
import_ytdlr_json_archive,
ArchiveType,
ImportProgress,
};
/// All migrations from "libytdlr/migrations" embedded into the binary
pub const MIGRATIONS: diesel_migrations::EmbeddedMigrations = diesel_migrations::embed_migrations!();
/// Open a SQLite connection for `sqlite_path` and apply SQLite migrations
/// does not migrate archive formats; use [migrate_and_connect] instead
pub fn sqlite_connect<P: AsRef<Path>>(sqlite_path: P) -> Result<SqliteConnection, crate::Error> {
// having to convert the path to "str" because diesel (and underlying sqlite library) only accept strings
return match sqlite_path.as_ref().to_str() {
Some(path) => {
let mut connection = SqliteConnection::establish(path)?;
apply_sqlite_migrations(&mut connection)?;
Ok(connection)
},
None => Err(crate::Error::other(format!("SQLite only accepts UTF-8 Paths, and given path failed to be converted to a string without being lossy, Path (converted lossy): \"{}\"", sqlite_path.as_ref().to_string_lossy()))),
};
}
/// Apply all (up) migrations to a SQLite Database
#[inline]
fn apply_sqlite_migrations(connection: &mut SqliteConnection) -> Result<(), crate::Error> {
let applied = diesel_migrations::MigrationHarness::run_pending_migrations(connection, MIGRATIONS)
.map_err(|err| return crate::Error::other(format!("Applying SQL Migrations Errored! Error:\n{err}")))?;
debug!("Applied Migrations: {:?}", applied);
return Ok(());
}
/// Check if the input path is a SQLite database; if not, migrate it to SQLite and return the new path and an open connection
/// Parameter `pgcb` will be called to report progress while the migration is applied
///
/// This function is intended to be used over [`sqlite_connect`] in all non-test cases
pub fn migrate_and_connect<S: FnMut(ImportProgress)>(
archive_path: &Path,
pgcb: S,
) -> Result<(Cow<Path>, SqliteConnection), crate::Error> {
// early return in case the file does not actually exist
if !archive_path.exists() {
return Ok((archive_path.into(), sqlite_connect(archive_path)?));
}
let migrate_to_path = {
let mut tmp = archive_path.to_path_buf();
tmp.set_extension("db");
tmp
};
// check if the "migrate-to" path already exists, and use that directly instead or error of already existing
if migrate_to_path.exists() {
if !migrate_to_path.is_file() {
return Err(crate::Error::not_a_file(
"Migrate-To Path exists but is not a file!",
migrate_to_path,
));
}
let mut sqlite_path_reader = BufReader::new(File::open(&migrate_to_path).attach_path_err(&migrate_to_path)?);
return Ok(
match detect_archive_type(&mut sqlite_path_reader)? {
ArchiveType::Unknown => return Err(crate::Error::other(format!("Migrate-To Path already exists, but is of unknown type! Path: \"{}\"", migrate_to_path.to_string_lossy()))),
ArchiveType::JSON => return Err(crate::Error::other(format!("Migrate-To Path already exists and is a JSON archive, please rename it and retry the migration! Path: \"{}\"", migrate_to_path.to_string_lossy()))),
ArchiveType::SQLite => {
// this has to be done first, because the following ".into()" call moves the value
let connection = sqlite_connect(&migrate_to_path)?;
(migrate_to_path.into(), connection)
},
},
);
}
let mut input_archive_reader = BufReader::new(File::open(archive_path).attach_path_err(archive_path)?);
return Ok(match detect_archive_type(&mut input_archive_reader)? {
ArchiveType::Unknown => {
return Err(crate::Error::other(
"Unknown Archive type to migrate, maybe try importing",
))
},
ArchiveType::JSON => {
debug!("Applying Migration from JSON to SQLite");
// handle case where the input path matches the changed path
if migrate_to_path == archive_path {
return Err(crate::Error::other(
"Migration cannot be done: Input path matches output path (setting extension to \".db\")",
));
}
let mut connection = sqlite_connect(&migrate_to_path)?;
import_ytdlr_json_archive(&mut input_archive_reader, &mut connection, pgcb)?;
debug!("Migration from JSON to SQLite done");
(migrate_to_path.into(), connection)
},
ArchiveType::SQLite => (archive_path.into(), sqlite_connect(archive_path)?),
});
}
#[cfg(test)]
mod test {
use super::*;
use tempfile::{
Builder as TempBuilder,
TempDir,
};
fn create_connection() -> (SqliteConnection, TempDir) {
let testdir = TempBuilder::new()
.prefix("ytdl-test-sqlite-")
.tempdir()
.expect("Expected a temp dir to be created");
// chrono is used to create a different database for each thread
let path = testdir.as_ref().join(format!("{}-sqlite.db", chrono::Utc::now()));
// remove if already exists to have a clean test
if path.exists() {
std::fs::remove_file(&path).expect("Expected the file to be removed");
}
return (
crate::main::sql_utils::sqlite_connect(&path).expect("Expected SQLite to successfully start"),
testdir,
);
}
mod connect {
use super::*;
use std::{
ffi::OsString,
os::unix::prelude::OsStringExt,
};
#[test]
fn test_connect() {
let testdir = TempBuilder::new()
.prefix("ytdl-test-sqliteConnect-")
.tempdir()
.expect("Expected a temp dir to be created");
let path = testdir.as_ref().join(format!("{}-sqlite.db", chrono::Utc::now()));
std::fs::create_dir_all(path.parent().expect("Expected the file to have a parent"))
.expect("expected the directory to be created");
let connection = sqlite_connect(path);
assert!(connection.is_ok());
}
// non-UTF-8 paths are a pain to create OS-independently, so the following test only runs on Linux
#[cfg(target_os = "linux")]
#[test]
fn test_connect_notutf8() {
let path = OsString::from_vec(vec![255]);
let err = sqlite_connect(path);
assert!(err.is_err());
// Not using "unwrap_err", because of https://github.com/diesel-rs/diesel/discussions/3124
let err = match err {
Ok(_) => panic!("Expected an Error value"),
Err(err) => err,
};
// the following is only a "contains" check, because of the arbitrary path that could follow it
assert!(err.to_string().contains("SQLite only accepts UTF-8 Paths, and given path failed to be converted to a string without being lossy, Path (converted lossy):"));
}
}
mod apply_sqlite_migrations {
use super::*;
#[test]
fn test_all_migrations_applied() {
let (mut connection, _tempdir) = create_connection();
let res = diesel_migrations::MigrationHarness::has_pending_migration(&mut connection, MIGRATIONS);
assert!(res.is_ok());
let res = res.unwrap();
assert!(!res);
}
}
mod migrate_and_connect {
use std::{
ffi::OsStr,
io::{
BufWriter,
Write,
},
ops::Deref,
path::PathBuf,
sync::RwLock,
};
use super::*;
fn gen_archive_path<P: AsRef<OsStr>>(extension: P) -> (PathBuf, TempDir) {
let testdir = TempBuilder::new()
.prefix("ytdl-test-sqliteMigrate-")
.tempdir()
.expect("Expected a temp dir to be created");
let mut path = testdir.as_ref().join(format!("{}-gen_archive", uuid::Uuid::new_v4()));
path.set_extension(extension);
println!("generated: {}", path.to_string_lossy());
// clear generated path
clear_path(&path);
{
let mut migrate_to_path = path.clone();
migrate_to_path.set_extension("db");
// clear migrate_to_path
clear_path(migrate_to_path);
}
return (path, testdir);
}
fn clear_path<P: AsRef<Path>>(path: P) {
let path = path.as_ref();
if path.exists() {
std::fs::remove_file(path).expect("Expected file to be removed");
}
}
fn create_dir_all_parent<P: AsRef<Path>>(path: P) {
let path = path.as_ref();
std::fs::create_dir_all(path.parent().expect("Expected the file to have a parent"))
.expect("expected the directory to be created");
}
fn write_file_with_content<S: AsRef<str>, P: AsRef<OsStr>>(input: S, extension: P) -> (PathBuf, TempDir) {
let (path, tempdir) = gen_archive_path(extension);
create_dir_all_parent(&path);
let mut file = BufWriter::new(std::fs::File::create(&path).expect("Expected file to be created"));
file.write_all(input.as_ref().as_bytes())
.expect("Expected successfull file write");
return (path, tempdir);
}
/// Test utility function for easy callbacks
fn | (c: &RwLock<Vec<ImportProgress>>) -> impl FnMut(ImportProgress) + '_ {
return |imp| c.write().expect("write failed").push(imp);
}
#[test]
fn test_input_unknown_archive() {
let string0 = "
youtube ____________
youtube ------------
youtube aaaaaaaaaaaa
soundcloud 0000000000
";
let (path, _tempdir) = write_file_with_content(string0, "unknown_ytdl");
let pgcounter = RwLock::new(Vec::<ImportProgress>::new());
let res = migrate_and_connect(&path, callback_counter(&pgcounter));
assert!(res.is_err());
let res = match res {
Ok(_) => panic!("Expected an Error value"),
Err(err) => err,
};
assert!(res
.to_string()
.contains("Unknown Archive type to migrate, maybe try importing"));
assert_eq!(0, pgcounter.read().expect("read failed").len());
}
#[test]
fn test_input_sqlite_archive() {
let (path, _tempdir) = gen_archive_path("db_sqlite");
create_dir_all_parent(&path);
{
// create database file
assert!(sqlite_connect(&path).is_ok());
}
let pgcounter = RwLock::new(Vec::<ImportProgress>::new());
let res = migrate_and_connect(&path, callback_counter(&pgcounter));
assert!(res.is_ok());
let res = res.unwrap();
assert_eq!(&path, res.0.as_ref());
assert_eq!(0, pgcounter.read().expect("read failed").len());
}
#[test]
fn test_input_json_archive() {
let string0 = r#"
{
"version": "0.1.0",
"videos": [
{
"id": "____________",
"provider": "youtube",
"dlFinished": true,
"editAsked": true,
"fileName": "someFile1.mp3"
},
{
"id": "------------",
"provider": "youtube",
"dlFinished": false,
"editAsked": true,
"fileName": "someFile2.mp3"
},
{
"id": "aaaaaaaaaaaa",
"provider": "youtube",
"dlFinished": true,
"editAsked": false,
"fileName": "someFile3.mp3"
},
{
"id": "0000000000",
"provider": "soundcloud",
"dlFinished": true,
"editAsked": true,
"fileName": "someFile4.mp3"
}
]
}
"#;
let (path, _tempdir) = write_file_with_content(string0, "json_json");
let expected_path = {
let mut tmp = path.clone();
tmp.set_extension("db");
tmp
};
clear_path(&expected_path);
let pgcounter = RwLock::new(Vec::<ImportProgress>::new());
let res = migrate_and_connect(&path, callback_counter(&pgcounter));
assert!(res.is_ok());
let res = res.unwrap();
assert_eq!(&expected_path, res.0.as_ref());
assert_eq!(
&vec![
ImportProgress::Starting,
ImportProgress::SizeHint(4), // size hint of 4, because of an intermediate array length
// indices start at 0, matching the JSON array indices
ImportProgress::Increase(1, 0),
ImportProgress::Increase(1, 1),
ImportProgress::Increase(1, 2),
ImportProgress::Increase(1, 3),
ImportProgress::Finished(4)
],
pgcounter.read().expect("failed to read").deref()
);
}
#[test]
fn test_to_existing_json() {
let string0 = r#"
{
}
"#;
let (path, _tempdir) = write_file_with_content(string0, "db");
let pgcounter = RwLock::new(Vec::<ImportProgress>::new());
let res = migrate_and_connect(&path, callback_counter(&pgcounter));
assert!(res.is_err());
let res = match res {
Ok(_) => panic!("Expected an Error value"),
Err(err) => err,
};
assert_eq!(
res.to_string(),
format!("Other: Migrate-To Path already exists and is a JSON archive, please rename it and retry the migration! Path: \"{}\"", path.to_string_lossy())
);
assert_eq!(0, pgcounter.read().expect("read failed").len());
}
#[test]
fn test_to_existing_unknown() {
let string0 = "
youtube ____________
youtube ------------
youtube aaaaaaaaaaaa
soundcloud 0000000000
";
let (path, _tempdir) = write_file_with_content(string0, "db");
let pgcounter = RwLock::new(Vec::<ImportProgress>::new());
let res = migrate_and_connect(&path, callback_counter(&pgcounter));
assert!(res.is_err());
let res = match res {
Ok(_) => panic!("Expected an Error value"),
Err(err) => err,
};
assert_eq!(
res.to_string(),
format!(
"Other: Migrate-To Path already exists, but is of unknown type! Path: \"{}\"",
path.to_string_lossy()
)
);
assert_eq!(0, pgcounter.read().expect("read failed").len());
}
#[test]
fn test_to_existing_sqlite() {
let (path, _tempdir) = gen_archive_path("db");
create_dir_all_parent(&path);
{
// create database file
assert!(sqlite_connect(&path).is_ok());
}
let pgcounter = RwLock::new(Vec::<ImportProgress>::new());
let res = migrate_and_connect(&path, callback_counter(&pgcounter));
assert!(res.is_ok());
let res = res.unwrap();
assert_eq!(&path, res.0.as_ref());
assert_eq!(0, pgcounter.read().expect("read failed").len());
}
}
}
| callback_counter | identifier_name |
__init__.py | import numpy as np, functools as ft, itertools as it, pandas
import sys, cStringIO, contextlib
import re
from grading import *
def filter_trial(frame, exp_id, trial_id=None):
if trial_id is None:
return frame[frame.exp_id == exp_id]
else:
return frame[(frame.exp_id == exp_id) & (frame.trial_id == trial_id)]
def filter_program(frame, base, version=None):
if version is None:
return frame[frame.base == base]
else:
return frame[(frame.base == base) & (frame.version == version)]
def filter_aois(frame, kind, name=None):
if name is None:
return frame[frame.kind == kind]
else:
return frame[(frame.kind == kind) & (frame.name == name)]
def comma_list_contains(s, list_str):
return s in list_str.split(",")
def comma_list_contains_any(fun, list_str):
for s in list_str.split(","):
if fun(s):
return True
return False
def filter_lines(fixations, hit_kind="circle", offset_kind="manual experiment"):
name_filter = ft.partial(comma_list_contains_any, lambda s: s.startswith("line "))
line_fixes = fixations[(fixations.hit_kind == hit_kind) &
(fixations.offset_kind == offset_kind) &
(fixations.aoi_names.apply(name_filter))]
line_fixes["line"] = line_fixes.aoi_name.apply(lambda n: int(n.split(" ")[1]))
return line_fixes
def split_by_median(frame, column):
m = frame[column].median()
return frame[frame[column] <= m], frame[frame[column] > m]
def split_by_boolean(frame, column):
return frame[frame[column]], frame[np.invert(frame[column])]
def contrast_color(rgba):
a = 1 - ((0.299 * rgba[0]) + (0.587 * rgba[1]) + (0.114 * rgba[2]))
return "black" if a < 0.5 else "white"
def transition_matrix(lines, num_lines=None):
if num_lines is None:
num_lines = max(lines)
trans_counts = np.zeros(shape=(num_lines, num_lines))
for l1, l2 in zip(lines, lines[1:]):
trans_counts[l1 - 1, l2 - 1] += 1
# Normalize by rows
row_sums = trans_counts.sum(axis=1)
trans_probs = trans_counts / row_sums.reshape((-1, 1))
# Get rid of NaNs
return np.nan_to_num(trans_probs)
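# Example usage (illustrative; the line sequence below is made up): the
# transitions 1->2, 2->2, 2->3, 3->1 yield row-normalized probabilities.
def _example_transition_matrix():
    probs = transition_matrix([1, 2, 2, 3, 1], num_lines=3)
    assert np.allclose(probs[0], [0.0, 1.0, 0.0])
    assert np.allclose(probs[1], [0.0, 0.5, 0.5])
    assert np.allclose(probs[2], [1.0, 0.0, 0.0])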
def norm_by_rows(matrix):
"""Normalizes a numpy array by rows (axis 1).
Parameters
----------
matrix : array_like
A numpy array with at least two axes.
Returns
-------
a : array_like
A row-normalized numpy array
"""
row_sums = matrix.sum(axis=1)
return matrix / row_sums.reshape((-1, 1))
def gauss_kern(size, sigma=1.0):
""" Returns a normalized 2D gauss kernel array for convolutions """
h1 = size[0]
h2 = size[1]
x, y = np.mgrid[0:h2, 0:h1]
x = x-h2/2
y = y-h1/2
g = np.exp( -( x**2 + y**2 ) / (2*sigma**2) );
return g / g.sum()
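# Example usage (illustrative): the kernel is normalized, so its entries
# sum to 1 regardless of the requested size and sigma.
def _example_gauss_kern():
    kernel = gauss_kern((5, 5), sigma=1.0)
    assert kernel.shape == (5, 5)
    assert np.isclose(kernel.sum(), 1.0)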
def make_heatmap(points, screen_size, point_size, sigma_denom=5.0):
point_radius = point_size / 2
screen = np.zeros((screen_size[0] + point_size, screen_size[1] + point_size))
kernel = gauss_kern((point_size, point_size), sigma=(point_size / sigma_denom))
for pt in points:
x_start, y_start = pt[0], pt[1]
x_end, y_end = x_start + point_size, y_start + point_size
scr_slice = screen[x_start:x_end, y_start:y_end]
width, height = scr_slice.shape[0], scr_slice.shape[1]
screen[x_start:x_end, y_start:y_end] = scr_slice + kernel[:width, :height]
screen = screen / screen.max()
screen = screen[point_radius:-point_radius,
point_radius:-point_radius]
return screen
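# Example usage (illustrative; screen size, point size, and points are made
# up): the returned heatmap is trimmed back to the screen size and scaled
# so its maximum is at most 1.
def _example_make_heatmap():
    heat = make_heatmap([(10, 10), (12, 11)], screen_size=(40, 30), point_size=8)
    assert heat.shape == (40, 30)
    assert heat.max() <= 1.0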
def python_line_tokens(code_lines, blank_lines=False):
from pygments.lexers import PythonLexer
lexer = PythonLexer()
code_str = "".join(code_lines)
all_tokens = list(lexer.get_tokens(code_str, unfiltered=True))
line_tokens = []
current_line = []
for t in all_tokens:
if t[1] == u"\n":
line_tokens.append(current_line)
current_line = []
else:
current_line.append(t)
rows = []
for i, tokens in enumerate(line_tokens):
# Check for blank line
line_str = code_lines[i].rstrip()
if (not blank_lines) and len(line_str.strip()) == 0:
continue
for t in tokens:
kind, value = str(t[0]), t[1]
yield line_str, i, kind, value, t
def python_line_categories(code_lines):
from pygments.lexers import PythonLexer
lexer = PythonLexer()
code_str = "".join(code_lines)
all_tokens = list(lexer.get_tokens(code_str, unfiltered=True))
line_tokens = []
current_line = []
for t in all_tokens:
if t[1] == u"\n":
line_tokens.append(current_line)
current_line = []
else:
current_line.append(t)
line_categories = []
for i, tokens in enumerate(line_tokens):
# Check for blank line
line_str = code_lines[i].rstrip()
if len(line_str.strip()) == 0:
line_categories.append(["blank line"])
continue
assert len(tokens) > 0, "No tokens for line"
categories = []
last_kind, last_value = None, None
for t in tokens:
kind, value = str(t[0]), t[1]
if kind == u"Token.Keyword" and value == u"def":
categories.append("function definition")
elif kind == u"Token.Keyword" and value == u"if":
categories.append("if statement")
elif kind == u"Token.Keyword" and value == u"for":
categories.append("for loop")
elif kind == u"Token.Keyword" and value == u"return":
categories.append("return statement")
elif kind == u"Token.Keyword" and value == u"print":
categories.append("print statement")
elif kind == u"Token.Keyword" and value == u"class":
categories.append("class definition")
elif kind == u"Token.Operator" and value == u"=":
categories.append("assignment")
elif kind == u"Token.Operator" and value == u".":
categories.append("object access")
elif kind == u"Token.Operator" and value in [u"+", u"*"]:
categories.append("mathematical operation")
elif last_kind == u"Token.Operator" and last_value == u"-" and kind == "Token.Whitespace":
categories.append("mathematical operation")
elif kind == u"Token.Operator" and value in [u"<", u">"]:
categories.append("comparison")
elif last_kind == u"Token.Name" and kind == "Token.Punctuation" and value == u"(":
categories.append("function call")
elif kind == "Token.Punctuation" and value == u"[":
categories.append("list creation")
last_kind, last_value = kind, value
if len(categories) == 0:
categories.append("unknown")
line_categories.append(set(categories))
return line_categories
def python_token_metrics(code_lines, indent_size=4):
from pygments.lexers import PythonLexer
indent_regex = re.compile(r"^\s*")
lexer = PythonLexer()
code_str = "".join(code_lines)
all_tokens = list(lexer.get_tokens(code_str, unfiltered=True))
line_tokens = []
current_line = []
for t in all_tokens:
if t[1] == u"\n":
line_tokens.append(current_line)
current_line = []
else:
current_line.append(t)
rows = []
for i, tokens in enumerate(line_tokens):
line_number = i + 1
# Check for blank line
line_str = code_lines[i].rstrip()
if len(line_str.strip()) == 0:
rows.append([line_number, 0, 0, 0, 0, 0, 0])
continue
assert len(tokens) > 0, "No tokens for line"
num_keywords = 0
num_identifiers = 0
num_operators = 0
line_length = len(line_str)
line_indent = len(indent_regex.findall(line_str)[0]) / indent_size
# Indentation is not considered
line_str_noindent = line_str.lstrip()
line_length_noindent = len(line_str_noindent)
whitespace_prop = line_str_noindent.count(" ") / float(line_length_noindent)
for t in tokens:
kind, value = str(t[0]), t[1]
if kind.startswith(u"Token.Keyword"):
num_keywords += 1
elif kind.startswith(u"Token.Name"):
num_identifiers += 1
elif kind.startswith(u"Token.Operator"):
num_operators += 1
rows.append([line_number, line_length_noindent, num_keywords,
num_identifiers, num_operators, whitespace_prop,
line_indent])
columns = ["line", "line_length", "keywords",
"identifiers", "operators", "whitespace_prop",
"line_indent"]
return pandas.DataFrame(rows, columns=columns)
def all_pairs(items, fun, same_value=np.NaN):
results = np.zeros((len(items), len(items)))
for i, item_i in enumerate(items):
for j, item_j in enumerate(items):
if i < j:
results[i, j] = fun(item_i, item_j)
results[j, i] = results[i, j]
elif i == j:
results[i, j] = same_value
return results
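# Example usage (illustrative): pairwise absolute differences over a small
# list; the diagonal receives `same_value`, everything else is symmetric.
def _example_all_pairs():
    dist = all_pairs([1, 4, 6], lambda a, b: abs(a - b), same_value=0.0)
    assert dist[0, 1] == 3 and dist[1, 0] == 3
    assert dist[0, 0] == 0.0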
def file_to_text_buffer(f, pad_left=0):
if isinstance(f, str):
f = open(f, "r")
lines = [l.strip() for l in f.readlines()]
rows = len(lines)
cols = max([len(l) for l in lines]) + pad_left
buffer = np.zeros((rows, cols), dtype=str)
buffer[:, :] = " " # Fill with white space
# Fill buffer
for r, line in enumerate(lines):
|
return buffer
@contextlib.contextmanager
def stdoutIO(stdout=None):
old = sys.stdout
if stdout is None:
stdout = cStringIO.StringIO()
sys.stdout = stdout
yield stdout
sys.stdout = old
def rolling_func(fixations, fun, window_size_ms, step_ms):
start, end = 0, window_size_ms
return_series = False
if not isinstance(fun, dict):
fun = { "value" : fun }
return_series = True
values = { k : [] for k, v in fun.iteritems() }
times = []
while start < fixations.end_ms.max():
times.append(start + (window_size_ms / 2))
win_fixations = fixations[
((fixations.start_ms >= start) & (fixations.start_ms < end)) |
((fixations.end_ms >= start) & (fixations.end_ms < end))]
for k, f in fun.iteritems():
values[k].append(f(win_fixations))
start += step_ms
end += step_ms
first_values = values[values.keys()[0]]
if return_series:
series = pandas.Series(first_values, index=times)
return series
else:
df = pandas.DataFrame(values, index=times)
return df
def window(seq, n):
"""Returns a sliding window (of width n) over data from the iterable s ->
(s0,s1,...s[n-1]), (s1,s2,...,sn), ..."""
seq_it = iter(seq)
result = tuple(it.islice(seq_it, n))
if len(result) == n:
yield result
for elem in seq_it:
result = result[1:] + (elem,)
yield result
def just(n, seq):
"""Splits a sequence into n, rest parts."""
it = iter(seq)
for _ in range(n - 1):
yield next(it, None)
yield tuple(it)
def just2(n, seq):
"""Iterates over a sequence, splitting each item into n, rest parts."""
for inner_seq in seq:
yield tuple(just(n, inner_seq))
def significant(p_value):
if p_value < 0.001:
return "***"
if p_value < 0.01:
return "**"
if p_value < 0.05:
return "*"
return ""
def significant_p(p_value):
if p_value < 0.001:
return "p < .001"
if p_value < 0.01:
return "p < .01"
if p_value < 0.05:
return "p < .05"
return ""
def grouper(n, iterable, fillvalue=None):
"""grouper(3, 'ABCDEFG', 'x') --> ABC DEF Gxx
See also
--------
http://www.python.org/doc//current/library/itertools.html
"""
args = [iter(iterable)] * n
return it.izip_longest(fillvalue=fillvalue, *args)
def pairwise(iterable, fillvalue=None):
"""s -> (s0,s1), (s1,s2), (s2, s3), ...
See also
--------
http://www.python.org/doc//current/library/itertools.html
"""
a, b = it.tee(iterable)
next(b, fillvalue)
return it.izip(a, b)
def split_whitespace_tokens(line):
"""Splits a line of text by whitespace"""
in_quote = False
token = ""
token_start = 0
for i, char in enumerate(line):
if char == ' ':
if len(token) > 0:
yield (token_start, token)
token = ""
else:
if len(token) == 0:
token_start = i
token += char
if len(token) > 0:
yield (token_start, token)
def fixations_to_saccades(fixations, exp_id=0, trial_id=0):
import scipy.spatial
saccades = []
def add_saccades(frame):
for (idx1, row1), (idx2, row2) in pairwise(frame.iterrows()):
start_ms = row1["end_ms"]
end_ms = row2["start_ms"]
duration_ms = end_ms - start_ms
#assert duration_ms >= 0, duration_ms
x1, y1 = row1["fix_x"], row1["fix_y"]
x2, y2 = row2["fix_x"], row2["fix_y"]
dist_euclid = scipy.spatial.distance.euclidean((x1, y1), (x2, y2))
saccades.append([exp_id, trial_id, start_ms, end_ms,
x1, y1, x2, y2, duration_ms, dist_euclid])
if ("exp_id" in fixations.columns):
for (exp_id, trial_id), frame in fixations.groupby(["exp_id", "trial_id"]):
add_saccades(frame)
else:
add_saccades(fixations)
cols = ["exp_id", "trial_id", "start_ms", "end_ms",
"sacc_x1", "sacc_y1", "sacc_x2", "sacc_y2",
"duration_ms", "dist_euclid"]
return pandas.DataFrame(saccades, columns=cols)
def unit_vector(vector):
return vector / np.linalg.norm(vector)
def angle_between(v1, v2):
v1_u = unit_vector(v1)
v2_u = unit_vector(v2)
angle = np.arccos(np.dot(v1_u, v2_u))
if np.isnan(angle):
if (v1_u == v2_u).all():
return 0.0
else:
return np.pi
return angle
def steady_state(trans_matrix):
import scipy.linalg
v, d = scipy.linalg.eig(np.transpose(trans_matrix))
max_vi = v.argmax()
return d[:, max_vi] / sum(d[:, max_vi])
| chars = ([" "] * pad_left) + list(line)
buffer[r, :len(chars)] = chars | conditional_block |
__init__.py | import numpy as np, functools as ft, itertools as it, pandas
import sys, cStringIO, contextlib
import re
from grading import *
def filter_trial(frame, exp_id, trial_id=None):
if trial_id is None:
return frame[frame.exp_id == exp_id]
else:
return frame[(frame.exp_id == exp_id) & (frame.trial_id == trial_id)]
def filter_program(frame, base, version=None):
if version is None:
return frame[frame.base == base]
else:
return frame[(frame.base == base) & (frame.version == version)]
def filter_aois(frame, kind, name=None):
if name is None:
return frame[frame.kind == kind]
else:
return frame[(frame.kind == kind) & (frame.name == name)]
def comma_list_contains(s, list_str):
return s in list_str.split(",")
def comma_list_contains_any(fun, list_str):
for s in list_str.split(","):
if fun(s):
return True
return False
def filter_lines(fixations, hit_kind="circle", offset_kind="manual experiment"):
name_filter = ft.partial(comma_list_contains_any, lambda s: s.startswith("line "))
line_fixes = fixations[(fixations.hit_kind == hit_kind) &
(fixations.offset_kind == offset_kind) &
(fixations.aoi_names.apply(name_filter))]
line_fixes["line"] = line_fixes.aoi_name.apply(lambda n: int(n.split(" ")[1]))
return line_fixes
def split_by_median(frame, column):
m = frame[column].median()
return frame[frame[column] <= m], frame[frame[column] > m]
def split_by_boolean(frame, column):
return frame[frame[column]], frame[np.invert(frame[column])]
def contrast_color(rgba):
a = 1 - ((0.299 * rgba[0]) + (0.587 * rgba[1]) + (0.114 * rgba[2]))
return "black" if a < 0.5 else "white"
def transition_matrix(lines, num_lines=None):
if num_lines is None:
num_lines = max(lines)
trans_counts = np.zeros(shape=(num_lines, num_lines))
for l1, l2 in zip(lines, lines[1:]):
trans_counts[l1 - 1, l2 - 1] += 1
# Normalize by rows
row_sums = trans_counts.sum(axis=1)
trans_probs = trans_counts / row_sums.reshape((-1, 1))
# Get rid of NaNs
return np.nan_to_num(trans_probs)
def norm_by_rows(matrix):
"""Normalizes a numpy array by rows (axis 1).
Parameters
----------
matrix : array_like
A numpy array with at least two axes.
Returns
-------
a : array_like
A row-normalized numpy array
"""
row_sums = matrix.sum(axis=1)
return matrix / row_sums.reshape((-1, 1))
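# Example usage (illustrative): every row of the result sums to 1.
def _example_norm_by_rows():
    m = norm_by_rows(np.array([[1.0, 3.0], [2.0, 2.0]]))
    assert np.allclose(m, [[0.25, 0.75], [0.5, 0.5]])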
def gauss_kern(size, sigma=1.0):
""" Returns a normalized 2D gauss kernel array for convolutions """
h1 = size[0]
h2 = size[1]
x, y = np.mgrid[0:h2, 0:h1]
x = x-h2/2
y = y-h1/2
g = np.exp( -( x**2 + y**2 ) / (2*sigma**2) );
return g / g.sum()
def make_heatmap(points, screen_size, point_size, sigma_denom=5.0):
point_radius = point_size / 2
screen = np.zeros((screen_size[0] + point_size, screen_size[1] + point_size))
kernel = gauss_kern((point_size, point_size), sigma=(point_size / sigma_denom))
for pt in points:
x_start, y_start = pt[0], pt[1]
x_end, y_end = x_start + point_size, y_start + point_size
scr_slice = screen[x_start:x_end, y_start:y_end]
width, height = scr_slice.shape[0], scr_slice.shape[1]
screen[x_start:x_end, y_start:y_end] = scr_slice + kernel[:width, :height]
screen = screen / screen.max()
screen = screen[point_radius:-point_radius,
point_radius:-point_radius]
return screen
def python_line_tokens(code_lines, blank_lines=False):
from pygments.lexers import PythonLexer
lexer = PythonLexer()
code_str = "".join(code_lines)
all_tokens = list(lexer.get_tokens(code_str, unfiltered=True))
line_tokens = []
current_line = []
for t in all_tokens:
if t[1] == u"\n":
line_tokens.append(current_line)
current_line = []
else:
current_line.append(t)
rows = []
for i, tokens in enumerate(line_tokens):
# Check for blank line
line_str = code_lines[i].rstrip()
if (not blank_lines) and len(line_str.strip()) == 0:
continue
for t in tokens:
kind, value = str(t[0]), t[1]
yield line_str, i, kind, value, t
def python_line_categories(code_lines):
from pygments.lexers import PythonLexer
lexer = PythonLexer()
code_str = "".join(code_lines)
all_tokens = list(lexer.get_tokens(code_str, unfiltered=True))
line_tokens = []
current_line = []
for t in all_tokens:
if t[1] == u"\n":
line_tokens.append(current_line)
current_line = []
else:
current_line.append(t)
line_categories = []
for i, tokens in enumerate(line_tokens):
# Check for blank line
line_str = code_lines[i].rstrip()
if len(line_str.strip()) == 0:
line_categories.append(["blank line"])
continue
assert len(tokens) > 0, "No tokens for line"
categories = []
last_kind, last_value = None, None
for t in tokens:
kind, value = str(t[0]), t[1]
if kind == u"Token.Keyword" and value == u"def":
categories.append("function definition")
elif kind == u"Token.Keyword" and value == u"if":
categories.append("if statement")
elif kind == u"Token.Keyword" and value == u"for":
categories.append("for loop")
elif kind == u"Token.Keyword" and value == u"return":
categories.append("return statement")
elif kind == u"Token.Keyword" and value == u"print":
categories.append("print statement")
elif kind == u"Token.Keyword" and value == u"class":
categories.append("class definition")
elif kind == u"Token.Operator" and value == u"=":
categories.append("assignment")
elif kind == u"Token.Operator" and value == u".":
categories.append("object access")
elif kind == u"Token.Operator" and value in [u"+", u"*"]:
categories.append("mathematical operation")
elif last_kind == u"Token.Operator" and last_value == u"-" and kind == "Token.Whitespace":
categories.append("mathematical operation")
elif kind == u"Token.Operator" and value in [u"<", u">"]:
categories.append("comparison")
elif last_kind == u"Token.Name" and kind == "Token.Punctuation" and value == u"(":
categories.append("function call")
elif kind == "Token.Punctuation" and value == u"[":
categories.append("list creation")
last_kind, last_value = kind, value
if len(categories) == 0:
categories.append("unknown")
line_categories.append(set(categories))
return line_categories
def | (code_lines, indent_size=4):
from pygments.lexers import PythonLexer
indent_regex = re.compile(r"^\s*")
lexer = PythonLexer()
code_str = "".join(code_lines)
all_tokens = list(lexer.get_tokens(code_str, unfiltered=True))
line_tokens = []
current_line = []
for t in all_tokens:
if t[1] == u"\n":
line_tokens.append(current_line)
current_line = []
else:
current_line.append(t)
rows = []
for i, tokens in enumerate(line_tokens):
line_number = i + 1
# Check for blank line
line_str = code_lines[i].rstrip()
if len(line_str.strip()) == 0:
rows.append([line_number, 0, 0, 0, 0, 0, 0])
continue
assert len(tokens) > 0, "No tokens for line"
num_keywords = 0
num_identifiers = 0
num_operators = 0
line_length = len(line_str)
line_indent = len(indent_regex.findall(line_str)[0]) / indent_size
# Indentation is not considered
line_str_noindent = line_str.lstrip()
line_length_noindent = len(line_str_noindent)
whitespace_prop = line_str_noindent.count(" ") / float(line_length_noindent)
for t in tokens:
kind, value = str(t[0]), t[1]
if kind.startswith(u"Token.Keyword"):
num_keywords += 1
elif kind.startswith(u"Token.Name"):
num_identifiers += 1
elif kind.startswith(u"Token.Operator"):
num_operators += 1
rows.append([line_number, line_length_noindent, num_keywords,
num_identifiers, num_operators, whitespace_prop,
line_indent])
columns = ["line", "line_length", "keywords",
"identifiers", "operators", "whitespace_prop",
"line_indent"]
return pandas.DataFrame(rows, columns=columns)
def all_pairs(items, fun, same_value=np.NaN):
results = np.zeros((len(items), len(items)))
for i, item_i in enumerate(items):
for j, item_j in enumerate(items):
if i < j:
results[i, j] = fun(item_i, item_j)
results[j, i] = results[i, j]
elif i == j:
results[i, j] = same_value
return results
def file_to_text_buffer(f, pad_left=0):
if isinstance(f, str):
f = open(f, "r")
lines = [l.strip() for l in f.readlines()]
rows = len(lines)
cols = max([len(l) for l in lines]) + pad_left
buffer = np.zeros((rows, cols), dtype=str)
buffer[:, :] = " " # Fill with white space
# Fill buffer
for r, line in enumerate(lines):
chars = ([" "] * pad_left) + list(line)
buffer[r, :len(chars)] = chars
return buffer
@contextlib.contextmanager
def stdoutIO(stdout=None):
old = sys.stdout
if stdout is None:
stdout = cStringIO.StringIO()
sys.stdout = stdout
yield stdout
sys.stdout = old
def rolling_func(fixations, fun, window_size_ms, step_ms):
start, end = 0, window_size_ms
return_series = False
if not isinstance(fun, dict):
fun = { "value" : fun }
return_series = True
values = { k : [] for k, v in fun.iteritems() }
times = []
while start < fixations.end_ms.max():
times.append(start + (window_size_ms / 2))
win_fixations = fixations[
((fixations.start_ms >= start) & (fixations.start_ms < end)) |
((fixations.end_ms >= start) & (fixations.end_ms < end))]
for k, f in fun.iteritems():
values[k].append(f(win_fixations))
start += step_ms
end += step_ms
first_values = values[values.keys()[0]]
if return_series:
series = pandas.Series(first_values, index=times)
return series
else:
df = pandas.DataFrame(values, index=times)
return df
def window(seq, n):
"""Returns a sliding window (of width n) over data from the iterable s ->
(s0,s1,...s[n-1]), (s1,s2,...,sn), ..."""
seq_it = iter(seq)
result = tuple(it.islice(seq_it, n))
if len(result) == n:
yield result
for elem in seq_it:
result = result[1:] + (elem,)
yield result
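# Example usage (illustrative): sliding windows of width 3.
def _example_window():
    assert list(window([1, 2, 3, 4, 5], 3)) == [(1, 2, 3), (2, 3, 4), (3, 4, 5)]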
def just(n, seq):
"""Splits a sequence into n, rest parts."""
it = iter(seq)
for _ in range(n - 1):
yield next(it, None)
yield tuple(it)
def just2(n, seq):
"""Iterates over a sequence, splitting each item into n, rest parts."""
for inner_seq in seq:
yield tuple(just(n, inner_seq))
def significant(p_value):
if p_value < 0.001:
return "***"
if p_value < 0.01:
return "**"
if p_value < 0.05:
return "*"
return ""
def significant_p(p_value):
if p_value < 0.001:
return "p < .001"
if p_value < 0.01:
return "p < .01"
if p_value < 0.05:
return "p < .05"
return ""
def grouper(n, iterable, fillvalue=None):
"""grouper(3, 'ABCDEFG', 'x') --> ABC DEF Gxx
See also
--------
http://www.python.org/doc//current/library/itertools.html
"""
args = [iter(iterable)] * n
return it.izip_longest(fillvalue=fillvalue, *args)
def pairwise(iterable, fillvalue=None):
"""s -> (s0,s1), (s1,s2), (s2, s3), ...
See also
--------
http://www.python.org/doc//current/library/itertools.html
"""
a, b = it.tee(iterable)
next(b, fillvalue)
return it.izip(a, b)
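# Example usage (illustrative): consecutive pairs from a sequence (note
# this module targets Python 2, hence it.izip above).
def _example_pairwise():
    assert list(pairwise([10, 20, 30])) == [(10, 20), (20, 30)]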
def split_whitespace_tokens(line):
"""Splits a line of text by whitespace"""
in_quote = False
token = ""
token_start = 0
for i, char in enumerate(line):
if char == ' ':
if len(token) > 0:
yield (token_start, token)
token = ""
else:
if len(token) == 0:
token_start = i
token += char
if len(token) > 0:
yield (token_start, token)
def fixations_to_saccades(fixations, exp_id=0, trial_id=0):
import scipy.spatial
saccades = []
def add_saccades(frame):
for (idx1, row1), (idx2, row2) in pairwise(frame.iterrows()):
start_ms = row1["end_ms"]
end_ms = row2["start_ms"]
duration_ms = end_ms - start_ms
#assert duration_ms >= 0, duration_ms
x1, y1 = row1["fix_x"], row1["fix_y"]
x2, y2 = row2["fix_x"], row2["fix_y"]
dist_euclid = scipy.spatial.distance.euclidean((x1, y1), (x2, y2))
saccades.append([exp_id, trial_id, start_ms, end_ms,
x1, y1, x2, y2, duration_ms, dist_euclid])
if ("exp_id" in fixations.columns):
for (exp_id, trial_id), frame in fixations.groupby(["exp_id", "trial_id"]):
add_saccades(frame)
else:
add_saccades(fixations)
cols = ["exp_id", "trial_id", "start_ms", "end_ms",
"sacc_x1", "sacc_y1", "sacc_x2", "sacc_y2",
"duration_ms", "dist_euclid"]
return pandas.DataFrame(saccades, columns=cols)
def unit_vector(vector):
return vector / np.linalg.norm(vector)
def angle_between(v1, v2):
v1_u = unit_vector(v1)
v2_u = unit_vector(v2)
angle = np.arccos(np.dot(v1_u, v2_u))
if np.isnan(angle):
if (v1_u == v2_u).all():
return 0.0
else:
return np.pi
return angle
def steady_state(trans_matrix):
import scipy.linalg
v, d = scipy.linalg.eig(np.transpose(trans_matrix))
max_vi = v.argmax()
return d[:, max_vi] / sum(d[:, max_vi])
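# Example usage (illustrative): stationary distribution of a two-state
# chain. For P = [[0.9, 0.1], [0.5, 0.5]] the balance equation
# pi_0 * 0.1 == pi_1 * 0.5 gives pi == [5/6, 1/6] (up to floating point;
# the eigendecomposition returns complex values, hence np.real below).
def _example_steady_state():
    pi = steady_state(np.array([[0.9, 0.1], [0.5, 0.5]]))
    assert np.allclose(np.real(pi), [5.0 / 6.0, 1.0 / 6.0])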
| python_token_metrics | identifier_name |
__init__.py | import numpy as np, functools as ft, itertools as it, pandas
import sys, cStringIO, contextlib
import re
from grading import *
def filter_trial(frame, exp_id, trial_id=None):
if trial_id is None:
return frame[frame.exp_id == exp_id]
else:
return frame[(frame.exp_id == exp_id) & (frame.trial_id == trial_id)]
def filter_program(frame, base, version=None):
if version is None:
return frame[frame.base == base]
else:
return frame[(frame.base == base) & (frame.version == version)]
def filter_aois(frame, kind, name=None):
if name is None:
return frame[frame.kind == kind]
else:
return frame[(frame.kind == kind) & (frame.name == name)]
def comma_list_contains(s, list_str):
return s in list_str.split(",")
def comma_list_contains_any(fun, list_str):
for s in list_str.split(","):
if fun(s):
return True
return False
def filter_lines(fixations, hit_kind="circle", offset_kind="manual experiment"):
name_filter = ft.partial(comma_list_contains_any, lambda s: s.startswith("line "))
line_fixes = fixations[(fixations.hit_kind == hit_kind) &
(fixations.offset_kind == offset_kind) &
(fixations.aoi_names.apply(name_filter))]
line_fixes["line"] = line_fixes.aoi_name.apply(lambda n: int(n.split(" ")[1]))
return line_fixes
def split_by_median(frame, column):
m = frame[column].median()
return frame[frame[column] <= m], frame[frame[column] > m]
def split_by_boolean(frame, column):
return frame[frame[column]], frame[np.invert(frame[column])]
def contrast_color(rgba):
a = 1 - ((0.299 * rgba[0]) + (0.587 * rgba[1]) + (0.114 * rgba[2]))
return "black" if a < 0.5 else "white"
def transition_matrix(lines, num_lines=None):
if num_lines is None:
num_lines = max(lines)
trans_counts = np.zeros(shape=(num_lines, num_lines))
for l1, l2 in zip(lines, lines[1:]):
trans_counts[l1 - 1, l2 - 1] += 1
# Normalize by rows
row_sums = trans_counts.sum(axis=1)
trans_probs = trans_counts / row_sums.reshape((-1, 1))
# Get rid of NaNs
return np.nan_to_num(trans_probs)
def norm_by_rows(matrix):
"""Normalizes a numpy array by rows (axis 1).
Parameters
----------
matrix : array_like
A numpy array with at least two axes.
Returns
-------
a : array_like
A row-normalized numpy array
"""
row_sums = matrix.sum(axis=1)
return matrix / row_sums.reshape((-1, 1))
def gauss_kern(size, sigma=1.0):
""" Returns a normalized 2D gauss kernel array for convolutions """
h1 = size[0]
h2 = size[1]
x, y = np.mgrid[0:h2, 0:h1]
x = x-h2/2
y = y-h1/2
g = np.exp( -( x**2 + y**2 ) / (2*sigma**2) );
return g / g.sum()
def make_heatmap(points, screen_size, point_size, sigma_denom=5.0):
point_radius = point_size / 2
screen = np.zeros((screen_size[0] + point_size, screen_size[1] + point_size))
kernel = gauss_kern((point_size, point_size), sigma=(point_size / sigma_denom))
for pt in points:
x_start, y_start = pt[0], pt[1]
x_end, y_end = x_start + point_size, y_start + point_size
scr_slice = screen[x_start:x_end, y_start:y_end]
width, height = scr_slice.shape[0], scr_slice.shape[1]
screen[x_start:x_end, y_start:y_end] = scr_slice + kernel[:width, :height]
screen = screen / screen.max()
screen = screen[point_radius:-point_radius,
point_radius:-point_radius]
return screen
def python_line_tokens(code_lines, blank_lines=False):
from pygments.lexers import PythonLexer
lexer = PythonLexer()
code_str = "".join(code_lines)
all_tokens = list(lexer.get_tokens(code_str, unfiltered=True))
line_tokens = []
current_line = []
for t in all_tokens:
if t[1] == u"\n":
line_tokens.append(current_line)
current_line = []
else:
current_line.append(t)
rows = []
for i, tokens in enumerate(line_tokens):
# Check for blank line
line_str = code_lines[i].rstrip()
if (not blank_lines) and len(line_str.strip()) == 0:
continue
for t in tokens:
kind, value = str(t[0]), t[1]
yield line_str, i, kind, value, t
def python_line_categories(code_lines):
from pygments.lexers import PythonLexer
lexer = PythonLexer()
code_str = "".join(code_lines)
all_tokens = list(lexer.get_tokens(code_str, unfiltered=True))
line_tokens = []
current_line = []
for t in all_tokens:
if t[1] == u"\n":
line_tokens.append(current_line)
current_line = []
else:
current_line.append(t)
line_categories = []
for i, tokens in enumerate(line_tokens):
# Check for blank line
line_str = code_lines[i].rstrip()
if len(line_str.strip()) == 0:
line_categories.append(["blank line"])
continue
assert len(tokens) > 0, "No tokens for line"
categories = []
last_kind, last_value = None, None
for t in tokens:
kind, value = str(t[0]), t[1]
if kind == u"Token.Keyword" and value == u"def":
categories.append("function definition")
elif kind == u"Token.Keyword" and value == u"if":
categories.append("if statement")
elif kind == u"Token.Keyword" and value == u"for":
categories.append("for loop")
elif kind == u"Token.Keyword" and value == u"return":
categories.append("return statement")
elif kind == u"Token.Keyword" and value == u"print":
categories.append("print statement")
elif kind == u"Token.Keyword" and value == u"class":
categories.append("class definition")
elif kind == u"Token.Operator" and value == u"=":
categories.append("assignment")
elif kind == u"Token.Operator" and value == u".":
categories.append("object access")
elif kind == u"Token.Operator" and value in [u"+", u"*"]:
categories.append("mathematical operation")
elif last_kind == u"Token.Operator" and last_value == u"-" and kind == "Token.Whitespace":
categories.append("mathematical operation")
elif kind == u"Token.Operator" and value in [u"<", u">"]:
categories.append("comparison")
elif last_kind == u"Token.Name" and kind == "Token.Punctuation" and value == u"(":
categories.append("function call")
elif kind == "Token.Punctuation" and value == u"[":
categories.append("list creation")
last_kind, last_value = kind, value
if len(categories) == 0:
categories.append("unknown")
line_categories.append(set(categories))
return line_categories
def python_token_metrics(code_lines, indent_size=4):
from pygments.lexers import PythonLexer
indent_regex = re.compile(r"^\s*")
lexer = PythonLexer()
code_str = "".join(code_lines)
all_tokens = list(lexer.get_tokens(code_str, unfiltered=True))
line_tokens = []
current_line = []
for t in all_tokens:
if t[1] == u"\n":
line_tokens.append(current_line)
current_line = []
else:
current_line.append(t)
rows = []
for i, tokens in enumerate(line_tokens):
line_number = i + 1
# Check for blank line
line_str = code_lines[i].rstrip()
if len(line_str.strip()) == 0:
rows.append([line_number, 0, 0, 0, 0, 0, 0])
continue
assert len(tokens) > 0, "No tokens for line"
num_keywords = 0
num_identifiers = 0
num_operators = 0
line_length = len(line_str)
line_indent = len(indent_regex.findall(line_str)[0]) / indent_size
# Indentation is not considered
line_str_noindent = line_str.lstrip()
line_length_noindent = len(line_str_noindent)
whitespace_prop = line_str_noindent.count(" ") / float(line_length_noindent)
for t in tokens:
kind, value = str(t[0]), t[1]
if kind.startswith(u"Token.Keyword"):
num_keywords += 1
elif kind.startswith(u"Token.Name"):
num_identifiers += 1
elif kind.startswith(u"Token.Operator"):
num_operators += 1
rows.append([line_number, line_length_noindent, num_keywords,
num_identifiers, num_operators, whitespace_prop,
line_indent])
columns = ["line", "line_length", "keywords",
"identifiers", "operators", "whitespace_prop",
"line_indent"]
return pandas.DataFrame(rows, columns=columns)
def all_pairs(items, fun, same_value=np.NaN):
results = np.zeros((len(items), len(items)))
for i, item_i in enumerate(items):
for j, item_j in enumerate(items):
if i < j:
results[i, j] = fun(item_i, item_j)
results[j, i] = results[i, j]
elif i == j:
results[i, j] = same_value
return results
def file_to_text_buffer(f, pad_left=0):
if isinstance(f, str):
f = open(f, "r")
lines = [l.strip() for l in f.readlines()]
rows = len(lines)
cols = max([len(l) for l in lines]) + pad_left
buffer = np.zeros((rows, cols), dtype=str)
buffer[:, :] = " " # Fill with white space
# Fill buffer
for r, line in enumerate(lines):
chars = ([" "] * pad_left) + list(line)
buffer[r, :len(chars)] = chars
return buffer
@contextlib.contextmanager
def stdoutIO(stdout=None):
old = sys.stdout
if stdout is None:
stdout = cStringIO.StringIO()
sys.stdout = stdout
yield stdout
sys.stdout = old
def rolling_func(fixations, fun, window_size_ms, step_ms):
start, end = 0, window_size_ms
return_series = False
if not isinstance(fun, dict):
fun = { "value" : fun }
return_series = True
values = { k : [] for k, v in fun.iteritems() }
times = []
while start < fixations.end_ms.max():
times.append(start + (window_size_ms / 2))
win_fixations = fixations[
((fixations.start_ms >= start) & (fixations.start_ms < end)) |
((fixations.end_ms >= start) & (fixations.end_ms < end))]
for k, f in fun.iteritems():
values[k].append(f(win_fixations))
start += step_ms
end += step_ms
first_values = values[values.keys()[0]]
if return_series:
series = pandas.Series(first_values, index=times)
return series
else:
df = pandas.DataFrame(values, index=times)
return df
def window(seq, n):
"""Returns a sliding window (of width n) over data from the iterable s ->
(s0,s1,...s[n-1]), (s1,s2,...,sn), ..."""
seq_it = iter(seq)
result = tuple(it.islice(seq_it, n))
if len(result) == n:
yield result
for elem in seq_it:
result = result[1:] + (elem,)
yield result
def just(n, seq): | yield next(it, None)
yield tuple(it)
def just2(n, seq):
"""Iterates over a sequence, splitting each item into n, rest parts."""
for inner_seq in seq:
yield tuple(just(n, inner_seq))
def significant(p_value):
if p_value < 0.001:
return "***"
if p_value < 0.01:
return "**"
if p_value < 0.05:
return "*"
return ""
def significant_p(p_value):
if p_value < 0.001:
return "p < .001"
if p_value < 0.01:
return "p < .01"
if p_value < 0.05:
return "p < .05"
return ""
def grouper(n, iterable, fillvalue=None):
"""grouper(3, 'ABCDEFG', 'x') --> ABC DEF Gxx
See also
--------
http://www.python.org/doc//current/library/itertools.html
"""
args = [iter(iterable)] * n
return it.izip_longest(fillvalue=fillvalue, *args)
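# Example usage (illustrative), mirroring the docstring:
def _example_grouper():
    groups = list(grouper(3, "ABCDEFG", "x"))
    assert groups == [("A", "B", "C"), ("D", "E", "F"), ("G", "x", "x")]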
def pairwise(iterable, fillvalue=None):
"""s -> (s0,s1), (s1,s2), (s2, s3), ...
See also
--------
http://www.python.org/doc//current/library/itertools.html
"""
a, b = it.tee(iterable)
next(b, fillvalue)
return it.izip(a, b)
def split_whitespace_tokens(line):
"""Splits a line of text by whitespace"""
in_quote = False
token = ""
token_start = 0
for i, char in enumerate(line):
if char == ' ':
if len(token) > 0:
yield (token_start, token)
token = ""
else:
if len(token) == 0:
token_start = i
token += char
if len(token) > 0:
yield (token_start, token)
def fixations_to_saccades(fixations, exp_id=0, trial_id=0):
import scipy.spatial
saccades = []
def add_saccades(frame):
for (idx1, row1), (idx2, row2) in pairwise(frame.iterrows()):
start_ms = row1["end_ms"]
end_ms = row2["start_ms"]
duration_ms = end_ms - start_ms
#assert duration_ms >= 0, duration_ms
x1, y1 = row1["fix_x"], row1["fix_y"]
x2, y2 = row2["fix_x"], row2["fix_y"]
dist_euclid = scipy.spatial.distance.euclidean((x1, y1), (x2, y2))
saccades.append([exp_id, trial_id, start_ms, end_ms,
x1, y1, x2, y2, duration_ms, dist_euclid])
if ("exp_id" in fixations.columns):
for (exp_id, trial_id), frame in fixations.groupby(["exp_id", "trial_id"]):
add_saccades(frame)
else:
add_saccades(fixations)
cols = ["exp_id", "trial_id", "start_ms", "end_ms",
"sacc_x1", "sacc_y1", "sacc_x2", "sacc_y2",
"duration_ms", "dist_euclid"]
return pandas.DataFrame(saccades, columns=cols)
def unit_vector(vector):
return vector / np.linalg.norm(vector)
def angle_between(v1, v2):
v1_u = unit_vector(v1)
v2_u = unit_vector(v2)
angle = np.arccos(np.dot(v1_u, v2_u))
if np.isnan(angle):
if (v1_u == v2_u).all():
return 0.0
else:
return np.pi
return angle
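# Example usage (illustrative): angles are returned in radians, with the
# NaN fallback distinguishing identical from opposite vectors.
def _example_angle_between():
    x = np.array([1.0, 0.0])
    y = np.array([0.0, 1.0])
    assert np.isclose(angle_between(x, y), np.pi / 2)
    assert angle_between(x, x) == 0.0
    assert np.isclose(angle_between(x, -x), np.pi)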
def steady_state(trans_matrix):
import scipy.linalg
v, d = scipy.linalg.eig(np.transpose(trans_matrix))
max_vi = v.argmax()
return d[:, max_vi] / sum(d[:, max_vi]) | """Splits a sequence into n, rest parts."""
it = iter(seq)
for _ in range(n - 1): | random_line_split |
__init__.py | import numpy as np, functools as ft, itertools as it, pandas
import sys, cStringIO, contextlib
import re
from grading import *
def filter_trial(frame, exp_id, trial_id=None):
if trial_id is None:
return frame[frame.exp_id == exp_id]
else:
return frame[(frame.exp_id == exp_id) & (frame.trial_id == trial_id)]
def filter_program(frame, base, version=None):
if version is None:
return frame[frame.base == base]
else:
return frame[(frame.base == base) & (frame.version == version)]
def filter_aois(frame, kind, name=None):
if name is None:
return frame[frame.kind == kind]
else:
return frame[(frame.kind == kind) & (frame.name == name)]
def comma_list_contains(s, list_str):
return s in list_str.split(",")
def comma_list_contains_any(fun, list_str):
for s in list_str.split(","):
if fun(s):
return True
return False
def filter_lines(fixations, hit_kind="circle", offset_kind="manual experiment"):
name_filter = ft.partial(comma_list_contains_any, lambda s: s.startswith("line "))
line_fixes = fixations[(fixations.hit_kind == hit_kind) &
(fixations.offset_kind == offset_kind) &
(fixations.aoi_names.apply(name_filter))]
line_fixes["line"] = line_fixes.aoi_name.apply(lambda n: int(n.split(" ")[1]))
return line_fixes
def split_by_median(frame, column):
m = frame[column].median()
return frame[frame[column] <= m], frame[frame[column] > m]
def split_by_boolean(frame, column):
return frame[frame[column]], frame[np.invert(frame[column])]
def contrast_color(rgba):
a = 1 - ((0.299 * rgba[0]) + (0.587 * rgba[1]) + (0.114 * rgba[2]))
return "black" if a < 0.5 else "white"
def transition_matrix(lines, num_lines=None):
if num_lines is None:
num_lines = max(lines)
trans_counts = np.zeros(shape=(num_lines, num_lines))
for l1, l2 in zip(lines, lines[1:]):
trans_counts[l1 - 1, l2 - 1] += 1
# Normalize by rows
row_sums = trans_counts.sum(axis=1)
trans_probs = trans_counts / row_sums.reshape((-1, 1))
# Get rid of NaNs
return np.nan_to_num(trans_probs)
def norm_by_rows(matrix):
"""Normalizes a numpy array by rows (axis 1).
Parameters
----------
matrix : array_like
A numpy array with at least two axes.
Returns
-------
a : array_like
A row-normalized numpy array
"""
row_sums = matrix.sum(axis=1)
return matrix / row_sums.reshape((-1, 1))
def gauss_kern(size, sigma=1.0):
""" Returns a normalized 2D gauss kernel array for convolutions """
h1 = size[0]
h2 = size[1]
x, y = np.mgrid[0:h2, 0:h1]
x = x-h2/2
y = y-h1/2
g = np.exp( -( x**2 + y**2 ) / (2*sigma**2) );
return g / g.sum()
def make_heatmap(points, screen_size, point_size, sigma_denom=5.0):
point_radius = point_size / 2
screen = np.zeros((screen_size[0] + point_size, screen_size[1] + point_size))
kernel = gauss_kern((point_size, point_size), sigma=(point_size / sigma_denom))
for pt in points:
x_start, y_start = pt[0], pt[1]
x_end, y_end = x_start + point_size, y_start + point_size
scr_slice = screen[x_start:x_end, y_start:y_end]
width, height = scr_slice.shape[0], scr_slice.shape[1]
screen[x_start:x_end, y_start:y_end] = scr_slice + kernel[:width, :height]
screen = screen / screen.max()
screen = screen[point_radius:-point_radius,
point_radius:-point_radius]
return screen
def python_line_tokens(code_lines, blank_lines=False):
from pygments.lexers import PythonLexer
lexer = PythonLexer()
code_str = "".join(code_lines)
all_tokens = list(lexer.get_tokens(code_str, unfiltered=True))
line_tokens = []
current_line = []
for t in all_tokens:
if t[1] == u"\n":
line_tokens.append(current_line)
current_line = []
else:
current_line.append(t)
rows = []
for i, tokens in enumerate(line_tokens):
# Check for blank line
line_str = code_lines[i].rstrip()
if (not blank_lines) and len(line_str.strip()) == 0:
continue
for t in tokens:
kind, value = str(t[0]), t[1]
yield line_str, i, kind, value, t
def python_line_categories(code_lines):
from pygments.lexers import PythonLexer
lexer = PythonLexer()
code_str = "".join(code_lines)
all_tokens = list(lexer.get_tokens(code_str, unfiltered=True))
line_tokens = []
current_line = []
for t in all_tokens:
if t[1] == u"\n":
line_tokens.append(current_line)
current_line = []
else:
current_line.append(t)
line_categories = []
for i, tokens in enumerate(line_tokens):
# Check for blank line
line_str = code_lines[i].rstrip()
if len(line_str.strip()) == 0:
line_categories.append(["blank line"])
continue
assert len(tokens) > 0, "No tokens for line"
categories = []
last_kind, last_value = None, None
for t in tokens:
kind, value = str(t[0]), t[1]
if kind == u"Token.Keyword" and value == u"def":
categories.append("function definition")
elif kind == u"Token.Keyword" and value == u"if":
categories.append("if statement")
elif kind == u"Token.Keyword" and value == u"for":
categories.append("for loop")
elif kind == u"Token.Keyword" and value == u"return":
categories.append("return statement")
elif kind == u"Token.Keyword" and value == u"print":
categories.append("print statement")
elif kind == u"Token.Keyword" and value == u"class":
categories.append("class definition")
elif kind == u"Token.Operator" and value == u"=":
categories.append("assignment")
elif kind == u"Token.Operator" and value == u".":
categories.append("object access")
elif kind == u"Token.Operator" and value in [u"+", u"*"]:
categories.append("mathematical operation")
elif last_kind == u"Token.Operator" and last_value == u"-" and kind == "Token.Whitespace":
categories.append("mathematical operation")
elif kind == u"Token.Operator" and value in [u"<", u">"]:
categories.append("comparison")
elif last_kind == u"Token.Name" and kind == "Token.Punctuation" and value == u"(":
categories.append("function call")
elif kind == "Token.Punctuation" and value == u"[":
categories.append("list creation")
last_kind, last_value = kind, value
if len(categories) == 0:
categories.append("unknown")
line_categories.append(set(categories))
return line_categories
def python_token_metrics(code_lines, indent_size=4):
from pygments.lexers import PythonLexer
indent_regex = re.compile(r"^\s*")
lexer = PythonLexer()
code_str = "".join(code_lines)
all_tokens = list(lexer.get_tokens(code_str, unfiltered=True))
line_tokens = []
current_line = []
for t in all_tokens:
if t[1] == u"\n":
line_tokens.append(current_line)
current_line = []
else:
current_line.append(t)
rows = []
for i, tokens in enumerate(line_tokens):
line_number = i + 1
# Check for blank line
line_str = code_lines[i].rstrip()
if len(line_str.strip()) == 0:
rows.append([line_number, 0, 0, 0, 0, 0, 0])
continue
assert len(tokens) > 0, "No tokens for line"
num_keywords = 0
num_identifiers = 0
num_operators = 0
line_length = len(line_str)
line_indent = len(indent_regex.findall(line_str)[0]) / indent_size
# Indentation is not considered
line_str_noindent = line_str.lstrip()
line_length_noindent = len(line_str_noindent)
whitespace_prop = line_str_noindent.count(" ") / float(line_length_noindent)
for t in tokens:
kind, value = str(t[0]), t[1]
if kind.startswith(u"Token.Keyword"):
num_keywords += 1
elif kind.startswith(u"Token.Name"):
num_identifiers += 1
elif kind.startswith(u"Token.Operator"):
num_operators += 1
rows.append([line_number, line_length_noindent, num_keywords,
num_identifiers, num_operators, whitespace_prop,
line_indent])
columns = ["line", "line_length", "keywords",
"identifiers", "operators", "whitespace_prop",
"line_indent"]
return pandas.DataFrame(rows, columns=columns)
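# Example usage (illustrative; requires pygments): per-line lexical metrics
# for a tiny snippet. Given the counting rules above, line 2 should report
# one keyword ("return"), one identifier ("x"), one operator ("+") and an
# indent of 1.
def _example_python_token_metrics():
    return python_token_metrics(["def f(x):\n", "    return x + 1\n"])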
def all_pairs(items, fun, same_value=np.NaN):
results = np.zeros((len(items), len(items)))
for i, item_i in enumerate(items):
for j, item_j in enumerate(items):
if i < j:
results[i, j] = fun(item_i, item_j)
results[j, i] = results[i, j]
elif i == j:
results[i, j] = same_value
return results
def file_to_text_buffer(f, pad_left=0):
if isinstance(f, str):
f = open(f, "r")
lines = [l.strip() for l in f.readlines()]
rows = len(lines)
cols = max([len(l) for l in lines]) + pad_left
buffer = np.zeros((rows, cols), dtype=str)
buffer[:, :] = " " # Fill with white space
# Fill buffer
for r, line in enumerate(lines):
chars = ([" "] * pad_left) + list(line)
buffer[r, :len(chars)] = chars
return buffer
@contextlib.contextmanager
def stdoutIO(stdout=None):
old = sys.stdout
if stdout is None:
stdout = cStringIO.StringIO()
sys.stdout = stdout
yield stdout
sys.stdout = old
def rolling_func(fixations, fun, window_size_ms, step_ms):
|
def window(seq, n):
"""Returns a sliding window (of width n) over data from the iterable s ->
(s0,s1,...s[n-1]), (s1,s2,...,sn), ..."""
seq_it = iter(seq)
result = tuple(it.islice(seq_it, n))
if len(result) == n:
yield result
for elem in seq_it:
result = result[1:] + (elem,)
yield result
def just(n, seq):
"""Splits a sequence into n, rest parts."""
it = iter(seq)
for _ in range(n - 1):
yield next(it, None)
yield tuple(it)
def just2(n, seq):
"""Iterates over a sequence, splitting each item into n, rest parts."""
for inner_seq in seq:
yield tuple(just(n, inner_seq))
def significant(p_value):
if p_value < 0.001:
return "***"
if p_value < 0.01:
return "**"
if p_value < 0.05:
return "*"
return ""
def significant_p(p_value):
if p_value < 0.001:
return "p < .001"
if p_value < 0.01:
return "p < .01"
if p_value < 0.05:
return "p < .05"
return ""
def grouper(n, iterable, fillvalue=None):
"""grouper(3, 'ABCDEFG', 'x') --> ABC DEF Gxx
See also
--------
http://www.python.org/doc//current/library/itertools.html
"""
args = [iter(iterable)] * n
    return it.zip_longest(*args, fillvalue=fillvalue)
def pairwise(iterable, fillvalue=None):
"""s -> (s0,s1), (s1,s2), (s2, s3), ...
See also
--------
http://www.python.org/doc//current/library/itertools.html
"""
a, b = it.tee(iterable)
next(b, fillvalue)
    return zip(a, b)
def split_whitespace_tokens(line):
"""Splits a line of text by whitespace"""
token = ""
token_start = 0
for i, char in enumerate(line):
if char == ' ':
if len(token) > 0:
yield (token_start, token)
token = ""
else:
if len(token) == 0:
token_start = i
token += char
if len(token) > 0:
yield (token_start, token)
def fixations_to_saccades(fixations, exp_id=0, trial_id=0):
import scipy.spatial
saccades = []
def add_saccades(frame):
for (idx1, row1), (idx2, row2) in pairwise(frame.iterrows()):
start_ms = row1["end_ms"]
end_ms = row2["start_ms"]
duration_ms = end_ms - start_ms
#assert duration_ms >= 0, duration_ms
x1, y1 = row1["fix_x"], row1["fix_y"]
x2, y2 = row2["fix_x"], row2["fix_y"]
dist_euclid = scipy.spatial.distance.euclidean((x1, y1), (x2, y2))
saccades.append([exp_id, trial_id, start_ms, end_ms,
x1, y1, x2, y2, duration_ms, dist_euclid])
if ("exp_id" in fixations.columns):
for (exp_id, trial_id), frame in fixations.groupby(["exp_id", "trial_id"]):
add_saccades(frame)
else:
add_saccades(fixations)
cols = ["exp_id", "trial_id", "start_ms", "end_ms",
"sacc_x1", "sacc_y1", "sacc_x2", "sacc_y2",
"duration_ms", "dist_euclid"]
return pandas.DataFrame(saccades, columns=cols)
def unit_vector(vector):
return vector / np.linalg.norm(vector)
def angle_between(v1, v2):
v1_u = unit_vector(v1)
v2_u = unit_vector(v2)
angle = np.arccos(np.dot(v1_u, v2_u))
if np.isnan(angle):
if (v1_u == v2_u).all():
return 0.0
else:
return np.pi
return angle
def steady_state(trans_matrix):
import scipy.linalg
v, d = scipy.linalg.eig(np.transpose(trans_matrix))
max_vi = v.argmax()
return d[:, max_vi] / sum(d[:, max_vi])
| start, end = 0, window_size_ms
return_series = False
if not isinstance(fun, dict):
fun = { "value" : fun }
return_series = True
    values = {k: [] for k in fun}
times = []
while start < fixations.end_ms.max():
times.append(start + (window_size_ms / 2))
win_fixations = fixations[
((fixations.start_ms >= start) & (fixations.start_ms < end)) |
((fixations.end_ms >= start) & (fixations.end_ms < end))]
        for k, f in fun.items():
values[k].append(f(win_fixations))
start += step_ms
end += step_ms
    first_values = values[next(iter(values))]
if return_series:
series = pandas.Series(first_values, index=times)
return series
else:
df = pandas.DataFrame(values, index=times)
return df | identifier_body |
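The `window(seq, n)` helper in the row above is a classic overlapping sliding window: prime a tuple of width n, then shift it by one element per step. A minimal sketch of the same idea in Rust (sample names and values are illustrative, not from the dataset):

fn main() {
    let fixation_ms = [120u32, 80, 200, 150, 90];
    let n = 3;
    // Hand-rolled version, mirroring the generator above: keep a window of
    // width n and slide it one element at a time.
    let mut start = 0;
    while start + n <= fixation_ms.len() {
        println!("{:?}", &fixation_ms[start..start + n]);
        start += 1;
    }
    // On slices, the standard library's `windows` yields the same sequence.
    for w in fixation_ms.windows(n) {
        println!("{:?}", w);
    }
}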
scheduler.rs | #![allow(unused)]
mod run_queue;
use std::alloc::Layout;
use std::fmt::{self, Debug};
use std::mem;
use std::ops::Deref;
use std::ptr;
use std::sync::atomic::{AtomicU64, Ordering};
use std::sync::{Arc, Weak};
use hashbrown::HashMap;
use anyhow::anyhow;
use lazy_static::lazy_static;
use log::info;
use liblumen_core::locks::{Mutex, RwLock};
use liblumen_core::util::thread_local::ThreadLocalCell;
use liblumen_alloc::atom;
use liblumen_alloc::erts::apply;
use liblumen_alloc::erts::process;
use liblumen_alloc::erts::process::{CalleeSavedRegisters, Priority, Process, Status};
use liblumen_alloc::erts::scheduler::id;
use liblumen_alloc::erts::term::prelude::{Atom, ReferenceNumber, Term};
use liblumen_alloc::erts::ModuleFunctionArity;
use lumen_rt_core as rt_core;
use lumen_rt_core::process::CURRENT_PROCESS;
use lumen_rt_core::timer::Hierarchy;
const MAX_REDUCTION_COUNT: u32 = 20;
// External thread locals owned by the generated code
extern "C" {
#[thread_local]
static mut CURRENT_REDUCTION_COUNT: u32;
}
thread_local! {
static SCHEDULER: Arc<Scheduler> = Scheduler::registered();
}
lazy_static! {
static ref SCHEDULERS: Mutex<HashMap<id::ID, Weak<Scheduler>>> = Mutex::new(Default::default());
}
#[export_name = "__scheduler_stop_waiting"]
pub fn scheduler_stop_waiting(process: &Process) {
let id = process.scheduler_id().unwrap();
if let Some(scheduler) = SCHEDULERS.lock().get(&id).and_then(|s| s.upgrade()) {
scheduler.stop_waiting(process)
}
}
#[derive(Copy, Clone)]
struct StackPointer(*mut u64);
#[export_name = "__lumen_builtin_spawn"]
pub extern "C" fn builtin_spawn(to: Term, msg: Term) -> Term |
#[export_name = "__lumen_builtin_yield"]
pub unsafe extern "C" fn process_yield() -> bool {
let s = <Scheduler as rt_core::Scheduler>::current();
// NOTE: We always set root=false here because the root
// process never invokes this function
s.process_yield(/* root= */ false)
}
#[naked]
#[inline(never)]
#[cfg(all(unix, target_arch = "x86_64"))]
pub unsafe extern "C" fn process_return_continuation() {
let f: fn() -> () = process_return;
asm!("
callq *$0
"
:
: "r"(f)
:
: "volatile", "alignstack"
);
}
#[inline(never)]
fn process_return() {
let s = <Scheduler as rt_core::Scheduler>::current();
do_process_return(&s);
}
#[export_name = "__lumen_builtin_malloc"]
pub unsafe extern "C" fn builtin_malloc(kind: u32, arity: usize) -> *mut u8 {
use core::convert::TryInto;
use liblumen_alloc::erts::term::closure::ClosureLayout;
use liblumen_alloc::erts::term::prelude::*;
use liblumen_core::alloc::Layout;
use liblumen_term::TermKind;
let kind_result: Result<TermKind, _> = kind.try_into();
match kind_result {
Ok(TermKind::Closure) => {
let s = <Scheduler as rt_core::Scheduler>::current();
let cl = ClosureLayout::for_env_len(arity);
let result = s.current.alloc_nofrag_layout(cl.layout().clone());
if let Ok(nn) = result {
return nn.as_ptr() as *mut u8;
}
}
Ok(TermKind::Tuple) => {
let s = <Scheduler as rt_core::Scheduler>::current();
let layout = Tuple::layout_for_len(arity);
let result = s.current.alloc_nofrag_layout(layout);
if let Ok(nn) = result {
return nn.as_ptr() as *mut u8;
}
}
Ok(TermKind::Cons) => {
let s = <Scheduler as rt_core::Scheduler>::current();
let layout = Layout::new::<Cons>();
let result = s.current.alloc_nofrag_layout(layout);
if let Ok(nn) = result {
return nn.as_ptr() as *mut u8;
}
}
Ok(tk) => {
unimplemented!("unhandled use of malloc for {:?}", tk);
}
Err(_) => {
panic!("invalid term kind: {}", kind);
}
}
ptr::null_mut()
}
/// Called when the current process has finished executing, and has
/// returned all the way to its entry function. This marks the process
/// as exiting (if it wasn't already), and then yields to the scheduler
fn do_process_return(scheduler: &Scheduler) -> bool {
use liblumen_alloc::erts::term::prelude::*;
if scheduler.current.pid() != scheduler.root.pid() {
scheduler
.current
.exit(atom!("normal"), anyhow!("Out of code").into());
// NOTE: We always set root=false here, even though this can
// be called from the root process, since returning from the
// root process exits the scheduler loop anyway, so no stack
// swapping can occur
scheduler.process_yield(/* root= */ false)
} else {
true
}
}
pub struct Scheduler {
id: id::ID,
hierarchy: RwLock<Hierarchy>,
// References are always 64-bits even on 32-bit platforms
reference_count: AtomicU64,
run_queues: RwLock<run_queue::Queues>,
// Non-monotonic unique integers are scoped to the scheduler ID and then use this per-scheduler
// `u64`.
unique_integer: AtomicU64,
root: Arc<Process>,
init: ThreadLocalCell<Arc<Process>>,
current: ThreadLocalCell<Arc<Process>>,
}
// This guarantee holds as long as `init` and `current` are only
// ever accessed by the scheduler when scheduling
unsafe impl Sync for Scheduler {}
impl rt_core::Scheduler for Scheduler {
#[inline]
fn current() -> Arc<Self> {
SCHEDULER.with(|s| s.clone())
}
fn id(&self) -> id::ID {
self.id
}
fn hierarchy(&self) -> &RwLock<Hierarchy> {
&self.hierarchy
}
/// Gets the next available reference number
fn next_reference_number(&self) -> ReferenceNumber {
self.reference_count.fetch_add(1, Ordering::SeqCst)
}
}
impl Scheduler {
/// Creates a new scheduler with the default configuration
fn new() -> anyhow::Result<Scheduler> {
let id = id::next();
// The root process is how the scheduler gets time for itself,
// and is also how we know when to shutdown the scheduler due
// to termination of all its processes
let root = Arc::new(Process::new(
Priority::Normal,
None,
Arc::new(ModuleFunctionArity {
module: Atom::from_str("root"),
function: Atom::from_str("init"),
arity: 0,
}),
ptr::null_mut(),
0,
));
let run_queues = Default::default();
Scheduler::spawn_root(root.clone(), id, &run_queues)?;
// Placeholder
let init = Arc::new(Process::new(
Priority::Normal,
None,
Arc::new(ModuleFunctionArity {
module: Atom::from_str("undef"),
function: Atom::from_str("undef"),
arity: 0,
}),
ptr::null_mut(),
0,
));
// The scheduler starts with the root process running
let current = ThreadLocalCell::new(root.clone());
Ok(Self {
id,
run_queues,
root,
init: ThreadLocalCell::new(init),
current,
hierarchy: Default::default(),
reference_count: AtomicU64::new(0),
unique_integer: AtomicU64::new(0),
})
}
// Spawns the init process, should be called immediately after
// scheduler creation
pub fn init(&self) -> anyhow::Result<()> {
// The init process is the actual "root" Erlang process, it acts
// as the entry point for the program from Erlang's perspective,
// and is responsible for starting/stopping the system in Erlang.
//
// If this process exits, the scheduler terminates
let (init_heap, init_heap_size) = process::alloc::default_heap()?;
let init = Arc::new(Process::new_with_stack(
Priority::Normal,
None,
Arc::new(ModuleFunctionArity {
module: Atom::from_str("init"),
function: Atom::from_str("start"),
arity: 0,
}),
init_heap,
init_heap_size,
)?);
let clone = init.clone();
unsafe {
self.init.set(init);
}
Scheduler::spawn_internal(clone, self.id, &self.run_queues);
Ok(())
}
/// Gets the scheduler registered to this thread
///
/// If no scheduler has been created for this thread, one is created
fn registered() -> Arc<Self> {
let mut schedulers = SCHEDULERS.lock();
let s = Arc::new(Self::new().unwrap());
if let Some(_) = schedulers.insert(s.id, Arc::downgrade(&s)) {
panic!("Scheduler already registered with ID ({:?}", s.id);
}
s
}
/// Gets a scheduler by its ID
pub fn from_id(id: &id::ID) -> Option<Arc<Self>> {
Self::current_from_id(id).or_else(|| SCHEDULERS.lock().get(id).and_then(|s| s.upgrade()))
}
/// Returns the current thread's scheduler if it matches the given ID
fn current_from_id(id: &id::ID) -> Option<Arc<Self>> {
SCHEDULER.with(|s| if &s.id == id { Some(s.clone()) } else { None })
}
/// Gets the next available unique integer
pub fn next_unique_integer(&self) -> u64 {
self.unique_integer.fetch_add(1, Ordering::SeqCst)
}
/// Returns the length of the current scheduler's run queue
pub fn run_queues_len(&self) -> usize {
self.run_queues.read().len()
}
/// Returns the length of a specific run queue in the current scheduler
#[cfg(test)]
pub fn run_queue_len(&self, priority: Priority) -> usize {
self.run_queues.read().run_queue_len(priority)
}
/// Returns true if the given process is in the current scheduler's run queue
#[cfg(test)]
pub fn is_run_queued(&self, value: &Arc<Process>) -> bool {
self.run_queues.read().contains(value)
}
pub fn stop_waiting(&self, process: &Process) {
self.run_queues.write().stop_waiting(process);
}
// TODO: Request application master termination for controlled shutdown
// This request will always come from the thread which spawned the application
// master, i.e. the "main" scheduler thread
//
// Returns `Ok(())` if shutdown was successful, `Err(anyhow::Error)` if something
// went wrong during shutdown, and it was not able to complete normally
pub fn shutdown(&self) -> anyhow::Result<()> {
// For now just Ok(()), but this needs to be addressed when proper
// system startup/shutdown is in place
CURRENT_PROCESS.with(|cp| cp.replace(None));
Ok(())
}
}
impl Debug for Scheduler {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.debug_struct("Scheduler")
.field("id", &self.id)
            // The hierarchy slots take a lot of space, so don't print them by default
.field("reference_count", &self.reference_count)
.field("run_queues", &self.run_queues)
.finish()
}
}
impl Drop for Scheduler {
fn drop(&mut self) {
let mut locked_scheduler_by_id = SCHEDULERS.lock();
locked_scheduler_by_id
.remove(&self.id)
.expect("Scheduler not registered");
}
}
impl PartialEq for Scheduler {
fn eq(&self, other: &Self) -> bool {
self.id == other.id
}
}
/// What to run
pub enum Run {
/// Run the process now
Now(Arc<Process>),
/// There was a process in the queue, but it needs to be delayed because it is `Priority::Low`
/// and hadn't been delayed enough yet. Ask the `RunQueue` again for another process.
/// -- https://github.com/erlang/otp/blob/fe2b1323a3866ed0a9712e9d12e1f8f84793ec47/erts/emulator/beam/erl_process.c#L9601-L9606
Delayed,
/// There are no processes in the run queue, do other work
None,
}
impl Scheduler {
/// > 1. Update reduction counters
/// > 2. Check timers
/// > 3. If needed check balance
    /// > 4. If needed migrate processes and ports
/// > 5. Do auxiliary scheduler work
/// > 6. If needed check I/O and update time
/// > 7. While needed pick a port task to execute
/// > 8. Pick a process to execute
/// > -- [The Scheduler Loop](https://blog.stenmans.org/theBeamBook/#_the_scheduler_loop)
///
/// Returns `true` if a process was run. Returns `false` if no process could be run and the
/// scheduler should sleep or work steal.
#[must_use]
pub fn run_once(&self) -> bool {
// We always set root=true here, since calling this function is always done
// from the scheduler loop, and only ever from the root context
self.process_yield(/* root= */ true)
}
/// This function performs two roles, albeit virtually identical:
///
/// First, this function is called by the scheduler to resume execution
/// of a process pulled from the run queue. It does so using its "root"
/// process as its context.
///
/// Second, this function is called by a process when it chooses to
/// yield back to the scheduler. In this case, the scheduler "root"
/// process is swapped in, so the scheduler has a chance to do its
    /// auxiliary tasks, after which the scheduler will call it again to
/// swap in a new process.
fn process_yield(&self, is_root: bool) -> bool {
info!("entering core scheduler loop");
self.hierarchy.write().timeout();
loop {
let next = {
let mut rq = self.run_queues.write();
rq.dequeue()
};
match next {
Run::Now(process) => {
info!("found process to schedule");
// Don't allow exiting processes to run again.
//
// Without this check, a process.exit() from outside the process during WAITING
// will return to the Frame that called `process.wait()`
if !process.is_exiting() {
info!("swapping into process (is_root = {})", is_root);
unsafe {
self.swap_process(process, is_root);
}
} else {
info!("process is exiting");
process.reduce()
}
info!("exiting scheduler loop");
// When reached, either the process scheduled is the root process,
// or the process is exiting and we called .reduce(); either way we're
// returning to the main scheduler loop to check for signals, etc.
break true;
}
Run::Delayed => {
info!("found process, but it is delayed");
continue;
}
Run::None if is_root => {
info!("no processes remaining to schedule, exiting loop");
// If no processes are available, then the scheduler should steal,
// but if it can't/doesn't, then it must terminate, as there is
// nothing we can swap to. When we break here, we're returning
// to the core scheduler loop, which _must_ terminate, if it does
// not, we'll just end up right back here again.
//
// TODO: stealing
break false;
}
Run::None => unreachable!(),
}
}
}
/// This function takes care of coordinating the scheduling of a new
/// process/descheduling of the current process.
///
/// - Updating process status
/// - Updating reduction count based on accumulated reductions during execution
/// - Resetting reduction counter for next process
/// - Handling exiting processes (logging/propagating)
///
/// Once that is complete, it swaps to the new process stack via `swap_stack`,
/// at which point execution resumes where the newly scheduled process left
/// off previously, or in its init function.
unsafe fn swap_process(&self, new: Arc<Process>, is_root: bool) {
// Mark the new process as Running
let new_ctx = &new.registers as *const _;
{
let mut new_status = new.status.write();
*new_status = Status::Running;
}
// Replace the previous process with the new as the currently scheduled process
let _ = CURRENT_PROCESS.with(|cp| cp.replace(Some(new.clone())));
let prev = self.current.replace(new.clone());
// Increment reduction count if not the root process
if !is_root {
let prev_reductions = reset_reduction_counter();
prev.total_reductions
.fetch_add(prev_reductions as u64, Ordering::Relaxed);
}
// Change the previous process status to Runnable
{
let mut prev_status = prev.status.write();
if Status::Running == *prev_status {
*prev_status = Status::Runnable
}
}
// Save the previous process registers for the stack swap
let prev_ctx = &prev.registers as *const _ as *mut _;
// Then try to schedule it for the future
// If the process is exiting, then handle the exit, otherwise
// proceed to the stack swap
if let Some(exiting) = self.run_queues.write().requeue(prev) {
if let Status::Exiting(ref ex) = *exiting.status.read() {
crate::process::log_exit(&exiting, ex);
crate::process::propagate_exit(&exiting, ex);
} else {
unreachable!()
}
}
// Execute the swap
//
// When swapping to the root process, we return here, which
// will unwind back to the main scheduler loop in `lib.rs`.
//
// When swapping to a newly spawned process, we return "into"
// its init function, or put another way, we jump to its
// function prologue. In this situation, all of the saved registers
// except %rsp and %rbp will be zeroed. %rsp is set during the call
// to `spawn`, but %rbp is set to the current %rbp value to ensure
// that stack traces link the new stack to the frame in which execution
// started
//
// When swapping to a previously spawned process, we return here,
// since the process called `process_yield`. From here we unwind back
// to the call to `process_yield` and resume execution from the point
// where it was called.
swap_stack(prev_ctx, new_ctx);
}
/// Schedules the given process for execution
pub fn schedule(&mut self, process: Arc<Process>) {
debug_assert_ne!(
Some(self.id),
process.scheduler_id(),
"process is already scheduled here!"
);
process.schedule_with(self.id);
let mut rq = self.run_queues.write();
rq.enqueue(process);
}
/// Spawns a new process using the given init function as its entry
#[inline]
pub fn spawn(&mut self, process: Arc<Process>) -> anyhow::Result<()> {
Self::spawn_internal(process, self.id, &self.run_queues);
Ok(())
}
// Root process uses the original thread stack, no initialization required.
//
// It also starts "running", so we don't put it on the run queue
fn spawn_root(
process: Arc<Process>,
id: id::ID,
_run_queues: &RwLock<run_queue::Queues>,
) -> anyhow::Result<()> {
process.schedule_with(id);
*process.status.write() = Status::Running;
Ok(())
}
fn spawn_internal(process: Arc<Process>, id: id::ID, run_queues: &RwLock<run_queue::Queues>) {
process.schedule_with(id);
let mfa = &process.initial_module_function_arity;
let init_fn_result = apply::find_symbol(&mfa);
if init_fn_result.is_none() {
panic!(
"invalid mfa provided for process ({}), no such symbol found",
&mfa
);
}
let init_fn = init_fn_result.unwrap();
#[inline(always)]
unsafe fn push(sp: &mut StackPointer, value: u64) {
sp.0 = sp.0.offset(-1);
ptr::write(sp.0, value);
}
// Write the return function and init function to the end of the stack,
// when execution resumes, the pointer before the stack pointer will be
// used as the return address - the first time that will be the init function.
//
// When execution returns from the init function, then it will return via
// `process_return`, which will return to the scheduler and indicate that
// the process exited. The nature of the exit is indicated by error state
// in the process itself
unsafe {
let mut sp = StackPointer(process.stack.top as *mut u64);
// Function that will be called when returning from init_fn
push(&mut sp, process_return_continuation as u64);
// Function that the newly spawned process should call first
push(&mut sp, init_fn as u64);
// Update process stack pointer
let s_top = &process.stack.top as *const _ as *mut _;
ptr::write(s_top, sp.0 as *const u8);
// Update rsp
let rsp = &process.registers.rsp as *const u64 as *mut _;
ptr::write(rsp, sp.0 as u64);
let rbp = &process.registers.rbp as *const u64 as *mut _;
ptr::write(rbp, sp.0 as u64);
}
*process.status.write() = Status::Runnable;
let mut rq = run_queues.write();
rq.enqueue(process);
}
}
fn reset_reduction_counter() -> u64 {
let count = unsafe { CURRENT_REDUCTION_COUNT };
unsafe {
CURRENT_REDUCTION_COUNT = 0;
}
count as u64
//CURRENT_REDUCTION_COUNT.swap(0, Ordering::Relaxed)
}
/// This function uses inline assembly to save the callee-saved registers for the outgoing
/// process, and restore them for the incoming process. When this function returns, it will
/// resume execution where `swap_stack` was called previously.
#[naked]
#[inline(never)]
#[cfg(all(unix, target_arch = "x86_64"))]
unsafe fn swap_stack(prev: *mut CalleeSavedRegisters, new: *const CalleeSavedRegisters) {
asm!("
# Save the stack pointer, and callee-saved registers of `prev`
movq %rsp, ($0)
movq %r15, 8($0)
movq %r14, 16($0)
movq %r13, 24($0)
movq %r12, 32($0)
movq %rbx, 40($0)
movq %rbp, 48($0)
# Restore the stack pointer, and callee-saved registers of `new`
movq ($1), %rsp
movq 8($1), %r15
movq 16($1), %r14
movq 24($1), %r13
movq 32($1), %r12
movq 40($1), %rbx
movq 48($1), %rbp
# We need to let the unwinder know that the CFA has changed, currently
# that is 8 bytes above %rsp, because the call to this function pushes
# %rip to the stack, and since we're restoring the stack pointer, the
# value of the CFA, from the perspective of the unwinder, has also been
# changed
.cfi_def_cfa %rsp, 8
.cfi_restore %rsp
.cfi_restore %r15
.cfi_restore %r14
.cfi_restore %r13
.cfi_restore %r12
.cfi_restore %rbx
.cfi_restore %rbp
"
:
: "r"(prev), "r"(new)
:
: "volatile", "alignstack"
);
}
#[cfg(not(all(unix, target_arch = "x86_64")))]
compile_error!("lumen_rt_minimal does not currently support this architecture!");
| {
unimplemented!()
} | identifier_body |
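The scheduler loop above hinges on the three-way contract of `run_queue::Queues::dequeue`, expressed by the `Run` enum: run a process now, retry because a low-priority process has not been delayed long enough, or report an empty queue. A toy stand-in for that contract (a hypothetical FIFO with per-process delay counters, not the real `run_queue` module):

use std::collections::VecDeque;

enum Run {
    Now(u64),
    Delayed,
    None,
}

struct ToyQueues {
    // (pid, remaining delay ticks) -- a stand-in for real priority bookkeeping
    procs: VecDeque<(u64, u8)>,
}

impl ToyQueues {
    fn dequeue(&mut self) -> Run {
        match self.procs.pop_front() {
            None => Run::None,
            Some((pid, 0)) => Run::Now(pid),
            Some((pid, delay)) => {
                // Not delayed enough yet: requeue and ask the caller to retry,
                // exactly the case the scheduler loop handles with `continue`.
                self.procs.push_back((pid, delay - 1));
                Run::Delayed
            }
        }
    }
}

fn main() {
    let mut q = ToyQueues {
        procs: VecDeque::from(vec![(1, 0), (2, 2)]),
    };
    loop {
        match q.dequeue() {
            Run::Now(pid) => println!("run pid {}", pid),
            Run::Delayed => continue,
            Run::None => break,
        }
    }
}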
scheduler.rs | #![allow(unused)]
mod run_queue;
use std::alloc::Layout;
use std::fmt::{self, Debug};
use std::mem;
use std::ops::Deref;
use std::ptr;
use std::sync::atomic::{AtomicU64, Ordering};
use std::sync::{Arc, Weak};
use hashbrown::HashMap;
use anyhow::anyhow;
use lazy_static::lazy_static;
use log::info;
use liblumen_core::locks::{Mutex, RwLock};
use liblumen_core::util::thread_local::ThreadLocalCell;
use liblumen_alloc::atom;
use liblumen_alloc::erts::apply;
use liblumen_alloc::erts::process;
use liblumen_alloc::erts::process::{CalleeSavedRegisters, Priority, Process, Status};
use liblumen_alloc::erts::scheduler::id;
use liblumen_alloc::erts::term::prelude::{Atom, ReferenceNumber, Term};
use liblumen_alloc::erts::ModuleFunctionArity;
use lumen_rt_core as rt_core;
use lumen_rt_core::process::CURRENT_PROCESS;
use lumen_rt_core::timer::Hierarchy;
const MAX_REDUCTION_COUNT: u32 = 20;
// External thread locals owned by the generated code
extern "C" {
#[thread_local]
static mut CURRENT_REDUCTION_COUNT: u32;
}
thread_local! {
static SCHEDULER: Arc<Scheduler> = Scheduler::registered();
}
lazy_static! {
static ref SCHEDULERS: Mutex<HashMap<id::ID, Weak<Scheduler>>> = Mutex::new(Default::default());
}
#[export_name = "__scheduler_stop_waiting"]
pub fn scheduler_stop_waiting(process: &Process) {
let id = process.scheduler_id().unwrap();
if let Some(scheduler) = SCHEDULERS.lock().get(&id).and_then(|s| s.upgrade()) {
scheduler.stop_waiting(process)
}
}
#[derive(Copy, Clone)]
struct StackPointer(*mut u64);
#[export_name = "__lumen_builtin_spawn"]
pub extern "C" fn builtin_spawn(to: Term, msg: Term) -> Term {
unimplemented!()
}
#[export_name = "__lumen_builtin_yield"]
pub unsafe extern "C" fn process_yield() -> bool {
let s = <Scheduler as rt_core::Scheduler>::current();
// NOTE: We always set root=false here because the root
// process never invokes this function
s.process_yield(/* root= */ false)
}
#[naked]
#[inline(never)]
#[cfg(all(unix, target_arch = "x86_64"))]
pub unsafe extern "C" fn process_return_continuation() {
let f: fn() -> () = process_return;
asm!("
callq *$0
"
:
: "r"(f)
:
: "volatile", "alignstack"
);
}
#[inline(never)]
fn process_return() {
let s = <Scheduler as rt_core::Scheduler>::current();
do_process_return(&s);
}
#[export_name = "__lumen_builtin_malloc"]
pub unsafe extern "C" fn builtin_malloc(kind: u32, arity: usize) -> *mut u8 {
use core::convert::TryInto;
use liblumen_alloc::erts::term::closure::ClosureLayout;
use liblumen_alloc::erts::term::prelude::*;
use liblumen_core::alloc::Layout;
use liblumen_term::TermKind;
let kind_result: Result<TermKind, _> = kind.try_into();
match kind_result {
Ok(TermKind::Closure) => {
let s = <Scheduler as rt_core::Scheduler>::current();
let cl = ClosureLayout::for_env_len(arity);
let result = s.current.alloc_nofrag_layout(cl.layout().clone());
if let Ok(nn) = result {
return nn.as_ptr() as *mut u8;
}
}
Ok(TermKind::Tuple) => {
let s = <Scheduler as rt_core::Scheduler>::current();
let layout = Tuple::layout_for_len(arity);
let result = s.current.alloc_nofrag_layout(layout);
if let Ok(nn) = result {
return nn.as_ptr() as *mut u8;
}
}
Ok(TermKind::Cons) => {
let s = <Scheduler as rt_core::Scheduler>::current();
let layout = Layout::new::<Cons>();
let result = s.current.alloc_nofrag_layout(layout);
if let Ok(nn) = result {
return nn.as_ptr() as *mut u8;
}
}
Ok(tk) => {
unimplemented!("unhandled use of malloc for {:?}", tk);
}
Err(_) => {
panic!("invalid term kind: {}", kind);
}
}
ptr::null_mut()
}
/// Called when the current process has finished executing, and has
/// returned all the way to its entry function. This marks the process
/// as exiting (if it wasn't already), and then yields to the scheduler
fn do_process_return(scheduler: &Scheduler) -> bool {
use liblumen_alloc::erts::term::prelude::*;
if scheduler.current.pid() != scheduler.root.pid() {
scheduler
.current
.exit(atom!("normal"), anyhow!("Out of code").into());
// NOTE: We always set root=false here, even though this can
// be called from the root process, since returning from the
// root process exits the scheduler loop anyway, so no stack
// swapping can occur
scheduler.process_yield(/* root= */ false)
} else {
true
}
}
pub struct Scheduler {
id: id::ID,
hierarchy: RwLock<Hierarchy>,
// References are always 64-bits even on 32-bit platforms
reference_count: AtomicU64,
run_queues: RwLock<run_queue::Queues>,
// Non-monotonic unique integers are scoped to the scheduler ID and then use this per-scheduler
// `u64`.
unique_integer: AtomicU64,
root: Arc<Process>,
init: ThreadLocalCell<Arc<Process>>,
current: ThreadLocalCell<Arc<Process>>,
}
// This guarantee holds as long as `init` and `current` are only
// ever accessed by the scheduler when scheduling
unsafe impl Sync for Scheduler {}
impl rt_core::Scheduler for Scheduler {
#[inline]
fn current() -> Arc<Self> {
SCHEDULER.with(|s| s.clone())
}
fn id(&self) -> id::ID {
self.id
}
fn hierarchy(&self) -> &RwLock<Hierarchy> {
&self.hierarchy
}
/// Gets the next available reference number
fn next_reference_number(&self) -> ReferenceNumber {
self.reference_count.fetch_add(1, Ordering::SeqCst)
}
}
impl Scheduler {
/// Creates a new scheduler with the default configuration
fn new() -> anyhow::Result<Scheduler> {
let id = id::next();
// The root process is how the scheduler gets time for itself,
// and is also how we know when to shutdown the scheduler due
// to termination of all its processes
let root = Arc::new(Process::new(
Priority::Normal,
None,
Arc::new(ModuleFunctionArity {
module: Atom::from_str("root"),
function: Atom::from_str("init"),
arity: 0,
}),
ptr::null_mut(),
0,
));
let run_queues = Default::default();
Scheduler::spawn_root(root.clone(), id, &run_queues)?;
// Placeholder
let init = Arc::new(Process::new(
Priority::Normal,
None,
Arc::new(ModuleFunctionArity {
module: Atom::from_str("undef"),
function: Atom::from_str("undef"),
arity: 0,
}),
ptr::null_mut(),
0,
));
// The scheduler starts with the root process running
let current = ThreadLocalCell::new(root.clone());
Ok(Self {
id,
run_queues,
root,
init: ThreadLocalCell::new(init),
current,
hierarchy: Default::default(),
reference_count: AtomicU64::new(0),
unique_integer: AtomicU64::new(0),
})
}
// Spawns the init process, should be called immediately after
// scheduler creation
pub fn init(&self) -> anyhow::Result<()> {
// The init process is the actual "root" Erlang process, it acts
// as the entry point for the program from Erlang's perspective,
// and is responsible for starting/stopping the system in Erlang.
//
// If this process exits, the scheduler terminates
let (init_heap, init_heap_size) = process::alloc::default_heap()?;
let init = Arc::new(Process::new_with_stack(
Priority::Normal,
None,
Arc::new(ModuleFunctionArity {
module: Atom::from_str("init"),
function: Atom::from_str("start"),
arity: 0,
}),
init_heap,
init_heap_size,
)?);
let clone = init.clone();
unsafe {
self.init.set(init);
}
Scheduler::spawn_internal(clone, self.id, &self.run_queues);
Ok(())
}
/// Gets the scheduler registered to this thread
///
/// If no scheduler has been created for this thread, one is created
fn registered() -> Arc<Self> {
let mut schedulers = SCHEDULERS.lock();
let s = Arc::new(Self::new().unwrap());
if let Some(_) = schedulers.insert(s.id, Arc::downgrade(&s)) {
panic!("Scheduler already registered with ID ({:?}", s.id);
}
s
}
/// Gets a scheduler by its ID
pub fn from_id(id: &id::ID) -> Option<Arc<Self>> {
Self::current_from_id(id).or_else(|| SCHEDULERS.lock().get(id).and_then(|s| s.upgrade()))
}
/// Returns the current thread's scheduler if it matches the given ID
fn current_from_id(id: &id::ID) -> Option<Arc<Self>> {
SCHEDULER.with(|s| if &s.id == id { Some(s.clone()) } else { None })
}
/// Gets the next available unique integer
pub fn next_unique_integer(&self) -> u64 {
self.unique_integer.fetch_add(1, Ordering::SeqCst)
}
/// Returns the length of the current scheduler's run queue
pub fn run_queues_len(&self) -> usize {
self.run_queues.read().len()
}
/// Returns the length of a specific run queue in the current scheduler
#[cfg(test)]
pub fn | (&self, priority: Priority) -> usize {
self.run_queues.read().run_queue_len(priority)
}
/// Returns true if the given process is in the current scheduler's run queue
#[cfg(test)]
pub fn is_run_queued(&self, value: &Arc<Process>) -> bool {
self.run_queues.read().contains(value)
}
pub fn stop_waiting(&self, process: &Process) {
self.run_queues.write().stop_waiting(process);
}
// TODO: Request application master termination for controlled shutdown
// This request will always come from the thread which spawned the application
// master, i.e. the "main" scheduler thread
//
// Returns `Ok(())` if shutdown was successful, `Err(anyhow::Error)` if something
// went wrong during shutdown, and it was not able to complete normally
pub fn shutdown(&self) -> anyhow::Result<()> {
// For now just Ok(()), but this needs to be addressed when proper
// system startup/shutdown is in place
CURRENT_PROCESS.with(|cp| cp.replace(None));
Ok(())
}
}
impl Debug for Scheduler {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.debug_struct("Scheduler")
.field("id", &self.id)
            // The hierarchy slots take a lot of space, so don't print them by default
.field("reference_count", &self.reference_count)
.field("run_queues", &self.run_queues)
.finish()
}
}
impl Drop for Scheduler {
fn drop(&mut self) {
let mut locked_scheduler_by_id = SCHEDULERS.lock();
locked_scheduler_by_id
.remove(&self.id)
.expect("Scheduler not registered");
}
}
impl PartialEq for Scheduler {
fn eq(&self, other: &Self) -> bool {
self.id == other.id
}
}
/// What to run
pub enum Run {
/// Run the process now
Now(Arc<Process>),
/// There was a process in the queue, but it needs to be delayed because it is `Priority::Low`
/// and hadn't been delayed enough yet. Ask the `RunQueue` again for another process.
/// -- https://github.com/erlang/otp/blob/fe2b1323a3866ed0a9712e9d12e1f8f84793ec47/erts/emulator/beam/erl_process.c#L9601-L9606
Delayed,
/// There are no processes in the run queue, do other work
None,
}
impl Scheduler {
/// > 1. Update reduction counters
/// > 2. Check timers
/// > 3. If needed check balance
    /// > 4. If needed migrate processes and ports
/// > 5. Do auxiliary scheduler work
/// > 6. If needed check I/O and update time
/// > 7. While needed pick a port task to execute
/// > 8. Pick a process to execute
/// > -- [The Scheduler Loop](https://blog.stenmans.org/theBeamBook/#_the_scheduler_loop)
///
/// Returns `true` if a process was run. Returns `false` if no process could be run and the
/// scheduler should sleep or work steal.
#[must_use]
pub fn run_once(&self) -> bool {
// We always set root=true here, since calling this function is always done
// from the scheduler loop, and only ever from the root context
self.process_yield(/* root= */ true)
}
/// This function performs two roles, albeit virtually identical:
///
/// First, this function is called by the scheduler to resume execution
/// of a process pulled from the run queue. It does so using its "root"
/// process as its context.
///
/// Second, this function is called by a process when it chooses to
/// yield back to the scheduler. In this case, the scheduler "root"
/// process is swapped in, so the scheduler has a chance to do its
    /// auxiliary tasks, after which the scheduler will call it again to
/// swap in a new process.
fn process_yield(&self, is_root: bool) -> bool {
info!("entering core scheduler loop");
self.hierarchy.write().timeout();
loop {
let next = {
let mut rq = self.run_queues.write();
rq.dequeue()
};
match next {
Run::Now(process) => {
info!("found process to schedule");
// Don't allow exiting processes to run again.
//
// Without this check, a process.exit() from outside the process during WAITING
// will return to the Frame that called `process.wait()`
if !process.is_exiting() {
info!("swapping into process (is_root = {})", is_root);
unsafe {
self.swap_process(process, is_root);
}
} else {
info!("process is exiting");
process.reduce()
}
info!("exiting scheduler loop");
// When reached, either the process scheduled is the root process,
// or the process is exiting and we called .reduce(); either way we're
// returning to the main scheduler loop to check for signals, etc.
break true;
}
Run::Delayed => {
info!("found process, but it is delayed");
continue;
}
Run::None if is_root => {
info!("no processes remaining to schedule, exiting loop");
// If no processes are available, then the scheduler should steal,
// but if it can't/doesn't, then it must terminate, as there is
// nothing we can swap to. When we break here, we're returning
// to the core scheduler loop, which _must_ terminate, if it does
// not, we'll just end up right back here again.
//
// TODO: stealing
break false;
}
Run::None => unreachable!(),
}
}
}
/// This function takes care of coordinating the scheduling of a new
/// process/descheduling of the current process.
///
/// - Updating process status
/// - Updating reduction count based on accumulated reductions during execution
/// - Resetting reduction counter for next process
/// - Handling exiting processes (logging/propagating)
///
/// Once that is complete, it swaps to the new process stack via `swap_stack`,
/// at which point execution resumes where the newly scheduled process left
/// off previously, or in its init function.
unsafe fn swap_process(&self, new: Arc<Process>, is_root: bool) {
// Mark the new process as Running
let new_ctx = &new.registers as *const _;
{
let mut new_status = new.status.write();
*new_status = Status::Running;
}
// Replace the previous process with the new as the currently scheduled process
let _ = CURRENT_PROCESS.with(|cp| cp.replace(Some(new.clone())));
let prev = self.current.replace(new.clone());
// Increment reduction count if not the root process
if !is_root {
let prev_reductions = reset_reduction_counter();
prev.total_reductions
.fetch_add(prev_reductions as u64, Ordering::Relaxed);
}
// Change the previous process status to Runnable
{
let mut prev_status = prev.status.write();
if Status::Running == *prev_status {
*prev_status = Status::Runnable
}
}
// Save the previous process registers for the stack swap
let prev_ctx = &prev.registers as *const _ as *mut _;
// Then try to schedule it for the future
// If the process is exiting, then handle the exit, otherwise
// proceed to the stack swap
if let Some(exiting) = self.run_queues.write().requeue(prev) {
if let Status::Exiting(ref ex) = *exiting.status.read() {
crate::process::log_exit(&exiting, ex);
crate::process::propagate_exit(&exiting, ex);
} else {
unreachable!()
}
}
// Execute the swap
//
// When swapping to the root process, we return here, which
// will unwind back to the main scheduler loop in `lib.rs`.
//
// When swapping to a newly spawned process, we return "into"
// its init function, or put another way, we jump to its
// function prologue. In this situation, all of the saved registers
// except %rsp and %rbp will be zeroed. %rsp is set during the call
// to `spawn`, but %rbp is set to the current %rbp value to ensure
// that stack traces link the new stack to the frame in which execution
// started
//
// When swapping to a previously spawned process, we return here,
// since the process called `process_yield`. From here we unwind back
// to the call to `process_yield` and resume execution from the point
// where it was called.
swap_stack(prev_ctx, new_ctx);
}
/// Schedules the given process for execution
pub fn schedule(&mut self, process: Arc<Process>) {
debug_assert_ne!(
Some(self.id),
process.scheduler_id(),
"process is already scheduled here!"
);
process.schedule_with(self.id);
let mut rq = self.run_queues.write();
rq.enqueue(process);
}
/// Spawns a new process using the given init function as its entry
#[inline]
pub fn spawn(&mut self, process: Arc<Process>) -> anyhow::Result<()> {
Self::spawn_internal(process, self.id, &self.run_queues);
Ok(())
}
// Root process uses the original thread stack, no initialization required.
//
// It also starts "running", so we don't put it on the run queue
fn spawn_root(
process: Arc<Process>,
id: id::ID,
_run_queues: &RwLock<run_queue::Queues>,
) -> anyhow::Result<()> {
process.schedule_with(id);
*process.status.write() = Status::Running;
Ok(())
}
fn spawn_internal(process: Arc<Process>, id: id::ID, run_queues: &RwLock<run_queue::Queues>) {
process.schedule_with(id);
let mfa = &process.initial_module_function_arity;
let init_fn_result = apply::find_symbol(&mfa);
if init_fn_result.is_none() {
panic!(
"invalid mfa provided for process ({}), no such symbol found",
&mfa
);
}
let init_fn = init_fn_result.unwrap();
#[inline(always)]
unsafe fn push(sp: &mut StackPointer, value: u64) {
sp.0 = sp.0.offset(-1);
ptr::write(sp.0, value);
}
// Write the return function and init function to the end of the stack,
// when execution resumes, the pointer before the stack pointer will be
// used as the return address - the first time that will be the init function.
//
// When execution returns from the init function, then it will return via
// `process_return`, which will return to the scheduler and indicate that
// the process exited. The nature of the exit is indicated by error state
// in the process itself
unsafe {
let mut sp = StackPointer(process.stack.top as *mut u64);
// Function that will be called when returning from init_fn
push(&mut sp, process_return_continuation as u64);
// Function that the newly spawned process should call first
push(&mut sp, init_fn as u64);
// Update process stack pointer
let s_top = &process.stack.top as *const _ as *mut _;
ptr::write(s_top, sp.0 as *const u8);
// Update rsp
let rsp = &process.registers.rsp as *const u64 as *mut _;
ptr::write(rsp, sp.0 as u64);
let rbp = &process.registers.rbp as *const u64 as *mut _;
ptr::write(rbp, sp.0 as u64);
}
*process.status.write() = Status::Runnable;
let mut rq = run_queues.write();
rq.enqueue(process);
}
}
fn reset_reduction_counter() -> u64 {
let count = unsafe { CURRENT_REDUCTION_COUNT };
unsafe {
CURRENT_REDUCTION_COUNT = 0;
}
count as u64
//CURRENT_REDUCTION_COUNT.swap(0, Ordering::Relaxed)
}
/// This function uses inline assembly to save the callee-saved registers for the outgoing
/// process, and restore them for the incoming process. When this function returns, it will
/// resume execution where `swap_stack` was called previously.
#[naked]
#[inline(never)]
#[cfg(all(unix, target_arch = "x86_64"))]
unsafe fn swap_stack(prev: *mut CalleeSavedRegisters, new: *const CalleeSavedRegisters) {
asm!("
# Save the stack pointer, and callee-saved registers of `prev`
movq %rsp, ($0)
movq %r15, 8($0)
movq %r14, 16($0)
movq %r13, 24($0)
movq %r12, 32($0)
movq %rbx, 40($0)
movq %rbp, 48($0)
# Restore the stack pointer, and callee-saved registers of `new`
movq ($1), %rsp
movq 8($1), %r15
movq 16($1), %r14
movq 24($1), %r13
movq 32($1), %r12
movq 40($1), %rbx
movq 48($1), %rbp
# We need to let the unwinder know that the CFA has changed, currently
# that is 8 bytes above %rsp, because the call to this function pushes
# %rip to the stack, and since we're restoring the stack pointer, the
# value of the CFA, from the perspective of the unwinder, has also been
# changed
.cfi_def_cfa %rsp, 8
.cfi_restore %rsp
.cfi_restore %r15
.cfi_restore %r14
.cfi_restore %r13
.cfi_restore %r12
.cfi_restore %rbx
.cfi_restore %rbp
"
:
: "r"(prev), "r"(new)
:
: "volatile", "alignstack"
);
}
#[cfg(not(all(unix, target_arch = "x86_64")))]
compile_error!("lumen_rt_minimal does not currently support this architecture!");
| run_queue_len | identifier_name |
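The `spawn_internal` function in these rows seeds a brand-new process stack so that ordinary `ret` instructions drive the first transfer of control: the init function's address is pushed last (so it is popped first), with `process_return_continuation` beneath it as init's own return address. A hedged illustration of just that layout, using a plain array in place of a machine stack (no context switch actually happens here):

// Dummy stand-ins for the real entry points.
fn init_fn() {}
fn process_return_continuation() {}

// Same shape as the nested `push` in spawn_internal: move the stack pointer
// down one slot, then write the value.
fn push(stack: &mut [u64; 8], sp: &mut usize, value: u64) {
    *sp -= 1;
    stack[*sp] = value;
}

fn main() {
    // Model the stack as a small downward-growing array; `sp` indexes the
    // most recently pushed slot.
    let mut stack = [0u64; 8];
    let mut sp = stack.len();

    push(&mut stack, &mut sp, process_return_continuation as usize as u64);
    push(&mut stack, &mut sp, init_fn as usize as u64);

    // The first `ret` would pop init_fn; returning from init_fn would then
    // pop the continuation.
    assert_eq!(stack[sp], init_fn as usize as u64);
    assert_eq!(stack[sp + 1], process_return_continuation as usize as u64);
    println!("seeded stack: sp = {}", sp);
}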
scheduler.rs | #![allow(unused)]
mod run_queue;
use std::alloc::Layout;
use std::fmt::{self, Debug};
use std::mem;
use std::ops::Deref;
use std::ptr;
use std::sync::atomic::{AtomicU64, Ordering};
use std::sync::{Arc, Weak};
use hashbrown::HashMap;
use anyhow::anyhow;
use lazy_static::lazy_static;
use log::info;
use liblumen_core::locks::{Mutex, RwLock};
use liblumen_core::util::thread_local::ThreadLocalCell;
use liblumen_alloc::atom;
use liblumen_alloc::erts::apply;
use liblumen_alloc::erts::process;
use liblumen_alloc::erts::process::{CalleeSavedRegisters, Priority, Process, Status};
use liblumen_alloc::erts::scheduler::id;
use liblumen_alloc::erts::term::prelude::{Atom, ReferenceNumber, Term};
use liblumen_alloc::erts::ModuleFunctionArity;
use lumen_rt_core as rt_core;
use lumen_rt_core::process::CURRENT_PROCESS;
use lumen_rt_core::timer::Hierarchy;
const MAX_REDUCTION_COUNT: u32 = 20;
// External thread locals owned by the generated code
extern "C" {
#[thread_local]
static mut CURRENT_REDUCTION_COUNT: u32;
}
thread_local! {
static SCHEDULER: Arc<Scheduler> = Scheduler::registered();
}
lazy_static! {
static ref SCHEDULERS: Mutex<HashMap<id::ID, Weak<Scheduler>>> = Mutex::new(Default::default());
}
#[export_name = "__scheduler_stop_waiting"]
pub fn scheduler_stop_waiting(process: &Process) {
let id = process.scheduler_id().unwrap();
if let Some(scheduler) = SCHEDULERS.lock().get(&id).and_then(|s| s.upgrade()) {
scheduler.stop_waiting(process)
}
}
#[derive(Copy, Clone)]
struct StackPointer(*mut u64);
#[export_name = "__lumen_builtin_spawn"]
pub extern "C" fn builtin_spawn(to: Term, msg: Term) -> Term {
unimplemented!()
}
#[export_name = "__lumen_builtin_yield"]
pub unsafe extern "C" fn process_yield() -> bool {
let s = <Scheduler as rt_core::Scheduler>::current();
// NOTE: We always set root=false here because the root
// process never invokes this function
s.process_yield(/* root= */ false)
}
#[naked]
#[inline(never)]
#[cfg(all(unix, target_arch = "x86_64"))]
pub unsafe extern "C" fn process_return_continuation() {
let f: fn() -> () = process_return;
asm!("
callq *$0
"
:
: "r"(f)
:
: "volatile", "alignstack"
);
}
#[inline(never)]
fn process_return() {
let s = <Scheduler as rt_core::Scheduler>::current();
do_process_return(&s);
}
#[export_name = "__lumen_builtin_malloc"]
pub unsafe extern "C" fn builtin_malloc(kind: u32, arity: usize) -> *mut u8 {
use core::convert::TryInto;
use liblumen_alloc::erts::term::closure::ClosureLayout;
use liblumen_alloc::erts::term::prelude::*;
use liblumen_core::alloc::Layout;
use liblumen_term::TermKind;
let kind_result: Result<TermKind, _> = kind.try_into();
match kind_result {
Ok(TermKind::Closure) => {
let s = <Scheduler as rt_core::Scheduler>::current();
let cl = ClosureLayout::for_env_len(arity);
let result = s.current.alloc_nofrag_layout(cl.layout().clone());
if let Ok(nn) = result {
return nn.as_ptr() as *mut u8;
}
}
Ok(TermKind::Tuple) => {
let s = <Scheduler as rt_core::Scheduler>::current();
let layout = Tuple::layout_for_len(arity);
let result = s.current.alloc_nofrag_layout(layout);
if let Ok(nn) = result {
return nn.as_ptr() as *mut u8;
}
}
Ok(TermKind::Cons) => {
let s = <Scheduler as rt_core::Scheduler>::current();
let layout = Layout::new::<Cons>();
let result = s.current.alloc_nofrag_layout(layout);
if let Ok(nn) = result {
return nn.as_ptr() as *mut u8;
}
}
Ok(tk) => |
Err(_) => {
panic!("invalid term kind: {}", kind);
}
}
ptr::null_mut()
}
/// Called when the current process has finished executing, and has
/// returned all the way to its entry function. This marks the process
/// as exiting (if it wasn't already), and then yields to the scheduler
fn do_process_return(scheduler: &Scheduler) -> bool {
use liblumen_alloc::erts::term::prelude::*;
if scheduler.current.pid() != scheduler.root.pid() {
scheduler
.current
.exit(atom!("normal"), anyhow!("Out of code").into());
// NOTE: We always set root=false here, even though this can
// be called from the root process, since returning from the
// root process exits the scheduler loop anyway, so no stack
// swapping can occur
scheduler.process_yield(/* root= */ false)
} else {
true
}
}
pub struct Scheduler {
id: id::ID,
hierarchy: RwLock<Hierarchy>,
// References are always 64-bits even on 32-bit platforms
reference_count: AtomicU64,
run_queues: RwLock<run_queue::Queues>,
// Non-monotonic unique integers are scoped to the scheduler ID and then use this per-scheduler
// `u64`.
unique_integer: AtomicU64,
root: Arc<Process>,
init: ThreadLocalCell<Arc<Process>>,
current: ThreadLocalCell<Arc<Process>>,
}
// This guarantee holds as long as `init` and `current` are only
// ever accessed by the scheduler when scheduling
unsafe impl Sync for Scheduler {}
impl rt_core::Scheduler for Scheduler {
#[inline]
fn current() -> Arc<Self> {
SCHEDULER.with(|s| s.clone())
}
fn id(&self) -> id::ID {
self.id
}
fn hierarchy(&self) -> &RwLock<Hierarchy> {
&self.hierarchy
}
/// Gets the next available reference number
fn next_reference_number(&self) -> ReferenceNumber {
self.reference_count.fetch_add(1, Ordering::SeqCst)
}
}
impl Scheduler {
/// Creates a new scheduler with the default configuration
fn new() -> anyhow::Result<Scheduler> {
let id = id::next();
// The root process is how the scheduler gets time for itself,
// and is also how we know when to shutdown the scheduler due
// to termination of all its processes
let root = Arc::new(Process::new(
Priority::Normal,
None,
Arc::new(ModuleFunctionArity {
module: Atom::from_str("root"),
function: Atom::from_str("init"),
arity: 0,
}),
ptr::null_mut(),
0,
));
let run_queues = Default::default();
Scheduler::spawn_root(root.clone(), id, &run_queues)?;
// Placeholder
let init = Arc::new(Process::new(
Priority::Normal,
None,
Arc::new(ModuleFunctionArity {
module: Atom::from_str("undef"),
function: Atom::from_str("undef"),
arity: 0,
}),
ptr::null_mut(),
0,
));
// The scheduler starts with the root process running
let current = ThreadLocalCell::new(root.clone());
Ok(Self {
id,
run_queues,
root,
init: ThreadLocalCell::new(init),
current,
hierarchy: Default::default(),
reference_count: AtomicU64::new(0),
unique_integer: AtomicU64::new(0),
})
}
// Spawns the init process, should be called immediately after
// scheduler creation
pub fn init(&self) -> anyhow::Result<()> {
// The init process is the actual "root" Erlang process, it acts
// as the entry point for the program from Erlang's perspective,
// and is responsible for starting/stopping the system in Erlang.
//
// If this process exits, the scheduler terminates
let (init_heap, init_heap_size) = process::alloc::default_heap()?;
let init = Arc::new(Process::new_with_stack(
Priority::Normal,
None,
Arc::new(ModuleFunctionArity {
module: Atom::from_str("init"),
function: Atom::from_str("start"),
arity: 0,
}),
init_heap,
init_heap_size,
)?);
let clone = init.clone();
unsafe {
self.init.set(init);
}
Scheduler::spawn_internal(clone, self.id, &self.run_queues);
Ok(())
}
/// Gets the scheduler registered to this thread
///
/// If no scheduler has been created for this thread, one is created
fn registered() -> Arc<Self> {
let mut schedulers = SCHEDULERS.lock();
let s = Arc::new(Self::new().unwrap());
if let Some(_) = schedulers.insert(s.id, Arc::downgrade(&s)) {
panic!("Scheduler already registered with ID ({:?}", s.id);
}
s
}
/// Gets a scheduler by its ID
pub fn from_id(id: &id::ID) -> Option<Arc<Self>> {
Self::current_from_id(id).or_else(|| SCHEDULERS.lock().get(id).and_then(|s| s.upgrade()))
}
/// Returns the current thread's scheduler if it matches the given ID
fn current_from_id(id: &id::ID) -> Option<Arc<Self>> {
SCHEDULER.with(|s| if &s.id == id { Some(s.clone()) } else { None })
}
/// Gets the next available unique integer
pub fn next_unique_integer(&self) -> u64 {
self.unique_integer.fetch_add(1, Ordering::SeqCst)
}
/// Returns the length of the current scheduler's run queue
pub fn run_queues_len(&self) -> usize {
self.run_queues.read().len()
}
/// Returns the length of a specific run queue in the current scheduler
#[cfg(test)]
pub fn run_queue_len(&self, priority: Priority) -> usize {
self.run_queues.read().run_queue_len(priority)
}
/// Returns true if the given process is in the current scheduler's run queue
#[cfg(test)]
pub fn is_run_queued(&self, value: &Arc<Process>) -> bool {
self.run_queues.read().contains(value)
}
pub fn stop_waiting(&self, process: &Process) {
self.run_queues.write().stop_waiting(process);
}
// TODO: Request application master termination for controlled shutdown
// This request will always come from the thread which spawned the application
// master, i.e. the "main" scheduler thread
//
// Returns `Ok(())` if shutdown was successful, `Err(anyhow::Error)` if something
// went wrong during shutdown, and it was not able to complete normally
pub fn shutdown(&self) -> anyhow::Result<()> {
// For now just Ok(()), but this needs to be addressed when proper
// system startup/shutdown is in place
CURRENT_PROCESS.with(|cp| cp.replace(None));
Ok(())
}
}
impl Debug for Scheduler {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.debug_struct("Scheduler")
.field("id", &self.id)
            // The hierarchy slots take a lot of space, so don't print them by default
.field("reference_count", &self.reference_count)
.field("run_queues", &self.run_queues)
.finish()
}
}
impl Drop for Scheduler {
fn drop(&mut self) {
let mut locked_scheduler_by_id = SCHEDULERS.lock();
locked_scheduler_by_id
.remove(&self.id)
.expect("Scheduler not registered");
}
}
impl PartialEq for Scheduler {
fn eq(&self, other: &Self) -> bool {
self.id == other.id
}
}
/// What to run
pub enum Run {
/// Run the process now
Now(Arc<Process>),
/// There was a process in the queue, but it needs to be delayed because it is `Priority::Low`
/// and hadn't been delayed enough yet. Ask the `RunQueue` again for another process.
/// -- https://github.com/erlang/otp/blob/fe2b1323a3866ed0a9712e9d12e1f8f84793ec47/erts/emulator/beam/erl_process.c#L9601-L9606
Delayed,
/// There are no processes in the run queue, do other work
None,
}
impl Scheduler {
/// > 1. Update reduction counters
/// > 2. Check timers
/// > 3. If needed check balance
    /// > 4. If needed migrate processes and ports
/// > 5. Do auxiliary scheduler work
/// > 6. If needed check I/O and update time
/// > 7. While needed pick a port task to execute
/// > 8. Pick a process to execute
/// > -- [The Scheduler Loop](https://blog.stenmans.org/theBeamBook/#_the_scheduler_loop)
///
/// Returns `true` if a process was run. Returns `false` if no process could be run and the
/// scheduler should sleep or work steal.
#[must_use]
pub fn run_once(&self) -> bool {
// We always set root=true here, since calling this function is always done
// from the scheduler loop, and only ever from the root context
self.process_yield(/* root= */ true)
}
/// This function performs two roles, albeit virtually identical:
///
/// First, this function is called by the scheduler to resume execution
/// of a process pulled from the run queue. It does so using its "root"
/// process as its context.
///
/// Second, this function is called by a process when it chooses to
/// yield back to the scheduler. In this case, the scheduler "root"
/// process is swapped in, so the scheduler has a chance to do its
    /// auxiliary tasks, after which the scheduler will call it again to
/// swap in a new process.
fn process_yield(&self, is_root: bool) -> bool {
info!("entering core scheduler loop");
self.hierarchy.write().timeout();
loop {
let next = {
let mut rq = self.run_queues.write();
rq.dequeue()
};
match next {
Run::Now(process) => {
info!("found process to schedule");
// Don't allow exiting processes to run again.
//
// Without this check, a process.exit() from outside the process during WAITING
// will return to the Frame that called `process.wait()`
if !process.is_exiting() {
info!("swapping into process (is_root = {})", is_root);
unsafe {
self.swap_process(process, is_root);
}
} else {
info!("process is exiting");
process.reduce()
}
info!("exiting scheduler loop");
// When reached, either the process scheduled is the root process,
// or the process is exiting and we called .reduce(); either way we're
// returning to the main scheduler loop to check for signals, etc.
break true;
}
Run::Delayed => {
info!("found process, but it is delayed");
continue;
}
Run::None if is_root => {
info!("no processes remaining to schedule, exiting loop");
// If no processes are available, then the scheduler should steal,
// but if it can't/doesn't, then it must terminate, as there is
// nothing we can swap to. When we break here, we're returning
// to the core scheduler loop, which _must_ terminate, if it does
// not, we'll just end up right back here again.
//
// TODO: stealing
break false;
}
Run::None => unreachable!(),
}
}
}
/// This function takes care of coordinating the scheduling of a new
/// process/descheduling of the current process.
///
/// - Updating process status
/// - Updating reduction count based on accumulated reductions during execution
/// - Resetting reduction counter for next process
/// - Handling exiting processes (logging/propagating)
///
/// Once that is complete, it swaps to the new process stack via `swap_stack`,
/// at which point execution resumes where the newly scheduled process left
/// off previously, or in its init function.
unsafe fn swap_process(&self, new: Arc<Process>, is_root: bool) {
// Mark the new process as Running
let new_ctx = &new.registers as *const _;
{
let mut new_status = new.status.write();
*new_status = Status::Running;
}
// Replace the previous process with the new as the currently scheduled process
let _ = CURRENT_PROCESS.with(|cp| cp.replace(Some(new.clone())));
let prev = self.current.replace(new.clone());
// Increment reduction count if not the root process
if !is_root {
let prev_reductions = reset_reduction_counter();
prev.total_reductions
.fetch_add(prev_reductions as u64, Ordering::Relaxed);
}
// Change the previous process status to Runnable
{
let mut prev_status = prev.status.write();
if Status::Running == *prev_status {
*prev_status = Status::Runnable
}
}
// Save the previous process registers for the stack swap
let prev_ctx = &prev.registers as *const _ as *mut _;
// Then try to schedule it for the future
// If the process is exiting, then handle the exit, otherwise
// proceed to the stack swap
if let Some(exiting) = self.run_queues.write().requeue(prev) {
if let Status::Exiting(ref ex) = *exiting.status.read() {
crate::process::log_exit(&exiting, ex);
crate::process::propagate_exit(&exiting, ex);
} else {
unreachable!()
}
}
// Execute the swap
//
// When swapping to the root process, we return here, which
// will unwind back to the main scheduler loop in `lib.rs`.
//
// When swapping to a newly spawned process, we return "into"
// its init function, or put another way, we jump to its
// function prologue. In this situation, all of the saved registers
// except %rsp and %rbp will be zeroed. %rsp is set during the call
// to `spawn`, but %rbp is set to the current %rbp value to ensure
// that stack traces link the new stack to the frame in which execution
// started
//
// When swapping to a previously spawned process, we return here,
// since the process called `process_yield`. From here we unwind back
// to the call to `process_yield` and resume execution from the point
// where it was called.
swap_stack(prev_ctx, new_ctx);
}
/// Schedules the given process for execution
pub fn schedule(&mut self, process: Arc<Process>) {
debug_assert_ne!(
Some(self.id),
process.scheduler_id(),
"process is already scheduled here!"
);
process.schedule_with(self.id);
let mut rq = self.run_queues.write();
rq.enqueue(process);
}
/// Spawns a new process using the given init function as its entry
#[inline]
pub fn spawn(&mut self, process: Arc<Process>) -> anyhow::Result<()> {
Self::spawn_internal(process, self.id, &self.run_queues);
Ok(())
}
// Root process uses the original thread stack, no initialization required.
//
// It also starts "running", so we don't put it on the run queue
fn spawn_root(
process: Arc<Process>,
id: id::ID,
_run_queues: &RwLock<run_queue::Queues>,
) -> anyhow::Result<()> {
process.schedule_with(id);
*process.status.write() = Status::Running;
Ok(())
}
fn spawn_internal(process: Arc<Process>, id: id::ID, run_queues: &RwLock<run_queue::Queues>) {
process.schedule_with(id);
let mfa = &process.initial_module_function_arity;
let init_fn_result = apply::find_symbol(&mfa);
if init_fn_result.is_none() {
panic!(
"invalid mfa provided for process ({}), no such symbol found",
&mfa
);
}
let init_fn = init_fn_result.unwrap();
#[inline(always)]
unsafe fn push(sp: &mut StackPointer, value: u64) {
sp.0 = sp.0.offset(-1);
ptr::write(sp.0, value);
}
// Write the return function and init function to the end of the stack,
// when execution resumes, the pointer before the stack pointer will be
// used as the return address - the first time that will be the init function.
//
// When execution returns from the init function, then it will return via
// `process_return`, which will return to the scheduler and indicate that
// the process exited. The nature of the exit is indicated by error state
// in the process itself
unsafe {
let mut sp = StackPointer(process.stack.top as *mut u64);
// Function that will be called when returning from init_fn
push(&mut sp, process_return_continuation as u64);
// Function that the newly spawned process should call first
push(&mut sp, init_fn as u64);
// Update process stack pointer
let s_top = &process.stack.top as *const _ as *mut _;
ptr::write(s_top, sp.0 as *const u8);
// Update rsp
let rsp = &process.registers.rsp as *const u64 as *mut _;
ptr::write(rsp, sp.0 as u64);
let rbp = &process.registers.rbp as *const u64 as *mut _;
ptr::write(rbp, sp.0 as u64);
}
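// At this point the fresh stack looks like this (the stack grows downward,
// so `sp` points at the lowest written slot):
//
//     [stack.top - 8]  process_return_continuation  // runs when init_fn returns
//     [stack.top - 16] init_fn                      // popped by the first `ret`
//                                                   // after swap_stack restores %rsp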
*process.status.write() = Status::Runnable;
let mut rq = run_queues.write();
rq.enqueue(process);
}
}
fn reset_reduction_counter() -> u64 {
let count = unsafe { CURRENT_REDUCTION_COUNT };
unsafe {
CURRENT_REDUCTION_COUNT = 0;
}
count as u64
//CURRENT_REDUCTION_COUNT.swap(0, Ordering::Relaxed)
}
/// This function uses inline assembly to save the callee-saved registers for the outgoing
/// process, and restore them for the incoming process. When this function returns, it will
/// resume execution where `swap_stack` was called previously.
#[naked]
#[inline(never)]
#[cfg(all(unix, target_arch = "x86_64"))]
unsafe fn swap_stack(prev: *mut CalleeSavedRegisters, new: *const CalleeSavedRegisters) {
asm!("
# Save the stack pointer, and callee-saved registers of `prev`
movq %rsp, ($0)
movq %r15, 8($0)
movq %r14, 16($0)
movq %r13, 24($0)
movq %r12, 32($0)
movq %rbx, 40($0)
movq %rbp, 48($0)
# Restore the stack pointer, and callee-saved registers of `new`
movq ($1), %rsp
movq 8($1), %r15
movq 16($1), %r14
movq 24($1), %r13
movq 32($1), %r12
movq 40($1), %rbx
movq 48($1), %rbp
# We need to let the unwinder know that the CFA has changed, currently
# that is 8 bytes above %rsp, because the call to this function pushes
# %rip to the stack, and since we're restoring the stack pointer, the
# value of the CFA, from the perspective of the unwinder, has also been
# changed
.cfi_def_cfa %rsp, 8
.cfi_restore %rsp
.cfi_restore %r15
.cfi_restore %r14
.cfi_restore %r13
.cfi_restore %r12
.cfi_restore %rbx
.cfi_restore %rbp
"
:
: "r"(prev), "r"(new)
:
: "volatile", "alignstack"
);
}
#[cfg(not(all(unix, target_arch = "x86_64")))]
compile_error!("lumen_rt_minimal does not currently support this architecture!");
// scheduler.rs
#![allow(unused)]
mod run_queue;
use std::alloc::Layout;
use std::fmt::{self, Debug};
use std::mem;
use std::ops::Deref;
use std::ptr;
use std::sync::atomic::{AtomicU64, Ordering};
use std::sync::{Arc, Weak};
use hashbrown::HashMap;
use anyhow::anyhow;
use lazy_static::lazy_static;
use log::info;
use liblumen_core::locks::{Mutex, RwLock};
use liblumen_core::util::thread_local::ThreadLocalCell;
use liblumen_alloc::atom;
use liblumen_alloc::erts::apply;
use liblumen_alloc::erts::process;
use liblumen_alloc::erts::process::{CalleeSavedRegisters, Priority, Process, Status};
use liblumen_alloc::erts::scheduler::id;
use liblumen_alloc::erts::term::prelude::{Atom, ReferenceNumber, Term};
use liblumen_alloc::erts::ModuleFunctionArity;
use lumen_rt_core as rt_core;
use lumen_rt_core::process::CURRENT_PROCESS;
use lumen_rt_core::timer::Hierarchy;
const MAX_REDUCTION_COUNT: u32 = 20;
// External thread locals owned by the generated code
extern "C" {
#[thread_local]
static mut CURRENT_REDUCTION_COUNT: u32;
}
thread_local! {
static SCHEDULER: Arc<Scheduler> = Scheduler::registered();
}
lazy_static! {
static ref SCHEDULERS: Mutex<HashMap<id::ID, Weak<Scheduler>>> = Mutex::new(Default::default());
}
#[export_name = "__scheduler_stop_waiting"]
pub fn scheduler_stop_waiting(process: &Process) {
let id = process.scheduler_id().unwrap();
if let Some(scheduler) = SCHEDULERS.lock().get(&id).and_then(|s| s.upgrade()) {
scheduler.stop_waiting(process)
}
}
#[derive(Copy, Clone)]
struct StackPointer(*mut u64);
#[export_name = "__lumen_builtin_spawn"]
pub extern "C" fn builtin_spawn(to: Term, msg: Term) -> Term {
unimplemented!()
}
#[export_name = "__lumen_builtin_yield"]
pub unsafe extern "C" fn process_yield() -> bool {
let s = <Scheduler as rt_core::Scheduler>::current();
// NOTE: We always set root=false here because the root
// process never invokes this function
s.process_yield(/* root= */ false)
}
#[naked]
#[inline(never)]
#[cfg(all(unix, target_arch = "x86_64"))]
pub unsafe extern "C" fn process_return_continuation() {
let f: fn() -> () = process_return;
asm!("
callq *$0
"
:
: "r"(f)
:
: "volatile", "alignstack"
);
}
#[inline(never)]
fn process_return() {
let s = <Scheduler as rt_core::Scheduler>::current();
do_process_return(&s);
}
#[export_name = "__lumen_builtin_malloc"]
pub unsafe extern "C" fn builtin_malloc(kind: u32, arity: usize) -> *mut u8 {
use core::convert::TryInto;
use liblumen_alloc::erts::term::closure::ClosureLayout;
use liblumen_alloc::erts::term::prelude::*;
use liblumen_core::alloc::Layout;
use liblumen_term::TermKind;
let kind_result: Result<TermKind, _> = kind.try_into();
match kind_result {
Ok(TermKind::Closure) => {
let s = <Scheduler as rt_core::Scheduler>::current();
let cl = ClosureLayout::for_env_len(arity);
let result = s.current.alloc_nofrag_layout(cl.layout().clone());
if let Ok(nn) = result {
return nn.as_ptr() as *mut u8;
}
}
Ok(TermKind::Tuple) => {
let s = <Scheduler as rt_core::Scheduler>::current();
let layout = Tuple::layout_for_len(arity);
let result = s.current.alloc_nofrag_layout(layout);
if let Ok(nn) = result {
return nn.as_ptr() as *mut u8;
}
}
Ok(TermKind::Cons) => {
let s = <Scheduler as rt_core::Scheduler>::current();
let layout = Layout::new::<Cons>();
let result = s.current.alloc_nofrag_layout(layout);
if let Ok(nn) = result {
return nn.as_ptr() as *mut u8;
}
}
Ok(tk) => {
unimplemented!("unhandled use of malloc for {:?}", tk);
}
Err(_) => {
panic!("invalid term kind: {}", kind);
}
}
ptr::null_mut()
}
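// Hypothetical call sequence from compiler-generated code (illustrative
// only; the real call sites are emitted by the compiler): allocate a
// 2-element tuple header on the current process heap and check for failure.
//
//     let ptr = builtin_malloc(TermKind::Tuple as u32, 2);
//     if ptr.is_null() {
//         // allocation failed; the caller must trigger GC or error out
//     }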
/// Called when the current process has finished executing, and has
/// returned all the way to its entry function. This marks the process
/// as exiting (if it wasn't already), and then yields to the scheduler
fn do_process_return(scheduler: &Scheduler) -> bool {
use liblumen_alloc::erts::term::prelude::*;
if scheduler.current.pid() != scheduler.root.pid() {
scheduler
.current
.exit(atom!("normal"), anyhow!("Out of code").into());
// NOTE: We always set root=false here, even though this can
// be called from the root process, since returning from the
// root process exits the scheduler loop anyway, so no stack
// swapping can occur
scheduler.process_yield(/* root= */ false)
} else {
true
}
}
pub struct Scheduler {
id: id::ID,
hierarchy: RwLock<Hierarchy>,
// References are always 64-bits even on 32-bit platforms
reference_count: AtomicU64,
run_queues: RwLock<run_queue::Queues>,
// Non-monotonic unique integers are scoped to the scheduler ID and then use this per-scheduler
// `u64`.
unique_integer: AtomicU64,
root: Arc<Process>,
init: ThreadLocalCell<Arc<Process>>,
current: ThreadLocalCell<Arc<Process>>,
}
// This guarantee holds as long as `init` and `current` are only
// ever accessed by the scheduler when scheduling
unsafe impl Sync for Scheduler {}
impl rt_core::Scheduler for Scheduler {
#[inline]
fn current() -> Arc<Self> {
SCHEDULER.with(|s| s.clone())
}
fn id(&self) -> id::ID {
self.id
}
fn hierarchy(&self) -> &RwLock<Hierarchy> {
&self.hierarchy
}
/// Gets the next available reference number
fn next_reference_number(&self) -> ReferenceNumber {
self.reference_count.fetch_add(1, Ordering::SeqCst)
}
}
impl Scheduler {
/// Creates a new scheduler with the default configuration
fn new() -> anyhow::Result<Scheduler> {
let id = id::next();
// The root process is how the scheduler gets time for itself,
// and is also how we know when to shutdown the scheduler due
// to termination of all its processes
let root = Arc::new(Process::new(
Priority::Normal,
None,
Arc::new(ModuleFunctionArity {
module: Atom::from_str("root"),
function: Atom::from_str("init"),
arity: 0,
}),
ptr::null_mut(),
0,
));
let run_queues = Default::default();
Scheduler::spawn_root(root.clone(), id, &run_queues)?;
// Placeholder
let init = Arc::new(Process::new(
Priority::Normal,
None,
Arc::new(ModuleFunctionArity {
module: Atom::from_str("undef"),
function: Atom::from_str("undef"),
arity: 0,
}),
ptr::null_mut(),
0,
));
// The scheduler starts with the root process running
let current = ThreadLocalCell::new(root.clone());
Ok(Self {
id,
run_queues,
root,
init: ThreadLocalCell::new(init),
current,
hierarchy: Default::default(),
reference_count: AtomicU64::new(0),
unique_integer: AtomicU64::new(0),
})
}
// Spawns the init process, should be called immediately after
// scheduler creation
pub fn init(&self) -> anyhow::Result<()> {
// The init process is the actual "root" Erlang process, it acts
// as the entry point for the program from Erlang's perspective,
// and is responsible for starting/stopping the system in Erlang.
//
// If this process exits, the scheduler terminates
let (init_heap, init_heap_size) = process::alloc::default_heap()?;
let init = Arc::new(Process::new_with_stack(
Priority::Normal,
None,
Arc::new(ModuleFunctionArity {
module: Atom::from_str("init"),
function: Atom::from_str("start"),
arity: 0,
}),
init_heap,
init_heap_size,
)?);
let clone = init.clone();
unsafe {
self.init.set(init);
}
Scheduler::spawn_internal(clone, self.id, &self.run_queues);
Ok(())
}
/// Gets the scheduler registered to this thread
///
/// If no scheduler has been created for this thread, one is created
fn registered() -> Arc<Self> {
let mut schedulers = SCHEDULERS.lock();
let s = Arc::new(Self::new().unwrap());
if schedulers.insert(s.id, Arc::downgrade(&s)).is_some() {
panic!("Scheduler already registered with ID ({:?})", s.id);
}
s
}
/// Gets a scheduler by its ID
pub fn from_id(id: &id::ID) -> Option<Arc<Self>> {
Self::current_from_id(id).or_else(|| SCHEDULERS.lock().get(id).and_then(|s| s.upgrade()))
}
/// Returns the current thread's scheduler if it matches the given ID
fn current_from_id(id: &id::ID) -> Option<Arc<Self>> {
SCHEDULER.with(|s| if &s.id == id { Some(s.clone()) } else { None })
}
/// Gets the next available unique integer
pub fn next_unique_integer(&self) -> u64 {
self.unique_integer.fetch_add(1, Ordering::SeqCst)
}
/// Returns the length of the current scheduler's run queue
pub fn run_queues_len(&self) -> usize {
self.run_queues.read().len()
}
/// Returns the length of a specific run queue in the current scheduler
#[cfg(test)]
pub fn run_queue_len(&self, priority: Priority) -> usize {
self.run_queues.read().run_queue_len(priority)
}
/// Returns true if the given process is in the current scheduler's run queue
#[cfg(test)]
pub fn is_run_queued(&self, value: &Arc<Process>) -> bool {
self.run_queues.read().contains(value)
}
pub fn stop_waiting(&self, process: &Process) {
self.run_queues.write().stop_waiting(process);
}
// TODO: Request application master termination for controlled shutdown
// This request will always come from the thread which spawned the application
// master, i.e. the "main" scheduler thread
//
// Returns `Ok(())` if shutdown was successful, `Err(anyhow::Error)` if something
// went wrong during shutdown, and it was not able to complete normally
pub fn shutdown(&self) -> anyhow::Result<()> {
// For now just Ok(()), but this needs to be addressed when proper
// system startup/shutdown is in place
CURRENT_PROCESS.with(|cp| cp.replace(None));
Ok(())
}
}
impl Debug for Scheduler {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.debug_struct("Scheduler")
.field("id", &self.id)
// The hierarchy slots take a lot of space, so don't print them by default
.field("reference_count", &self.reference_count)
.field("run_queues", &self.run_queues)
.finish()
}
}
impl Drop for Scheduler {
fn drop(&mut self) {
let mut locked_scheduler_by_id = SCHEDULERS.lock();
locked_scheduler_by_id
.remove(&self.id)
.expect("Scheduler not registered");
}
}
impl PartialEq for Scheduler {
fn eq(&self, other: &Self) -> bool {
self.id == other.id
}
}
/// What to run
pub enum Run {
/// Run the process now
Now(Arc<Process>),
/// There was a process in the queue, but it needs to be delayed because it is `Priority::Low`
/// and hadn't been delayed enough yet. Ask the `RunQueue` again for another process.
/// -- https://github.com/erlang/otp/blob/fe2b1323a3866ed0a9712e9d12e1f8f84793ec47/erts/emulator/beam/erl_process.c#L9601-L9606
Delayed,
/// There are no processes in the run queue, do other work
None,
}
impl Scheduler {
/// > 1. Update reduction counters
/// > 2. Check timers
/// > 3. If needed check balance
/// > 4. If needed migrate processes and ports
/// > 5. Do auxiliary scheduler work
/// > 6. If needed check I/O and update time
/// > 7. While needed pick a port task to execute
/// > 8. Pick a process to execute
/// > -- [The Scheduler Loop](https://blog.stenmans.org/theBeamBook/#_the_scheduler_loop)
///
/// Returns `true` if a process was run. Returns `false` if no process could be run and the
/// scheduler should sleep or work steal.
#[must_use]
pub fn run_once(&self) -> bool {
// We always set root=true here, since calling this function is always done
// from the scheduler loop, and only ever from the root context
self.process_yield(/* root= */ true)
}
/// This function performs two roles, albeit virtually identical:
///
/// First, this function is called by the scheduler to resume execution
/// of a process pulled from the run queue. It does so using its "root"
/// process as its context.
///
/// Second, this function is called by a process when it chooses to
/// yield back to the scheduler. In this case, the scheduler "root"
/// process is swapped in, so the scheduler has a chance to do its
/// auxiliary tasks, after which the scheduler will call it again to
/// swap in a new process.
fn process_yield(&self, is_root: bool) -> bool {
info!("entering core scheduler loop");
self.hierarchy.write().timeout();
loop {
let next = {
let mut rq = self.run_queues.write();
rq.dequeue()
};
match next {
Run::Now(process) => {
info!("found process to schedule");
// Don't allow exiting processes to run again.
//
// Without this check, a process.exit() from outside the process during WAITING
// will return to the Frame that called `process.wait()`
if !process.is_exiting() {
info!("swapping into process (is_root = {})", is_root);
unsafe {
self.swap_process(process, is_root);
}
} else {
info!("process is exiting");
process.reduce()
}
info!("exiting scheduler loop");
// When reached, either the process scheduled is the root process,
// or the process is exiting and we called .reduce(); either way we're
// returning to the main scheduler loop to check for signals, etc.
break true;
}
Run::Delayed => {
info!("found process, but it is delayed");
continue;
}
Run::None if is_root => {
info!("no processes remaining to schedule, exiting loop");
// If no processes are available, then the scheduler should steal,
// but if it can't/doesn't, then it must terminate, as there is
// nothing we can swap to. When we break here, we're returning
// to the core scheduler loop, which _must_ terminate, if it does
// not, we'll just end up right back here again.
//
// TODO: stealing
break false;
}
Run::None => unreachable!(),
}
}
}
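// Sketch of the contract with compiled code (names illustrative; the real
// checks are emitted by the compiler): each call increments the thread-local
// reduction counter, and once it crosses MAX_REDUCTION_COUNT the generated
// code calls the exported `__lumen_builtin_yield`, which lands in
// `process_yield(false)` above:
//
//     CURRENT_REDUCTION_COUNT += 1;
//     if CURRENT_REDUCTION_COUNT >= MAX_REDUCTION_COUNT {
//         __lumen_builtin_yield();
//     }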
/// This function takes care of coordinating the scheduling of a new
/// process/descheduling of the current process.
///
/// - Updating process status
/// - Updating reduction count based on accumulated reductions during execution
/// - Resetting reduction counter for next process
/// - Handling exiting processes (logging/propagating)
///
/// Once that is complete, it swaps to the new process stack via `swap_stack`,
/// at which point execution resumes where the newly scheduled process left
/// off previously, or in its init function.
unsafe fn swap_process(&self, new: Arc<Process>, is_root: bool) {
// Mark the new process as Running
let new_ctx = &new.registers as *const _;
{
let mut new_status = new.status.write();
*new_status = Status::Running;
}
// Replace the previous process with the new as the currently scheduled process
let _ = CURRENT_PROCESS.with(|cp| cp.replace(Some(new.clone())));
let prev = self.current.replace(new.clone());
// Increment reduction count if not the root process
if !is_root {
let prev_reductions = reset_reduction_counter();
prev.total_reductions
.fetch_add(prev_reductions as u64, Ordering::Relaxed);
}
// Change the previous process status to Runnable
{
let mut prev_status = prev.status.write();
if Status::Running == *prev_status {
*prev_status = Status::Runnable
}
}
// Save the previous process registers for the stack swap
let prev_ctx = &prev.registers as *const _ as *mut _;
// Then try to schedule it for the future
// If the process is exiting, then handle the exit, otherwise
// proceed to the stack swap
if let Some(exiting) = self.run_queues.write().requeue(prev) {
if let Status::Exiting(ref ex) = *exiting.status.read() {
crate::process::log_exit(&exiting, ex);
crate::process::propagate_exit(&exiting, ex);
} else {
unreachable!()
}
}
// Execute the swap
//
// When swapping to the root process, we return here, which
// will unwind back to the main scheduler loop in `lib.rs`.
//
// When swapping to a newly spawned process, we return "into"
// its init function, or put another way, we jump to its
// function prologue. In this situation, all of the saved registers
// except %rsp and %rbp will be zeroed. %rsp is set during the call
// to `spawn`, but %rbp is set to the current %rbp value to ensure
// that stack traces link the new stack to the frame in which execution
// started
//
// When swapping to a previously spawned process, we return here,
// since the process called `process_yield`. From here we unwind back
// to the call to `process_yield` and resume execution from the point
// where it was called.
swap_stack(prev_ctx, new_ctx);
}
/// Schedules the given process for execution
pub fn schedule(&mut self, process: Arc<Process>) {
debug_assert_ne!(
Some(self.id),
process.scheduler_id(),
"process is already scheduled here!"
);
process.schedule_with(self.id);
let mut rq = self.run_queues.write();
rq.enqueue(process);
}
/// Spawns a new process using the given init function as its entry
#[inline]
pub fn spawn(&mut self, process: Arc<Process>) -> anyhow::Result<()> {
Self::spawn_internal(process, self.id, &self.run_queues);
Ok(())
}
// Root process uses the original thread stack, no initialization required.
//
// It also starts "running", so we don't put it on the run queue
fn spawn_root(
process: Arc<Process>,
id: id::ID,
_run_queues: &RwLock<run_queue::Queues>,
) -> anyhow::Result<()> {
process.schedule_with(id);
*process.status.write() = Status::Running;
Ok(())
}
fn spawn_internal(process: Arc<Process>, id: id::ID, run_queues: &RwLock<run_queue::Queues>) {
process.schedule_with(id);
let mfa = &process.initial_module_function_arity;
let init_fn_result = apply::find_symbol(&mfa);
if init_fn_result.is_none() {
panic!(
"invalid mfa provided for process ({}), no such symbol found",
&mfa
);
}
let init_fn = init_fn_result.unwrap();
#[inline(always)]
unsafe fn push(sp: &mut StackPointer, value: u64) {
sp.0 = sp.0.offset(-1);
ptr::write(sp.0, value);
}
// Write the return function and init function to the end of the stack,
// when execution resumes, the pointer before the stack pointer will be
// used as the return address - the first time that will be the init function.
//
// When execution returns from the init function, then it will return via
// `process_return`, which will return to the scheduler and indicate that
// the process exited. The nature of the exit is indicated by error state
// in the process itself
unsafe {
let mut sp = StackPointer(process.stack.top as *mut u64);
// Function that will be called when returning from init_fn
push(&mut sp, process_return_continuation as u64);
// Function that the newly spawned process should call first
push(&mut sp, init_fn as u64);
// Update process stack pointer
let s_top = &process.stack.top as *const _ as *mut _;
ptr::write(s_top, sp.0 as *const u8);
// Update rsp
let rsp = &process.registers.rsp as *const u64 as *mut _;
ptr::write(rsp, sp.0 as u64);
let rbp = &process.registers.rbp as *const u64 as *mut _;
ptr::write(rbp, sp.0 as u64);
}
*process.status.write() = Status::Runnable;
let mut rq = run_queues.write();
rq.enqueue(process);
}
}
fn reset_reduction_counter() -> u64 {
let count = unsafe { CURRENT_REDUCTION_COUNT };
unsafe {
CURRENT_REDUCTION_COUNT = 0;
}
count as u64
//CURRENT_REDUCTION_COUNT.swap(0, Ordering::Relaxed)
}
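// NOTE: the non-atomic read/reset above is sound only because
// CURRENT_REDUCTION_COUNT is #[thread_local] and only ever touched by this
// scheduler thread; if it were shared across threads, the commented-out
// atomic swap would be the correct form.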
/// This function uses inline assembly to save the callee-saved registers for the outgoing
/// process, and restore them for the incoming process. When this function returns, it will
/// resume execution where `swap_stack` was called previously.
#[naked]
#[inline(never)]
#[cfg(all(unix, target_arch = "x86_64"))]
unsafe fn swap_stack(prev: *mut CalleeSavedRegisters, new: *const CalleeSavedRegisters) {
asm!("
# Save the stack pointer, and callee-saved registers of `prev`
movq %rsp, ($0)
movq %r15, 8($0)
movq %r14, 16($0)
movq %r13, 24($0)
movq %r12, 32($0)
movq %rbx, 40($0)
movq %rbp, 48($0)
# Restore the stack pointer, and callee-saved registers of `new`
movq ($1), %rsp
movq 8($1), %r15
movq 16($1), %r14
movq 24($1), %r13
movq 32($1), %r12
movq 40($1), %rbx
movq 48($1), %rbp
# We need to let the unwinder know that the CFA has changed, currently
# that is 8 bytes above %rsp, because the call to this function pushes
# %rip to the stack, and since we're restoring the stack pointer, the
# value of the CFA, from the perspective of the unwinder, has also been
# changed
.cfi_def_cfa %rsp, 8
.cfi_restore %rsp
.cfi_restore %r15
.cfi_restore %r14
.cfi_restore %r13
.cfi_restore %r12
.cfi_restore %rbx
.cfi_restore %rbp
"
:
: "r"(prev), "r"(new)
:
: "volatile", "alignstack"
);
}
#[cfg(not(all(unix, target_arch = "x86_64")))]
compile_error!("lumen_rt_minimal does not currently support this architecture!");
// client.go
/*
Package synapse is a wrapper library for the Synapse API (https://docs.synapsefi.com)
Instantiate client
// credentials used to set headers for each method request
var client = synapse.New(
"CLIENT_ID",
"CLIENT_SECRET",
"IP_ADDRESS",
"FINGERPRINT",
)
# Examples
Enable logging & turn off developer mode (developer mode is true by default)
var client = synapse.New(
"CLIENT_ID",
"CLIENT_SECRET",
"IP_ADDRESS",
"FINGERPRINT",
true,
false,
)
Register Fingerprint
// payload response
{
"error": {
"en": "Fingerprint not registered. Please perform the MFA flow."
},
"error_code": "10",
"http_code": "202",
"phone_numbers": [
"developer@email.com",
"901-111-2222"
],
"success": false
}
// Submit a valid email address or phone number from "phone_numbers" list
res, err := user.Select2FA("developer@email.com")
// MFA sent to developer@email.com
res, err := user.VerifyPIN("123456")
Set an `IDEMPOTENCY_KEY` (for `POST` requests only)
scopeSettings := `{
"scope": [
"USERS|POST",
"USER|PATCH",
"NODES|POST",
"NODE|PATCH",
"TRANS|POST",
"TRAN|PATCH"
],
"url": "https://requestb.in/zp216zzp"
}`
idempotencyKey := `1234567890`
data, err := client.CreateSubscription(scopeSettings, idempotencyKey)
Submit optional query parameters
params := "per_page=3&page=2"
data, err := client.GetUsers(params)
*/
package synapse
import (
"github.com/mitchellh/mapstructure"
)
/********** GLOBAL VARIABLES **********/
var logMode = false
var developerMode = true
/********** TYPES **********/
type (
// Client represents the credentials used by the developer to instantiate a client
Client struct {
ClientID string
ClientSecret string
Fingerprint string
IP string
request Request
}
)
/********** METHODS **********/
func (c *Client) do(method, url, data string, queryParams []string) (map[string]interface{}, error) {
var body []byte
var err error
switch method {
case "GET":
body, err = c.request.Get(url, queryParams)
case "POST":
body, err = c.request.Post(url, data, queryParams)
case "PATCH":
body, err = c.request.Patch(url, data, queryParams)
case "DELETE":
body, err = c.request.Delete(url)
}
return readStream(body), err
}
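// For example (illustrative values), a call such as
//
//	c.do("GET", buildURL(path["users"]), "", []string{"page=2&per_page=5"})
//
// issues GET /users?page=2&per_page=5 with the credential headers attached
// by the embedded Request, and decodes the response body via readStream.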
/********** CLIENT **********/
// New creates a client object
func New(clientID, clientSecret, fingerprint, ipAddress string, modes ...bool) *Client {
log.info("========== CREATING CLIENT INSTANCE ==========")
if len(modes) > 0 {
if modes[0] {
logMode = true
}
if len(modes) > 1 && !modes[1] {
developerMode = false
}
}
request := Request{
clientID: clientID,
clientSecret: clientSecret,
fingerprint: fingerprint,
ipAddress: ipAddress,
}
return &Client{
ClientID: clientID,
ClientSecret: clientSecret,
Fingerprint: fingerprint,
IP: ipAddress,
request: request,
}
}
/********** AUTHENTICATION **********/
// GetPublicKey returns a public key as a token representing client credentials
func (c *Client) GetPublicKey(scope ...string) (map[string]interface{}, error) {
log.info("========== GET PUBLIC KEY ==========")
url := buildURL(path["client"])
defaultScope := "OAUTH|POST,USERS|POST,USERS|GET,USER|GET,USER|PATCH,SUBSCRIPTIONS|GET,SUBSCRIPTIONS|POST,SUBSCRIPTION|GET,SUBSCRIPTION|PATCH,CLIENT|REPORTS,CLIENT|CONTROLS"
if len(scope) > 0 {
defaultScope = scope[0]
}
qp := []string{"issue_public_key=YES&scope=" + defaultScope}
if len(scope) > 1 {
userId := scope[1]
qp[0] += "&user_id=" + userId
}
return c.do("GET", url, "", qp)
}
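// Example usage (hypothetical scope string and user ID):
//
//	res, err := client.GetPublicKey(
//		"OAUTH|POST,USERS|GET",     // custom scope (first variadic argument)
//		"594e0fa2838454002ea317a0", // optional user_id (second variadic argument)
//	)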
/********** NODE **********/
// GetNodes returns all of the nodes
func (c *Client) GetNodes(queryParams ...string) (map[string]interface{}, error) {
log.info("========== GET CLIENT NODES ==========")
url := buildURL(path["nodes"])
return c.do("GET", url, "", queryParams)
}
// GetTradeMarketData returns data on a stock based on its ticker symbol
func (c *Client) GetTradeMarketData(tickerSymbol string) (map[string]interface{}, error) {
log.info("========== GET TRADE MARKET DATA ==========")
url := buildURL(path["nodes"], "trade-market-watch")
ts := []string{tickerSymbol}
return c.do("GET", url, "", ts)
}
// GetNodeTypes returns available node types
func (c *Client) GetNodeTypes() (map[string]interface{}, error) {
log.info("========== GET NODE TYPES ==========")
url := buildURL(path["nodes"], "types")
return c.do("GET", url, "", nil)
}
/********** OTHER **********/
// GetCryptoMarketData returns market data for cryptocurrencies
func (c *Client) GetCryptoMarketData() (map[string]interface{}, error) {
log.info("========== GET CRYPTO MARKET DATA ==========")
url := buildURL(path["nodes"], "crypto-market-watch")
return c.do("GET", url, "", nil)
}
// GetCryptoQuotes returns all of the quotes for crypto currencies
func (c *Client) GetCryptoQuotes(queryParams ...string) (map[string]interface{}, error) {
log.info("========== GET CRYPTO QUOTES ==========")
url := buildURL(path["nodes"], "crypto-quotes")
return c.do("GET", url, "", queryParams)
}
// GetInstitutions returns a list of all available banking institutions
func (c *Client) GetInstitutions() (map[string]interface{}, error) {
log.info("========== GET INSTITUTIONS ==========")
url := buildURL(path["institutions"])
return c.do("GET", url, "", nil)
}
// LocateATMs returns a list of nearby ATMs
func (c *Client) LocateATMs(queryParams ...string) (map[string]interface{}, error) {
log.info("========== LOCATE ATMS ==========")
url := buildURL(path["nodes"], "atms")
return c.do("GET", url, "", queryParams)
}
// VerifyAddress checks if an address is valid
func (c *Client) VerifyAddress(data string) (map[string]interface{}, error) {
log.info("========== VERIFY ADDRESS ==========")
url := buildURL("address-verification")
return c.do("POST", url, data, nil)
}
// VerifyRoutingNumber checks and returns the bank details of a routing number
func (c *Client) VerifyRoutingNumber(data string) (map[string]interface{}, error) {
log.info("========== VERIFY ROUTING NUMBER ==========")
url := buildURL("routing-number-verification")
return c.do("POST", url, data, nil)
}
/********** SUBSCRIPTION **********/
// GetSubscriptions returns all of the subscriptions associated with the client
func (c *Client) GetSubscriptions(queryParams ...string) (map[string]interface{}, error) {
log.info("========== GET SUBSCRIPTIONS ==========")
url := buildURL(path["subscriptions"])
return c.do("GET", url, "", queryParams)
}
// GetSubscription returns a single subscription
func (c *Client) GetSubscription(subscriptionID string) (map[string]interface{}, error) {
log.info("========== GET SUBSCRIPTION ==========")
url := buildURL(path["subscriptions"], subscriptionID)
return c.do("GET", url, "", nil)
}
// CreateSubscription creates a subscription and returns the subscription data
func (c *Client) CreateSubscription(data string, idempotencyKey ...string) (map[string]interface{}, error) {
log.info("========== CREATE SUBSCRIPTION ==========")
url := buildURL(path["subscriptions"])
return c.do("POST", url, data, idempotencyKey)
}
// UpdateSubscription updates an existing subscription
func (c *Client) UpdateSubscription(subscriptionID string, data string) (map[string]interface{}, error) {
log.info("========== UPDATE SUBSCRIPTION ==========")
url := buildURL(path["subscriptions"], subscriptionID)
return c.do("PATCH", url, data, nil)
}
// GetWebhookLogs returns all of the webhooks sent to a specific client
func (c *Client) GetWebhookLogs() (map[string]interface{}, error) {
log.info("========== GET WEBHOOK LOGS ==========")
url := buildURL(path["subscriptions"], "logs")
return c.do("GET", url, "", nil)
}
/********** TRANSACTION **********/
// GetTransactions returns all client transactions
func (c *Client) GetTransactions(queryParams ...string) (map[string]interface{}, error) {
log.info("========== GET CLIENT TRANSACTIONS ==========")
url := buildURL(path["transactions"])
return c.do("GET", url, "", queryParams)
}
/********** USER **********/
// GetUsers returns a list of users
func (c *Client) GetUsers(queryParams ...string) (map[string]interface{}, error) {
log.info("========== GET CLIENT USERS ==========")
url := buildURL(path["users"])
return c.do("GET", url, "", queryParams)
}
// GetUser returns a single user
func (c *Client) GetUser(userID, fingerprint, ipAddress string, queryParams ...string) (*User, error) {
log.info("========== GET USER ==========")
url := buildURL(path["users"], userID)
res, err := c.do("GET", url, "", queryParams)
var user User
mapstructure.Decode(res, &user)
user.Response = res
request := Request{
clientID: c.ClientID,
clientSecret: c.ClientSecret,
fingerprint: fingerprint,
ipAddress: ipAddress,
}
user.request = request
return &user, err
}
// CreateUser creates a single user and returns the new user data
func (c *Client) CreateUser(data, fingerprint, ipAddress string, idempotencyKey ...string) (*User, error) {
log.info("========== CREATE USER ==========")
var user User
user.request = Request{
clientID: c.ClientID,
clientSecret: c.ClientSecret,
fingerprint: fingerprint,
ipAddress: ipAddress,
}
url := buildURL(path["users"])
res, err := user.do("POST", url, data, idempotencyKey)
mapstructure.Decode(res, &user)
user.Response = res
return &user, err
}
// GetUserDocumentTypes returns available user document types
func (c *Client) GetUserDocumentTypes() (map[string]interface{}, error) {
log.info("========== GET USER DOCUMENT TYPES ==========")
url := buildURL(path["users"], "document-types")
return c.do("GET", url, "", nil)
}
// GetUserEntityTypes returns available user entity types
func (c *Client) GetUserEntityTypes() (map[string]interface{}, error) {
log.info("========== GET USER ENTITY TYPES ==========")
url := buildURL(path["users"], "entity-types")
return c.do("GET", url, "", nil)
}
// GetUserEntityScopes returns available user entity scopes
func (c *Client) GetUserEntityScopes() (map[string]interface{}, error) {
log.info("========== GET USER ENTITY SCOPES ==========")
url := buildURL(path["users"], "entity-scopes")
return c.do("GET", url, "", nil)
}
// client.go
/*
Package synapse is a wrapper library for the Synapse API (https://docs.synapsefi.com)
Instantiate client
// credentials used to set headers for each method request
var client = synapse.New(
"CLIENT_ID",
"CLIENT_SECRET",
"IP_ADDRESS",
"FINGERPRINT",
)
# Examples
Enable logging & turn off developer mode (developer mode is true by default)
var client = synapse.New(
"CLIENT_ID",
"CLIENT_SECRET",
"IP_ADDRESS",
"FINGERPRINT",
true,
false,
)
Register Fingerprint
// payload response
{
"error": {
"en": "Fingerprint not registered. Please perform the MFA flow."
},
"error_code": "10",
"http_code": "202",
"phone_numbers": [
"developer@email.com",
"901-111-2222"
],
"success": false
}
// Submit a valid email address or phone number from "phone_numbers" list
res, err := user.Select2FA("developer@email.com")
// MFA sent to developer@email.com
res, err := user.VerifyPIN("123456")
Set an `IDEMPOTENCY_KEY` (for `POST` requests only)
scopeSettings := `{
"scope": [
"USERS|POST",
"USER|PATCH",
"NODES|POST",
"NODE|PATCH",
"TRANS|POST",
"TRAN|PATCH"
],
"url": "https://requestb.in/zp216zzp"
}`
idempotencyKey := `1234567890`
data, err := client.CreateSubscription(scopeSettings, idempotencyKey)
Submit optional query parameters
params := "per_page=3&page=2"
data, err := client.GetUsers(params)
*/
package synapse
import (
"github.com/mitchellh/mapstructure"
)
/********** GLOBAL VARIABLES **********/
var logMode = false
var developerMode = true
/********** TYPES **********/
type (
// Client represents the credentials used by the developer to instantiate a client
Client struct {
ClientID string
ClientSecret string
Fingerprint string
IP string
request Request
}
)
/********** METHODS **********/
func (c *Client) do(method, url, data string, queryParams []string) (map[string]interface{}, error) {
var body []byte
var err error
switch method {
case "GET":
body, err = c.request.Get(url, queryParams)
case "POST":
body, err = c.request.Post(url, data, queryParams)
case "PATCH":
body, err = c.request.Patch(url, data, queryParams)
case "DELETE":
body, err = c.request.Delete(url)
}
return readStream(body), err
}
/********** CLIENT **********/
// New creates a client object
func New(clientID, clientSecret, fingerprint, ipAddress string, modes ...bool) *Client {
log.info("========== CREATING CLIENT INSTANCE ==========")
if len(modes) > 0 {
if modes[0] {
logMode = true
}
if len(modes) > 1 && !modes[1] {
developerMode = false
}
}
request := Request{
clientID: clientID,
clientSecret: clientSecret,
fingerprint: fingerprint,
ipAddress: ipAddress,
}
return &Client{
ClientID: clientID,
ClientSecret: clientSecret,
Fingerprint: fingerprint,
IP: ipAddress,
request: request,
}
}
/********** AUTHENTICATION **********/
// GetPublicKey returns a public key as a token representing client credentials
func (c *Client) GetPublicKey(scope ...string) (map[string]interface{}, error) {
log.info("========== GET PUBLIC KEY ==========")
url := buildURL(path["client"])
defaultScope := "OAUTH|POST,USERS|POST,USERS|GET,USER|GET,USER|PATCH,SUBSCRIPTIONS|GET,SUBSCRIPTIONS|POST,SUBSCRIPTION|GET,SUBSCRIPTION|PATCH,CLIENT|REPORTS,CLIENT|CONTROLS"
if len(scope) > 0 {
defaultScope = scope[0]
}
qp := []string{"issue_public_key=YES&scope=" + defaultScope}
if len(scope) > 1 {
userId := scope[1]
qp[0] += "&user_id=" + userId
}
return c.do("GET", url, "", qp)
}
/********** NODE **********/
// GetNodes returns all of the nodes
func (c *Client) GetNodes(queryParams ...string) (map[string]interface{}, error) {
log.info("========== GET CLIENT NODES ==========")
url := buildURL(path["nodes"])
return c.do("GET", url, "", queryParams)
}
// GetTradeMarketData returns data on a stock based on its ticker symbol
func (c *Client) GetTradeMarketData(tickerSymbol string) (map[string]interface{}, error) {
log.info("========== GET TRADE MARKET DATA ==========")
url := buildURL(path["nodes"], "trade-market-watch")
ts := []string{tickerSymbol}
return c.do("GET", url, "", ts)
}
// GetNodeTypes returns available node types
func (c *Client) GetNodeTypes() (map[string]interface{}, error) {
log.info("========== GET NODE TYPES ==========")
url := buildURL(path["nodes"], "types")
return c.do("GET", url, "", nil)
}
/********** OTHER **********/
// GetCryptoMarketData returns market data for cryptocurrencies
func (c *Client) GetCryptoMarketData() (map[string]interface{}, error) {
log.info("========== GET CRYPTO MARKET DATA ==========")
url := buildURL(path["nodes"], "crypto-market-watch")
return c.do("GET", url, "", nil)
}
// GetCryptoQuotes returns all of the quotes for crypto currencies
func (c *Client) GetCryptoQuotes(queryParams ...string) (map[string]interface{}, error) {
log.info("========== GET CRYPTO QUOTES ==========")
url := buildURL(path["nodes"], "crypto-quotes")
return c.do("GET", url, "", queryParams)
}
// GetInstitutions returns a list of all available banking institutions
func (c *Client) GetInstitutions() (map[string]interface{}, error) {
log.info("========== GET INSTITUTIONS ==========")
url := buildURL(path["institutions"])
return c.do("GET", url, "", nil)
}
// LocateATMs returns a list of nearby ATMs
func (c *Client) LocateATMs(queryParams ...string) (map[string]interface{}, error) {
log.info("========== LOCATE ATMS ==========")
url := buildURL(path["nodes"], "atms")
return c.do("GET", url, "", queryParams)
}
// VerifyAddress checks if an address is valid
func (c *Client) VerifyAddress(data string) (map[string]interface{}, error) {
log.info("========== VERIFY ADDRESS ==========")
url := buildURL("address-verification")
return c.do("POST", url, data, nil)
}
// VerifyRoutingNumber checks and returns the bank details of a routing number
func (c *Client) VerifyRoutingNumber(data string) (map[string]interface{}, error) {
log.info("========== VERIFY ROUTING NUMBER ==========")
url := buildURL("routing-number-verification")
return c.do("POST", url, data, nil)
}
/********** SUBSCRIPTION **********/
// GetSubscriptions returns all of the subscriptions associated with the client
func (c *Client) GetSubscriptions(queryParams ...string) (map[string]interface{}, error) {
log.info("========== GET SUBSCRIPTIONS ==========")
url := buildURL(path["subscriptions"])
return c.do("GET", url, "", queryParams)
}
// GetSubscription returns a single subscription
func (c *Client) GetSubscription(subscriptionID string) (map[string]interface{}, error) {
log.info("========== GET SUBSCRIPTION ==========")
url := buildURL(path["subscriptions"], subscriptionID)
return c.do("GET", url, "", nil)
}
// CreateSubscription creates a subscription and returns the subscription data
func (c *Client) CreateSubscription(data string, idempotencyKey ...string) (map[string]interface{}, error) {
log.info("========== CREATE SUBSCRIPTION ==========")
url := buildURL(path["subscriptions"])
return c.do("POST", url, data, idempotencyKey)
}
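// Example (hypothetical webhook settings): subscribe to user and transaction
// events, passing an idempotency key so retries are safe:
//
//	settings := `{"scope": ["USERS|POST", "TRANS|POST"], "url": "https://example.com/hook"}`
//	sub, err := client.CreateSubscription(settings, "idempotency-key-1234")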
// UpdateSubscription updates an existing subscription
func (c *Client) UpdateSubscription(subscriptionID string, data string) (map[string]interface{}, error) {
log.info("========== UPDATE SUBSCRIPTION ==========")
url := buildURL(path["subscriptions"], subscriptionID)
return c.do("PATCH", url, data, nil)
}
// GetWebhookLogs returns all of the webhooks sent to a specific client
func (c *Client) GetWebhookLogs() (map[string]interface{}, error) {
log.info("========== GET WEBHOOK LOGS ==========")
url := buildURL(path["subscriptions"], "logs")
return c.do("GET", url, "", nil)
}
/********** TRANSACTION **********/
// GetTransactions returns all client transactions
func (c *Client) GetTransactions(queryParams ...string) (map[string]interface{}, error) {
log.info("========== GET CLIENT TRANSACTIONS ==========")
url := buildURL(path["transactions"])
return c.do("GET", url, "", queryParams)
}
/********** USER **********/
// GetUsers returns a list of users
func (c *Client) GetUsers(queryParams ...string) (map[string]interface{}, error) {
log.info("========== GET CLIENT USERS ==========")
url := buildURL(path["users"])
return c.do("GET", url, "", queryParams)
}
// GetUser returns a single user
func (c *Client) GetUser(userID, fingerprint, ipAddress string, queryParams ...string) (*User, error) {
log.info("========== GET USER ==========")
url := buildURL(path["users"], userID)
res, err := c.do("GET", url, "", queryParams)
var user User
mapstructure.Decode(res, &user)
user.Response = res
request := Request{
clientID: c.ClientID,
clientSecret: c.ClientSecret,
fingerprint: fingerprint,
ipAddress: ipAddress,
}
user.request = request
return &user, err
}
// CreateUser creates a single user and returns the new user data
func (c *Client) CreateUser(data, fingerprint, ipAddress string, idempotencyKey ...string) (*User, error) {
log.info("========== CREATE USER ==========")
var user User
user.request = Request{
clientID: c.ClientID,
clientSecret: c.ClientSecret,
fingerprint: fingerprint,
ipAddress: ipAddress,
}
url := buildURL(path["users"])
res, err := user.do("POST", url, data, idempotencyKey)
mapstructure.Decode(res, &user)
user.Response = res
return &user, err
}
// GetUserDocumentTypes returns available user document types
func (c *Client) GetUserDocumentTypes() (map[string]interface{}, error) {
log.info("========== GET USER DOCUMENT TYPES ==========")
url := buildURL(path["users"], "document-types")
return c.do("GET", url, "", nil)
}
// GetUserEntityTypes returns available user entity types
func (c *Client) GetUserEntityTypes() (map[string]interface{}, error) {
log.info("========== GET USER ENTITY TYPES ==========")
url := buildURL(path["users"], "entity-types")
return c.do("GET", url, "", nil)
}
// GetUserEntityScopes returns available user entity scopes
func (c *Client) GetUserEntityScopes() (map[string]interface{}, error) {
log.info("========== GET USER ENTITY SCOPES ==========")
url := buildURL(path["users"], "entity-scopes")
return c.do("GET", url, "", nil)
}
// client.go
/*
Package synapse is a wrapper library for the Synapse API (https://docs.synapsefi.com)
Instantiate client
// credentials used to set headers for each method request
var client = synapse.New(
"CLIENT_ID",
"CLIENT_SECRET",
"IP_ADDRESS",
"FINGERPRINT",
)
# Examples
Enable logging & turn off developer mode (developer mode is true by default)
var client = synapse.New(
"CLIENT_ID",
"CLIENT_SECRET",
"IP_ADDRESS",
"FINGERPRINT",
true,
false,
)
Register Fingerprint
// payload response
{
"error": {
"en": "Fingerprint not registered. Please perform the MFA flow."
},
"error_code": "10",
"http_code": "202",
"phone_numbers": [
"developer@email.com",
"901-111-2222"
],
"success": false
}
// Submit a valid email address or phone number from "phone_numbers" list
res, err := user.Select2FA("developer@email.com")
// MFA sent to developer@email.com
res, err := user.VerifyPIN("123456")
Set an `IDEMPOTENCY_KEY` (for `POST` requests only)
scopeSettings := `{
"scope": [
"USERS|POST",
"USER|PATCH",
"NODES|POST",
"NODE|PATCH",
"TRANS|POST",
"TRAN|PATCH"
],
"url": "https://requestb.in/zp216zzp"
}`
idempotencyKey := `1234567890`
data, err := client.CreateSubscription(scopeSettings, idempotencyKey)
Submit optional query parameters
params := "per_page=3&page=2"
data, err := client.GetUsers(params)
*/
package synapse
import (
"github.com/mitchellh/mapstructure"
)
/********** GLOBAL VARIABLES **********/
var logMode = false
var developerMode = true
/********** TYPES **********/
type (
// Client represents the credentials used by the developer to instantiate a client
Client struct {
ClientID string
ClientSecret string
Fingerprint string
IP string
request Request
}
)
/********** METHODS **********/
func (c *Client) do(method, url, data string, queryParams []string) (map[string]interface{}, error) {
var body []byte
var err error
switch method {
case "GET":
body, err = c.request.Get(url, queryParams)
case "POST":
body, err = c.request.Post(url, data, queryParams)
case "PATCH":
body, err = c.request.Patch(url, data, queryParams)
case "DELETE":
body, err = c.request.Delete(url)
}
return readStream(body), err
}
/********** CLIENT **********/
// New creates a client object
func New(clientID, clientSecret, fingerprint, ipAddress string, modes ...bool) *Client {
log.info("========== CREATING CLIENT INSTANCE ==========")
if len(modes) > 0 {
if modes[0] {
logMode = true
}
if len(modes) > 1 && !modes[1] {
developerMode = false
}
}
request := Request{
clientID: clientID,
clientSecret: clientSecret,
fingerprint: fingerprint,
ipAddress: ipAddress,
}
return &Client{
ClientID: clientID,
ClientSecret: clientSecret,
Fingerprint: fingerprint,
IP: ipAddress,
request: request,
}
}
/********** AUTHENTICATION **********/
// GetPublicKey returns a public key as a token representing client credentials
func (c *Client) GetPublicKey(scope ...string) (map[string]interface{}, error) {
log.info("========== GET PUBLIC KEY ==========")
url := buildURL(path["client"])
defaultScope := "OAUTH|POST,USERS|POST,USERS|GET,USER|GET,USER|PATCH,SUBSCRIPTIONS|GET,SUBSCRIPTIONS|POST,SUBSCRIPTION|GET,SUBSCRIPTION|PATCH,CLIENT|REPORTS,CLIENT|CONTROLS"
if len(scope) > 0 {
defaultScope = scope[0]
}
qp := []string{"issue_public_key=YES&scope=" + defaultScope}
if len(scope) > 1 {
userId := scope[1]
qp[0] += "&user_id=" + userId
}
return c.do("GET", url, "", qp)
}
/********** NODE **********/
// GetNodes returns all of the nodes
func (c *Client) GetNodes(queryParams ...string) (map[string]interface{}, error) {
log.info("========== GET CLIENT NODES ==========")
url := buildURL(path["nodes"])
return c.do("GET", url, "", queryParams)
}
// GetTradeMarketData returns data on a stock based on its ticker symbol
func (c *Client) GetTradeMarketData(tickerSymbol string) (map[string]interface{}, error) {
log.info("========== GET TRADE MARKET DATA ==========")
url := buildURL(path["nodes"], "trade-market-watch")
ts := []string{tickerSymbol}
return c.do("GET", url, "", ts)
}
// GetNodeTypes returns available node types
func (c *Client) GetNodeTypes() (map[string]interface{}, error) {
log.info("========== GET NODE TYPES ==========")
url := buildURL(path["nodes"], "types")
return c.do("GET", url, "", nil)
}
/********** OTHER **********/
// GetCryptoMarketData returns market data for cryptocurrencies
func (c *Client) GetCryptoMarketData() (map[string]interface{}, error) {
log.info("========== GET CRYPTO MARKET DATA ==========")
url := buildURL(path["nodes"], "crypto-market-watch")
return c.do("GET", url, "", nil)
}
// GetCryptoQuotes returns all of the quotes for crypto currencies
func (c *Client) GetCryptoQuotes(queryParams ...string) (map[string]interface{}, error) {
log.info("========== GET CRYPTO QUOTES ==========")
url := buildURL(path["nodes"], "crypto-quotes")
return c.do("GET", url, "", queryParams)
}
// GetInstitutions returns a list of all available banking institutions
func (c *Client) GetInstitutions() (map[string]interface{}, error) {
log.info("========== GET INSTITUTIONS ==========")
url := buildURL(path["institutions"])
return c.do("GET", url, "", nil)
}
// LocateATMs returns a list of nearby ATMs
func (c *Client) LocateATMs(queryParams ...string) (map[string]interface{}, error) {
log.info("========== LOCATE ATMS ==========")
url := buildURL(path["nodes"], "atms")
return c.do("GET", url, "", queryParams)
}
// VerifyAddress checks if an address is valid
func (c *Client) VerifyAddress(data string) (map[string]interface{}, error) {
log.info("========== VERIFY ADDRESS ==========")
url := buildURL("address-verification")
return c.do("POST", url, data, nil)
}
// VerifyRoutingNumber checks and returns the bank details of a routing number
func (c *Client) VerifyRoutingNumber(data string) (map[string]interface{}, error) {
log.info("========== VERIFY ROUTING NUMBER ==========")
url := buildURL("routing-number-verification")
return c.do("POST", url, data, nil)
}
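// Example usage (the JSON field names are an assumption, not confirmed by
// this file -- check the Synapse API docs):
//
//	res, err := client.VerifyRoutingNumber(`{"routing_num": "084008426", "type": "ACH-US"}`)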
/********** SUBSCRIPTION **********/
// GetSubscriptions returns all of the subscriptions associated with the client
func (c *Client) GetSubscriptions(queryParams ...string) (map[string]interface{}, error) {
log.info("========== GET SUBSCRIPTIONS ==========")
url := buildURL(path["subscriptions"])
return c.do("GET", url, "", queryParams)
}
// GetSubscription returns a single subscription
func (c *Client) GetSubscription(subscriptionID string) (map[string]interface{}, error) {
log.info("========== GET SUBSCRIPTION ==========")
url := buildURL(path["subscriptions"], subscriptionID)
return c.do("GET", url, "", nil)
}
// CreateSubscription creates a subscription and returns the subscription data
func (c *Client) CreateSubscription(data string, idempotencyKey ...string) (map[string]interface{}, error) {
log.info("========== CREATE SUBSCRIPTION ==========")
url := buildURL(path["subscriptions"])
return c.do("POST", url, data, idempotencyKey)
}
// UpdateSubscription updates an existing subscription
func (c *Client) UpdateSubscription(subscriptionID string, data string) (map[string]interface{}, error) {
log.info("========== UPDATE SUBSCRIPTION ==========")
url := buildURL(path["subscriptions"], subscriptionID)
return c.do("PATCH", url, data, nil)
}
// GetWebhookLogs returns all of the webhooks sent to a specific client
func (c *Client) GetWebhookLogs() (map[string]interface{}, error) {
log.info("========== GET WEBHOOK LOGS ==========")
url := buildURL(path["subscriptions"], "logs")
return c.do("GET", url, "", nil)
}
/********** TRANSACTION **********/
// GetTransactions returns all client transactions
func (c *Client) GetTransactions(queryParams ...string) (map[string]interface{}, error) {
log.info("========== GET CLIENT TRANSACTIONS ==========")
url := buildURL(path["transactions"])
return c.do("GET", url, "", queryParams)
}
/********** USER **********/
// GetUsers returns a list of users
func (c *Client) GetUsers(queryParams ...string) (map[string]interface{}, error) {
log.info("========== GET CLIENT USERS ==========")
url := buildURL(path["users"])
return c.do("GET", url, "", queryParams)
}
// GetUser returns a single user
func (c *Client) GetUser(userID, fingerprint, ipAddress string, queryParams ...string) (*User, error) {
log.info("========== GET USER ==========")
url := buildURL(path["users"], userID)
res, err := c.do("GET", url, "", queryParams)
var user User
mapstructure.Decode(res, &user)
user.Response = res
request := Request{
clientID: c.ClientID,
clientSecret: c.ClientSecret,
fingerprint: fingerprint,
ipAddress: ipAddress,
}
user.request = request
return &user, err
}
// CreateUser creates a single user and returns the new user data
func (c *Client) CreateUser(data, fingerprint, ipAddress string, idempotencyKey ...string) (*User, error) {
log.info("========== CREATE USER ==========")
var user User
user.request = Request{
clientID: c.ClientID,
clientSecret: c.ClientSecret,
fingerprint: fingerprint,
ipAddress: ipAddress,
}
url := buildURL(path["users"])
res, err := user.do("POST", url, data, idempotencyKey)
mapstructure.Decode(res, &user)
user.Response = res
return &user, err
}
// GetUserDocumentTypes returns available user document types
func (c *Client) GetUserDocumentTypes() (map[string]interface{}, error) {
log.info("========== GET USER DOCUMENT TYPES ==========")
url := buildURL(path["users"], "document-types")
return c.do("GET", url, "", nil)
}
// GetUserEntityTypes returns available user entity types
func (c *Client) GetUserEntityTypes() (map[string]interface{}, error) {
log.info("========== GET USER ENTITY TYPES ==========")
url := buildURL(path["users"], "entity-types")
return c.do("GET", url, "", nil)
}
// GetUserEntityScopes returns available user entity scopes
func (c *Client) GetUserEntityScopes() (map[string]interface{}, error) {
log.info("========== GET USER ENTITY SCOPES ==========")
url := buildURL(path["users"], "entity-scopes")
return c.do("GET", url, "", nil)
}
// client.go
/*
Package synapse is a wrapper library for the Synapse API (https://docs.synapsefi.com)
Instantiate client
// credentials used to set headers for each method request
var client = synapse.New(
"CLIENT_ID",
"CLIENT_SECRET",
"IP_ADDRESS",
"FINGERPRINT",
)
# Examples
Enable logging & turn off developer mode (developer mode is true by default)
var client = synapse.New(
"CLIENT_ID",
"CLIENT_SECRET",
"IP_ADDRESS",
"FINGERPRINT",
true,
false,
)
Register Fingerprint
// payload response
{
"error": {
"en": "Fingerprint not registered. Please perform the MFA flow."
},
"error_code": "10",
"http_code": "202",
"phone_numbers": [
"developer@email.com",
"901-111-2222"
],
"success": false
}
// Submit a valid email address or phone number from "phone_numbers" list
res, err := user.Select2FA("developer@email.com")
// MFA sent to developer@email.com
res, err := user.VerifyPIN("123456")
Set an `IDEMPOTENCY_KEY` (for `POST` requests only)
scopeSettings := `{
"scope": [
"USERS|POST",
"USER|PATCH",
"NODES|POST",
"NODE|PATCH",
"TRANS|POST",
"TRAN|PATCH"
],
"url": "https://requestb.in/zp216zzp"
}`
idempotencyKey := `1234567890`
data, err := client.CreateSubscription(scopeSettings, idempotencyKey)
Submit optional query parameters
params := "per_page=3&page=2"
data, err := client.GetUsers(params)
*/
package synapse
import (
"github.com/mitchellh/mapstructure"
)
/********** GLOBAL VARIABLES **********/
var logMode = false
var developerMode = true
/********** TYPES **********/
type (
// Client represents the credentials used by the developer to instantiate a client
Client struct {
ClientID string
ClientSecret string
Fingerprint string
IP string
request Request
}
)
/********** METHODS **********/
func (c *Client) do(method, url, data string, queryParams []string) (map[string]interface{}, error) {
var body []byte
var err error
switch method {
case "GET":
body, err = c.request.Get(url, queryParams)
case "POST":
body, err = c.request.Post(url, data, queryParams)
case "PATCH":
body, err = c.request.Patch(url, data, queryParams)
case "DELETE":
body, err = c.request.Delete(url)
}
return readStream(body), err
}
/********** CLIENT **********/
// New creates a client object
func New(clientID, clientSecret, fingerprint, ipAddress string, modes ...bool) *Client {
log.info("========== CREATING CLIENT INSTANCE ==========")
if len(modes) > 0 {
if modes[0] {
logMode = true
}
if len(modes) > 1 && !modes[1] {
developerMode = false
}
}
request := Request{
clientID: clientID,
clientSecret: clientSecret,
fingerprint: fingerprint,
ipAddress: ipAddress,
}
return &Client{
ClientID: clientID,
ClientSecret: clientSecret,
Fingerprint: fingerprint,
IP: ipAddress,
request: request,
}
}
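// newSandboxClient is a hypothetical convenience constructor (not part of the
// original API) illustrating the variadic modes of New: modes[0] enables
// logging, modes[1] controls developer (sandbox) mode, which defaults to true.
func newSandboxClient(clientID, clientSecret, fingerprint, ipAddress string) *Client {
// logging on, developer mode left at its default (true)
return New(clientID, clientSecret, fingerprint, ipAddress, true)
}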
/********** AUTHENTICATION **********/
// GetPublicKey returns a public key as a token representing client credentials
func (c *Client) GetPublicKey(scope ...string) (map[string]interface{}, error) {
log.info("========== GET PUBLIC KEY ==========")
url := buildURL(path["client"])
defaultScope := "OAUTH|POST,USERS|POST,USERS|GET,USER|GET,USER|PATCH,SUBSCRIPTIONS|GET,SUBSCRIPTIONS|POST,SUBSCRIPTION|GET,SUBSCRIPTION|PATCH,CLIENT|REPORTS,CLIENT|CONTROLS"
if len(scope) > 0 {
defaultScope = scope[0]
}
qp := []string{"issue_public_key=YES&scope=" + defaultScope}
if len(scope) > 1 {
userID := scope[1]
qp[0] += "&user_id=" + userID
}
return c.do("GET", url, "", qp)
}
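// getUserScopedPublicKey is a hypothetical helper (not in the original API)
// demonstrating GetPublicKey's variadic parameter: element 0 overrides the
// default scope and element 1, when present, is sent as the user_id query value.
func getUserScopedPublicKey(c *Client, scope, userID string) (map[string]interface{}, error) {
return c.GetPublicKey(scope, userID)
}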
/********** NODE **********/
// GetNodes returns all of the nodes
func (c *Client) GetNodes(queryParams ...string) (map[string]interface{}, error) {
log.info("========== GET CLIENT NODES ==========")
url := buildURL(path["nodes"])
return c.do("GET", url, "", queryParams)
}
// GetTradeMarketData returns data on a stock based on its ticker symbol
func (c *Client) GetTradeMarketData(tickerSymbol string) (map[string]interface{}, error) {
log.info("========== GET TRADE MARKET DATA ==========")
url := buildURL(path["nodes"], "trade-market-watch")
ts := []string{tickerSymbol}
return c.do("GET", url, "", ts)
}
// GetNodeTypes returns available node types
func (c *Client) GetNodeTypes() (map[string]interface{}, error) {
log.info("========== GET NODE TYPES ==========")
url := buildURL(path["nodes"], "types")
return c.do("GET", url, "", nil)
}
/********** OTHER **********/
// GetCryptoMarketData returns market data for cryptocurrencies
func (c *Client) GetCryptoMarketData() (map[string]interface{}, error) {
log.info("========== GET CRYPTO MARKET DATA ==========")
url := buildURL(path["nodes"], "crypto-market-watch")
return c.do("GET", url, "", nil)
}
// GetCryptoQuotes returns all of the quotes for crypto currencies
func (c *Client) GetCryptoQuotes(queryParams ...string) (map[string]interface{}, error) {
log.info("========== GET CRYPTO QUOTES ==========")
url := buildURL(path["nodes"], "crypto-quotes")
return c.do("GET", url, "", queryParams)
}
// GetInstitutions returns a list of all available banking institutions
func (c *Client) GetInstitutions() (map[string]interface{}, error) {
log.info("========== GET INSTITUTIONS ==========")
url := buildURL(path["institutions"])
return c.do("GET", url, "", nil)
}
// LocateATMs returns a list of nearby ATMs
func (c *Client) LocateATMs(queryParams ...string) (map[string]interface{}, error) {
log.info("========== LOCATE ATMS ==========")
url := buildURL(path["nodes"], "atms")
return c.do("GET", url, "", queryParams)
}
// VerifyAddress checks whether an address is valid
func (c *Client) VerifyAddress(data string) (map[string]interface{}, error) {
log.info("========== VERIFY ADDRESS ==========")
url := buildURL("address-verification")
return c.do("POST", url, data, nil)
}
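// exampleVerifyAddress is an illustrative sketch (not part of the original
// API); the payload field names follow the public Synapse address-verification
// docs and should be treated as an assumption rather than a guarantee.
func exampleVerifyAddress(c *Client) (map[string]interface{}, error) {
payload := `{
"address_street": "101 2nd St STE 1500",
"address_city": "San Francisco",
"address_subdivision": "CA",
"address_country_code": "US",
"address_postal_code": "94105"
}`
return c.VerifyAddress(payload)
}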
// VerifyRoutingNumber checks and returns the bank details of a routing number
func (c *Client) VerifyRoutingNumber(data string) (map[string]interface{}, error) |
/********** SUBSCRIPTION **********/
// GetSubscriptions returns all of the subscriptions associated with the client
func (c *Client) GetSubscriptions(queryParams ...string) (map[string]interface{}, error) {
log.info("========== GET SUBSCRIPTIONS ==========")
url := buildURL(path["subscriptions"])
return c.do("GET", url, "", queryParams)
}
// GetSubscription returns a single subscription
func (c *Client) GetSubscription(subscriptionID string) (map[string]interface{}, error) {
log.info("========== GET SUBSCRIPTION ==========")
url := buildURL(path["subscriptions"], subscriptionID)
return c.do("GET", url, "", nil)
}
// CreateSubscription creates a subscription and returns the subscription data
func (c *Client) CreateSubscription(data string, idempotencyKey ...string) (map[string]interface{}, error) {
log.info("========== CREATE SUBSCRIPTION ==========")
url := buildURL(path["subscriptions"])
return c.do("POST", url, data, idempotencyKey)
}
// UpdateSubscription updates an existing subscription
func (c *Client) UpdateSubscription(subscriptionID string, data string) (map[string]interface{}, error) {
log.info("========== UPDATE SUBSCRIPTION ==========")
url := buildURL(path["subscriptions"], subscriptionID)
return c.do("PATCH", url, data, nil)
}
// GetWebhookLogs returns all of the webhooks sent to a specific client
func (c *Client) GetWebhookLogs() (map[string]interface{}, error) {
log.info("========== GET WEBHOOK LOGS ==========")
url := buildURL(path["subscriptions"], "logs")
return c.do("GET", url, "", nil)
}
/********** TRANSACTION **********/
// GetTransactions returns all client transactions
func (c *Client) GetTransactions(queryParams ...string) (map[string]interface{}, error) {
log.info("========== GET CLIENT TRANSACTIONS ==========")
url := buildURL(path["transactions"])
return c.do("GET", url, "", queryParams)
}
/********** USER **********/
// GetUsers returns a list of users
func (c *Client) GetUsers(queryParams ...string) (map[string]interface{}, error) {
log.info("========== GET CLIENT USERS ==========")
url := buildURL(path["users"])
return c.do("GET", url, "", queryParams)
}
// GetUser returns a single user
func (c *Client) GetUser(userID, fingerprint, ipAddress string, queryParams ...string) (*User, error) {
log.info("========== GET USER ==========")
url := buildURL(path["users"], userID)
res, err := c.do("GET", url, "", queryParams)
var user User
mapstructure.Decode(res, &user)
user.Response = res
request := Request{
clientID: c.ClientID,
clientSecret: c.ClientSecret,
fingerprint: fingerprint,
ipAddress: ipAddress,
}
user.request = request
return &user, err
}
// CreateUser creates a single user and returns the new user data
func (c *Client) CreateUser(data, fingerprint, ipAddress string, idempotencyKey ...string) (*User, error) {
log.info("========== CREATE USER ==========")
var user User
user.request = Request{
clientID: c.ClientID,
clientSecret: c.ClientSecret,
fingerprint: fingerprint,
ipAddress: ipAddress,
}
url := buildURL(path["users"])
res, err := user.do("POST", url, data, idempotencyKey)
mapstructure.Decode(res, &user)
user.Response = res
return &user, err
}
// GetUserDocumentTypes returns available user document types
func (c *Client) GetUserDocumentTypes() (map[string]interface{}, error) {
log.info("========== GET USER DOCUMENT TYPES ==========")
url := buildURL(path["users"], "document-types")
return c.do("GET", url, "", nil)
}
// GetUserEntityTypes returns available user entity types
func (c *Client) GetUserEntityTypes() (map[string]interface{}, error) {
log.info("========== GET USER ENTITY TYPES ==========")
url := buildURL(path["users"], "entity-types")
return c.do("GET", url, "", nil)
}
// GetUserEntityScopes returns available user entity scopes
func (c *Client) GetUserEntityScopes() (map[string]interface{}, error) {
log.info("========== GET USER ENTITY TYPES ==========")
url := buildURL(path["users"], "entity-scopes")
return c.do("GET", url, "", nil)
}
| {
log.info("========== VERIFY ROUTING NUMBER ==========")
url := buildURL("routing-number-verification")
return c.do("POST", url, data, nil)
} | identifier_body |
controller.go |
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package operator
import (
"context"
"errors"
"fmt"
"reflect"
"strings"
"sync"
"github.com/google/uuid"
"github.com/sirupsen/logrus"
coreV1 "k8s.io/api/core/v1"
k8sError "k8s.io/apimachinery/pkg/api/errors"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/controller"
"sigs.k8s.io/controller-runtime/pkg/event"
"sigs.k8s.io/controller-runtime/pkg/handler"
"sigs.k8s.io/controller-runtime/pkg/predicate"
"sigs.k8s.io/controller-runtime/pkg/source"
api "github.com/dell/csi-baremetal/api/generated/v1"
"github.com/dell/csi-baremetal/api/v1/nodecrd"
"github.com/dell/csi-baremetal/pkg/base/k8s"
"github.com/dell/csi-baremetal/pkg/base/util"
observer "github.com/dell/csi-baremetal/pkg/common"
"github.com/dell/csi-baremetal/pkg/crcontrollers/operator/common"
)
const (
// namePrefix it is a prefix for Node CR name
namePrefix = "csibmnode-"
// finalizer for Node custom resource
csibmNodeFinalizer = "dell.emc.csi/csibmnode-cleanup"
)
// Controller is a controller for Node CR
type Controller struct {
k8sClient *k8s.KubeClient
nodeSelector *label
cache nodesMapping
// holds k8s node names for which the special ID setting is enabled;
// it is used during Node CR deletion to avoid recreation
enabledForNode map[string]bool
enabledMu sync.RWMutex
observer observer.Observer
log *logrus.Entry
// if used external annotations
externalAnnotation bool
// holds annotation which contains node UUID
annotationKey string
}
type label struct {
key string
value string
}
// nodesMapping is a non-thread-safe cache that holds the mapping between k8s node names and BMCSINode CR object names
type nodesMapping struct {
k8sToBMNode map[string]string // k8s node name to Node CR name
bmToK8sNode map[string]string // Node CR name to k8s node name
}
func (nc *nodesMapping) getK8sNodeName(bmNodeName string) (string, bool) {
res, ok := nc.bmToK8sNode[bmNodeName]
return res, ok
}
func (nc *nodesMapping) getCSIBMNodeName(k8sNodeName string) (string, bool) {
res, ok := nc.k8sToBMNode[k8sNodeName]
return res, ok
}
func (nc *nodesMapping) put(k8sNodeName, bmNodeName string) {
nc.k8sToBMNode[k8sNodeName] = bmNodeName
nc.bmToK8sNode[bmNodeName] = k8sNodeName
}
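// cacheRoundTrip is a hypothetical illustration (not part of the original
// code) of the two-way cache above: put registers both directions, so either
// name can be resolved afterwards.
func cacheRoundTrip() {
m := nodesMapping{
k8sToBMNode: make(map[string]string),
bmToK8sNode: make(map[string]string),
}
m.put("worker-1", "csibmnode-1234")
crName, _ := m.getCSIBMNodeName("worker-1") // "csibmnode-1234"
k8sName, _ := m.getK8sNodeName(crName)      // "worker-1"
_, _ = crName, k8sName
}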
// NewController returns instance of Controller
func NewController(nodeSelector string, useExternalAnnotation bool, nodeAnnotation string,
k8sClient *k8s.KubeClient, observer observer.Observer, logger *logrus.Logger) (*Controller, error) {
c := &Controller{
k8sClient: k8sClient,
cache: nodesMapping{
k8sToBMNode: make(map[string]string),
bmToK8sNode: make(map[string]string),
},
observer: observer,
enabledForNode: make(map[string]bool, 3), // small optimization: a cluster with 3 worker nodes never needs to grow this map
log: logger.WithField("component", "Controller"),
externalAnnotation: useExternalAnnotation,
}
if nodeSelector != "" {
splitted := strings.Split(nodeSelector, ":")
if len(splitted) != 2 {
return nil, fmt.Errorf("unable to parse nodeSelector %s", nodeSelector)
}
c.nodeSelector = &label{key: splitted[0], value: splitted[1]}
c.log.Infof("Controller will be working with nodes that matched next selector: %v", c.nodeSelector)
}
if c.externalAnnotation {
c.annotationKey = nodeAnnotation
c.log.Infof("External annotation feature is enabled. Annotation: %s", c.annotationKey)
} else {
c.annotationKey = common.DeafultNodeIDAnnotationKey
c.log.Infof("External annotation feature is disabled. Annotation: %s", c.annotationKey)
}
return c, nil
}
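// Wiring sketch (assumptions: kubeClient and kernelObserver are placeholder
// names, not the repo's actual helpers). The selector is parsed from a single
// "key:value" string, and the third argument is only used when the external
// annotation feature is on:
//
//	c, err := NewController("app:baremetal", true, "example.com/node-uuid",
//		kubeClient, kernelObserver, logrus.New())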
func (bmc *Controller) enableForNode(nodeName string) {
bmc.enabledMu.Lock()
bmc.enabledForNode[nodeName] = true
bmc.enabledMu.Unlock()
}
func (bmc *Controller) disableForNode(nodeName string) {
bmc.enabledMu.Lock()
bmc.enabledForNode[nodeName] = false
bmc.enabledMu.Unlock()
}
func (bmc *Controller) isEnabledForNode(nodeName string) bool {
var enabled, ok bool
bmc.enabledMu.RLock()
defer bmc.enabledMu.RUnlock()
if enabled, ok = bmc.enabledForNode[nodeName]; !ok {
return false
}
return enabled
}
func (bmc *Controller) isMatchSelector(k8sNode *coreV1.Node) bool {
if bmc.nodeSelector == nil {
return true
}
val, ok := k8sNode.GetLabels()[bmc.nodeSelector.key]
matched := ok && val == bmc.nodeSelector.value
bmc.log.WithField("method", "isMatchSelector").
Debugf("Node %s matches node selector %v: %v", k8sNode.Name, bmc.nodeSelector, matched)
return matched
}
// SetupWithManager registers Controller to k8s controller manager
func (bmc *Controller) SetupWithManager(m ctrl.Manager) error {
return ctrl.NewControllerManagedBy(m).
For(&nodecrd.Node{}). // primary resource
WithOptions(controller.Options{
MaxConcurrentReconciles: 1, // reconcile objects one at a time; concurrent reconciliation isn't supported
}).
Watches(&source.Kind{Type: &coreV1.Node{}}, &handler.EnqueueRequestForObject{}). // secondary resource
WithEventFilter(predicate.Funcs{
CreateFunc: func(e event.CreateEvent) bool {
if _, ok := e.Object.(*nodecrd.Node); ok {
return true
}
k8sNode, ok := e.Object.(*coreV1.Node)
if !ok || !bmc.isMatchSelector(k8sNode) {
return false
}
bmc.enableForNode(k8sNode.Name)
return true
},
UpdateFunc: func(e event.UpdateEvent) bool {
if _, ok := e.ObjectOld.(*nodecrd.Node); ok {
return true
}
nodeOld, ok := e.ObjectOld.(*coreV1.Node)
if !ok {
return false
}
nodeNew := e.ObjectNew.(*coreV1.Node)
if !bmc.isMatchSelector(nodeNew) {
return false
}
if !bmc.isEnabledForNode(nodeNew.Name) {
bmc.enableForNode(nodeNew.Name)
}
annotationAreTheSame := reflect.DeepEqual(nodeOld.GetAnnotations(), nodeNew.GetAnnotations())
addressesAreTheSame := reflect.DeepEqual(nodeOld.Status.Addresses, nodeNew.Status.Addresses)
labelsAreTheSame := bmc.nodeSelector == nil || reflect.DeepEqual(nodeOld.GetLabels(), nodeNew.GetLabels())
return !annotationAreTheSame || !addressesAreTheSame || !labelsAreTheSame
},
}).
Complete(bmc)
}
// Reconcile reconciles Node CR and k8s Node objects
// first determine which object triggered the current Reconcile, then run the corresponding reconciliation method
func (bmc *Controller) Reconcile(req ctrl.Request) (ctrl.Result, error) {
ll := bmc.log.WithFields(logrus.Fields{
"method": "Reconcile",
"name": req.Name,
})
var err error
// if the name in the request doesn't start with namePrefix, the controller tries to read the k8s node object first;
// however, if it gets a NotFound error it falls back to reading the Node CR as well
if !strings.HasPrefix(req.Name, namePrefix) {
k8sNode := new(coreV1.Node)
err = bmc.k8sClient.ReadCR(context.Background(), req.Name, "", k8sNode)
switch {
case err == nil:
ll.Infof("Reconcile k8s node %s", k8sNode.Name)
return bmc.reconcileForK8sNode(k8sNode)
case !k8sError.IsNotFound(err):
ll.Errorf("Unable to read node object: %v", err)
return ctrl.Result{Requeue: true}, err
}
}
// try to read Node
bmNode := new(nodecrd.Node)
err = bmc.k8sClient.ReadCR(context.Background(), req.Name, "", bmNode)
switch {
case err == nil:
ll.Infof("Reconcile Node %s", bmNode.Name)
return bmc.reconcileForCSIBMNode(bmNode)
case !k8sError.IsNotFound(err):
ll.Errorf("Unable to read Node object: %v", err)
return ctrl.Result{Requeue: true}, err
}
ll.Warnf("unable to detect for which object (%s) that reconcile is. The object may have been deleted", req.String())
return ctrl.Result{}, nil
}
func (bmc *Controller) reconcileForK8sNode(k8sNode *coreV1.Node) (ctrl.Result, error) {
ll := bmc.log.WithFields(logrus.Fields{
"method": "reconcileForK8sNode",
"name": k8sNode.Name,
})
if len(k8sNode.Status.Addresses) == 0 {
err := errors.New("addresses are missing for current k8s node instance")
ll.Error(err)
return ctrl.Result{Requeue: false}, err
}
var (
bmNode = &nodecrd.Node{}
bmNodeFromCache bool
bmNodeName string
bmNodes []nodecrd.Node
)
// get corresponding Node CR name from cache
if bmNodeName, bmNodeFromCache = bmc.cache.getCSIBMNodeName(k8sNode.Name); bmNodeFromCache {
if err := bmc.k8sClient.ReadCR(context.Background(), bmNodeName, "", bmNode); err != nil {
ll.Errorf("Unable to read Node %s: %v", bmNodeName, err)
return ctrl.Result{Requeue: true}, err
}
bmNodes = []nodecrd.Node{*bmNode}
}
if !bmNodeFromCache {
bmNodeCRs := new(nodecrd.NodeList)
if err := bmc.k8sClient.ReadList(context.Background(), bmNodeCRs); err != nil {
ll.Errorf("Unable to read Node CRs list: %v", err)
return ctrl.Result{Requeue: true}, err
}
bmNodes = bmNodeCRs.Items
}
matchedCRs := make([]string, 0)
for i := range bmNodes {
matchedAddresses := bmc.matchedAddressesCount(&bmNodes[i], k8sNode)
if len(bmNodes[i].Spec.Addresses) > 0 && matchedAddresses == len(bmNodes[i].Spec.Addresses) {
bmNode = &bmNodes[i]
matchedCRs = append(matchedCRs, bmNode.Name)
continue
}
if matchedAddresses > 0 {
ll.Errorf("There is Node %s that partially match k8s node %s. Node.Spec: %v, k8s node addresses: %v. "+
"Node Spec should be edited to match exactly one kubernetes node",
bmNodes[i].Name, k8sNode.Name, bmNodes[i].Spec, k8sNode.Status.Addresses)
return ctrl.Result{}, nil
}
}
if len(matchedCRs) > 1 {
ll.Errorf("More then one Node CR corresponds to the current k8s node (%d). Matched Node CRs: %v", len(matchedCRs), matchedCRs)
return ctrl.Result{}, nil
}
// create Node CR
if len(matchedCRs) == 0 {
id := bmc.constructNodeID(k8sNode)
bmNodeName := namePrefix + id
bmNode = bmc.k8sClient.ConstructCSIBMNodeCR(bmNodeName, api.Node{
UUID: id,
Addresses: bmc.constructAddresses(k8sNode),
})
bmNode.Finalizers = []string{csibmNodeFinalizer}
if err := bmc.k8sClient.CreateCR(context.Background(), bmNodeName, bmNode); err != nil {
ll.Errorf("Unable to create Node CR: %v", err)
return ctrl.Result{Requeue: true}, err
}
}
bmc.cache.put(k8sNode.Name, bmNode.Name)
return bmc.updateNodeLabelsAndAnnotation(k8sNode, bmNode.Spec.UUID)
}
func (bmc *Controller) reconcileForCSIBMNode(bmNode *nodecrd.Node) (ctrl.Result, error) {
ll := bmc.log.WithFields(logrus.Fields{
"method": "reconcileForCSIBMNode",
"name": bmNode.Name,
})
if len(bmNode.Spec.Addresses) == 0 {
err := errors.New("addresses are missing for current Node instance")
ll.Error(err)
return ctrl.Result{Requeue: false}, err
}
var (
k8sNode = &coreV1.Node{}
k8sNodeName string
k8sNodes []coreV1.Node
k8sNodeFromCache bool
)
// get corresponding k8s node name from cache
// note: plain assignment ("=") is required here; ":=" would shadow k8sNodeFromCache and force the list fallback below even on a cache hit
if k8sNodeName, k8sNodeFromCache = bmc.cache.getK8sNodeName(bmNode.Name); k8sNodeFromCache {
if err := bmc.k8sClient.ReadCR(context.Background(), k8sNodeName, "", k8sNode); err != nil {
ll.Errorf("Unable to read k8s node %s: %v", k8sNodeName, err)
return ctrl.Result{Requeue: true}, err
}
k8sNodes = []coreV1.Node{*k8sNode}
}
if !k8sNodeFromCache {
k8sNodeCRs := new(coreV1.NodeList)
if err := bmc.k8sClient.ReadList(context.Background(), k8sNodeCRs); err != nil {
ll.Errorf("Unable to read k8s nodes list: %v", err)
return ctrl.Result{Requeue: true}, err
}
k8sNodes = k8sNodeCRs.Items
}
matchedNodes := make([]string, 0)
for i := range k8sNodes {
matchedAddresses := bmc.matchedAddressesCount(bmNode, &k8sNodes[i])
if matchedAddresses == len(bmNode.Spec.Addresses) {
k8sNode = &k8sNodes[i]
matchedNodes = append(matchedNodes, k8sNode.Name)
continue
}
if matchedAddresses > 0 {
ll.Errorf("There is k8s node %s that partially match Node CR %s. Node.Spec: %v, k8s node addresses: %v",
k8sNodes[i].Name, bmNode.Name, bmNode.Spec, k8sNodes[i].Status.Addresses)
return ctrl.Result{}, nil
}
}
if !bmNode.GetDeletionTimestamp().IsZero() {
bmc.disableForNode(k8sNode.Name)
if err := bmc.removeLabelsAndAnnotation(k8sNode); err != nil {
ll.Errorf("Unable to remove annotations or labels from node %s: %v", k8sNode.Name, err)
bmc.enableForNode(k8sNode.Name)
return ctrl.Result{Requeue: true}, err
}
ll.Infof("Annotations and labels from node %s was removed. Removing finalizer from %s.", k8sNode.Name, bmNode.Name)
bmNode.Finalizers = nil
err := bmc.k8sClient.UpdateCR(context.Background(), bmNode)
if err != nil {
ll.Errorf("Unable to update Node %s: %v", bmNode.Name, err)
}
return ctrl.Result{}, err
}
if len(matchedNodes) == 1 {
bmc.cache.put(k8sNode.Name, bmNode.Name)
return bmc.updateNodeLabelsAndAnnotation(k8sNode, bmNode.Spec.UUID)
}
ll.Warnf("Unable to detect k8s node that corresponds to Node %v, matched nodes: %v", bmNode, matchedNodes)
return ctrl.Result{}, nil
}
// updateNodeLabelsAndAnnotation checks the node ID annotation value on the provided k8s Node and compares it with nodeUUID,
// then parses the OS Image info and puts/updates the os-name and os-version labels if needed
func (bmc *Controller) updateNodeLabelsAndAnnotation(k8sNode *coreV1.Node, nodeUUID string) (ctrl.Result, error) {
ll := bmc.log.WithField("method", "updateNodeLabelsAndAnnotation")
toUpdate := false
// check for annotations
val, ok := k8sNode.GetAnnotations()[bmc.annotationKey]
if bmc.externalAnnotation && !ok {
ll.Errorf("external annotaion %s is not accesible on node %s", bmc.annotationKey, k8sNode)
}
if !bmc.externalAnnotation && ok {
if val == nodeUUID {
ll.Tracef("%s value for node %s is already %s", bmc.annotationKey, k8sNode.Name, nodeUUID)
} else {
ll.Warnf("%s value for node %s is %s, however should have (according to corresponding Node's UUID) %s, going to update annotation's value.",
bmc.annotationKey, k8sNode.Name, val, nodeUUID)
k8sNode.ObjectMeta.Annotations[bmc.annotationKey] = nodeUUID
toUpdate = true
}
}
if !bmc.externalAnnotation && !ok {
ll.Errorf("annotaion %s is not accesible on node %s", bmc.annotationKey, k8sNode)
if k8sNode.ObjectMeta.Annotations == nil {
k8sNode.ObjectMeta.Annotations = make(map[string]string, 1)
}
k8sNode.ObjectMeta.Annotations[bmc.annotationKey] = nodeUUID
toUpdate = true
}
// initialize labels map if needed
if k8sNode.Labels == nil {
k8sNode.ObjectMeta.Labels = make(map[string]string, 1)
}
// check for OS labels
name, version, err := util.GetOSNameAndVersion(k8sNode.Status.NodeInfo.OSImage)
if err == nil {
// os name
if k8sNode.Labels[common.NodeOSNameLabelKey] != name {
// not set yet or differs
ll.Infof("Setting label %s=%s on node %s", common.NodeOSNameLabelKey, name, k8sNode.Name)
k8sNode.Labels[common.NodeOSNameLabelKey] = name
toUpdate = true
}
// os version
if k8sNode.Labels[common.NodeOSVersionLabelKey] != version {
// not set yet or differs
ll.Infof("Setting label %s=%s on node %s", common.NodeOSVersionLabelKey, version, k8sNode.Name)
k8sNode.Labels[common.NodeOSVersionLabelKey] = version
toUpdate = true
}
} else {
ll.Errorf("Failed to obtain OS information: %s", err)
}
// check for kernel version label
version, err = util.GetKernelVersion(k8sNode.Status.NodeInfo.KernelVersion)
if err == nil {
// kernel version
if k8sNode.Labels[common.NodeKernelVersionLabelKey] != version {
// not set yet or differs
ll.Infof("Setting label %s=%s on node %s", common.NodeKernelVersionLabelKey, version, k8sNode.Name)
k8sNode.Labels[common.NodeKernelVersionLabelKey] = version
toUpdate = true
if bmc.observer != nil {
bmc.observer.Notify(version)
}
}
} else {
ll.Errorf("Failed to obtain Kernel version information: %s", err)
}
if toUpdate {
if err := bmc.k8sClient.UpdateCR(context.Background(), k8sNode); err != nil {
ll.Errorf("Unable to update node object: %v", err)
return ctrl.Result{Requeue: true}, err
}
}
return ctrl.Result{}, nil
}
func (bmc *Controller) removeLabelsAndAnnotation(k8sNode *coreV1.Node) error {
toUpdate := false
// check annotations
annotations := k8sNode.GetAnnotations()
if _, ok := annotations[bmc.annotationKey]; ok {
if !bmc.externalAnnotation {
delete(annotations, bmc.annotationKey)
toUpdate = true
}
}
// check labels
labels := k8sNode.GetLabels()
// os name
if _, ok := labels[common.NodeOSNameLabelKey]; ok {
delete(labels, common.NodeOSNameLabelKey)
toUpdate = true
}
// os version
if _, ok := labels[common.NodeOSVersionLabelKey]; ok {
delete(labels, common.NodeOSVersionLabelKey)
toUpdate = true
}
// kernel version
if _, ok := labels[common.NodeKernelVersionLabelKey]; ok {
delete(labels, common.NodeKernelVersionLabelKey)
toUpdate = true
}
// external csi-provisioner label
// TODO https://github.com/dell/csi-baremetal/issues/319 Rework after operator implementation
if _, ok := labels[common.NodeIDTopologyLabelKey]; ok {
delete(labels, common.NodeIDTopologyLabelKey)
toUpdate = true
}
if toUpdate {
k8sNode.Annotations = annotations
k8sNode.Labels = labels
return bmc.k8sClient.UpdateCR(context.Background(), k8sNode)
}
return nil
}
// matchedAddressesCount returns the number of k8s node addresses that have a corresponding address in the bmNodeCR.Spec.Addresses map
func (bmc *Controller) matchedAddressesCount(bmNodeCR *nodecrd.Node, k8sNode *coreV1.Node) int {
matchedCount := 0
for _, addr := range k8sNode.Status.Addresses {
crAddr, ok := bmNodeCR.Spec.Addresses[string(addr.Type)]
if ok && crAddr == addr.Address {
matchedCount++
}
}
return matchedCount
}
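// matchExample is a hypothetical illustration (not part of the original
// code): one overlapping address out of one CR address counts as a full match,
// because matchedAddressesCount equals len(Spec.Addresses).
func matchExample(bmc *Controller) bool {
cr := &nodecrd.Node{}
cr.Spec.Addresses = map[string]string{"InternalIP": "10.0.0.2"}
k8sNode := &coreV1.Node{}
k8sNode.Status.Addresses = []coreV1.NodeAddress{
{Type: coreV1.NodeInternalIP, Address: "10.0.0.2"},
{Type: coreV1.NodeHostName, Address: "worker-1"},
}
return bmc.matchedAddressesCount(cr, k8sNode) == len(cr.Spec.Addresses) // true
}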
// constructAddresses converts k8sNode.Status.Addresses into a map[string]string, key - address type, value - address
func (bmc *Controller) constructAddresses(k8sNode *coreV1.Node) map[string]string {
res := make(map[string]string, len(k8sNode.Status.Addresses))
for _, addr := range k8sNode.Status.Addresses {
res[string(addr.Type)] = addr.Address
}
return res
}
func (bmc *Controller) constructNodeID(k8sNode *coreV1.Node) string {
if bmc.externalAnnotation {
if val, ok := k8sNode.GetAnnotations()[bmc.annotationKey]; ok {
return val
}
}
return uuid.New().String()
} | /*
Copyright © 2020 Dell Inc. or its subsidiaries. All Rights Reserved. | random_line_split | |
controller.go | /*
Copyright © 2020 Dell Inc. or its subsidiaries. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package operator
import (
"context"
"errors"
"fmt"
"reflect"
"strings"
"sync"
"github.com/google/uuid"
"github.com/sirupsen/logrus"
coreV1 "k8s.io/api/core/v1"
k8sError "k8s.io/apimachinery/pkg/api/errors"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/controller"
"sigs.k8s.io/controller-runtime/pkg/event"
"sigs.k8s.io/controller-runtime/pkg/handler"
"sigs.k8s.io/controller-runtime/pkg/predicate"
"sigs.k8s.io/controller-runtime/pkg/source"
api "github.com/dell/csi-baremetal/api/generated/v1"
"github.com/dell/csi-baremetal/api/v1/nodecrd"
"github.com/dell/csi-baremetal/pkg/base/k8s"
"github.com/dell/csi-baremetal/pkg/base/util"
observer "github.com/dell/csi-baremetal/pkg/common"
"github.com/dell/csi-baremetal/pkg/crcontrollers/operator/common"
)
const (
// namePrefix it is a prefix for Node CR name
namePrefix = "csibmnode-"
// finalizer for Node custom resource
csibmNodeFinalizer = "dell.emc.csi/csibmnode-cleanup"
)
// Controller is a controller for Node CR
type Controller struct {
k8sClient *k8s.KubeClient
nodeSelector *label
cache nodesMapping
// holds k8s node names for which the special ID setting is enabled;
// it is used during Node CR deletion to avoid recreation
enabledForNode map[string]bool
enabledMu sync.RWMutex
observer observer.Observer
log *logrus.Entry
// if used external annotations
externalAnnotation bool
// holds annotation which contains node UUID
annotationKey string
}
type label struct {
key string
value string
}
// nodesMapping is a non-thread-safe cache that holds the mapping between k8s node names and BMCSINode CR object names
type nodesMapping struct {
k8sToBMNode map[string]string // k8s node name to Node CR name
bmToK8sNode map[string]string // Node CR name to k8s node name
}
func (nc *nodesMapping) getK8sNodeName(bmNodeName string) (string, bool) { |
func (nc *nodesMapping) getCSIBMNodeName(k8sNodeName string) (string, bool) {
res, ok := nc.k8sToBMNode[k8sNodeName]
return res, ok
}
func (nc *nodesMapping) put(k8sNodeName, bmNodeName string) {
nc.k8sToBMNode[k8sNodeName] = bmNodeName
nc.bmToK8sNode[bmNodeName] = k8sNodeName
}
// NewController returns instance of Controller
func NewController(nodeSelector string, useExternalAnnotation bool, nodeAnnotation string,
k8sClient *k8s.KubeClient, observer observer.Observer, logger *logrus.Logger) (*Controller, error) {
c := &Controller{
k8sClient: k8sClient,
cache: nodesMapping{
k8sToBMNode: make(map[string]string),
bmToK8sNode: make(map[string]string),
},
observer: observer,
enabledForNode: make(map[string]bool, 3), // small optimization: a cluster with 3 worker nodes never needs to grow this map
log: logger.WithField("component", "Controller"),
externalAnnotation: useExternalAnnotation,
}
if nodeSelector != "" {
splitted := strings.Split(nodeSelector, ":")
if len(splitted) != 2 {
return nil, fmt.Errorf("unable to parse nodeSelector %s", nodeSelector)
}
c.nodeSelector = &label{key: splitted[0], value: splitted[1]}
c.log.Infof("Controller will be working with nodes that matched next selector: %v", c.nodeSelector)
}
if c.externalAnnotation {
c.annotationKey = nodeAnnotation
c.log.Infof("External annotation feature is enabled. Annotation: %s", c.annotationKey)
} else {
c.annotationKey = common.DeafultNodeIDAnnotationKey
c.log.Infof("External annotation feature is disabled. Annotation: %s", c.annotationKey)
}
return c, nil
}
func (bmc *Controller) enableForNode(nodeName string) {
bmc.enabledMu.Lock()
bmc.enabledForNode[nodeName] = true
bmc.enabledMu.Unlock()
}
func (bmc *Controller) disableForNode(nodeName string) {
bmc.enabledMu.Lock()
bmc.enabledForNode[nodeName] = false
bmc.enabledMu.Unlock()
}
func (bmc *Controller) isEnabledForNode(nodeName string) bool {
var enabled, ok bool
bmc.enabledMu.RLock()
defer bmc.enabledMu.RUnlock()
if enabled, ok = bmc.enabledForNode[nodeName]; !ok {
return false
}
return enabled
}
func (bmc *Controller) isMatchSelector(k8sNode *coreV1.Node) bool {
if bmc.nodeSelector == nil {
return true
}
val, ok := k8sNode.GetLabels()[bmc.nodeSelector.key]
matched := ok && val == bmc.nodeSelector.value
bmc.log.WithField("method", "isMatchSelector").
Debugf("Node %s matches node selector %v: %v", k8sNode.Name, bmc.nodeSelector, matched)
return matched
}
// SetupWithManager registers Controller to k8s controller manager
func (bmc *Controller) SetupWithManager(m ctrl.Manager) error {
return ctrl.NewControllerManagedBy(m).
For(&nodecrd.Node{}). // primary resource
WithOptions(controller.Options{
MaxConcurrentReconciles: 1, // reconcile objects one at a time; concurrent reconciliation isn't supported
}).
Watches(&source.Kind{Type: &coreV1.Node{}}, &handler.EnqueueRequestForObject{}). // secondary resource
WithEventFilter(predicate.Funcs{
CreateFunc: func(e event.CreateEvent) bool {
if _, ok := e.Object.(*nodecrd.Node); ok {
return true
}
k8sNode, ok := e.Object.(*coreV1.Node)
if !ok || !bmc.isMatchSelector(k8sNode) {
return false
}
bmc.enableForNode(k8sNode.Name)
return true
},
UpdateFunc: func(e event.UpdateEvent) bool {
if _, ok := e.ObjectOld.(*nodecrd.Node); ok {
return true
}
nodeOld, ok := e.ObjectOld.(*coreV1.Node)
if !ok {
return false
}
nodeNew := e.ObjectNew.(*coreV1.Node)
if !bmc.isMatchSelector(nodeNew) {
return false
}
if !bmc.isEnabledForNode(nodeNew.Name) {
bmc.enableForNode(nodeNew.Name)
}
annotationAreTheSame := reflect.DeepEqual(nodeOld.GetAnnotations(), nodeNew.GetAnnotations())
addressesAreTheSame := reflect.DeepEqual(nodeOld.Status.Addresses, nodeNew.Status.Addresses)
labelsAreTheSame := bmc.nodeSelector == nil || reflect.DeepEqual(nodeOld.GetLabels(), nodeNew.GetLabels())
return !annotationAreTheSame || !addressesAreTheSame || !labelsAreTheSame
},
}).
Complete(bmc)
}
// Reconcile reconciles Node CR and k8s Node objects
// first determine which object triggered the current Reconcile, then run the corresponding reconciliation method
func (bmc *Controller) Reconcile(req ctrl.Request) (ctrl.Result, error) {
ll := bmc.log.WithFields(logrus.Fields{
"method": "Reconcile",
"name": req.Name,
})
var err error
// if the name in the request doesn't start with namePrefix, the controller tries to read the k8s node object first;
// however, if it gets a NotFound error it falls back to reading the Node CR as well
if !strings.HasPrefix(req.Name, namePrefix) {
k8sNode := new(coreV1.Node)
err = bmc.k8sClient.ReadCR(context.Background(), req.Name, "", k8sNode)
switch {
case err == nil:
ll.Infof("Reconcile k8s node %s", k8sNode.Name)
return bmc.reconcileForK8sNode(k8sNode)
case !k8sError.IsNotFound(err):
ll.Errorf("Unable to read node object: %v", err)
return ctrl.Result{Requeue: true}, err
}
}
// try to read Node
bmNode := new(nodecrd.Node)
err = bmc.k8sClient.ReadCR(context.Background(), req.Name, "", bmNode)
switch {
case err == nil:
ll.Infof("Reconcile Node %s", bmNode.Name)
return bmc.reconcileForCSIBMNode(bmNode)
case !k8sError.IsNotFound(err):
ll.Errorf("Unable to read Node object: %v", err)
return ctrl.Result{Requeue: true}, err
}
ll.Warnf("unable to detect for which object (%s) that reconcile is. The object may have been deleted", req.String())
return ctrl.Result{}, nil
}
func (bmc *Controller) reconcileForK8sNode(k8sNode *coreV1.Node) (ctrl.Result, error) {
ll := bmc.log.WithFields(logrus.Fields{
"method": "reconcileForK8sNode",
"name": k8sNode.Name,
})
if len(k8sNode.Status.Addresses) == 0 {
err := errors.New("addresses are missing for current k8s node instance")
ll.Error(err)
return ctrl.Result{Requeue: false}, err
}
var (
bmNode = &nodecrd.Node{}
bmNodeFromCache bool
bmNodeName string
bmNodes []nodecrd.Node
)
// get corresponding Node CR name from cache
if bmNodeName, bmNodeFromCache = bmc.cache.getCSIBMNodeName(k8sNode.Name); bmNodeFromCache {
if err := bmc.k8sClient.ReadCR(context.Background(), bmNodeName, "", bmNode); err != nil {
ll.Errorf("Unable to read Node %s: %v", bmNodeName, err)
return ctrl.Result{Requeue: true}, err
}
bmNodes = []nodecrd.Node{*bmNode}
}
if !bmNodeFromCache {
bmNodeCRs := new(nodecrd.NodeList)
if err := bmc.k8sClient.ReadList(context.Background(), bmNodeCRs); err != nil {
ll.Errorf("Unable to read Node CRs list: %v", err)
return ctrl.Result{Requeue: true}, err
}
bmNodes = bmNodeCRs.Items
}
matchedCRs := make([]string, 0)
for i := range bmNodes {
matchedAddresses := bmc.matchedAddressesCount(&bmNodes[i], k8sNode)
if len(bmNodes[i].Spec.Addresses) > 0 && matchedAddresses == len(bmNodes[i].Spec.Addresses) {
bmNode = &bmNodes[i]
matchedCRs = append(matchedCRs, bmNode.Name)
continue
}
if matchedAddresses > 0 {
ll.Errorf("There is Node %s that partially match k8s node %s. Node.Spec: %v, k8s node addresses: %v. "+
"Node Spec should be edited to match exactly one kubernetes node",
bmNodes[i].Name, k8sNode.Name, bmNodes[i].Spec, k8sNode.Status.Addresses)
return ctrl.Result{}, nil
}
}
if len(matchedCRs) > 1 {
ll.Errorf("More then one Node CR corresponds to the current k8s node (%d). Matched Node CRs: %v", len(matchedCRs), matchedCRs)
return ctrl.Result{}, nil
}
// create Node CR
if len(matchedCRs) == 0 {
id := bmc.constructNodeID(k8sNode)
bmNodeName := namePrefix + id
bmNode = bmc.k8sClient.ConstructCSIBMNodeCR(bmNodeName, api.Node{
UUID: id,
Addresses: bmc.constructAddresses(k8sNode),
})
bmNode.Finalizers = []string{csibmNodeFinalizer}
if err := bmc.k8sClient.CreateCR(context.Background(), bmNodeName, bmNode); err != nil {
ll.Errorf("Unable to create Node CR: %v", err)
return ctrl.Result{Requeue: true}, err
}
}
bmc.cache.put(k8sNode.Name, bmNode.Name)
return bmc.updateNodeLabelsAndAnnotation(k8sNode, bmNode.Spec.UUID)
}
func (bmc *Controller) reconcileForCSIBMNode(bmNode *nodecrd.Node) (ctrl.Result, error) {
ll := bmc.log.WithFields(logrus.Fields{
"method": "reconcileForCSIBMNode",
"name": bmNode.Name,
})
if len(bmNode.Spec.Addresses) == 0 {
err := errors.New("addresses are missing for current Node instance")
ll.Error(err)
return ctrl.Result{Requeue: false}, err
}
var (
k8sNode = &coreV1.Node{}
k8sNodeName string
k8sNodes []coreV1.Node
k8sNodeFromCache bool
)
// get corresponding k8s node name from cache
// note: plain assignment ("=") is required here; ":=" would shadow k8sNodeFromCache and force the list fallback below even on a cache hit
if k8sNodeName, k8sNodeFromCache = bmc.cache.getK8sNodeName(bmNode.Name); k8sNodeFromCache {
if err := bmc.k8sClient.ReadCR(context.Background(), k8sNodeName, "", k8sNode); err != nil {
ll.Errorf("Unable to read k8s node %s: %v", k8sNodeName, err)
return ctrl.Result{Requeue: true}, err
}
k8sNodes = []coreV1.Node{*k8sNode}
}
if !k8sNodeFromCache {
k8sNodeCRs := new(coreV1.NodeList)
if err := bmc.k8sClient.ReadList(context.Background(), k8sNodeCRs); err != nil {
ll.Errorf("Unable to read k8s nodes list: %v", err)
return ctrl.Result{Requeue: true}, err
}
k8sNodes = k8sNodeCRs.Items
}
matchedNodes := make([]string, 0)
for i := range k8sNodes {
matchedAddresses := bmc.matchedAddressesCount(bmNode, &k8sNodes[i])
if matchedAddresses == len(bmNode.Spec.Addresses) {
k8sNode = &k8sNodes[i]
matchedNodes = append(matchedNodes, k8sNode.Name)
continue
}
if matchedAddresses > 0 {
ll.Errorf("There is k8s node %s that partially match Node CR %s. Node.Spec: %v, k8s node addresses: %v",
k8sNodes[i].Name, bmNode.Name, bmNode.Spec, k8sNodes[i].Status.Addresses)
return ctrl.Result{}, nil
}
}
if !bmNode.GetDeletionTimestamp().IsZero() {
bmc.disableForNode(k8sNode.Name)
if err := bmc.removeLabelsAndAnnotation(k8sNode); err != nil {
ll.Errorf("Unable to remove annotations or labels from node %s: %v", k8sNode.Name, err)
bmc.enableForNode(k8sNode.Name)
return ctrl.Result{Requeue: true}, err
}
ll.Infof("Annotations and labels from node %s was removed. Removing finalizer from %s.", k8sNode.Name, bmNode.Name)
bmNode.Finalizers = nil
err := bmc.k8sClient.UpdateCR(context.Background(), bmNode)
if err != nil {
ll.Errorf("Unable to update Node %s: %v", bmNode.Name, err)
}
return ctrl.Result{}, err
}
if len(matchedNodes) == 1 {
bmc.cache.put(k8sNode.Name, bmNode.Name)
return bmc.updateNodeLabelsAndAnnotation(k8sNode, bmNode.Spec.UUID)
}
ll.Warnf("Unable to detect k8s node that corresponds to Node %v, matched nodes: %v", bmNode, matchedNodes)
return ctrl.Result{}, nil
}
// updateNodeLabelsAndAnnotation checks the node ID annotation value on the provided k8s Node and compares it with nodeUUID,
// then parses the OS Image info and puts/updates the os-name and os-version labels if needed
func (bmc *Controller) updateNodeLabelsAndAnnotation(k8sNode *coreV1.Node, nodeUUID string) (ctrl.Result, error) {
ll := bmc.log.WithField("method", "updateNodeLabelsAndAnnotation")
toUpdate := false
// check for annotations
val, ok := k8sNode.GetAnnotations()[bmc.annotationKey]
if bmc.externalAnnotation && !ok {
ll.Errorf("external annotaion %s is not accesible on node %s", bmc.annotationKey, k8sNode)
}
if !bmc.externalAnnotation && ok {
if val == nodeUUID {
ll.Tracef("%s value for node %s is already %s", bmc.annotationKey, k8sNode.Name, nodeUUID)
} else {
ll.Warnf("%s value for node %s is %s, however should have (according to corresponding Node's UUID) %s, going to update annotation's value.",
bmc.annotationKey, k8sNode.Name, val, nodeUUID)
k8sNode.ObjectMeta.Annotations[bmc.annotationKey] = nodeUUID
toUpdate = true
}
}
if !bmc.externalAnnotation && !ok {
ll.Errorf("annotaion %s is not accesible on node %s", bmc.annotationKey, k8sNode)
if k8sNode.ObjectMeta.Annotations == nil {
k8sNode.ObjectMeta.Annotations = make(map[string]string, 1)
}
k8sNode.ObjectMeta.Annotations[bmc.annotationKey] = nodeUUID
toUpdate = true
}
// initialize labels map if needed
if k8sNode.Labels == nil {
k8sNode.ObjectMeta.Labels = make(map[string]string, 1)
}
// check for OS labels
name, version, err := util.GetOSNameAndVersion(k8sNode.Status.NodeInfo.OSImage)
if err == nil {
// os name
if k8sNode.Labels[common.NodeOSNameLabelKey] != name {
// not set yet or differs
ll.Infof("Setting label %s=%s on node %s", common.NodeOSNameLabelKey, name, k8sNode.Name)
k8sNode.Labels[common.NodeOSNameLabelKey] = name
toUpdate = true
}
// os version
if k8sNode.Labels[common.NodeOSVersionLabelKey] != version {
// not set yet or differs
ll.Infof("Setting label %s=%s on node %s", common.NodeOSVersionLabelKey, version, k8sNode.Name)
k8sNode.Labels[common.NodeOSVersionLabelKey] = version
toUpdate = true
}
} else {
ll.Errorf("Failed to obtain OS information: %s", err)
}
// check for kernel version label
version, err = util.GetKernelVersion(k8sNode.Status.NodeInfo.KernelVersion)
if err == nil {
// kernel version
if k8sNode.Labels[common.NodeKernelVersionLabelKey] != version {
// not set yet or differs
ll.Infof("Setting label %s=%s on node %s", common.NodeKernelVersionLabelKey, version, k8sNode.Name)
k8sNode.Labels[common.NodeKernelVersionLabelKey] = version
toUpdate = true
if bmc.observer != nil {
bmc.observer.Notify(version)
}
}
} else {
ll.Errorf("Failed to obtain Kernel version information: %s", err)
}
if toUpdate {
if err := bmc.k8sClient.UpdateCR(context.Background(), k8sNode); err != nil {
ll.Errorf("Unable to update node object: %v", err)
return ctrl.Result{Requeue: true}, err
}
}
return ctrl.Result{}, nil
}
func (bmc *Controller) removeLabelsAndAnnotation(k8sNode *coreV1.Node) error {
toUpdate := false
// check annotations
annotations := k8sNode.GetAnnotations()
if _, ok := annotations[bmc.annotationKey]; ok {
if !bmc.externalAnnotation {
delete(annotations, bmc.annotationKey)
toUpdate = true
}
}
// check labels
labels := k8sNode.GetLabels()
// os name
if _, ok := labels[common.NodeOSNameLabelKey]; ok {
delete(labels, common.NodeOSNameLabelKey)
toUpdate = true
}
// os version
if _, ok := labels[common.NodeOSVersionLabelKey]; ok {
delete(labels, common.NodeOSVersionLabelKey)
toUpdate = true
}
// kernel version
if _, ok := labels[common.NodeKernelVersionLabelKey]; ok {
delete(labels, common.NodeKernelVersionLabelKey)
toUpdate = true
}
// external csi-provisioner label
// TODO https://github.com/dell/csi-baremetal/issues/319 Rework after operator implementation
if _, ok := labels[common.NodeIDTopologyLabelKey]; ok {
delete(labels, common.NodeIDTopologyLabelKey)
toUpdate = true
}
if toUpdate {
k8sNode.Annotations = annotations
k8sNode.Labels = labels
return bmc.k8sClient.UpdateCR(context.Background(), k8sNode)
}
return nil
}
// matchedAddressesCount returns the number of k8s node addresses that have a corresponding address in the bmNodeCR.Spec.Addresses map
func (bmc *Controller) matchedAddressesCount(bmNodeCR *nodecrd.Node, k8sNode *coreV1.Node) int {
matchedCount := 0
for _, addr := range k8sNode.Status.Addresses {
crAddr, ok := bmNodeCR.Spec.Addresses[string(addr.Type)]
if ok && crAddr == addr.Address {
matchedCount++
}
}
return matchedCount
}
// constructAddresses converts k8sNode.Status.Addresses into a map[string]string, key - address type, value - address
func (bmc *Controller) constructAddresses(k8sNode *coreV1.Node) map[string]string {
res := make(map[string]string, len(k8sNode.Status.Addresses))
for _, addr := range k8sNode.Status.Addresses {
res[string(addr.Type)] = addr.Address
}
return res
}
func (bmc *Controller) constructNodeID(k8sNode *coreV1.Node) string {
if bmc.externalAnnotation {
if val, ok := k8sNode.GetAnnotations()[bmc.annotationKey]; ok {
return val
}
}
return uuid.New().String()
}
|
res, ok := nc.bmToK8sNode[bmNodeName]
return res, ok
}
| identifier_body |
controller.go | /*
Copyright © 2020 Dell Inc. or its subsidiaries. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package operator
import (
"context"
"errors"
"fmt"
"reflect"
"strings"
"sync"
"github.com/google/uuid"
"github.com/sirupsen/logrus"
coreV1 "k8s.io/api/core/v1"
k8sError "k8s.io/apimachinery/pkg/api/errors"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/controller"
"sigs.k8s.io/controller-runtime/pkg/event"
"sigs.k8s.io/controller-runtime/pkg/handler"
"sigs.k8s.io/controller-runtime/pkg/predicate"
"sigs.k8s.io/controller-runtime/pkg/source"
api "github.com/dell/csi-baremetal/api/generated/v1"
"github.com/dell/csi-baremetal/api/v1/nodecrd"
"github.com/dell/csi-baremetal/pkg/base/k8s"
"github.com/dell/csi-baremetal/pkg/base/util"
observer "github.com/dell/csi-baremetal/pkg/common"
"github.com/dell/csi-baremetal/pkg/crcontrollers/operator/common"
)
const (
// namePrefix it is a prefix for Node CR name
namePrefix = "csibmnode-"
// finalizer for Node custom resource
csibmNodeFinalizer = "dell.emc.csi/csibmnode-cleanup"
)
// Controller is a controller for Node CR
type Controller struct {
k8sClient *k8s.KubeClient
nodeSelector *label
cache nodesMapping
// holds k8s node names for which the special ID setting is enabled;
// it is used during Node CR deletion to avoid recreation
enabledForNode map[string]bool
enabledMu sync.RWMutex
observer observer.Observer
log *logrus.Entry
// if used external annotations
externalAnnotation bool
// holds annotation which contains node UUID
annotationKey string
}
type label struct {
key string
value string
}
// nodesMapping is a non-thread-safe cache that holds the mapping between k8s node names and BMCSINode CR object names
type nodesMapping struct {
k8sToBMNode map[string]string // k8s node name to Node CR name
bmToK8sNode map[string]string // Node CR name to k8s node name
}
func (nc *nodesMapping) getK8sNodeName(bmNodeName string) (string, bool) {
res, ok := nc.bmToK8sNode[bmNodeName]
return res, ok
}
func (nc *nodesMapping) getCSIBMNodeName(k8sNodeName string) (string, bool) {
res, ok := nc.k8sToBMNode[k8sNodeName]
return res, ok
}
func (nc *nodesMapping) put(k8sNodeName, bmNodeName string) {
nc.k8sToBMNode[k8sNodeName] = bmNodeName
nc.bmToK8sNode[bmNodeName] = k8sNodeName
}
// NewController returns instance of Controller
func NewController(nodeSelector string, useExternalAnnotation bool, nodeAnnotation string,
k8sClient *k8s.KubeClient, observer observer.Observer, logger *logrus.Logger) (*Controller, error) {
c := &Controller{
k8sClient: k8sClient,
cache: nodesMapping{
k8sToBMNode: make(map[string]string),
bmToK8sNode: make(map[string]string),
},
observer: observer,
enabledForNode: make(map[string]bool, 3), // small optimization: a cluster with 3 worker nodes never needs to grow this map
log: logger.WithField("component", "Controller"),
externalAnnotation: useExternalAnnotation,
}
if nodeSelector != "" {
splitted := strings.Split(nodeSelector, ":")
if len(splitted) != 2 {
return nil, fmt.Errorf("unable to parse nodeSelector %s", nodeSelector)
}
c.nodeSelector = &label{key: splitted[0], value: splitted[1]}
c.log.Infof("Controller will be working with nodes that matched next selector: %v", c.nodeSelector)
}
if c.externalAnnotation {
c.annotationKey = nodeAnnotation
c.log.Infof("External annotation feature is enabled. Annotation: %s", c.annotationKey)
} else {
c.annotationKey = common.DeafultNodeIDAnnotationKey
c.log.Infof("External annotation feature is disabled. Annotation: %s", c.annotationKey)
}
return c, nil
}
func (bmc *Controller) enableForNode(nodeName string) {
bmc.enabledMu.Lock()
bmc.enabledForNode[nodeName] = true
bmc.enabledMu.Unlock()
}
func (bmc *Controller) disableForNode(nodeName string) {
bmc.enabledMu.Lock()
bmc.enabledForNode[nodeName] = false
bmc.enabledMu.Unlock()
}
func (bmc *Controller) isEnabledForNode(nodeName string) bool {
var enabled, ok bool
bmc.enabledMu.RLock()
defer bmc.enabledMu.RUnlock()
if enabled, ok = bmc.enabledForNode[nodeName]; !ok {
return false
}
return enabled
}
func (bmc *Controller) isMatchSelector(k8sNode *coreV1.Node) bool {
if bmc.nodeSelector == nil {
return true
}
val, ok := k8sNode.GetLabels()[bmc.nodeSelector.key]
matched := ok && val == bmc.nodeSelector.value
bmc.log.WithField("method", "isMatchSelector").
Debugf("Node %s matches node selector %v: %v", k8sNode.Name, bmc.nodeSelector, matched)
return matched
}
// SetupWithManager registers Controller to k8s controller manager
func (bmc *Controller) SetupWithManager(m ctrl.Manager) error {
return ctrl.NewControllerManagedBy(m).
For(&nodecrd.Node{}). // primary resource
WithOptions(controller.Options{
MaxConcurrentReconciles: 1, // reconcile objects one at a time; concurrent reconciliation isn't supported
}).
Watches(&source.Kind{Type: &coreV1.Node{}}, &handler.EnqueueRequestForObject{}). // secondary resource
WithEventFilter(predicate.Funcs{
CreateFunc: func(e event.CreateEvent) bool {
if _, ok := e.Object.(*nodecrd.Node); ok {
return true
}
k8sNode, ok := e.Object.(*coreV1.Node)
if !ok || !bmc.isMatchSelector(k8sNode) {
return false
}
bmc.enableForNode(k8sNode.Name)
return true
},
UpdateFunc: func(e event.UpdateEvent) bool {
if _, ok := e.ObjectOld.(*nodecrd.Node); ok {
return true
}
nodeOld, ok := e.ObjectOld.(*coreV1.Node)
if !ok {
return false
}
nodeNew := e.ObjectNew.(*coreV1.Node)
if !bmc.isMatchSelector(nodeNew) {
return false
}
if !bmc.isEnabledForNode(nodeNew.Name) {
bmc.enableForNode(nodeNew.Name)
}
annotationAreTheSame := reflect.DeepEqual(nodeOld.GetAnnotations(), nodeNew.GetAnnotations())
addressesAreTheSame := reflect.DeepEqual(nodeOld.Status.Addresses, nodeNew.Status.Addresses)
labelsAreTheSame := bmc.nodeSelector == nil || reflect.DeepEqual(nodeOld.GetLabels(), nodeNew.GetLabels())
return !annotationAreTheSame || !addressesAreTheSame || !labelsAreTheSame
},
}).
Complete(bmc)
}
// Reconcile reconciles Node CR and k8s Node objects
// first determine which object triggered the current Reconcile, then run the corresponding reconciliation method
func (bmc *Controller) Reconcile(req ctrl.Request) (ctrl.Result, error) {
ll := bmc.log.WithFields(logrus.Fields{
"method": "Reconcile",
"name": req.Name,
})
var err error
// if the name in the request doesn't start with namePrefix, the controller tries to read the k8s node object first;
// however, if it gets a NotFound error it falls back to reading the Node CR as well
if !strings.HasPrefix(req.Name, namePrefix) {
k8sNode := new(coreV1.Node)
err = bmc.k8sClient.ReadCR(context.Background(), req.Name, "", k8sNode)
switch {
case err == nil:
ll.Infof("Reconcile k8s node %s", k8sNode.Name)
return bmc.reconcileForK8sNode(k8sNode)
case !k8sError.IsNotFound(err):
ll.Errorf("Unable to read node object: %v", err)
return ctrl.Result{Requeue: true}, err
}
}
// try to read Node
bmNode := new(nodecrd.Node)
err = bmc.k8sClient.ReadCR(context.Background(), req.Name, "", bmNode)
switch {
case err == nil:
ll.Infof("Reconcile Node %s", bmNode.Name)
return bmc.reconcileForCSIBMNode(bmNode)
case !k8sError.IsNotFound(err):
ll.Errorf("Unable to read Node object: %v", err)
return ctrl.Result{Requeue: true}, err
}
ll.Warnf("unable to detect for which object (%s) that reconcile is. The object may have been deleted", req.String())
return ctrl.Result{}, nil
}
func (bmc *Controller) reconcileForK8sNode(k8sNode *coreV1.Node) (ctrl.Result, error) {
ll := bmc.log.WithFields(logrus.Fields{
"method": "reconcileForK8sNode",
"name": k8sNode.Name,
})
if len(k8sNode.Status.Addresses) == 0 {
err := errors.New("addresses are missing for current k8s node instance")
ll.Error(err)
return ctrl.Result{Requeue: false}, err
}
var (
bmNode = &nodecrd.Node{}
bmNodeFromCache bool
bmNodeName string
bmNodes []nodecrd.Node
)
// get corresponding Node CR name from cache
if bmNodeName, bmNodeFromCache = bmc.cache.getCSIBMNodeName(k8sNode.Name); bmNodeFromCache {
if err := bmc.k8sClient.ReadCR(context.Background(), bmNodeName, "", bmNode); err != nil {
ll.Errorf("Unable to read Node %s: %v", bmNodeName, err)
return ctrl.Result{Requeue: true}, err
}
bmNodes = []nodecrd.Node{*bmNode}
}
if !bmNodeFromCache {
bmNodeCRs := new(nodecrd.NodeList)
if err := bmc.k8sClient.ReadList(context.Background(), bmNodeCRs); err != nil {
ll.Errorf("Unable to read Node CRs list: %v", err)
return ctrl.Result{Requeue: true}, err
}
bmNodes = bmNodeCRs.Items
}
matchedCRs := make([]string, 0)
for i := range bmNodes {
matchedAddresses := bmc.matchedAddressesCount(&bmNodes[i], k8sNode)
if len(bmNodes[i].Spec.Addresses) > 0 && matchedAddresses == len(bmNodes[i].Spec.Addresses) {
bmNode = &bmNodes[i]
matchedCRs = append(matchedCRs, bmNode.Name)
continue
}
if matchedAddresses > 0 {
ll.Errorf("There is Node %s that partially match k8s node %s. Node.Spec: %v, k8s node addresses: %v. "+
"Node Spec should be edited to match exactly one kubernetes node",
bmNodes[i].Name, k8sNode.Name, bmNodes[i].Spec, k8sNode.Status.Addresses)
return ctrl.Result{}, nil
}
}
if len(matchedCRs) > 1 {
ll.Errorf("More then one Node CR corresponds to the current k8s node (%d). Matched Node CRs: %v", len(matchedCRs), matchedCRs)
return ctrl.Result{}, nil
}
// create Node CR
if len(matchedCRs) == 0 {
id := bmc.constructNodeID(k8sNode)
bmNodeName := namePrefix + id
bmNode = bmc.k8sClient.ConstructCSIBMNodeCR(bmNodeName, api.Node{
UUID: id,
Addresses: bmc.constructAddresses(k8sNode),
})
bmNode.Finalizers = []string{csibmNodeFinalizer}
if err := bmc.k8sClient.CreateCR(context.Background(), bmNodeName, bmNode); err != nil {
ll.Errorf("Unable to create Node CR: %v", err)
return ctrl.Result{Requeue: true}, err
}
}
bmc.cache.put(k8sNode.Name, bmNode.Name)
return bmc.updateNodeLabelsAndAnnotation(k8sNode, bmNode.Spec.UUID)
}
func (bmc *Controller) reconcileForCSIBMNode(bmNode *nodecrd.Node) (ctrl.Result, error) {
ll := bmc.log.WithFields(logrus.Fields{
"method": "reconcileForCSIBMNode",
"name": bmNode.Name,
})
if len(bmNode.Spec.Addresses) == 0 {
err := errors.New("addresses are missing for current Node instance")
ll.Error(err)
return ctrl.Result{Requeue: false}, err
}
var (
k8sNode = &coreV1.Node{}
k8sNodeName string
k8sNodes []coreV1.Node
k8sNodeFromCache bool
)
// get corresponding k8s node name from cache
// note: plain assignment ("=") is required here; ":=" would shadow k8sNodeFromCache and force the list fallback below even on a cache hit
if k8sNodeName, k8sNodeFromCache = bmc.cache.getK8sNodeName(bmNode.Name); k8sNodeFromCache {
if err := bmc.k8sClient.ReadCR(context.Background(), k8sNodeName, "", k8sNode); err != nil {
ll.Errorf("Unable to read k8s node %s: %v", k8sNodeName, err)
return ctrl.Result{Requeue: true}, err
}
k8sNodes = []coreV1.Node{*k8sNode}
}
if !k8sNodeFromCache {
k8sNodeCRs := new(coreV1.NodeList)
if err := bmc.k8sClient.ReadList(context.Background(), k8sNodeCRs); err != nil {
ll.Errorf("Unable to read k8s nodes list: %v", err)
return ctrl.Result{Requeue: true}, err
}
k8sNodes = k8sNodeCRs.Items
}
matchedNodes := make([]string, 0)
for i := range k8sNodes {
matchedAddresses := bmc.matchedAddressesCount(bmNode, &k8sNodes[i])
if matchedAddresses == len(bmNode.Spec.Addresses) {
k8sNode = &k8sNodes[i]
matchedNodes = append(matchedNodes, k8sNode.Name)
continue
}
if matchedAddresses > 0 {
ll.Errorf("There is k8s node %s that partially match Node CR %s. Node.Spec: %v, k8s node addresses: %v",
k8sNodes[i].Name, bmNode.Name, bmNode.Spec, k8sNodes[i].Status.Addresses)
return ctrl.Result{}, nil
}
}
if !bmNode.GetDeletionTimestamp().IsZero() {
bmc.disableForNode(k8sNode.Name)
if err := bmc.removeLabelsAndAnnotation(k8sNode); err != nil {
ll.Errorf("Unable to remove annotations or labels from node %s: %v", k8sNode.Name, err)
bmc.enableForNode(k8sNode.Name)
return ctrl.Result{Requeue: true}, err
}
ll.Infof("Annotations and labels from node %s was removed. Removing finalizer from %s.", k8sNode.Name, bmNode.Name)
bmNode.Finalizers = nil
err := bmc.k8sClient.UpdateCR(context.Background(), bmNode)
if err != nil {
ll.Errorf("Unable to update Node %s: %v", bmNode.Name, err)
}
return ctrl.Result{}, err
}
if len(matchedNodes) == 1 {
bmc.cache.put(k8sNode.Name, bmNode.Name)
return bmc.updateNodeLabelsAndAnnotation(k8sNode, bmNode.Spec.UUID)
}
ll.Warnf("Unable to detect k8s node that corresponds to Node %v, matched nodes: %v", bmNode, matchedNodes)
return ctrl.Result{}, nil
}
// updateNodeLabelsAndAnnotation checks the node ID annotation value on the provided k8s Node and compares it with nodeUUID,
// then parses the OS Image info and puts/updates the os-name and os-version labels if needed
func (bmc *Controller) updateNodeLabelsAndAnnotation(k8sNode *coreV1.Node, nodeUUID string) (ctrl.Result, error) {
ll := bmc.log.WithField("method", "updateNodeLabelsAndAnnotation")
toUpdate := false
// check for annotations
val, ok := k8sNode.GetAnnotations()[bmc.annotationKey]
if bmc.externalAnnotation && !ok {
ll.Errorf("external annotaion %s is not accesible on node %s", bmc.annotationKey, k8sNode)
}
if !bmc.externalAnnotation && ok {
if val == nodeUUID {
ll.Tracef("%s value for node %s is already %s", bmc.annotationKey, k8sNode.Name, nodeUUID)
} else {
ll.Warnf("%s value for node %s is %s, however should have (according to corresponding Node's UUID) %s, going to update annotation's value.",
bmc.annotationKey, k8sNode.Name, val, nodeUUID)
k8sNode.ObjectMeta.Annotations[bmc.annotationKey] = nodeUUID
toUpdate = true
}
}
if !bmc.externalAnnotation && !ok {
ll.Errorf("annotaion %s is not accesible on node %s", bmc.annotationKey, k8sNode)
if k8sNode.ObjectMeta.Annotations == nil {
k8sNode.ObjectMeta.Annotations = make(map[string]string, 1)
}
k8sNode.ObjectMeta.Annotations[bmc.annotationKey] = nodeUUID
toUpdate = true
}
// initialize labels map if needed
if k8sNode.Labels == nil { | // check for OS labels
name, version, err := util.GetOSNameAndVersion(k8sNode.Status.NodeInfo.OSImage)
if err == nil {
// os name
if k8sNode.Labels[common.NodeOSNameLabelKey] != name {
// not set or differs
ll.Infof("Setting label %s=%s on node %s", common.NodeOSNameLabelKey, name, k8sNode.Name)
k8sNode.Labels[common.NodeOSNameLabelKey] = name
toUpdate = true
}
// os version
if k8sNode.Labels[common.NodeOSVersionLabelKey] != version {
// not set or differs
ll.Infof("Setting label %s=%s on node %s", common.NodeOSVersionLabelKey, version, k8sNode.Name)
k8sNode.Labels[common.NodeOSVersionLabelKey] = version
toUpdate = true
}
} else {
ll.Errorf("Failed to obtain OS information: %s", err)
}
// check for kernel version label
version, err = util.GetKernelVersion(k8sNode.Status.NodeInfo.KernelVersion)
if err == nil {
// kernel version
if k8sNode.Labels[common.NodeKernelVersionLabelKey] != version {
// not set or differs
ll.Infof("Setting label %s=%s on node %s", common.NodeKernelVersionLabelKey, version, k8sNode.Name)
k8sNode.Labels[common.NodeKernelVersionLabelKey] = version
toUpdate = true
if bmc.observer != nil {
bmc.observer.Notify(version)
}
}
} else {
ll.Errorf("Failed to obtain Kernel version information: %s", err)
}
if toUpdate {
if err := bmc.k8sClient.UpdateCR(context.Background(), k8sNode); err != nil {
ll.Errorf("Unable to update node object: %v", err)
return ctrl.Result{Requeue: true}, err
}
}
return ctrl.Result{}, nil
}
func (bmc *Controller) removeLabelsAndAnnotation(k8sNode *coreV1.Node) error {
toUpdate := false
// check annotations
annotations := k8sNode.GetAnnotations()
if _, ok := annotations[bmc.annotationKey]; ok {
if !bmc.externalAnnotation {
delete(annotations, bmc.annotationKey)
toUpdate = true
}
}
// check labels
labels := k8sNode.GetLabels()
// os name
if _, ok := labels[common.NodeOSNameLabelKey]; ok {
delete(labels, common.NodeOSNameLabelKey)
toUpdate = true
}
// os version
if _, ok := labels[common.NodeOSVersionLabelKey]; ok {
delete(labels, common.NodeOSVersionLabelKey)
toUpdate = true
}
// kernel version
if _, ok := labels[common.NodeKernelVersionLabelKey]; ok {
delete(labels, common.NodeKernelVersionLabelKey)
toUpdate = true
}
// external csi-provisioner label
// TODO https://github.com/dell/csi-baremetal/issues/319 Rework after operator implementation
if _, ok := labels[common.NodeIDTopologyLabelKey]; ok {
delete(labels, common.NodeIDTopologyLabelKey)
toUpdate = true
}
if toUpdate {
k8sNode.Annotations = annotations
k8sNode.Labels = labels
return bmc.k8sClient.UpdateCR(context.Background(), k8sNode)
}
return nil
}
// matchedAddressesCount returns the number of k8s node addresses that have a corresponding address in the bmNodeCR.Spec.Addresses map
func (bmc *Controller) matchedAddressesCount(bmNodeCR *nodecrd.Node, k8sNode *coreV1.Node) int {
matchedCount := 0
for _, addr := range k8sNode.Status.Addresses {
crAddr, ok := bmNodeCR.Spec.Addresses[string(addr.Type)]
if ok && crAddr == addr.Address {
matchedCount++
}
}
return matchedCount
}
// constructAddresses converts k8sNode.Status.Addresses into a map[string]string, key - address type, value - address
func (bmc *Controller) constructAddresses(k8sNode *coreV1.Node) map[string]string {
res := make(map[string]string, len(k8sNode.Status.Addresses))
for _, addr := range k8sNode.Status.Addresses {
res[string(addr.Type)] = addr.Address
}
return res
}
func (bmc *Controller) constructNodeID(k8sNode *coreV1.Node) string {
if bmc.externalAnnotation {
if val, ok := k8sNode.GetAnnotations()[bmc.annotationKey]; ok {
return val
}
}
return uuid.New().String()
}
|
k8sNode.ObjectMeta.Labels = make(map[string]string, 1)
}
| conditional_block |
controller.go | /*
Copyright © 2020 Dell Inc. or its subsidiaries. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package operator
import (
"context"
"errors"
"fmt"
"reflect"
"strings"
"sync"
"github.com/google/uuid"
"github.com/sirupsen/logrus"
coreV1 "k8s.io/api/core/v1"
k8sError "k8s.io/apimachinery/pkg/api/errors"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/controller"
"sigs.k8s.io/controller-runtime/pkg/event"
"sigs.k8s.io/controller-runtime/pkg/handler"
"sigs.k8s.io/controller-runtime/pkg/predicate"
"sigs.k8s.io/controller-runtime/pkg/source"
api "github.com/dell/csi-baremetal/api/generated/v1"
"github.com/dell/csi-baremetal/api/v1/nodecrd"
"github.com/dell/csi-baremetal/pkg/base/k8s"
"github.com/dell/csi-baremetal/pkg/base/util"
observer "github.com/dell/csi-baremetal/pkg/common"
"github.com/dell/csi-baremetal/pkg/crcontrollers/operator/common"
)
const (
// namePrefix it is a prefix for Node CR name
namePrefix = "csibmnode-"
// finalizer for Node custom resource
csibmNodeFinalizer = "dell.emc.csi/csibmnode-cleanup"
)
// Controller is a controller for Node CR
type Controller struct {
k8sClient *k8s.KubeClient
nodeSelector *label
cache nodesMapping
// holds k8s node names for which setting the node ID is enabled;
// it is used during Node CR deletion to avoid recreation
enabledForNode map[string]bool
enabledMu sync.RWMutex
observer observer.Observer
log *logrus.Entry
// whether external annotations are used
externalAnnotation bool
// holds the annotation key which contains the node UUID
annotationKey string
}
type label struct {
key string
value string
}
// nodesMapping is a non-thread-safe cache that holds the mapping between names of k8s node and CSIBMNode CR objects
type nodesMapping struct {
k8sToBMNode map[string]string // k8s node name to Node CR name
bmToK8sNode map[string]string // Node CR name to k8s node name
}
func (nc *nodesMapping) getK8sNodeName(bmNodeName string) (string, bool) {
res, ok := nc.bmToK8sNode[bmNodeName]
return res, ok
}
func (nc *nodesMapping) getCSIBMNodeName(k8sNodeName string) (string, bool) {
res, ok := nc.k8sToBMNode[k8sNodeName]
return res, ok
}
func (nc *nodesMapping) put(k8sNodeName, bmNodeName string) {
nc.k8sToBMNode[k8sNodeName] = bmNodeName
nc.bmToK8sNode[bmNodeName] = k8sNodeName
}
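// Usage sketch (illustrative, not part of the original file; names are
// hypothetical): put() keeps both maps in sync, so lookups then work in
// either direction.
//
//	cache := nodesMapping{
//		k8sToBMNode: map[string]string{},
//		bmToK8sNode: map[string]string{},
//	}
//	cache.put("worker-1", "csibmnode-1234")
//	k8sName, ok := cache.getK8sNodeName("csibmnode-1234")  // "worker-1", true
//	bmName, ok2 := cache.getCSIBMNodeName("worker-1")      // "csibmnode-1234", true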
// NewController returns instance of Controller
func NewController(nodeSelector string, useExternalAnnotation bool, nodeAnnotation string,
k8sClient *k8s.KubeClient, observer observer.Observer, logger *logrus.Logger) (*Controller, error) {
c := &Controller{
k8sClient: k8sClient,
cache: nodesMapping{
k8sToBMNode: make(map[string]string),
bmToK8sNode: make(map[string]string),
},
observer: observer,
enabledForNode: make(map[string]bool, 3), // small optimization: if the cluster has 3 worker nodes this map won't need to grow
log: logger.WithField("component", "Controller"),
externalAnnotation: useExternalAnnotation,
}
if nodeSelector != "" {
splitted := strings.Split(nodeSelector, ":")
if len(splitted) != 2 {
return nil, fmt.Errorf("unable to parse nodeSelector %s", nodeSelector)
}
c.nodeSelector = &label{key: splitted[0], value: splitted[1]}
c.log.Infof("Controller will be working with nodes that matched next selector: %v", c.nodeSelector)
}
if c.externalAnnotation {
c.annotationKey = nodeAnnotation
c.log.Infof("External annotation feature is enabled. Annotation: %s", c.annotationKey)
} else {
c.annotationKey = common.DeafultNodeIDAnnotationKey
c.log.Infof("External annotation feature is disabled. Annotation: %s", c.annotationKey)
}
return c, nil
}
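// Usage sketch (hypothetical arguments, not part of the original file):
//
//	c, err := NewController("app:baremetal-csi", false, "",
//		kubeClient, kernelObserver, logger)
//	if err != nil {
//		return err
//	}
//	// c now reconciles only k8s nodes labeled app=baremetal-csi and
//	// manages the default node-ID annotation itself.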
func (bmc *Controller) enableForNode(nodeName string) {
bmc.enabledMu.Lock()
bmc.enabledForNode[nodeName] = true
bmc.enabledMu.Unlock()
}
func (bmc *Controller) disableForNode(nodeName string) {
bmc.enabledMu.Lock()
bmc.enabledForNode[nodeName] = false
bmc.enabledMu.Unlock()
}
func (bmc *Controller) isEnabledForNode(nodeName string) bool {
var enabled, ok bool
bmc.enabledMu.RLock()
defer bmc.enabledMu.RUnlock()
if enabled, ok = bmc.enabledForNode[nodeName]; !ok {
return false
}
return enabled
}
func (bmc *Controller) isMatchSelector(k8sNode *coreV1.Node) bool {
if bmc.nodeSelector == nil {
return true
}
val, ok := k8sNode.GetLabels()[bmc.nodeSelector.key]
matched := ok && val == bmc.nodeSelector.value
bmc.log.WithField("method", "isMatchSelector").
Debugf("Node %s matches node selector %v: %v", k8sNode.Name, bmc.nodeSelector, matched)
return matched
}
// SetupWithManager registers Controller to k8s controller manager
func (bmc *Controller) SetupWithManager(m ctrl.Manager) error {
return ctrl.NewControllerManagedBy(m).
For(&nodecrd.Node{}). // primary resource
WithOptions(controller.Options{
MaxConcurrentReconciles: 1, // reconcile objects one at a time; concurrent reconciliation isn't supported
}).
Watches(&source.Kind{Type: &coreV1.Node{}}, &handler.EnqueueRequestForObject{}). // secondary resource
WithEventFilter(predicate.Funcs{
CreateFunc: func(e event.CreateEvent) bool {
if _, ok := e.Object.(*nodecrd.Node); ok {
return true
}
k8sNode, ok := e.Object.(*coreV1.Node)
if !ok || !bmc.isMatchSelector(k8sNode) {
return false
}
bmc.enableForNode(k8sNode.Name)
return true
},
UpdateFunc: func(e event.UpdateEvent) bool {
if _, ok := e.ObjectOld.(*nodecrd.Node); ok {
return true
}
nodeOld, ok := e.ObjectOld.(*coreV1.Node)
if !ok {
return false
}
nodeNew := e.ObjectNew.(*coreV1.Node)
if !bmc.isMatchSelector(nodeNew) {
return false
}
if !bmc.isEnabledForNode(nodeNew.Name) {
bmc.enableForNode(nodeNew.Name)
}
annotationAreTheSame := reflect.DeepEqual(nodeOld.GetAnnotations(), nodeNew.GetAnnotations())
addressesAreTheSame := reflect.DeepEqual(nodeOld.Status.Addresses, nodeNew.Status.Addresses)
labelsAreTheSame := bmc.nodeSelector == nil || reflect.DeepEqual(nodeOld.GetLabels(), nodeNew.GetLabels())
return !annotationAreTheSame || !addressesAreTheSame || !labelsAreTheSame
},
}).
Complete(bmc)
}
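// Behavior sketch for the predicate above (illustrative): a k8s node
// update that only touches Status.Conditions is filtered out, because
// annotations, addresses, and (when a selector is configured) labels are
// all unchanged; an update that adds an address or edits the node-ID
// annotation passes the filter and triggers a Reconcile.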
// Reconcile reconciles Node CR and k8s Node objects
// it first determines which object the current Reconcile was triggered for and then runs the corresponding reconciliation method
func (bmc *Controller) Reconcile(req ctrl.Request) (ctrl.Result, error) {
ll := bmc.log.WithFields(logrus.Fields{
"method": "Reconcile",
"name": req.Name,
})
var err error
// if the name in the request doesn't start with namePrefix, the controller tries to read the k8s node object first;
// however, if it gets a NotFound error it tries to read the Node object as well
if !strings.HasPrefix(req.Name, namePrefix) {
k8sNode := new(coreV1.Node)
err = bmc.k8sClient.ReadCR(context.Background(), req.Name, "", k8sNode)
switch {
case err == nil:
ll.Infof("Reconcile k8s node %s", k8sNode.Name)
return bmc.reconcileForK8sNode(k8sNode)
case !k8sError.IsNotFound(err):
ll.Errorf("Unable to read node object: %v", err)
return ctrl.Result{Requeue: true}, err
}
}
// try to read Node
bmNode := new(nodecrd.Node)
err = bmc.k8sClient.ReadCR(context.Background(), req.Name, "", bmNode)
switch {
case err == nil:
ll.Infof("Reconcile Node %s", bmNode.Name)
return bmc.reconcileForCSIBMNode(bmNode)
case !k8sError.IsNotFound(err):
ll.Errorf("Unable to read Node object: %v", err)
return ctrl.Result{Requeue: true}, err
}
ll.Warnf("unable to detect for which object (%s) that reconcile is. The object may have been deleted", req.String())
return ctrl.Result{}, nil
}
func (bmc *Controller) reconcileForK8sNode(k8sNode *coreV1.Node) (ctrl.Result, error) {
ll := bmc.log.WithFields(logrus.Fields{
"method": "reconcileForK8sNode",
"name": k8sNode.Name,
})
if len(k8sNode.Status.Addresses) == 0 {
err := errors.New("addresses are missing for current k8s node instance")
ll.Error(err)
return ctrl.Result{Requeue: false}, err
}
var (
bmNode = &nodecrd.Node{}
bmNodeFromCache bool
bmNodeName string
bmNodes []nodecrd.Node
)
// get corresponding Node CR name from cache
if bmNodeName, bmNodeFromCache = bmc.cache.getCSIBMNodeName(k8sNode.Name); bmNodeFromCache {
if err := bmc.k8sClient.ReadCR(context.Background(), bmNodeName, "", bmNode); err != nil {
ll.Errorf("Unable to read Node %s: %v", bmNodeName, err)
return ctrl.Result{Requeue: true}, err
}
bmNodes = []nodecrd.Node{*bmNode}
}
if !bmNodeFromCache {
bmNodeCRs := new(nodecrd.NodeList)
if err := bmc.k8sClient.ReadList(context.Background(), bmNodeCRs); err != nil {
ll.Errorf("Unable to read Node CRs list: %v", err)
return ctrl.Result{Requeue: true}, err
}
bmNodes = bmNodeCRs.Items
}
matchedCRs := make([]string, 0)
for i := range bmNodes {
matchedAddresses := bmc.matchedAddressesCount(&bmNodes[i], k8sNode)
if len(bmNodes[i].Spec.Addresses) > 0 && matchedAddresses == len(bmNodes[i].Spec.Addresses) {
bmNode = &bmNodes[i]
matchedCRs = append(matchedCRs, bmNode.Name)
continue
}
if matchedAddresses > 0 {
ll.Errorf("There is Node %s that partially match k8s node %s. Node.Spec: %v, k8s node addresses: %v. "+
"Node Spec should be edited to match exactly one kubernetes node",
bmNodes[i].Name, k8sNode.Name, bmNodes[i].Spec, k8sNode.Status.Addresses)
return ctrl.Result{}, nil
}
}
if len(matchedCRs) > 1 {
ll.Errorf("More then one Node CR corresponds to the current k8s node (%d). Matched Node CRs: %v", len(matchedCRs), matchedCRs)
return ctrl.Result{}, nil
}
// create Node CR
if len(matchedCRs) == 0 {
id := bmc.constructNodeID(k8sNode)
bmNodeName := namePrefix + id
bmNode = bmc.k8sClient.ConstructCSIBMNodeCR(bmNodeName, api.Node{
UUID: id,
Addresses: bmc.constructAddresses(k8sNode),
})
bmNode.Finalizers = []string{csibmNodeFinalizer}
if err := bmc.k8sClient.CreateCR(context.Background(), bmNodeName, bmNode); err != nil {
ll.Errorf("Unable to create Node CR: %v", err)
return ctrl.Result{Requeue: true}, err
}
}
bmc.cache.put(k8sNode.Name, bmNode.Name)
return bmc.updateNodeLabelsAndAnnotation(k8sNode, bmNode.Spec.UUID)
}
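// Example outcome (hypothetical values): a k8s node with addresses
// {InternalIP: 10.0.0.5, Hostname: worker-1} and no matching CR results
// in a new Node CR named "csibmnode-<uuid>" whose Spec.Addresses mirrors
// that map and which carries the csibmNodeFinalizer finalizer.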
func (bmc *Controller) reconcileForCSIBMNode(bmNode *nodecrd.Node) (ctrl.Result, error) {
ll := bmc.log.WithFields(logrus.Fields{
"method": "reconcileForCSIBMNode",
"name": bmNode.Name,
})
if len(bmNode.Spec.Addresses) == 0 {
err := errors.New("addresses are missing for current Node instance")
ll.Error(err)
return ctrl.Result{Requeue: false}, err
}
var (
k8sNode = &coreV1.Node{}
k8sNodes []coreV1.Node
k8sNodeName string
k8sNodeFromCache bool
)
// get corresponding k8s node name from cache;
// use plain assignment (not :=) so the cache-hit flag is not shadowed
// and stays visible to the !k8sNodeFromCache check below
if k8sNodeName, k8sNodeFromCache = bmc.cache.getK8sNodeName(bmNode.Name); k8sNodeFromCache {
if err := bmc.k8sClient.ReadCR(context.Background(), k8sNodeName, "", k8sNode); err != nil {
ll.Errorf("Unable to read k8s node %s: %v", k8sNodeName, err)
return ctrl.Result{Requeue: true}, err
}
k8sNodes = []coreV1.Node{*k8sNode}
}
if !k8sNodeFromCache {
k8sNodeCRs := new(coreV1.NodeList)
if err := bmc.k8sClient.ReadList(context.Background(), k8sNodeCRs); err != nil {
ll.Errorf("Unable to read k8s nodes list: %v", err)
return ctrl.Result{Requeue: true}, err
}
k8sNodes = k8sNodeCRs.Items
}
matchedNodes := make([]string, 0)
for i := range k8sNodes {
matchedAddresses := bmc.matchedAddressesCount(bmNode, &k8sNodes[i])
if matchedAddresses == len(bmNode.Spec.Addresses) {
k8sNode = &k8sNodes[i]
matchedNodes = append(matchedNodes, k8sNode.Name)
continue
}
if matchedAddresses > 0 {
ll.Errorf("There is k8s node %s that partially match Node CR %s. Node.Spec: %v, k8s node addresses: %v",
k8sNodes[i].Name, bmNode.Name, bmNode.Spec, k8sNodes[i].Status.Addresses)
return ctrl.Result{}, nil
}
}
if !bmNode.GetDeletionTimestamp().IsZero() {
bmc.disableForNode(k8sNode.Name)
if err := bmc.removeLabelsAndAnnotation(k8sNode); err != nil {
ll.Errorf("Unable to remove annotations or labels from node %s: %v", k8sNode.Name, err)
bmc.enableForNode(k8sNode.Name)
return ctrl.Result{Requeue: true}, err
}
ll.Infof("Annotations and labels from node %s was removed. Removing finalizer from %s.", k8sNode.Name, bmNode.Name)
bmNode.Finalizers = nil
err := bmc.k8sClient.UpdateCR(context.Background(), bmNode)
if err != nil {
ll.Errorf("Unable to update Node %s: %v", bmNode.Name, err)
}
return ctrl.Result{}, err
}
if len(matchedNodes) == 1 {
bmc.cache.put(k8sNode.Name, bmNode.Name)
return bmc.updateNodeLabelsAndAnnotation(k8sNode, bmNode.Spec.UUID)
}
ll.Warnf("Unable to detect k8s node that corresponds to Node %v, matched nodes: %v", bmNode, matchedNodes)
return ctrl.Result{}, nil
}
// updateNodeLabelsAndAnnotation checks the nodeIDAnnotationKey annotation value for the provided k8s Node and compares that value with the given node UUID;
// it also parses the OS image info and puts/updates the os-name and os-version labels if needed
func (bmc *Controller) updateNodeLabelsAndAnnotation(k8sNode *coreV1.Node, nodeUUID string) (ctrl.Result, error) {
ll := bmc.log.WithField("method", "updateNodeLabelsAndAnnotation")
toUpdate := false
// check for annotations
val, ok := k8sNode.GetAnnotations()[bmc.annotationKey]
if bmc.externalAnnotation && !ok {
ll.Errorf("external annotaion %s is not accesible on node %s", bmc.annotationKey, k8sNode)
}
if !bmc.externalAnnotation && ok {
if val == nodeUUID {
ll.Tracef("%s value for node %s is already %s", bmc.annotationKey, k8sNode.Name, nodeUUID)
} else {
ll.Warnf("%s value for node %s is %s, however should have (according to corresponding Node's UUID) %s, going to update annotation's value.",
bmc.annotationKey, k8sNode.Name, val, nodeUUID)
k8sNode.ObjectMeta.Annotations[bmc.annotationKey] = nodeUUID
toUpdate = true
}
}
if !bmc.externalAnnotation && !ok {
ll.Errorf("annotaion %s is not accesible on node %s", bmc.annotationKey, k8sNode)
if k8sNode.ObjectMeta.Annotations == nil {
k8sNode.ObjectMeta.Annotations = make(map[string]string, 1)
}
k8sNode.ObjectMeta.Annotations[bmc.annotationKey] = nodeUUID
toUpdate = true
}
// initialize labels map if needed
if k8sNode.Labels == nil {
k8sNode.ObjectMeta.Labels = make(map[string]string, 1)
}
// check for OS labels
name, version, err := util.GetOSNameAndVersion(k8sNode.Status.NodeInfo.OSImage)
if err == nil {
// os name
if k8sNode.Labels[common.NodeOSNameLabelKey] != name {
// not set or differs
ll.Infof("Setting label %s=%s on node %s", common.NodeOSNameLabelKey, name, k8sNode.Name)
k8sNode.Labels[common.NodeOSNameLabelKey] = name
toUpdate = true
}
// os version
if k8sNode.Labels[common.NodeOSVersionLabelKey] != version {
// not set or differs
ll.Infof("Setting label %s=%s on node %s", common.NodeOSVersionLabelKey, version, k8sNode.Name)
k8sNode.Labels[common.NodeOSVersionLabelKey] = version
toUpdate = true
}
} else {
ll.Errorf("Failed to obtain OS information: %s", err)
}
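// illustrative example (hypothetical parse result): an OSImage of
// "Ubuntu 18.04.5 LTS" yielding name="ubuntu" and version="18.04"
// would produce the os-name and os-version labels set above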
// check for kernel version label
version, err = util.GetKernelVersion(k8sNode.Status.NodeInfo.KernelVersion)
if err == nil {
// kernel version
if k8sNode.Labels[common.NodeKernelVersionLabelKey] != version {
// not set or differs
ll.Infof("Setting label %s=%s on node %s", common.NodeKernelVersionLabelKey, version, k8sNode.Name)
k8sNode.Labels[common.NodeKernelVersionLabelKey] = version
toUpdate = true
if bmc.observer != nil {
bmc.observer.Notify(version)
}
}
} else {
ll.Errorf("Failed to obtain Kernel version information: %s", err)
}
if toUpdate {
if err := bmc.k8sClient.UpdateCR(context.Background(), k8sNode); err != nil {
ll.Errorf("Unable to update node object: %v", err)
return ctrl.Result{Requeue: true}, err
}
}
return ctrl.Result{}, nil
}
func (bmc *Controller) removeLabelsAndAnnotation(k8sNode *coreV1.Node) error {
toUpdate := false
// check annotations
annotations := k8sNode.GetAnnotations()
if _, ok := annotations[bmc.annotationKey]; ok {
if !bmc.externalAnnotation {
delete(annotations, bmc.annotationKey)
toUpdate = true
}
}
// check labels
labels := k8sNode.GetLabels()
// os name
if _, ok := labels[common.NodeOSNameLabelKey]; ok {
delete(labels, common.NodeOSNameLabelKey)
toUpdate = true
}
// os version
if _, ok := labels[common.NodeOSVersionLabelKey]; ok {
delete(labels, common.NodeOSVersionLabelKey)
toUpdate = true
}
// kernel version
if _, ok := labels[common.NodeKernelVersionLabelKey]; ok {
delete(labels, common.NodeKernelVersionLabelKey)
toUpdate = true
}
// external csi-provisioner label
// TODO https://github.com/dell/csi-baremetal/issues/319 Rework after operator implementation
if _, ok := labels[common.NodeIDTopologyLabelKey]; ok {
delete(labels, common.NodeIDTopologyLabelKey)
toUpdate = true
}
if toUpdate {
k8sNode.Annotations = annotations
k8sNode.Labels = labels
return bmc.k8sClient.UpdateCR(context.Background(), k8sNode)
}
return nil
}
// matchedAddressesCount returns the number of k8s node addresses that have a corresponding address in the bmNodeCR.Spec.Addresses map
func (bmc *Controller) matchedAddressesCount(bmNodeCR *nodecrd.Node, k8sNode *coreV1.Node) int {
matchedCount := 0
for _, addr := range k8sNode.Status.Addresses {
crAddr, ok := bmNodeCR.Spec.Addresses[string(addr.Type)]
if ok && crAddr == addr.Address {
matchedCount++
}
}
return matchedCount
}
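// Worked example (hypothetical): with bmNodeCR.Spec.Addresses =
// {"InternalIP": "10.0.0.5", "Hostname": "worker-1"} and k8s addresses
// [{InternalIP 10.0.0.5} {Hostname worker-1} {ExternalIP 1.2.3.4}],
// the function returns 2, which equals len(Spec.Addresses), i.e. a full match.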
// constructAddresses converts k8sNode.Status.Addresses into a map[string]string, key - address type, value - address
func (bmc *Controller) constructAddresses(k8sNode *coreV1.Node) map[string]string {
res := make(map[string]string, len(k8sNode.Status.Addresses))
for _, addr := range k8sNode.Status.Addresses {
res[string(addr.Type)] = addr.Address
}
return res
}
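// e.g. (hypothetical) [{InternalIP 10.0.0.5} {Hostname worker-1}] becomes
// map[string]string{"InternalIP": "10.0.0.5", "Hostname": "worker-1"}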
func (bmc *Controller) c | k8sNode *coreV1.Node) string {
if bmc.externalAnnotation {
if val, ok := k8sNode.GetAnnotations()[bmc.annotationKey]; ok {
return val
}
}
return uuid.New().String()
}
| onstructNodeID( | identifier_name |
svh_visitor.rs | // Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// FIXME (#14132): Even this SVH computation still has implementation
// artifacts: namely, the order of item declaration will affect the
// hash computation, but for many kinds of items the order of
// declaration should be irrelevant to the ABI.
pub use self::SawExprComponent::*;
pub use self::SawStmtComponent::*;
use self::SawAbiComponent::*;
use syntax::ast::{self, Name, NodeId};
use syntax::parse::token;
use syntax_pos::Span;
use rustc::hir;
use rustc::hir::*;
use rustc::hir::def::{Def, PathResolution};
use rustc::hir::def_id::DefId;
use rustc::hir::intravisit as visit;
use rustc::hir::intravisit::{Visitor, FnKind};
use rustc::ty::TyCtxt;
use std::hash::{Hash, SipHasher};
use super::def_path_hash::DefPathHashes;
pub struct StrictVersionHashVisitor<'a, 'hash: 'a, 'tcx: 'hash> {
pub tcx: TyCtxt<'hash, 'tcx, 'tcx>,
pub st: &'a mut SipHasher,
// collect a deterministic hash of def-ids that we have seen
def_path_hashes: &'a mut DefPathHashes<'hash, 'tcx>,
}
impl<'a, 'hash, 'tcx> StrictVersionHashVisitor<'a, 'hash, 'tcx> {
pub fn new(st: &'a mut SipHasher,
tcx: TyCtxt<'hash, 'tcx, 'tcx>,
def_path_hashes: &'a mut DefPathHashes<'hash, 'tcx>)
-> Self {
StrictVersionHashVisitor { st: st, tcx: tcx, def_path_hashes: def_path_hashes }
}
fn compute_def_id_hash(&mut self, def_id: DefId) -> u64 {
self.def_path_hashes.hash(def_id)
}
}
// To off-load the bulk of the hash-computation on #[derive(Hash)],
// we define a set of enums corresponding to the content that our
// crate visitor will encounter as it traverses the ast.
//
// The important invariant is that all of the Saw*Component enums
// do not carry any Spans, Names, or Idents.
//
// Not carrying any Names/Idents is the important fix for the problem
// noted on PR #13948: using the ident.name as the basis for a
// hash leads to unstable SVH, because ident.name is just an index
// into the intern table (i.e. essentially a random address), not
// computed from the name content.
//
// With the below enums, the SVH computation is not sensitive to
// artifacts of how rustc was invoked nor of how the source code
// was laid out. (Or at least it is *less* sensitive.)
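// Illustrative sketch (not part of the original file): hashing
// `name.as_str()` feeds the characters of the identifier (e.g. "foo")
// into the SipHasher and is therefore stable across compilations;
// hashing the raw interned `Name` would feed in an intern-table index
// that depends on interning order and could differ between otherwise
// identical builds.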
// This enum represents the different potential bits of code the
// visitor could encounter that could affect the ABI for the crate,
// and assigns each a distinct tag to feed into the hash computation.
#[derive(Hash)]
enum SawAbiComponent<'a> {
// FIXME (#14132): should we include (some function of)
// ident.ctxt as well?
SawIdent(token::InternedString),
SawStructDef(token::InternedString),
SawLifetime(token::InternedString),
SawLifetimeDef(token::InternedString),
SawMod,
SawForeignItem,
SawItem, | SawGenerics,
SawFn,
SawTraitItem,
SawImplItem,
SawStructField,
SawVariant,
SawPath,
SawBlock,
SawPat,
SawLocal,
SawArm,
SawExpr(SawExprComponent<'a>),
SawStmt(SawStmtComponent),
}
/// SawExprComponent carries all of the information that we want
/// to include in the hash that *won't* be covered by the
/// subsequent recursive traversal of the expression's
/// substructure by the visitor.
///
/// We know every Expr_ variant is covered by a variant because
/// `fn saw_expr` maps each to some case below. Ensuring that
/// each variant carries an appropriate payload has to be verified
/// by hand.
///
/// (However, getting that *exactly* right is not so important
/// because the SVH is just a developer convenience; there is no
/// guarantee of collision-freedom, hash collisions are just
/// (hopefully) unlikely.)
#[derive(Hash)]
pub enum SawExprComponent<'a> {
SawExprLoop(Option<token::InternedString>),
SawExprField(token::InternedString),
SawExprTupField(usize),
SawExprBreak(Option<token::InternedString>),
SawExprAgain(Option<token::InternedString>),
SawExprBox,
SawExprVec,
SawExprCall,
SawExprMethodCall,
SawExprTup,
SawExprBinary(hir::BinOp_),
SawExprUnary(hir::UnOp),
SawExprLit(ast::LitKind),
SawExprCast,
SawExprType,
SawExprIf,
SawExprWhile,
SawExprMatch,
SawExprClosure,
SawExprBlock,
SawExprAssign,
SawExprAssignOp(hir::BinOp_),
SawExprIndex,
SawExprPath(Option<usize>),
SawExprAddrOf(hir::Mutability),
SawExprRet,
SawExprInlineAsm(&'a hir::InlineAsm),
SawExprStruct,
SawExprRepeat,
}
fn saw_expr<'a>(node: &'a Expr_) -> SawExprComponent<'a> {
match *node {
ExprBox(..) => SawExprBox,
ExprVec(..) => SawExprVec,
ExprCall(..) => SawExprCall,
ExprMethodCall(..) => SawExprMethodCall,
ExprTup(..) => SawExprTup,
ExprBinary(op, _, _) => SawExprBinary(op.node),
ExprUnary(op, _) => SawExprUnary(op),
ExprLit(ref lit) => SawExprLit(lit.node.clone()),
ExprCast(..) => SawExprCast,
ExprType(..) => SawExprType,
ExprIf(..) => SawExprIf,
ExprWhile(..) => SawExprWhile,
ExprLoop(_, id) => SawExprLoop(id.map(|id| id.node.as_str())),
ExprMatch(..) => SawExprMatch,
ExprClosure(..) => SawExprClosure,
ExprBlock(..) => SawExprBlock,
ExprAssign(..) => SawExprAssign,
ExprAssignOp(op, _, _) => SawExprAssignOp(op.node),
ExprField(_, name) => SawExprField(name.node.as_str()),
ExprTupField(_, id) => SawExprTupField(id.node),
ExprIndex(..) => SawExprIndex,
ExprPath(ref qself, _) => SawExprPath(qself.as_ref().map(|q| q.position)),
ExprAddrOf(m, _) => SawExprAddrOf(m),
ExprBreak(id) => SawExprBreak(id.map(|id| id.node.as_str())),
ExprAgain(id) => SawExprAgain(id.map(|id| id.node.as_str())),
ExprRet(..) => SawExprRet,
ExprInlineAsm(ref a,_,_) => SawExprInlineAsm(a),
ExprStruct(..) => SawExprStruct,
ExprRepeat(..) => SawExprRepeat,
}
}
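// For example (illustrative; variant names follow the enums above):
// visiting `a + b` contributes SawExpr(SawExprBinary(BiAdd)) to the
// hash here, while the operands `a` and `b` are covered by the
// visitor's recursive sub-walks.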
/// SawStmtComponent is analogous to SawExprComponent, but for statements.
#[derive(Hash)]
pub enum SawStmtComponent {
SawStmtExpr,
SawStmtSemi,
}
impl<'a, 'hash, 'tcx> Visitor<'tcx> for StrictVersionHashVisitor<'a, 'hash, 'tcx> {
fn visit_nested_item(&mut self, _: ItemId) {
// Each item is hashed independently; ignore nested items.
}
fn visit_variant_data(&mut self, s: &'tcx VariantData, name: Name,
g: &'tcx Generics, _: NodeId, _: Span) {
debug!("visit_variant_data: st={:?}", self.st);
SawStructDef(name.as_str()).hash(self.st);
visit::walk_generics(self, g);
visit::walk_struct_def(self, s)
}
fn visit_variant(&mut self, v: &'tcx Variant, g: &'tcx Generics, item_id: NodeId) {
debug!("visit_variant: st={:?}", self.st);
SawVariant.hash(self.st);
// walk_variant does not call walk_generics, so do it here.
visit::walk_generics(self, g);
visit::walk_variant(self, v, g, item_id)
}
// All of the remaining methods just record (in the hash
// SipHasher) that the visitor saw that particular variant
// (with its payload), and continue walking as the default
// visitor would.
//
// Some of the implementations have some notes as to how one
// might try to make their SVH computation less discerning
// (e.g. by incorporating reachability analysis). But
// currently all of their implementations are uniform and
// uninteresting.
//
// (If you edit a method such that it deviates from the
// pattern, please move that method up above this comment.)
fn visit_name(&mut self, _: Span, name: Name) {
debug!("visit_name: st={:?}", self.st);
SawIdent(name.as_str()).hash(self.st);
}
fn visit_lifetime(&mut self, l: &'tcx Lifetime) {
debug!("visit_lifetime: st={:?}", self.st);
SawLifetime(l.name.as_str()).hash(self.st);
}
fn visit_lifetime_def(&mut self, l: &'tcx LifetimeDef) {
debug!("visit_lifetime_def: st={:?}", self.st);
SawLifetimeDef(l.lifetime.name.as_str()).hash(self.st);
}
// We do recursively walk the bodies of functions/methods
// (rather than omitting their bodies from the hash) since
// monomorphization and cross-crate inlining generally imply
// that a change to a crate body will require downstream
// crates to be recompiled.
fn visit_expr(&mut self, ex: &'tcx Expr) {
debug!("visit_expr: st={:?}", self.st);
SawExpr(saw_expr(&ex.node)).hash(self.st); visit::walk_expr(self, ex)
}
fn visit_stmt(&mut self, s: &'tcx Stmt) {
debug!("visit_stmt: st={:?}", self.st);
// We don't want to modify the hash for decls, because
// they might be item decls (if they are local decls,
// we'll hash that fact in visit_local); but we do want to
// remember if this was a StmtExpr or StmtSemi (the latter
// has an explicit semicolon; this affects the typing
// rules).
match s.node {
StmtDecl(..) => (),
StmtExpr(..) => SawStmt(SawStmtExpr).hash(self.st),
StmtSemi(..) => SawStmt(SawStmtSemi).hash(self.st),
}
visit::walk_stmt(self, s)
}
fn visit_foreign_item(&mut self, i: &'tcx ForeignItem) {
debug!("visit_foreign_item: st={:?}", self.st);
// FIXME (#14132) ideally we would incorporate privacy (or
// perhaps reachability) somewhere here, so foreign items
// that do not leak into downstream crates would not be
// part of the ABI.
SawForeignItem.hash(self.st); visit::walk_foreign_item(self, i)
}
fn visit_item(&mut self, i: &'tcx Item) {
debug!("visit_item: {:?} st={:?}", i, self.st);
// FIXME (#14132) ideally would incorporate reachability
// analysis somewhere here, so items that never leak into
// downstream crates (e.g. via monomorphisation or
// inlining) would not be part of the ABI.
SawItem.hash(self.st); visit::walk_item(self, i)
}
fn visit_mod(&mut self, m: &'tcx Mod, _s: Span, n: NodeId) {
debug!("visit_mod: st={:?}", self.st);
SawMod.hash(self.st); visit::walk_mod(self, m, n)
}
fn visit_ty(&mut self, t: &'tcx Ty) {
debug!("visit_ty: st={:?}", self.st);
SawTy.hash(self.st); visit::walk_ty(self, t)
}
fn visit_generics(&mut self, g: &'tcx Generics) {
debug!("visit_generics: st={:?}", self.st);
SawGenerics.hash(self.st); visit::walk_generics(self, g)
}
fn visit_fn(&mut self, fk: FnKind<'tcx>, fd: &'tcx FnDecl,
b: &'tcx Block, s: Span, n: NodeId) {
debug!("visit_fn: st={:?}", self.st);
SawFn.hash(self.st); visit::walk_fn(self, fk, fd, b, s, n)
}
fn visit_trait_item(&mut self, ti: &'tcx TraitItem) {
debug!("visit_trait_item: st={:?}", self.st);
SawTraitItem.hash(self.st); visit::walk_trait_item(self, ti)
}
fn visit_impl_item(&mut self, ii: &'tcx ImplItem) {
debug!("visit_impl_item: st={:?}", self.st);
SawImplItem.hash(self.st); visit::walk_impl_item(self, ii)
}
fn visit_struct_field(&mut self, s: &'tcx StructField) {
debug!("visit_struct_field: st={:?}", self.st);
SawStructField.hash(self.st); visit::walk_struct_field(self, s)
}
fn visit_path(&mut self, path: &'tcx Path, _: ast::NodeId) {
debug!("visit_path: st={:?}", self.st);
SawPath.hash(self.st); visit::walk_path(self, path)
}
fn visit_block(&mut self, b: &'tcx Block) {
debug!("visit_block: st={:?}", self.st);
SawBlock.hash(self.st); visit::walk_block(self, b)
}
fn visit_pat(&mut self, p: &'tcx Pat) {
debug!("visit_pat: st={:?}", self.st);
SawPat.hash(self.st); visit::walk_pat(self, p)
}
fn visit_local(&mut self, l: &'tcx Local) {
debug!("visit_local: st={:?}", self.st);
SawLocal.hash(self.st); visit::walk_local(self, l)
}
fn visit_arm(&mut self, a: &'tcx Arm) {
debug!("visit_arm: st={:?}", self.st);
SawArm.hash(self.st); visit::walk_arm(self, a)
}
fn visit_id(&mut self, id: NodeId) {
debug!("visit_id: id={} st={:?}", id, self.st);
self.hash_resolve(id);
}
}
#[derive(Hash)]
pub enum DefHash {
SawDefId,
SawLabel,
SawPrimTy,
SawSelfTy,
SawErr,
}
impl<'a, 'hash, 'tcx> StrictVersionHashVisitor<'a, 'hash, 'tcx> {
fn hash_resolve(&mut self, id: ast::NodeId) {
// Because whether or not a given id has an entry is dependent
// solely on expr variant etc, we don't need to hash whether
// or not an entry was present (we are already hashing what
// variant it is above when we visit the HIR).
if let Some(def) = self.tcx.def_map.borrow().get(&id) {
debug!("hash_resolve: id={:?} def={:?} st={:?}", id, def, self.st);
self.hash_partial_def(def);
}
if let Some(traits) = self.tcx.trait_map.get(&id) {
debug!("hash_resolve: id={:?} traits={:?} st={:?}", id, traits, self.st);
traits.len().hash(self.st);
// The ordering of the candidates is not fixed. So we hash
// the def-ids and then sort them and hash the collection.
let mut candidates: Vec<_> =
traits.iter()
.map(|&TraitCandidate { def_id, import_id: _ }| {
self.compute_def_id_hash(def_id)
})
.collect();
candidates.sort();
candidates.hash(self.st);
}
}
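// Sketch of the order-independence trick above (hypothetical numbers):
// trait candidates whose def-ids hash to [9, 2, 5] in discovery order
// are sorted to [2, 5, 9] before being fed to the hasher, so two
// compilations that enumerate the same candidates in different orders
// still produce identical hashes.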
fn hash_def_id(&mut self, def_id: DefId) {
self.compute_def_id_hash(def_id).hash(self.st);
}
fn hash_partial_def(&mut self, def: &PathResolution) {
self.hash_def(def.base_def);
def.depth.hash(self.st);
}
fn hash_def(&mut self, def: Def) {
match def {
// Crucial point: for all of these variants, the variant +
// add'l data that is added is always the same if the
// def-id is the same, so it suffices to hash the def-id
Def::Fn(..) |
Def::Mod(..) |
Def::ForeignMod(..) |
Def::Static(..) |
Def::Variant(..) |
Def::Enum(..) |
Def::TyAlias(..) |
Def::AssociatedTy(..) |
Def::TyParam(..) |
Def::Struct(..) |
Def::Trait(..) |
Def::Method(..) |
Def::Const(..) |
Def::AssociatedConst(..) |
Def::Local(..) |
Def::Upvar(..) => {
DefHash::SawDefId.hash(self.st);
self.hash_def_id(def.def_id());
}
Def::Label(..) => {
DefHash::SawLabel.hash(self.st);
// we don't encode the `id` because it always refers to something
// within this item, so if it changed, there would have to be other
// changes too
}
Def::PrimTy(ref prim_ty) => {
DefHash::SawPrimTy.hash(self.st);
prim_ty.hash(self.st);
}
Def::SelfTy(..) => {
DefHash::SawSelfTy.hash(self.st);
// the meaning of Self is always the same within a
// given context, so we don't need to hash the other
// fields
}
Def::Err => {
DefHash::SawErr.hash(self.st);
}
}
}
} | SawTy, | random_line_split |
svh_visitor.rs | // Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// FIXME (#14132): Even this SVH computation still has implementation
// artifacts: namely, the order of item declaration will affect the
// hash computation, but for many kinds of items the order of
// declaration should be irrelevant to the ABI.
pub use self::SawExprComponent::*;
pub use self::SawStmtComponent::*;
use self::SawAbiComponent::*;
use syntax::ast::{self, Name, NodeId};
use syntax::parse::token;
use syntax_pos::Span;
use rustc::hir;
use rustc::hir::*;
use rustc::hir::def::{Def, PathResolution};
use rustc::hir::def_id::DefId;
use rustc::hir::intravisit as visit;
use rustc::hir::intravisit::{Visitor, FnKind};
use rustc::ty::TyCtxt;
use std::hash::{Hash, SipHasher};
use super::def_path_hash::DefPathHashes;
pub struct StrictVersionHashVisitor<'a, 'hash: 'a, 'tcx: 'hash> {
pub tcx: TyCtxt<'hash, 'tcx, 'tcx>,
pub st: &'a mut SipHasher,
// collect a deterministic hash of def-ids that we have seen
def_path_hashes: &'a mut DefPathHashes<'hash, 'tcx>,
}
impl<'a, 'hash, 'tcx> StrictVersionHashVisitor<'a, 'hash, 'tcx> {
pub fn new(st: &'a mut SipHasher,
tcx: TyCtxt<'hash, 'tcx, 'tcx>,
def_path_hashes: &'a mut DefPathHashes<'hash, 'tcx>)
-> Self {
StrictVersionHashVisitor { st: st, tcx: tcx, def_path_hashes: def_path_hashes }
}
fn compute_def_id_hash(&mut self, def_id: DefId) -> u64 {
self.def_path_hashes.hash(def_id)
}
}
// To off-load the bulk of the hash-computation on #[derive(Hash)],
// we define a set of enums corresponding to the content that our
// crate visitor will encounter as it traverses the ast.
//
// The important invariant is that all of the Saw*Component enums
// do not carry any Spans, Names, or Idents.
//
// Not carrying any Names/Idents is the important fix for the problem
// noted on PR #13948: using the ident.name as the basis for a
// hash leads to unstable SVH, because ident.name is just an index
// into the intern table (i.e. essentially a random address), not
// computed from the name content.
//
// With the below enums, the SVH computation is not sensitive to
// artifacts of how rustc was invoked nor of how the source code
// was laid out. (Or at least it is *less* sensitive.)
// This enum represents the different potential bits of code the
// visitor could encounter that could affect the ABI for the crate,
// and assigns each a distinct tag to feed into the hash computation.
#[derive(Hash)]
enum SawAbiComponent<'a> {
// FIXME (#14132): should we include (some function of)
// ident.ctxt as well?
SawIdent(token::InternedString),
SawStructDef(token::InternedString),
SawLifetime(token::InternedString),
SawLifetimeDef(token::InternedString),
SawMod,
SawForeignItem,
SawItem,
SawTy,
SawGenerics,
SawFn,
SawTraitItem,
SawImplItem,
SawStructField,
SawVariant,
SawPath,
SawBlock,
SawPat,
SawLocal,
SawArm,
SawExpr(SawExprComponent<'a>),
SawStmt(SawStmtComponent),
}
/// SawExprComponent carries all of the information that we want
/// to include in the hash that *won't* be covered by the
/// subsequent recursive traversal of the expression's
/// substructure by the visitor.
///
/// We know every Expr_ variant is covered by a variant because
/// `fn saw_expr` maps each to some case below. Ensuring that
/// each variant carries an appropriate payload has to be verified
/// by hand.
///
/// (However, getting that *exactly* right is not so important
/// because the SVH is just a developer convenience; there is no
/// guarantee of collision-freedom, hash collisions are just
/// (hopefully) unlikely.)
#[derive(Hash)]
pub enum SawExprComponent<'a> {
SawExprLoop(Option<token::InternedString>),
SawExprField(token::InternedString),
SawExprTupField(usize),
SawExprBreak(Option<token::InternedString>),
SawExprAgain(Option<token::InternedString>),
SawExprBox,
SawExprVec,
SawExprCall,
SawExprMethodCall,
SawExprTup,
SawExprBinary(hir::BinOp_),
SawExprUnary(hir::UnOp),
SawExprLit(ast::LitKind),
SawExprCast,
SawExprType,
SawExprIf,
SawExprWhile,
SawExprMatch,
SawExprClosure,
SawExprBlock,
SawExprAssign,
SawExprAssignOp(hir::BinOp_),
SawExprIndex,
SawExprPath(Option<usize>),
SawExprAddrOf(hir::Mutability),
SawExprRet,
SawExprInlineAsm(&'a hir::InlineAsm),
SawExprStruct,
SawExprRepeat,
}
fn saw_expr<'a>(node: &'a Expr_) -> SawExprComponent<'a> {
match *node {
ExprBox(..) => SawExprBox,
ExprVec(..) => SawExprVec,
ExprCall(..) => SawExprCall,
ExprMethodCall(..) => SawExprMethodCall,
ExprTup(..) => SawExprTup,
ExprBinary(op, _, _) => SawExprBinary(op.node),
ExprUnary(op, _) => SawExprUnary(op),
ExprLit(ref lit) => SawExprLit(lit.node.clone()),
ExprCast(..) => SawExprCast,
ExprType(..) => SawExprType,
ExprIf(..) => SawExprIf,
ExprWhile(..) => SawExprWhile,
ExprLoop(_, id) => SawExprLoop(id.map(|id| id.node.as_str())),
ExprMatch(..) => SawExprMatch,
ExprClosure(..) => SawExprClosure,
ExprBlock(..) => SawExprBlock,
ExprAssign(..) => SawExprAssign,
ExprAssignOp(op, _, _) => SawExprAssignOp(op.node),
ExprField(_, name) => SawExprField(name.node.as_str()),
ExprTupField(_, id) => SawExprTupField(id.node),
ExprIndex(..) => SawExprIndex,
ExprPath(ref qself, _) => SawExprPath(qself.as_ref().map(|q| q.position)),
ExprAddrOf(m, _) => SawExprAddrOf(m),
ExprBreak(id) => SawExprBreak(id.map(|id| id.node.as_str())),
ExprAgain(id) => SawExprAgain(id.map(|id| id.node.as_str())),
ExprRet(..) => SawExprRet,
ExprInlineAsm(ref a,_,_) => SawExprInlineAsm(a),
ExprStruct(..) => SawExprStruct,
ExprRepeat(..) => SawExprRepeat,
}
}
/// SawStmtComponent is analogous to SawExprComponent, but for statements.
#[derive(Hash)]
pub enum SawStmtComponent {
SawStmtExpr,
SawStmtSemi,
}
impl<'a, 'hash, 'tcx> Visitor<'tcx> for StrictVersionHashVisitor<'a, 'hash, 'tcx> {
fn visit_nested_item(&mut self, _: ItemId) {
// Each item is hashed independently; ignore nested items.
}
fn visit_variant_data(&mut self, s: &'tcx VariantData, name: Name,
g: &'tcx Generics, _: NodeId, _: Span) {
debug!("visit_variant_data: st={:?}", self.st);
SawStructDef(name.as_str()).hash(self.st);
visit::walk_generics(self, g);
visit::walk_struct_def(self, s)
}
fn visit_variant(&mut self, v: &'tcx Variant, g: &'tcx Generics, item_id: NodeId) {
debug!("visit_variant: st={:?}", self.st);
SawVariant.hash(self.st);
// walk_variant does not call walk_generics, so do it here.
visit::walk_generics(self, g);
visit::walk_variant(self, v, g, item_id)
}
// All of the remaining methods just record (in the hash
// SipHasher) that the visitor saw that particular variant
// (with its payload), and continue walking as the default
// visitor would.
//
// Some of the implementations have some notes as to how one
// might try to make their SVH computation less discerning
// (e.g. by incorporating reachability analysis). But
// currently all of their implementations are uniform and
// uninteresting.
//
// (If you edit a method such that it deviates from the
// pattern, please move that method up above this comment.)
fn visit_name(&mut self, _: Span, name: Name) {
debug!("visit_name: st={:?}", self.st);
SawIdent(name.as_str()).hash(self.st);
}
fn visit_lifetime(&mut self, l: &'tcx Lifetime) {
debug!("visit_lifetime: st={:?}", self.st);
SawLifetime(l.name.as_str()).hash(self.st);
}
fn visit_lifetime_def(&mut self, l: &'tcx LifetimeDef) {
debug!("visit_lifetime_def: st={:?}", self.st);
SawLifetimeDef(l.lifetime.name.as_str()).hash(self.st);
}
// We do recursively walk the bodies of functions/methods
// (rather than omitting their bodies from the hash) since
// monomorphization and cross-crate inlining generally imply
// that a change to a crate body will require downstream
// crates to be recompiled.
fn visit_expr(&mut self, ex: &'tcx Expr) {
debug!("visit_expr: st={:?}", self.st);
SawExpr(saw_expr(&ex.node)).hash(self.st); visit::walk_expr(self, ex)
}
fn visit_stmt(&mut self, s: &'tcx Stmt) {
debug!("visit_stmt: st={:?}", self.st);
// We don't want to modify the hash for decls, because
// they might be item decls (if they are local decls,
// we'll hash that fact in visit_local); but we do want to
// remember if this was a StmtExpr or StmtSemi (the latter
// has an explicit semicolon; this affects the typing
// rules).
match s.node {
StmtDecl(..) => (),
StmtExpr(..) => SawStmt(SawStmtExpr).hash(self.st),
StmtSemi(..) => SawStmt(SawStmtSemi).hash(self.st),
}
visit::walk_stmt(self, s)
}
fn visit_foreign_item(&mut self, i: &'tcx ForeignItem) {
debug!("visit_foreign_item: st={:?}", self.st);
// FIXME (#14132) ideally we would incorporate privacy (or
// perhaps reachability) somewhere here, so foreign items
// that do not leak into downstream crates would not be
// part of the ABI.
SawForeignItem.hash(self.st); visit::walk_foreign_item(self, i)
}
fn visit_item(&mut self, i: &'tcx Item) {
debug!("visit_item: {:?} st={:?}", i, self.st);
// FIXME (#14132) ideally would incorporate reachability
// analysis somewhere here, so items that never leak into
// downstream crates (e.g. via monomorphisation or
// inlining) would not be part of the ABI.
SawItem.hash(self.st); visit::walk_item(self, i)
}
fn visit_mod(&mut self, m: &'tcx Mod, _s: Span, n: NodeId) {
debug!("visit_mod: st={:?}", self.st);
SawMod.hash(self.st); visit::walk_mod(self, m, n)
}
fn visit_ty(&mut self, t: &'tcx Ty) {
debug!("visit_ty: st={:?}", self.st);
SawTy.hash(self.st); visit::walk_ty(self, t)
}
fn visit_generics(&mut self, g: &'tcx Generics) {
debug!("visit_generics: st={:?}", self.st);
SawGenerics.hash(self.st); visit::walk_generics(self, g)
}
fn visit_fn(&mut self, fk: FnKind<'tcx>, fd: &'tcx FnDecl,
b: &'tcx Block, s: Span, n: NodeId) {
debug!("visit_fn: st={:?}", self.st);
SawFn.hash(self.st); visit::walk_fn(self, fk, fd, b, s, n)
}
fn visit_trait_item(&mut self, ti: &'tcx TraitItem) {
debug!("visit_trait_item: st={:?}", self.st);
SawTraitItem.hash(self.st); visit::walk_trait_item(self, ti)
}
fn visit_impl_item(&mut self, ii: &'tcx ImplItem) {
debug!("visit_impl_item: st={:?}", self.st);
SawImplItem.hash(self.st); visit::walk_impl_item(self, ii)
}
fn visit_struct_field(&mut self, s: &'tcx StructField) |
fn visit_path(&mut self, path: &'tcx Path, _: ast::NodeId) {
debug!("visit_path: st={:?}", self.st);
SawPath.hash(self.st); visit::walk_path(self, path)
}
fn visit_block(&mut self, b: &'tcx Block) {
debug!("visit_block: st={:?}", self.st);
SawBlock.hash(self.st); visit::walk_block(self, b)
}
fn visit_pat(&mut self, p: &'tcx Pat) {
debug!("visit_pat: st={:?}", self.st);
SawPat.hash(self.st); visit::walk_pat(self, p)
}
fn visit_local(&mut self, l: &'tcx Local) {
debug!("visit_local: st={:?}", self.st);
SawLocal.hash(self.st); visit::walk_local(self, l)
}
fn visit_arm(&mut self, a: &'tcx Arm) {
debug!("visit_arm: st={:?}", self.st);
SawArm.hash(self.st); visit::walk_arm(self, a)
}
fn visit_id(&mut self, id: NodeId) {
debug!("visit_id: id={} st={:?}", id, self.st);
self.hash_resolve(id);
}
}
#[derive(Hash)]
pub enum DefHash {
SawDefId,
SawLabel,
SawPrimTy,
SawSelfTy,
SawErr,
}
impl<'a, 'hash, 'tcx> StrictVersionHashVisitor<'a, 'hash, 'tcx> {
fn hash_resolve(&mut self, id: ast::NodeId) {
// Because whether or not a given id has an entry is dependent
// solely on expr variant etc, we don't need to hash whether
// or not an entry was present (we are already hashing what
// variant it is above when we visit the HIR).
if let Some(def) = self.tcx.def_map.borrow().get(&id) {
debug!("hash_resolve: id={:?} def={:?} st={:?}", id, def, self.st);
self.hash_partial_def(def);
}
if let Some(traits) = self.tcx.trait_map.get(&id) {
debug!("hash_resolve: id={:?} traits={:?} st={:?}", id, traits, self.st);
traits.len().hash(self.st);
// The ordering of the candidates is not fixed. So we hash
// the def-ids and then sort them and hash the collection.
let mut candidates: Vec<_> =
traits.iter()
.map(|&TraitCandidate { def_id, import_id: _ }| {
self.compute_def_id_hash(def_id)
})
.collect();
candidates.sort();
candidates.hash(self.st);
}
}
fn hash_def_id(&mut self, def_id: DefId) {
self.compute_def_id_hash(def_id).hash(self.st);
}
fn hash_partial_def(&mut self, def: &PathResolution) {
self.hash_def(def.base_def);
def.depth.hash(self.st);
}
fn hash_def(&mut self, def: Def) {
match def {
// Crucial point: for all of these variants, the variant +
// add'l data that is added is always the same if the
// def-id is the same, so it suffices to hash the def-id
Def::Fn(..) |
Def::Mod(..) |
Def::ForeignMod(..) |
Def::Static(..) |
Def::Variant(..) |
Def::Enum(..) |
Def::TyAlias(..) |
Def::AssociatedTy(..) |
Def::TyParam(..) |
Def::Struct(..) |
Def::Trait(..) |
Def::Method(..) |
Def::Const(..) |
Def::AssociatedConst(..) |
Def::Local(..) |
Def::Upvar(..) => {
DefHash::SawDefId.hash(self.st);
self.hash_def_id(def.def_id());
}
Def::Label(..) => {
DefHash::SawLabel.hash(self.st);
// we don't encode the `id` because it always refers to something
// within this item, so if it changed, there would have to be other
// changes too
}
Def::PrimTy(ref prim_ty) => {
DefHash::SawPrimTy.hash(self.st);
prim_ty.hash(self.st);
}
Def::SelfTy(..) => {
DefHash::SawSelfTy.hash(self.st);
// the meaning of Self is always the same within a
// given context, so we don't need to hash the other
// fields
}
Def::Err => {
DefHash::SawErr.hash(self.st);
}
}
}
}
| {
debug!("visit_struct_field: st={:?}", self.st);
SawStructField.hash(self.st); visit::walk_struct_field(self, s)
} | identifier_body |
svh_visitor.rs | // Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// FIXME (#14132): Even this SVH computation still has implementation
// artifacts: namely, the order of item declaration will affect the
// hash computation, but for many kinds of items the order of
// declaration should be irrelevant to the ABI.
pub use self::SawExprComponent::*;
pub use self::SawStmtComponent::*;
use self::SawAbiComponent::*;
use syntax::ast::{self, Name, NodeId};
use syntax::parse::token;
use syntax_pos::Span;
use rustc::hir;
use rustc::hir::*;
use rustc::hir::def::{Def, PathResolution};
use rustc::hir::def_id::DefId;
use rustc::hir::intravisit as visit;
use rustc::hir::intravisit::{Visitor, FnKind};
use rustc::ty::TyCtxt;
use std::hash::{Hash, SipHasher};
use super::def_path_hash::DefPathHashes;
pub struct StrictVersionHashVisitor<'a, 'hash: 'a, 'tcx: 'hash> {
pub tcx: TyCtxt<'hash, 'tcx, 'tcx>,
pub st: &'a mut SipHasher,
// collect a deterministic hash of def-ids that we have seen
def_path_hashes: &'a mut DefPathHashes<'hash, 'tcx>,
}
impl<'a, 'hash, 'tcx> StrictVersionHashVisitor<'a, 'hash, 'tcx> {
pub fn new(st: &'a mut SipHasher,
tcx: TyCtxt<'hash, 'tcx, 'tcx>,
def_path_hashes: &'a mut DefPathHashes<'hash, 'tcx>)
-> Self {
StrictVersionHashVisitor { st: st, tcx: tcx, def_path_hashes: def_path_hashes }
}
fn compute_def_id_hash(&mut self, def_id: DefId) -> u64 {
self.def_path_hashes.hash(def_id)
}
}
// To off-load the bulk of the hash-computation on #[derive(Hash)],
// we define a set of enums corresponding to the content that our
// crate visitor will encounter as it traverses the ast.
//
// The important invariant is that all of the Saw*Component enums
// do not carry any Spans, Names, or Idents.
//
// Not carrying any Names/Idents is the important fix for the problem
// noted on PR #13948: using the ident.name as the basis for a
// hash leads to unstable SVH, because ident.name is just an index
// into the intern table (i.e. essentially a random address), not
// computed from the name content.
//
// With the below enums, the SVH computation is not sensitive to
// artifacts of how rustc was invoked nor of how the source code
// was laid out. (Or at least it is *less* sensitive.)
// This enum represents the different potential bits of code the
// visitor could encounter that could affect the ABI for the crate,
// and assigns each a distinct tag to feed into the hash computation.
#[derive(Hash)]
enum SawAbiComponent<'a> {
// FIXME (#14132): should we include (some function of)
// ident.ctxt as well?
SawIdent(token::InternedString),
SawStructDef(token::InternedString),
SawLifetime(token::InternedString),
SawLifetimeDef(token::InternedString),
SawMod,
SawForeignItem,
SawItem,
SawTy,
SawGenerics,
SawFn,
SawTraitItem,
SawImplItem,
SawStructField,
SawVariant,
SawPath,
SawBlock,
SawPat,
SawLocal,
SawArm,
SawExpr(SawExprComponent<'a>),
SawStmt(SawStmtComponent),
}
/// SawExprComponent carries all of the information that we want
/// to include in the hash that *won't* be covered by the
/// subsequent recursive traversal of the expression's
/// substructure by the visitor.
///
/// We know every Expr_ variant is covered by a variant because
/// `fn saw_expr` maps each to some case below. Ensuring that
/// each variant carries an appropriate payload has to be verified
/// by hand.
///
/// (However, getting that *exactly* right is not so important
/// because the SVH is just a developer convenience; there is no
/// guarantee of collision-freedom, hash collisions are just
/// (hopefully) unlikely.)
#[derive(Hash)]
pub enum SawExprComponent<'a> {
SawExprLoop(Option<token::InternedString>),
SawExprField(token::InternedString),
SawExprTupField(usize),
SawExprBreak(Option<token::InternedString>),
SawExprAgain(Option<token::InternedString>),
SawExprBox,
SawExprVec,
SawExprCall,
SawExprMethodCall,
SawExprTup,
SawExprBinary(hir::BinOp_),
SawExprUnary(hir::UnOp),
SawExprLit(ast::LitKind),
SawExprCast,
SawExprType,
SawExprIf,
SawExprWhile,
SawExprMatch,
SawExprClosure,
SawExprBlock,
SawExprAssign,
SawExprAssignOp(hir::BinOp_),
SawExprIndex,
SawExprPath(Option<usize>),
SawExprAddrOf(hir::Mutability),
SawExprRet,
SawExprInlineAsm(&'a hir::InlineAsm),
SawExprStruct,
SawExprRepeat,
}
fn saw_expr<'a>(node: &'a Expr_) -> SawExprComponent<'a> {
match *node {
ExprBox(..) => SawExprBox,
ExprVec(..) => SawExprVec,
ExprCall(..) => SawExprCall,
ExprMethodCall(..) => SawExprMethodCall,
ExprTup(..) => SawExprTup,
ExprBinary(op, _, _) => SawExprBinary(op.node),
ExprUnary(op, _) => SawExprUnary(op),
ExprLit(ref lit) => SawExprLit(lit.node.clone()),
ExprCast(..) => SawExprCast,
ExprType(..) => SawExprType,
ExprIf(..) => SawExprIf,
ExprWhile(..) => SawExprWhile,
ExprLoop(_, id) => SawExprLoop(id.map(|id| id.node.as_str())),
ExprMatch(..) => SawExprMatch,
ExprClosure(..) => SawExprClosure,
ExprBlock(..) => SawExprBlock,
ExprAssign(..) => SawExprAssign,
ExprAssignOp(op, _, _) => SawExprAssignOp(op.node),
ExprField(_, name) => SawExprField(name.node.as_str()),
ExprTupField(_, id) => SawExprTupField(id.node),
ExprIndex(..) => SawExprIndex,
ExprPath(ref qself, _) => SawExprPath(qself.as_ref().map(|q| q.position)),
ExprAddrOf(m, _) => SawExprAddrOf(m),
ExprBreak(id) => SawExprBreak(id.map(|id| id.node.as_str())),
ExprAgain(id) => SawExprAgain(id.map(|id| id.node.as_str())),
ExprRet(..) => SawExprRet,
ExprInlineAsm(ref a,_,_) => SawExprInlineAsm(a),
ExprStruct(..) => SawExprStruct,
ExprRepeat(..) => SawExprRepeat,
}
}
/// SawStmtComponent is analogous to SawExprComponent, but for statements.
#[derive(Hash)]
pub enum SawStmtComponent {
SawStmtExpr,
SawStmtSemi,
}
impl<'a, 'hash, 'tcx> Visitor<'tcx> for StrictVersionHashVisitor<'a, 'hash, 'tcx> {
fn visit_nested_item(&mut self, _: ItemId) {
// Each item is hashed independently; ignore nested items.
}
fn visit_variant_data(&mut self, s: &'tcx VariantData, name: Name,
g: &'tcx Generics, _: NodeId, _: Span) {
debug!("visit_variant_data: st={:?}", self.st);
SawStructDef(name.as_str()).hash(self.st);
visit::walk_generics(self, g);
visit::walk_struct_def(self, s)
}
fn | (&mut self, v: &'tcx Variant, g: &'tcx Generics, item_id: NodeId) {
debug!("visit_variant: st={:?}", self.st);
SawVariant.hash(self.st);
// walk_variant does not call walk_generics, so do it here.
visit::walk_generics(self, g);
visit::walk_variant(self, v, g, item_id)
}
// All of the remaining methods just record (in the hash
// SipHasher) that the visitor saw that particular variant
// (with its payload), and continue walking as the default
// visitor would.
//
// Some of the implementations have some notes as to how one
// might try to make their SVH computation less discerning
// (e.g. by incorporating reachability analysis). But
// currently all of their implementations are uniform and
// uninteresting.
//
// (If you edit a method such that it deviates from the
// pattern, please move that method up above this comment.)
fn visit_name(&mut self, _: Span, name: Name) {
debug!("visit_name: st={:?}", self.st);
SawIdent(name.as_str()).hash(self.st);
}
fn visit_lifetime(&mut self, l: &'tcx Lifetime) {
debug!("visit_lifetime: st={:?}", self.st);
SawLifetime(l.name.as_str()).hash(self.st);
}
fn visit_lifetime_def(&mut self, l: &'tcx LifetimeDef) {
debug!("visit_lifetime_def: st={:?}", self.st);
SawLifetimeDef(l.lifetime.name.as_str()).hash(self.st);
}
// We do recursively walk the bodies of functions/methods
// (rather than omitting their bodies from the hash) since
// monomorphization and cross-crate inlining generally imply
// that a change to a crate body will require downstream
// crates to be recompiled.
fn visit_expr(&mut self, ex: &'tcx Expr) {
debug!("visit_expr: st={:?}", self.st);
SawExpr(saw_expr(&ex.node)).hash(self.st); visit::walk_expr(self, ex)
}
fn visit_stmt(&mut self, s: &'tcx Stmt) {
debug!("visit_stmt: st={:?}", self.st);
// We don't want to modify the hash for decls, because
// they might be item decls (if they are local decls,
// we'll hash that fact in visit_local); but we do want to
// remember if this was a StmtExpr or StmtSemi (the latter
// had an explicit semi-colon; this affects the typing
// rules).
match s.node {
StmtDecl(..) => (),
StmtExpr(..) => SawStmt(SawStmtExpr).hash(self.st),
StmtSemi(..) => SawStmt(SawStmtSemi).hash(self.st),
}
visit::walk_stmt(self, s)
}
fn visit_foreign_item(&mut self, i: &'tcx ForeignItem) {
debug!("visit_foreign_item: st={:?}", self.st);
// FIXME (#14132) ideally we would incorporate privacy (or
// perhaps reachability) somewhere here, so foreign items
// that do not leak into downstream crates would not be
// part of the ABI.
SawForeignItem.hash(self.st); visit::walk_foreign_item(self, i)
}
fn visit_item(&mut self, i: &'tcx Item) {
debug!("visit_item: {:?} st={:?}", i, self.st);
// FIXME (#14132) ideally would incorporate reachability
// analysis somewhere here, so items that never leak into
// downstream crates (e.g. via monomorphisation or
// inlining) would not be part of the ABI.
SawItem.hash(self.st); visit::walk_item(self, i)
}
fn visit_mod(&mut self, m: &'tcx Mod, _s: Span, n: NodeId) {
debug!("visit_mod: st={:?}", self.st);
SawMod.hash(self.st); visit::walk_mod(self, m, n)
}
fn visit_ty(&mut self, t: &'tcx Ty) {
debug!("visit_ty: st={:?}", self.st);
SawTy.hash(self.st); visit::walk_ty(self, t)
}
fn visit_generics(&mut self, g: &'tcx Generics) {
debug!("visit_generics: st={:?}", self.st);
SawGenerics.hash(self.st); visit::walk_generics(self, g)
}
fn visit_fn(&mut self, fk: FnKind<'tcx>, fd: &'tcx FnDecl,
b: &'tcx Block, s: Span, n: NodeId) {
debug!("visit_fn: st={:?}", self.st);
SawFn.hash(self.st); visit::walk_fn(self, fk, fd, b, s, n)
}
fn visit_trait_item(&mut self, ti: &'tcx TraitItem) {
debug!("visit_trait_item: st={:?}", self.st);
SawTraitItem.hash(self.st); visit::walk_trait_item(self, ti)
}
fn visit_impl_item(&mut self, ii: &'tcx ImplItem) {
debug!("visit_impl_item: st={:?}", self.st);
SawImplItem.hash(self.st); visit::walk_impl_item(self, ii)
}
fn visit_struct_field(&mut self, s: &'tcx StructField) {
debug!("visit_struct_field: st={:?}", self.st);
SawStructField.hash(self.st); visit::walk_struct_field(self, s)
}
fn visit_path(&mut self, path: &'tcx Path, _: ast::NodeId) {
debug!("visit_path: st={:?}", self.st);
SawPath.hash(self.st); visit::walk_path(self, path)
}
fn visit_block(&mut self, b: &'tcx Block) {
debug!("visit_block: st={:?}", self.st);
SawBlock.hash(self.st); visit::walk_block(self, b)
}
fn visit_pat(&mut self, p: &'tcx Pat) {
debug!("visit_pat: st={:?}", self.st);
SawPat.hash(self.st); visit::walk_pat(self, p)
}
fn visit_local(&mut self, l: &'tcx Local) {
debug!("visit_local: st={:?}", self.st);
SawLocal.hash(self.st); visit::walk_local(self, l)
}
fn visit_arm(&mut self, a: &'tcx Arm) {
debug!("visit_arm: st={:?}", self.st);
SawArm.hash(self.st); visit::walk_arm(self, a)
}
fn visit_id(&mut self, id: NodeId) {
debug!("visit_id: id={} st={:?}", id, self.st);
self.hash_resolve(id);
}
}
#[derive(Hash)]
pub enum DefHash {
SawDefId,
SawLabel,
SawPrimTy,
SawSelfTy,
SawErr,
}
impl<'a, 'hash, 'tcx> StrictVersionHashVisitor<'a, 'hash, 'tcx> {
fn hash_resolve(&mut self, id: ast::NodeId) {
// Because whether or not a given id has an entry is dependent
// solely on expr variant etc, we don't need to hash whether
// or not an entry was present (we are already hashing what
// variant it is above when we visit the HIR).
if let Some(def) = self.tcx.def_map.borrow().get(&id) {
debug!("hash_resolve: id={:?} def={:?} st={:?}", id, def, self.st);
self.hash_partial_def(def);
}
if let Some(traits) = self.tcx.trait_map.get(&id) {
debug!("hash_resolve: id={:?} traits={:?} st={:?}", id, traits, self.st);
traits.len().hash(self.st);
// The ordering of the candidates is not fixed. So we hash
// the def-ids and then sort them and hash the collection.
let mut candidates: Vec<_> =
traits.iter()
.map(|&TraitCandidate { def_id, import_id: _ }| {
self.compute_def_id_hash(def_id)
})
.collect();
candidates.sort();
candidates.hash(self.st);
}
}
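// Illustrative sketch only (not part of the original source): the same
// order-independence trick as above, factored out. Each input is assumed
// to be a stable per-element u64 hash (as produced by compute_def_id_hash).
#[allow(dead_code)]
fn hash_unordered<I: IntoIterator<Item = u64>>(&mut self, hashes: I) {
let mut sorted: Vec<u64> = hashes.into_iter().collect();
sorted.sort(); // canonical order, independent of iteration order
sorted.hash(self.st); // hashes length plus elements deterministically
}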
fn hash_def_id(&mut self, def_id: DefId) {
self.compute_def_id_hash(def_id).hash(self.st);
}
fn hash_partial_def(&mut self, def: &PathResolution) {
self.hash_def(def.base_def);
def.depth.hash(self.st);
}
fn hash_def(&mut self, def: Def) {
match def {
// Crucial point: for all of these variants, the variant +
// add'l data that is added is always the same if the
// def-id is the same, so it suffices to hash the def-id
Def::Fn(..) |
Def::Mod(..) |
Def::ForeignMod(..) |
Def::Static(..) |
Def::Variant(..) |
Def::Enum(..) |
Def::TyAlias(..) |
Def::AssociatedTy(..) |
Def::TyParam(..) |
Def::Struct(..) |
Def::Trait(..) |
Def::Method(..) |
Def::Const(..) |
Def::AssociatedConst(..) |
Def::Local(..) |
Def::Upvar(..) => {
DefHash::SawDefId.hash(self.st);
self.hash_def_id(def.def_id());
}
Def::Label(..) => {
DefHash::SawLabel.hash(self.st);
// we don't encode the `id` because it always refers to something
// within this item, so if it changed, there would have to be other
// changes too
}
Def::PrimTy(ref prim_ty) => {
DefHash::SawPrimTy.hash(self.st);
prim_ty.hash(self.st);
}
Def::SelfTy(..) => {
DefHash::SawSelfTy.hash(self.st);
// the meaning of Self is always the same within a
// given context, so we don't need to hash the other
// fields
}
Def::Err => {
DefHash::SawErr.hash(self.st);
}
}
}
}
| visit_variant | identifier_name |
config.rs | //! Configuration for the iroh CLI.
use std::{
collections::HashMap,
env, fmt,
path::{Path, PathBuf},
str::FromStr,
};
use anyhow::{anyhow, bail, Result};
use config::{Environment, File, Value};
use iroh_net::{
defaults::{default_eu_derp_region, default_na_derp_region},
derp::{DerpMap, DerpRegion},
};
use serde::{Deserialize, Serialize};
use tracing::debug;
/// CONFIG_FILE_NAME is the name of the optional config file located in the iroh home directory
pub const CONFIG_FILE_NAME: &str = "iroh.config.toml";
/// ENV_PREFIX should be used alongside the config field name to set a config field using
/// environment variables.
/// For example, `IROH_PATH=/path/to/config` would set the value of the `Config.path` field
pub const ENV_PREFIX: &str = "IROH";
/// Paths to files or directories within the [`iroh_data_root`] used by Iroh.
#[derive(Debug, Clone, Eq, PartialEq)]
pub enum IrohPaths {
/// Path to the node's private key for the [`iroh_net::PeerId`].
Keypair,
/// Path to the node's [flat-file store](iroh::baomap::flat) for complete blobs.
BaoFlatStoreComplete,
/// Path to the node's [flat-file store](iroh::baomap::flat) for partial blobs.
BaoFlatStorePartial,
}
impl From<&IrohPaths> for &'static str {
fn from(value: &IrohPaths) -> Self {
match value {
IrohPaths::Keypair => "keypair",
IrohPaths::BaoFlatStoreComplete => "blobs.v0",
IrohPaths::BaoFlatStorePartial => "blobs-partial.v0",
}
}
}
impl FromStr for IrohPaths {
type Err = anyhow::Error;
fn from_str(s: &str) -> Result<Self> {
Ok(match s {
"keypair" => Self::Keypair,
"blobs.v0" => Self::BaoFlatStoreComplete,
"blobs-partial.v0" => Self::BaoFlatStorePartial,
_ => bail!("unknown file or directory"),
})
}
}
impl fmt::Display for IrohPaths {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
let s: &str = self.into();
write!(f, "{s}")
}
}
impl AsRef<Path> for IrohPaths {
fn as_ref(&self) -> &Path {
let s: &str = self.into();
Path::new(s)
}
}
impl IrohPaths {
/// Get the path for this [`IrohPaths`] by joining the name to the root from the `IROH_DATA_DIR` environment variable (or the platform default).
pub fn with_env(self) -> Result<PathBuf> {
let mut root = iroh_data_root()?;
if !root.is_absolute() {
root = std::env::current_dir()?.join(root);
}
Ok(self.with_root(root))
}
/// Get the path for this [`IrohPaths`] by joining the name to a root directory.
pub fn with_root(self, root: impl AsRef<Path>) -> PathBuf {
root.as_ref().join(self)
}
}
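// Usage sketch (illustrative only): the two ways to resolve an [`IrohPaths`] entry.
#[allow(dead_code)]
fn example_iroh_paths() -> Result<PathBuf> {
// explicit root, e.g. yields "/tmp/iroh/blobs.v0"
let _fixed = IrohPaths::BaoFlatStoreComplete.with_root("/tmp/iroh");
// root taken from IROH_DATA_DIR (or the platform default)
IrohPaths::Keypair.with_env()
}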
/// The configuration for the iroh cli.
#[derive(PartialEq, Eq, Debug, Deserialize, Serialize, Clone)]
#[serde(default)]
pub struct Config {
/// The regions for DERP to use.
pub derp_regions: Vec<DerpRegion>,
}
impl Default for Config {
fn | () -> Self {
Self {
// TODO(ramfox): this should probably just be a derp map
derp_regions: [default_na_derp_region(), default_eu_derp_region()].into(),
}
}
}
impl Config {
/// Make a config using a default, files, environment variables, and commandline flags.
///
/// Later items in the *file_paths* slice will have a higher priority than earlier ones.
///
/// Environment variables are expected to start with the *env_prefix*. Nested fields can be
/// accessed using `.`, if your environment allows env vars with `.` in them.
///
/// Note: For the metrics configuration env vars, it is recommended to use the metrics
/// specific prefix `IROH_METRICS` to set a field in the metrics config. You can use the
/// above dot notation to set a metrics field, e.g., `IROH_CONFIG_METRICS.SERVICE_NAME`, but
/// only if your environment allows it.
pub fn load<S, V>(
file_paths: &[Option<&Path>],
env_prefix: &str,
flag_overrides: HashMap<S, V>,
) -> Result<Config>
where
S: AsRef<str>,
V: Into<Value>,
{
let mut builder = config::Config::builder();
// layer on config options from files
for path in file_paths.iter().flatten() {
if path.exists() {
let p = path.to_str().ok_or_else(|| anyhow::anyhow!("config path is not valid UTF-8"))?;
builder = builder.add_source(File::with_name(p));
}
}
// next, add any environment variables
builder = builder.add_source(
Environment::with_prefix(env_prefix)
.separator("__")
.try_parsing(true),
);
// finally, override any values
for (flag, val) in flag_overrides.into_iter() {
builder = builder.set_override(flag, val)?;
}
let cfg = builder.build()?;
debug!("make_config:\n{:#?}\n", cfg);
let cfg = cfg.try_deserialize()?;
Ok(cfg)
}
/// Constructs a `DerpMap` based on the current configuration.
pub fn derp_map(&self) -> Option<DerpMap> {
if self.derp_regions.is_empty() {
return None;
}
let dm: DerpMap = self.derp_regions.iter().cloned().into();
Some(dm)
}
}
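// Minimal usage sketch (illustrative only), mirroring the test at the bottom of
// this file: load from the default config file with the standard env prefix and
// no command-line overrides.
#[allow(dead_code)]
fn example_load() -> Result<Config> {
let path = iroh_config_path(CONFIG_FILE_NAME)?;
Config::load::<String, String>(&[Some(path.as_path())], ENV_PREFIX, HashMap::new())
}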
/// Name of directory that wraps all iroh files in a given application directory
const IROH_DIR: &str = "iroh";
/// Returns the path to the user's iroh config directory.
///
/// If the `IROH_CONFIG_DIR` environment variable is set it will be used unconditionally.
/// Otherwise the returned value depends on the operating system according to the following
/// table.
///
/// | Platform | Value | Example |
/// | -------- | ------------------------------------- | -------------------------------- |
/// | Linux | `$XDG_CONFIG_HOME`/iroh or `$HOME`/.config/iroh | /home/alice/.config/iroh |
/// | macOS | `$HOME`/Library/Application Support/iroh | /Users/Alice/Library/Application Support/iroh |
/// | Windows | `{FOLDERID_RoamingAppData}`/iroh | C:\Users\Alice\AppData\Roaming\iroh |
pub fn iroh_config_root() -> Result<PathBuf> {
if let Some(val) = env::var_os("IROH_CONFIG_DIR") {
return Ok(PathBuf::from(val));
}
let cfg = dirs_next::config_dir()
.ok_or_else(|| anyhow!("operating environment provides no directory for configuration"))?;
Ok(cfg.join(IROH_DIR))
}
/// Path that leads to a file in the iroh config directory.
pub fn iroh_config_path(file_name: impl AsRef<Path>) -> Result<PathBuf> {
let path = iroh_config_root()?.join(file_name);
Ok(path)
}
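// Sketch (illustrative only): the default config file location, e.g.
// ~/.config/iroh/iroh.config.toml on Linux.
#[allow(dead_code)]
fn example_config_file() -> Result<PathBuf> {
iroh_config_path(CONFIG_FILE_NAME)
}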
/// Returns the path to the user's iroh data directory.
///
/// If the `IROH_DATA_DIR` environment variable is set it will be used unconditionally.
/// Otherwise the returned value depends on the operating system according to the following
/// table.
///
/// | Platform | Value | Example |
/// | -------- | --------------------------------------------- | ---------------------------------------- |
/// | Linux | `$XDG_DATA_HOME`/iroh or `$HOME`/.local/share/iroh | /home/alice/.local/share/iroh |
/// | macOS | `$HOME`/Library/Application Support/iroh | /Users/Alice/Library/Application Support/iroh |
/// | Windows | `{FOLDERID_RoamingAppData}/iroh` | C:\Users\Alice\AppData\Roaming\iroh |
pub fn iroh_data_root() -> Result<PathBuf> {
if let Some(val) = env::var_os("IROH_DATA_DIR") {
return Ok(PathBuf::from(val));
}
let path = dirs_next::data_dir().ok_or_else(|| {
anyhow!("operating environment provides no directory for application data")
})?;
Ok(path.join(IROH_DIR))
}
/// Path that leads to a file in the iroh data directory.
#[allow(dead_code)]
pub fn iroh_data_path(file_name: &Path) -> Result<PathBuf> {
let path = iroh_data_root()?.join(file_name);
Ok(path)
}
/// Returns the path to the user's iroh cache directory.
///
/// If the `IROH_CACHE_DIR` environment variable is set it will be used unconditionally.
/// Otherwise the returned value depends on the operating system according to the following
/// table.
///
/// | Platform | Value | Example |
/// | -------- | --------------------------------------------- | ---------------------------------------- |
/// | Linux | `$XDG_CACHE_HOME`/iroh or `$HOME`/.cache/iroh | /home/alice/.cache/iroh |
/// | macOS | `$HOME`/Library/Caches/iroh | /Users/Alice/Library/Caches/iroh |
/// | Windows | `{FOLDERID_LocalAppData}`/iroh | C:\Users\Alice\AppData\Local\iroh |
#[allow(dead_code)]
pub fn iroh_cache_root() -> Result<PathBuf> {
if let Some(val) = env::var_os("IROH_CACHE_DIR") {
return Ok(PathBuf::from(val));
}
let path = dirs_next::cache_dir().ok_or_else(|| {
anyhow!("operating environment provides no directory for application data")
})?;
Ok(path.join(IROH_DIR))
}
/// Path that leads to a file in the iroh cache directory.
#[allow(dead_code)]
pub fn iroh_cache_path(file_name: &Path) -> Result<PathBuf> {
let path = iroh_cache_root()?.join(file_name);
Ok(path)
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_default_settings() {
let config = Config::load::<String, String>(&[][..], "__FOO", Default::default()).unwrap();
assert_eq!(config.derp_regions.len(), 2);
}
#[test]
fn test_iroh_paths_parse_roundtrip() {
let kinds = [
IrohPaths::BaoFlatStoreComplete,
IrohPaths::BaoFlatStorePartial,
IrohPaths::Keypair,
];
for iroh_path in &kinds {
let root = PathBuf::from("/tmp");
let path = root.join(iroh_path);
let fname = path.file_name().unwrap().to_str().unwrap();
let parsed = IrohPaths::from_str(fname).unwrap();
assert_eq!(*iroh_path, parsed);
}
}
}
| default | identifier_name |
config.rs | //! Configuration for the iroh CLI.
use std::{
collections::HashMap,
env, fmt,
path::{Path, PathBuf},
str::FromStr,
};
use anyhow::{anyhow, bail, Result};
use config::{Environment, File, Value};
use iroh_net::{
defaults::{default_eu_derp_region, default_na_derp_region},
derp::{DerpMap, DerpRegion},
};
use serde::{Deserialize, Serialize};
use tracing::debug;
/// CONFIG_FILE_NAME is the name of the optional config file located in the iroh home directory
pub const CONFIG_FILE_NAME: &str = "iroh.config.toml";
/// ENV_PREFIX should be used alongside the config field name to set a config field using
/// environment variables.
/// For example, `IROH_PATH=/path/to/config` would set the value of the `Config.path` field
pub const ENV_PREFIX: &str = "IROH";
/// Paths to files or directories within the [`iroh_data_root`] used by Iroh.
#[derive(Debug, Clone, Eq, PartialEq)]
pub enum IrohPaths {
/// Path to the node's private key for the [`iroh_net::PeerId`].
Keypair,
/// Path to the node's [flat-file store](iroh::baomap::flat) for complete blobs.
BaoFlatStoreComplete,
/// Path to the node's [flat-file store](iroh::baomap::flat) for partial blobs.
BaoFlatStorePartial,
}
impl From<&IrohPaths> for &'static str {
fn from(value: &IrohPaths) -> Self {
match value {
IrohPaths::Keypair => "keypair",
IrohPaths::BaoFlatStoreComplete => "blobs.v0",
IrohPaths::BaoFlatStorePartial => "blobs-partial.v0",
}
}
}
impl FromStr for IrohPaths {
type Err = anyhow::Error;
fn from_str(s: &str) -> Result<Self> {
Ok(match s {
"keypair" => Self::Keypair,
"blobs.v0" => Self::BaoFlatStoreComplete,
"blobs-partial.v0" => Self::BaoFlatStorePartial,
_ => bail!("unknown file or directory"),
})
}
}
impl fmt::Display for IrohPaths {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
let s: &str = self.into();
write!(f, "{s}")
}
}
impl AsRef<Path> for IrohPaths {
fn as_ref(&self) -> &Path {
let s: &str = self.into();
Path::new(s)
}
}
impl IrohPaths {
/// Get the path for this [`IrohPaths`] by joining the name to the root from the `IROH_DATA_DIR` environment variable (or the platform default).
pub fn with_env(self) -> Result<PathBuf> |
/// Get the path for this [`IrohPaths`] by joining the name to a root directory.
pub fn with_root(self, root: impl AsRef<Path>) -> PathBuf {
root.as_ref().join(self)
}
}
/// The configuration for the iroh cli.
#[derive(PartialEq, Eq, Debug, Deserialize, Serialize, Clone)]
#[serde(default)]
pub struct Config {
/// The regions for DERP to use.
pub derp_regions: Vec<DerpRegion>,
}
impl Default for Config {
fn default() -> Self {
Self {
// TODO(ramfox): this should probably just be a derp map
derp_regions: [default_na_derp_region(), default_eu_derp_region()].into(),
}
}
}
impl Config {
/// Make a config using a default, files, environment variables, and commandline flags.
///
/// Later items in the *file_paths* slice will have a higher priority than earlier ones.
///
/// Environment variables are expected to start with the *env_prefix*. Nested fields can be
/// accessed using `.`, if your environment allows env vars with `.` in them.
///
/// Note: For the metrics configuration env vars, it is recommended to use the metrics
/// specific prefix `IROH_METRICS` to set a field in the metrics config. You can use the
/// above dot notation to set a metrics field, e.g., `IROH_CONFIG_METRICS.SERVICE_NAME`, but
/// only if your environment allows it.
pub fn load<S, V>(
file_paths: &[Option<&Path>],
env_prefix: &str,
flag_overrides: HashMap<S, V>,
) -> Result<Config>
where
S: AsRef<str>,
V: Into<Value>,
{
let mut builder = config::Config::builder();
// layer on config options from files
for path in file_paths.iter().flatten() {
if path.exists() {
let p = path.to_str().ok_or_else(|| anyhow::anyhow!("config path is not valid UTF-8"))?;
builder = builder.add_source(File::with_name(p));
}
}
// next, add any environment variables
builder = builder.add_source(
Environment::with_prefix(env_prefix)
.separator("__")
.try_parsing(true),
);
// finally, override any values
for (flag, val) in flag_overrides.into_iter() {
builder = builder.set_override(flag, val)?;
}
let cfg = builder.build()?;
debug!("make_config:\n{:#?}\n", cfg);
let cfg = cfg.try_deserialize()?;
Ok(cfg)
}
/// Constructs a `DerpMap` based on the current configuration.
pub fn derp_map(&self) -> Option<DerpMap> {
if self.derp_regions.is_empty() {
return None;
}
let dm: DerpMap = self.derp_regions.iter().cloned().into();
Some(dm)
}
}
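// Sketch (illustrative only; assumes DerpMap implements Debug): an empty
// `derp_regions` list yields None, i.e. DERP is disabled entirely.
#[allow(dead_code)]
fn example_derp_map(cfg: &Config) {
match cfg.derp_map() {
Some(map) => debug!("using derp map: {:?}", map),
None => debug!("no derp regions configured; DERP disabled"),
}
}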
/// Name of directory that wraps all iroh files in a given application directory
const IROH_DIR: &str = "iroh";
/// Returns the path to the user's iroh config directory.
///
/// If the `IROH_CONFIG_DIR` environment variable is set it will be used unconditionally.
/// Otherwise the returned value depends on the operating system according to the following
/// table.
///
/// | Platform | Value | Example |
/// | -------- | ------------------------------------- | -------------------------------- |
/// | Linux | `$XDG_CONFIG_HOME`/iroh or `$HOME`/.config/iroh | /home/alice/.config/iroh |
/// | macOS | `$HOME`/Library/Application Support/iroh | /Users/Alice/Library/Application Support/iroh |
/// | Windows | `{FOLDERID_RoamingAppData}`/iroh | C:\Users\Alice\AppData\Roaming\iroh |
pub fn iroh_config_root() -> Result<PathBuf> {
if let Some(val) = env::var_os("IROH_CONFIG_DIR") {
return Ok(PathBuf::from(val));
}
let cfg = dirs_next::config_dir()
.ok_or_else(|| anyhow!("operating environment provides no directory for configuration"))?;
Ok(cfg.join(IROH_DIR))
}
/// Path that leads to a file in the iroh config directory.
pub fn iroh_config_path(file_name: impl AsRef<Path>) -> Result<PathBuf> {
let path = iroh_config_root()?.join(file_name);
Ok(path)
}
/// Returns the path to the user's iroh data directory.
///
/// If the `IROH_DATA_DIR` environment variable is set it will be used unconditionally.
/// Otherwise the returned value depends on the operating system according to the following
/// table.
///
/// | Platform | Value | Example |
/// | -------- | --------------------------------------------- | ---------------------------------------- |
/// | Linux | `$XDG_DATA_HOME`/iroh or `$HOME`/.local/share/iroh | /home/alice/.local/share/iroh |
/// | macOS | `$HOME`/Library/Application Support/iroh | /Users/Alice/Library/Application Support/iroh |
/// | Windows | `{FOLDERID_RoamingAppData}/iroh` | C:\Users\Alice\AppData\Roaming\iroh |
pub fn iroh_data_root() -> Result<PathBuf> {
if let Some(val) = env::var_os("IROH_DATA_DIR") {
return Ok(PathBuf::from(val));
}
let path = dirs_next::data_dir().ok_or_else(|| {
anyhow!("operating environment provides no directory for application data")
})?;
Ok(path.join(IROH_DIR))
}
/// Path that leads to a file in the iroh data directory.
#[allow(dead_code)]
pub fn iroh_data_path(file_name: &Path) -> Result<PathBuf> {
let path = iroh_data_root()?.join(file_name);
Ok(path)
}
/// Returns the path to the user's iroh cache directory.
///
/// If the `IROH_CACHE_DIR` environment variable is set it will be used unconditionally.
/// Otherwise the returned value depends on the operating system according to the following
/// table.
///
/// | Platform | Value | Example |
/// | -------- | --------------------------------------------- | ---------------------------------------- |
/// | Linux | `$XDG_CACHE_HOME`/iroh or `$HOME`/.cache/iroh | /home/alice/.cache/iroh |
/// | macOS | `$HOME`/Library/Caches/iroh | /Users/Alice/Library/Caches/iroh |
/// | Windows | `{FOLDERID_LocalAppData}`/iroh | C:\Users\Alice\AppData\Local\iroh |
#[allow(dead_code)]
pub fn iroh_cache_root() -> Result<PathBuf> {
if let Some(val) = env::var_os("IROH_CACHE_DIR") {
return Ok(PathBuf::from(val));
}
let path = dirs_next::cache_dir().ok_or_else(|| {
anyhow!("operating environment provides no directory for application data")
})?;
Ok(path.join(IROH_DIR))
}
/// Path that leads to a file in the iroh cache directory.
#[allow(dead_code)]
pub fn iroh_cache_path(file_name: &Path) -> Result<PathBuf> {
let path = iroh_cache_root()?.join(file_name);
Ok(path)
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_default_settings() {
let config = Config::load::<String, String>(&[][..], "__FOO", Default::default()).unwrap();
assert_eq!(config.derp_regions.len(), 2);
}
#[test]
fn test_iroh_paths_parse_roundtrip() {
let kinds = [
IrohPaths::BaoFlatStoreComplete,
IrohPaths::BaoFlatStorePartial,
IrohPaths::Keypair,
];
for iroh_path in &kinds {
let root = PathBuf::from("/tmp");
let path = root.join(iroh_path);
let fname = path.file_name().unwrap().to_str().unwrap();
let parsed = IrohPaths::from_str(fname).unwrap();
assert_eq!(*iroh_path, parsed);
}
}
}
| {
let mut root = iroh_data_root()?;
if !root.is_absolute() {
root = std::env::current_dir()?.join(root);
}
Ok(self.with_root(root))
} | identifier_body |
config.rs | //! Configuration for the iroh CLI.
use std::{
collections::HashMap,
env, fmt,
path::{Path, PathBuf},
str::FromStr,
};
use anyhow::{anyhow, bail, Result};
use config::{Environment, File, Value};
use iroh_net::{
defaults::{default_eu_derp_region, default_na_derp_region},
derp::{DerpMap, DerpRegion},
};
use serde::{Deserialize, Serialize};
use tracing::debug;
/// CONFIG_FILE_NAME is the name of the optional config file located in the iroh home directory
pub const CONFIG_FILE_NAME: &str = "iroh.config.toml";
/// ENV_PREFIX should be used alongside the config field name to set a config field using
/// environment variables.
/// For example, `IROH_PATH=/path/to/config` would set the value of the `Config.path` field
pub const ENV_PREFIX: &str = "IROH";
/// Paths to files or directories within the [`iroh_data_root`] used by Iroh.
#[derive(Debug, Clone, Eq, PartialEq)]
pub enum IrohPaths {
/// Path to the node's private key for the [`iroh_net::PeerId`].
Keypair,
/// Path to the node's [flat-file store](iroh::baomap::flat) for complete blobs.
BaoFlatStoreComplete,
/// Path to the node's [flat-file store](iroh::baomap::flat) for partial blobs.
BaoFlatStorePartial,
}
impl From<&IrohPaths> for &'static str {
fn from(value: &IrohPaths) -> Self {
match value { | }
}
impl FromStr for IrohPaths {
type Err = anyhow::Error;
fn from_str(s: &str) -> Result<Self> {
Ok(match s {
"keypair" => Self::Keypair,
"blobs.v0" => Self::BaoFlatStoreComplete,
"blobs-partial.v0" => Self::BaoFlatStorePartial,
_ => bail!("unknown file or directory"),
})
}
}
impl fmt::Display for IrohPaths {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
let s: &str = self.into();
write!(f, "{s}")
}
}
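// Round-trip sketch (illustrative only): Display and FromStr invert each other
// for the known variants, which the test module at the bottom relies on.
#[allow(dead_code)]
fn example_roundtrip() -> Result<()> {
let parsed: IrohPaths = "blobs.v0".parse()?;
assert_eq!(parsed.to_string(), "blobs.v0");
Ok(())
}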
impl AsRef<Path> for IrohPaths {
fn as_ref(&self) -> &Path {
let s: &str = self.into();
Path::new(s)
}
}
impl IrohPaths {
/// Get the path for this [`IrohPaths`] by joining the name to the root from the `IROH_DATA_DIR` environment variable (or the platform default).
pub fn with_env(self) -> Result<PathBuf> {
let mut root = iroh_data_root()?;
if !root.is_absolute() {
root = std::env::current_dir()?.join(root);
}
Ok(self.with_root(root))
}
/// Get the path for this [`IrohPaths`] by joining the name to a root directory.
pub fn with_root(self, root: impl AsRef<Path>) -> PathBuf {
root.as_ref().join(self)
}
}
/// The configuration for the iroh cli.
#[derive(PartialEq, Eq, Debug, Deserialize, Serialize, Clone)]
#[serde(default)]
pub struct Config {
/// The regions for DERP to use.
pub derp_regions: Vec<DerpRegion>,
}
impl Default for Config {
fn default() -> Self {
Self {
// TODO(ramfox): this should probably just be a derp map
derp_regions: [default_na_derp_region(), default_eu_derp_region()].into(),
}
}
}
impl Config {
/// Make a config using a default, files, environment variables, and commandline flags.
///
/// Later items in the *file_paths* slice will have a higher priority than earlier ones.
///
/// Environment variables are expected to start with the *env_prefix*. Nested fields can be
/// accessed using `.`, if your environment allows env vars with `.` in them.
///
/// Note: For the metrics configuration env vars, it is recommended to use the metrics
/// specific prefix `IROH_METRICS` to set a field in the metrics config. You can use the
/// above dot notation to set a metrics field, e.g., `IROH_CONFIG_METRICS.SERVICE_NAME`, but
/// only if your environment allows it.
pub fn load<S, V>(
file_paths: &[Option<&Path>],
env_prefix: &str,
flag_overrides: HashMap<S, V>,
) -> Result<Config>
where
S: AsRef<str>,
V: Into<Value>,
{
let mut builder = config::Config::builder();
// layer on config options from files
for path in file_paths.iter().flatten() {
if path.exists() {
let p = path.to_str().ok_or_else(|| anyhow::anyhow!("config path is not valid UTF-8"))?;
builder = builder.add_source(File::with_name(p));
}
}
// next, add any environment variables
builder = builder.add_source(
Environment::with_prefix(env_prefix)
.separator("__")
.try_parsing(true),
);
// finally, override any values
for (flag, val) in flag_overrides.into_iter() {
builder = builder.set_override(flag, val)?;
}
let cfg = builder.build()?;
debug!("make_config:\n{:#?}\n", cfg);
let cfg = cfg.try_deserialize()?;
Ok(cfg)
}
/// Constructs a `DerpMap` based on the current configuration.
pub fn derp_map(&self) -> Option<DerpMap> {
if self.derp_regions.is_empty() {
return None;
}
let dm: DerpMap = self.derp_regions.iter().cloned().into();
Some(dm)
}
}
/// Name of directory that wraps all iroh files in a given application directory
const IROH_DIR: &str = "iroh";
/// Returns the path to the user's iroh config directory.
///
/// If the `IROH_CONFIG_DIR` environment variable is set it will be used unconditionally.
/// Otherwise the returned value depends on the operating system according to the following
/// table.
///
/// | Platform | Value | Example |
/// | -------- | ------------------------------------- | -------------------------------- |
/// | Linux | `$XDG_CONFIG_HOME`/iroh or `$HOME`/.config/iroh | /home/alice/.config/iroh |
/// | macOS | `$HOME`/Library/Application Support/iroh | /Users/Alice/Library/Application Support/iroh |
/// | Windows | `{FOLDERID_RoamingAppData}`/iroh | C:\Users\Alice\AppData\Roaming\iroh |
pub fn iroh_config_root() -> Result<PathBuf> {
if let Some(val) = env::var_os("IROH_CONFIG_DIR") {
return Ok(PathBuf::from(val));
}
let cfg = dirs_next::config_dir()
.ok_or_else(|| anyhow!("operating environment provides no directory for configuration"))?;
Ok(cfg.join(IROH_DIR))
}
/// Path that leads to a file in the iroh config directory.
pub fn iroh_config_path(file_name: impl AsRef<Path>) -> Result<PathBuf> {
let path = iroh_config_root()?.join(file_name);
Ok(path)
}
/// Returns the path to the user's iroh data directory.
///
/// If the `IROH_DATA_DIR` environment variable is set it will be used unconditionally.
/// Otherwise the returned value depends on the operating system according to the following
/// table.
///
/// | Platform | Value | Example |
/// | -------- | --------------------------------------------- | ---------------------------------------- |
/// | Linux | `$XDG_DATA_HOME`/iroh or `$HOME`/.local/share/iroh | /home/alice/.local/share/iroh |
/// | macOS | `$HOME`/Library/Application Support/iroh | /Users/Alice/Library/Application Support/iroh |
/// | Windows | `{FOLDERID_RoamingAppData}/iroh` | C:\Users\Alice\AppData\Roaming\iroh |
pub fn iroh_data_root() -> Result<PathBuf> {
if let Some(val) = env::var_os("IROH_DATA_DIR") {
return Ok(PathBuf::from(val));
}
let path = dirs_next::data_dir().ok_or_else(|| {
anyhow!("operating environment provides no directory for application data")
})?;
Ok(path.join(IROH_DIR))
}
/// Path that leads to a file in the iroh data directory.
#[allow(dead_code)]
pub fn iroh_data_path(file_name: &Path) -> Result<PathBuf> {
let path = iroh_data_root()?.join(file_name);
Ok(path)
}
/// Returns the path to the user's iroh cache directory.
///
/// If the `IROH_CACHE_DIR` environment variable is set it will be used unconditionally.
/// Otherwise the returned value depends on the operating system according to the following
/// table.
///
/// | Platform | Value | Example |
/// | -------- | --------------------------------------------- | ---------------------------------------- |
/// | Linux | `$XDG_CACHE_HOME`/iroh or `$HOME`/.cache/iroh | /home/alice/.cache/iroh |
/// | macOS | `$HOME`/Library/Caches/iroh | /Users/Alice/Library/Caches/iroh |
/// | Windows | `{FOLDERID_LocalAppData}`/iroh | C:\Users\Alice\AppData\Local\iroh |
#[allow(dead_code)]
pub fn iroh_cache_root() -> Result<PathBuf> {
if let Some(val) = env::var_os("IROH_CACHE_DIR") {
return Ok(PathBuf::from(val));
}
let path = dirs_next::cache_dir().ok_or_else(|| {
anyhow!("operating environment provides no directory for application data")
})?;
Ok(path.join(IROH_DIR))
}
/// Path that leads to a file in the iroh cache directory.
#[allow(dead_code)]
pub fn iroh_cache_path(file_name: &Path) -> Result<PathBuf> {
let path = iroh_cache_root()?.join(file_name);
Ok(path)
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_default_settings() {
let config = Config::load::<String, String>(&[][..], "__FOO", Default::default()).unwrap();
assert_eq!(config.derp_regions.len(), 2);
}
#[test]
fn test_iroh_paths_parse_roundtrip() {
let kinds = [
IrohPaths::BaoFlatStoreComplete,
IrohPaths::BaoFlatStorePartial,
IrohPaths::Keypair,
];
for iroh_path in &kinds {
let root = PathBuf::from("/tmp");
let path = root.join(iroh_path);
let fname = path.file_name().unwrap().to_str().unwrap();
let parsed = IrohPaths::from_str(fname).unwrap();
assert_eq!(*iroh_path, parsed);
}
}
} | IrohPaths::Keypair => "keypair",
IrohPaths::BaoFlatStoreComplete => "blobs.v0",
IrohPaths::BaoFlatStorePartial => "blobs-partial.v0",
} | random_line_split |
UAGS.py |
#random test for VS2010
import glob, platform
from urllib.request import *
import ssl
import os
from bs4 import BeautifulSoup
from uags.UAGS_Functions import *
from uags.UAGS_oagd import *
class bc | HEADER = '\033[95m'
OKBLUE = '\033[94m'
OKGREEN = '\033[92m'
WARNING = '\033[93m'
FAIL = '\033[91m'
ENDC = '\033[0m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
def disable(self):
self.HEADER = ''
self.OKBLUE = ''
self.OKGREEN = ''
self.WARNING = ''
self.FAIL = ''
self.ENDC = ''
## main section starting here...
print()
print(bcolors.BOLD + bcolors.OKBLUE + "HoraceAndTheSpider" + bcolors.ENDC + "'s " + "openretro.org " + bcolors.BOLD + "UAE4Arm Amiga Game Scraper" + bcolors.ENDC + " | " + bcolors.FAIL + "www.ultimateamiga.co.uk" + bcolors.ENDC)
print()
## check for overwrite of existing entries
NewScrapes = input("Scrape existing game entries? " + bcolors.OKBLUE + "(y/n) " + bcolors.ENDC)
if NewScrapes != "yes" and NewScrapes != "y" and NewScrapes != "Y" and NewScrapes != "YES":
NewScrapes = "n"
print("Existing game entries will " + bcolors.BOLD + "not" + bcolors.ENDC + " be scraped.")
else:
NewScrapes = "y"
print("All found game entries will be scraped.")
print()
## allow a filter to be used
ScanFilter = input("Limit scanned files to a specific pattern match? " + bcolors.OKBLUE + "(Enter pattern or leave blank) " + bcolors.ENDC)
print()
## check for overwrite of existing images
NewImages = input("Overwrite existing images, such as in " + bcolors.BOLD + "boxarts/" + bcolors.ENDC + " ? " + bcolors.OKBLUE + "(y/n) " + bcolors.ENDC)
if NewImages != "yes" and NewImages != "y" and NewImages != "Y" and NewImages != "YES":
NewImages = "n"
print("Existing images will " + bcolors.BOLD + "not" + bcolors.ENDC + " be overwritten.")
else:
NewImages = "y"
print("Existing images will be overwritten.")
print()
# Check for saving bonus material images
AllImages = input ("Store additional images to " + bcolors.BOLD + "snap/" + bcolors.ENDC + " and" + bcolors.BOLD + " wheel/" + bcolors.ENDC + " ? " + bcolors.OKBLUE + "(y/n) "+ bcolors.ENDC )
if AllImages != "yes" and AllImages != "y" and AllImages != "Y" and AllImages != "YES":
AllImages = "n"
print ("Additional images will " + bcolors.BOLD + "not"+ bcolors.ENDC + " be stored.")
else:
AllImages = "y"
print ("Additional images will be stored.")
print()
## initialisations
ScannedGames = 0
LimitResults = 0
##ScanFilter = "Five"
XML = ""
ExitButton = False
KeyRead = 0
ErrorMessage = ""
try:
ssl._create_default_https_context = ssl._create_unverified_context
except:
pass
## -------- input dir
if platform.system()=="Darwin":
inputdir="/Volumes/roms-1/amiga/"
## inputdir = "/Users/horaceandthespider/Documents/Gaming/AmigaWHD/WorkingFolder2/ECS Pack/"
## -------- I SEE YOU AINGER! o_o
elif platform.node()=="RAVEN":
inputdir="C:\\Users\\oaing\\Desktop\\whdload\\"
else:
inputdir="//home/pi/RetroPie/roms/amiga/"
# paths/folders if needed
os.makedirs(inputdir + "boxart", exist_ok=True)
if AllImages == "y":
os.makedirs (inputdir + "wheel", exist_ok=True)
os.makedirs (inputdir + "snap", exist_ok=True)
# here, we will open an existing XML, *or* create one.
XML_File = inputdir + "gamelist.xml"
if (os.path.isfile(XML_File)) == False:
XML = '<?xml version="1.0"?>\n<gameList>\n'
XML = XML + "</gameList>"
text_file = open(XML_File, "w")
text_file.write(XML)
text_file.close()
text_file = open(XML_File, "r")
XML = text_file.read()
text_file.close()
## check XML validity
if XML.find("?xml version=")<0 or XML.find("<gameList>")<0 or XML.find("</gameList>")<0:
print (bcolors.FAIL + ">> XML File "+ bcolors.BOLD + XML_File + bcolors.ENDC + bcolors.FAIL + " is malformed." + bcolors.ENDC )
KillXML = input ("Delete file prior to restart? (y/n) "+ bcolors.ENDC )
if KillXML != "yes" and KillXML != "y" and KillXML != "Y" and KillXML != "YES":
raise SystemExit
else:
print("Deleting 'gamelist.xml'")
os.remove(XML_File)
raise SystemExit
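## (for reference, the skeleton created above is:
##    <?xml version="1.0"?>
##    <gameList>
##      <game> ... </game>
##    </gameList>
##  new entries are spliced in just before </gameList> later on)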
## ======== MAIN FILE READING LOOP
for filename in glob.glob(inputdir+'*.uae'):
ScannedGames = ScannedGames + 1
## filename = "Bloodwych (& Extended Levels).uae"
GameVariant = ""
GameEntry = ""
# Get the name, and remove extension and path
GameName = filename
GameName = GameName.replace ('.uae','')
GameName = GameName.replace (inputdir,'')
RealName = filename
RealName = RealName.replace (inputdir,'')
# finally, this filter should work...
if GameName == "_Config Maker":
print("Scraping data for " + bcolors.OKGREEN + GameName + bcolors.ENDC + " from external source.")
GameEntry = MakeGameEntry(RealName,'','','',AllImages)
ErrorMessage = ErrorMessage + GetPictures(RealName,"",AllImages,NewImages,inputdir)
elif GameName.find(ScanFilter) == -1 and ScanFilter != '':
print("Filter applied. Entry for " + bcolors.OKGREEN + GameName + bcolors.ENDC + " skipped.")
# so, if it's aready in there, and we are *not* scraping everything
elif NewScrapes=="n" and XML.find("<path>./" + RealName + "</path>") > -1:
print("Existing entry for " + bcolors.OKGREEN + GameName + bcolors.ENDC + " - skipping.")
## OMG i cannot believe i just 'tabbed' *everything in the loop for this...
else:
# Find the game type
## i think i can remove lots of these now.... (1,2,4 and 5)
if GameName.find(' [AGA]')>0:
GameType = 'AGA'
SearchType = 'amiga'
GameName = GameName.replace (' [AGA]','')
elif GameName.find(' [CD32]')>0:
GameType = 'CD32'
SearchType = 'cd32'
GameName = GameName.replace (' [CD32]','')
elif GameName.find(' [CDTV]')>0:
GameType = 'CDTV'
SearchType = 'cdtv'
GameName = GameName.replace (' [CDTV]','')
elif GameName.find(' [CD]')>0:
GameType = 'AGA'
SearchType = 'amiga'
GameName = GameName.replace (' [CD]','')
else:
GameType = 'ECS'
SearchType = 'amiga'
# Tidy the spaces etc for the search string
ParseName = GameName.replace('-','')
ParseName = ParseName.replace('&','%26')
ParseName = ParseName.replace('+','%2B')
ParseName = ParseName.replace(' ','+')
ParseName = ParseName.replace('++','+')
## print ("Searched for " + ParseName)
print ()
print (bcolors.OKBLUE + str(ScannedGames) + bcolors.ENDC + ": Searching for: " + bcolors.BOLD + GameName + bcolors.ENDC + " (" + ParseName+") " + GameType)
print (bcolors.HEADER + " " + filename + bcolors.ENDC)
print ()
## lets search the database!
FindLink = ""
NewParseName = ""
### Here, we can do a loop
## we will 'break' if we find a link though
## pass 1, search as normal
## pass 2, search with '(' ')' removed ... for games like Cannon Fodder (New Campaign)
## pass 3, search with '[' ']' removed ... for games where a name is shared
## pass 4, search with brackets omitted completely and the extra bits stored for later use .. in GameVariant
## ...
## pass 9, ???
## pass 10, profit!
## if we *didn't* find anything, we re-try, with anything in brackets removed (alternative versions etc)
## -- we will also store the brackets information to put in the XML description (to show different versions apart)
##import re
##re.sub(r'\s\(.*\)', '', "Lemmings (2 Disk)")
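## illustrative only: pass 4 below is roughly what the commented re.sub above does,
##   e.g. re.sub(r'\s*\(.*?\)', '', 'Lemmings (2 Disk)') -> 'Lemmings'
## but GetBrackets() is used instead, so the stripped text can be kept in
## GameVariant and written into the XML description later.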
for Pass in range(1,5):
# special 'pass' rules
if Pass==1:
NewParseName = ParseName
elif Pass==2:
NewParseName = ParseName
NewParseName = NewParseName.replace(')','')
NewParseName = NewParseName.replace('(','')
elif Pass==3:
NewParseName = ParseName
NewParseName = NewParseName.replace('[','')
NewParseName = NewParseName.replace(']','')
elif Pass==4:
NewParseName = ParseName
NewParseName = NewParseName.replace(GetBrackets(ParseName),'').strip()
GameVariant = GetBrackets(GameName)
## here we do the actual searches
## first of all, we have a special rule for AGA games, because they are a pain in the b*m
if GameType == 'AGA':
SearchString = 'https://openretro.org/browse/'+SearchType+'?q=' + NewParseName + "+[AGA]"+"&disabled=1&unpublished=1"
FindLink,FindImage,FindGame = WebSearchResult(SearchString,GameName,GameType,Pass)
## for everything else, we had no result, and/or we didnt select x/s, we will behave 'normally'
if FindLink=="":
SearchString = 'https://openretro.org/browse/'+SearchType+'?q=' + NewParseName + "&disabled=1&unpublished=1"
FindLink,FindImage,FindGame = WebSearchResult(SearchString,GameName,GameType,Pass)
## with these multiple searches, i may need a 'continue' option
if FindLink =="c":
FindLink = ""
if FindLink != "":
break
## check for abort
if FindLink == "x":
FindLink = ""
temp = ">> " + str(GameName) + " aborted. no single page selected, and scraping ended."
print (bcolors.FAIL + temp + bcolors.ENDC)
ErrorMessage = ErrorMessage + str(RealName) + "\t aborted. no single page selected, and scraping ended.\n"
break
elif FindLink == "s":
FindLink = ""
## temp = ">> " + str(GameName) + " aborted. no single page selected, user skipped."
## print (bcolors.FAIL + temp + bcolors.ENDC)
## ErrorMessage = ErrorMessage + str(RealName) + "\t aborted. no single page selected, user skipped..\n"
## after all that, we still havent found a link.
if FindLink=="":
temp = ">> " + str(GameName) + " skipped. no single page selected."
print()
ErrorMessage = ErrorMessage + str(RealName) + "\t skipped. no single page selected.\n"
## game=str(game.encode('utf-8'), 'utf-8').replace("Cover for ","")
print (bcolors.FAIL + temp + bcolors.ENDC)
else:
## resolve the link into a single string
FindLink = "https://openretro.org" + FindLink + "/edit"
WebString = ""
f = urlopen(FindLink)  # urlopen comes from the star import of urllib.request above
WebString = f.read()
f.close()
# ==== create individual game XML based on reading from the above cached page
## see UAGS_oagd.py
GameEntry = MakeGameEntry(RealName,GameVariant,GameType,WebString,AllImages)
## ========== do the image downloads
ErrorMessage = ErrorMessage + GetPictures(RealName,WebString,AllImages,NewImages,inputdir)
## remove any previous game entry (if overwrite is on)
if GameEntry != '':
if NewScrapes=="y" and XML.find(RealName) > -1:
print(" Removing existing entry for " + bcolors.OKBLUE + RealName + bcolors.ENDC +".")
print()
OldGameEntry = FindGameTagEntry(XML,RealName,"<game>")
XML = XML.replace(OldGameEntry,"")
## adds the game-entry
XML = XML.replace("</gameList>",GameEntry +"</gameList>")
print (bcolors.OKGREEN + ">> " + filename + " scraped." + bcolors.ENDC)
print ()
## save out the file(s)
## we are done!! let's create the new XML
print (bcolors.OKGREEN + ">> Updating "+ bcolors.BOLD + "gamelist.xml" + bcolors.ENDC )
text_file = open(XML_File, "w")
text_file.write(XML)
text_file.close()
print()
## special code for testing only!
if ScannedGames > LimitResults-1 and LimitResults != 0:
break
#### we are done!! let's create the new XML
##print()
##print (bcolors.OKGREEN + ">> Generating "+ bcolors.BOLD + "gamelist.xml" + bcolors.ENDC )
if ErrorMessage != "":
ErrorMessage = "The following errors occured during scraping:" + "\n\n" + ErrorMessage
print (bcolors.FAIL + ">> Generating "+ bcolors.BOLD + "errorlist.txt" + bcolors.ENDC )
text_file = open(inputdir + "errorlist.txt", "w")
text_file.write(ErrorMessage)
text_file.close()
print()
raise SystemExit
| olors:
| identifier_name |
UAGS.py |
#random test for VS2010
import glob, platform
from urllib.request import *
import ssl
import os
from bs4 import BeautifulSoup
from uags.UAGS_Functions import *
from uags.UAGS_oagd import *
class bcolors:
HEADER = '\033[95m'
OKBLUE = '\033[94m'
OKGREEN = '\033[92m'
WARNING = '\033[93m'
FAIL = '\033[91m'
ENDC = '\033[0m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
def disable(self):
self.HEADER = ''
self.OKBLUE = ''
self.OKGREEN = ''
self.WARNING = ''
self.FAIL = ''
self.ENDC = ''
## main section starting here...
print()
print(bcolors.BOLD + bcolors.OKBLUE + "HoraceAndTheSpider" + bcolors.ENDC + "'s " + "openretro.org " + bcolors.BOLD + "UAE4Arm Amiga Game Scraper" + bcolors.ENDC + " | " + bcolors.FAIL + "www.ultimateamiga.co.uk" + bcolors.ENDC)
print()
## check for overwrite of existing entries
NewScrapes = input("Scrape existing game entries? " + bcolors.OKBLUE + "(y/n) " + bcolors.ENDC)
if NewScrapes != "yes" and NewScrapes != "y" and NewScrapes != "Y" and NewScrapes != "YES":
NewScrapes = "n"
print("Existing game entries will " + bcolors.BOLD + "not" + bcolors.ENDC + " be scraped.")
else:
NewScrapes = "y"
print("All found game entries will be scraped.")
print()
## allow a filter to be used
ScanFilter = input("Limit scanned files to a specific pattern match? " + bcolors.OKBLUE + "(Enter pattern or leave blank) " + bcolors.ENDC)
print()
## check for overwrite of existing images
NewImages = input("Overwrite existing images, such as in " + bcolors.BOLD + "boxarts/" + bcolors.ENDC + " ? " + bcolors.OKBLUE + "(y/n) " + bcolors.ENDC)
if NewImages != "yes" and NewImages != "y" and NewImages != "Y" and NewImages != "YES":
NewImages = "n"
print("Existing images will " + bcolors.BOLD + "not" + bcolors.ENDC + " be overwritten.")
else:
NewImages = "y"
print("Existing images will be overwritten.")
print()
# Check for saving bonus material images
AllImages = input ("Store additional images to " + bcolors.BOLD + "snap/" + bcolors.ENDC + " and" + bcolors.BOLD + " wheel/" + bcolors.ENDC + " ? " + bcolors.OKBLUE + "(y/n) "+ bcolors.ENDC )
if AllImages != "yes" and AllImages != "y" and AllImages != "Y" and AllImages != "YES":
AllImages = "n"
print ("Additional images will " + bcolors.BOLD + "not"+ bcolors.ENDC + " be stored.")
else:
AllImages = "y"
print ("Additional images will be stored.")
print()
## initialisations
ScannedGames = 0
LimitResults = 0
##ScanFilter = "Five"
XML = ""
ExitButton = False
KeyRead = 0
ErrorMessage = ""
try:
ssl._create_default_https_context = ssl._create_unverified_context
except:
pass
## -------- input dir
if platform.system()=="Darwin":
inputdir="/Volumes/roms-1/amiga/"
## inputdir = "/Users/horaceandthespider/Documents/Gaming/AmigaWHD/WorkingFolder2/ECS Pack/"
## -------- I SEE YOU AINGER! o_o
elif platform.node()=="RAVEN":
inputdir="C:\\Users\\oaing\\Desktop\\whdload\\"
else:
inputdir="//home/pi/RetroPie/roms/amiga/"
# paths/folders if needed
os.makedirs(inputdir + "boxart", exist_ok=True)
if AllImages == "y":
os.makedirs (inputdir + "wheel", exist_ok=True)
os.makedirs (inputdir + "snap", exist_ok=True)
# here, we will open an existing XML, *or* create one.
XML_File = inputdir + "gamelist.xml"
if (os.path.isfile(XML_File)) == False:
XML = '<?xml version="1.0"?>\n<gameList>\n'
XML = XML + "</gameList>"
text_file = open(XML_File, "w")
text_file.write(XML)
text_file.close()
| ## check XML validity
if XML.find("?xml version=")<0 or XML.find("<gameList>")<0 or XML.find("</gameList>")<0:
print (bcolors.FAIL + ">> XML File "+ bcolors.BOLD + XML_File + bcolors.ENDC + bcolors.FAIL + " is malformed." + bcolors.ENDC )
KillXML = input ("Delete file prior to restart? (y/n) "+ bcolors.ENDC )
if KillXML != "yes" and KillXML != "y" and KillXML != "Y" and KillXML != "YES":
raise SystemExit
else:
print("Deleting 'gamelist.xml'")
os.remove(XML_File)
raise SystemExit
## ======== MAIN FILE READING LOOP
for filename in glob.glob(inputdir+'*.uae'):
ScannedGames = ScannedGames + 1
## filename = "Bloodwych (& Extended Levels).uae"
GameVariant = ""
GameEntry = ""
# Get the name, and remove extension and path
GameName = filename
GameName = GameName.replace ('.uae','')
GameName = GameName.replace (inputdir,'')
RealName = filename
RealName = RealName.replace (inputdir,'')
# finally, this filter should work...
if GameName == "_Config Maker":
print("Scraping data for " + bcolors.OKGREEN + GameName + bcolors.ENDC + " from external source.")
GameEntry = MakeGameEntry(RealName,'','','',AllImages)
ErrorMessage = ErrorMessage + GetPictures(RealName,"",AllImages,NewImages,inputdir)
elif GameName.find(ScanFilter) == -1 and ScanFilter != '':
print("Filter applied. Entry for " + bcolors.OKGREEN + GameName + bcolors.ENDC + " skipped.")
# so, if it's aready in there, and we are *not* scraping everything
elif NewScrapes=="n" and XML.find("<path>./" + RealName + "</path>") > -1:
print("Existing entry for " + bcolors.OKGREEN + GameName + bcolors.ENDC + " - skipping.")
## OMG i cannot believe i just 'tabbed' *everything in the loop for this...
else:
# Find the game type
## i think i can remove lots of these now.... (1,2,4 and 5)
if GameName.find(' [AGA]')>0:
GameType = 'AGA'
SearchType = 'amiga'
GameName = GameName.replace (' [AGA]','')
elif GameName.find(' [CD32]')>0:
GameType = 'CD32'
SearchType = 'cd32'
GameName = GameName.replace (' [CD32]','')
elif GameName.find(' [CDTV]')>0:
GameType = 'CDTV'
SearchType = 'cdtv'
GameName = GameName.replace (' [CDTV]','')
elif GameName.find(' [CD]')>0:
GameType = 'AGA'
SearchType = 'amiga'
GameName = GameName.replace (' [CD]','')
else:
GameType = 'ECS'
SearchType = 'amiga'
# Tidy the spaces etc for the search string
ParseName = GameName.replace('-','')
ParseName = ParseName.replace('&','%26')
ParseName = ParseName.replace('+','%2B')
ParseName = ParseName.replace(' ','+')
ParseName = ParseName.replace('++','+')
## print ("Searched for " + ParseName)
print ()
print (bcolors.OKBLUE + str(ScannedGames) + bcolors.ENDC + ": Searching for: " + bcolors.BOLD + GameName + bcolors.ENDC + " (" + ParseName+") " + GameType)
print (bcolors.HEADER + " " + filename + bcolors.ENDC)
print ()
## lets search the database!
FindLink = ""
NewParseName = ""
### Here, we can do a loop
## we will 'break' if we find a link though
## pass 1, search as normal
## pass 2, search with '(' ')' removed ... for games like Cannon Fodder (New Campaign)
## pass 3, search with '[' ']' removed ... for games where a name is shared
## pass 4, search with brackets omitted completely and the extra bits stored for later use .. in GameVariant
## ...
## pass 9, ???
## pass 10, profit!
## if we *didn't* find anything, we re-try, with anything in brackets removed (alternative versions etc)
## -- we will also store the brackets information to put in the XML description (to show different versions apart)
##import re
##re.sub(r'\s\(.*\)', '', "Lemmings (2 Disk)")
for Pass in range(1,5):
# special 'pass' rules
if Pass==1:
NewParseName = ParseName
elif Pass==2:
NewParseName = ParseName
NewParseName = NewParseName.replace(')','')
NewParseName = NewParseName.replace('(','')
elif Pass==3:
NewParseName = ParseName
NewParseName = NewParseName.replace('[','')
NewParseName = NewParseName.replace(']','')
elif Pass==4:
NewParseName = ParseName
NewParseName = NewParseName.replace(GetBrackets(ParseName),'').strip()
GameVariant = GetBrackets(GameName)
## here we do the actual searches
## first of all, we have a special rule for AGA games, because they are a pain in the b*m
if GameType == 'AGA':
SearchString = 'https://openretro.org/browse/'+SearchType+'?q=' + NewParseName + "+[AGA]"+"&disabled=1&unpublished=1"
FindLink,FindImage,FindGame = WebSearchResult(SearchString,GameName,GameType,Pass)
## for everything else, we had no result, and/or we didnt select x/s, we will behave 'normally'
if FindLink=="":
SearchString = 'https://openretro.org/browse/'+SearchType+'?q=' + NewParseName + "&disabled=1&unpublished=1"
FindLink,FindImage,FindGame = WebSearchResult(SearchString,GameName,GameType,Pass)
## with these multiple searches, i may need a 'continue' option
if FindLink =="c":
FindLink = ""
if FindLink != "":
break
## check for abort
if FindLink == "x":
FindLink = ""
temp = ">> " + str(GameName) + " aborted. no single page selected, and scraping ended."
print (bcolors.FAIL + temp + bcolors.ENDC)
ErrorMessage = ErrorMessage + str(RealName) + "\t aborted. no single page selected, and scraping ended.\n"
break
elif FindLink == "s":
FindLink = ""
## temp = ">> " + str(GameName) + " aborted. no single page selected, user skipped."
## print (bcolors.FAIL + temp + bcolors.ENDC)
## ErrorMessage = ErrorMessage + str(RealName) + "\t aborted. no single page selected, user skipped..\n"
## after all that, we still havent found a link.
if FindLink=="":
temp = ">> " + str(GameName) + " skipped. no single page selected."
print()
ErrorMessage = ErrorMessage + str(RealName) + "\t skipped. no single page selected.\n"
## game=str(game.encode('utf-8'), 'utf-8').replace("Cover for ","")
print (bcolors.FAIL + temp + bcolors.ENDC)
else:
## resolve the link into a single string
FindLink = "https://openretro.org" + FindLink + "/edit"
WebString = ""
f = urlopen(FindLink)  # urlopen comes from the star import of urllib.request above
WebString = f.read()
f.close()
# ==== create individual game XML based on reading from the above cached page
## see UAGS_oagd.py
GameEntry = MakeGameEntry(RealName,GameVariant,GameType,WebString,AllImages)
## ========== do the image downloads
ErrorMessage = ErrorMessage + GetPictures(RealName,WebString,AllImages,NewImages,inputdir)
## remove any previous game entry (if overwrite is on)
if GameEntry != '':
if NewScrapes=="y" and XML.find(RealName) > -1:
print(" Removing existing entry for " + bcolors.OKBLUE + RealName + bcolors.ENDC +".")
print()
OldGameEntry = FindGameTagEntry(XML,RealName,"<game>")
XML = XML.replace(OldGameEntry,"")
## adds the game-entry
XML = XML.replace("</gameList>",GameEntry +"</gameList>")
print (bcolors.OKGREEN + ">> " + filename + " scraped." + bcolors.ENDC)
print ()
## save out the file(s)
## we are done!! let's create the new XML
print (bcolors.OKGREEN + ">> Updating "+ bcolors.BOLD + "gamelist.xml" + bcolors.ENDC )
text_file = open(XML_File, "w")
text_file.write(XML)
text_file.close()
print()
## special code for testing only!
if ScannedGames > LimitResults-1 and LimitResults != 0:
break
#### we are done!! let's create the new XML
##print()
##print (bcolors.OKGREEN + ">> Generating "+ bcolors.BOLD + "gamelist.xml" + bcolors.ENDC )
if ErrorMessage != "":
ErrorMessage = "The following errors occured during scraping:" + "\n\n" + ErrorMessage
print (bcolors.FAIL + ">> Generating "+ bcolors.BOLD + "errorlist.txt" + bcolors.ENDC )
text_file = open(inputdir + "errorlist.txt", "w")
text_file.write(ErrorMessage)
text_file.close()
print()
raise SystemExit | text_file = open(XML_File, "r")
XML = text_file.read()
text_file.close()
| random_line_split |
UAGS.py |
#random test for VS2010
import glob, platform
from urllib.request import *
import ssl
import os
from bs4 import BeautifulSoup
from uags.UAGS_Functions import *
from uags.UAGS_oagd import *
class bcolors:
HEADER = '\033[95m'
OKBLUE = '\033[94m'
OKGREEN = '\033[92m'
WARNING = '\033[93m'
FAIL = '\033[91m'
ENDC = '\033[0m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
def disable(self):
self.HEADER = ''
self.OKBLUE = ''
self.OKGREEN = ''
self.WARNING = ''
self.FAIL = ''
self.ENDC = ''
## main section starting here...
print()
print(bcolors.BOLD + bcolors.OKBLUE + "HoraceAndTheSpider" + bcolors.ENDC + "'s " + "openretro.org " + bcolors.BOLD + "UAE4Arm Amiga Game Scraper" + bcolors.ENDC + " | " + bcolors.FAIL + "www.ultimateamiga.co.uk" + bcolors.ENDC)
print()
## check for overwrite of existing entries
NewScrapes = input("Scrape existing game entries? " + bcolors.OKBLUE + "(y/n) " + bcolors.ENDC)
if NewScrapes != "yes" and NewScrapes != "y" and NewScrapes != "Y" and NewScrapes != "YES":
NewScrapes = "n"
print("Existing game entries will " + bcolors.BOLD + "not" + bcolors.ENDC + " be scraped.")
else:
Ne |
print()
## allow a filter to be used
ScanFilter = input("Limit scanned files to a specific pattern match? " + bcolors.OKBLUE + "(Enter pattern or leave blank) " + bcolors.ENDC)
print()
## check for overwrite of existing images
NewImages = input("Overwrite existing images, such as in " + bcolors.BOLD + "boxarts/" + bcolors.ENDC + " ? " + bcolors.OKBLUE + "(y/n) " + bcolors.ENDC)
if NewImages != "yes" and NewImages != "y" and NewImages != "Y" and NewImages != "YES":
NewImages = "n"
print("Existing images will " + bcolors.BOLD + "not" + bcolors.ENDC + " be overwritten.")
else:
NewImages = "y"
print("Existing images will be overwritten.")
print()
# Check for saving bonus material images
AllImages = input ("Store additional images to " + bcolors.BOLD + "snap/" + bcolors.ENDC + " and" + bcolors.BOLD + " wheel/" + bcolors.ENDC + " ? " + bcolors.OKBLUE + "(y/n) "+ bcolors.ENDC )
if AllImages != "yes" and AllImages != "y" and AllImages != "Y" and AllImages != "YES":
AllImages = "n"
print ("Additional images will " + bcolors.BOLD + "not"+ bcolors.ENDC + " be stored.")
else:
AllImages = "y"
print ("Additional images will be stored.")
print()
## initialisations
ScannedGames = 0
LimitResults = 0
##ScanFilter = "Five"
XML = ""
ExitButton = False
KeyRead = 0
ErrorMessage = ""
try:
ssl._create_default_https_context = ssl._create_unverified_context
except:
pass
## -------- input dir
if platform.system()=="Darwin":
inputdir="/Volumes/roms-1/amiga/"
## inputdir = "/Users/horaceandthespider/Documents/Gaming/AmigaWHD/WorkingFolder2/ECS Pack/"
## -------- I SEE YOU AINGER! o_o
elif platform.node()=="RAVEN":
inputdir="C:\\Users\\oaing\\Desktop\\whdload\\"
else:
inputdir="//home/pi/RetroPie/roms/amiga/"
# paths/folders if needed
os.makedirs(inputdir + "boxart", exist_ok=True)
if AllImages == "y":
os.makedirs (inputdir + "wheel", exist_ok=True)
os.makedirs (inputdir + "snap", exist_ok=True)
# here, we will open an existing XML, *or* create one.
XML_File = inputdir + "gamelist.xml"
if (os.path.isfile(XML_File)) == False:
XML = '<?xml version="1.0"?>\n<gameList>\n'
XML = XML + "</gameList>"
text_file = open(XML_File, "w")
text_file.write(XML)
text_file.close()
text_file = open(XML_File, "r")
XML = text_file.read()
text_file.close()
## check XML validity
if XML.find("?xml version=")<0 or XML.find("<gameList>")<0 or XML.find("</gameList>")<0:
print (bcolors.FAIL + ">> XML File "+ bcolors.BOLD + XML_File + bcolors.ENDC + bcolors.FAIL + " is malformed." + bcolors.ENDC )
KillXML = input ("Delete file prior to restart? (y/n) "+ bcolors.ENDC )
if KillXML != "yes" and KillXML != "y" and KillXML != "Y" and KillXML != "YES":
raise SystemExit
else:
print("Deleting 'gamelist.xml'")
os.remove(XML_File)
raise SystemExit
## ======== MAIN FILE READING LOOP
for filename in glob.glob(inputdir+'*.uae'):
ScannedGames = ScannedGames + 1
## filename = "Bloodwych (& Extended Levels).uae"
GameVariant = ""
GameEntry = ""
# Get the name, and remove extension and path
GameName = filename
GameName = GameName.replace ('.uae','')
GameName = GameName.replace (inputdir,'')
RealName = filename
RealName = RealName.replace (inputdir,'')
# finally, this filter should work...
if GameName == "_Config Maker":
print("Scraping data for " + bcolors.OKGREEN + GameName + bcolors.ENDC + " from external source.")
GameEntry = MakeGameEntry(RealName,'','','',AllImages)
ErrorMessage = ErrorMessage + GetPictures(RealName,"",AllImages,NewImages,inputdir)
elif GameName.find(ScanFilter) == -1 and ScanFilter != '':
print("Filter applied. Entry for " + bcolors.OKGREEN + GameName + bcolors.ENDC + " skipped.")
# so, if it's already in there, and we are *not* scraping everything
elif NewScrapes=="n" and XML.find("<path>./" + RealName + "</path>") > -1:
print("Existing entry for " + bcolors.OKGREEN + GameName + bcolors.ENDC + " - skipping.")
## OMG i cannot believe i just 'tabbed' *everything* in the loop for this...
else:
# Find the game type
## i think i can remove lots of these now.... (1,2,4 and 5)
if GameName.find(' [AGA]')>0:
GameType = 'AGA'
SearchType = 'amiga'
GameName = GameName.replace (' [AGA]','')
elif GameName.find(' [CD32]')>0:
GameType = 'CD32'
SearchType = 'cd32'
GameName = GameName.replace (' [CD32]','')
elif GameName.find(' [CDTV]')>0:
GameType = 'CDTV'
SearchType = 'cdtv'
GameName = GameName.replace (' [CDTV]','')
elif GameName.find(' [CD]')>0:
GameType = 'AGA'
SearchType = 'amiga'
GameName = GameName.replace (' [CD]','')
else:
GameType = 'ECS'
SearchType = 'amiga'
# Tidy the spaces etc for the search string
ParseName = GameName.replace('-','')
ParseName = ParseName.replace('&','%26')
ParseName = ParseName.replace('+','%2B')
ParseName = ParseName.replace(' ','+')
ParseName = ParseName.replace('++','+')
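## (aside, illustration only) the standard library can do this escaping in one
## step, assuming plain form-style encoding is acceptable:
##   from urllib.parse import quote_plus
##   quote_plus("Cannon Fodder & More")  # -> 'Cannon+Fodder+%26+More'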
## print ("Searched for " + ParseName)
print ()
print (bcolors.OKBLUE + str(ScannedGames) + bcolors.ENDC + ": Searching for: " + bcolors.BOLD + GameName + bcolors.ENDC + " (" + ParseName+") " + GameType)
print (bcolors.HEADER + " " + filename + bcolors.ENDC)
print ()
## let's search the database!
FindLink = ""
NewParseName = ""
### Here, we can do a loop
## we will 'break' if we find a link though
## pass 1, search as normal
## pass 2, search with the round brackets '(' ')' stripped but contents kept ... for games like Cannon Fodder (New Campaign)
## pass 3, search with the square brackets '[' ']' stripped but contents kept ... for games where a name is shared
## pass 4, search with the bracketed part omitted completely and the extra bits stored for later use .. in GameVariant
## ...
## pass 9, ???
## pass 10, profit!
## if we *didn't* find anything, we re-try, with anything in brackets removed (alternative versions etc)
## -- we will also store the brackets information to put in the XML description (to show different versions apart)
##import re
##re.sub(r'\s\(.*\)', '', "Lemmings (2 Disk)")
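## (illustration only) GetBrackets comes from uags.UAGS_Functions; a minimal
## re-based stand-in, assuming it returns the first '(...)' chunk including the
## brackets (or '' when there is none), might look like:
##   import re
##   def get_brackets_demo(name):
##       match = re.search(r'\(.*?\)', name)
##       return match.group(0) if match else ''
##   get_brackets_demo('Lemmings (2 Disk)')  # -> '(2 Disk)'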
for Pass in range(1,5):
# special 'pass' rules
if Pass==1:
NewParseName = ParseName
elif Pass==2:
NewParseName = ParseName
NewParseName = NewParseName.replace(')','')
NewParseName = NewParseName.replace('(','')
elif Pass==3:
NewParseName = ParseName
NewParseName = NewParseName.replace('[','')
NewParseName = NewParseName.replace(']','')
elif Pass==4:
NewParseName = ParseName
NewParseName = NewParseName.replace(GetBrackets(ParseName),'').strip()
GameVariant = GetBrackets(GameName)
## here we do the actual searches
## first of all, we have a special rule for AGA games, because they are a pain in the b*m
if GameType == 'AGA':
SearchString = 'https://openretro.org/browse/'+SearchType+'?q=' + NewParseName + "+[AGA]"+"&disabled=1&unpublished=1"
FindLink,FindImage,FindGame = WebSearchResult(SearchString,GameName,GameType,Pass)
## for everything else, we had no result, and/or we didn't select x/s, we will behave 'normally'
if FindLink=="":
SearchString = 'https://openretro.org/browse/'+SearchType+'?q=' + NewParseName + "&disabled=1&unpublished=1"
FindLink,FindImage,FindGame = WebSearchResult(SearchString,GameName,GameType,Pass)
## with these multiple searches, i may need a 'continue' option
if FindLink =="c":
FindLink = ""
if FindLink != "":
break
## check for abort
if FindLink == "x":
FindLink = ""
temp = ">> " + str(GameName) + " aborted. no single page selected, and scraping ended."
print (bcolors.FAIL + temp + bcolors.ENDC)
ErrorMessage = ErrorMessage + str(RealName) + "\t aborted. no single page selected, and scraping ended.\n"
break
elif FindLink == "s":
FindLink = ""
## temp = ">> " + str(GameName) + " aborted. no single page selected, user skipped."
## print (bcolors.FAIL + temp + bcolors.ENDC)
## ErrorMessage = ErrorMessage + str(RealName) + "\t aborted. no single page selected, user skipped..\n"
## after all that, we still haven't found a link.
if FindLink=="":
temp = ">> " + str(GameName) + " skipped. no single page selected."
print()
ErrorMessage = ErrorMessage + str(RealName) + "\t skipped. no single page selected.\n"
## game=str(game.encode('utf-8'), 'utf-8').replace("Cover for ","")
print (bcolors.FAIL + temp + bcolors.ENDC)
else:
## resolve the link into a single string
FindLink = "https://openretro.org" + FindLink + "/edit"
WebString = ""
f = urllib.request.urlopen(FindLink)
WebString = f.read()
f.close()
# ==== create individual game XML based on reading from the above cached page
## see UAGS_oagd.py
GameEntry = MakeGameEntry(RealName,GameVariant,GameType,WebString,AllImages)
## ========== do the image downloads
ErrorMessage = ErrorMessage + GetPictures(RealName,WebString,AllImages,NewImages,inputdir)
## remove any previous game entry (if overwrite is on)
if GameEntry != '':
if NewScrapes=="y" and XML.find(RealName) > -1:
print(" Removing existing entry for " + bcolors.OKBLUE + RealName + bcolors.ENDC +".")
print()
OldGameEntry = FindGameTagEntry(XML,RealName,"<game>")
XML = XML.replace(OldGameEntry,"")
## adds the game-entry
XML = XML.replace("</gameList>",GameEntry +"</gameList>")
print (bcolors.OKGREEN + ">> " + filename + " scraped." + bcolors.ENDC)
print ()
## save out the file(s)
## we are done!! let's create the new XML
print (bcolors.OKGREEN + ">> Updating "+ bcolors.BOLD + "gamelist.xml" + bcolors.ENDC )
text_file = open(XML_File, "w")
text_file.write(XML)
text_file.close()
print()
## special code for testing only!
if ScannedGames > LimitResults-1 and LimitResults != 0:
break
#### we are done!! let's create the new XML
##print()
##print (bcolors.OKGREEN + ">> Generating "+ bcolors.BOLD + "gamelist.xml" + bcolors.ENDC )
if ErrorMessage != "":
ErrorMessage = "The following errors occured during scraping:" + "\n\n" + ErrorMessage
print (bcolors.FAIL + ">> Generating "+ bcolors.BOLD + "errorlist.txt" + bcolors.ENDC )
text_file = open(inputdir + "errorlist.txt", "w")
text_file.write(ErrorMessage)
text_file.close()
print()
raise SystemExit
| wScrapes = "y"
print("All found game entries will be scraped.")
| conditional_block |
UAGS.py |
#random test for VS2010
import glob, platform
from urllib.request import *
import ssl
import os
from bs4 import BeautifulSoup
from uags.UAGS_Functions import *
from uags.UAGS_oagd import *
class bcolors:
HE | ## main section starting here...
print()
print(bcolors.BOLD + bcolors.OKBLUE + "HoraceAndTheSpider" + bcolors.ENDC + "'s " + "openretro.org " + bcolors.BOLD + "UAE4Arm Amiga Game Scraper" + bcolors.ENDC + " | " + "" + bcolors.FAIL + "www.ultimateamiga.co.uk" + bcolors.ENDC)
print()
## check for overwrite of existing entries
NewScrapes = input("Scrape existing game entries? " + bcolors.OKBLUE + "(y/n) " + bcolors.ENDC)
if NewScrapes != "yes" and NewScrapes != "y" and NewScrapes != "Y" and NewScrapes != "YES":
NewScrapes = "n"
print("Existing game entries will " + bcolors.BOLD + "not" + bcolors.ENDC + " be scraped.")
else:
NewScrapes = "y"
print("All found game entries will be scraped.")
print()
## allow a filter to be used
ScanFilter = input("Limit scanned files to a specific pattern match? " + bcolors.OKBLUE + "(Enter pattern or leave blank) " + bcolors.ENDC)
print()
## check for overwrite of existing images
NewImages = input("Overwrite existing images, such as in " + bcolors.BOLD + "boxarts/" + bcolors.ENDC + " ? " + bcolors.OKBLUE + "(y/n) " + bcolors.ENDC)
if NewImages != "yes" and NewImages != "y" and NewImages != "Y" and NewImages != "YES":
NewImages = "n"
print("Existing images will " + bcolors.BOLD + "not" + bcolors.ENDC + " be overwritten.")
else:
NewImages = "y"
print("Existing images will be overwritten.")
print()
# Check for saving bonus material images
AllImages = input ("Store additional images to " + bcolors.BOLD + "snap/" + bcolors.ENDC + " and" + bcolors.BOLD + " wheel/" + bcolors.ENDC + " ? " + bcolors.OKBLUE + "(y/n) "+ bcolors.ENDC )
if AllImages != "yes" and AllImages != "y" and AllImages != "Y" and AllImages != "YES":
AllImages = "n"
print ("Additional images will " + bcolors.BOLD + "not"+ bcolors.ENDC + " be stored.")
else:
AllImages = "y"
print ("Additional images will be stored.")
print()
## initialisations
ScannedGames = 0
LimitResults = 0
##ScanFilter = "Five"
XML = ""
ExitButton = False
KeyRead = 0
ErrorMessage = ""
try:
ssl._create_default_https_context = ssl._create_unverified_context
except:
pass
## -------- input dir
if platform.system()=="Darwin":
inputdir="/Volumes/roms-1/amiga/"
## inputdir = "/Users/horaceandthespider/Documents/Gaming/AmigaWHD/WorkingFolder2/ECS Pack/"
## -------- I SEE YOU AINGER! o_o
elif platform.node()=="RAVEN":
inputdir="C:\\Users\\oaing\\Desktop\\whdload\\"
else:
inputdir="//home/pi/RetroPie/roms/amiga/"
# paths/folders if needed
os.makedirs(inputdir + "boxart", exist_ok=True)
if AllImages == "y":
os.makedirs (inputdir + "wheel", exist_ok=True)
os.makedirs (inputdir + "snap", exist_ok=True)
# here, we will open an existing XML, *or* create one.
XML_File = inputdir + "gamelist.xml"
if (os.path.isfile(XML_File)) == False:
XML = '<?xml version="1.0"?>\n<gameList>\n'
XML = XML + "</gameList>"
text_file = open(XML_File, "w")
text_file.write(XML)
text_file.close()
text_file = open(XML_File, "r")
XML = text_file.read()
text_file.close()
## check XML validity
if XML.find("?xml version=")<0 or XML.find("<gameList>")<0 or XML.find("</gameList>")<0:
print (bcolors.FAIL + ">> XML File "+ bcolors.BOLD + XML_File + bcolors.ENDC + bcolors.FAIL + " is malformed." + bcolors.ENDC )
KillXML = input ("Delete file prior to restart? (y/n) "+ bcolors.ENDC )
if KillXML != "yes" and KillXML != "y" and KillXML != "Y" and KillXML != "YES":
raise SystemExit
else:
print("Deleting 'gamelist.xml'")
os.remove(XML_File)
raise SystemExit
## ======== MAIN FILE READING LOOP
for filename in glob.glob(inputdir+'*.uae'):
ScannedGames = ScannedGames + 1
## filename = "Bloodwych (& Extended Levels).uae"
GameVariant = ""
GameEntry = ""
# Get the name, and remove extension and path
GameName = filename
GameName = GameName.replace ('.uae','')
GameName = GameName.replace (inputdir,'')
RealName = filename
RealName = RealName.replace (inputdir,'')
# finally, this filter should work...
if GameName == "_Config Maker":
print("Scraping data for " + bcolors.OKGREEN + GameName + bcolors.ENDC + " from external source.")
GameEntry = MakeGameEntry(RealName,'','','',AllImages)
ErrorMessage = ErrorMessage + GetPictures(RealName,"",AllImages,NewImages,inputdir)
elif GameName.find(ScanFilter) == -1 and ScanFilter != '':
print("Filter applied. Entry for " + bcolors.OKGREEN + GameName + bcolors.ENDC + " skipped.")
# so, if it's already in there, and we are *not* scraping everything
elif NewScrapes=="n" and XML.find("<path>./" + RealName + "</path>") > -1:
print("Existing entry for " + bcolors.OKGREEN + GameName + bcolors.ENDC + " - skipping.")
## OMG i cannot believe i just 'tabbed' *everything* in the loop for this...
else:
# Find the game type
## i think i can remove lots of these now.... (1,2,4 and 5)
if GameName.find(' [AGA]')>0:
GameType = 'AGA'
SearchType = 'amiga'
GameName = GameName.replace (' [AGA]','')
elif GameName.find(' [CD32]')>0:
GameType = 'CD32'
SearchType = 'cd32'
GameName = GameName.replace (' [CD32]','')
elif GameName.find(' [CDTV]')>0:
GameType = 'CDTV'
SearchType = 'cdtv'
GameName = GameName.replace (' [CDTV]','')
elif GameName.find(' [CD]')>0:
GameType = 'AGA'
SearchType = 'amiga'
GameName = GameName.replace (' [CD]','')
else:
GameType = 'ECS'
SearchType = 'amiga'
# Tidy the spaces etc for the search string
ParseName = GameName.replace('-','')
ParseName = ParseName.replace('&','%26')
ParseName = ParseName.replace('+','%2B')
ParseName = ParseName.replace(' ','+')
ParseName = ParseName.replace('++','+')
## print ("Searched for " + ParseName)
print ()
print (bcolors.OKBLUE + str(ScannedGames) + bcolors.ENDC + ": Searching for: " + bcolors.BOLD + GameName + bcolors.ENDC + " (" + ParseName+") " + GameType)
print (bcolors.HEADER + " " + filename + bcolors.ENDC)
print ()
## let's search the database!
FindLink = ""
NewParseName = ""
### Here, we can do a loop
## we will 'break' if we find a link though
## pass 1, search as normal
## pass 2, search with the round brackets '(' ')' stripped but contents kept ... for games like Cannon Fodder (New Campaign)
## pass 3, search with the square brackets '[' ']' stripped but contents kept ... for games where a name is shared
## pass 4, search with the bracketed part omitted completely and the extra bits stored for later use .. in GameVariant
## ...
## pass 9, ???
## pass 10, profit!
## if we *didn't* find anything, we re-try, with anything in brackets removed (alternative versions etc)
## -- we will also store the brackets information to put in the XML description (to show different versions apart)
##import re
##re.sub(r'\s\(.*\)', '', "Lemmings (2 Disk)")
for Pass in range(1,5):
# special 'pass' rules
if Pass==1:
NewParseName = ParseName
elif Pass==2:
NewParseName = ParseName
NewParseName = NewParseName.replace(')','')
NewParseName = NewParseName.replace('(','')
elif Pass==3:
NewParseName = ParseName
NewParseName = NewParseName.replace('[','')
NewParseName = NewParseName.replace(']','')
elif Pass==4:
NewParseName = ParseName
NewParseName = NewParseName.replace(GetBrackets(ParseName),'').strip()
GameVariant = GetBrackets(GameName)
## here we do the actual searches
## first of all, we have a special rule for AGA games, because they are a pain in the b*m
if GameType == 'AGA':
SearchString = 'https://openretro.org/browse/'+SearchType+'?q=' + NewParseName + "+[AGA]"+"&disabled=1&unpublished=1"
FindLink,FindImage,FindGame = WebSearchResult(SearchString,GameName,GameType,Pass)
## for everything else, we had no result, and/or we didn't select x/s, we will behave 'normally'
if FindLink=="":
SearchString = 'https://openretro.org/browse/'+SearchType+'?q=' + NewParseName + "&disabled=1&unpublished=1"
FindLink,FindImage,FindGame = WebSearchResult(SearchString,GameName,GameType,Pass)
## with these multiple searches, i may need a 'continue' option
if FindLink =="c":
FindLink = ""
if FindLink != "":
break
## check for abort
if FindLink == "x":
FindLink = ""
temp = ">> " + str(GameName) + " aborted. no single page selected, and scraping ended."
print (bcolors.FAIL + temp + bcolors.ENDC)
ErrorMessage = ErrorMessage + str(RealName) + "\t aborted. no single page selected, and scraping ended.\n"
break
elif FindLink == "s":
FindLink = ""
## temp = ">> " + str(GameName) + " aborted. no single page selected, user skipped."
## print (bcolors.FAIL + temp + bcolors.ENDC)
## ErrorMessage = ErrorMessage + str(RealName) + "\t aborted. no single page selected, user skipped..\n"
## after all that, we still haven't found a link.
if FindLink=="":
temp = ">> " + str(GameName) + " skipped. no single page selected."
print()
ErrorMessage = ErrorMessage + str(RealName) + "\t skipped. no single page selected.\n"
## game=str(game.encode('utf-8'), 'utf-8').replace("Cover for ","")
print (bcolors.FAIL + temp + bcolors.ENDC)
else:
## resolve the link into a single string
FindLink = "https://openretro.org" + FindLink + "/edit"
WebString = ""
f = urllib.request.urlopen(FindLink)
WebString = f.read()
f.close()
# ==== create individual game XML based on reading from the above cached page
## see UAGS_oagd.py
GameEntry = MakeGameEntry(RealName,GameVariant,GameType,WebString,AllImages)
## ========== do the image downloads
ErrorMessage = ErrorMessage + GetPictures(RealName,WebString,AllImages,NewImages,inputdir)
## remove any previous game entry (if overwrite is on)
if GameEntry != '':
if NewScrapes=="y" and XML.find(RealName) > -1:
print(" Removing existing entry for " + bcolors.OKBLUE + RealName + bcolors.ENDC +".")
print()
OldGameEntry = FindGameTagEntry(XML,RealName,"<game>")
XML = XML.replace(OldGameEntry,"")
## adds the game-entry
XML = XML.replace("</gameList>",GameEntry +"</gameList>")
print (bcolors.OKGREEN + ">> " + filename + " scraped." + bcolors.ENDC)
print ()
## save out the file(s)
## we are done!! let's create the new XML
print (bcolors.OKGREEN + ">> Updating "+ bcolors.BOLD + "gamelist.xml" + bcolors.ENDC )
text_file = open(XML_File, "w")
text_file.write(XML)
text_file.close()
print()
## special code for testing only!
if ScannedGames > LimitResults-1 and LimitResults != 0:
break
#### we are done!! let's create the new XML
##print()
##print (bcolors.OKGREEN + ">> Generating "+ bcolors.BOLD + "gamelist.xml" + bcolors.ENDC )
if ErrorMessage != "":
ErrorMessage = "The following errors occured during scraping:" + "\n\n" + ErrorMessage
print (bcolors.FAIL + ">> Generating "+ bcolors.BOLD + "errorlist.txt" + bcolors.ENDC )
text_file = open(inputdir + "errorlist.txt", "w")
text_file.write(ErrorMessage)
text_file.close()
print()
raise SystemExit
| ADER = '\033[95m'
OKBLUE = '\033[94m'
OKGREEN = '\033[92m'
WARNING = '\033[93m'
FAIL = '\033[91m'
ENDC = '\033[0m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
def disable(self):
self.HEADER = ''
self.OKBLUE = ''
self.OKGREEN = ''
self.WARNING = ''
self.FAIL = ''
self.ENDC = ''
| identifier_body |
databaseDemo.go | package main
import (
"bufio"
"database/sql"
"fmt"
_ "github.com/mattn/go-sqlite3" //import for side effects
"log"
"math/rand"
"os"
"strconv"
"strings"
)
func main() {
myDatabase := OpenDataBase("./Demo.db")
defer myDatabase.Close()
create_tables(myDatabase)
//addSampleStudents(myDatabase)
//addCourses(myDatabase)
//registerForClasses(myDatabase)
findProbationStudents(myDatabase)
}
func | (dbfile string) *sql.DB {
database, err := sql.Open("sqlite3", dbfile)
if err != nil {
log.Fatal(err)
}
return database
}
func getMinGPA() float64 {
fmt.Print("What is the minimum GPA for good standing:")
reader := bufio.NewReader(os.Stdin)
value, err := reader.ReadString('\n')
if err != nil {
log.Fatal("How did we fail to read from standard in!?!?")
}
value = strings.TrimSpace(value)
min_gpa, err := strconv.ParseFloat(value, 32)
if err != nil {
log.Fatal("oooops you typed that wrong", err)
}
return min_gpa
}
func findProbationStudents(database *sql.DB) {
var firstName, lastName string
var gpa float64
minGpa := getMinGPA()
selectStatement := "SELECT first_name, last_name, gpa FROM STUDENTS WHERE gpa < ?"
resultSet, err := database.Query(selectStatement, minGpa)
if err != nil {
log.Fatal("Bad Query", err)
}
defer resultSet.Close()
for resultSet.Next() {
err = resultSet.Scan(&firstName, &lastName, &gpa)
if err != nil {
log.Fatal(err)
}
fmt.Printf("%s %s is on probation with a GPA of %f\n", firstName, lastName, gpa)
}
}
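// (aside, sketch) after a Rows loop it is idiomatic to also check the
// iteration error before trusting the result set:
//   if err := resultSet.Err(); err != nil {
//       log.Fatal(err)
//   }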
func registerForClasses(database *sql.DB) {
insertStatement := "INSERT INTO CLASS_LIST (banner_id, course_prefix, course_number, registration_date)" +
"VALUES(?, 'Comp', 510, DATE('now'))"
preppedStatement, err := database.Prepare(insertStatement)
if err != nil {
log.Fatal("Hey prof you goofed it trying to type live", err)
}
for i := 1001; i <= 1008; i++ {
preppedStatement.Exec(i)
}
}
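// (aside, sketch) prepared statements hold driver resources, so closing them
// and checking each Exec result is the usual pattern:
//   defer preppedStatement.Close()
//   if _, err := preppedStatement.Exec(i); err != nil {
//       log.Println("insert failed for", i, ":", err)
//   }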
func addCourses(database *sql.DB) {
var sampleData = map[string]string{
"comp502": "Research\n(3 credits)\nPrerequisite: Consent of the department; formal application required\nOriginal research is undertaken by the graduate student in their field. This course culminates in a capstone project. For details, consult the paragraph titled “Directed or Independent Study” in the “College of Graduate Studies” section of this catalog. Offered fall and spring semesters.",
"comp503": "Directed Study\n(1-3 credits)\nPrerequisite: Consent of the department; formal application required\nDirected study is designed for the graduate student who desires to study selected topics in a specific field. For details, consult the paragraph titled “Directed or Independent Study” in the “College of Graduate Studies” section of this catalog. Repeatable: may earn a maximum of six credits. Offered fall and spring semesters.",
"comp510": "Topics in Programming Languages\n(3 credits)\nPrerequisite: Admission to the MS program in Computer Science or consent of instructor\nThis course investigates programming language development from designer’s, user’s and implementer’s point of view. Topics include formal syntax and semantics, language system, extensible languages and control structures. There is also a survey of intralanguage features, covering ALGOL-60, ALGOL-68, Ada, Pascal, LISP, SNOBOL-4 APL, SIMULA-67, CLU, MODULA, and others. Offered periodically.",
"comp520": "Operating Systems Principles\n(3 credits)\nPrerequisite: Admission to the MS program in Computer Science or consent of instructor\nThis course examines design principles such as optimal scheduling; file systems, system integrity and security, as well as the mathematical analysis of selected aspects of operating system design. Topics include queuing theory, disk scheduling, storage management and the working set model. Design and implementation of an operating system nucleus is also studied. Offered periodically.",
"comp525": "Design and Construction of Compilers\n(3 credits)\nPrerequisite: Admission to the MS program in Computer Science or consent of instructor\nIn this course, topics will include lexical and syntactic analysis; code generation; error detection and correction; optimization techniques; models of code generators; and incremental and interactive compiling. Students will design and implement a compiler. Offered periodically.",
"comp530": "Software Engineering\n(3 credits)\nPrerequisite: Admission to the MS program in Computer Science or consent of instructor\nTopics in this course will include construction of reliable software, software tools, software testing methodologies, structured design, structured programming, software characteristics and quality and formal proofs of program correctness. Chief programmer teams and structure walk-throughs will be employed. Offered periodically.\n",
"comp540": "Automata, Computability and Formal Languages\n(3 credits)\nPrerequisite: Admission to the MS program in Computer Science or consent of instructor\nTopics in this course will include finite automata and regular languages, context- free languages, Turing machines and their variants, partial recursive functions and grammars, Church’s thesis, undecidable problems, complexity of algorithms and completeness. Offered periodically.",
"comp545": "Analysis of Algorithms\n(3 credits)\nPrerequisite: Admission to the MS program in Computer Science or consent of instructor\nThis course deals with techniques in the analysis of algorithms. Topics to be chosen from among the following: dynamic programming, search and traverse techniques, backtracking, numerical techniques, NP-hard and NP-complete problems, approximation algorithms and other topics in the analysis and design of algorithms. Offered fall semester.\n",
"comp560": "Artificial Intelligence\n(3 credits)\nPrerequisite: Admission to the MS program in Computer Science or consent of instructor\nThis course is an introduction to LISP or another AI programming language. Topics are chosen from pattern recognition, theorem proving, learning, cognitive science and vision. It also presents introduction to the basic techniques of AI such as heuristic search, semantic nets, production systems, frames, planning and other AI topics. Offered periodically.\n",
"comp570": "Robotics\n(3 credits)\nPrerequisite: Admission to the MS program in Computer Science or consent of instructor\nThis is a project-oriented course in robotics. Topics are chosen from manipulator motion and control, motion planning, legged-motion, vision, touch sensing, grasping, programming languages for robots and automated factory design. Offered periodically.",
"comp580": "Database Systems\n(3 credits)\nPrerequisite: Admission to the MS program in Computer Science or consent of instructor\nIn this course, topics will include relational, hierarchical and network data models; design theory for relational databases and query optimization; classification of data models, data languages; concurrency, integrity, privacy; modeling and measurement of access strategies; and dedicated processors, information retrieval and real time applications. Offered periodically.",
"comp590": "Computer Architecture\n(3 credits)\nPrerequisite: Admission to the MS program in Computer Science or consent of instructor\nThis course is an introduction to the internal structure of digital computers including design of gates, flip-fops, registers and memories to perform operations on numerical and other data represented in binary form; computer system analysis and design; organizational dependence on computations to be performed; and theoretical aspects of parallel and pipeline computation. Offered periodically.",
"comp594": "Computer Networks\n(3 credits)\nPrerequisite: Admission to the MS program in Computer Science or consent of instructor\nThis course provides an introduction to fundamental concepts in computer networks, including their design and implementation. Topics include network architectures and protocols, placing emphasis on protocol used in the Internet; routing; data link layer issues; multimedia networking; network security; and network management. Offered periodically.\n",
"comp596": "Topics in Computer Science\n(3 credits)\nPrerequisite: Admission to the MS program in Computer Science or consent of instructor\nIn this course, topics are chosen from program verification, formal semantics, formal language theory, concurrent programming, complexity or algorithms, programming language theory, graphics and other computer science topics. Repeatable for different topics. Offered as topics arise.",
"comp598": " Computer Science Graduate Internship\n(3 credits)\nPrerequisite: Matriculation in the computer science master’s program; at least six credits of graduate-level course work in computer science (COMP); formal application required\nAn internship provides an opportunity to apply what has been learned in the classroom and allows the student to further professional skills. Faculty supervision allows for reflection on the internship experience and connects the applied portion of the academic study to other courses. Repeatable; may earn a maximum of six credits, however, only three credits can be used toward the degree. Graded on (P) Pass/(N) No Pass basis. Offered fall and spring semesters.\n",
}
insertStatement := "INSERT INTO COURSE (course_prefix, course_number, description) VALUES (?,?,?);"
preppedStatement, err := database.Prepare(insertStatement)
if err != nil {
log.Fatal(err)
}
for course, desc := range sampleData {
prefix := course[0:4]
numVal := course[4:7]
courseNum, err := strconv.Atoi(numVal)
if err != nil {
log.Fatal("ooops we must have mistyped", err)
}
preppedStatement.Exec(prefix, courseNum, desc)
}
}
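// (aside) Go deliberately randomizes map iteration order, so these courses are
// inserted in a different order on every run; if a stable order mattered, the
// keys could be sorted first (requires importing "sort"):
//   keys := make([]string, 0, len(sampleData))
//   for k := range sampleData {
//       keys = append(keys, k)
//   }
//   sort.Strings(keys)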
func addSampleStudents(database *sql.DB) {
sampleNames := map[string]string{"John": "Santore", "Enping": "Li", "Michael": "Black",
"Seikyung": "Jung", "Haleh": "Khojasteh", "Abdul": "Sattar", "Paul": "Kim", "Yiheng": "Liang"}
statement := "INSERT INTO STUDENTS (banner_id, first_name, last_name, gpa, credits)" +
" VALUES (?, ?, ?, ?, ?);"
prepped_statement, err := database.Prepare(statement)
if err != nil {
log.Fatal(err)
}
idNum := 1001
for firstName, lastName := range sampleNames {
randGPA := rand.Float32() + float32(rand.Intn(4))
randCredits := rand.Intn(30)
prepped_statement.Exec(idNum, firstName, lastName, randGPA, randCredits)
idNum += 1
}
}
func create_tables(database *sql.DB) {
createStatement1 := "CREATE TABLE IF NOT EXISTS students( " +
"banner_id INTEGER PRIMARY KEY," +
"first_name TEXT NOT NULL," +
"last_name TEXT NOT NULL," +
"gpa REAL DEFAULT 0," +
"credits INTEGER DEFAULT 0);"
create_course := "CREATE TABLE IF NOT EXISTS course(" +
" course_prefix TEXT NOT NULL," +
" course_number INTEGER NOT NULL," +
" cap INTEGER DEFAULT 20," +
" description TEXT," +
" PRIMARY KEY(course_prefix, course_number) );"
create_reg_statement := "CREATE TABLE IF NOT EXISTS class_list(" +
"registration_id INTEGER PRIMARY KEY, course_prefix TEXT NOT NULL," +
"course_number INTEGER NOT NULL," +
"banner_id INTEGER NOT NULL," +
"registration_date TEXT," +
"FOREIGN KEY (banner_id) REFERENCES student (banner_id)" +
"ON DELETE CASCADE ON UPDATE NO ACTION," +
"FOREIGN KEY (course_prefix, course_number) REFERENCES courses (course_prefix, course_number)" +
"ON DELETE CASCADE ON UPDATE NO ACTION" +
");"
database.Exec(createStatement1)
database.Exec(create_course)
database.Exec(create_reg_statement)
}
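// (aside, sketch) SQLite only enforces the FOREIGN KEY clauses above when the
// foreign_keys pragma is enabled on the connection; with mattn/go-sqlite3 this
// can be requested via the DSN (parameter name per the driver docs):
//   db, err := sql.Open("sqlite3", "./Demo.db?_foreign_keys=on")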
| OpenDataBase | identifier_name |
databaseDemo.go | package main
import (
"bufio"
"database/sql"
"fmt"
_ "github.com/mattn/go-sqlite3" //import for side effects
"log"
"math/rand"
"os"
"strconv"
"strings"
)
func main() {
myDatabase := OpenDataBase("./Demo.db")
defer myDatabase.Close()
create_tables(myDatabase)
//addSampleStudents(myDatabase)
//addCourses(myDatabase)
//registerForClasses(myDatabase)
findProbationStudents(myDatabase)
}
func OpenDataBase(dbfile string) *sql.DB {
database, err := sql.Open("sqlite3", dbfile)
if err != nil {
log.Fatal(err)
}
return database
}
func getMinGPA() float64 {
fmt.Print("What is the minimum GPA for good standing:")
reader := bufio.NewReader(os.Stdin)
value, err := reader.ReadString('\n')
if err != nil {
log.Fatal("How did we fail to read from standard in!?!?")
}
value = strings.TrimSpace(value)
min_gpa, err := strconv.ParseFloat(value, 32)
if err != nil {
log.Fatal("oooops you typed that wrong", err)
}
return min_gpa
}
func findProbationStudents(database *sql.DB) {
var firstName, lastName string
var gpa float64
minGpa := getMinGPA()
selectStatement := "SELECT first_name, last_name, gpa FROM STUDENTS WHERE gpa < ?"
resultSet, err := database.Query(selectStatement, minGpa)
if err != nil {
log.Fatal("Bad Query", err)
}
defer resultSet.Close()
for resultSet.Next() {
err = resultSet.Scan(&firstName, &lastName, &gpa)
if err != nil { | }
}
func registerForClasses(database *sql.DB) {
insertStatement := "INSERT INTO CLASS_LIST (banner_id, course_prefix, course_number, registration_date)" +
"VALUES(?, 'Comp', 510, DATE('now'))"
preppedStatement, err := database.Prepare(insertStatement)
if err != nil {
log.Fatal("Hey prof you goofed it trying to type live", err)
}
for i := 1001; i <= 1008; i++ {
preppedStatement.Exec(i)
}
}
func addCourses(database *sql.DB) {
var sampleData = map[string]string{
"comp502": "Research\n(3 credits)\nPrerequisite: Consent of the department; formal application required\nOriginal research is undertaken by the graduate student in their field. This course culminates in a capstone project. For details, consult the paragraph titled “Directed or Independent Study” in the “College of Graduate Studies” section of this catalog. Offered fall and spring semesters.",
"comp503": "Directed Study\n(1-3 credits)\nPrerequisite: Consent of the department; formal application required\nDirected study is designed for the graduate student who desires to study selected topics in a specific field. For details, consult the paragraph titled “Directed or Independent Study” in the “College of Graduate Studies” section of this catalog. Repeatable: may earn a maximum of six credits. Offered fall and spring semesters.",
"comp510": "Topics in Programming Languages\n(3 credits)\nPrerequisite: Admission to the MS program in Computer Science or consent of instructor\nThis course investigates programming language development from designer’s, user’s and implementer’s point of view. Topics include formal syntax and semantics, language system, extensible languages and control structures. There is also a survey of intralanguage features, covering ALGOL-60, ALGOL-68, Ada, Pascal, LISP, SNOBOL-4 APL, SIMULA-67, CLU, MODULA, and others. Offered periodically.",
"comp520": "Operating Systems Principles\n(3 credits)\nPrerequisite: Admission to the MS program in Computer Science or consent of instructor\nThis course examines design principles such as optimal scheduling; file systems, system integrity and security, as well as the mathematical analysis of selected aspects of operating system design. Topics include queuing theory, disk scheduling, storage management and the working set model. Design and implementation of an operating system nucleus is also studied. Offered periodically.",
"comp525": "Design and Construction of Compilers\n(3 credits)\nPrerequisite: Admission to the MS program in Computer Science or consent of instructor\nIn this course, topics will include lexical and syntactic analysis; code generation; error detection and correction; optimization techniques; models of code generators; and incremental and interactive compiling. Students will design and implement a compiler. Offered periodically.",
"comp530": "Software Engineering\n(3 credits)\nPrerequisite: Admission to the MS program in Computer Science or consent of instructor\nTopics in this course will include construction of reliable software, software tools, software testing methodologies, structured design, structured programming, software characteristics and quality and formal proofs of program correctness. Chief programmer teams and structure walk-throughs will be employed. Offered periodically.\n",
"comp540": "Automata, Computability and Formal Languages\n(3 credits)\nPrerequisite: Admission to the MS program in Computer Science or consent of instructor\nTopics in this course will include finite automata and regular languages, context- free languages, Turing machines and their variants, partial recursive functions and grammars, Church’s thesis, undecidable problems, complexity of algorithms and completeness. Offered periodically.",
"comp545": "Analysis of Algorithms\n(3 credits)\nPrerequisite: Admission to the MS program in Computer Science or consent of instructor\nThis course deals with techniques in the analysis of algorithms. Topics to be chosen from among the following: dynamic programming, search and traverse techniques, backtracking, numerical techniques, NP-hard and NP-complete problems, approximation algorithms and other topics in the analysis and design of algorithms. Offered fall semester.\n",
"comp560": "Artificial Intelligence\n(3 credits)\nPrerequisite: Admission to the MS program in Computer Science or consent of instructor\nThis course is an introduction to LISP or another AI programming language. Topics are chosen from pattern recognition, theorem proving, learning, cognitive science and vision. It also presents introduction to the basic techniques of AI such as heuristic search, semantic nets, production systems, frames, planning and other AI topics. Offered periodically.\n",
"comp570": "Robotics\n(3 credits)\nPrerequisite: Admission to the MS program in Computer Science or consent of instructor\nThis is a project-oriented course in robotics. Topics are chosen from manipulator motion and control, motion planning, legged-motion, vision, touch sensing, grasping, programming languages for robots and automated factory design. Offered periodically.",
"comp580": "Database Systems\n(3 credits)\nPrerequisite: Admission to the MS program in Computer Science or consent of instructor\nIn this course, topics will include relational, hierarchical and network data models; design theory for relational databases and query optimization; classification of data models, data languages; concurrency, integrity, privacy; modeling and measurement of access strategies; and dedicated processors, information retrieval and real time applications. Offered periodically.",
"comp590": "Computer Architecture\n(3 credits)\nPrerequisite: Admission to the MS program in Computer Science or consent of instructor\nThis course is an introduction to the internal structure of digital computers including design of gates, flip-fops, registers and memories to perform operations on numerical and other data represented in binary form; computer system analysis and design; organizational dependence on computations to be performed; and theoretical aspects of parallel and pipeline computation. Offered periodically.",
"comp594": "Computer Networks\n(3 credits)\nPrerequisite: Admission to the MS program in Computer Science or consent of instructor\nThis course provides an introduction to fundamental concepts in computer networks, including their design and implementation. Topics include network architectures and protocols, placing emphasis on protocol used in the Internet; routing; data link layer issues; multimedia networking; network security; and network management. Offered periodically.\n",
"comp596": "Topics in Computer Science\n(3 credits)\nPrerequisite: Admission to the MS program in Computer Science or consent of instructor\nIn this course, topics are chosen from program verification, formal semantics, formal language theory, concurrent programming, complexity or algorithms, programming language theory, graphics and other computer science topics. Repeatable for different topics. Offered as topics arise.",
"comp598": " Computer Science Graduate Internship\n(3 credits)\nPrerequisite: Matriculation in the computer science master’s program; at least six credits of graduate-level course work in computer science (COMP); formal application required\nAn internship provides an opportunity to apply what has been learned in the classroom and allows the student to further professional skills. Faculty supervision allows for reflection on the internship experience and connects the applied portion of the academic study to other courses. Repeatable; may earn a maximum of six credits, however, only three credits can be used toward the degree. Graded on (P) Pass/(N) No Pass basis. Offered fall and spring semesters.\n",
}
insertStatement := "INSERT INTO COURSE (course_prefix, course_number, description) VALUES (?,?,?);"
preppedStatement, err := database.Prepare(insertStatement)
if err != nil {
log.Fatal(err)
}
for course, desc := range sampleData {
prefix := course[0:4]
numVal := course[4:7]
courseNum, err := strconv.Atoi(numVal)
if err != nil {
log.Fatal("ooops we must have mistyped", err)
}
preppedStatement.Exec(prefix, courseNum, desc)
}
}
func addSampleStudents(database *sql.DB) {
sampleNames := map[string]string{"John": "Santore", "Enping": "Li", "Michael": "Black",
"Seikyung": "Jung", "Haleh": "Khojasteh", "Abdul": "Sattar", "Paul": "Kim", "Yiheng": "Liang"}
statement := "INSERT INTO STUDENTS (banner_id, first_name, last_name, gpa, credits)" +
" VALUES (?, ?, ?, ?, ?);"
prepped_statement, err := database.Prepare(statement)
if err != nil {
log.Fatal(err)
}
idNum := 1001
for firstName, lastName := range sampleNames {
randGPA := rand.Float32() + float32(rand.Intn(4))
randCredits := rand.Intn(30)
prepped_statement.Exec(idNum, firstName, lastName, randGPA, randCredits)
idNum += 1
}
}
func create_tables(database *sql.DB) {
createStatement1 := "CREATE TABLE IF NOT EXISTS students( " +
"banner_id INTEGER PRIMARY KEY," +
"first_name TEXT NOT NULL," +
"last_name TEXT NOT NULL," +
"gpa REAL DEFAULT 0," +
"credits INTEGER DEFAULT 0);"
create_course := "CREATE TABLE IF NOT EXISTS course(" +
" course_prefix TEXT NOT NULL," +
" course_number INTEGER NOT NULL," +
" cap INTEGER DEFAULT 20," +
" description TEXT," +
" PRIMARY KEY(course_prefix, course_number) );"
create_reg_statement := "CREATE TABLE IF NOT EXISTS class_list(" +
"registration_id INTEGER PRIMARY KEY, course_prefix TEXT NOT NULL," +
"course_number INTEGER NOT NULL," +
"banner_id INTEGER NOT NULL," +
"registration_date TEXT," +
"FOREIGN KEY (banner_id) REFERENCES student (banner_id)" +
"ON DELETE CASCADE ON UPDATE NO ACTION," +
"FOREIGN KEY (course_prefix, course_number) REFERENCES courses (course_prefix, course_number)" +
"ON DELETE CASCADE ON UPDATE NO ACTION" +
");"
database.Exec(createStatement1)
database.Exec(create_course)
database.Exec(create_reg_statement)
} | log.Fatal(err)
}
fmt.Printf("%s %s is on probation with a GPA of %f\n", firstName, lastName, gpa) | random_line_split |
databaseDemo.go | package main
import (
"bufio"
"database/sql"
"fmt"
_ "github.com/mattn/go-sqlite3" //import for side effects
"log"
"math/rand"
"os"
"strconv"
"strings"
)
func main() {
myDatabase := OpenDataBase("./Demo.db")
defer myDatabase.Close()
create_tables(myDatabase)
//addSampleStudents(myDatabase)
//addCourses(myDatabase)
//registerForClasses(myDatabase)
findProbationStudents(myDatabase)
}
func OpenDataBase(dbfile string) *sql.DB {
database, err := sql.Open("sqlite3", dbfile)
if err != nil {
log.Fatal(err)
}
return database
}
func getMinGPA() float64 {
fmt.Print("What is the minimum GPA for good standing:")
reader := bufio.NewReader(os.Stdin)
value, err := reader.ReadString('\n')
if err != nil {
log.Fatal("How did we fail to read from standard in!?!?")
}
value = strings.TrimSpace(value)
min_gpa, err := strconv.ParseFloat(value, 32)
if err != nil {
log.Fatal("oooops you typed that wrong", err)
}
return min_gpa
}
func findProbationStudents(database *sql.DB) {
var firstName, lastName string
var gpa float64
minGpa := getMinGPA()
selectStatement := "SELECT first_name, last_name, gpa FROM STUDENTS WHERE gpa < ?"
resultSet, err := database.Query(selectStatement, minGpa)
if err != nil {
log.Fatal("Bad Query", err)
}
defer resultSet.Close()
for resultSet.Next() {
err = resultSet.Scan(&firstName, &lastName, &gpa)
if err != nil {
log.Fatal(err)
}
fmt.Printf("%s %s is on probation with a GPA of %f\n", firstName, lastName, gpa)
}
}
func registerForClasses(database *sql.DB) {
insertStatement := "INSERT INTO CLASS_LIST (banner_id, course_prefix, course_number, registration_date)" +
"VALUES(?, 'Comp', 510, DATE('now'))"
preppedStatement, err := database.Prepare(insertStatement)
if err != nil {
log.Fatal("Hey prof you goofed it trying to type live", err)
}
for i := 1001; i <= 1008; i++ {
preppedStatement.Exec(i)
}
}
func addCourses(database *sql.DB) | atabase *sql.DB) {
sampleNames := map[string]string{"John": "Santore", "Enping": "Li", "Michael": "Black",
"Seikyung": "Jung", "Haleh": "Khojasteh", "Abdul": "Sattar", "Paul": "Kim", "Yiheng": "Liang"}
statement := "INSERT INTO STUDENTS (banner_id, first_name, last_name, gpa, credits)" +
" VALUES (?, ?, ?, ?, ?);"
prepped_statement, err := database.Prepare(statement)
if err != nil {
log.Fatal(err)
}
idNum := 1001
for firstName, lastName := range sampleNames {
randGPA := rand.Float32() + float32(rand.Intn(4))
randCredits := rand.Intn(30)
prepped_statement.Exec(idNum, firstName, lastName, randGPA, randCredits)
idNum += 1
}
}
func create_tables(database *sql.DB) {
createStatement1 := "CREATE TABLE IF NOT EXISTS students( " +
"banner_id INTEGER PRIMARY KEY," +
"first_name TEXT NOT NULL," +
"last_name TEXT NOT NULL," +
"gpa REAL DEFAULT 0," +
"credits INTEGER DEFAULT 0);"
create_course := "CREATE TABLE IF NOT EXISTS course(" +
" course_prefix TEXT NOT NULL," +
" course_number INTEGER NOT NULL," +
" cap INTEGER DEFAULT 20," +
" description TEXT," +
" PRIMARY KEY(course_prefix, course_number) );"
create_reg_statement := "CREATE TABLE IF NOT EXISTS class_list(" +
"registration_id INTEGER PRIMARY KEY, course_prefix TEXT NOT NULL," +
"course_number INTEGER NOT NULL," +
"banner_id INTEGER NOT NULL," +
"registration_date TEXT," +
"FOREIGN KEY (banner_id) REFERENCES student (banner_id)" +
"ON DELETE CASCADE ON UPDATE NO ACTION," +
"FOREIGN KEY (course_prefix, course_number) REFERENCES courses (course_prefix, course_number)" +
"ON DELETE CASCADE ON UPDATE NO ACTION" +
");"
database.Exec(createStatement1)
database.Exec(create_course)
database.Exec(create_reg_statement)
}
| {
var sampleData = map[string]string{
"comp502": "Research\n(3 credits)\nPrerequisite: Consent of the department; formal application required\nOriginal research is undertaken by the graduate student in their field. This course culminates in a capstone project. For details, consult the paragraph titled “Directed or Independent Study” in the “College of Graduate Studies” section of this catalog. Offered fall and spring semesters.",
"comp503": "Directed Study\n(1-3 credits)\nPrerequisite: Consent of the department; formal application required\nDirected study is designed for the graduate student who desires to study selected topics in a specific field. For details, consult the paragraph titled “Directed or Independent Study” in the “College of Graduate Studies” section of this catalog. Repeatable: may earn a maximum of six credits. Offered fall and spring semesters.",
"comp510": "Topics in Programming Languages\n(3 credits)\nPrerequisite: Admission to the MS program in Computer Science or consent of instructor\nThis course investigates programming language development from designer’s, user’s and implementer’s point of view. Topics include formal syntax and semantics, language system, extensible languages and control structures. There is also a survey of intralanguage features, covering ALGOL-60, ALGOL-68, Ada, Pascal, LISP, SNOBOL-4 APL, SIMULA-67, CLU, MODULA, and others. Offered periodically.",
"comp520": "Operating Systems Principles\n(3 credits)\nPrerequisite: Admission to the MS program in Computer Science or consent of instructor\nThis course examines design principles such as optimal scheduling; file systems, system integrity and security, as well as the mathematical analysis of selected aspects of operating system design. Topics include queuing theory, disk scheduling, storage management and the working set model. Design and implementation of an operating system nucleus is also studied. Offered periodically.",
"comp525": "Design and Construction of Compilers\n(3 credits)\nPrerequisite: Admission to the MS program in Computer Science or consent of instructor\nIn this course, topics will include lexical and syntactic analysis; code generation; error detection and correction; optimization techniques; models of code generators; and incremental and interactive compiling. Students will design and implement a compiler. Offered periodically.",
"comp530": "Software Engineering\n(3 credits)\nPrerequisite: Admission to the MS program in Computer Science or consent of instructor\nTopics in this course will include construction of reliable software, software tools, software testing methodologies, structured design, structured programming, software characteristics and quality and formal proofs of program correctness. Chief programmer teams and structure walk-throughs will be employed. Offered periodically.\n",
"comp540": "Automata, Computability and Formal Languages\n(3 credits)\nPrerequisite: Admission to the MS program in Computer Science or consent of instructor\nTopics in this course will include finite automata and regular languages, context- free languages, Turing machines and their variants, partial recursive functions and grammars, Church’s thesis, undecidable problems, complexity of algorithms and completeness. Offered periodically.",
"comp545": "Analysis of Algorithms\n(3 credits)\nPrerequisite: Admission to the MS program in Computer Science or consent of instructor\nThis course deals with techniques in the analysis of algorithms. Topics to be chosen from among the following: dynamic programming, search and traverse techniques, backtracking, numerical techniques, NP-hard and NP-complete problems, approximation algorithms and other topics in the analysis and design of algorithms. Offered fall semester.\n",
"comp560": "Artificial Intelligence\n(3 credits)\nPrerequisite: Admission to the MS program in Computer Science or consent of instructor\nThis course is an introduction to LISP or another AI programming language. Topics are chosen from pattern recognition, theorem proving, learning, cognitive science and vision. It also presents introduction to the basic techniques of AI such as heuristic search, semantic nets, production systems, frames, planning and other AI topics. Offered periodically.\n",
"comp570": "Robotics\n(3 credits)\nPrerequisite: Admission to the MS program in Computer Science or consent of instructor\nThis is a project-oriented course in robotics. Topics are chosen from manipulator motion and control, motion planning, legged-motion, vision, touch sensing, grasping, programming languages for robots and automated factory design. Offered periodically.",
"comp580": "Database Systems\n(3 credits)\nPrerequisite: Admission to the MS program in Computer Science or consent of instructor\nIn this course, topics will include relational, hierarchical and network data models; design theory for relational databases and query optimization; classification of data models, data languages; concurrency, integrity, privacy; modeling and measurement of access strategies; and dedicated processors, information retrieval and real time applications. Offered periodically.",
"comp590": "Computer Architecture\n(3 credits)\nPrerequisite: Admission to the MS program in Computer Science or consent of instructor\nThis course is an introduction to the internal structure of digital computers including design of gates, flip-fops, registers and memories to perform operations on numerical and other data represented in binary form; computer system analysis and design; organizational dependence on computations to be performed; and theoretical aspects of parallel and pipeline computation. Offered periodically.",
"comp594": "Computer Networks\n(3 credits)\nPrerequisite: Admission to the MS program in Computer Science or consent of instructor\nThis course provides an introduction to fundamental concepts in computer networks, including their design and implementation. Topics include network architectures and protocols, placing emphasis on protocol used in the Internet; routing; data link layer issues; multimedia networking; network security; and network management. Offered periodically.\n",
"comp596": "Topics in Computer Science\n(3 credits)\nPrerequisite: Admission to the MS program in Computer Science or consent of instructor\nIn this course, topics are chosen from program verification, formal semantics, formal language theory, concurrent programming, complexity or algorithms, programming language theory, graphics and other computer science topics. Repeatable for different topics. Offered as topics arise.",
"comp598": " Computer Science Graduate Internship\n(3 credits)\nPrerequisite: Matriculation in the computer science master’s program; at least six credits of graduate-level course work in computer science (COMP); formal application required\nAn internship provides an opportunity to apply what has been learned in the classroom and allows the student to further professional skills. Faculty supervision allows for reflection on the internship experience and connects the applied portion of the academic study to other courses. Repeatable; may earn a maximum of six credits, however, only three credits can be used toward the degree. Graded on (P) Pass/(N) No Pass basis. Offered fall and spring semesters.\n",
}
insertStatement := "INSERT INTO COURSE (course_prefix, course_number, description) VALUES (?,?,?);"
preppedStatement, err := database.Prepare(insertStatement)
if err != nil {
log.Fatal(err)
}
for course, desc := range sampleData {
prefix := course[0:4]
numVal := course[4:7]
courseNum, err := strconv.Atoi(numVal)
if err != nil {
log.Fatal("ooops we must have mistyped", err)
}
preppedStatement.Exec(prefix, courseNum, desc)
}
}
func addSampleStudents(d | identifier_body |
databaseDemo.go | package main
import (
"bufio"
"database/sql"
"fmt"
_ "github.com/mattn/go-sqlite3" //import for side effects
"log"
"math/rand"
"os"
"strconv"
"strings"
)
func main() {
myDatabase := OpenDataBase("./Demo.db")
defer myDatabase.Close()
create_tables(myDatabase)
//addSampleStudents(myDatabase)
//addCourses(myDatabase)
//registerForClasses(myDatabase)
findProbationStudents(myDatabase)
}
func OpenDataBase(dbfile string) *sql.DB {
database, err := sql.Open("sqlite3", dbfile)
if err != nil {
log.Fatal(err)
}
return database
}
func getMinGPA() float64 {
fmt.Print("What is the minimum GPA for good standing:")
reader := bufio.NewReader(os.Stdin)
value, err := reader.ReadString('\n')
if err != nil {
log.Fatal("How did we fail to read from standard in!?!?")
}
value = strings.TrimSpace(value)
min_gpa, err := strconv.ParseFloat(value, 32)
if err != nil {
log.Fatal("oooops you typed that wrong", err)
}
return min_gpa
}
func findProbationStudents(database *sql.DB) {
var firstName, lastName string
var gpa float64
minGpa := getMinGPA()
selectStatement := "SELECT first_name, last_name, gpa FROM STUDENTS WHERE gpa < ?"
resultSet, err := database.Query(selectStatement, minGpa)
if err != nil {
log.Fatal("Bad Query", err)
}
defer resultSet.Close()
for resultSet.Next() {
err = resultSet.Scan(&firstName, &lastName, &gpa)
if err != nil {
log.Fatal(err)
}
fmt.Printf("%s %s is on probation with a GPA of %f\n", firstName, lastName, gpa)
}
}
func registerForClasses(database *sql.DB) {
insertStatement := "INSERT INTO CLASS_LIST (banner_id, course_prefix, course_number, registration_date)" +
"VALUES(?, 'Comp', 510, DATE('now'))"
preppedStatement, err := database.Prepare(insertStatement)
if err != nil {
log.Fatal("Hey prof you goofed it trying to type live", err)
}
for i := 1001; i <= 1008; i++ {
preppedStatement.Exec(i)
}
}
func addCourses(database *sql.DB) {
var sampleData = map[string]string{
"comp502": "Research\n(3 credits)\nPrerequisite: Consent of the department; formal application required\nOriginal research is undertaken by the graduate student in their field. This course culminates in a capstone project. For details, consult the paragraph titled “Directed or Independent Study” in the “College of Graduate Studies” section of this catalog. Offered fall and spring semesters.",
"comp503": "Directed Study\n(1-3 credits)\nPrerequisite: Consent of the department; formal application required\nDirected study is designed for the graduate student who desires to study selected topics in a specific field. For details, consult the paragraph titled “Directed or Independent Study” in the “College of Graduate Studies” section of this catalog. Repeatable: may earn a maximum of six credits. Offered fall and spring semesters.",
"comp510": "Topics in Programming Languages\n(3 credits)\nPrerequisite: Admission to the MS program in Computer Science or consent of instructor\nThis course investigates programming language development from designer’s, user’s and implementer’s point of view. Topics include formal syntax and semantics, language system, extensible languages and control structures. There is also a survey of intralanguage features, covering ALGOL-60, ALGOL-68, Ada, Pascal, LISP, SNOBOL-4 APL, SIMULA-67, CLU, MODULA, and others. Offered periodically.",
"comp520": "Operating Systems Principles\n(3 credits)\nPrerequisite: Admission to the MS program in Computer Science or consent of instructor\nThis course examines design principles such as optimal scheduling; file systems, system integrity and security, as well as the mathematical analysis of selected aspects of operating system design. Topics include queuing theory, disk scheduling, storage management and the working set model. Design and implementation of an operating system nucleus is also studied. Offered periodically.",
"comp525": "Design and Construction of Compilers\n(3 credits)\nPrerequisite: Admission to the MS program in Computer Science or consent of instructor\nIn this course, topics will include lexical and syntactic analysis; code generation; error detection and correction; optimization techniques; models of code generators; and incremental and interactive compiling. Students will design and implement a compiler. Offered periodically.",
"comp530": "Software Engineering\n(3 credits)\nPrerequisite: Admission to the MS program in Computer Science or consent of instructor\nTopics in this course will include construction of reliable software, software tools, software testing methodologies, structured design, structured programming, software characteristics and quality and formal proofs of program correctness. Chief programmer teams and structure walk-throughs will be employed. Offered periodically.\n",
"comp540": "Automata, Computability and Formal Languages\n(3 credits)\nPrerequisite: Admission to the MS program in Computer Science or consent of instructor\nTopics in this course will include finite automata and regular languages, context- free languages, Turing machines and their variants, partial recursive functions and grammars, Church’s thesis, undecidable problems, complexity of algorithms and completeness. Offered periodically.",
"comp545": "Analysis of Algorithms\n(3 credits)\nPrerequisite: Admission to the MS program in Computer Science or consent of instructor\nThis course deals with techniques in the analysis of algorithms. Topics to be chosen from among the following: dynamic programming, search and traverse techniques, backtracking, numerical techniques, NP-hard and NP-complete problems, approximation algorithms and other topics in the analysis and design of algorithms. Offered fall semester.\n",
"comp560": "Artificial Intelligence\n(3 credits)\nPrerequisite: Admission to the MS program in Computer Science or consent of instructor\nThis course is an introduction to LISP or another AI programming language. Topics are chosen from pattern recognition, theorem proving, learning, cognitive science and vision. It also presents introduction to the basic techniques of AI such as heuristic search, semantic nets, production systems, frames, planning and other AI topics. Offered periodically.\n",
"comp570": "Robotics\n(3 credits)\nPrerequisite: Admission to the MS program in Computer Science or consent of instructor\nThis is a project-oriented course in robotics. Topics are chosen from manipulator motion and control, motion planning, legged-motion, vision, touch sensing, grasping, programming languages for robots and automated factory design. Offered periodically.",
"comp580": "Database Systems\n(3 credits)\nPrerequisite: Admission to the MS program in Computer Science or consent of instructor\nIn this course, topics will include relational, hierarchical and network data models; design theory for relational databases and query optimization; classification of data models, data languages; concurrency, integrity, privacy; modeling and measurement of access strategies; and dedicated processors, information retrieval and real time applications. Offered periodically.",
"comp590": "Computer Architecture\n(3 credits)\nPrerequisite: Admission to the MS program in Computer Science or consent of instructor\nThis course is an introduction to the internal structure of digital computers including design of gates, flip-fops, registers and memories to perform operations on numerical and other data represented in binary form; computer system analysis and design; organizational dependence on computations to be performed; and theoretical aspects of parallel and pipeline computation. Offered periodically.",
"comp594": "Computer Networks\n(3 credits)\nPrerequisite: Admission to the MS program in Computer Science or consent of instructor\nThis course provides an introduction to fundamental concepts in computer networks, including their design and implementation. Topics include network architectures and protocols, placing emphasis on protocol used in the Internet; routing; data link layer issues; multimedia networking; network security; and network management. Offered periodically.\n",
"comp596": "Topics in Computer Science\n(3 credits)\nPrerequisite: Admission to the MS program in Computer Science or consent of instructor\nIn this course, topics are chosen from program verification, formal semantics, formal language theory, concurrent programming, complexity or algorithms, programming language theory, graphics and other computer science topics. Repeatable for different topics. Offered as topics arise.",
"comp598": " Computer Science Graduate Internship\n(3 credits)\nPrerequisite: Matriculation in the computer science master’s program; at least six credits of graduate-level course work in computer science (COMP); formal application required\nAn internship provides an opportunity to apply what has been learned in the classroom and allows the student to further professional skills. Faculty supervision allows for reflection on the internship experience and connects the applied portion of the academic study to other courses. Repeatable; may earn a maximum of six credits, however, only three credits can be used toward the degree. Graded on (P) Pass/(N) No Pass basis. Offered fall and spring semesters.\n",
}
insertStatement := "INSERT INTO COURSE (course_prefix, course_number, description) VALUES (?,?,?);"
preppedStatement, err := database.Prepare(insertStatement)
if err != nil {
log.Fatal(err)
}
defer preppedStatement.Close()
for course, desc := range sampleData {
prefix := course[0:4]
| s(database *sql.DB) {
sampleNames := map[string]string{"John": "Santore", "Enping": "Li", "Michael": "Black",
"Seikyung": "Jung", "Haleh": "Khojasteh", "Abdul": "Sattar", "Paul": "Kim", "Yiheng": "Liang"}
statement := "INSERT INTO STUDENTS (banner_id, first_name, last_name, gpa, credits)" +
" VALUES (?, ?, ?, ?, ?);"
prepped_statement, err := database.Prepare(statement)
if err != nil {
log.Fatal(err)
}
defer prepped_statement.Close()
idNum := 1001
for firstName, lastName := range sampleNames {
randGPA := rand.Float32() + float32(rand.Intn(4))
randCredits := rand.Intn(30)
prepped_statement.Exec(idNum, firstName, lastName, randGPA, randCredits)
idNum += 1
}
}
func create_tables(database *sql.DB) {
createStatement1 := "CREATE TABLE IF NOT EXISTS students( " +
"banner_id INTEGER PRIMARY KEY," +
"first_name TEXT NOT NULL," +
"last_name TEXT NOT NULL," +
"gpa REAL DEFAULT 0," +
"credits INTEGER DEFAULT 0);"
create_course := "CREATE TABLE IF NOT EXISTS course(" +
" course_prefix TEXT NOT NULL," +
" course_number INTEGER NOT NULL," +
" cap INTEGER DEFAULT 20," +
" description TEXT," +
" PRIMARY KEY(course_prefix, course_number) );"
create_reg_statement := "CREATE TABLE IF NOT EXISTS class_list(" +
"registration_id INTEGER PRIMARY KEY, course_prefix TEXT NOT NULL," +
"course_number INTEGER NOT NULL," +
"banner_id INTEGER NOT NULL," +
"registration_date TEXT," +
"FOREIGN KEY (banner_id) REFERENCES student (banner_id)" +
"ON DELETE CASCADE ON UPDATE NO ACTION," +
"FOREIGN KEY (course_prefix, course_number) REFERENCES courses (course_prefix, course_number)" +
"ON DELETE CASCADE ON UPDATE NO ACTION" +
");"
database.Exec(createStatement1)
database.Exec(create_course)
database.Exec(create_reg_statement)
}
| numVal := course[4:7]
courseNum, err := strconv.Atoi(numVal)
if err != nil {
log.Fatal("ooops we must have mistyped", err)
}
preppedStatement.Exec(prefix, courseNum, desc)
}
}
func addSampleStudent | conditional_block |
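The Go row above exercises SQLite prepared statements with positional parameters: prepare once, then bind and execute per row. For reference, a minimal sketch of the same pattern in Rust — hedged: it assumes the rusqlite crate, which nothing in this dump actually uses, and borrows the table and column names from the Go code:

// Hedged sketch: rusqlite is an assumed dependency, not part of the original row.
use rusqlite::{params, Connection, Result};

fn main() -> Result<()> {
    let conn = Connection::open_in_memory()?;
    conn.execute(
        "CREATE TABLE students(
            banner_id INTEGER PRIMARY KEY,
            first_name TEXT NOT NULL,
            last_name TEXT NOT NULL)",
        [],
    )?;
    // Prepare once, execute many times -- the analogue of database.Prepare
    // followed by preppedStatement.Exec in the Go code.
    let mut stmt = conn
        .prepare("INSERT INTO students (banner_id, first_name, last_name) VALUES (?1, ?2, ?3)")?;
    for (i, (first, last)) in [("John", "Santore"), ("Enping", "Li")].into_iter().enumerate() {
        stmt.execute(params![1001 + i as i64, first, last])?;
    }
    Ok(())
}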
utils.rs | // Copyright (c) 2011 Jan Kokemüller
// Copyright (c) 2020 Sebastian Dröge <sebastian@centricular.com>
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
use dasp_frame::Frame;
/// Convert linear energy to logarithmic loudness.
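/// Follows the ITU-R BS.1770 definition: loudness = -0.691 + 10 * log10(energy) LKFS.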
pub fn energy_to_loudness(energy: f64) -> f64 {
// The non-test version is faster and more accurate but gives
// slightly different results than the C version and fails the
// tests because of that.
#[cfg(test)]
{
10.0 * (f64::ln(energy) / std::f64::consts::LN_10) - 0.691
}
#[cfg(not(test))]
{
10.0 * f64::log10(energy) - 0.691
}
}
/// Trait for abstracting over interleaved and planar samples.
pub trait Samples<'a, S: Sample + 'a>: Sized {
/// Call the given closure for each sample of the given channel.
// FIXME: Workaround for TrustedLen / TrustedRandomAccess being unstable
// and because of that we wouldn't get nice optimizations
fn foreach_sample(&self, channel: usize, func: impl FnMut(&'a S));
/// Call the given closure for each sample of the given channel.
// FIXME: Workaround for TrustedLen / TrustedRandomAccess being unstable
// and because of that we wouldn't get nice optimizations
fn foreach_sample_zipped<U>(
&self,
channel: usize,
iter: impl Iterator<Item = U>,
func: impl FnMut(&'a S, U),
);
fn foreach_frame<F: Frame<Sample = S>>(&self, func: impl FnMut(F));
/// Number of frames.
fn frames(&self) -> usize;
/// Number of channels.
fn channels(&self) -> usize;
/// Split into two at the given sample.
fn split_at(self, sample: usize) -> (Self, Self);
}
/// Struct representing interleaved samples.
pub struct Interleaved<'a, S> {
/// Interleaved sample data.
data: &'a [S],
/// Number of channels.
channels: usize,
}
impl<'a, S> Interleaved<'a, S> {
/// Create a new wrapper around the interleaved channels and do a sanity check.
pub fn new(data: &'a [S], channels: usize) -> Result<Self, crate::Error> {
if channels == 0 {
return Err(crate::Error::NoMem);
}
if data.len() % channels != 0 {
return Err(crate::Error::NoMem);
}
Ok(Interleaved { data, channels })
}
}
impl<'a, S: Sample> Samples<'a, S> for Interleaved<'a, S> {
#[inline]
fn foreach_sample(&self, channel: usize, mut func: impl FnMut(&'a S)) {
assert!(channel < self.channels);
for v in self.data.chunks_exact(self.channels) {
func(&v[channel])
}
}
#[inline]
fn fo | >(
&self,
channel: usize,
iter: impl Iterator<Item = U>,
mut func: impl FnMut(&'a S, U),
) {
assert!(channel < self.channels);
for (v, u) in Iterator::zip(self.data.chunks_exact(self.channels), iter) {
func(&v[channel], u)
}
}
#[inline]
fn foreach_frame<F: Frame<Sample = S>>(&self, mut func: impl FnMut(F)) {
assert_eq!(F::CHANNELS, self.channels);
for f in self.data.chunks_exact(self.channels) {
func(F::from_samples(&mut f.iter().copied()).unwrap());
}
}
#[inline]
fn frames(&self) -> usize {
self.data.len() / self.channels
}
#[inline]
fn channels(&self) -> usize {
self.channels
}
#[inline]
fn split_at(self, sample: usize) -> (Self, Self) {
assert!(sample * self.channels <= self.data.len());
let (fst, snd) = self.data.split_at(sample * self.channels);
(
Interleaved {
data: fst,
channels: self.channels,
},
Interleaved {
data: snd,
channels: self.channels,
},
)
}
}
/// Struct representing planar samples.
pub struct Planar<'a, S> {
data: &'a [&'a [S]],
start: usize,
end: usize,
}
impl<'a, S> Planar<'a, S> {
/// Create a new wrapper around the planar channels and do a sanity check.
pub fn new(data: &'a [&'a [S]]) -> Result<Self, crate::Error> {
if data.is_empty() {
return Err(crate::Error::NoMem);
}
if data.iter().any(|d| data[0].len() != d.len()) {
return Err(crate::Error::NoMem);
}
Ok(Planar {
data,
start: 0,
end: data[0].len(),
})
}
}
impl<'a, S: Sample> Samples<'a, S> for Planar<'a, S> {
#[inline]
fn foreach_sample(&self, channel: usize, mut func: impl FnMut(&'a S)) {
assert!(channel < self.data.len());
for v in &self.data[channel][self.start..self.end] {
func(v)
}
}
#[inline]
fn foreach_sample_zipped<U>(
&self,
channel: usize,
iter: impl Iterator<Item = U>,
mut func: impl FnMut(&'a S, U),
) {
assert!(channel < self.data.len());
for (v, u) in Iterator::zip(self.data[channel][self.start..self.end].iter(), iter) {
func(v, u)
}
}
#[inline]
fn foreach_frame<F: Frame<Sample = S>>(&self, mut func: impl FnMut(F)) {
let channels = self.data.len();
assert_eq!(F::CHANNELS, channels);
for f in self.start..self.end {
func(F::from_fn(|c| self.data[c][f]));
}
}
#[inline]
fn frames(&self) -> usize {
self.end - self.start
}
#[inline]
fn channels(&self) -> usize {
self.data.len()
}
#[inline]
fn split_at(self, sample: usize) -> (Self, Self) {
assert!(self.start + sample <= self.end);
(
Planar {
data: self.data,
start: self.start,
end: self.start + sample,
},
Planar {
data: self.data,
start: self.start + sample,
end: self.end,
},
)
}
}
pub trait Sample:
dasp_sample::Sample + dasp_sample::Duplex<f32> + dasp_sample::Duplex<f64>
{
const MAX_AMPLITUDE: f64;
fn as_f64_raw(self) -> f64;
}
impl Sample for f32 {
const MAX_AMPLITUDE: f64 = 1.0;
#[inline(always)]
fn as_f64_raw(self) -> f64 {
self as f64
}
}
impl Sample for f64 {
const MAX_AMPLITUDE: f64 = 1.0;
#[inline(always)]
fn as_f64_raw(self) -> f64 {
self
}
}
impl Sample for i16 {
const MAX_AMPLITUDE: f64 = -(Self::MIN as f64);
#[inline(always)]
fn as_f64_raw(self) -> f64 {
self as f64
}
}
impl Sample for i32 {
const MAX_AMPLITUDE: f64 = -(Self::MIN as f64);
#[inline(always)]
fn as_f64_raw(self) -> f64 {
self as f64
}
}
/// An extension-trait to accumulate samples into a frame
pub trait FrameAccumulator: Frame {
fn scale_add(&mut self, other: &Self, coeff: f32);
fn retain_max_samples(&mut self, other: &Self);
}
impl<F: Frame, S> FrameAccumulator for F
where
S: SampleAccumulator + std::fmt::Debug,
F: IndexMut<Target = S>,
{
#[inline(always)]
fn scale_add(&mut self, other: &Self, coeff: f32) {
for i in 0..Self::CHANNELS {
self.index_mut(i).scale_add(*other.index(i), coeff);
}
}
fn retain_max_samples(&mut self, other: &Self) {
for i in 0..Self::CHANNELS {
let this = self.index_mut(i);
let other = other.index(i);
if *other > *this {
*this = *other;
}
}
}
}
// Required since std::ops::IndexMut does not seem to be implemented for arrays,
// making FrameAccumulator hard to implement in an auto-vectorization-friendly way.
// IndexMut seems to be coming to the stdlib; once https://github.com/rust-lang/rust/pull/74989
// is implemented, this trait can be removed.
pub trait IndexMut {
type Target;
fn index_mut(&mut self, i: usize) -> &mut Self::Target;
fn index(&self, i: usize) -> &Self::Target;
}
macro_rules! index_mut_impl {
( $channels:expr ) => {
impl<T: SampleAccumulator> IndexMut for [T; $channels] {
type Target = T;
#[inline(always)]
fn index_mut(&mut self, i: usize) -> &mut Self::Target {
&mut self[i]
}
#[inline(always)]
fn index(&self, i: usize) -> &Self::Target {
&self[i]
}
}
};
}
index_mut_impl!(1);
index_mut_impl!(2);
index_mut_impl!(4);
index_mut_impl!(6);
index_mut_impl!(8);
pub trait SampleAccumulator: Sample {
fn scale_add(&mut self, other: Self, coeff: f32);
}
impl SampleAccumulator for f32 {
#[inline(always)]
fn scale_add(&mut self, other: Self, coeff: f32) {
#[cfg(feature = "precision-true-peak")]
{
*self = other.mul_add(coeff, *self);
}
#[cfg(not(feature = "precision-true-peak"))]
{
*self += other * coeff
}
}
}
#[cfg(test)]
pub mod tests {
use dasp_sample::{FromSample, Sample};
#[derive(Clone, Debug)]
pub struct Signal<S: FromSample<f32>> {
pub data: Vec<S>,
pub channels: u32,
pub rate: u32,
}
impl<S: Sample + FromSample<f32> + quickcheck::Arbitrary> quickcheck::Arbitrary for Signal<S> {
fn arbitrary<G: quickcheck::Gen>(g: &mut G) -> Self {
use rand::Rng;
let channels = g.gen_range(1, 16);
let rate = g.gen_range(16_000, 224_000);
let num_frames = (rate as f64 * g.gen_range(0.0, 5.0)) as usize;
let max = g.gen_range(0.0, 1.0);
let freqs = [
g.gen_range(20.0, 16_000.0),
g.gen_range(20.0, 16_000.0),
g.gen_range(20.0, 16_000.0),
g.gen_range(20.0, 16_000.0),
];
let volumes = [
g.gen_range(0.0, 1.0),
g.gen_range(0.0, 1.0),
g.gen_range(0.0, 1.0),
g.gen_range(0.0, 1.0),
];
let volume_scale = 1.0 / volumes.iter().sum::<f32>();
let mut accumulators = [0.0; 4];
let steps = [
2.0 * std::f32::consts::PI * freqs[0] / rate as f32,
2.0 * std::f32::consts::PI * freqs[1] / rate as f32,
2.0 * std::f32::consts::PI * freqs[2] / rate as f32,
2.0 * std::f32::consts::PI * freqs[3] / rate as f32,
];
let mut data = vec![S::from_sample(0.0f32); num_frames * channels as usize];
for frame in data.chunks_exact_mut(channels as usize) {
let val = max
* (f32::sin(accumulators[0]) * volumes[0]
+ f32::sin(accumulators[1]) * volumes[1]
+ f32::sin(accumulators[2]) * volumes[2]
+ f32::sin(accumulators[3]) * volumes[3])
/ volume_scale;
for sample in frame.iter_mut() {
*sample = S::from_sample(val);
}
for (acc, step) in accumulators.iter_mut().zip(steps.iter()) {
*acc += step;
}
}
Signal {
data,
channels,
rate,
}
}
fn shrink(&self) -> Box<dyn Iterator<Item = Self>> {
SignalShrinker::boxed(self.clone())
}
}
struct SignalShrinker<A: FromSample<f32>> {
seed: Signal<A>,
/// How many elements to take
size: usize,
/// Whether we tried with one channel already
tried_one_channel: bool,
}
impl<A: FromSample<f32> + quickcheck::Arbitrary> SignalShrinker<A> {
fn boxed(seed: Signal<A>) -> Box<dyn Iterator<Item = Signal<A>>> {
let channels = seed.channels;
Box::new(SignalShrinker {
seed,
size: 0,
tried_one_channel: channels == 1,
})
}
}
impl<A> Iterator for SignalShrinker<A>
where
A: FromSample<f32> + quickcheck::Arbitrary,
{
type Item = Signal<A>;
fn next(&mut self) -> Option<Signal<A>> {
if self.size < self.seed.data.len() {
// Generate a smaller vector by removing size elements
let xs1 = if self.tried_one_channel {
Vec::from(&self.seed.data[..self.size])
} else {
self.seed
.data
.iter()
.cloned()
.step_by(self.seed.channels as usize)
.take(self.size)
.collect()
};
if self.size == 0 {
self.size = if self.tried_one_channel {
self.seed.channels as usize
} else {
1
};
} else {
self.size *= 2;
}
Some(Signal {
data: xs1,
channels: if self.tried_one_channel {
self.seed.channels
} else {
1
},
rate: self.seed.rate,
})
} else if !self.tried_one_channel {
self.tried_one_channel = true;
self.size = 0;
self.next()
} else {
None
}
}
}
}
| reach_sample_zipped<U | identifier_name |
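A note on the two wrappers in the row above: Interleaved stores complete frames back-to-back ([L0, R0, L1, R1, ...]) while Planar keeps one contiguous slice per channel. A freestanding sketch of the per-channel walk that foreach_sample performs over each layout, using plain slices (the trait machinery is omitted):

fn main() {
    // Interleaved stereo: channels alternate within one slice.
    let interleaved: [f32; 6] = [0.1, -0.1, 0.2, -0.2, 0.3, -0.3];
    let channels = 2;
    // Like Interleaved::foreach_sample(0, ..): walk frame chunks, pick one channel.
    let left: Vec<f32> = interleaved.chunks_exact(channels).map(|f| f[0]).collect();
    // Planar stereo: one contiguous slice per channel.
    let planar: [&[f32]; 2] = [&[0.1, 0.2, 0.3], &[-0.1, -0.2, -0.3]];
    assert_eq!(left, planar[0]);
    println!("{} frames per channel", interleaved.len() / channels);
}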
utils.rs | // Copyright (c) 2011 Jan Kokemüller
// Copyright (c) 2020 Sebastian Dröge <sebastian@centricular.com>
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
use dasp_frame::Frame;
/// Convert linear energy to logarithmic loudness.
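/// Follows the ITU-R BS.1770 definition: loudness = -0.691 + 10 * log10(energy) LKFS.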
pub fn energy_to_loudness(energy: f64) -> f64 {
// The non-test version is faster and more accurate but gives
// slightly different results than the C version and fails the
// tests because of that.
#[cfg(test)]
{
10.0 * (f64::ln(energy) / std::f64::consts::LN_10) - 0.691
}
#[cfg(not(test))]
{
10.0 * f64::log10(energy) - 0.691
}
}
/// Trait for abstracting over interleaved and planar samples.
pub trait Samples<'a, S: Sample + 'a>: Sized {
/// Call the given closure for each sample of the given channel.
// FIXME: Workaround for TrustedLen / TrustedRandomAccess being unstable
// and because of that we wouldn't get nice optimizations
fn foreach_sample(&self, channel: usize, func: impl FnMut(&'a S));
/// Call the given closure for each sample of the given channel.
// FIXME: Workaround for TrustedLen / TrustedRandomAccess being unstable
// and because of that we wouldn't get nice optimizations
fn foreach_sample_zipped<U>(
&self,
channel: usize,
iter: impl Iterator<Item = U>,
func: impl FnMut(&'a S, U),
);
fn foreach_frame<F: Frame<Sample = S>>(&self, func: impl FnMut(F));
/// Number of frames.
fn frames(&self) -> usize;
/// Number of channels.
fn channels(&self) -> usize;
/// Split into two at the given sample.
fn split_at(self, sample: usize) -> (Self, Self);
}
/// Struct representing interleaved samples.
pub struct Interleaved<'a, S> {
/// Interleaved sample data.
data: &'a [S],
/// Number of channels.
channels: usize,
}
impl<'a, S> Interleaved<'a, S> {
/// Create a new wrapper around the interleaved channels and do a sanity check.
pub fn new(data: &'a [S], channels: usize) -> Result<Self, crate::Error> {
if channels == 0 {
return Err(crate::Error::NoMem);
}
if data.len() % channels != 0 {
return Err(crate::Error::NoMem);
}
Ok(Interleaved { data, channels })
}
}
impl<'a, S: Sample> Samples<'a, S> for Interleaved<'a, S> {
#[inline]
fn foreach_sample(&self, channel: usize, mut func: impl FnMut(&'a S)) {
assert!(channel < self.channels);
for v in self.data.chunks_exact(self.channels) {
func(&v[channel])
}
}
#[inline]
fn foreach_sample_zipped<U>(
&self,
channel: usize,
iter: impl Iterator<Item = U>,
mut func: impl FnMut(&'a S, U),
) {
assert!(channel < self.channels);
for (v, u) in Iterator::zip(self.data.chunks_exact(self.channels), iter) {
func(&v[channel], u)
}
}
#[inline]
fn foreach_frame<F: Frame<Sample = S>>(&self, mut func: impl FnMut(F)) {
assert_eq!(F::CHANNELS, self.channels);
for f in self.data.chunks_exact(self.channels) {
func(F::from_samples(&mut f.iter().copied()).unwrap());
}
}
#[inline]
fn frames(&self) -> usize {
self.data.len() / self.channels
}
#[inline]
fn channels(&self) -> usize {
self.channels
}
#[inline]
fn split_at(self, sample: usize) -> (Self, Self) {
assert!(sample * self.channels <= self.data.len());
let (fst, snd) = self.data.split_at(sample * self.channels);
(
Interleaved {
data: fst,
channels: self.channels,
},
Interleaved {
data: snd,
channels: self.channels,
},
)
}
}
/// Struct representing planar samples.
pub struct Planar<'a, S> {
data: &'a [&'a [S]],
start: usize,
end: usize,
}
impl<'a, S> Planar<'a, S> {
/// Create a new wrapper around the planar channels and do a sanity check.
pub fn new(data: &'a [&'a [S]]) -> Result<Self, crate::Error> {
if data.is_empty() {
return Err(crate::Error::NoMem);
}
if data.iter().any(|d| data[0].len() != d.len()) {
return Err(crate::Error::NoMem);
}
Ok(Planar {
data,
start: 0,
end: data[0].len(),
})
}
}
impl<'a, S: Sample> Samples<'a, S> for Planar<'a, S> {
#[inline]
fn foreach_sample(&self, channel: usize, mut func: impl FnMut(&'a S)) {
assert!(channel < self.data.len());
for v in &self.data[channel][self.start..self.end] {
func(v)
}
}
#[inline]
fn foreach_sample_zipped<U>(
&self,
channel: usize,
iter: impl Iterator<Item = U>,
mut func: impl FnMut(&'a S, U),
) {
assert!(channel < self.data.len());
for (v, u) in Iterator::zip(self.data[channel][self.start..self.end].iter(), iter) {
func(v, u)
}
}
#[inline]
fn foreach_frame<F: Frame<Sample = S>>(&self, mut func: impl FnMut(F)) {
let channels = self.data.len();
assert_eq!(F::CHANNELS, channels);
for f in self.start..self.end {
func(F::from_fn(|c| self.data[c][f]));
}
}
#[inline]
fn frames(&self) -> usize {
self.end - self.start
}
#[inline]
fn channels(&self) -> usize {
self.data.len()
}
#[inline]
fn split_at(self, sample: usize) -> (Self, Self) {
assert!(self.start + sample <= self.end);
(
Planar {
data: self.data,
start: self.start,
end: self.start + sample,
},
Planar {
data: self.data,
start: self.start + sample,
end: self.end,
},
)
}
}
pub trait Sample:
dasp_sample::Sample + dasp_sample::Duplex<f32> + dasp_sample::Duplex<f64>
{
const MAX_AMPLITUDE: f64;
fn as_f64_raw(self) -> f64;
}
impl Sample for f32 {
const MAX_AMPLITUDE: f64 = 1.0;
#[inline(always)]
fn as_f64_raw(self) -> f64 {
self as f64
}
}
impl Sample for f64 {
const MAX_AMPLITUDE: f64 = 1.0;
#[inline(always)]
fn as_f64_raw(self) -> f64 {
self | const MAX_AMPLITUDE: f64 = -(Self::MIN as f64);
#[inline(always)]
fn as_f64_raw(self) -> f64 {
self as f64
}
}
impl Sample for i32 {
const MAX_AMPLITUDE: f64 = -(Self::MIN as f64);
#[inline(always)]
fn as_f64_raw(self) -> f64 {
self as f64
}
}
/// An extension-trait to accumulate samples into a frame
pub trait FrameAccumulator: Frame {
fn scale_add(&mut self, other: &Self, coeff: f32);
fn retain_max_samples(&mut self, other: &Self);
}
impl<F: Frame, S> FrameAccumulator for F
where
S: SampleAccumulator + std::fmt::Debug,
F: IndexMut<Target = S>,
{
#[inline(always)]
fn scale_add(&mut self, other: &Self, coeff: f32) {
for i in 0..Self::CHANNELS {
self.index_mut(i).scale_add(*other.index(i), coeff);
}
}
fn retain_max_samples(&mut self, other: &Self) {
for i in 0..Self::CHANNELS {
let this = self.index_mut(i);
let other = other.index(i);
if *other > *this {
*this = *other;
}
}
}
}
// Required since std::ops::IndexMut does not seem to be implemented for arrays,
// making FrameAccumulator hard to implement in an auto-vectorization-friendly way.
// IndexMut seems to be coming to the stdlib; once https://github.com/rust-lang/rust/pull/74989
// is implemented, this trait can be removed.
pub trait IndexMut {
type Target;
fn index_mut(&mut self, i: usize) -> &mut Self::Target;
fn index(&self, i: usize) -> &Self::Target;
}
macro_rules! index_mut_impl {
( $channels:expr ) => {
impl<T: SampleAccumulator> IndexMut for [T; $channels] {
type Target = T;
#[inline(always)]
fn index_mut(&mut self, i: usize) -> &mut Self::Target {
&mut self[i]
}
#[inline(always)]
fn index(&self, i: usize) -> &Self::Target {
&self[i]
}
}
};
}
index_mut_impl!(1);
index_mut_impl!(2);
index_mut_impl!(4);
index_mut_impl!(6);
index_mut_impl!(8);
pub trait SampleAccumulator: Sample {
fn scale_add(&mut self, other: Self, coeff: f32);
}
impl SampleAccumulator for f32 {
#[inline(always)]
fn scale_add(&mut self, other: Self, coeff: f32) {
#[cfg(feature = "precision-true-peak")]
{
*self = other.mul_add(coeff, *self);
}
#[cfg(not(feature = "precision-true-peak"))]
{
*self += other * coeff
}
}
}
#[cfg(test)]
pub mod tests {
use dasp_sample::{FromSample, Sample};
#[derive(Clone, Debug)]
pub struct Signal<S: FromSample<f32>> {
pub data: Vec<S>,
pub channels: u32,
pub rate: u32,
}
impl<S: Sample + FromSample<f32> + quickcheck::Arbitrary> quickcheck::Arbitrary for Signal<S> {
fn arbitrary<G: quickcheck::Gen>(g: &mut G) -> Self {
use rand::Rng;
let channels = g.gen_range(1, 16);
let rate = g.gen_range(16_000, 224_000);
let num_frames = (rate as f64 * g.gen_range(0.0, 5.0)) as usize;
let max = g.gen_range(0.0, 1.0);
let freqs = [
g.gen_range(20.0, 16_000.0),
g.gen_range(20.0, 16_000.0),
g.gen_range(20.0, 16_000.0),
g.gen_range(20.0, 16_000.0),
];
let volumes = [
g.gen_range(0.0, 1.0),
g.gen_range(0.0, 1.0),
g.gen_range(0.0, 1.0),
g.gen_range(0.0, 1.0),
];
let volume_scale = 1.0 / volumes.iter().sum::<f32>();
let mut accumulators = [0.0; 4];
let steps = [
2.0 * std::f32::consts::PI * freqs[0] / rate as f32,
2.0 * std::f32::consts::PI * freqs[1] / rate as f32,
2.0 * std::f32::consts::PI * freqs[2] / rate as f32,
2.0 * std::f32::consts::PI * freqs[3] / rate as f32,
];
let mut data = vec![S::from_sample(0.0f32); num_frames * channels as usize];
for frame in data.chunks_exact_mut(channels as usize) {
let val = max
* (f32::sin(accumulators[0]) * volumes[0]
+ f32::sin(accumulators[1]) * volumes[1]
+ f32::sin(accumulators[2]) * volumes[2]
+ f32::sin(accumulators[3]) * volumes[3])
/ volume_scale;
for sample in frame.iter_mut() {
*sample = S::from_sample(val);
}
for (acc, step) in accumulators.iter_mut().zip(steps.iter()) {
*acc += step;
}
}
Signal {
data,
channels,
rate,
}
}
fn shrink(&self) -> Box<dyn Iterator<Item = Self>> {
SignalShrinker::boxed(self.clone())
}
}
struct SignalShrinker<A: FromSample<f32>> {
seed: Signal<A>,
/// How many elements to take
size: usize,
/// Whether we tried with one channel already
tried_one_channel: bool,
}
impl<A: FromSample<f32> + quickcheck::Arbitrary> SignalShrinker<A> {
fn boxed(seed: Signal<A>) -> Box<dyn Iterator<Item = Signal<A>>> {
let channels = seed.channels;
Box::new(SignalShrinker {
seed,
size: 0,
tried_one_channel: channels == 1,
})
}
}
impl<A> Iterator for SignalShrinker<A>
where
A: FromSample<f32> + quickcheck::Arbitrary,
{
type Item = Signal<A>;
fn next(&mut self) -> Option<Signal<A>> {
if self.size < self.seed.data.len() {
// Generate a smaller vector by removing size elements
let xs1 = if self.tried_one_channel {
Vec::from(&self.seed.data[..self.size])
} else {
self.seed
.data
.iter()
.cloned()
.step_by(self.seed.channels as usize)
.take(self.size)
.collect()
};
if self.size == 0 {
self.size = if self.tried_one_channel {
self.seed.channels as usize
} else {
1
};
} else {
self.size *= 2;
}
Some(Signal {
data: xs1,
channels: if self.tried_one_channel {
self.seed.channels
} else {
1
},
rate: self.seed.rate,
})
} else if !self.tried_one_channel {
self.tried_one_channel = true;
self.size = 0;
self.next()
} else {
None
}
}
}
} | }
}
impl Sample for i16 { | random_line_split |
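For context on the quickcheck helper in the row above: the Arbitrary impl synthesizes test audio by summing four sine oscillators, each driven by a per-sample phase accumulator. A freestanding version of that oscillator loop, with fixed constants standing in for quickcheck's random choices:

fn main() {
    let rate = 48_000.0_f32;
    let freq = 440.0_f32;
    // Phase advance per sample, as in the `steps` array above.
    let step = 2.0 * std::f32::consts::PI * freq / rate;
    let mut acc = 0.0_f32;
    let samples: Vec<f32> = (0..8)
        .map(|_| {
            let v = f32::sin(acc);
            acc += step; // running phase accumulator
            v
        })
        .collect();
    println!("{samples:?}");
}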
utils.rs | // Copyright (c) 2011 Jan Kokemüller
// Copyright (c) 2020 Sebastian Dröge <sebastian@centricular.com>
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
use dasp_frame::Frame;
/// Convert linear energy to logarithmic loudness.
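/// Follows the ITU-R BS.1770 definition: loudness = -0.691 + 10 * log10(energy) LKFS.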
pub fn energy_to_loudness(energy: f64) -> f64 {
// The non-test version is faster and more accurate but gives
// slightly different results than the C version and fails the
// tests because of that.
#[cfg(test)]
{
10.0 * (f64::ln(energy) / std::f64::consts::LN_10) - 0.691
}
#[cfg(not(test))]
{
10.0 * f64::log10(energy) - 0.691
}
}
/// Trait for abstracting over interleaved and planar samples.
pub trait Samples<'a, S: Sample + 'a>: Sized {
/// Call the given closure for each sample of the given channel.
// FIXME: Workaround for TrustedLen / TrustedRandomAccess being unstable
// and because of that we wouldn't get nice optimizations
fn foreach_sample(&self, channel: usize, func: impl FnMut(&'a S));
/// Call the given closure for each sample of the given channel.
// FIXME: Workaround for TrustedLen / TrustedRandomAccess being unstable
// and because of that we wouldn't get nice optimizations
fn foreach_sample_zipped<U>(
&self,
channel: usize,
iter: impl Iterator<Item = U>,
func: impl FnMut(&'a S, U),
);
fn foreach_frame<F: Frame<Sample = S>>(&self, func: impl FnMut(F));
/// Number of frames.
fn frames(&self) -> usize;
/// Number of channels.
fn channels(&self) -> usize;
/// Split into two at the given sample.
fn split_at(self, sample: usize) -> (Self, Self);
}
/// Struct representing interleaved samples.
pub struct Interleaved<'a, S> {
/// Interleaved sample data.
data: &'a [S],
/// Number of channels.
channels: usize,
}
impl<'a, S> Interleaved<'a, S> {
/// Create a new wrapper around the interleaved channels and do a sanity check.
pub fn new(data: &'a [S], channels: usize) -> Result<Self, crate::Error> {
if channels == 0 {
return Err(crate::Error::NoMem);
}
if data.len() % channels != 0 {
return Err(crate::Error::NoMem);
}
Ok(Interleaved { data, channels })
}
}
impl<'a, S: Sample> Samples<'a, S> for Interleaved<'a, S> {
#[inline]
fn foreach_sample(&self, channel: usize, mut func: impl FnMut(&'a S)) {
assert!(channel < self.channels);
for v in self.data.chunks_exact(self.channels) {
func(&v[channel])
}
}
#[inline]
fn foreach_sample_zipped<U>(
&self,
channel: usize,
iter: impl Iterator<Item = U>,
mut func: impl FnMut(&'a S, U),
) {
assert!(channel < self.channels);
for (v, u) in Iterator::zip(self.data.chunks_exact(self.channels), iter) {
func(&v[channel], u)
}
}
#[inline]
fn foreach_frame<F: Frame<Sample = S>>(&self, mut func: impl FnMut(F)) {
assert_eq!(F::CHANNELS, self.channels);
for f in self.data.chunks_exact(self.channels) {
func(F::from_samples(&mut f.iter().copied()).unwrap());
}
}
#[inline]
fn frames(&self) -> usize {
self.data.len() / self.channels
}
#[inline]
fn channels(&self) -> usize {
self.channels
}
#[inline]
fn split_at(self, sample: usize) -> (Self, Self) {
assert!(sample * self.channels <= self.data.len());
let (fst, snd) = self.data.split_at(sample * self.channels);
(
Interleaved {
data: fst,
channels: self.channels,
},
Interleaved {
data: snd,
channels: self.channels,
},
)
}
}
/// Struct representing planar samples.
pub struct Planar<'a, S> {
data: &'a [&'a [S]],
start: usize,
end: usize,
}
impl<'a, S> Planar<'a, S> {
/// Create a new wrapper around the planar channels and do a sanity check.
pub fn new(data: &'a [&'a [S]]) -> Result<Self, crate::Error> {
if data.is_empty() {
return Err(crate::Error::NoMem);
}
if data.iter().any(|d| data[0].len() != d.len()) {
return Err(crate::Error::NoMem);
}
Ok(Planar {
data,
start: 0,
end: data[0].len(),
})
}
}
impl<'a, S: Sample> Samples<'a, S> for Planar<'a, S> {
#[inline]
fn foreach_sample(&self, channel: usize, mut func: impl FnMut(&'a S)) {
assert!(channel < self.data.len());
for v in &self.data[channel][self.start..self.end] {
func(v)
}
}
#[inline]
fn foreach_sample_zipped<U>(
&self,
channel: usize,
iter: impl Iterator<Item = U>,
mut func: impl FnMut(&'a S, U),
) {
assert!(channel < self.data.len());
for (v, u) in Iterator::zip(self.data[channel][self.start..self.end].iter(), iter) {
func(v, u)
}
}
#[inline]
fn foreach_frame<F: Frame<Sample = S>>(&self, mut func: impl FnMut(F)) {
let channels = self.data.len();
assert_eq!(F::CHANNELS, channels);
for f in self.start..self.end {
func(F::from_fn(|c| self.data[c][f]));
}
}
#[inline]
fn frames(&self) -> usize {
self.end - self.start
}
#[inline]
fn channels(&self) -> usize {
self.data.len()
}
#[inline]
fn split_at(self, sample: usize) -> (Self, Self) {
assert!(self.start + sample <= self.end);
(
Planar {
data: self.data,
start: self.start,
end: self.start + sample,
},
Planar {
data: self.data,
start: self.start + sample,
end: self.end,
},
)
}
}
pub trait Sample:
dasp_sample::Sample + dasp_sample::Duplex<f32> + dasp_sample::Duplex<f64>
{
const MAX_AMPLITUDE: f64;
fn as_f64_raw(self) -> f64;
}
impl Sample for f32 {
const MAX_AMPLITUDE: f64 = 1.0;
#[inline(always)]
fn as_f64_raw(self) -> f64 {
self as f64
}
}
impl Sample for f64 {
const MAX_AMPLITUDE: f64 = 1.0;
#[inline(always)]
fn as_f64_raw(self) -> f64 {
|
impl Sample for i16 {
const MAX_AMPLITUDE: f64 = -(Self::MIN as f64);
#[inline(always)]
fn as_f64_raw(self) -> f64 {
self as f64
}
}
impl Sample for i32 {
const MAX_AMPLITUDE: f64 = -(Self::MIN as f64);
#[inline(always)]
fn as_f64_raw(self) -> f64 {
self as f64
}
}
/// An extension-trait to accumulate samples into a frame
pub trait FrameAccumulator: Frame {
fn scale_add(&mut self, other: &Self, coeff: f32);
fn retain_max_samples(&mut self, other: &Self);
}
impl<F: Frame, S> FrameAccumulator for F
where
S: SampleAccumulator + std::fmt::Debug,
F: IndexMut<Target = S>,
{
#[inline(always)]
fn scale_add(&mut self, other: &Self, coeff: f32) {
for i in 0..Self::CHANNELS {
self.index_mut(i).scale_add(*other.index(i), coeff);
}
}
fn retain_max_samples(&mut self, other: &Self) {
for i in 0..Self::CHANNELS {
let this = self.index_mut(i);
let other = other.index(i);
if *other > *this {
*this = *other;
}
}
}
}
// Required since std::ops::IndexMut does not seem to be implemented for arrays,
// making FrameAccumulator hard to implement in an auto-vectorization-friendly way.
// IndexMut seems to be coming to the stdlib; once https://github.com/rust-lang/rust/pull/74989
// is implemented, this trait can be removed.
pub trait IndexMut {
type Target;
fn index_mut(&mut self, i: usize) -> &mut Self::Target;
fn index(&self, i: usize) -> &Self::Target;
}
macro_rules! index_mut_impl {
( $channels:expr ) => {
impl<T: SampleAccumulator> IndexMut for [T; $channels] {
type Target = T;
#[inline(always)]
fn index_mut(&mut self, i: usize) -> &mut Self::Target {
&mut self[i]
}
#[inline(always)]
fn index(&self, i: usize) -> &Self::Target {
&self[i]
}
}
};
}
index_mut_impl!(1);
index_mut_impl!(2);
index_mut_impl!(4);
index_mut_impl!(6);
index_mut_impl!(8);
pub trait SampleAccumulator: Sample {
fn scale_add(&mut self, other: Self, coeff: f32);
}
impl SampleAccumulator for f32 {
#[inline(always)]
fn scale_add(&mut self, other: Self, coeff: f32) {
#[cfg(feature = "precision-true-peak")]
{
*self = other.mul_add(coeff, *self);
}
#[cfg(not(feature = "precision-true-peak"))]
{
*self += other * coeff
}
}
}
#[cfg(test)]
pub mod tests {
use dasp_sample::{FromSample, Sample};
#[derive(Clone, Debug)]
pub struct Signal<S: FromSample<f32>> {
pub data: Vec<S>,
pub channels: u32,
pub rate: u32,
}
impl<S: Sample + FromSample<f32> + quickcheck::Arbitrary> quickcheck::Arbitrary for Signal<S> {
fn arbitrary<G: quickcheck::Gen>(g: &mut G) -> Self {
use rand::Rng;
let channels = g.gen_range(1, 16);
let rate = g.gen_range(16_000, 224_000);
let num_frames = (rate as f64 * g.gen_range(0.0, 5.0)) as usize;
let max = g.gen_range(0.0, 1.0);
let freqs = [
g.gen_range(20.0, 16_000.0),
g.gen_range(20.0, 16_000.0),
g.gen_range(20.0, 16_000.0),
g.gen_range(20.0, 16_000.0),
];
let volumes = [
g.gen_range(0.0, 1.0),
g.gen_range(0.0, 1.0),
g.gen_range(0.0, 1.0),
g.gen_range(0.0, 1.0),
];
let volume_scale = 1.0 / volumes.iter().sum::<f32>();
let mut accumulators = [0.0; 4];
let steps = [
2.0 * std::f32::consts::PI * freqs[0] / rate as f32,
2.0 * std::f32::consts::PI * freqs[1] / rate as f32,
2.0 * std::f32::consts::PI * freqs[2] / rate as f32,
2.0 * std::f32::consts::PI * freqs[3] / rate as f32,
];
let mut data = vec![S::from_sample(0.0f32); num_frames * channels as usize];
for frame in data.chunks_exact_mut(channels as usize) {
let val = max
* (f32::sin(accumulators[0]) * volumes[0]
+ f32::sin(accumulators[1]) * volumes[1]
+ f32::sin(accumulators[2]) * volumes[2]
+ f32::sin(accumulators[3]) * volumes[3])
/ volume_scale;
for sample in frame.iter_mut() {
*sample = S::from_sample(val);
}
for (acc, step) in accumulators.iter_mut().zip(steps.iter()) {
*acc += step;
}
}
Signal {
data,
channels,
rate,
}
}
fn shrink(&self) -> Box<dyn Iterator<Item = Self>> {
SignalShrinker::boxed(self.clone())
}
}
struct SignalShrinker<A: FromSample<f32>> {
seed: Signal<A>,
/// How many elements to take
size: usize,
/// Whether we tried with one channel already
tried_one_channel: bool,
}
impl<A: FromSample<f32> + quickcheck::Arbitrary> SignalShrinker<A> {
fn boxed(seed: Signal<A>) -> Box<dyn Iterator<Item = Signal<A>>> {
let channels = seed.channels;
Box::new(SignalShrinker {
seed,
size: 0,
tried_one_channel: channels == 1,
})
}
}
impl<A> Iterator for SignalShrinker<A>
where
A: FromSample<f32> + quickcheck::Arbitrary,
{
type Item = Signal<A>;
fn next(&mut self) -> Option<Signal<A>> {
if self.size < self.seed.data.len() {
// Generate a smaller vector by removing size elements
let xs1 = if self.tried_one_channel {
Vec::from(&self.seed.data[..self.size])
} else {
self.seed
.data
.iter()
.cloned()
.step_by(self.seed.channels as usize)
.take(self.size)
.collect()
};
if self.size == 0 {
self.size = if self.tried_one_channel {
self.seed.channels as usize
} else {
1
};
} else {
self.size *= 2;
}
Some(Signal {
data: xs1,
channels: if self.tried_one_channel {
self.seed.channels
} else {
1
},
rate: self.seed.rate,
})
} else if !self.tried_one_channel {
self.tried_one_channel = true;
self.size = 0;
self.next()
} else {
None
}
}
}
}
| self
}
} | identifier_body |
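One more note on SampleAccumulator::scale_add above: with the precision-true-peak feature it uses f32::mul_add, which evaluates other * coeff + self exactly and rounds once, while the fallback multiplies and adds with two roundings. A freestanding illustration of the difference (values chosen so the extra rounding is visible):

fn main() {
    // Fused: 0.1f32 * 10.0 - 1.0 computed exactly, rounded once.
    let fused = 0.1_f32.mul_add(10.0, -1.0);
    // Separate: the product is rounded to f32 (exactly 1.0 here) before the add.
    let separate = 0.1_f32 * 10.0 - 1.0;
    // fused keeps the tiny residue (~1.49e-8); separate collapses to 0.0.
    println!("fused = {fused:e}, separate = {separate:e}");
    assert_ne!(fused, separate);
}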
narwhal.js | console.log('Narwhal scripts');
(function ($) {
'use strict';
//
// Helpers
//
const ifExistsDo = ($item, action) => { if ($item.length) action($item) }
const loadStyle = (url) =>
(new Promise((resolve, reject) => {
const $style = $('<link rel="stylesheet">')
.attr('href', url)
.ready(resolve)
$('head').append($style);
}))
const loadScriptAndWait = (url, waitFor) =>
(new Promise((resolve, reject) => {
const $script = $('<script type="text/javascript">')
.attr('src', url)
.ready(resolve)
$('head').append($script);
})
.then(() => (new Promise((resolve, reject) => {
setTimeout(resolve, waitFor)
})))
)
$(document).ready(() => {
//
// Configurations, selectors, ...
//
const selector = {
editButton: { id: 'n-edit-button', selector: '#n-edit-button' },
clickable: { class: 'n-clickable', selector: '.n-clickable' },
jinja2UrlTemplate: { id: 'n-jinja2-urlTemplate', selector: '#n-jinja2-urlTemplate' },
metaData: { id: 'n-meta', selector: '#n-meta' },
metaDataType: { id: 'n-meta-type', selector: '#n-meta-type' },
artifactItem: { class: 'n-artifact-item', selector: '.n-artifact-item' },
artifactItemId: { id: 'n-artifact-id', selector: '#n-artifact-id-' },
}
const selectors = [
{ title: 'edit button', selector: 'a[href^="/edit"]:first', attr: { id: selector.editButton.id } },
{ title: 'artifact item', selector: '.tbl-artifact tr', attr: { class: selector.artifactItem.class } },
{ title: 'artifact item id', selector: '.tbl-artifact tr', attr: { id: selector.artifactItemId.id, data: 'id' } },
// clickable content
{ title: 'description row', selector: 'tr:contains("description"):first > td:first', attr: { class: selector.clickable.class } },
{ title: 'template row', selector: 'tr:contains("url template"):first > td:first', attr: { class: selector.clickable.class } },
// jinja
{ title: 'jinja2 urlTemplate', selector: 'input[name="url_template"]', attr: { id: selector.jinja2UrlTemplate.id } },
// metadata
{ title: 'meta - type', selector: 'tr:contains("Meta") th small', attr: { id: selector.metaDataType.id } },
{ title: 'meta', selector: 'tr:contains("Meta") textarea[name="meta"]', attr: { id: selector.metaData.id } },
]
// distribute attributes to elements by selectors above
selectors.forEach(({ selector, attr }) => {
ifExistsDo($(selector), ($item) => {
if (attr.hasOwnProperty('class')) {
$item.addClass(attr.class)
} else if (attr.hasOwnProperty('id') && attr.hasOwnProperty('data')) {
$item.each((_k, singleItem) => {
const $singleItem = $(singleItem)
const id = `${attr.id}-${$singleItem.data(attr.data)}`
$singleItem.attr('id', id)
})
} else if (attr.hasOwnProperty('id')) {
$item.attr('id', attr.id)
}
})
})
//
// Events, handlers, helpers ...
//
// edit on `E` keyup
$(document).on('keyup', ({ target: { tagName }, key }) => {
const tag = tagName.toLocaleLowerCase()
if (tag !== 'input' && tag !== 'textarea' && key === 'e') {
ifExistsDo($(selector.editButton.selector), ($edit) => {
window.location.href = $edit.first().attr('href')
})
}
})
// make links in clickable class clickable
ifExistsDo($(selector.clickable.selector), ($clickable) => {
$clickable.each((_, item) => {
const $item = $(item)
const content =
$item
.html()
.split(' ')
.map((word) => word.startsWith('http') ? `<a href='${word}' target="_blank">${word}</a>` : word)
.join(' ')
$item.html(content)
})
})
// show jinja2 syntax in textarea with jinja2
const highlightJinjaSyntax = (type, id) => {
// https://codemirror.net/doc/manual.html
return window.CodeMirror.fromTextArea(document.getElementById(id), {
mode: { name: type, htmlMode: true },
lineNumbers: true,
tabSize: 2,
extraKeys: {
Tab: (cm) => cm.execCommand("indentMore"),
"Shift-Tab": (cm) => cm.execCommand("indentLess"),
},
});
}
// transform urlTemplate input to textArea
if (window.location.href.indexOf('/artifacts/') > 0) {
ifExistsDo($(selector.jinja2UrlTemplate.selector), ($urlTemplate) => {
const $textArea = $('<textarea>')
.attr('name', $urlTemplate.attr('name'))
.attr('id', $urlTemplate.attr('id'))
.addClass($urlTemplate.attr('class'))
.val($urlTemplate.val())
$urlTemplate.replaceWith($textArea)
})
}
// load script and enable highlighting
if ($(selector.jinja2UrlTemplate.selector).length > 0 || $(selector.metaData.selector).length > 0) {
Promise
.all([
loadStyle('https://cdnjs.cloudflare.com/ajax/libs/codemirror/5.44.0/codemirror.min.css'),
loadScriptAndWait('https://cdnjs.cloudflare.com/ajax/libs/codemirror/5.44.0/codemirror.min.js', 300),
])
.then(() => {
if (!window.hasOwnProperty('CodeMirror')) {
throw Error('CodeMirror was not loaded correctly ...')
}
})
.then(() => loadScriptAndWait('https://cdnjs.cloudflare.com/ajax/libs/codemirror/5.44.0/mode/jinja2/jinja2.min.js', 300))
.then(() => loadScriptAndWait('https://cdnjs.cloudflare.com/ajax/libs/codemirror/5.44.0/mode/yaml/yaml.min.js', 300))
.then(() => {
ifExistsDo($(selector.jinja2UrlTemplate.selector), ($_) => {
highlightJinjaSyntax('jinja2', selector.jinja2UrlTemplate.id).setSize('', '60px')
})
ifExistsDo($(selector.metaData.selector), ($_) => {
const typeText = $(selector.metaDataType.selector).text()
const type = typeText.indexOf('jinja2') > 0
? 'jinja2'
: typeText.indexOf('yaml')
? 'yaml'
: ''
highlightJinjaSyntax(type, selector.metaData.id)
})
})
.then(() => {
$('.CodeMirror').css({ border: '1px solid #ccc', borderRadius: '4px' })
})
.catch((e) => console.error(e))
}
// add dashboard overview
if (window.location.href.indexOf('/artifacts') > 0 && $(selector.artifactItem.selector).length > 0) {
$('.n-overview').remove()
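// 'example' documents the array shape expected in localStorage under 'dashboardArtifactsOverview'; it is reference-only and never read below.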
const example = [
{
domain: "consents",
list: [
{ id: 923, configurationId: 953, alias: 'thor (router)' },
{ id: 962, configurationId: 963, alias: 'rogue (person-filter)' },
{ id: 924, configurationId: 952, alias: 'ant-man (aggregator)' },
{ id: 965, configurationId: 966, alias: 'quicksilver (deriver)' },
{ id: 958, configurationId: 959, alias: 'hulk (data_processing-filter)' },
{ id: 960, configurationId: 961, alias: 'she-hulk (all-intent-filter)' },
{ id: 904, configurationId: 968, alias: 'loki (stator)' },
]
}
]
const dashboardArtifactsOverview = localStorage.hasOwnProperty('dashboardArtifactsOverview')
? JSON.parse(localStorage.getItem('dashboardArtifactsOverview'))
: []
const $createDomainTitle = (domain) => domain.length > 0
? $('<h4></h4>')
.text(domain)
.css({ clear: 'both' })
: ''
const $getArtifact = (id) => {
const $artifact = $(`${selector.artifactItemId.selector}${id}`)
return $artifact.length
? $artifact
: $('<tr><td>id</td></tr>') // not all artifacts are shown on the page, but we can create a link
.append($(`<td><a href="/artifacts/${id}">artefact id: ${id}</a></td>`))
}
const $createAlias = (alias) => alias.length > 0
? $('<h3></h3>')
.text(alias)
.css({ fontSize: 12, margin: 0, textAlign: 'right' })
: ''
const $createArtifact = ($artifact) =>
$('<h4></h4>')
.append($artifact.clone().find('td:nth-child(2)'))
.css({ fontSize: 12 })
| : ''
const $createOverviewItem = ($artifact, $configuration, alias) =>
$('<div class="n-overview-item"></div>')
.append($createAlias(alias))
.append($createArtifact($artifact))
.append($createConfiguration($configuration))
.css({ margin: '0 10px 10px 0', padding: 5, border: '1px solid black', borderRadius: 5, height: 80, float: 'left' })
const $overview = $('<div class="n-overview"></div>').css({ paddingTop: 10, paddingBottom: 10 })
dashboardArtifactsOverview.forEach(({ domain, list }) => {
$overview.append($createDomainTitle(domain))
list.forEach(({ id, configurationId = 0, alias = '' }) => {
$overview.append(
$createOverviewItem(
$getArtifact(id),
$getArtifact(configurationId),
alias
)
)
})
})
$('h3:first').append(
$overview.append($('<br style="clear: both">'))
)
}
});
})(window.jQuery); | const $createConfiguration = ($configuration) => $configuration
? $('<h5></h5>')
.append($configuration.clone().find('td:nth-child(2)'))
.css({ fontSize: 11 }) | random_line_split |
narwhal.js | console.log('Narwhal scripts');
(function ($) {
'use strict';
//
// Helpers
//
const ifExistsDo = ($item, action) => { if ($item.length) action($item) }
const loadStyle = (url) =>
(new Promise((resolve, reject) => {
const $style = $('<link rel="stylesheet">')
.attr('href', url)
.ready(resolve)
$('head').append($style);
}))
const loadScriptAndWait = (url, waitFor) =>
(new Promise((resolve, reject) => {
const $script = $('<script type="text/javascript">')
.attr('src', url)
.ready(resolve)
$('head').append($script);
})
.then(() => (new Promise((resolve, reject) => {
setTimeout(resolve, waitFor)
})))
)
$(document).ready(() => {
//
// Configurations, selectors, ...
//
const selector = {
editButton: { id: 'n-edit-button', selector: '#n-edit-button' },
clickable: { class: 'n-clickable', selector: '.n-clickable' },
jinja2UrlTemplate: { id: 'n-jinja2-urlTemplate', selector: '#n-jinja2-urlTemplate' },
metaData: { id: 'n-meta', selector: '#n-meta' },
metaDataType: { id: 'n-meta-type', selector: '#n-meta-type' },
artifactItem: { class: 'n-artifact-item', selector: '.n-artifact-item' },
artifactItemId: { id: 'n-artifact-id', selector: '#n-artifact-id-' },
}
const selectors = [
{ title: 'edit button', selector: 'a[href^="/edit"]:first', attr: { id: selector.editButton.id } },
{ title: 'artifact item', selector: '.tbl-artifact tr', attr: { class: selector.artifactItem.class } },
{ title: 'artifact item id', selector: '.tbl-artifact tr', attr: { id: selector.artifactItemId.id, data: 'id' } },
// clickable content
{ title: 'description row', selector: 'tr:contains("description"):first > td:first', attr: { class: selector.clickable.class } },
{ title: 'template row', selector: 'tr:contains("url template"):first > td:first', attr: { class: selector.clickable.class } },
// jinja
{ title: 'jinja2 urlTemplate', selector: 'input[name="url_template"]', attr: { id: selector.jinja2UrlTemplate.id } },
// metadata
{ title: 'meta - type', selector: 'tr:contains("Meta") th small', attr: { id: selector.metaDataType.id } },
{ title: 'meta', selector: 'tr:contains("Meta") textarea[name="meta"]', attr: { id: selector.metaData.id } },
]
// distribute attributes to elements by selectors above
selectors.forEach(({ selector, attr }) => {
ifExistsDo($(selector), ($item) => {
if (attr.hasOwnProperty('class')) {
$item.addClass(attr.class)
} else if (attr.hasOwnProperty('id') && attr.hasOwnProperty('data')) {
$item.each((_k, singleItem) => {
const $singleItem = $(singleItem)
const id = `${attr.id}-${$singleItem.data(attr.data)}`
$singleItem.attr('id', id)
})
} else if (attr.hasOwnProperty('id')) {
$item.attr('id', attr.id)
}
})
})
//
// Events, handlers, helpers ...
//
// edit on `E` keyup
$(document).on('keyup', ({ target: { tagName }, key }) => {
const tag = tagName.toLocaleLowerCase()
if (tag !== 'input' && tag !== 'textarea' && key === 'e') {
ifExistsDo($(selector.editButton.selector), ($edit) => {
window.location.href = $edit.first().attr('href')
})
}
})
// make links in clickable class clickable
ifExistsDo($(selector.clickable.selector), ($clickable) => {
$clickable.each((_, item) => {
const $item = $(item)
const content =
$item
.html()
.split(' ')
.map((word) => word.startsWith('http') ? `<a href='${word}' target="_blank">${word}</a>` : word)
.join(' ')
$item.html(content)
})
})
// show jinja2 syntax in textarea with jinja2
const highlightJinjaSyntax = (type, id) => {
// https://codemirror.net/doc/manual.html
return window.CodeMirror.fromTextArea(document.getElementById(id), {
mode: { name: type, htmlMode: true },
lineNumbers: true,
tabSize: 2,
extraKeys: {
Tab: (cm) => cm.execCommand("indentMore"),
"Shift-Tab": (cm) => cm.execCommand("indentLess"),
},
});
}
// transform urlTemplate input to textArea
if (window.location.href.indexOf('/artifacts/') > 0) {
ifExistsDo($(selector.jinja2UrlTemplate.selector), ($urlTemplate) => {
const $textArea = $('<textarea>')
.attr('name', $urlTemplate.attr('name'))
.attr('id', $urlTemplate.attr('id'))
.addClass($urlTemplate.attr('class'))
.val($urlTemplate.val())
$urlTemplate.replaceWith($textArea)
})
}
// load script and enable highlighting
if ($(selector.jinja2UrlTemplate.selector).length > 0 || $(selector.metaData.selector).length > 0) {
Promise
.all([
loadStyle('https://cdnjs.cloudflare.com/ajax/libs/codemirror/5.44.0/codemirror.min.css'),
loadScriptAndWait('https://cdnjs.cloudflare.com/ajax/libs/codemirror/5.44.0/codemirror.min.js', 300),
])
.then(() => {
if (!window.hasOwnProperty('CodeMirror')) |
})
.then(() => loadScriptAndWait('https://cdnjs.cloudflare.com/ajax/libs/codemirror/5.44.0/mode/jinja2/jinja2.min.js', 300))
.then(() => loadScriptAndWait('https://cdnjs.cloudflare.com/ajax/libs/codemirror/5.44.0/mode/yaml/yaml.min.js', 300))
.then(() => {
ifExistsDo($(selector.jinja2UrlTemplate.selector), ($_) => {
highlightJinjaSyntax('jinja2', selector.jinja2UrlTemplate.id).setSize('', '60px')
})
ifExistsDo($(selector.metaData.selector), ($_) => {
const typeText = $(selector.metaDataType.selector).text()
const type = typeText.indexOf('jinja2') > 0
? 'jinja2'
: typeText.indexOf('yaml')
? 'yaml'
: ''
highlightJinjaSyntax(type, selector.metaData.id)
})
})
.then(() => {
$('.CodeMirror').css({ border: '1px solid #ccc', borderRadius: '4px' })
})
.catch((e) => console.error(e))
}
// add dashboard overview
if (window.location.href.indexOf('/artifacts') > 0 && $(selector.artifactItem.selector).length > 0) {
$('.n-overview').remove()
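// 'example' documents the array shape expected in localStorage under 'dashboardArtifactsOverview'; it is reference-only and never read below.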
const example = [
{
domain: "consents",
list: [
{ id: 923, configurationId: 953, alias: 'thor (router)' },
{ id: 962, configurationId: 963, alias: 'rogue (person-filter)' },
{ id: 924, configurationId: 952, alias: 'ant-man (aggregator)' },
{ id: 965, configurationId: 966, alias: 'quicksilver (deriver)' },
{ id: 958, configurationId: 959, alias: 'hulk (data_processing-filter)' },
{ id: 960, configurationId: 961, alias: 'she-hulk (all-intent-filter)' },
{ id: 904, configurationId: 968, alias: 'loki (stator)' },
]
}
]
const dashboardArtifactsOverview = localStorage.hasOwnProperty('dashboardArtifactsOverview')
? JSON.parse(localStorage.getItem('dashboardArtifactsOverview'))
: []
const $createDomainTitle = (domain) => domain.length > 0
? $('<h4></h4>')
.text(domain)
.css({ clear: 'both' })
: ''
const $getArtifact = (id) => {
const $artifact = $(`${selector.artifactItemId.selector}${id}`)
return $artifact.length
? $artifact
: $('<tr><td>id</td></tr>') // not all artifacts are shown on the page, but we can create a link
.append($(`<td><a href="/artifacts/${id}">artefact id: ${id}</a></td>`))
}
const $createAlias = (alias) => alias.length > 0
? $('<h3></h3>')
.text(alias)
.css({ fontSize: 12, margin: 0, textAlign: 'right' })
: ''
const $createArtifact = ($artifact) =>
$('<h4></h4>')
.append($artifact.clone().find('td:nth-child(2)'))
.css({ fontSize: 12 })
const $createConfiguration = ($configuration) => $configuration
? $('<h5></h5>')
.append($configuration.clone().find('td:nth-child(2)'))
.css({ fontSize: 11 })
: ''
const $createOverviewItem = ($artifact, $configuration, alias) =>
$('<div class="n-overview-item"></div>')
.append($createAlias(alias))
.append($createArtifact($artifact))
.append($createConfiguration($configuration))
.css({ margin: '0 10px 10px 0', padding: 5, border: '1px solid black', borderRadius: 5, height: 80, float: 'left' })
const $overview = $('<div class="n-overview"></div>').css({ paddingTop: 10, paddingBottom: 10 })
dashboardArtifactsOverview.forEach(({ domain, list }) => {
$overview.append($createDomainTitle(domain))
list.forEach(({ id, configurationId = 0, alias = '' }) => {
$overview.append(
$createOverviewItem(
$getArtifact(id),
$getArtifact(configurationId),
alias
)
)
})
})
$('h3:first').append(
$overview.append($('<br style="clear: both">'))
)
}
});
})(window.jQuery);
| {
throw Error('CodeMirror was not loaded correctly ...')
} | conditional_block |
mul_fixed.rs | use super::{
add, add_incomplete, EccBaseFieldElemFixed, EccScalarFixed, EccScalarFixedShort, FixedPoint,
NonIdentityEccPoint, FIXED_BASE_WINDOW_SIZE, H,
};
use crate::utilities::decompose_running_sum::RunningSumConfig;
use std::marker::PhantomData;
use group::{
ff::{Field, PrimeField, PrimeFieldBits},
Curve,
};
use halo2_proofs::{
circuit::{AssignedCell, Region, Value},
plonk::{
Advice, Column, ConstraintSystem, Constraints, Error, Expression, Fixed, Selector,
VirtualCells,
},
poly::Rotation,
};
use lazy_static::lazy_static;
use pasta_curves::{arithmetic::CurveAffine, pallas};
pub mod base_field_elem;
pub mod full_width;
pub mod short;
lazy_static! {
static ref TWO_SCALAR: pallas::Scalar = pallas::Scalar::from(2);
// H = 2^3 (3-bit window)
static ref H_SCALAR: pallas::Scalar = pallas::Scalar::from(H as u64);
static ref H_BASE: pallas::Base = pallas::Base::from(H as u64);
}
#[derive(Clone, Debug, Eq, PartialEq)]
pub struct Config<FixedPoints: super::FixedPoints<pallas::Affine>> {
running_sum_config: RunningSumConfig<pallas::Base, FIXED_BASE_WINDOW_SIZE>,
// The fixed Lagrange interpolation coefficients for `x_p`.
lagrange_coeffs: [Column<Fixed>; H],
// The fixed `z` for each window such that `y + z = u^2`.
fixed_z: Column<Fixed>,
// Decomposition of an `n-1`-bit scalar into `k`-bit windows:
// a = a_0 + 2^k(a_1) + 2^{2k}(a_2) + ... + 2^{(n-1)k}(a_{n-1})
window: Column<Advice>,
// y-coordinate of accumulator (only used in the final row).
u: Column<Advice>,
// Configuration for `add`
add_config: add::Config,
// Configuration for `add_incomplete`
add_incomplete_config: add_incomplete::Config,
_marker: PhantomData<FixedPoints>,
}
impl<FixedPoints: super::FixedPoints<pallas::Affine>> Config<FixedPoints> {
#[allow(clippy::too_many_arguments)]
pub(super) fn configure(
meta: &mut ConstraintSystem<pallas::Base>,
lagrange_coeffs: [Column<Fixed>; H],
window: Column<Advice>,
u: Column<Advice>,
add_config: add::Config,
add_incomplete_config: add_incomplete::Config,
) -> Self {
meta.enable_equality(window);
meta.enable_equality(u);
let q_running_sum = meta.selector();
let running_sum_config = RunningSumConfig::configure(meta, q_running_sum, window);
let config = Self {
running_sum_config,
lagrange_coeffs,
fixed_z: meta.fixed_column(),
window,
u,
add_config,
add_incomplete_config,
_marker: PhantomData,
};
// Check relationships between `add_config` and `add_incomplete_config`.
assert_eq!(
config.add_config.x_p, config.add_incomplete_config.x_p,
"add and add_incomplete are used internally in mul_fixed."
);
assert_eq!(
config.add_config.y_p, config.add_incomplete_config.y_p,
"add and add_incomplete are used internally in mul_fixed."
);
for advice in [config.window, config.u].iter() {
assert_ne!(
*advice, config.add_config.x_qr,
"Do not overlap with output columns of add."
);
assert_ne!(
*advice, config.add_config.y_qr,
"Do not overlap with output columns of add."
); | config.running_sum_coords_gate(meta);
config
}
/// Check that each window in the running sum decomposition uses the correct y_p
/// and interpolated x_p.
///
/// This gate is used both in the mul_fixed::base_field_elem and mul_fixed::short
/// helpers, which decompose the scalar using a running sum.
///
/// This gate is not used in the mul_fixed::full_width helper, since the full-width
/// scalar is witnessed directly as three-bit windows instead of being decomposed
/// via a running sum.
fn running_sum_coords_gate(&self, meta: &mut ConstraintSystem<pallas::Base>) {
meta.create_gate("Running sum coordinates check", |meta| {
let q_mul_fixed_running_sum =
meta.query_selector(self.running_sum_config.q_range_check());
let z_cur = meta.query_advice(self.window, Rotation::cur());
let z_next = meta.query_advice(self.window, Rotation::next());
// z_{i+1} = (z_i - a_i) / 2^3
// => a_i = z_i - z_{i+1} * 2^3
let word = z_cur - z_next * pallas::Base::from(H as u64);
Constraints::with_selector(q_mul_fixed_running_sum, self.coords_check(meta, word))
});
}
/// [Specification](https://p.z.cash/halo2-0.1:ecc-fixed-mul-coordinates).
#[allow(clippy::op_ref)]
fn coords_check(
&self,
meta: &mut VirtualCells<'_, pallas::Base>,
window: Expression<pallas::Base>,
) -> Vec<(&'static str, Expression<pallas::Base>)> {
let y_p = meta.query_advice(self.add_config.y_p, Rotation::cur());
let x_p = meta.query_advice(self.add_config.x_p, Rotation::cur());
let z = meta.query_fixed(self.fixed_z);
let u = meta.query_advice(self.u, Rotation::cur());
let window_pow: Vec<Expression<pallas::Base>> = (0..H)
.map(|pow| {
(0..pow).fold(Expression::Constant(pallas::Base::one()), |acc, _| {
acc * window.clone()
})
})
.collect();
let interpolated_x = window_pow.iter().zip(self.lagrange_coeffs.iter()).fold(
Expression::Constant(pallas::Base::zero()),
|acc, (window_pow, coeff)| acc + (window_pow.clone() * meta.query_fixed(*coeff)),
);
// Check interpolation of x-coordinate
let x_check = interpolated_x - x_p.clone();
// Check that `y + z = u^2`, where `z` is fixed and `u`, `y` are witnessed
let y_check = u.square() - y_p.clone() - z;
// Check that (x, y) is on the curve
let on_curve =
y_p.square() - x_p.clone().square() * x_p - Expression::Constant(pallas::Affine::b());
vec![
("check x", x_check),
("check y", y_check),
("on-curve", on_curve),
]
}
#[allow(clippy::type_complexity)]
fn assign_region_inner<F: FixedPoint<pallas::Affine>, const NUM_WINDOWS: usize>(
&self,
region: &mut Region<'_, pallas::Base>,
offset: usize,
scalar: &ScalarFixed,
base: &F,
coords_check_toggle: Selector,
) -> Result<(NonIdentityEccPoint, NonIdentityEccPoint), Error> {
// Assign fixed columns for given fixed base
self.assign_fixed_constants::<F, NUM_WINDOWS>(region, offset, base, coords_check_toggle)?;
// Initialize accumulator
let acc = self.initialize_accumulator::<F, NUM_WINDOWS>(region, offset, base, scalar)?;
// Process all windows excluding least and most significant windows
let acc = self.add_incomplete::<F, NUM_WINDOWS>(region, offset, acc, base, scalar)?;
// Process most significant window
let mul_b = self.process_msb::<F, NUM_WINDOWS>(region, offset, base, scalar)?;
Ok((acc, mul_b))
}
/// [Specification](https://p.z.cash/halo2-0.1:ecc-fixed-mul-load-base).
fn assign_fixed_constants<F: FixedPoint<pallas::Affine>, const NUM_WINDOWS: usize>(
&self,
region: &mut Region<'_, pallas::Base>,
offset: usize,
base: &F,
coords_check_toggle: Selector,
) -> Result<(), Error> {
let mut constants = None;
let build_constants = || {
let lagrange_coeffs = base.lagrange_coeffs();
assert_eq!(lagrange_coeffs.len(), NUM_WINDOWS);
let z = base.z();
assert_eq!(z.len(), NUM_WINDOWS);
(lagrange_coeffs, z)
};
// Assign fixed columns for given fixed base
for window in 0..NUM_WINDOWS {
coords_check_toggle.enable(region, window + offset)?;
// Assign x-coordinate Lagrange interpolation coefficients
for k in 0..H {
region.assign_fixed(
|| {
format!(
"Lagrange interpolation coeff for window: {:?}, k: {:?}",
window, k
)
},
self.lagrange_coeffs[k],
window + offset,
|| {
if constants.as_ref().is_none() {
constants = Some(build_constants());
}
let lagrange_coeffs = &constants.as_ref().unwrap().0;
Value::known(lagrange_coeffs[window][k])
},
)?;
}
// Assign z-values for each window
region.assign_fixed(
|| format!("z-value for window: {:?}", window),
self.fixed_z,
window + offset,
|| {
let z = &constants.as_ref().unwrap().1;
Value::known(pallas::Base::from(z[window]))
},
)?;
}
Ok(())
}
/// Assigns the values used to process a window.
fn process_window<F: FixedPoint<pallas::Affine>, const NUM_WINDOWS: usize>(
&self,
region: &mut Region<'_, pallas::Base>,
offset: usize,
w: usize,
k_usize: Value<usize>,
window_scalar: Value<pallas::Scalar>,
base: &F,
) -> Result<NonIdentityEccPoint, Error> {
let base_value = base.generator();
let base_u = base.u();
assert_eq!(base_u.len(), NUM_WINDOWS);
// Compute [window_scalar]B
let mul_b = {
let mul_b = window_scalar.map(|scalar| base_value * scalar);
let mul_b = mul_b.map(|mul_b| mul_b.to_affine().coordinates().unwrap());
let x = mul_b.map(|mul_b| {
let x = *mul_b.x();
assert!(x != pallas::Base::zero());
x.into()
});
let x = region.assign_advice(
|| format!("mul_b_x, window {}", w),
self.add_config.x_p,
offset + w,
|| x,
)?;
let y = mul_b.map(|mul_b| {
let y = *mul_b.y();
assert!(y != pallas::Base::zero());
y.into()
});
let y = region.assign_advice(
|| format!("mul_b_y, window {}", w),
self.add_config.y_p,
offset + w,
|| y,
)?;
NonIdentityEccPoint::from_coordinates_unchecked(x, y)
};
// Assign u = (y_p + z_w).sqrt()
let u_val = k_usize.map(|k| pallas::Base::from_repr(base_u[w][k]).unwrap());
region.assign_advice(|| "u", self.u, offset + w, || u_val)?;
Ok(mul_b)
}
fn initialize_accumulator<F: FixedPoint<pallas::Affine>, const NUM_WINDOWS: usize>(
&self,
region: &mut Region<'_, pallas::Base>,
offset: usize,
base: &F,
scalar: &ScalarFixed,
) -> Result<NonIdentityEccPoint, Error> {
// Recall that the message at each window `w` is represented as
// `m_w = [(k_w + 2) ⋅ 8^w]B`.
// When `w = 0`, we have `m_0 = [(k_0 + 2)]B`.
let w = 0;
let k0 = scalar.windows_field()[0];
let k0_usize = scalar.windows_usize()[0];
self.process_lower_bits::<_, NUM_WINDOWS>(region, offset, w, k0, k0_usize, base)
}
fn add_incomplete<F: FixedPoint<pallas::Affine>, const NUM_WINDOWS: usize>(
&self,
region: &mut Region<'_, pallas::Base>,
offset: usize,
mut acc: NonIdentityEccPoint,
base: &F,
scalar: &ScalarFixed,
) -> Result<NonIdentityEccPoint, Error> {
let scalar_windows_field = scalar.windows_field();
let scalar_windows_usize = scalar.windows_usize();
assert_eq!(scalar_windows_field.len(), NUM_WINDOWS);
for (w, (k, k_usize)) in scalar_windows_field
.into_iter()
.zip(scalar_windows_usize)
.enumerate()
// The MSB is processed separately.
.take(NUM_WINDOWS - 1)
// Skip k_0 (already processed).
.skip(1)
{
// Compute [(k_w + 2) ⋅ 8^w]B
//
// This assigns the coordinates of the returned point into the input cells for
// the incomplete addition gate, which will then copy them into themselves.
let mul_b =
self.process_lower_bits::<_, NUM_WINDOWS>(region, offset, w, k, k_usize, base)?;
// Add to the accumulator.
//
// After the first loop, the accumulator will already be in the input cells
// for the incomplete addition gate, and will be copied into themselves.
acc = self
.add_incomplete_config
.assign_region(&mul_b, &acc, offset + w, region)?;
}
Ok(acc)
}
/// Assigns the values used to process a window that does not contain the MSB.
fn process_lower_bits<F: FixedPoint<pallas::Affine>, const NUM_WINDOWS: usize>(
&self,
region: &mut Region<'_, pallas::Base>,
offset: usize,
w: usize,
k: Value<pallas::Scalar>,
k_usize: Value<usize>,
base: &F,
) -> Result<NonIdentityEccPoint, Error> {
        // `scalar = [(k_w + 2) ⋅ 8^w]`
let scalar = k.map(|k| (k + *TWO_SCALAR) * (*H_SCALAR).pow(&[w as u64, 0, 0, 0]));
self.process_window::<_, NUM_WINDOWS>(region, offset, w, k_usize, scalar, base)
}
/// Assigns the values used to process the window containing the MSB.
fn process_msb<F: FixedPoint<pallas::Affine>, const NUM_WINDOWS: usize>(
&self,
region: &mut Region<'_, pallas::Base>,
offset: usize,
base: &F,
scalar: &ScalarFixed,
) -> Result<NonIdentityEccPoint, Error> {
let k_usize = scalar.windows_usize()[NUM_WINDOWS - 1];
// offset_acc = \sum_{j = 0}^{NUM_WINDOWS - 2} 2^{FIXED_BASE_WINDOW_SIZE*j + 1}
let offset_acc = (0..(NUM_WINDOWS - 1)).fold(pallas::Scalar::zero(), |acc, w| {
acc + (*TWO_SCALAR).pow(&[FIXED_BASE_WINDOW_SIZE as u64 * w as u64 + 1, 0, 0, 0])
});
// `scalar = [k * 8^(NUM_WINDOWS - 1) - offset_acc]`.
let scalar = scalar.windows_field()[scalar.windows_field().len() - 1]
.map(|k| k * (*H_SCALAR).pow(&[(NUM_WINDOWS - 1) as u64, 0, 0, 0]) - offset_acc);
self.process_window::<_, NUM_WINDOWS>(
region,
offset,
NUM_WINDOWS - 1,
k_usize,
scalar,
base,
)
}
}
enum ScalarFixed {
FullWidth(EccScalarFixed),
Short(EccScalarFixedShort),
BaseFieldElem(EccBaseFieldElemFixed),
}
impl From<&EccScalarFixed> for ScalarFixed {
fn from(scalar_fixed: &EccScalarFixed) -> Self {
Self::FullWidth(scalar_fixed.clone())
}
}
impl From<&EccScalarFixedShort> for ScalarFixed {
fn from(scalar_fixed: &EccScalarFixedShort) -> Self {
Self::Short(scalar_fixed.clone())
}
}
impl From<&EccBaseFieldElemFixed> for ScalarFixed {
fn from(base_field_elem: &EccBaseFieldElemFixed) -> Self {
Self::BaseFieldElem(base_field_elem.clone())
}
}
impl ScalarFixed {
/// The scalar decomposition was done in the base field. For computation
    /// outside the circuit, we now convert the windows back into the scalar field.
///
/// This function does not require that the base field fits inside the scalar field,
/// because the window size fits into either field.
fn windows_field(&self) -> Vec<Value<pallas::Scalar>> {
let running_sum_to_windows = |zs: Vec<AssignedCell<pallas::Base, pallas::Base>>| {
(0..(zs.len() - 1))
.map(|idx| {
let z_cur = zs[idx].value();
let z_next = zs[idx + 1].value();
let word = z_cur - z_next * Value::known(*H_BASE);
// This assumes that the endianness of the encodings of pallas::Base
// and pallas::Scalar are the same. They happen to be, but we need to
// be careful if this is generalised.
word.map(|word| pallas::Scalar::from_repr(word.to_repr()).unwrap())
})
.collect::<Vec<_>>()
};
match self {
Self::BaseFieldElem(scalar) => running_sum_to_windows(scalar.running_sum.to_vec()),
Self::Short(scalar) => running_sum_to_windows(
scalar
.running_sum
.as_ref()
.expect("EccScalarFixedShort has been constrained")
.to_vec(),
),
Self::FullWidth(scalar) => scalar
.windows
.as_ref()
.expect("EccScalarFixed has been witnessed")
.iter()
.map(|bits| {
// This assumes that the endianness of the encodings of pallas::Base
// and pallas::Scalar are the same. They happen to be, but we need to
// be careful if this is generalised.
bits.value()
.map(|value| pallas::Scalar::from_repr(value.to_repr()).unwrap())
})
.collect::<Vec<_>>(),
}
}
/// The scalar decomposition is guaranteed to be in three-bit windows, so we construct
/// `usize` indices from the lowest three bits of each window field element for
/// convenient indexing into `u`-values.
fn windows_usize(&self) -> Vec<Value<usize>> {
self.windows_field()
.iter()
.map(|window| {
window.map(|window| {
window
.to_le_bits()
.iter()
.by_vals()
.take(FIXED_BASE_WINDOW_SIZE)
.rev()
.fold(0, |acc, b| 2 * acc + usize::from(b))
})
})
.collect::<Vec<_>>()
}
} | }
| random_line_split |
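The `offset_acc` subtraction in `process_msb` is the exact counterweight to the `(k_w + 2)` offsets applied by `process_lower_bits` (the +2 keeps every per-window multiple away from the identity, so incomplete addition stays well-defined). A worked derivation in LaTeX, with $n = \texttt{NUM\_WINDOWS}$ and a scalar decomposed into three-bit windows $k_0, \dots, k_{n-1}$:

\alpha = \sum_{w=0}^{n-1} k_w \cdot 8^{w},
\qquad
\sum_{w=0}^{n-2} (k_w + 2) \cdot 8^{w}
  = \sum_{w=0}^{n-2} k_w \cdot 8^{w}
  + \underbrace{\sum_{w=0}^{n-2} 2^{3w+1}}_{\mathrm{offset\_acc}},

so assigning the most significant window the value $k_{n-1} \cdot 8^{n-1} - \mathrm{offset\_acc}$ makes the window multiples sum back to $[\alpha]B$ exactly, which is what the comment above `process_msb` encodes with $\texttt{FIXED\_BASE\_WINDOW\_SIZE} = 3$.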
mul_fixed.rs | use super::{
add, add_incomplete, EccBaseFieldElemFixed, EccScalarFixed, EccScalarFixedShort, FixedPoint,
NonIdentityEccPoint, FIXED_BASE_WINDOW_SIZE, H,
};
use crate::utilities::decompose_running_sum::RunningSumConfig;
use std::marker::PhantomData;
use group::{
ff::{Field, PrimeField, PrimeFieldBits},
Curve,
};
use halo2_proofs::{
circuit::{AssignedCell, Region, Value},
plonk::{
Advice, Column, ConstraintSystem, Constraints, Error, Expression, Fixed, Selector,
VirtualCells,
},
poly::Rotation,
};
use lazy_static::lazy_static;
use pasta_curves::{arithmetic::CurveAffine, pallas};
pub mod base_field_elem;
pub mod full_width;
pub mod short;
lazy_static! {
static ref TWO_SCALAR: pallas::Scalar = pallas::Scalar::from(2);
// H = 2^3 (3-bit window)
static ref H_SCALAR: pallas::Scalar = pallas::Scalar::from(H as u64);
static ref H_BASE: pallas::Base = pallas::Base::from(H as u64);
}
#[derive(Clone, Debug, Eq, PartialEq)]
pub struct Config<FixedPoints: super::FixedPoints<pallas::Affine>> {
running_sum_config: RunningSumConfig<pallas::Base, FIXED_BASE_WINDOW_SIZE>,
// The fixed Lagrange interpolation coefficients for `x_p`.
lagrange_coeffs: [Column<Fixed>; H],
// The fixed `z` for each window such that `y + z = u^2`.
fixed_z: Column<Fixed>,
// Decomposition of an `n-1`-bit scalar into `k`-bit windows:
// a = a_0 + 2^k(a_1) + 2^{2k}(a_2) + ... + 2^{(n-1)k}(a_{n-1})
window: Column<Advice>,
// y-coordinate of accumulator (only used in the final row).
u: Column<Advice>,
// Configuration for `add`
add_config: add::Config,
// Configuration for `add_incomplete`
add_incomplete_config: add_incomplete::Config,
_marker: PhantomData<FixedPoints>,
}
impl<FixedPoints: super::FixedPoints<pallas::Affine>> Config<FixedPoints> {
#[allow(clippy::too_many_arguments)]
pub(super) fn configure(
meta: &mut ConstraintSystem<pallas::Base>,
lagrange_coeffs: [Column<Fixed>; H],
window: Column<Advice>,
u: Column<Advice>,
add_config: add::Config,
add_incomplete_config: add_incomplete::Config,
) -> Self {
meta.enable_equality(window);
meta.enable_equality(u);
let q_running_sum = meta.selector();
let running_sum_config = RunningSumConfig::configure(meta, q_running_sum, window);
let config = Self {
running_sum_config,
lagrange_coeffs,
fixed_z: meta.fixed_column(),
window,
u,
add_config,
add_incomplete_config,
_marker: PhantomData,
};
// Check relationships between `add_config` and `add_incomplete_config`.
assert_eq!(
config.add_config.x_p, config.add_incomplete_config.x_p,
"add and add_incomplete are used internally in mul_fixed."
);
assert_eq!(
config.add_config.y_p, config.add_incomplete_config.y_p,
"add and add_incomplete are used internally in mul_fixed."
);
for advice in [config.window, config.u].iter() {
assert_ne!(
*advice, config.add_config.x_qr,
"Do not overlap with output columns of add."
);
assert_ne!(
*advice, config.add_config.y_qr,
"Do not overlap with output columns of add."
);
}
config.running_sum_coords_gate(meta);
config
}
/// Check that each window in the running sum decomposition uses the correct y_p
/// and interpolated x_p.
///
/// This gate is used both in the mul_fixed::base_field_elem and mul_fixed::short
/// helpers, which decompose the scalar using a running sum.
///
/// This gate is not used in the mul_fixed::full_width helper, since the full-width
/// scalar is witnessed directly as three-bit windows instead of being decomposed
/// via a running sum.
fn running_sum_coords_gate(&self, meta: &mut ConstraintSystem<pallas::Base>) {
meta.create_gate("Running sum coordinates check", |meta| {
let q_mul_fixed_running_sum =
meta.query_selector(self.running_sum_config.q_range_check());
let z_cur = meta.query_advice(self.window, Rotation::cur());
let z_next = meta.query_advice(self.window, Rotation::next());
// z_{i+1} = (z_i - a_i) / 2^3
// => a_i = z_i - z_{i+1} * 2^3
let word = z_cur - z_next * pallas::Base::from(H as u64);
Constraints::with_selector(q_mul_fixed_running_sum, self.coords_check(meta, word))
});
}
/// [Specification](https://p.z.cash/halo2-0.1:ecc-fixed-mul-coordinates).
#[allow(clippy::op_ref)]
fn coords_check(
&self,
meta: &mut VirtualCells<'_, pallas::Base>,
window: Expression<pallas::Base>,
) -> Vec<(&'static str, Expression<pallas::Base>)> {
let y_p = meta.query_advice(self.add_config.y_p, Rotation::cur());
let x_p = meta.query_advice(self.add_config.x_p, Rotation::cur());
let z = meta.query_fixed(self.fixed_z);
let u = meta.query_advice(self.u, Rotation::cur());
let window_pow: Vec<Expression<pallas::Base>> = (0..H)
.map(|pow| {
(0..pow).fold(Expression::Constant(pallas::Base::one()), |acc, _| {
acc * window.clone()
})
})
.collect();
let interpolated_x = window_pow.iter().zip(self.lagrange_coeffs.iter()).fold(
Expression::Constant(pallas::Base::zero()),
|acc, (window_pow, coeff)| acc + (window_pow.clone() * meta.query_fixed(*coeff)),
);
// Check interpolation of x-coordinate
let x_check = interpolated_x - x_p.clone();
// Check that `y + z = u^2`, where `z` is fixed and `u`, `y` are witnessed
let y_check = u.square() - y_p.clone() - z;
// Check that (x, y) is on the curve
let on_curve =
y_p.square() - x_p.clone().square() * x_p - Expression::Constant(pallas::Affine::b());
vec![
("check x", x_check),
("check y", y_check),
("on-curve", on_curve),
]
}
#[allow(clippy::type_complexity)]
fn assign_region_inner<F: FixedPoint<pallas::Affine>, const NUM_WINDOWS: usize>(
&self,
region: &mut Region<'_, pallas::Base>,
offset: usize,
scalar: &ScalarFixed,
base: &F,
coords_check_toggle: Selector,
) -> Result<(NonIdentityEccPoint, NonIdentityEccPoint), Error> {
// Assign fixed columns for given fixed base
self.assign_fixed_constants::<F, NUM_WINDOWS>(region, offset, base, coords_check_toggle)?;
// Initialize accumulator
let acc = self.initialize_accumulator::<F, NUM_WINDOWS>(region, offset, base, scalar)?;
// Process all windows excluding least and most significant windows
let acc = self.add_incomplete::<F, NUM_WINDOWS>(region, offset, acc, base, scalar)?;
// Process most significant window
let mul_b = self.process_msb::<F, NUM_WINDOWS>(region, offset, base, scalar)?;
Ok((acc, mul_b))
}
/// [Specification](https://p.z.cash/halo2-0.1:ecc-fixed-mul-load-base).
fn assign_fixed_constants<F: FixedPoint<pallas::Affine>, const NUM_WINDOWS: usize>(
&self,
region: &mut Region<'_, pallas::Base>,
offset: usize,
base: &F,
coords_check_toggle: Selector,
) -> Result<(), Error> {
let mut constants = None;
let build_constants = || {
let lagrange_coeffs = base.lagrange_coeffs();
assert_eq!(lagrange_coeffs.len(), NUM_WINDOWS);
let z = base.z();
assert_eq!(z.len(), NUM_WINDOWS);
(lagrange_coeffs, z)
};
// Assign fixed columns for given fixed base
for window in 0..NUM_WINDOWS {
coords_check_toggle.enable(region, window + offset)?;
// Assign x-coordinate Lagrange interpolation coefficients
for k in 0..H {
region.assign_fixed(
|| {
format!(
"Lagrange interpolation coeff for window: {:?}, k: {:?}",
window, k
)
},
self.lagrange_coeffs[k],
window + offset,
|| {
if constants.as_ref().is_none() {
constants = Some(build_constants());
}
let lagrange_coeffs = &constants.as_ref().unwrap().0;
Value::known(lagrange_coeffs[window][k])
},
)?;
}
// Assign z-values for each window
region.assign_fixed(
|| format!("z-value for window: {:?}", window),
self.fixed_z,
window + offset,
|| {
let z = &constants.as_ref().unwrap().1;
Value::known(pallas::Base::from(z[window]))
},
)?;
}
Ok(())
}
/// Assigns the values used to process a window.
fn process_window<F: FixedPoint<pallas::Affine>, const NUM_WINDOWS: usize>(
&self,
region: &mut Region<'_, pallas::Base>,
offset: usize,
w: usize,
k_usize: Value<usize>,
window_scalar: Value<pallas::Scalar>,
base: &F,
) -> Result<NonIdentityEccPoint, Error> {
let base_value = base.generator();
let base_u = base.u();
assert_eq!(base_u.len(), NUM_WINDOWS);
// Compute [window_scalar]B
let mul_b = {
let mul_b = window_scalar.map(|scalar| base_value * scalar);
let mul_b = mul_b.map(|mul_b| mul_b.to_affine().coordinates().unwrap());
let x = mul_b.map(|mul_b| {
let x = *mul_b.x();
assert!(x != pallas::Base::zero());
x.into()
});
let x = region.assign_advice(
|| format!("mul_b_x, window {}", w),
self.add_config.x_p,
offset + w,
|| x,
)?;
let y = mul_b.map(|mul_b| {
let y = *mul_b.y();
assert!(y != pallas::Base::zero());
y.into()
});
let y = region.assign_advice(
|| format!("mul_b_y, window {}", w),
self.add_config.y_p,
offset + w,
|| y,
)?;
NonIdentityEccPoint::from_coordinates_unchecked(x, y)
};
// Assign u = (y_p + z_w).sqrt()
let u_val = k_usize.map(|k| pallas::Base::from_repr(base_u[w][k]).unwrap());
region.assign_advice(|| "u", self.u, offset + w, || u_val)?;
Ok(mul_b)
}
fn initialize_accumulator<F: FixedPoint<pallas::Affine>, const NUM_WINDOWS: usize>(
&self,
region: &mut Region<'_, pallas::Base>,
offset: usize,
base: &F,
scalar: &ScalarFixed,
) -> Result<NonIdentityEccPoint, Error> {
// Recall that the message at each window `w` is represented as
// `m_w = [(k_w + 2) ⋅ 8^w]B`.
// When `w = 0`, we have `m_0 = [(k_0 + 2)]B`.
let w = 0;
let k0 = scalar.windows_field()[0];
let k0_usize = scalar.windows_usize()[0];
self.process_lower_bits::<_, NUM_WINDOWS>(region, offset, w, k0, k0_usize, base)
}
fn add_incomplete<F: FixedPoint<pallas::Affine>, const NUM_WINDOWS: usize>(
&self,
region: &mut Region<'_, pallas::Base>,
offset: usize,
mut acc: NonIdentityEccPoint,
base: &F,
scalar: &ScalarFixed,
) -> Result<NonIdentityEccPoint, Error> {
let scalar_windows_field = scalar.windows_field();
let scalar_windows_usize = scalar.windows_usize();
assert_eq!(scalar_windows_field.len(), NUM_WINDOWS);
for (w, (k, k_usize)) in scalar_windows_field
.into_iter()
.zip(scalar_windows_usize)
.enumerate()
// The MSB is processed separately.
.take(NUM_WINDOWS - 1)
// Skip k_0 (already processed).
.skip(1)
{
// Compute [(k_w + 2) ⋅ 8^w]B
//
// This assigns the coordinates of the returned point into the input cells for
// the incomplete addition gate, which will then copy them into themselves.
let mul_b =
self.process_lower_bits::<_, NUM_WINDOWS>(region, offset, w, k, k_usize, base)?;
// Add to the accumulator.
//
// After the first loop, the accumulator will already be in the input cells
// for the incomplete addition gate, and will be copied into themselves.
acc = self
.add_incomplete_config
.assign_region(&mul_b, &acc, offset + w, region)?;
}
Ok(acc)
}
/// Assigns the values used to process a window that does not contain the MSB.
fn process_lower_bits<F: FixedPoint<pallas::Affine>, const NUM_WINDOWS: usize>(
&self,
region: &mut Region<'_, pallas::Base>,
offset: usize,
w: usize,
k: Value<pallas::Scalar>,
k_usize: Value<usize>,
base: &F,
) -> Result<NonIdentityEccPoint, Error> {
        // `scalar = [(k_w + 2) ⋅ 8^w]`
let scalar = k.map(|k| (k + *TWO_SCALAR) * (*H_SCALAR).pow(&[w as u64, 0, 0, 0]));
self.process_window::<_, NUM_WINDOWS>(region, offset, w, k_usize, scalar, base)
}
/// Assigns the values used to process the window containing the MSB.
fn process_msb<F: FixedPoint<pallas::Affine>, const NUM_WINDOWS: usize>(
&self,
region: &mut Region<'_, pallas::Base>,
offset: usize,
base: &F,
scalar: &ScalarFixed,
) -> Result<NonIdentityEccPoint, Error> {
let k_usize = scalar.windows_usize()[NUM_WINDOWS - 1];
// offset_acc = \sum_{j = 0}^{NUM_WINDOWS - 2} 2^{FIXED_BASE_WINDOW_SIZE*j + 1}
let offset_acc = (0..(NUM_WINDOWS - 1)).fold(pallas::Scalar::zero(), |acc, w| {
acc + (*TWO_SCALAR).pow(&[FIXED_BASE_WINDOW_SIZE as u64 * w as u64 + 1, 0, 0, 0])
});
// `scalar = [k * 8^(NUM_WINDOWS - 1) - offset_acc]`.
let scalar = scalar.windows_field()[scalar.windows_field().len() - 1]
.map(|k| k * (*H_SCALAR).pow(&[(NUM_WINDOWS - 1) as u64, 0, 0, 0]) - offset_acc);
self.process_window::<_, NUM_WINDOWS>(
region,
offset,
NUM_WINDOWS - 1,
k_usize,
scalar,
base,
)
}
}
enum ScalarFixed {
FullWidth(EccScalarFixed),
Short(EccScalarFixedShort),
BaseFieldElem(EccBaseFieldElemFixed),
}
impl From<&EccScalarFixed> for ScalarFixed {
fn from(scalar_fixed: &EccScalarFixed) -> Self {
| pl From<&EccScalarFixedShort> for ScalarFixed {
fn from(scalar_fixed: &EccScalarFixedShort) -> Self {
Self::Short(scalar_fixed.clone())
}
}
impl From<&EccBaseFieldElemFixed> for ScalarFixed {
fn from(base_field_elem: &EccBaseFieldElemFixed) -> Self {
Self::BaseFieldElem(base_field_elem.clone())
}
}
impl ScalarFixed {
/// The scalar decomposition was done in the base field. For computation
    /// outside the circuit, we now convert the windows back into the scalar field.
///
/// This function does not require that the base field fits inside the scalar field,
/// because the window size fits into either field.
fn windows_field(&self) -> Vec<Value<pallas::Scalar>> {
let running_sum_to_windows = |zs: Vec<AssignedCell<pallas::Base, pallas::Base>>| {
(0..(zs.len() - 1))
.map(|idx| {
let z_cur = zs[idx].value();
let z_next = zs[idx + 1].value();
let word = z_cur - z_next * Value::known(*H_BASE);
// This assumes that the endianness of the encodings of pallas::Base
// and pallas::Scalar are the same. They happen to be, but we need to
// be careful if this is generalised.
word.map(|word| pallas::Scalar::from_repr(word.to_repr()).unwrap())
})
.collect::<Vec<_>>()
};
match self {
Self::BaseFieldElem(scalar) => running_sum_to_windows(scalar.running_sum.to_vec()),
Self::Short(scalar) => running_sum_to_windows(
scalar
.running_sum
.as_ref()
.expect("EccScalarFixedShort has been constrained")
.to_vec(),
),
Self::FullWidth(scalar) => scalar
.windows
.as_ref()
.expect("EccScalarFixed has been witnessed")
.iter()
.map(|bits| {
// This assumes that the endianness of the encodings of pallas::Base
// and pallas::Scalar are the same. They happen to be, but we need to
// be careful if this is generalised.
bits.value()
.map(|value| pallas::Scalar::from_repr(value.to_repr()).unwrap())
})
.collect::<Vec<_>>(),
}
}
/// The scalar decomposition is guaranteed to be in three-bit windows, so we construct
/// `usize` indices from the lowest three bits of each window field element for
/// convenient indexing into `u`-values.
fn windows_usize(&self) -> Vec<Value<usize>> {
self.windows_field()
.iter()
.map(|window| {
window.map(|window| {
window
.to_le_bits()
.iter()
.by_vals()
.take(FIXED_BASE_WINDOW_SIZE)
.rev()
.fold(0, |acc, b| 2 * acc + usize::from(b))
})
})
.collect::<Vec<_>>()
}
}
| Self::FullWidth(scalar_fixed.clone())
}
}
im | identifier_body |
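Written out, the three `coords_check` constraints say the following, for a window value $a_w \in \{0,\dots,7\}$, fixed Lagrange coefficients $c_{w,k}$, and fixed $z_w$ (with $u$, $x_p$, $y_p$ witnessed):

x_p = \sum_{k=0}^{7} c_{w,k} \, a_w^{k},
\qquad
u^{2} = y_p + z_w,
\qquad
y_p^{2} = x_p^{3} + b.

The first is degree-7 Lagrange interpolation of the precomputed x-coordinates over the eight window values; the second pins down the sign of $y_p$, since (per the linked specification) $z_w$ is chosen so that $y_p + z_w$ is a square while $-y_p + z_w$ is not; the third is the Pallas curve equation, whose $a$-coefficient is zero, which is why only `pallas::Affine::b()` appears in the gate.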
mul_fixed.rs | use super::{
add, add_incomplete, EccBaseFieldElemFixed, EccScalarFixed, EccScalarFixedShort, FixedPoint,
NonIdentityEccPoint, FIXED_BASE_WINDOW_SIZE, H,
};
use crate::utilities::decompose_running_sum::RunningSumConfig;
use std::marker::PhantomData;
use group::{
ff::{Field, PrimeField, PrimeFieldBits},
Curve,
};
use halo2_proofs::{
circuit::{AssignedCell, Region, Value},
plonk::{
Advice, Column, ConstraintSystem, Constraints, Error, Expression, Fixed, Selector,
VirtualCells,
},
poly::Rotation,
};
use lazy_static::lazy_static;
use pasta_curves::{arithmetic::CurveAffine, pallas};
pub mod base_field_elem;
pub mod full_width;
pub mod short;
lazy_static! {
static ref TWO_SCALAR: pallas::Scalar = pallas::Scalar::from(2);
// H = 2^3 (3-bit window)
static ref H_SCALAR: pallas::Scalar = pallas::Scalar::from(H as u64);
static ref H_BASE: pallas::Base = pallas::Base::from(H as u64);
}
#[derive(Clone, Debug, Eq, PartialEq)]
pub struct Config<FixedPoints: super::FixedPoints<pallas::Affine>> {
running_sum_config: RunningSumConfig<pallas::Base, FIXED_BASE_WINDOW_SIZE>,
// The fixed Lagrange interpolation coefficients for `x_p`.
lagrange_coeffs: [Column<Fixed>; H],
// The fixed `z` for each window such that `y + z = u^2`.
fixed_z: Column<Fixed>,
// Decomposition of an `n-1`-bit scalar into `k`-bit windows:
// a = a_0 + 2^k(a_1) + 2^{2k}(a_2) + ... + 2^{(n-1)k}(a_{n-1})
window: Column<Advice>,
// y-coordinate of accumulator (only used in the final row).
u: Column<Advice>,
// Configuration for `add`
add_config: add::Config,
// Configuration for `add_incomplete`
add_incomplete_config: add_incomplete::Config,
_marker: PhantomData<FixedPoints>,
}
impl<FixedPoints: super::FixedPoints<pallas::Affine>> Config<FixedPoints> {
#[allow(clippy::too_many_arguments)]
pub(super) fn configure(
meta: &mut ConstraintSystem<pallas::Base>,
lagrange_coeffs: [Column<Fixed>; H],
window: Column<Advice>,
u: Column<Advice>,
add_config: add::Config,
add_incomplete_config: add_incomplete::Config,
) -> Self {
meta.enable_equality(window);
meta.enable_equality(u);
let q_running_sum = meta.selector();
let running_sum_config = RunningSumConfig::configure(meta, q_running_sum, window);
let config = Self {
running_sum_config,
lagrange_coeffs,
fixed_z: meta.fixed_column(),
window,
u,
add_config,
add_incomplete_config,
_marker: PhantomData,
};
// Check relationships between `add_config` and `add_incomplete_config`.
assert_eq!(
config.add_config.x_p, config.add_incomplete_config.x_p,
"add and add_incomplete are used internally in mul_fixed."
);
assert_eq!(
config.add_config.y_p, config.add_incomplete_config.y_p,
"add and add_incomplete are used internally in mul_fixed."
);
for advice in [config.window, config.u].iter() {
assert_ne!(
*advice, config.add_config.x_qr,
"Do not overlap with output columns of add."
);
assert_ne!(
*advice, config.add_config.y_qr,
"Do not overlap with output columns of add."
);
}
config.running_sum_coords_gate(meta);
config
}
/// Check that each window in the running sum decomposition uses the correct y_p
/// and interpolated x_p.
///
/// This gate is used both in the mul_fixed::base_field_elem and mul_fixed::short
/// helpers, which decompose the scalar using a running sum.
///
/// This gate is not used in the mul_fixed::full_width helper, since the full-width
/// scalar is witnessed directly as three-bit windows instead of being decomposed
/// via a running sum.
fn running_sum_coords_gate(&self, meta: &mut ConstraintSystem<pallas::Base>) {
meta.create_gate("Running sum coordinates check", |meta| {
let q_mul_fixed_running_sum =
meta.query_selector(self.running_sum_config.q_range_check());
let z_cur = meta.query_advice(self.window, Rotation::cur());
let z_next = meta.query_advice(self.window, Rotation::next());
// z_{i+1} = (z_i - a_i) / 2^3
// => a_i = z_i - z_{i+1} * 2^3
let word = z_cur - z_next * pallas::Base::from(H as u64);
Constraints::with_selector(q_mul_fixed_running_sum, self.coords_check(meta, word))
});
}
/// [Specification](https://p.z.cash/halo2-0.1:ecc-fixed-mul-coordinates).
#[allow(clippy::op_ref)]
fn coords_check(
&self,
meta: &mut VirtualCells<'_, pallas::Base>,
window: Expression<pallas::Base>,
) -> Vec<(&'static str, Expression<pallas::Base>)> {
let y_p = meta.query_advice(self.add_config.y_p, Rotation::cur());
let x_p = meta.query_advice(self.add_config.x_p, Rotation::cur());
let z = meta.query_fixed(self.fixed_z);
let u = meta.query_advice(self.u, Rotation::cur());
let window_pow: Vec<Expression<pallas::Base>> = (0..H)
.map(|pow| {
(0..pow).fold(Expression::Constant(pallas::Base::one()), |acc, _| {
acc * window.clone()
})
})
.collect();
let interpolated_x = window_pow.iter().zip(self.lagrange_coeffs.iter()).fold(
Expression::Constant(pallas::Base::zero()),
|acc, (window_pow, coeff)| acc + (window_pow.clone() * meta.query_fixed(*coeff)),
);
// Check interpolation of x-coordinate
let x_check = interpolated_x - x_p.clone();
// Check that `y + z = u^2`, where `z` is fixed and `u`, `y` are witnessed
let y_check = u.square() - y_p.clone() - z;
// Check that (x, y) is on the curve
let on_curve =
y_p.square() - x_p.clone().square() * x_p - Expression::Constant(pallas::Affine::b());
vec![
("check x", x_check),
("check y", y_check),
("on-curve", on_curve),
]
}
#[allow(clippy::type_complexity)]
fn assign_region_inner<F: FixedPoint<pallas::Affine>, const NUM_WINDOWS: usize>(
&self,
region: &mut Region<'_, pallas::Base>,
offset: usize,
scalar: &ScalarFixed,
base: &F,
coords_check_toggle: Selector,
) -> Result<(NonIdentityEccPoint, NonIdentityEccPoint), Error> {
// Assign fixed columns for given fixed base
self.assign_fixed_constants::<F, NUM_WINDOWS>(region, offset, base, coords_check_toggle)?;
// Initialize accumulator
let acc = self.initialize_accumulator::<F, NUM_WINDOWS>(region, offset, base, scalar)?;
// Process all windows excluding least and most significant windows
let acc = self.add_incomplete::<F, NUM_WINDOWS>(region, offset, acc, base, scalar)?;
// Process most significant window
let mul_b = self.process_msb::<F, NUM_WINDOWS>(region, offset, base, scalar)?;
Ok((acc, mul_b))
}
/// [Specification](https://p.z.cash/halo2-0.1:ecc-fixed-mul-load-base).
fn assign_fixed_constants<F: FixedPoint<pallas::Affine>, const NUM_WINDOWS: usize>(
&self,
region: &mut Region<'_, pallas::Base>,
offset: usize,
base: &F,
coords_check_toggle: Selector,
) -> Result<(), Error> {
let mut constants = None;
let build_constants = || {
let lagrange_coeffs = base.lagrange_coeffs();
assert_eq!(lagrange_coeffs.len(), NUM_WINDOWS);
let z = base.z();
assert_eq!(z.len(), NUM_WINDOWS);
(lagrange_coeffs, z)
};
// Assign fixed columns for given fixed base
for window in 0..NUM_WINDOWS {
coords_check_toggle.enable(region, window + offset)?;
// Assign x-coordinate Lagrange interpolation coefficients
for k in 0..H {
region.assign_fixed(
|| {
format!(
"Lagrange interpolation coeff for window: {:?}, k: {:?}",
window, k
)
},
self.lagrange_coeffs[k],
window + offset,
|| {
if constants.as_ref().is_none() {
constants = Some(build_constants());
}
let lagrange_coeffs = &constants.as_ref().unwrap().0;
Value::known(lagrange_coeffs[window][k])
},
)?;
}
// Assign z-values for each window
region.assign_fixed(
|| format!("z-value for window: {:?}", window),
self.fixed_z,
window + offset,
|| {
let z = &constants.as_ref().unwrap().1;
Value::known(pallas::Base::from(z[window]))
},
)?;
}
Ok(())
}
/// Assigns the values used to process a window.
fn process_window<F: FixedPoint<pallas::Affine>, const NUM_WINDOWS: usize>(
&self,
region: &mut Region<'_, pallas::Base>,
offset: usize,
w: usize,
k_usize: Value<usize>,
window_scalar: Value<pallas::Scalar>,
base: &F,
) -> Result<NonIdentityEccPoint, Error> {
let base_value = base.generator();
let base_u = base.u();
assert_eq!(base_u.len(), NUM_WINDOWS);
// Compute [window_scalar]B
let mul_b = {
let mul_b = window_scalar.map(|scalar| base_value * scalar);
let mul_b = mul_b.map(|mul_b| mul_b.to_affine().coordinates().unwrap());
let x = mul_b.map(|mul_b| {
let x = *mul_b.x();
assert!(x != pallas::Base::zero());
x.into()
});
let x = region.assign_advice(
|| format!("mul_b_x, window {}", w),
self.add_config.x_p,
offset + w,
|| x,
)?;
let y = mul_b.map(|mul_b| {
let y = *mul_b.y();
assert!(y != pallas::Base::zero());
y.into()
});
let y = region.assign_advice(
|| format!("mul_b_y, window {}", w),
self.add_config.y_p,
offset + w,
|| y,
)?;
NonIdentityEccPoint::from_coordinates_unchecked(x, y)
};
// Assign u = (y_p + z_w).sqrt()
let u_val = k_usize.map(|k| pallas::Base::from_repr(base_u[w][k]).unwrap());
region.assign_advice(|| "u", self.u, offset + w, || u_val)?;
Ok(mul_b)
}
fn initialize_accumulator<F: FixedPoint<pallas::Affine>, const NUM_WINDOWS: usize>(
&self,
region: &mut Region<'_, pallas::Base>,
offset: usize,
base: &F,
scalar: &ScalarFixed,
) -> Result<NonIdentityEccPoint, Error> {
// Recall that the message at each window `w` is represented as
// `m_w = [(k_w + 2) ⋅ 8^w]B`.
// When `w = 0`, we have `m_0 = [(k_0 + 2)]B`.
let w = 0;
let k0 = scalar.windows_field()[0];
let k0_usize = scalar.windows_usize()[0];
self.process_lower_bits::<_, NUM_WINDOWS>(region, offset, w, k0, k0_usize, base)
}
fn add_incomplete<F: FixedPoint<pallas::Affine>, const NUM_WINDOWS: usize>(
&self,
region: &mut Region<'_, pallas::Base>,
offset: usize,
mut acc: NonIdentityEccPoint,
base: &F,
scalar: &ScalarFixed,
) -> Result<NonIdentityEccPoint, Error> {
let scalar_windows_field = scalar.windows_field();
let scalar_windows_usize = scalar.windows_usize();
assert_eq!(scalar_windows_field.len(), NUM_WINDOWS);
for (w, (k, k_usize)) in scalar_windows_field
.into_iter()
.zip(scalar_windows_usize)
.enumerate()
// The MSB is processed separately.
.take(NUM_WINDOWS - 1)
// Skip k_0 (already processed).
.skip(1)
{
// Compute [(k_w + 2) ⋅ 8^w]B
//
// This assigns the coordinates of the returned point into the input cells for
// the incomplete addition gate, which will then copy them into themselves.
let mul_b =
self.process_lower_bits::<_, NUM_WINDOWS>(region, offset, w, k, k_usize, base)?;
// Add to the accumulator.
//
// After the first loop, the accumulator will already be in the input cells
// for the incomplete addition gate, and will be copied into themselves.
acc = self
.add_incomplete_config
.assign_region(&mul_b, &acc, offset + w, region)?;
}
Ok(acc)
}
/// Assigns the values used to process a window that does not contain the MSB.
fn process_lower_bits<F: FixedPoint<pallas::Affine>, const NUM_WINDOWS: usize>(
&self,
region: &mut Region<'_, pallas::Base>,
offset: usize,
w: usize,
k: Value<pallas::Scalar>,
k_usize: Value<usize>,
base: &F,
) -> Result<NonIdentityEccPoint, Error> {
        // `scalar = [(k_w + 2) ⋅ 8^w]`
let scalar = k.map(|k| (k + *TWO_SCALAR) * (*H_SCALAR).pow(&[w as u64, 0, 0, 0]));
self.process_window::<_, NUM_WINDOWS>(region, offset, w, k_usize, scalar, base)
}
/// Assigns the values used to process the window containing the MSB.
fn process_msb<F: FixedPoint<pallas::Affine>, const NUM_WINDOWS: usize>(
&self,
region: &mut Region<'_, pallas::Base>,
offset: usize,
base: &F,
scalar: &ScalarFixed,
) -> Result<NonIdentityEccPoint, Error> {
let k_usize = scalar.windows_usize()[NUM_WINDOWS - 1];
// offset_acc = \sum_{j = 0}^{NUM_WINDOWS - 2} 2^{FIXED_BASE_WINDOW_SIZE*j + 1}
let offset_acc = (0..(NUM_WINDOWS - 1)).fold(pallas::Scalar::zero(), |acc, w| {
acc + (*TWO_SCALAR).pow(&[FIXED_BASE_WINDOW_SIZE as u64 * w as u64 + 1, 0, 0, 0])
});
// `scalar = [k * 8^(NUM_WINDOWS - 1) - offset_acc]`.
let scalar = scalar.windows_field()[scalar.windows_field().len() - 1]
.map(|k| k * (*H_SCALAR).pow(&[(NUM_WINDOWS - 1) as u64, 0, 0, 0]) - offset_acc);
self.process_window::<_, NUM_WINDOWS>(
region,
offset,
NUM_WINDOWS - 1,
k_usize,
scalar,
base,
)
}
}
enum ScalarFixed {
FullWidth(EccScalarFixed),
Short(EccScalarFixedShort),
BaseFieldElem(EccBaseFieldElemFixed),
}
impl From<&EccScalarFixed> for ScalarFixed {
fn from(scalar_fixed: &EccScalarFixed) -> Self {
Self::FullWidth(scalar_fixed.clone())
}
}
impl From<&EccScalarFixedShort> for ScalarFixed {
fn from(scalar_fixed: &EccScalarFixedShort) -> Self {
Self::Short(scalar_fixed.clone())
}
}
impl From<&EccBaseFieldElemFixed> for ScalarFixed {
fn from(base_field_elem: &EccBaseFieldElemFixed) -> Self {
Self::BaseFieldElem(base_field_elem.clone())
}
}
impl ScalarFixed {
/// The scalar decomposition was done in the base field. For computation
    /// outside the circuit, we now convert the windows back into the scalar field.
///
/// This function does not require that the base field fits inside the scalar field,
/// because the window size fits into either field.
fn window | ) -> Vec<Value<pallas::Scalar>> {
let running_sum_to_windows = |zs: Vec<AssignedCell<pallas::Base, pallas::Base>>| {
(0..(zs.len() - 1))
.map(|idx| {
let z_cur = zs[idx].value();
let z_next = zs[idx + 1].value();
let word = z_cur - z_next * Value::known(*H_BASE);
// This assumes that the endianness of the encodings of pallas::Base
// and pallas::Scalar are the same. They happen to be, but we need to
// be careful if this is generalised.
word.map(|word| pallas::Scalar::from_repr(word.to_repr()).unwrap())
})
.collect::<Vec<_>>()
};
match self {
Self::BaseFieldElem(scalar) => running_sum_to_windows(scalar.running_sum.to_vec()),
Self::Short(scalar) => running_sum_to_windows(
scalar
.running_sum
.as_ref()
.expect("EccScalarFixedShort has been constrained")
.to_vec(),
),
Self::FullWidth(scalar) => scalar
.windows
.as_ref()
.expect("EccScalarFixed has been witnessed")
.iter()
.map(|bits| {
// This assumes that the endianness of the encodings of pallas::Base
// and pallas::Scalar are the same. They happen to be, but we need to
// be careful if this is generalised.
bits.value()
.map(|value| pallas::Scalar::from_repr(value.to_repr()).unwrap())
})
.collect::<Vec<_>>(),
}
}
/// The scalar decomposition is guaranteed to be in three-bit windows, so we construct
/// `usize` indices from the lowest three bits of each window field element for
/// convenient indexing into `u`-values.
fn windows_usize(&self) -> Vec<Value<usize>> {
self.windows_field()
.iter()
.map(|window| {
window.map(|window| {
window
.to_le_bits()
.iter()
.by_vals()
.take(FIXED_BASE_WINDOW_SIZE)
.rev()
.fold(0, |acc, b| 2 * acc + usize::from(b))
})
})
.collect::<Vec<_>>()
}
}
| s_field(&self | identifier_name |
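Stated once in math, `windows_field` recovers each window from consecutive running-sum values via the relation in the gate comment (with $z_n = 0$ and window size 3):

z_{i+1} = \frac{z_i - a_i}{2^{3}}
\;\Longrightarrow\;
a_i = z_i - 8 \, z_{i+1},
\qquad
z_i = \sum_{j \ge i} a_j \, 8^{\,j-i}.

`windows_usize` then re-reads each $a_i$ from its three low-order bits: folding $acc \mapsto 2 \cdot acc + b$ over the reversed bit slice computes $a_i = 4 b_2 + 2 b_1 + b_0$.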
RhinoResponse.ts | import { HTTPServer, Cookies, Path } from "../deps.ts";
import { HeaderField, MIMEType, StatusCode, StatusCodeName, ExtMIMEType } from "../mod.ts";
import { Utils } from "../utils.ts";
// Derives the optional values of a cookie
export type CookieOptions = Omit<Omit<Cookies.Cookie, "name">, "value">;
export class | {
private _headersSent: boolean = false;
private STATUS: number = 200;
private COOKIES: HTTPServer.Response = {};
private readonly RESPONSE_HEADERS = new Headers();
constructor(private readonly HTTP_REQUEST: HTTPServer.ServerRequest) {}
/**
* Appends a value to an already existing HTTP header field, or
* adds the field if it has not been created.
* @param field The HTTP header field name
* @param value The value for the header field.
* @note Should follow the standard from https://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html
*/
public append(field: string, value: string | string[]) {
this.appendHeader(field, value);
return this;
}
/**
* Appends a value to an already existing HTTP header field, or
* adds the field if it has not been created.
* @param field The HTTP header field name
* @param value The value for the header field.
* @note Should follow the standard from https://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html
*/
public appendHeader(field: string, value: string | string[]): RhinoResponse {
if (Array.isArray(value)) {
// Joins the values with a comma, if they are an array of values
value.forEach((val) => this.RESPONSE_HEADERS.append(field, val));
} else {
// Otherwise, just adds the value to the header
this.RESPONSE_HEADERS.append(field, value);
}
return this;
}
/**
* Sets the HTTP response Content-Disposition header field to “attachment”.
     * If a filename is given, then it sets the Content-Type based on the extension name via res.contentType(),
* and sets the Content-Disposition “filename=” parameter.
* @param filename The name that will be set to the downloaded file
*/
public asAttachment(filename?: string): RhinoResponse {
const _filename = filename ? `filename=${filename};` : "";
this.setHeader(HeaderField.ContentDisposition, `attachment; ${_filename}`);
const ext = Path.extname(filename || "");
this.contentType(ext);
return this;
}
/**
* Clears a cookie from the response by setting
* its expiration date to a date before now.
* @param name The name of the cookie
*/
public clearCookie(name: string) {
// Sets the cookie to be appended to the response
Cookies.delCookie(this.COOKIES, name);
}
/**
* Sends a cookie to the client with the specified name and value
* @param name The name of the cookie
* @param value The value of the cookie
* @param options Optional parameters for the cookie
*/
public cookie(name: string, value: string | number | object, options?: CookieOptions) {
if (typeof value === "string") value = value;
if (typeof value === "number") value = value.toString();
if (typeof value === "object") value = JSON.stringify(value);
// Form the cookie data
const c: Cookies.Cookie = { name, value, ...options };
// Sets the cookie to be appended to the response
Cookies.setCookie(this.COOKIES, c);
}
/**
* Sets the content-type header.
* @param mime The MIME type, or file extension
*
* @example
* ```
* // file extension
* this.res.contentType('.css')
* // file type
* this.res.contentType('html')
* // Mime type from the MIMEType Enumerable
* this.res.contentType(MIMEType.TextMarkdown)
* // Direct MIME type
* this.res.contentType('application/json')
* ```
*/
public contentType(mime: string): RhinoResponse {
let _mime: string = MIMEType.TextPlain;
if (mime.includes("/")) {
// If the passed mime contains a slash, we
// will assume it to be a mime type directly.
_mime = mime;
} else {
// For anything else, we assume it to be a file extension or file type.
// If the file extension could not be understood, we leave it as text/plain
const foundMime = Object.keys(ExtMIMEType).filter((key) => {
const keyName = (mime.startsWith(".") ? "." : "") + key;
return keyName === mime;
});
let mimeFileExt = ExtMIMEType[foundMime[0]];
if (mimeFileExt) {
_mime = mimeFileExt;
} else {
Utils.logPassiveError(
"The passed file extension could not be understood. Please specify the mime type directly."
);
}
}
this.set(HeaderField.ContentType, _mime);
return this;
}
/**
* Send the contents of a file in the HTTP response's body for automatic download by the client.
* @param filepath The relative or absolute path to the file. If the
* path is relative to the file, then the entry point of the application should
* be the base of the file's path.
* @note Requires the --allow-read flag
*/
public async download(filepath: string, filename?: string): Promise<any> {
const file_n = filename ?? filepath.split("/").pop();
return new Promise(async (resolve, reject) => {
try {
this.asAttachment(file_n);
this.sendFile(filepath)
.then(() => resolve(true))
.catch((err) => {
this.removeHeader(HeaderField.ContentDisposition);
reject(err);
});
} catch (err) {
reject(err);
}
});
}
/**
* Sends the response without any data.
*/
public end() {
this.HTTP_REQUEST.finalize();
}
/**
* Returns wether the headers have been sent or not
*/
public get headersSent(): boolean {
return this._headersSent;
}
/**
     * Removes a header field from the response headers.
* @param field The header field to be removed
*/
public removeHeader(field: string) {
this.RESPONSE_HEADERS.delete(field);
}
/**
* Sends the passed resData as a response to the client
* @param resData The data to send to the client
*/
public send(resData: any) {
let d = resData;
// If a response was already sent to the client, we log an error
if (this._headersSent) Utils.logPassiveError("A response has already been sent!");
// If the data is an object, we convert it to a string
if (typeof d === "object") d = JSON.stringify(d);
this.HTTP_REQUEST.done.then(() => {
// Prevents any other responses from being
// sent after this response has been sent.
this._headersSent = true;
});
// If no response has been sent to the client, the data is sent to the client
if (!this._headersSent)
this.HTTP_REQUEST.respond({
status: this.STATUS,
headers: this.RESPONSE_HEADERS,
body: d, // converts any data into a string
...this.COOKIES,
});
}
/**
* Transfers the file at the given path in the response body,
* and sets the content-type header based on the file's extension.
* @param filepath The relative or absolute path to the file. If the
* path is relative to the file, then the entry point of the application should
* be the base of the file's path.
*/
public async sendFile(filepath: any): Promise<any> {
// If a response was already sent to the client, we log an error
if (this._headersSent) Utils.logPassiveError("A response has already been sent!");
const p = Path.resolve(filepath);
const ext = Path.extname(filepath);
return new Promise(async (resolve, reject) => {
try {
const [file, fileInfo] = await Promise.all([Deno.open(p), Deno.stat(p)]);
this.contentType(ext);
this.setHeader("content-length", fileInfo.size.toString());
this.HTTP_REQUEST.done.then(() => {
// closes the file after the request is done
file.close();
// Prevents any other responses from being
// sent after this response has been sent.
this._headersSent = true;
resolve(true);
});
// If no response has been sent to the client, the data is sent to the client
if (!this._headersSent)
this.HTTP_REQUEST.respond({
status: this.STATUS,
headers: this.RESPONSE_HEADERS,
body: file,
...this.COOKIES,
});
} catch (err) {
reject(err);
}
});
}
/**
* Sends a status code and its string name to the client.
* @param code The status code
*/
public sendStatus(code: StatusCode) {
const n = StatusCodeName[Object.keys(StatusCode)[Object.values(StatusCode).indexOf(code)]];
this.status(code).send(n);
}
/**
* Creates an HTTP header field with the passed value. Resets the field if
* it already exists, or creates it if it does not exist.
* @param field The HTTP header field name.
* @param value The value for the header field.
* @note Should follow the standard from https://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html
*/
public set(field: HeaderField, value: string | string[]): RhinoResponse {
this.setHeader(field, value);
return this;
}
/**
* Creates an HTTP header field with the passed value. Resets the field if
* it already exists, or creates it if it does not exist.
* @note Should follow the standard from https://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html
*/
public setHeader(field: string, value: string | string[]): RhinoResponse {
if (Array.isArray(value)) {
// If the header field already exists, we delete it.
if (this.RESPONSE_HEADERS.get(field)) this.RESPONSE_HEADERS.delete(field);
// NOTE: calling Header.append() here was not a mistake. We cannot call
// Headers.set() in this case because that would reset the value
// of the field on every call of the loop. Instead, we delete the field
// (in case it already existed) and append to a new empty field.
value.forEach((val) => this.RESPONSE_HEADERS.append(field, val));
} else {
// Otherwise, just adds the value to the header
this.RESPONSE_HEADERS.set(field, value);
}
return this;
}
/**
     * Sets the status code for the response sent to the client.
* Visit https://www.restapitutorial.com/httpstatuscodes.html for
* more information on what each status code means.
* @param code The status code
*/
public status(code: StatusCode): RhinoResponse {
this.STATUS = code;
return this;
}
}
| RhinoResponse | identifier_name |
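A minimal usage sketch for the class above, in TypeScript. The import paths and the concrete `StatusCode` members are assumptions (hence the cast); every method call matches a signature defined in this file.

// Paths are assumptions; adjust to the actual module layout.
import { RhinoResponse } from "./RhinoResponse.ts";
import { StatusCode } from "../mod.ts";

// Hypothetical route handler that has been handed a constructed response.
async function handler(res: RhinoResponse): Promise<void> {
  res
    .status(200 as StatusCode)           // enum members are not shown in this file, so cast
    .setHeader("x-request-id", "abc123") // creates or resets the header field
    .contentType("application/json");    // contains '/', so it is used as the MIME type directly
  // Object cookie values are JSON.stringify-ed by cookie().
  res.cookie("session", { user: 42 }, { httpOnly: true });
  if (!res.headersSent) {
    res.send({ ok: true });              // objects are serialized before sending
  }
}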
RhinoResponse.ts | import { HTTPServer, Cookies, Path } from "../deps.ts";
import { HeaderField, MIMEType, StatusCode, StatusCodeName, ExtMIMEType } from "../mod.ts";
import { Utils } from "../utils.ts";
// Derives the optional values of a cookie
export type CookieOptions = Omit<Omit<Cookies.Cookie, "name">, "value">;
export class RhinoResponse {
private _headersSent: boolean = false;
private STATUS: number = 200;
private COOKIES: HTTPServer.Response = {};
private readonly RESPONSE_HEADERS = new Headers();
constructor(private readonly HTTP_REQUEST: HTTPServer.ServerRequest) {}
/**
* Appends a value to an already existing HTTP header field, or
* adds the field if it has not been created.
* @param field The HTTP header field name
* @param value The value for the header field.
* @note Should follow the standard from https://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html
*/
public append(field: string, value: string | string[]) {
this.appendHeader(field, value);
return this;
}
/**
* Appends a value to an already existing HTTP header field, or
* adds the field if it has not been created.
* @param field The HTTP header field name
* @param value The value for the header field.
* @note Should follow the standard from https://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html
*/
public appendHeader(field: string, value: string | string[]): RhinoResponse {
if (Array.isArray(value)) {
// Joins the values with a comma, if they are an array of values
value.forEach((val) => this.RESPONSE_HEADERS.append(field, val));
} else {
// Otherwise, just adds the value to the header
this.RESPONSE_HEADERS.append(field, value);
}
return this;
}
/**
* Sets the HTTP response Content-Disposition header field to “attachment”.
     * If a filename is given, then it sets the Content-Type based on the extension name via res.contentType(),
* and sets the Content-Disposition “filename=” parameter.
* @param filename The name that will be set to the downloaded file
*/
public asAttachment(filename?: string): RhinoResponse {
| *
* Clears a cookie from the response by setting
* its expiration date to a date before now.
* @param name The name of the cookie
*/
public clearCookie(name: string) {
// Sets the cookie to be appended to the response
Cookies.delCookie(this.COOKIES, name);
}
/**
* Sends a cookie to the client with the specified name and value
* @param name The name of the cookie
* @param value The value of the cookie
* @param options Optional parameters for the cookie
*/
public cookie(name: string, value: string | number | object, options?: CookieOptions) {
if (typeof value === "string") value = value;
if (typeof value === "number") value = value.toString();
if (typeof value === "object") value = JSON.stringify(value);
// Form the cookie data
const c: Cookies.Cookie = { name, value, ...options };
// Sets the cookie to be appended to the response
Cookies.setCookie(this.COOKIES, c);
}
/**
* Sets the content-type header.
* @param mime The MIME type, or file extension
*
* @example
* ```
* // file extension
* this.res.contentType('.css')
* // file type
* this.res.contentType('html')
* // Mime type from the MIMEType Enumerable
* this.res.contentType(MIMEType.TextMarkdown)
* // Direct MIME type
* this.res.contentType('application/json')
* ```
*/
public contentType(mime: string): RhinoResponse {
let _mime: string = MIMEType.TextPlain;
if (mime.includes("/")) {
// If the passed mime contains a slash, we
// will assume it to be a mime type directly.
_mime = mime;
} else {
// For anything else, we assume it to be a file extension or file type.
// If the file extension could not be understood, we leave it as text/plain
const foundMime = Object.keys(ExtMIMEType).filter((key) => {
const keyName = (mime.startsWith(".") ? "." : "") + key;
return keyName === mime;
});
let mimeFileExt = ExtMIMEType[foundMime[0]];
if (mimeFileExt) {
_mime = mimeFileExt;
} else {
Utils.logPassiveError(
"The passed file extension could not be understood. Please specify the mime type directly."
);
}
}
this.set(HeaderField.ContentType, _mime);
return this;
}
/**
* Send the contents of a file in the HTTP response's body for automatic download by the client.
* @param filepath The relative or absolute path to the file. If the
* path is relative to the file, then the entry point of the application should
* be the base of the file's path.
* @note Requires the --allow-read flag
*/
public async download(filepath: string, filename?: string): Promise<any> {
const file_n = filename ?? filepath.split("/").pop();
return new Promise(async (resolve, reject) => {
try {
this.asAttachment(file_n);
this.sendFile(filepath)
.then(() => resolve(true))
.catch((err) => {
this.removeHeader(HeaderField.ContentDisposition);
reject(err);
});
} catch (err) {
reject(err);
}
});
}
/**
* Sends the response without any data.
*/
public end() {
this.HTTP_REQUEST.finalize();
}
/**
* Returns wether the headers have been sent or not
*/
public get headersSent(): boolean {
return this._headersSent;
}
/**
     * Removes a header field from the response headers.
* @param field The header field to be removed
*/
public removeHeader(field: string) {
this.RESPONSE_HEADERS.delete(field);
}
/**
* Sends the passed resData as a response to the client
* @param resData The data to send to the client
*/
public send(resData: any) {
let d = resData;
// If a response was already sent to the client, we log an error
if (this._headersSent) Utils.logPassiveError("A response has already been sent!");
// If the data is an object, we convert it to a string
if (typeof d === "object") d = JSON.stringify(d);
this.HTTP_REQUEST.done.then(() => {
// Prevents any other responses from being
// sent after this response has been sent.
this._headersSent = true;
});
// If no response has been sent to the client, the data is sent to the client
if (!this._headersSent)
this.HTTP_REQUEST.respond({
status: this.STATUS,
headers: this.RESPONSE_HEADERS,
body: d, // already normalized to a string above
...this.COOKIES,
});
}
/**
* Transfers the file at the given path in the response body,
* and sets the content-type header based on the file's extension.
* @param filepath The relative or absolute path to the file. Relative
* paths are resolved from the application's entry point directory.
*/
public async sendFile(filepath: string): Promise<any> {
// If a response was already sent to the client, we log an error
if (this._headersSent) Utils.logPassiveError("A response has already been sent!");
const p = Path.resolve(filepath);
const ext = Path.extname(filepath);
return new Promise(async (resolve, reject) => {
try {
const [file, fileInfo] = await Promise.all([Deno.open(p), Deno.stat(p)]);
this.contentType(ext);
this.setHeader("content-length", fileInfo.size.toString());
this.HTTP_REQUEST.done.then(() => {
// closes the file after the request is done
file.close();
// Prevents any other responses from being
// sent after this response has been sent.
this._headersSent = true;
resolve(true);
});
// If no response has been sent to the client, the data is sent to the client
if (!this._headersSent)
this.HTTP_REQUEST.respond({
status: this.STATUS,
headers: this.RESPONSE_HEADERS,
body: file,
...this.COOKIES,
});
} catch (err) {
reject(err);
}
});
}
/**
* Sends a status code and its string name to the client.
* @param code The status code
*/
public sendStatus(code: StatusCode) {
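// Reverse-map the numeric code to its enum key, then look up its human-readable name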
const n = StatusCodeName[Object.keys(StatusCode)[Object.values(StatusCode).indexOf(code)]];
this.status(code).send(n);
}
/**
* Creates an HTTP header field with the passed value. Resets the field if
* it already exists, or creates it if it does not exist.
* @param field The HTTP header field name.
* @param value The value for the header field.
* @note Should follow the standard from https://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html
*/
public set(field: HeaderField, value: string | string[]): RhinoResponse {
this.setHeader(field, value);
return this;
}
/**
* Creates an HTTP header field with the passed value. Resets the field if
* it already exists, or creates it if it does not exist.
* @note Should follow the standard from https://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html
*/
public setHeader(field: string, value: string | string[]): RhinoResponse {
if (Array.isArray(value)) {
// If the header field already exists, we delete it.
if (this.RESPONSE_HEADERS.get(field)) this.RESPONSE_HEADERS.delete(field);
// NOTE: calling Headers.append() here was not a mistake. We cannot call
// Headers.set() in this case because that would reset the value
// of the field on every call of the loop. Instead, we delete the field
// (in case it already existed) and append to a new empty field.
value.forEach((val) => this.RESPONSE_HEADERS.append(field, val));
} else {
// Otherwise, just adds the value to the header
this.RESPONSE_HEADERS.set(field, value);
}
return this;
}
/**
* Sets the status code for the response sent to the client
* Visit https://www.restapitutorial.com/httpstatuscodes.html for
* more information on what each status code means.
* @param code The status code
*/
public status(code: StatusCode): RhinoResponse {
this.STATUS = code;
return this;
}
}
| const _filename = filename ? `filename=${filename};` : "";
this.setHeader(HeaderField.ContentDisposition, `attachment; ${_filename}`);
const ext = Path.extname(filename || "");
this.contentType(ext);
return this;
}
/* | identifier_body |
RhinoResponse.ts | import { HTTPServer, Cookies, Path } from "../deps.ts";
import { HeaderField, MIMEType, StatusCode, StatusCodeName, ExtMIMEType } from "../mod.ts";
import { Utils } from "../utils.ts";
// Derives the optional values of a cookie
export type CookieOptions = Omit<Omit<Cookies.Cookie, "name">, "value">;
export class RhinoResponse {
private _headersSent: boolean = false;
private STATUS: number = 200;
private COOKIES: HTTPServer.Response = {};
private readonly RESPONSE_HEADERS = new Headers();
constructor(private readonly HTTP_REQUEST: HTTPServer.ServerRequest) {}
/**
* Appends a value to an already existing HTTP header field, or
* adds the field if it has not been created.
* @param field The HTTP header field name
* @param value The value for the header field.
* @note Should follow the standard from https://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html
*/
public append(field: string, value: string | string[]) {
this.appendHeader(field, value);
return this;
}
/**
* Appends a value to an already existing HTTP header field, or
* adds the field if it has not been created.
* @param field The HTTP header field name
* @param value The value for the header field.
* @note Should follow the standard from https://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html
*/
public appendHeader(field: string, value: string | string[]): RhinoResponse {
if (Array.isArray(value)) {
// Appends each value to the field (Headers joins multiple values with a comma)
value.forEach((val) => this.RESPONSE_HEADERS.append(field, val));
} else {
// Otherwise, just adds the value to the header
this.RESPONSE_HEADERS.append(field, value);
}
return this;
}
/**
* Sets the HTTP response Content-Disposition header field to “attachment”.
* If a filename is given, then it sets the Content-Type based on the extension name via contentType(),
* and sets the Content-Disposition “filename=” parameter.
* @param filename The name that will be set to the downloaded file
*/
public asAttachment(filename?: string): RhinoResponse {
const _filename = filename ? `filename=${filename};` : "";
this.setHeader(HeaderField.ContentDisposition, `attachment; ${_filename}`);
const ext = Path.extname(filename || "");
this.contentType(ext);
return this;
}
/**
* Clears a cookie from the response by setting
* its expiration date to a date before now.
* @param name The name of the cookie
*/
public clearCookie(name: string) {
// Marks the cookie as expired so the client deletes it
Cookies.delCookie(this.COOKIES, name);
}
/**
* Sends a cookie to the client with the specified name and value
* @param name The name of the cookie
* @param value The value of the cookie
* @param options Optional parameters for the cookie
*/
public cookie(name: string, value: string | number | object, options?: CookieOptions) {
if (typeof value === "string") value = value;
if (typeof value === "number") value = value.toString();
if (typeof value === "object") value = JSON.stringify(value);
// Form the cookie data
const c: Cookies.Cookie = { name, value, ...options };
// Sets the cookie to be appended to the response
Cookies.setCookie(this.COOKIES, c);
}
/**
* Sets the content-type header.
* @param mime The MIME type, or file extension
*
* @example
* ```
* // file extension
* this.res.contentType('.css')
* // file type
* this.res.contentType('html')
* // Mime type from the MIMEType Enumerable
* this.res.contentType(MIMEType.TextMarkdown)
* // Direct MIME type
* this.res.contentType('application/json')
* ```
*/
public contentType(mime: string): RhinoResponse {
let _mime: string = MIMEType.TextPlain;
if (mime.includes("/")) {
// If the passed mime contains a slash, we
// will assume it to be a mime type directly.
_mime = mime;
} else {
// For anything else, we assume it to be a file extension or file type.
// If the file extension could not be understood, we leave it as text/plain
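// Match against ExtMIMEType keys with or without a leading dot
// (this assumes ExtMIMEType keys are bare extensions without one)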
const foundMime = Object.keys(ExtMIMEType).filter((key) => {
const keyName = (mime.startsWith(".") ? "." : "") + key;
return keyName === mime;
});
const mimeFileExt = ExtMIMEType[foundMime[0]];
if (mimeFileExt) {
_mime = mimeFileExt;
} else {
Utils.logPassiveError(
"The passed file extension could not be understood. Please specify the mime type directly."
);
}
}
this.set(HeaderField.ContentType, _mime);
return this;
}
/**
* Send the contents of a file in the HTTP response's body for automatic download by the client.
* @param filepath The relative or absolute path to the file. Relative
* paths are resolved from the application's entry point directory.
* @param filename An optional name for the downloaded file (defaults to the file's basename)
* @note Requires the --allow-read flag
*/
public async download(filepath: string, filename?: string): Promise<any> {
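// Fall back to the file's basename when no explicit filename is given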
const file_n = filename ?? filepath.split("/").pop();
return new Promise(async (resolve, reject) => {
try {
this.asAttachment(file_n);
this.sendFile(filepath)
.then(() => resolve(true))
.catch((err) => {
this.removeHeader(HeaderField.ContentDisposition);
reject(err);
});
} catch (err) {
reject(err);
}
});
}
/**
* Sends the response without any data.
*/
public end() {
this.HTTP_REQUEST.finalize();
}
/**
* Returns whether the headers have been sent or not
*/
public get headersSent(): boolean {
return this._headersSent;
}
/**
* Removes a header field from the response headers.
* @param field The header field to be removed
*/
public removeHeader(field: string) {
this.RESPONSE_HEADERS.delete(field);
}
/**
* Sends the passed resData as a response to the client
* @param resData The data to send to the client
*/
public send(resData: any) {
let d = resData;
// If a response was already sent to the client, we log an error
if (this._headersSent) Utils.logPassiveError("A response has already been sent!");
// If the data is an object, we convert it to a string
if (typeof d === "object") d = JSON.stringify(d);
this.HTTP_REQUEST.done.then(() => {
// Prevents any other responses from being
// sent after this response has been sent.
this._headersSent = true;
});
// If no response has been sent to the client, the data is sent to the client
if (!this._headersSent)
this.HTTP_REQUEST.respond({
status: this.STATUS,
headers: this.RESPONSE_HEADERS,
body: d, // already normalized to a string above
...this.COOKIES,
});
}
/**
* Transfers the file at the given path in the response body,
* and sets the content-type header based on the file's extension.
* @param filepath The relative or absolute path to the file. Relative
* paths are resolved from the application's entry point directory.
*/
public async sendFile(filepath: string): Promise<any> {
// If a response was already sent to the client, we log an error
if (this._headersSent) Utils.logPassiveError("A response has already been sent!");
const p = Path.resolve(filepath);
const ext = Path.extname(filepath);
return new Promise(async (resolve, reject) => {
try {
const [file, fileInfo] = await Promise.all([Deno.open(p), Deno.stat(p)]);
this.contentType(ext);
this.setHeader("content-length", fileInfo.size.toString());
this.HTTP_REQUEST.done.then(() => {
// closes the file after the request is done
file.close();
// Prevents any other responses from being
// sent after this response has been sent.
this._headersSent = true;
resolve(true);
});
// If no response has been sent to the client, the data is sent to the client
if (!this._headersSent)
this.HTTP_REQUEST.respond({
status: this.STATUS,
headers: this.RESPONSE_HEADERS,
body: file,
...this.COOKIES,
});
} catch (err) {
reject(err);
}
});
}
/**
* Sends a status code and its string name to the client.
* @param code The status code
*/
public sendStatus(code: StatusCode) {
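// Reverse-map the numeric code to its enum key, then look up its human-readable name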
const n = StatusCodeName[Object.keys(StatusCode)[Object.values(StatusCode).indexOf(code)]];
this.status(code).send(n);
}
/**
* Creates an HTTP header field with the passed value. Resets the field if
* it already exists, or creates it if it does not exist.
* @param field The HTTP header field name.
* @param value The value for the header field.
* @note Should follow the standard from https://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html
*/
public set(field: HeaderField, value: string | string[]): RhinoResponse {
this.setHeader(field, value);
return this;
}
/**
* Creates an HTTP header field with the passed value. Resets the field if
* it already exists, or creates it if it does not exist.
* @note Should follow the standard from https://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html
*/
public setHeader(field: string, value: string | string[]): RhinoResponse {
if (Array.isArray(value)) {
// If the header field already exists, we delete it.
if (this.RESPONSE_HEADERS.get(field)) this.RESPONSE_HEADERS.delete(field);
// NOTE: calling Headers.append() here was not a mistake. We cannot call
// Headers.set() in this case because that would reset the value
// of the field on every call of the loop. Instead, we delete the field
// (in case it already existed) and append to a new empty field.
value.forEach((val) => this.RESPONSE_HEADERS.append(field, val));
} else {
| return this;
}
/**
* Sets the status code for the response sent to the client
* Visit https://www.restapitutorial.com/httpstatuscodes.html for
* more information on what each status code means.
* @param code The status code
*/
public status(code: StatusCode): RhinoResponse {
this.STATUS = code;
return this;
}
}
| // Otherwise, just adds the value to the header
this.RESPONSE_HEADERS.set(field, value);
}
| conditional_block |
RhinoResponse.ts | import { HTTPServer, Cookies, Path } from "../deps.ts";
import { HeaderField, MIMEType, StatusCode, StatusCodeName, ExtMIMEType } from "../mod.ts";
import { Utils } from "../utils.ts";
| // Derives the optional values of a cookie
export type CookieOptions = Omit<Omit<Cookies.Cookie, "name">, "value">;
export class RhinoResponse {
private _headersSent: boolean = false;
private STATUS: number = 200;
private COOKIES: HTTPServer.Response = {};
private readonly RESPONSE_HEADERS = new Headers();
constructor(private readonly HTTP_REQUEST: HTTPServer.ServerRequest) {}
/**
* Appends a value to an already existing HTTP header field, or
* adds the field if it has not been created.
* @param field The HTTP header field name
* @param value The value for the header field.
* @note Should follow the standard from https://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html
*/
public append(field: string, value: string | string[]) {
this.appendHeader(field, value);
return this;
}
/**
* Appends a value to an already existing HTTP header field, or
* adds the field if it has not been created.
* @param field The HTTP header field name
* @param value The value for the header field.
* @note Should follow the standard from https://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html
*/
public appendHeader(field: string, value: string | string[]): RhinoResponse {
if (Array.isArray(value)) {
// Appends each value to the field (Headers joins multiple values with a comma)
value.forEach((val) => this.RESPONSE_HEADERS.append(field, val));
} else {
// Otherwise, just adds the value to the header
this.RESPONSE_HEADERS.append(field, value);
}
return this;
}
/**
* Sets the HTTP response Content-Disposition header field to “attachment”.
* If a filename is given, then it sets the Content-Type based on the extension name via contentType(),
* and sets the Content-Disposition “filename=” parameter.
* @param filename The name that will be set to the downloaded file
*/
public asAttachment(filename?: string): RhinoResponse {
const _filename = filename ? `filename=${filename};` : "";
this.setHeader(HeaderField.ContentDisposition, `attachment; ${_filename}`);
const ext = Path.extname(filename || "");
this.contentType(ext);
return this;
}
/**
* Clears a cookie from the response by setting
* its expiration date to a date before now.
* @param name The name of the cookie
*/
public clearCookie(name: string) {
// Marks the cookie as expired so the client deletes it
Cookies.delCookie(this.COOKIES, name);
}
/**
* Sends a cookie to the client with the specified name and value
* @param name The name of the cookie
* @param value The value of the cookie
* @param options Optional parameters for the cookie
*/
public cookie(name: string, value: string | number | object, options?: CookieOptions) {
if (typeof value === "string") value = value;
if (typeof value === "number") value = value.toString();
if (typeof value === "object") value = JSON.stringify(value);
// Form the cookie data
const c: Cookies.Cookie = { name, value, ...options };
// Sets the cookie to be appended to the response
Cookies.setCookie(this.COOKIES, c);
}
/**
* Sets the content-type header.
* @param mime The MIME type, or file extension
*
* @example
* ```
* // file extension
* this.res.contentType('.css')
* // file type
* this.res.contentType('html')
* // Mime type from the MIMEType Enumerable
* this.res.contentType(MIMEType.TextMarkdown)
* // Direct MIME type
* this.res.contentType('application/json')
* ```
*/
public contentType(mime: string): RhinoResponse {
let _mime: string = MIMEType.TextPlain;
if (mime.includes("/")) {
// If the passed mime contains a slash, we
// will assume it to be a mime type directly.
_mime = mime;
} else {
// For anything else, we assume it to be a file extension or file type.
// If the file extension could not be understood, we leave it as text/plain
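// Match against ExtMIMEType keys with or without a leading dot
// (this assumes ExtMIMEType keys are bare extensions without one)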
const foundMime = Object.keys(ExtMIMEType).filter((key) => {
const keyName = (mime.startsWith(".") ? "." : "") + key;
return keyName === mime;
});
const mimeFileExt = ExtMIMEType[foundMime[0]];
if (mimeFileExt) {
_mime = mimeFileExt;
} else {
Utils.logPassiveError(
"The passed file extension could not be understood. Please specify the mime type directly."
);
}
}
this.set(HeaderField.ContentType, _mime);
return this;
}
/**
* Send the contents of a file in the HTTP response's body for automatic download by the client.
* @param filepath The relative or absolute path to the file. Relative
* paths are resolved from the application's entry point directory.
* @param filename An optional name for the downloaded file (defaults to the file's basename)
* @note Requires the --allow-read flag
*/
public async download(filepath: string, filename?: string): Promise<any> {
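// Fall back to the file's basename when no explicit filename is given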
const file_n = filename ?? filepath.split("/").pop();
return new Promise(async (resolve, reject) => {
try {
this.asAttachment(file_n);
this.sendFile(filepath)
.then(() => resolve(true))
.catch((err) => {
this.removeHeader(HeaderField.ContentDisposition);
reject(err);
});
} catch (err) {
reject(err);
}
});
}
/**
* Sends the response without any data.
*/
public end() {
this.HTTP_REQUEST.finalize();
}
/**
* Returns whether the headers have been sent or not
*/
public get headersSent(): boolean {
return this._headersSent;
}
/**
* Removes a header field from the response headers.
* @param field The header field to be removed
*/
public removeHeader(field: string) {
this.RESPONSE_HEADERS.delete(field);
}
/**
* Sends the passed resData as a response to the client
* @param resData The data to send to the client
*/
public send(resData: any) {
let d = resData;
// If a response was already sent to the client, we log an error
if (this._headersSent) Utils.logPassiveError("A response has already been sent!");
// If the data is an object, we convert it to a string
if (typeof d === "object") d = JSON.stringify(d);
this.HTTP_REQUEST.done.then(() => {
// Prevents any other responses from being
// sent after this response has been sent.
this._headersSent = true;
});
// If no response has been sent to the client, the data is sent to the client
if (!this._headersSent)
this.HTTP_REQUEST.respond({
status: this.STATUS,
headers: this.RESPONSE_HEADERS,
body: d, // already normalized to a string above
...this.COOKIES,
});
}
/**
* Transfers the file at the given path in the response body,
* and sets the content-type header based on the file's extension.
* @param filepath The relative or absolute path to the file. Relative
* paths are resolved from the application's entry point directory.
*/
public async sendFile(filepath: string): Promise<any> {
// If a response was already sent to the client, we log an error
if (this._headersSent) Utils.logPassiveError("A response has already been sent!");
const p = Path.resolve(filepath);
const ext = Path.extname(filepath);
return new Promise(async (resolve, reject) => {
try {
const [file, fileInfo] = await Promise.all([Deno.open(p), Deno.stat(p)]);
this.contentType(ext);
this.setHeader("content-length", fileInfo.size.toString());
this.HTTP_REQUEST.done.then(() => {
// closes the file after the request is done
file.close();
// Prevents any other responses from being
// sent after this response has been sent.
this._headersSent = true;
resolve(true);
});
// If no response has been sent to the client, the data is sent to the client
if (!this._headersSent)
this.HTTP_REQUEST.respond({
status: this.STATUS,
headers: this.RESPONSE_HEADERS,
body: file,
...this.COOKIES,
});
} catch (err) {
reject(err);
}
});
}
/**
* Sends a status code and its string name to the client.
* @param code The status code
*/
public sendStatus(code: StatusCode) {
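// Reverse-map the numeric code to its enum key, then look up its human-readable name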
const n = StatusCodeName[Object.keys(StatusCode)[Object.values(StatusCode).indexOf(code)]];
this.status(code).send(n);
}
/**
* Creates an HTTP header field with the passed value. Resets the field if
* it already exists, or creates it if it does not exist.
* @param field The HTTP header field name.
* @param value The value for the header field.
* @note Should follow the standard from https://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html
*/
public set(field: HeaderField, value: string | string[]): RhinoResponse {
this.setHeader(field, value);
return this;
}
/**
* Creates an HTTP header field with the passed value. Resets the field if
* it already exists, or creates it if it does not exist.
* @note Should follow the standard from https://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html
*/
public setHeader(field: string, value: string | string[]): RhinoResponse {
if (Array.isArray(value)) {
// If the header field already exists, we delete it.
if (this.RESPONSE_HEADERS.get(field)) this.RESPONSE_HEADERS.delete(field);
// NOTE: calling Headers.append() here was not a mistake. We cannot call
// Headers.set() in this case because that would reset the value
// of the field on every call of the loop. Instead, we delete the field
// (in case it already existed) and append to a new empty field.
value.forEach((val) => this.RESPONSE_HEADERS.append(field, val));
} else {
// Otherwise, just adds the value to the header
this.RESPONSE_HEADERS.set(field, value);
}
return this;
}
/**
* Sets the status code for the response sent to the client
* Visit https://www.restapitutorial.com/httpstatuscodes.html for
* more information on what each status code means.
* @param code The status code
*/
public status(code: StatusCode): RhinoResponse {
this.STATUS = code;
return this;
}
} | random_line_split | |
node.go | package tree
import (
"database/sql/driver"
"encoding/binary"
"fmt"
"math"
"reflect"
"strings"
"time"
"github.com/colinking/go-sqlite3-native/internal"
"github.com/colinking/go-sqlite3-native/internal/pager"
)
type node struct {
pager *pager.Pager
// the following fields are kept for debugging purposes:
typ TreeType
freeblockOffset int
numCells int
contentOffset int
numFreeBytes int
cellPointerArrayOffset int
// the content of this node
// these are ordered in increasing order
records []Record
children []*child
// parent pointer for backtracking when traversing a tree
// nil if there is no parent (the root page)
parent *node
}
type child struct {
keyInt int
keyColumns []Column
pageNumber int
node *node
}
func (c *child) String() string {
var key string
if c.keyColumns == nil {
key = fmt.Sprintf("%d", c.keyInt)
} else {
var row []string
for _, c := range c.keyColumns {
row = append(row, c.String())
}
key = "[" + strings.Join(row, "|") + "]"
}
return fmt.Sprintf("%s [page=%d, all<=%s]", c.node.typ.String(), c.pageNumber, key)
}
type Record struct {
rowid int
columns []Column
}
func (r Record) String() string {
var row []string
for _, c := range r.columns {
row = append(row, c.String())
}
return fmt.Sprintf("rowid=%+v columns=[%s]", r.rowid, strings.Join(row, "|"))
}
func (r Record) GetColumn(idx int) Column {
if idx < len(r.columns) {
return r.columns[idx]
}
// NULL
return Column{
typ: 0,
}
}
type Column struct {
typ int
content []byte
}
func (c Column) String() string {
v := c.Value()
switch vt := v.(type) {
case string:
return vt
case []byte:
return "\"" + string(vt) + "\""
case float64:
return fmt.Sprintf("%f", vt)
case int64:
return fmt.Sprintf("%d", vt)
case time.Time:
return vt.String()
case bool:
return fmt.Sprintf("%v", vt)
case nil:
return "nil"
default:
return fmt.Sprintf("<unknown column type = %s>", reflect.TypeOf(v))
}
}
func (c Column) AsInt() (int, bool) {
// [1, 6] are the various int64 types
if c.typ >= 1 && c.typ <= 6 {
i64 := c.Value().(int64)
return int(i64), true
}
return 0, false
}
func (c Column) Value() driver.Value {
// TODO: validate this works with negative integers (2's complement)
switch c.typ {
case 0:
return nil
case 1:
return int64(c.content[0])
case 2:
return int64(binary.BigEndian.Uint16(c.content))
case 3:
// stdlib binary does not have a 24-bit option
b := c.content
u := uint32(b[2]) | uint32(b[1])<<8 | uint32(b[0])<<16
return int64(u)
case 4:
return int64(binary.BigEndian.Uint32(c.content))
case 5:
// stdlib binary does not have a 48-bit option
b := c.content
u := uint64(b[5]) | uint64(b[4])<<8 | uint64(b[3])<<16 | uint64(b[2])<<24 | uint64(b[1])<<32 | uint64(b[0])<<40
return int64(u)
case 6:
return int64(binary.BigEndian.Uint64(c.content))
case 7:
b := binary.BigEndian.Uint64(c.content)
return math.Float64frombits(b)
case 8:
return int64(0)
case 9:
return int64(1)
default:
if c.typ%2 == 0 {
// blob
return c.content
} else {
// string
return string(c.content)
}
}
}
// https://www.sqlite.org/fileformat2.html#serialtype
func | (typ int) int {
switch typ {
case 0, 8, 9:
return 0
case 1:
return 1
case 2:
return 2
case 3:
return 3
case 4:
return 4
case 5:
return 6
case 6, 7:
return 8
case 10, 11:
// https://github.com/sqlite/sqlite/blob/96e3c39bd58ede59150c00e4f8609cbac674ffae/tool/offsets.c#L216
// return 0
panic(fmt.Errorf("cannot support columns of type=%d", typ))
default:
if typ%2 == 0 {
return (typ - 12) / 2
} else {
// https://github.com/sqlite/sqlite/blob/96e3c39bd58ede59150c00e4f8609cbac674ffae/tool/offsets.c#L216
// should this be 12?
return (typ - 13) / 2
}
}
}
func newNode(pageNumber int, pgr *pager.Pager, parent *node) (n *node, err error) {
page, err := pgr.Get(pageNumber)
if err != nil {
return nil, err
}
defer func() {
if rerr := pgr.ReleasePage(); rerr != nil {
// TODO: multi errors
err = rerr
}
}()
offset := 0
if pageNumber == 1 { // the root table (aka sqlite_schema)
// Note: page 1 is an exception, since it includes the database header which
// is contained in the first 100 bytes of this page.
offset += 100
}
// Read the tree header, which is stored in either the first 8 bytes (leaf pages)
// or 12 bytes (interior pages) of the page.
typ := ToTreeType(page[offset])
if typ == TreeTypeUnknown {
return nil, fmt.Errorf("unknown tree page type for page=%d: %+v", pageNumber, page[offset])
}
offset += 1
// Assert that tree pages are of the same type (table vs. index) and that
// parents are always of type interior.
if parent != nil {
if typ == TreeTypeTableInterior || typ == TreeTypeTableLeaf {
if parent.typ != TreeTypeTableInterior {
return nil, fmt.Errorf("invalid node type: parent=%s child=%s", parent.typ.String(), typ.String())
}
} else if typ == TreeTypeIndexInterior || typ == TreeTypeIndexLeaf {
if parent.typ != TreeTypeIndexInterior {
return nil, fmt.Errorf("invalid node type: parent=%s child=%s", parent.typ.String(), typ.String())
}
}
}
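// B-tree page header layout after the type byte (per the SQLite file format):
// bytes 1-2: first freeblock offset, 3-4: cell count,
// 5-6: cell content area offset, 7: fragmented free byte count.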
freeblockOffset := int(binary.BigEndian.Uint16(page[offset : offset+2]))
offset += 2
numCells := int(binary.BigEndian.Uint16(page[offset : offset+2]))
offset += 2
contentOffset := int(binary.BigEndian.Uint16(page[offset : offset+2]))
offset += 2
numFreeBytes := int(page[offset])
offset += 1
// If this is an interior page, then the header is 4 bytes longer because the next four bytes
// store the pointer of the right-most node in this page.
nextPageNumber := 0
if typ == TreeTypeTableInterior || typ == TreeTypeIndexInterior {
nextPageNumber = int(binary.BigEndian.Uint32(page[offset : offset+4]))
offset += 4
}
// TODO: validate long keys (>25% of the page size), which exercise overflow pages;
// this only happens on index trees.
// Read all cell pointers into memory.
ptrs := []int{}
for i := 0; i < numCells; i++ {
// read the cell pointer
b := page[offset : offset+2]
v := binary.BigEndian.Uint16(b)
ptrs = append(ptrs, int(v))
offset += 2
}
// read the cell contents
records := []Record{}
children := []*child{}
for _, ptr := range ptrs {
var numBytesPayload int
var rowid int
var childPageNumber int
var columns []Column
// Left child pointer, if interior
if typ == TreeTypeTableInterior || typ == TreeTypeIndexInterior {
// A 4-byte big-endian page number which is the left child pointer.
childPageNumber = int(binary.BigEndian.Uint32(page[ptr : ptr+4]))
ptr += 4
}
// Number of data bytes, if not zerodata
if typ != TreeTypeTableInterior {
// A varint which is the total number of bytes of payload, including any overflow
ptr += internal.PutVarint(page[ptr:], &numBytesPayload)
}
// Integer key itself if intkey
if typ == TreeTypeTableInterior || typ == TreeTypeTableLeaf {
// A varint which is the integer key.
ptr += internal.PutVarint(page[ptr:], &rowid)
}
// Record Payload
if typ != TreeTypeTableInterior {
// The initial portion of the payload that does not spill to overflow pages.
// TODO: support overflowing payloads.
columns, err = readColumns(page[ptr : ptr+numBytesPayload])
if err != nil {
return nil, err
}
if typ != TreeTypeTableLeaf {
// Extract the rowid from the last column:
idx := len(columns) - 1
var ok bool
rowid, ok = columns[idx].AsInt()
if !ok {
return nil, fmt.Errorf("expected final index column to be rowid: %+v", columns[idx])
}
// Trim the rowid column off:
columns = columns[:len(columns)-1]
}
}
switch typ {
case TreeTypeTableInterior:
children = append(children, &child{
keyInt: rowid,
pageNumber: childPageNumber,
})
case TreeTypeTableLeaf:
records = append(records, Record{
rowid: rowid,
columns: columns,
})
case TreeTypeIndexInterior:
children = append(children, &child{
keyColumns: columns,
pageNumber: childPageNumber,
})
case TreeTypeIndexLeaf:
records = append(records, Record{
rowid: rowid,
columns: columns,
})
}
}
if nextPageNumber > 0 {
children = append(children, &child{
pageNumber: nextPageNumber,
})
}
return &node{
pager: pgr,
records: records,
children: children,
typ: typ,
freeblockOffset: freeblockOffset,
numCells: numCells,
contentOffset: contentOffset,
numFreeBytes: numFreeBytes,
cellPointerArrayOffset: offset,
parent: parent,
}, nil
}
func readColumns(content []byte) ([]Column, error) {
// read columns using the SQLite record format
// https://www.sqlite.org/fileformat2.html#record_format
contentOffset := 0
var headerSize int
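// Note: internal.PutVarint decodes a varint from the slice into the target
// and returns the number of bytes consumed.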
contentOffset += internal.PutVarint(content[contentOffset:], &headerSize)
columnTypes := []int{}
for contentOffset < int(headerSize) {
var serialType int
contentOffset += internal.PutVarint(content[contentOffset:], &serialType)
columnTypes = append(columnTypes, int(serialType))
}
if contentOffset > int(headerSize) {
return nil, fmt.Errorf("consumed more header than expected! (%d>%d)", contentOffset, int(headerSize))
}
columns := make([]Column, 0, len(columnTypes))
for _, typ := range columnTypes {
size := columnContentSize(typ)
columns = append(columns, Column{
typ: typ,
content: content[contentOffset : contentOffset+size],
})
contentOffset += size
// TODO: support ALTER TABLE ADD COLUMN, where we should use default values here
}
if contentOffset != len(content) {
return nil, fmt.Errorf("did not consume all bytes in record (%d!=%d)", contentOffset, len(content))
}
return columns, nil
}
func (n *node) Close() error {
// TODO: release all children nodes
// for _, child := range n.children {
// child
// }
return nil
}
| columnContentSize | identifier_name |
node.go | package tree
import (
"database/sql/driver"
"encoding/binary"
"fmt"
"math"
"reflect"
"strings"
"time"
"github.com/colinking/go-sqlite3-native/internal"
"github.com/colinking/go-sqlite3-native/internal/pager"
)
type node struct {
pager *pager.Pager
// the following fields are kept for debugging purposes:
typ TreeType
freeblockOffset int
numCells int
contentOffset int
numFreeBytes int
cellPointerArrayOffset int
// the content of this node
// these are ordered in increasing order
records []Record
children []*child
// parent pointer for backtracking when traversing a tree
// nil if there is no parent (the root page)
parent *node
}
type child struct {
keyInt int
keyColumns []Column
pageNumber int
node *node
}
func (c *child) String() string {
var key string
if c.keyColumns == nil {
key = fmt.Sprintf("%d", c.keyInt)
} else {
var row []string
for _, c := range c.keyColumns {
row = append(row, c.String())
}
key = "[" + strings.Join(row, "|") + "]"
}
return fmt.Sprintf("%s [page=%d, all<=%s]", c.node.typ.String(), c.pageNumber, key)
}
type Record struct {
rowid int
columns []Column
}
func (r Record) String() string {
var row []string
for _, c := range r.columns {
row = append(row, c.String())
}
return fmt.Sprintf("rowid=%+v columns=[%s]", r.rowid, strings.Join(row, "|"))
}
func (r Record) GetColumn(idx int) Column {
if idx < len(r.columns) {
return r.columns[idx]
}
// NULL
return Column{
typ: 0,
}
}
type Column struct {
typ int
content []byte
}
func (c Column) String() string {
v := c.Value()
switch vt := v.(type) {
case string:
return vt
case []byte:
return "\"" + string(vt) + "\""
case float64:
return fmt.Sprintf("%f", vt)
case int64:
return fmt.Sprintf("%d", vt)
case time.Time:
return vt.String()
case bool:
return fmt.Sprintf("%v", vt)
case nil:
return "nil"
default:
return fmt.Sprintf("<unknown column type = %s>", reflect.TypeOf(v))
}
}
func (c Column) AsInt() (int, bool) {
// [1, 6] are the various int64 types
if c.typ >= 1 && c.typ <= 6 {
i64 := c.Value().(int64)
return int(i64), true
}
return 0, false
}
func (c Column) Value() driver.Value |
// https://www.sqlite.org/fileformat2.html#serialtype
func columnContentSize(typ int) int {
switch typ {
case 0, 8, 9:
return 0
case 1:
return 1
case 2:
return 2
case 3:
return 3
case 4:
return 4
case 5:
return 6
case 6, 7:
return 8
case 10, 11:
// https://github.com/sqlite/sqlite/blob/96e3c39bd58ede59150c00e4f8609cbac674ffae/tool/offsets.c#L216
// return 0
panic(fmt.Errorf("cannot support columns of type=%d", typ))
default:
if typ%2 == 0 {
return (typ - 12) / 2
} else {
// https://github.com/sqlite/sqlite/blob/96e3c39bd58ede59150c00e4f8609cbac674ffae/tool/offsets.c#L216
// should this be 12?
return (typ - 13) / 2
}
}
}
func newNode(pageNumber int, pgr *pager.Pager, parent *node) (n *node, err error) {
page, err := pgr.Get(pageNumber)
if err != nil {
return nil, err
}
defer func() {
if rerr := pgr.ReleasePage(); rerr != nil {
// TODO: multi errors
err = rerr
}
}()
offset := 0
if pageNumber == 1 { // the root table (aka sqlite_schema)
// Note: page 1 is an exception, since it includes the database header which
// is contained in the first 100 bytes of this page.
offset += 100
}
// Read the tree header, which is stored in either the first 8 bytes (leaf pages)
// or 12 bytes (interior pages) of the page.
typ := ToTreeType(page[offset])
if typ == TreeTypeUnknown {
return nil, fmt.Errorf("unknown tree page type for page=%d: %+v", pageNumber, page[offset])
}
offset += 1
// Assert that tree pages are of the same type (table vs. index) and that
// parents are always of type interior.
if parent != nil {
if typ == TreeTypeTableInterior || typ == TreeTypeTableLeaf {
if parent.typ != TreeTypeTableInterior {
return nil, fmt.Errorf("invalid node type: parent=%s child=%s", parent.typ.String(), typ.String())
}
} else if typ == TreeTypeIndexInterior || typ == TreeTypeIndexLeaf {
if parent.typ != TreeTypeIndexInterior {
return nil, fmt.Errorf("invalid node type: parent=%s child=%s", parent.typ.String(), typ.String())
}
}
}
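// B-tree page header layout after the type byte (per the SQLite file format):
// bytes 1-2: first freeblock offset, 3-4: cell count,
// 5-6: cell content area offset, 7: fragmented free byte count.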
freeblockOffset := int(binary.BigEndian.Uint16(page[offset : offset+2]))
offset += 2
numCells := int(binary.BigEndian.Uint16(page[offset : offset+2]))
offset += 2
contentOffset := int(binary.BigEndian.Uint16(page[offset : offset+2]))
offset += 2
numFreeBytes := int(page[offset])
offset += 1
// If this is an interior page, then the header is 4 bytes longer because the next four bytes
// store the pointer of the right-most node in this page.
nextPageNumber := 0
if typ == TreeTypeTableInterior || typ == TreeTypeIndexInterior {
nextPageNumber = int(binary.BigEndian.Uint32(page[offset : offset+4]))
offset += 4
}
// TODO: validate long keys (>25% of the page size), which exercise overflow pages;
// this only happens on index trees.
// Read all cell pointers into memory.
ptrs := []int{}
for i := 0; i < numCells; i++ {
// read the cell pointer
b := page[offset : offset+2]
v := binary.BigEndian.Uint16(b)
ptrs = append(ptrs, int(v))
offset += 2
}
// read the cell contents
records := []Record{}
children := []*child{}
for _, ptr := range ptrs {
var numBytesPayload int
var rowid int
var childPageNumber int
var columns []Column
// Left child pointer, if interior
if typ == TreeTypeTableInterior || typ == TreeTypeIndexInterior {
// A 4-byte big-endian page number which is the left child pointer.
childPageNumber = int(binary.BigEndian.Uint32(page[ptr : ptr+4]))
ptr += 4
}
// Number of data bytes, if not zerodata
if typ != TreeTypeTableInterior {
// A varint which is the total number of bytes of payload, including any overflow
ptr += internal.PutVarint(page[ptr:], &numBytesPayload)
}
// Integer key itself if intkey
if typ == TreeTypeTableInterior || typ == TreeTypeTableLeaf {
// A varint which is the integer key.
ptr += internal.PutVarint(page[ptr:], &rowid)
}
// Record Payload
if typ != TreeTypeTableInterior {
// The initial portion of the payload that does not spill to overflow pages.
// TODO: support overflowing payloads.
columns, err = readColumns(page[ptr : ptr+numBytesPayload])
if err != nil {
return nil, err
}
if typ != TreeTypeTableLeaf {
// Extract the rowid from the last column:
idx := len(columns) - 1
var ok bool
rowid, ok = columns[idx].AsInt()
if !ok {
return nil, fmt.Errorf("expected final index column to be rowid: %+v", columns[idx])
}
// Trim the rowid column off:
columns = columns[:len(columns)-1]
}
}
switch typ {
case TreeTypeTableInterior:
children = append(children, &child{
keyInt: rowid,
pageNumber: childPageNumber,
})
case TreeTypeTableLeaf:
records = append(records, Record{
rowid: rowid,
columns: columns,
})
case TreeTypeIndexInterior:
children = append(children, &child{
keyColumns: columns,
pageNumber: childPageNumber,
})
case TreeTypeIndexLeaf:
records = append(records, Record{
rowid: rowid,
columns: columns,
})
}
}
if nextPageNumber > 0 {
children = append(children, &child{
pageNumber: nextPageNumber,
})
}
return &node{
pager: pgr,
records: records,
children: children,
typ: typ,
freeblockOffset: freeblockOffset,
numCells: numCells,
contentOffset: contentOffset,
numFreeBytes: numFreeBytes,
cellPointerArrayOffset: offset,
parent: parent,
}, nil
}
func readColumns(content []byte) ([]Column, error) {
// read columns using the SQLite record format
// https://www.sqlite.org/fileformat2.html#record_format
contentOffset := 0
var headerSize int
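// Note: internal.PutVarint decodes a varint from the slice into the target
// and returns the number of bytes consumed.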
contentOffset += internal.PutVarint(content[contentOffset:], &headerSize)
columnTypes := []int{}
for contentOffset < int(headerSize) {
var serialType int
contentOffset += internal.PutVarint(content[contentOffset:], &serialType)
columnTypes = append(columnTypes, int(serialType))
}
if contentOffset > int(headerSize) {
return nil, fmt.Errorf("consumed more header than expected! (%d>%d)", contentOffset, int(headerSize))
}
columns := make([]Column, 0, len(columnTypes))
for _, typ := range columnTypes {
size := columnContentSize(typ)
columns = append(columns, Column{
typ: typ,
content: content[contentOffset : contentOffset+size],
})
contentOffset += size
// TODO: support ALTER TABLE ADD COLUMN, where we should use default values here
}
if contentOffset != len(content) {
return nil, fmt.Errorf("did not consume all bytes in record (%d!=%d)", contentOffset, len(content))
}
return columns, nil
}
func (n *node) Close() error {
// TODO: release all children nodes
// for _, child := range n.children {
// child
// }
return nil
}
| {
// TODO: validate this works with negative integers (2's complement)
switch c.typ {
case 0:
return nil
case 1:
return int64(c.content[0])
case 2:
return int64(binary.BigEndian.Uint16(c.content))
case 3:
// stdlib binary does not have a 24-bit option
b := c.content
u := uint32(b[2]) | uint32(b[1])<<8 | uint32(b[0])<<16
return int64(u)
case 4:
return int64(binary.BigEndian.Uint32(c.content))
case 5:
// stdlib binary does not have a 48-bit option
b := c.content
u := uint64(b[5]) | uint64(b[4])<<8 | uint64(b[3])<<16 | uint64(b[2])<<24 | uint64(b[1])<<32 | uint64(b[0])<<40
return int64(u)
case 6:
return int64(binary.BigEndian.Uint64(c.content))
case 7:
b := binary.BigEndian.Uint64(c.content)
return math.Float64frombits(b)
case 8:
return int64(0)
case 9:
return int64(1)
default:
if c.typ%2 == 0 {
// blob
return c.content
} else {
// string
return string(c.content)
}
}
} | identifier_body |
node.go | package tree
import (
"database/sql/driver"
"encoding/binary"
"fmt"
"math"
"reflect"
"strings"
"time"
"github.com/colinking/go-sqlite3-native/internal"
"github.com/colinking/go-sqlite3-native/internal/pager"
)
type node struct {
pager *pager.Pager
// the following fields are kept for debugging purposes:
typ TreeType
freeblockOffset int
numCells int
contentOffset int
numFreeBytes int
cellPointerArrayOffset int
// the content of this node
// these are ordered in increasing order
records []Record
children []*child
// parent pointer for backtracking when traversing a tree
// nil if there is no parent (the root page)
parent *node
}
type child struct {
keyInt int
keyColumns []Column
pageNumber int
node *node
}
func (c *child) String() string {
var key string
if c.keyColumns == nil {
key = fmt.Sprintf("%d", c.keyInt)
} else {
var row []string
for _, c := range c.keyColumns {
row = append(row, c.String())
}
key = "[" + strings.Join(row, "|") + "]"
}
return fmt.Sprintf("%s [page=%d, all<=%s]", c.node.typ.String(), c.pageNumber, key)
}
type Record struct {
rowid int
columns []Column
}
func (r Record) String() string {
var row []string
for _, c := range r.columns {
row = append(row, c.String())
}
return fmt.Sprintf("rowid=%+v columns=[%s]", r.rowid, strings.Join(row, "|"))
}
func (r Record) GetColumn(idx int) Column {
if idx < len(r.columns) {
return r.columns[idx]
}
// NULL
return Column{
typ: 0,
}
}
type Column struct {
typ int
content []byte
}
func (c Column) String() string {
v := c.Value()
switch vt := v.(type) {
case string:
return vt
case []byte:
return "\"" + string(vt) + "\""
case float64:
return fmt.Sprintf("%f", vt)
case int64:
return fmt.Sprintf("%d", vt)
case time.Time:
return vt.String()
case bool:
return fmt.Sprintf("%v", vt)
case nil:
return "nil"
default:
return fmt.Sprintf("<unknown column type = %s>", reflect.TypeOf(v))
}
}
| func (c Column) AsInt() (int, bool) {
// [1, 6] are the various int64 types
if c.typ >= 1 && c.typ <= 6 {
i64 := c.Value().(int64)
return int(i64), true
}
return 0, false
}
func (c Column) Value() driver.Value {
// TODO: validate this works with negative integers (2's complement)
switch c.typ {
case 0:
return nil
case 1:
return int64(c.content[0])
case 2:
return int64(binary.BigEndian.Uint16(c.content))
case 3:
// stdlib binary does not have a 24-bit option
b := c.content
u := uint32(b[2]) | uint32(b[1])<<8 | uint32(b[0])<<16
return int64(u)
case 4:
return int64(binary.BigEndian.Uint32(c.content))
case 5:
// stdlib binary does not have a 48-bit option
b := c.content
u := uint64(b[5]) | uint64(b[4])<<8 | uint64(b[3])<<16 | uint64(b[2])<<24 | uint64(b[1])<<32 | uint64(b[0])<<40
return int64(u)
case 6:
return int64(binary.BigEndian.Uint64(c.content))
case 7:
b := binary.BigEndian.Uint64(c.content)
return math.Float64frombits(b)
case 8:
return int64(0)
case 9:
return int64(1)
default:
if c.typ%2 == 0 {
// blob
return c.content
} else {
// string
return string(c.content)
}
}
}
// https://www.sqlite.org/fileformat2.html#serialtype
func columnContentSize(typ int) int {
switch typ {
case 0, 8, 9:
return 0
case 1:
return 1
case 2:
return 2
case 3:
return 3
case 4:
return 4
case 5:
return 6
case 6, 7:
return 8
case 10, 11:
// https://github.com/sqlite/sqlite/blob/96e3c39bd58ede59150c00e4f8609cbac674ffae/tool/offsets.c#L216
// return 0
panic(fmt.Errorf("cannot support columns of type=%d", typ))
default:
if typ%2 == 0 {
return (typ - 12) / 2
} else {
// https://github.com/sqlite/sqlite/blob/96e3c39bd58ede59150c00e4f8609cbac674ffae/tool/offsets.c#L216
// should this be 12?
return (typ - 13) / 2
}
}
}
func newNode(pageNumber int, pgr *pager.Pager, parent *node) (n *node, err error) {
page, err := pgr.Get(pageNumber)
if err != nil {
return nil, err
}
defer func() {
if rerr := pgr.ReleasePage(); rerr != nil {
// TODO: multi errors
err = rerr
}
}()
offset := 0
if pageNumber == 1 { // the root table (aka sqlite_schema)
// Note: page 1 is an exception, since it includes the database header which
// is contained in the first 100 bytes of this page.
offset += 100
}
// Read the tree header, which is stored in either the first 8 bytes (leaf pages)
// or 12 bytes (interior pages) of the page.
typ := ToTreeType(page[offset])
if typ == TreeTypeUnknown {
return nil, fmt.Errorf("unknown tree page type for page=%d: %+v", pageNumber, page[offset])
}
offset += 1
// Assert that tree pages are of the same type (table vs. index) and that
// parents are always of type interior.
if parent != nil {
if typ == TreeTypeTableInterior || typ == TreeTypeTableLeaf {
if parent.typ != TreeTypeTableInterior {
return nil, fmt.Errorf("invalid node type: parent=%s child=%s", parent.typ.String(), typ.String())
}
} else if typ == TreeTypeIndexInterior || typ == TreeTypeIndexLeaf {
if parent.typ != TreeTypeIndexInterior {
return nil, fmt.Errorf("invalid node type: parent=%s child=%s", parent.typ.String(), typ.String())
}
}
}
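// B-tree page header layout after the type byte (per the SQLite file format):
// bytes 1-2: first freeblock offset, 3-4: cell count,
// 5-6: cell content area offset, 7: fragmented free byte count.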
freeblockOffset := int(binary.BigEndian.Uint16(page[offset : offset+2]))
offset += 2
numCells := int(binary.BigEndian.Uint16(page[offset : offset+2]))
offset += 2
contentOffset := int(binary.BigEndian.Uint16(page[offset : offset+2]))
offset += 2
numFreeBytes := int(page[offset])
offset += 1
// If this is an interior page, then the header is 4 bytes longer because the next four bytes
// store the pointer of the right-most node in this page.
nextPageNumber := 0
if typ == TreeTypeTableInterior || typ == TreeTypeIndexInterior {
nextPageNumber = int(binary.BigEndian.Uint32(page[offset : offset+4]))
offset += 4
}
// TODO: validate long keys (>25% of the page size), which exercise overflow pages;
// this only happens on index trees.
// Read all cell pointers into memory.
ptrs := []int{}
for i := 0; i < numCells; i++ {
// read the cell pointer
b := page[offset : offset+2]
v := binary.BigEndian.Uint16(b)
ptrs = append(ptrs, int(v))
offset += 2
}
// read the cell contents
records := []Record{}
children := []*child{}
for _, ptr := range ptrs {
var numBytesPayload int
var rowid int
var childPageNumber int
var columns []Column
// Left child pointer, if interior
if typ == TreeTypeTableInterior || typ == TreeTypeIndexInterior {
// A 4-byte big-endian page number which is the left child pointer.
childPageNumber = int(binary.BigEndian.Uint32(page[ptr : ptr+4]))
ptr += 4
}
// Number of data bytes, if not zerodata
if typ != TreeTypeTableInterior {
// A varint which is the total number of bytes of payload, including any overflow
ptr += internal.PutVarint(page[ptr:], &numBytesPayload)
}
// Integer key itself if intkey
if typ == TreeTypeTableInterior || typ == TreeTypeTableLeaf {
// A varint which is the integer key.
ptr += internal.PutVarint(page[ptr:], &rowid)
}
// Record Payload
if typ != TreeTypeTableInterior {
// The initial portion of the payload that does not spill to overflow pages.
// TODO: support overflowing payloads.
columns, err = readColumns(page[ptr : ptr+numBytesPayload])
if err != nil {
return nil, err
}
if typ != TreeTypeTableLeaf {
// Extract the rowid from the last column:
idx := len(columns) - 1
var ok bool
rowid, ok = columns[idx].AsInt()
if !ok {
return nil, fmt.Errorf("expected final index column to be rowid: %+v", columns[idx])
}
// Trim the rowid column off:
columns = columns[:len(columns)-1]
}
}
switch typ {
case TreeTypeTableInterior:
children = append(children, &child{
keyInt: rowid,
pageNumber: childPageNumber,
})
case TreeTypeTableLeaf:
records = append(records, Record{
rowid: rowid,
columns: columns,
})
case TreeTypeIndexInterior:
children = append(children, &child{
keyColumns: columns,
pageNumber: childPageNumber,
})
case TreeTypeIndexLeaf:
records = append(records, Record{
rowid: rowid,
columns: columns,
})
}
}
if nextPageNumber > 0 {
children = append(children, &child{
pageNumber: nextPageNumber,
})
}
return &node{
pager: pgr,
records: records,
children: children,
typ: typ,
freeblockOffset: freeblockOffset,
numCells: numCells,
contentOffset: contentOffset,
numFreeBytes: numFreeBytes,
cellPointerArrayOffset: offset,
parent: parent,
}, nil
}
func readColumns(content []byte) ([]Column, error) {
// read columns using the SQLite record format
// https://www.sqlite.org/fileformat2.html#record_format
contentOffset := 0
var headerSize int
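// Note: internal.PutVarint decodes a varint from the slice into the target
// and returns the number of bytes consumed.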
contentOffset += internal.PutVarint(content[contentOffset:], &headerSize)
columnTypes := []int{}
for contentOffset < int(headerSize) {
var serialType int
contentOffset += internal.PutVarint(content[contentOffset:], &serialType)
columnTypes = append(columnTypes, int(serialType))
}
if contentOffset > int(headerSize) {
return nil, fmt.Errorf("consumed more header than expected! (%d>%d)", contentOffset, int(headerSize))
}
columns := make([]Column, 0, len(columnTypes))
for _, typ := range columnTypes {
size := columnContentSize(typ)
columns = append(columns, Column{
typ: typ,
content: content[contentOffset : contentOffset+size],
})
contentOffset += size
// TODO: support ALTER TABLE ADD COLUMN, where we should use default values here
}
if contentOffset != len(content) {
return nil, fmt.Errorf("did not consume all bytes in record (%d!=%d)", contentOffset, len(content))
}
return columns, nil
}
func (n *node) Close() error {
// TODO: release all children nodes
// for _, child := range n.children {
// child
// }
return nil
} | random_line_split | |
node.go | package tree
import (
"database/sql/driver"
"encoding/binary"
"fmt"
"math"
"reflect"
"strings"
"time"
"github.com/colinking/go-sqlite3-native/internal"
"github.com/colinking/go-sqlite3-native/internal/pager"
)
type node struct {
pager *pager.Pager
// the following fields are kept for debugging purposes:
typ TreeType
freeblockOffset int
numCells int
contentOffset int
numFreeBytes int
cellPointerArrayOffset int
// the content of this node
// these are ordered in increasing order
records []Record
children []*child
// parent pointer for backtracking when traversing a tree
// nil if there is no parent (the root page)
parent *node
}
type child struct {
keyInt int
keyColumns []Column
pageNumber int
node *node
}
func (c *child) String() string {
var key string
if c.keyColumns == nil {
key = fmt.Sprintf("%d", c.keyInt)
} else {
var row []string
for _, c := range c.keyColumns {
row = append(row, c.String())
}
key = "[" + strings.Join(row, "|") + "]"
}
return fmt.Sprintf("%s [page=%d, all<=%s]", c.node.typ.String(), c.pageNumber, key)
}
type Record struct {
rowid int
columns []Column
}
func (r Record) String() string {
var row []string
for _, c := range r.columns {
row = append(row, c.String())
}
return fmt.Sprintf("rowid=%+v columns=[%s]", r.rowid, strings.Join(row, "|"))
}
func (r Record) GetColumn(idx int) Column {
if idx < len(r.columns) {
return r.columns[idx]
}
// NULL
return Column{
typ: 0,
}
}
type Column struct {
typ int
content []byte
}
func (c Column) String() string {
v := c.Value()
switch vt := v.(type) {
case string:
return vt
case []byte:
return "\"" + string(vt) + "\""
case float64:
return fmt.Sprintf("%f", vt)
case int64:
return fmt.Sprintf("%d", vt)
case time.Time:
return vt.String()
case bool:
return fmt.Sprintf("%v", vt)
case nil:
return "nil"
default:
return fmt.Sprintf("<unknown column type = %s>", reflect.TypeOf(v))
}
}
func (c Column) AsInt() (int, bool) {
// [1, 6] are the various int64 types
if c.typ >= 1 && c.typ <= 6 {
i64 := c.Value().(int64)
return int(i64), true
}
return 0, false
}
func (c Column) Value() driver.Value {
// TODO: validate this works with negative integers (2's complement)
switch c.typ {
case 0:
return nil
case 1:
return int64(c.content[0])
case 2:
return int64(binary.BigEndian.Uint16(c.content))
case 3:
// stdlib binary does not have a 24-bit option
b := c.content
u := uint32(b[2]) | uint32(b[1])<<8 | uint32(b[0])<<16
return int64(u)
case 4:
return int64(binary.BigEndian.Uint32(c.content))
case 5:
// stdlib binary does not have a 48-bit option
b := c.content
u := uint64(b[5]) | uint64(b[4])<<8 | uint64(b[3])<<16 | uint64(b[2])<<24 | uint64(b[1])<<32 | uint64(b[0])<<40
return int64(u)
case 6:
return int64(binary.BigEndian.Uint64(c.content))
case 7:
b := binary.BigEndian.Uint64(c.content)
return math.Float64frombits(b)
case 8:
return int64(0)
case 9:
return int64(1)
default:
if c.typ%2 == 0 {
// blob
return c.content
} else {
// string
return string(c.content)
}
}
}
// https://www.sqlite.org/fileformat2.html#serialtype
func columnContentSize(typ int) int {
switch typ {
case 0, 8, 9:
return 0
case 1:
return 1
case 2:
return 2
case 3:
return 3
case 4:
return 4
case 5:
return 6
case 6, 7:
return 8
case 10, 11:
// https://github.com/sqlite/sqlite/blob/96e3c39bd58ede59150c00e4f8609cbac674ffae/tool/offsets.c#L216
// return 0
panic(fmt.Errorf("cannot support columns of type=%d", typ))
default:
if typ%2 == 0 {
return (typ - 12) / 2
} else {
// https://github.com/sqlite/sqlite/blob/96e3c39bd58ede59150c00e4f8609cbac674ffae/tool/offsets.c#L216
// should this be 12?
return (typ - 13) / 2
}
}
}
func newNode(pageNumber int, pgr *pager.Pager, parent *node) (n *node, err error) {
page, err := pgr.Get(pageNumber)
if err != nil {
return nil, err
}
defer func() {
if rerr := pgr.ReleasePage(); rerr != nil {
// TODO: multi errors
err = rerr
}
}()
offset := 0
if pageNumber == 1 { // the root table (aka sqlite_schema)
// Note: page 1 is an exception, since it includes the database header which
// is contained in the first 100 bytes of this page.
offset += 100
}
// Read the tree header, which is stored in either the first 8 bytes (leaf pages)
// or 12 bytes (interior pages) of the page.
typ := ToTreeType(page[offset])
if typ == TreeTypeUnknown {
return nil, fmt.Errorf("unknown tree page type for page=%d: %+v", pageNumber, page[offset])
}
offset += 1
// Assert that tree pages are of the same type (table vs. index) and that
// parents are always of type interior.
if parent != nil {
if typ == TreeTypeTableInterior || typ == TreeTypeTableLeaf {
if parent.typ != TreeTypeTableInterior {
return nil, fmt.Errorf("invalid node type: parent=%s child=%s", parent.typ.String(), typ.String())
}
} else if typ == TreeTypeIndexInterior || typ == TreeTypeIndexLeaf {
if parent.typ != TreeTypeIndexInterior {
return nil, fmt.Errorf("invalid node type: parent=%s child=%s", parent.typ.String(), typ.String())
}
}
}
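// B-tree page header layout after the type byte (per the SQLite file format):
// bytes 1-2: first freeblock offset, 3-4: cell count,
// 5-6: cell content area offset, 7: fragmented free byte count.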
freeblockOffset := int(binary.BigEndian.Uint16(page[offset : offset+2]))
offset += 2
numCells := int(binary.BigEndian.Uint16(page[offset : offset+2]))
offset += 2
contentOffset := int(binary.BigEndian.Uint16(page[offset : offset+2]))
offset += 2
numFreeBytes := int(page[offset])
offset += 1
// If this is an interior page, then the header is 4 bytes longer because the next four bytes
// store the pointer of the right-most node in this page.
nextPageNumber := 0
if typ == TreeTypeTableInterior || typ == TreeTypeIndexInterior {
nextPageNumber = int(binary.BigEndian.Uint32(page[offset : offset+4]))
offset += 4
}
// TODO: validate long keys (>25% of the page size), which exercise overflow pages;
// this only happens on index trees.
// Read all cell pointers into memory.
ptrs := []int{}
for i := 0; i < numCells; i++ {
// read the cell pointer
b := page[offset : offset+2]
v := binary.BigEndian.Uint16(b)
ptrs = append(ptrs, int(v))
offset += 2
}
// read the cell contents
records := []Record{}
children := []*child{}
for _, ptr := range ptrs {
var numBytesPayload int
var rowid int
var childPageNumber int
var columns []Column
// Left child pointer, if interior
if typ == TreeTypeTableInterior || typ == TreeTypeIndexInterior {
// A 4-byte big-endian page number which is the left child pointer.
childPageNumber = int(binary.BigEndian.Uint32(page[ptr : ptr+4]))
ptr += 4
}
// Number of data bytes, if not zerodata
if typ != TreeTypeTableInterior {
// A varint which is the total number of bytes of payload, including any overflow
ptr += internal.PutVarint(page[ptr:], &numBytesPayload)
}
// Integer key itself if intkey
if typ == TreeTypeTableInterior || typ == TreeTypeTableLeaf {
// A varint which is the integer key.
ptr += internal.PutVarint(page[ptr:], &rowid)
}
// Record Payload
if typ != TreeTypeTableInterior {
// The initial portion of the payload that does not spill to overflow pages.
// TODO: support overflowing payloads.
columns, err = readColumns(page[ptr : ptr+numBytesPayload])
if err != nil |
if typ != TreeTypeTableLeaf {
// Extract the rowid from the last column:
idx := len(columns) - 1
var ok bool
rowid, ok = columns[idx].AsInt()
if !ok {
return nil, fmt.Errorf("expected final index column to be rowid: %+v", columns[idx])
}
// Trim the rowid column off:
columns = columns[:len(columns)-1]
}
}
switch typ {
case TreeTypeTableInterior:
children = append(children, &child{
keyInt: rowid,
pageNumber: childPageNumber,
})
case TreeTypeTableLeaf:
records = append(records, Record{
rowid: rowid,
columns: columns,
})
case TreeTypeIndexInterior:
children = append(children, &child{
keyColumns: columns,
pageNumber: childPageNumber,
})
case TreeTypeIndexLeaf:
records = append(records, Record{
rowid: rowid,
columns: columns,
})
}
}
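// The right-most pointer from the interior page header becomes a final child
// with no key, so lookups past the largest key in this page descend into it.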
if nextPageNumber > 0 {
children = append(children, &child{
pageNumber: nextPageNumber,
})
}
return &node{
pager: pgr,
records: records,
children: children,
typ: typ,
freeblockOffset: freeblockOffset,
numCells: numCells,
contentOffset: contentOffset,
numFreeBytes: numFreeBytes,
cellPointerArrayOffset: offset,
parent: parent,
}, nil
}
func readColumns(content []byte) ([]Column, error) {
// read columns using the SQLite record format
// https://www.sqlite.org/fileformat2.html#record_format
contentOffset := 0
var headerSize int
contentOffset += internal.PutVarint(content[contentOffset:], &headerSize)
columnTypes := []int{}
for contentOffset < headerSize {
var serialType int
contentOffset += internal.PutVarint(content[contentOffset:], &serialType)
columnTypes = append(columnTypes, serialType)
}
if contentOffset > headerSize {
return nil, fmt.Errorf("consumed more header than expected! (%d>%d)", contentOffset, headerSize)
}
columns := make([]Column, 0, len(columnTypes))
for _, typ := range columnTypes {
size := columnContentSize(typ)
columns = append(columns, Column{
typ: typ,
content: content[contentOffset : contentOffset+size],
})
contentOffset += size
// TODO: support ALTER TABLE ... ADD COLUMN, where we should use default values here
}
if contentOffset != len(content) {
return nil, fmt.Errorf("did not consume all bytes in record (%d!=%d)", contentOffset, len(content))
}
return columns, nil
}
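// Worked example (illustrative, not from the original source): the record
// bytes [0x03 0x01 0x02 0xff 0x01 0x02] start with a 3-byte header (header
// size varint 0x03) declaring serial types 1 (1-byte int) and 2 (2-byte int),
// followed by the column contents 0xff and 0x01 0x02.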
func (n *node) Close() error {
// TODO: release all children nodes
// for _, child := range n.children {
// child
// }
return nil
}
| {
return nil, err
} | conditional_block |
terminal.rs | //! Provides a low-level terminal interface
use std::io;
use std::time::Duration;
use mortal::{self, PrepareConfig, PrepareState, TerminalReadGuard, TerminalWriteGuard};
use crate::sys;
pub use mortal::{CursorMode, Signal, SignalSet, Size};
/// Default `Terminal` interface
pub struct DefaultTerminal(mortal::Terminal);
/// Represents the result of a `Terminal` read operation
pub enum RawRead {
/// `n` bytes were read from the device
Bytes(usize),
/// The terminal window was resized
Resize(Size),
/// A signal was received while waiting for input
Signal(Signal),
}
/// Defines a low-level interface to the terminal
pub trait Terminal: Sized + Send + Sync {
// TODO: When generic associated types are implemented (and stabilized),
// boxed trait objects may be replaced by `Reader` and `Writer`.
/// Returned by `prepare`; passed to `restore` to restore state.
type PrepareState;
/*
/// Holds an exclusive read lock and provides read operations
type Reader: TerminalReader;
/// Holds an exclusive write lock and provides write operations
type Writer: TerminalWriter;
*/
/// Returns the name of the terminal.
fn name(&self) -> &str;
/// Acquires a lock on terminal read operations and returns a value holding
/// that lock and granting access to such operations.
///
/// The lock must not be released until the returned value is dropped.
fn lock_read<'a>(&'a self) -> Box<dyn TerminalReader<Self> + 'a>;
/// Acquires a lock on terminal write operations and returns a value holding
/// that lock and granting access to such operations.
///
/// The lock must not be released until the returned value is dropped.
fn lock_write<'a>(&'a self) -> Box<dyn TerminalWriter<Self> + 'a>;
}
/// Holds a lock on `Terminal` read operations
pub trait TerminalReader<Term: Terminal> {
/// Prepares the terminal for line reading and editing operations.
///
/// If `block_signals` is `true`, the terminal will be configured to treat
/// special characters that would otherwise be interpreted as signals as
/// their literal value.
///
/// If `block_signals` is `false`, a signal contained in the `report_signals`
/// set may be returned.
///
/// # Notes
///
/// This method may be called more than once. However, if the state values
/// are not restored in the reverse of the order in which they were created,
/// the state of the underlying terminal device becomes undefined.
fn prepare(&mut self, block_signals: bool, report_signals: SignalSet)
-> io::Result<Term::PrepareState>;
/// Like `prepare`, but called when the write lock is already held.
///
/// # Safety
///
/// This method must be called with a `TerminalWriter` instance returned
/// by the same `Terminal` instance to which this `TerminalReader` belongs.
unsafe fn prepare_with_lock(&mut self, lock: &mut dyn TerminalWriter<Term>,
block_signals: bool, report_signals: SignalSet)
-> io::Result<Term::PrepareState>;
/// Restores the terminal state using the given state data.
fn restore(&mut self, state: Term::PrepareState) -> io::Result<()>;
/// Like `restore`, but called when the write lock is already held.
///
/// # Safety
///
/// This method must be called with a `TerminalWriter` instance returned
/// by the same `Terminal` instance to which this `TerminalReader` belongs.
unsafe fn restore_with_lock(&mut self, lock: &mut dyn TerminalWriter<Term>,
state: Term::PrepareState) -> io::Result<()>;
/// Reads some input from the terminal and appends it to the given buffer.
fn read(&mut self, buf: &mut Vec<u8>) -> io::Result<RawRead>;
/// Waits up to `timeout` for user input. If `timeout` is `None`, waits indefinitely.
///
/// Returns `Ok(true)` if input becomes available within the given timeout
/// or if a signal is received.
///
/// Returns `Ok(false)` if the timeout expires before input becomes available.
fn wait_for_input(&mut self, timeout: Option<Duration>) -> io::Result<bool>;
}
/// Holds a lock on `Terminal` write operations
pub trait TerminalWriter<Term: Terminal> {
/// Returns the size of the terminal window
fn size(&self) -> io::Result<Size>;
/// Presents a clear terminal screen, with cursor at first row, first column.
///
/// If the terminal possesses a scrolling window over a buffer, this shall
/// have the effect of moving the visible window down such that it shows
/// an empty view of the buffer, preserving some or all of existing buffer
/// contents, where possible.
fn clear_screen(&mut self) -> io::Result<()>;
/// Clears characters on the line occupied by the cursor, beginning with the
/// cursor and ending at the end of the line. Also clears all characters on
/// all lines after the cursor.
fn clear_to_screen_end(&mut self) -> io::Result<()>;
/// Moves the cursor up `n` cells; `n` may be zero.
fn move_up(&mut self, n: usize) -> io::Result<()>;
/// Moves the cursor down `n` cells; `n` may be zero.
fn move_down(&mut self, n: usize) -> io::Result<()>;
/// Moves the cursor left `n` cells; `n` may be zero.
fn move_left(&mut self, n: usize) -> io::Result<()>;
/// Moves the cursor right `n` cells; `n` may be zero.
fn move_right(&mut self, n: usize) -> io::Result<()>;
/// Moves the cursor to the first column of the current line
fn move_to_first_column(&mut self) -> io::Result<()>;
/// Set the current cursor mode
fn set_cursor_mode(&mut self, mode: CursorMode) -> io::Result<()>;
/// Writes output to the terminal.
///
/// For each carriage return `'\r'` written to the terminal, the cursor
/// should be moved to the first column of the current line.
///
/// For each newline `'\n'` written to the terminal, the cursor should
/// be moved to the first column of the following line.
///
/// The terminal interface shall not automatically move the cursor to the next
/// line when `write` causes a character to be written to the final column.
fn write(&mut self, s: &str) -> io::Result<()>;
/// Flushes any currently buffered output data.
///
/// `TerminalWriter` instances might not buffer data on all systems.
///
/// Data must be flushed when the `TerminalWriter` instance is dropped.
fn flush(&mut self) -> io::Result<()>;
}
impl DefaultTerminal {
/// Opens access to the terminal device associated with standard output.
pub fn new() -> io::Result<DefaultTerminal> {
mortal::Terminal::new().map(DefaultTerminal)
}
/// Opens access to the terminal device associated with standard error.
pub fn stderr() -> io::Result<DefaultTerminal> {
mortal::Terminal::stderr().map(DefaultTerminal)
}
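// Note: this cast is sound only when `writer` is the boxed
// `TerminalWriteGuard` returned by this terminal's `lock_write`, which is
// what the `unsafe` contracts on `prepare_with_lock`/`restore_with_lock`
// require callers to guarantee.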
unsafe fn cast_writer<'a>(writer: &'a mut dyn TerminalWriter<Self>)
-> &'a mut TerminalWriteGuard<'a> {
&mut *(writer as *mut _ as *mut TerminalWriteGuard)
}
}
impl Terminal for DefaultTerminal {
type PrepareState = PrepareState;
fn name(&self) -> &str {
self.0.name()
}
fn lock_read<'a>(&'a self) -> Box<dyn TerminalReader<Self> + 'a> {
Box::new(self.0.lock_read().unwrap())
}
fn lock_write<'a>(&'a self) -> Box<dyn TerminalWriter<Self> + 'a> {
Box::new(self.0.lock_write().unwrap())
}
}
impl<'a> TerminalReader<DefaultTerminal> for TerminalReadGuard<'a> {
fn prepare(&mut self, block_signals: bool, report_signals: SignalSet)
-> io::Result<PrepareState> {
self.prepare(PrepareConfig{
block_signals,
enable_control_flow: !block_signals,
enable_keypad: false,
report_signals,
.. PrepareConfig::default()
})
}
unsafe fn prepare_with_lock(&mut self,
lock: &mut dyn TerminalWriter<DefaultTerminal>,
block_signals: bool, report_signals: SignalSet)
-> io::Result<PrepareState> {
let lock = DefaultTerminal::cast_writer(lock);
self.prepare_with_lock(lock, PrepareConfig{
block_signals,
enable_control_flow: !block_signals,
enable_keypad: false,
report_signals,
.. PrepareConfig::default()
})
}
fn restore(&mut self, state: PrepareState) -> io::Result<()> {
self.restore(state)
}
unsafe fn restore_with_lock(&mut self,
lock: &mut dyn TerminalWriter<DefaultTerminal>, state: PrepareState)
-> io::Result<()> {
let lock = DefaultTerminal::cast_writer(lock);
self.restore_with_lock(lock, state)
}
fn read(&mut self, buf: &mut Vec<u8>) -> io::Result<RawRead> {
sys::terminal_read(self, buf)
}
fn wait_for_input(&mut self, timeout: Option<Duration>) -> io::Result<bool> {
self.wait_event(timeout)
}
}
impl<'a> TerminalWriter<DefaultTerminal> for TerminalWriteGuard<'a> {
fn size(&self) -> io::Result<Size> {
self.size()
}
fn clear_screen(&mut self) -> io::Result<()> {
self.clear_screen()
}
fn clear_to_screen_end(&mut self) -> io::Result<()> {
self.clear_to_screen_end()
}
fn move_up(&mut self, n: usize) -> io::Result<()> {
self.move_up(n)
}
fn move_down(&mut self, n: usize) -> io::Result<()> {
self.move_down(n)
}
fn move_left(&mut self, n: usize) -> io::Result<()> {
self.move_left(n)
}
fn move_right(&mut self, n: usize) -> io::Result<()> {
self.move_right(n)
}
fn | (&mut self) -> io::Result<()> {
self.move_to_first_column()
}
fn set_cursor_mode(&mut self, mode: CursorMode) -> io::Result<()> {
self.set_cursor_mode(mode)
}
fn write(&mut self, s: &str) -> io::Result<()> {
self.write_str(s)
}
fn flush(&mut self) -> io::Result<()> {
self.flush()
}
}
| move_to_first_column | identifier_name |
terminal.rs | //! Provides a low-level terminal interface
use std::io;
use std::time::Duration;
use mortal::{self, PrepareConfig, PrepareState, TerminalReadGuard, TerminalWriteGuard};
use crate::sys;
pub use mortal::{CursorMode, Signal, SignalSet, Size};
/// Default `Terminal` interface
pub struct DefaultTerminal(mortal::Terminal);
/// Represents the result of a `Terminal` read operation
pub enum RawRead {
/// `n` bytes were read from the device
Bytes(usize),
/// The terminal window was resized
Resize(Size),
/// A signal was received while waiting for input
Signal(Signal),
}
/// Defines a low-level interface to the terminal
pub trait Terminal: Sized + Send + Sync {
// TODO: When generic associated types are implemented (and stabilized),
// boxed trait objects may be replaced by `Reader` and `Writer`.
/// Returned by `prepare`; passed to `restore` to restore state.
type PrepareState;
/*
/// Holds an exclusive read lock and provides read operations
type Reader: TerminalReader;
/// Holds an exclusive write lock and provides write operations
type Writer: TerminalWriter;
*/
/// Returns the name of the terminal.
fn name(&self) -> &str;
/// Acquires a lock on terminal read operations and returns a value holding
/// that lock and granting access to such operations.
///
/// The lock must not be released until the returned value is dropped.
fn lock_read<'a>(&'a self) -> Box<dyn TerminalReader<Self> + 'a>;
/// Acquires a lock on terminal write operations and returns a value holding
/// that lock and granting access to such operations.
///
/// The lock must not be released until the returned value is dropped.
fn lock_write<'a>(&'a self) -> Box<dyn TerminalWriter<Self> + 'a>;
}
/// Holds a lock on `Terminal` read operations
pub trait TerminalReader<Term: Terminal> {
/// Prepares the terminal for line reading and editing operations.
///
/// If `block_signals` is `true`, the terminal will be configured to treat
/// special characters that would otherwise be interpreted as signals as
/// their literal value.
///
/// If `block_signals` is `false`, a signal contained in the `report_signals`
/// set may be returned.
///
/// # Notes
///
/// This method may be called more than once. However, if the state values
/// are not restored in the reverse of the order in which they were created,
/// the state of the underlying terminal device becomes undefined.
fn prepare(&mut self, block_signals: bool, report_signals: SignalSet)
-> io::Result<Term::PrepareState>;
/// Like `prepare`, but called when the write lock is already held.
///
/// # Safety
///
/// This method must be called with a `TerminalWriter` instance returned
/// by the same `Terminal` instance to which this `TerminalReader` belongs.
unsafe fn prepare_with_lock(&mut self, lock: &mut dyn TerminalWriter<Term>,
block_signals: bool, report_signals: SignalSet)
-> io::Result<Term::PrepareState>;
/// Restores the terminal state using the given state data.
fn restore(&mut self, state: Term::PrepareState) -> io::Result<()>;
/// Like `restore`, but called when the write lock is already held.
///
/// # Safety
///
/// This method must be called with a `TerminalWriter` instance returned
/// by the same `Terminal` instance to which this `TerminalReader` belongs.
unsafe fn restore_with_lock(&mut self, lock: &mut dyn TerminalWriter<Term>,
state: Term::PrepareState) -> io::Result<()>;
/// Reads some input from the terminal and appends it to the given buffer.
fn read(&mut self, buf: &mut Vec<u8>) -> io::Result<RawRead>;
/// Waits up to `timeout` for user input. If `timeout` is `None`, waits indefinitely.
///
/// Returns `Ok(true)` if input becomes available within the given timeout
/// or if a signal is received.
///
/// Returns `Ok(false)` if the timeout expires before input becomes available.
fn wait_for_input(&mut self, timeout: Option<Duration>) -> io::Result<bool>;
}
/// Holds a lock on `Terminal` write operations
pub trait TerminalWriter<Term: Terminal> {
/// Returns the size of the terminal window
fn size(&self) -> io::Result<Size>;
/// Presents a clear terminal screen, with cursor at first row, first column.
///
/// If the terminal possesses a scrolling window over a buffer, this shall
/// have the effect of moving the visible window down such that it shows
/// an empty view of the buffer, preserving some or all of existing buffer
/// contents, where possible.
fn clear_screen(&mut self) -> io::Result<()>;
/// Clears characters on the line occupied by the cursor, beginning with the
/// cursor and ending at the end of the line. Also clears all characters on
/// all lines after the cursor.
fn clear_to_screen_end(&mut self) -> io::Result<()>;
/// Moves the cursor up `n` cells; `n` may be zero.
fn move_up(&mut self, n: usize) -> io::Result<()>;
/// Moves the cursor down `n` cells; `n` may be zero.
fn move_down(&mut self, n: usize) -> io::Result<()>;
/// Moves the cursor left `n` cells; `n` may be zero.
fn move_left(&mut self, n: usize) -> io::Result<()>;
/// Moves the cursor right `n` cells; `n` may be zero.
fn move_right(&mut self, n: usize) -> io::Result<()>;
/// Moves the cursor to the first column of the current line
fn move_to_first_column(&mut self) -> io::Result<()>;
/// Set the current cursor mode
fn set_cursor_mode(&mut self, mode: CursorMode) -> io::Result<()>;
/// Writes output to the terminal.
///
/// For each carriage return `'\r'` written to the terminal, the cursor
/// should be moved to the first column of the current line.
///
/// For each newline `'\n'` written to the terminal, the cursor should
/// be moved to the first column of the following line.
///
/// The terminal interface shall not automatically move the cursor to the next
/// line when `write` causes a character to be written to the final column.
fn write(&mut self, s: &str) -> io::Result<()>;
/// Flushes any currently buffered output data.
///
/// `TerminalWriter` instances might not buffer data on all systems.
///
/// Data must be flushed when the `TerminalWriter` instance is dropped.
fn flush(&mut self) -> io::Result<()>;
}
impl DefaultTerminal {
/// Opens access to the terminal device associated with standard output.
pub fn new() -> io::Result<DefaultTerminal> {
mortal::Terminal::new().map(DefaultTerminal)
}
/// Opens access to the terminal device associated with standard error.
pub fn stderr() -> io::Result<DefaultTerminal> {
mortal::Terminal::stderr().map(DefaultTerminal)
}
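// Note: this cast is sound only when `writer` is the boxed
// `TerminalWriteGuard` returned by this terminal's `lock_write`, which is
// what the `unsafe` contracts on `prepare_with_lock`/`restore_with_lock`
// require callers to guarantee.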
unsafe fn cast_writer<'a>(writer: &'a mut dyn TerminalWriter<Self>)
-> &'a mut TerminalWriteGuard<'a> {
&mut *(writer as *mut _ as *mut TerminalWriteGuard)
}
}
impl Terminal for DefaultTerminal {
type PrepareState = PrepareState;
fn name(&self) -> &str {
self.0.name()
}
fn lock_read<'a>(&'a self) -> Box<dyn TerminalReader<Self> + 'a> {
Box::new(self.0.lock_read().unwrap())
}
fn lock_write<'a>(&'a self) -> Box<dyn TerminalWriter<Self> + 'a> {
Box::new(self.0.lock_write().unwrap())
}
}
impl<'a> TerminalReader<DefaultTerminal> for TerminalReadGuard<'a> {
fn prepare(&mut self, block_signals: bool, report_signals: SignalSet)
-> io::Result<PrepareState> {
self.prepare(PrepareConfig{
block_signals,
enable_control_flow: !block_signals,
enable_keypad: false,
report_signals,
.. PrepareConfig::default()
})
}
unsafe fn prepare_with_lock(&mut self,
lock: &mut dyn TerminalWriter<DefaultTerminal>,
block_signals: bool, report_signals: SignalSet)
-> io::Result<PrepareState> {
let lock = DefaultTerminal::cast_writer(lock);
self.prepare_with_lock(lock, PrepareConfig{
block_signals,
enable_control_flow: !block_signals,
enable_keypad: false,
report_signals,
.. PrepareConfig::default()
})
}
fn restore(&mut self, state: PrepareState) -> io::Result<()> {
self.restore(state)
}
unsafe fn restore_with_lock(&mut self,
lock: &mut dyn TerminalWriter<DefaultTerminal>, state: PrepareState)
-> io::Result<()> {
let lock = DefaultTerminal::cast_writer(lock);
self.restore_with_lock(lock, state)
}
fn read(&mut self, buf: &mut Vec<u8>) -> io::Result<RawRead> {
sys::terminal_read(self, buf)
}
fn wait_for_input(&mut self, timeout: Option<Duration>) -> io::Result<bool> {
self.wait_event(timeout)
}
}
impl<'a> TerminalWriter<DefaultTerminal> for TerminalWriteGuard<'a> {
fn size(&self) -> io::Result<Size> {
self.size()
}
fn clear_screen(&mut self) -> io::Result<()> {
self.clear_screen()
}
fn clear_to_screen_end(&mut self) -> io::Result<()> {
self.clear_to_screen_end()
}
fn move_up(&mut self, n: usize) -> io::Result<()> {
self.move_up(n)
}
fn move_down(&mut self, n: usize) -> io::Result<()> |
fn move_left(&mut self, n: usize) -> io::Result<()> {
self.move_left(n)
}
fn move_right(&mut self, n: usize) -> io::Result<()> {
self.move_right(n)
}
fn move_to_first_column(&mut self) -> io::Result<()> {
self.move_to_first_column()
}
fn set_cursor_mode(&mut self, mode: CursorMode) -> io::Result<()> {
self.set_cursor_mode(mode)
}
fn write(&mut self, s: &str) -> io::Result<()> {
self.write_str(s)
}
fn flush(&mut self) -> io::Result<()> {
self.flush()
}
}
| {
self.move_down(n)
} | identifier_body |
terminal.rs | //! Provides a low-level terminal interface
use std::io;
use std::time::Duration;
use mortal::{self, PrepareConfig, PrepareState, TerminalReadGuard, TerminalWriteGuard};
use crate::sys;
pub use mortal::{CursorMode, Signal, SignalSet, Size};
/// Default `Terminal` interface
pub struct DefaultTerminal(mortal::Terminal);
/// Represents the result of a `Terminal` read operation
pub enum RawRead {
/// `n` bytes were read from the device
Bytes(usize),
/// The terminal window was resized
Resize(Size),
/// A signal was received while waiting for input
Signal(Signal),
}
/// Defines a low-level interface to the terminal
pub trait Terminal: Sized + Send + Sync {
// TODO: When generic associated types are implemented (and stabilized),
// boxed trait objects may be replaced by `Reader` and `Writer`.
/// Returned by `prepare`; passed to `restore` to restore state.
type PrepareState;
/*
/// Holds an exclusive read lock and provides read operations
type Reader: TerminalReader;
/// Holds an exclusive write lock and provides write operations
type Writer: TerminalWriter;
*/
/// Returns the name of the terminal.
fn name(&self) -> &str;
/// Acquires a lock on terminal read operations and returns a value holding
/// that lock and granting access to such operations.
///
/// The lock must not be released until the returned value is dropped.
fn lock_read<'a>(&'a self) -> Box<dyn TerminalReader<Self> + 'a>;
/// Acquires a lock on terminal write operations and returns a value holding
/// that lock and granting access to such operations.
///
/// The lock must not be released until the returned value is dropped.
fn lock_write<'a>(&'a self) -> Box<dyn TerminalWriter<Self> + 'a>;
}
/// Holds a lock on `Terminal` read operations
pub trait TerminalReader<Term: Terminal> {
/// Prepares the terminal for line reading and editing operations.
///
/// If `block_signals` is `true`, the terminal will be configured to treat
/// special characters that would otherwise be interpreted as signals as
/// their literal value.
///
/// If `block_signals` is `false`, a signal contained in the `report_signals`
/// set may be returned.
///
/// # Notes
///
/// This method may be called more than once. However, if the state values
/// are not restored in the reverse of the order in which they were created,
/// the state of the underlying terminal device becomes undefined.
fn prepare(&mut self, block_signals: bool, report_signals: SignalSet)
-> io::Result<Term::PrepareState>;
/// Like `prepare`, but called when the write lock is already held.
///
/// # Safety
///
/// This method must be called with a `TerminalWriter` instance returned
/// by the same `Terminal` instance to which this `TerminalReader` belongs.
unsafe fn prepare_with_lock(&mut self, lock: &mut dyn TerminalWriter<Term>,
block_signals: bool, report_signals: SignalSet)
-> io::Result<Term::PrepareState>;
/// Restores the terminal state using the given state data.
fn restore(&mut self, state: Term::PrepareState) -> io::Result<()>;
/// Like `restore`, but called when the write lock is already held.
///
/// # Safety
///
/// This method must be called with a `TerminalWriter` instance returned
/// by the same `Terminal` instance to which this `TerminalReader` belongs.
unsafe fn restore_with_lock(&mut self, lock: &mut dyn TerminalWriter<Term>,
state: Term::PrepareState) -> io::Result<()>;
/// Reads some input from the terminal and appends it to the given buffer.
fn read(&mut self, buf: &mut Vec<u8>) -> io::Result<RawRead>;
/// Waits up to `timeout` for user input. If `timeout` is `None`, waits indefinitely.
///
/// Returns `Ok(true)` if input becomes available within the given timeout
/// or if a signal is received.
///
/// Returns `Ok(false)` if the timeout expires before input becomes available.
fn wait_for_input(&mut self, timeout: Option<Duration>) -> io::Result<bool>;
}
/// Holds a lock on `Terminal` write operations
pub trait TerminalWriter<Term: Terminal> {
/// Returns the size of the terminal window
fn size(&self) -> io::Result<Size>;
/// Presents a clear terminal screen, with cursor at first row, first column.
///
/// If the terminal possesses a scrolling window over a buffer, this shall
/// have the effect of moving the visible window down such that it shows
/// an empty view of the buffer, preserving some or all of existing buffer
/// contents, where possible.
fn clear_screen(&mut self) -> io::Result<()>;
/// Clears characters on the line occupied by the cursor, beginning with the
/// cursor and ending at the end of the line. Also clears all characters on
/// all lines after the cursor.
fn clear_to_screen_end(&mut self) -> io::Result<()>;
/// Moves the cursor up `n` cells; `n` may be zero.
fn move_up(&mut self, n: usize) -> io::Result<()>;
/// Moves the cursor down `n` cells; `n` may be zero.
fn move_down(&mut self, n: usize) -> io::Result<()>;
/// Moves the cursor left `n` cells; `n` may be zero.
fn move_left(&mut self, n: usize) -> io::Result<()>;
/// Moves the cursor right `n` cells; `n` may be zero.
fn move_right(&mut self, n: usize) -> io::Result<()>;
/// Moves the cursor to the first column of the current line
fn move_to_first_column(&mut self) -> io::Result<()>;
/// Set the current cursor mode
fn set_cursor_mode(&mut self, mode: CursorMode) -> io::Result<()>;
/// Writes output to the terminal.
///
/// For each carriage return `'\r'` written to the terminal, the cursor
/// should be moved to the first column of the current line.
///
/// For each newline `'\n'` written to the terminal, the cursor should
/// be moved to the first column of the following line. |
/// Flushes any currently buffered output data.
///
/// `TerminalWriter` instances might not buffer data on all systems.
///
/// Data must be flushed when the `TerminalWriter` instance is dropped.
fn flush(&mut self) -> io::Result<()>;
}
impl DefaultTerminal {
/// Opens access to the terminal device associated with standard output.
pub fn new() -> io::Result<DefaultTerminal> {
mortal::Terminal::new().map(DefaultTerminal)
}
/// Opens access to the terminal device associated with standard error.
pub fn stderr() -> io::Result<DefaultTerminal> {
mortal::Terminal::stderr().map(DefaultTerminal)
}
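// Note: this cast is sound only when `writer` is the boxed
// `TerminalWriteGuard` returned by this terminal's `lock_write`, which is
// what the `unsafe` contracts on `prepare_with_lock`/`restore_with_lock`
// require callers to guarantee.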
unsafe fn cast_writer<'a>(writer: &'a mut dyn TerminalWriter<Self>)
-> &'a mut TerminalWriteGuard<'a> {
&mut *(writer as *mut _ as *mut TerminalWriteGuard)
}
}
impl Terminal for DefaultTerminal {
type PrepareState = PrepareState;
fn name(&self) -> &str {
self.0.name()
}
fn lock_read<'a>(&'a self) -> Box<dyn TerminalReader<Self> + 'a> {
Box::new(self.0.lock_read().unwrap())
}
fn lock_write<'a>(&'a self) -> Box<dyn TerminalWriter<Self> + 'a> {
Box::new(self.0.lock_write().unwrap())
}
}
impl<'a> TerminalReader<DefaultTerminal> for TerminalReadGuard<'a> {
fn prepare(&mut self, block_signals: bool, report_signals: SignalSet)
-> io::Result<PrepareState> {
self.prepare(PrepareConfig{
block_signals,
enable_control_flow: !block_signals,
enable_keypad: false,
report_signals,
.. PrepareConfig::default()
})
}
unsafe fn prepare_with_lock(&mut self,
lock: &mut dyn TerminalWriter<DefaultTerminal>,
block_signals: bool, report_signals: SignalSet)
-> io::Result<PrepareState> {
let lock = DefaultTerminal::cast_writer(lock);
self.prepare_with_lock(lock, PrepareConfig{
block_signals,
enable_control_flow: !block_signals,
enable_keypad: false,
report_signals,
.. PrepareConfig::default()
})
}
fn restore(&mut self, state: PrepareState) -> io::Result<()> {
self.restore(state)
}
unsafe fn restore_with_lock(&mut self,
lock: &mut dyn TerminalWriter<DefaultTerminal>, state: PrepareState)
-> io::Result<()> {
let lock = DefaultTerminal::cast_writer(lock);
self.restore_with_lock(lock, state)
}
fn read(&mut self, buf: &mut Vec<u8>) -> io::Result<RawRead> {
sys::terminal_read(self, buf)
}
fn wait_for_input(&mut self, timeout: Option<Duration>) -> io::Result<bool> {
self.wait_event(timeout)
}
}
impl<'a> TerminalWriter<DefaultTerminal> for TerminalWriteGuard<'a> {
fn size(&self) -> io::Result<Size> {
self.size()
}
fn clear_screen(&mut self) -> io::Result<()> {
self.clear_screen()
}
fn clear_to_screen_end(&mut self) -> io::Result<()> {
self.clear_to_screen_end()
}
fn move_up(&mut self, n: usize) -> io::Result<()> {
self.move_up(n)
}
fn move_down(&mut self, n: usize) -> io::Result<()> {
self.move_down(n)
}
fn move_left(&mut self, n: usize) -> io::Result<()> {
self.move_left(n)
}
fn move_right(&mut self, n: usize) -> io::Result<()> {
self.move_right(n)
}
fn move_to_first_column(&mut self) -> io::Result<()> {
self.move_to_first_column()
}
fn set_cursor_mode(&mut self, mode: CursorMode) -> io::Result<()> {
self.set_cursor_mode(mode)
}
fn write(&mut self, s: &str) -> io::Result<()> {
self.write_str(s)
}
fn flush(&mut self) -> io::Result<()> {
self.flush()
}
} | ///
/// The terminal interface shall not automatically move the cursor to the next
/// line when `write` causes a character to be written to the final column.
fn write(&mut self, s: &str) -> io::Result<()>; | random_line_split |
passThrough.go | //
// Copyright 2019-2020 Nestybox, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
package implementations
import (
"io"
"os"
"path/filepath"
"strconv"
"strings"
"sync"
"github.com/nestybox/sysbox-fs/domain"
"github.com/sirupsen/logrus"
)
//
// Pass-through handler
//
// Handler for all non-emulated resources. It does a simple "passthrough" of the
// access by entering all the namespaces of the process that is doing the I/O
// operation and performing that operation on its behalf.
//
// Currently, this handler serves non-emulated resources within the /proc/sys
// subtree, but there's nothing specific to this path in this handler's
// implementation (note that the Path attribute is set to "*"), so it could
// be used for pass-through operations in other subtrees too.
//
type PassThrough struct {
domain.HandlerBase
}
var PassThrough_Handler = &PassThrough{
domain.HandlerBase{
Name: "PassThrough",
Path: "*",
Enabled: true,
},
}
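// Note: every method below follows the same round-trip pattern: build an
// NSenterMessage, dispatch it with SendRequestEvent() so the nsenter agent
// enters the target process' namespaces, then unpack the payload (or error,
// when Type is ErrorResponse) from ReceiveResponseEvent().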
func (h *PassThrough) Lookup(
n domain.IOnodeIface,
req *domain.HandlerRequest) (os.FileInfo, error) {
logrus.Debugf("Executing Lookup() for req-id: %#x, handler: %s, resource: %s",
req.ID, h.Name, n.Name())
// Create nsenterEvent to initiate interaction with container namespaces.
nss := h.Service.NSenterService()
event := nss.NewEvent(
req.Pid,
&domain.AllNSsButMount,
&domain.NSenterMessage{
Type: domain.LookupRequest,
Payload: &domain.LookupPayload{
Entry: n.Path(),
},
},
nil,
false,
)
// Launch nsenter-event.
err := nss.SendRequestEvent(event)
if err != nil {
return nil, err
}
// Obtain nsenter-event response.
responseMsg := nss.ReceiveResponseEvent(event)
if responseMsg.Type == domain.ErrorResponse {
return nil, responseMsg.Payload.(error)
}
info := responseMsg.Payload.(domain.FileInfo)
return info, nil
}
func (h *PassThrough) Open(
n domain.IOnodeIface,
req *domain.HandlerRequest) error {
logrus.Debugf("Executing Open() for req-id: %#x, handler: %s, resource: %s",
req.ID, h.Name, n.Name())
// Create nsenterEvent to initiate interaction with container namespaces.
nss := h.Service.NSenterService()
event := nss.NewEvent(
req.Pid,
&domain.AllNSsButMount,
&domain.NSenterMessage{
Type: domain.OpenFileRequest,
Payload: &domain.OpenFilePayload{
File: n.Path(),
Flags: strconv.Itoa(n.OpenFlags()),
Mode: strconv.Itoa(int(n.OpenMode())),
},
},
nil,
false,
)
// Launch nsenter-event.
err := nss.SendRequestEvent(event)
if err != nil {
return err
}
// Obtain nsenter-event response.
responseMsg := nss.ReceiveResponseEvent(event)
if responseMsg.Type == domain.ErrorResponse {
return responseMsg.Payload.(error)
}
return nil
}
func (h *PassThrough) Read(
n domain.IOnodeIface,
req *domain.HandlerRequest) (int, error) {
var resource = n.Name()
logrus.Debugf("Executing Read() for req-id: %#x, handler: %s, resource: %s",
req.ID, h.Name, resource)
if req.Offset > 0 {
return 0, io.EOF
}
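// These nodes are small and served in a single read; a nonzero offset means
// the caller already consumed the content, so report EOF.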
var (
data string
ok bool
err error
)
path := n.Path()
prs := h.Service.ProcessService()
process := prs.ProcessCreate(req.Pid, req.Uid, req.Gid)
cntr := req.Container
//
// Caching here improves performance by avoiding dispatching the nsenter agent. But
// note that caching only helps processes at the sys container level, not those in
// inner containers or unshared namespaces. To enable caching for those, we would
// need a cache per namespace, which is expensive; we would also need to know when
// a namespace ceases to exist in order to destroy the cache associated with it.
//
if domain.ProcessNsMatch(process, cntr.InitProc()) {
// If this resource is cached, return its data; otherwise fetch its data from the
// host FS and store it in the cache.
cntr.Lock()
data, ok = cntr.Data(path, resource)
if !ok {
data, err = h.fetchFile(n, process)
if err != nil |
cntr.SetData(path, resource, data)
}
cntr.Unlock()
} else {
data, err = h.fetchFile(n, process)
if err != nil {
return 0, err
}
}
data += "\n"
return copyResultBuffer(req.Data, []byte(data))
}
func (h *PassThrough) Write(
n domain.IOnodeIface,
req *domain.HandlerRequest) (int, error) {
var resource = n.Name()
logrus.Debugf("Executing Write() for req-id: %#x, handler: %s, resource: %s",
req.ID, h.Name, resource)
path := n.Path()
cntr := req.Container
newContent := strings.TrimSpace(string(req.Data))
prs := h.Service.ProcessService()
process := prs.ProcessCreate(req.Pid, req.Uid, req.Gid)
// If the write op originates from a process within a registered sys container
// (i.e., one that fully matches its namespaces), then store the data in the cache
// and do a write-through to the host FS. Otherwise just do the write-through.
if domain.ProcessNsMatch(process, cntr.InitProc()) {
cntr.Lock()
if err := h.pushFile(n, process, newContent); err != nil {
cntr.Unlock()
return 0, err
}
cntr.SetData(path, resource, newContent)
cntr.Unlock()
} else {
if err := h.pushFile(n, process, newContent); err != nil {
return 0, err
}
}
return len(req.Data), nil
}
func (h *PassThrough) ReadDirAll(
n domain.IOnodeIface,
req *domain.HandlerRequest) ([]os.FileInfo, error) {
logrus.Debugf("Executing ReadDirAll() for req-id: %#x, handler: %s, resource: %s",
req.ID, h.Name, n.Name())
// Create nsenterEvent to initiate interaction with container namespaces.
nss := h.Service.NSenterService()
event := nss.NewEvent(
req.Pid,
&domain.AllNSsButMount,
&domain.NSenterMessage{
Type: domain.ReadDirRequest,
Payload: &domain.ReadDirPayload{
Dir: n.Path(),
},
},
nil,
false,
)
// Launch nsenter-event.
err := nss.SendRequestEvent(event)
if err != nil {
return nil, err
}
// Obtain nsenter-event response.
responseMsg := nss.ReceiveResponseEvent(event)
if responseMsg.Type == domain.ErrorResponse {
return nil, responseMsg.Payload.(error)
}
var osFileEntries = make([]os.FileInfo, 0)
// Transform the event-response payload into a FileInfo slice. Notice that to
// convert a []T1 to a []T2, we must iterate through the elements and do the
// conversion one element at a time.
dirEntries := responseMsg.Payload.([]domain.FileInfo)
for _, v := range dirEntries {
osFileEntries = append(osFileEntries, v)
}
return osFileEntries, nil
}
func (h *PassThrough) Setattr(
n domain.IOnodeIface,
req *domain.HandlerRequest) error {
logrus.Debugf("Executing Setattr() for req-id: %#x, handler: %s, resource: %s",
req.ID, h.Name, n.Name())
// Create nsenterEvent to initiate interaction with container namespaces.
nss := h.Service.NSenterService()
event := nss.NewEvent(
req.Pid,
&domain.AllNSsButMount,
&domain.NSenterMessage{
Type: domain.OpenFileRequest,
Payload: &domain.OpenFilePayload{
File: n.Path(),
Flags: strconv.Itoa(n.OpenFlags()),
Mode: strconv.Itoa(int(n.OpenMode())),
},
},
nil,
false,
)
// Launch nsenter-event.
err := nss.SendRequestEvent(event)
if err != nil {
return err
}
// Obtain nsenter-event response.
responseMsg := nss.ReceiveResponseEvent(event)
if responseMsg.Type == domain.ErrorResponse {
return responseMsg.Payload.(error)
}
return nil
}
// Auxiliary method to fetch the content of any given file within a container.
func (h *PassThrough) fetchFile(
n domain.IOnodeIface,
process domain.ProcessIface) (string, error) {
// Create nsenterEvent to initiate interaction with container namespaces.
nss := h.Service.NSenterService()
event := nss.NewEvent(
process.Pid(),
&domain.AllNSsButMount,
&domain.NSenterMessage{
Type: domain.ReadFileRequest,
Payload: &domain.ReadFilePayload{
File: n.Path(),
},
},
nil,
false,
)
// Launch nsenter-event to obtain file state within container
// namespaces.
err := nss.SendRequestEvent(event)
if err != nil {
return "", err
}
// Obtain nsenter-event response.
responseMsg := nss.ReceiveResponseEvent(event)
if responseMsg.Type == domain.ErrorResponse {
return "", responseMsg.Payload.(error)
}
info := responseMsg.Payload.(string)
return info, nil
}
// Auxiliary method to inject content into any given file within a container.
func (h *PassThrough) pushFile(
n domain.IOnodeIface,
process domain.ProcessIface,
s string) error {
// Create nsenterEvent to initiate interaction with container namespaces.
nss := h.Service.NSenterService()
event := nss.NewEvent(
process.Pid(),
&domain.AllNSsButMount,
&domain.NSenterMessage{
Type: domain.WriteFileRequest,
Payload: &domain.WriteFilePayload{
File: n.Path(),
Content: s,
},
},
nil,
false,
)
// Launch nsenter-event to write file state within container
// namespaces.
err := nss.SendRequestEvent(event)
if err != nil {
return err
}
// Obtain nsenter-event response.
responseMsg := nss.ReceiveResponseEvent(event)
if responseMsg.Type == domain.ErrorResponse {
return responseMsg.Payload.(error)
}
return nil
}
func (h *PassThrough) GetName() string {
return h.Name
}
func (h *PassThrough) GetPath() string {
return h.Path
}
func (h *PassThrough) GetService() domain.HandlerServiceIface {
return h.Service
}
func (h *PassThrough) GetEnabled() bool {
return h.Enabled
}
func (h *PassThrough) SetEnabled(b bool) {
h.Enabled = b
}
func (h *PassThrough) GetResourcesList() []string {
var resources []string
for resourceKey, resource := range h.EmuResourceMap {
resource.Mutex.Lock()
if !resource.Enabled {
resource.Mutex.Unlock()
continue
}
resource.Mutex.Unlock()
resources = append(resources, filepath.Join(h.GetPath(), resourceKey))
}
return resources
}
func (h *PassThrough) GetResourceMutex(n domain.IOnodeIface) *sync.Mutex {
resource, ok := h.EmuResourceMap[n.Name()]
if !ok {
return nil
}
return &resource.Mutex
}
func (h *PassThrough) SetService(hs domain.HandlerServiceIface) {
h.Service = hs
}
| {
cntr.Unlock()
return 0, err
} | conditional_block |
passThrough.go | //
// Copyright 2019-2020 Nestybox, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
package implementations
import (
"io"
"os"
"path/filepath"
"strconv"
"strings"
"sync"
"github.com/nestybox/sysbox-fs/domain"
"github.com/sirupsen/logrus"
)
//
// Pass-through handler
//
// Handler for all non-emulated resources. It does a simple "passthrough" of the
// access by entering all the namespaces of the process that is doing the I/O
// operation and performing that operation on its behalf.
//
// Currently, this handler serves non-emulated resources within the /proc/sys
// subtree, but there's nothing specific to this path in this handler's
// implementation (note that the Path attribute is set to "*"), so it could
// be used for pass-through operations in other subtrees too.
//
type PassThrough struct {
domain.HandlerBase
}
var PassThrough_Handler = &PassThrough{
domain.HandlerBase{
Name: "PassThrough",
Path: "*",
Enabled: true,
},
}
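// Note: every method below follows the same round-trip pattern: build an
// NSenterMessage, dispatch it with SendRequestEvent() so the nsenter agent
// enters the target process' namespaces, then unpack the payload (or error,
// when Type is ErrorResponse) from ReceiveResponseEvent().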
func (h *PassThrough) Lookup(
n domain.IOnodeIface,
req *domain.HandlerRequest) (os.FileInfo, error) {
logrus.Debugf("Executing Lookup() for req-id: %#x, handler: %s, resource: %s",
req.ID, h.Name, n.Name())
// Create nsenterEvent to initiate interaction with container namespaces.
nss := h.Service.NSenterService()
event := nss.NewEvent(
req.Pid,
&domain.AllNSsButMount,
&domain.NSenterMessage{
Type: domain.LookupRequest,
Payload: &domain.LookupPayload{
Entry: n.Path(),
},
},
nil,
false,
)
// Launch nsenter-event.
err := nss.SendRequestEvent(event)
if err != nil {
return nil, err
}
// Obtain nsenter-event response.
responseMsg := nss.ReceiveResponseEvent(event)
if responseMsg.Type == domain.ErrorResponse {
return nil, responseMsg.Payload.(error)
}
info := responseMsg.Payload.(domain.FileInfo)
return info, nil
}
func (h *PassThrough) Open(
n domain.IOnodeIface,
req *domain.HandlerRequest) error {
logrus.Debugf("Executing Open() for req-id: %#x, handler: %s, resource: %s",
req.ID, h.Name, n.Name())
// Create nsenterEvent to initiate interaction with container namespaces.
nss := h.Service.NSenterService()
event := nss.NewEvent(
req.Pid,
&domain.AllNSsButMount,
&domain.NSenterMessage{
Type: domain.OpenFileRequest,
Payload: &domain.OpenFilePayload{
File: n.Path(),
Flags: strconv.Itoa(n.OpenFlags()),
Mode: strconv.Itoa(int(n.OpenMode())),
},
},
nil,
false,
)
// Launch nsenter-event.
err := nss.SendRequestEvent(event)
if err != nil {
return err
}
// Obtain nsenter-event response.
responseMsg := nss.ReceiveResponseEvent(event)
if responseMsg.Type == domain.ErrorResponse {
return responseMsg.Payload.(error)
}
return nil
}
func (h *PassThrough) Read(
n domain.IOnodeIface,
req *domain.HandlerRequest) (int, error) {
var resource = n.Name()
logrus.Debugf("Executing Read() for req-id: %#x, handler: %s, resource: %s",
req.ID, h.Name, resource) | return 0, io.EOF
}
var (
data string
ok bool
err error
)
path := n.Path()
prs := h.Service.ProcessService()
process := prs.ProcessCreate(req.Pid, req.Uid, req.Gid)
cntr := req.Container
//
// Caching here improves performance by avoiding dispatching the nsenter agent. But
// note that caching only helps processes at the sys container level, not those in
// inner containers or unshared namespaces. To enable caching for those, we would
// need a cache per namespace, which is expensive; we would also need to know when
// a namespace ceases to exist in order to destroy the cache associated with it.
//
if domain.ProcessNsMatch(process, cntr.InitProc()) {
// If this resource is cached, return its data; otherwise fetch its data from the
// host FS and store it in the cache.
cntr.Lock()
data, ok = cntr.Data(path, resource)
if !ok {
data, err = h.fetchFile(n, process)
if err != nil {
cntr.Unlock()
return 0, err
}
cntr.SetData(path, resource, data)
}
cntr.Unlock()
} else {
data, err = h.fetchFile(n, process)
if err != nil {
return 0, err
}
}
data += "\n"
return copyResultBuffer(req.Data, []byte(data))
}
func (h *PassThrough) Write(
n domain.IOnodeIface,
req *domain.HandlerRequest) (int, error) {
var resource = n.Name()
logrus.Debugf("Executing Write() for req-id: %#x, handler: %s, resource: %s",
req.ID, h.Name, resource)
path := n.Path()
cntr := req.Container
newContent := strings.TrimSpace(string(req.Data))
prs := h.Service.ProcessService()
process := prs.ProcessCreate(req.Pid, req.Uid, req.Gid)
// If the write op originates from a process within a registered sys container
// (i.e., one that fully matches its namespaces), then store the data in the cache
// and do a write-through to the host FS. Otherwise just do the write-through.
if domain.ProcessNsMatch(process, cntr.InitProc()) {
cntr.Lock()
if err := h.pushFile(n, process, newContent); err != nil {
cntr.Unlock()
return 0, err
}
cntr.SetData(path, resource, newContent)
cntr.Unlock()
} else {
if err := h.pushFile(n, process, newContent); err != nil {
return 0, err
}
}
return len(req.Data), nil
}
func (h *PassThrough) ReadDirAll(
n domain.IOnodeIface,
req *domain.HandlerRequest) ([]os.FileInfo, error) {
logrus.Debugf("Executing ReadDirAll() for req-id: %#x, handler: %s, resource: %s",
req.ID, h.Name, n.Name())
// Create nsenterEvent to initiate interaction with container namespaces.
nss := h.Service.NSenterService()
event := nss.NewEvent(
req.Pid,
&domain.AllNSsButMount,
&domain.NSenterMessage{
Type: domain.ReadDirRequest,
Payload: &domain.ReadDirPayload{
Dir: n.Path(),
},
},
nil,
false,
)
// Launch nsenter-event.
err := nss.SendRequestEvent(event)
if err != nil {
return nil, err
}
// Obtain nsenter-event response.
responseMsg := nss.ReceiveResponseEvent(event)
if responseMsg.Type == domain.ErrorResponse {
return nil, responseMsg.Payload.(error)
}
var osFileEntries = make([]os.FileInfo, 0)
// Transform the event-response payload into a FileInfo slice. Notice that to
// convert a []T1 to a []T2, we must iterate through the elements and do the
// conversion one element at a time.
dirEntries := responseMsg.Payload.([]domain.FileInfo)
for _, v := range dirEntries {
osFileEntries = append(osFileEntries, v)
}
return osFileEntries, nil
}
func (h *PassThrough) Setattr(
n domain.IOnodeIface,
req *domain.HandlerRequest) error {
logrus.Debugf("Executing Setattr() for req-id: %#x, handler: %s, resource: %s",
req.ID, h.Name, n.Name())
// Create nsenterEvent to initiate interaction with container namespaces.
nss := h.Service.NSenterService()
event := nss.NewEvent(
req.Pid,
&domain.AllNSsButMount,
&domain.NSenterMessage{
Type: domain.OpenFileRequest,
Payload: &domain.OpenFilePayload{
File: n.Path(),
Flags: strconv.Itoa(n.OpenFlags()),
Mode: strconv.Itoa(int(n.OpenMode())),
},
},
nil,
false,
)
// Launch nsenter-event.
err := nss.SendRequestEvent(event)
if err != nil {
return err
}
// Obtain nsenter-event response.
responseMsg := nss.ReceiveResponseEvent(event)
if responseMsg.Type == domain.ErrorResponse {
return responseMsg.Payload.(error)
}
return nil
}
// Auxiliary method to fetch the content of any given file within a container.
func (h *PassThrough) fetchFile(
n domain.IOnodeIface,
process domain.ProcessIface) (string, error) {
// Create nsenterEvent to initiate interaction with container namespaces.
nss := h.Service.NSenterService()
event := nss.NewEvent(
process.Pid(),
&domain.AllNSsButMount,
&domain.NSenterMessage{
Type: domain.ReadFileRequest,
Payload: &domain.ReadFilePayload{
File: n.Path(),
},
},
nil,
false,
)
// Launch nsenter-event to obtain file state within container
// namespaces.
err := nss.SendRequestEvent(event)
if err != nil {
return "", err
}
// Obtain nsenter-event response.
responseMsg := nss.ReceiveResponseEvent(event)
if responseMsg.Type == domain.ErrorResponse {
return "", responseMsg.Payload.(error)
}
info := responseMsg.Payload.(string)
return info, nil
}
// Auxiliary method to inject content into any given file within a container.
func (h *PassThrough) pushFile(
n domain.IOnodeIface,
process domain.ProcessIface,
s string) error {
// Create nsenterEvent to initiate interaction with container namespaces.
nss := h.Service.NSenterService()
event := nss.NewEvent(
process.Pid(),
&domain.AllNSsButMount,
&domain.NSenterMessage{
Type: domain.WriteFileRequest,
Payload: &domain.WriteFilePayload{
File: n.Path(),
Content: s,
},
},
nil,
false,
)
// Launch nsenter-event to write file state within container
// namespaces.
err := nss.SendRequestEvent(event)
if err != nil {
return err
}
// Obtain nsenter-event response.
responseMsg := nss.ReceiveResponseEvent(event)
if responseMsg.Type == domain.ErrorResponse {
return responseMsg.Payload.(error)
}
return nil
}
func (h *PassThrough) GetName() string {
return h.Name
}
func (h *PassThrough) GetPath() string {
return h.Path
}
func (h *PassThrough) GetService() domain.HandlerServiceIface {
return h.Service
}
func (h *PassThrough) GetEnabled() bool {
return h.Enabled
}
func (h *PassThrough) SetEnabled(b bool) {
h.Enabled = b
}
func (h *PassThrough) GetResourcesList() []string {
var resources []string
for resourceKey, resource := range h.EmuResourceMap {
resource.Mutex.Lock()
if !resource.Enabled {
resource.Mutex.Unlock()
continue
}
resource.Mutex.Unlock()
resources = append(resources, filepath.Join(h.GetPath(), resourceKey))
}
return resources
}
func (h *PassThrough) GetResourceMutex(n domain.IOnodeIface) *sync.Mutex {
resource, ok := h.EmuResourceMap[n.Name()]
if !ok {
return nil
}
return &resource.Mutex
}
func (h *PassThrough) SetService(hs domain.HandlerServiceIface) {
h.Service = hs
} |
if req.Offset > 0 { | random_line_split |
passThrough.go | //
// Copyright 2019-2020 Nestybox, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
package implementations
import (
"io"
"os"
"path/filepath"
"strconv"
"strings"
"sync"
"github.com/nestybox/sysbox-fs/domain"
"github.com/sirupsen/logrus"
)
//
// Pass-through handler
//
// Handler for all non-emulated resources. It does a simple "passthrough" of the
// access by entering all the namespaces of the process that is doing the I/O
// operation and performing that operation on its behalf.
//
// Currently, this handler serves non-emulated resources within the /proc/sys
// subtree, but there's nothing specific to this path in this handler's
// implementation (note that the Path attribute is set to "*"), so it could
// be used for pass-through operations in other subtrees too.
//
type PassThrough struct {
domain.HandlerBase
}
var PassThrough_Handler = &PassThrough{
domain.HandlerBase{
Name: "PassThrough",
Path: "*",
Enabled: true,
},
}
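// Note: every method below follows the same round-trip pattern: build an
// NSenterMessage, dispatch it with SendRequestEvent() so the nsenter agent
// enters the target process' namespaces, then unpack the payload (or error,
// when Type is ErrorResponse) from ReceiveResponseEvent().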
func (h *PassThrough) Lookup(
n domain.IOnodeIface,
req *domain.HandlerRequest) (os.FileInfo, error) {
logrus.Debugf("Executing Lookup() for req-id: %#x, handler: %s, resource: %s",
req.ID, h.Name, n.Name())
// Create nsenterEvent to initiate interaction with container namespaces.
nss := h.Service.NSenterService()
event := nss.NewEvent(
req.Pid,
&domain.AllNSsButMount,
&domain.NSenterMessage{
Type: domain.LookupRequest,
Payload: &domain.LookupPayload{
Entry: n.Path(),
},
},
nil,
false,
)
// Launch nsenter-event.
err := nss.SendRequestEvent(event)
if err != nil {
return nil, err
}
// Obtain nsenter-event response.
responseMsg := nss.ReceiveResponseEvent(event)
if responseMsg.Type == domain.ErrorResponse {
return nil, responseMsg.Payload.(error)
}
info := responseMsg.Payload.(domain.FileInfo)
return info, nil
}
func (h *PassThrough) | (
n domain.IOnodeIface,
req *domain.HandlerRequest) error {
logrus.Debugf("Executing Open() for req-id: %#x, handler: %s, resource: %s",
req.ID, h.Name, n.Name())
// Create nsenterEvent to initiate interaction with container namespaces.
nss := h.Service.NSenterService()
event := nss.NewEvent(
req.Pid,
&domain.AllNSsButMount,
&domain.NSenterMessage{
Type: domain.OpenFileRequest,
Payload: &domain.OpenFilePayload{
File: n.Path(),
Flags: strconv.Itoa(n.OpenFlags()),
Mode: strconv.Itoa(int(n.OpenMode())),
},
},
nil,
false,
)
// Launch nsenter-event.
err := nss.SendRequestEvent(event)
if err != nil {
return err
}
// Obtain nsenter-event response.
responseMsg := nss.ReceiveResponseEvent(event)
if responseMsg.Type == domain.ErrorResponse {
return responseMsg.Payload.(error)
}
return nil
}
func (h *PassThrough) Read(
n domain.IOnodeIface,
req *domain.HandlerRequest) (int, error) {
var resource = n.Name()
logrus.Debugf("Executing Read() for req-id: %#x, handler: %s, resource: %s",
req.ID, h.Name, resource)
if req.Offset > 0 {
return 0, io.EOF
}
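// These nodes are small and served in a single read; a nonzero offset means
// the caller already consumed the content, so report EOF.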
var (
data string
ok bool
err error
)
path := n.Path()
prs := h.Service.ProcessService()
process := prs.ProcessCreate(req.Pid, req.Uid, req.Gid)
cntr := req.Container
//
// Caching here improves performance by avoiding dispatching the nsenter agent. But
// note that caching only helps processes at the sys container level, not those in
// inner containers or unshared namespaces. To enable caching for those, we would
// need a cache per namespace, which is expensive; we would also need to know when
// a namespace ceases to exist in order to destroy the cache associated with it.
//
if domain.ProcessNsMatch(process, cntr.InitProc()) {
// If this resource is cached, return its data; otherwise fetch its data from the
// host FS and store it in the cache.
cntr.Lock()
data, ok = cntr.Data(path, resource)
if !ok {
data, err = h.fetchFile(n, process)
if err != nil {
cntr.Unlock()
return 0, err
}
cntr.SetData(path, resource, data)
}
cntr.Unlock()
} else {
data, err = h.fetchFile(n, process)
if err != nil {
return 0, err
}
}
data += "\n"
return copyResultBuffer(req.Data, []byte(data))
}
func (h *PassThrough) Write(
n domain.IOnodeIface,
req *domain.HandlerRequest) (int, error) {
var resource = n.Name()
logrus.Debugf("Executing Write() for req-id: %#x, handler: %s, resource: %s",
req.ID, h.Name, resource)
path := n.Path()
cntr := req.Container
newContent := strings.TrimSpace(string(req.Data))
prs := h.Service.ProcessService()
process := prs.ProcessCreate(req.Pid, req.Uid, req.Gid)
// If the write op originates from a process within a registered sys container
// (i.e., one that fully matches its namespaces), then store the data in the cache
// and do a write-through to the host FS. Otherwise just do the write-through.
if domain.ProcessNsMatch(process, cntr.InitProc()) {
cntr.Lock()
if err := h.pushFile(n, process, newContent); err != nil {
cntr.Unlock()
return 0, err
}
cntr.SetData(path, resource, newContent)
cntr.Unlock()
} else {
if err := h.pushFile(n, process, newContent); err != nil {
return 0, err
}
}
return len(req.Data), nil
}
func (h *PassThrough) ReadDirAll(
n domain.IOnodeIface,
req *domain.HandlerRequest) ([]os.FileInfo, error) {
logrus.Debugf("Executing ReadDirAll() for req-id: %#x, handler: %s, resource: %s",
req.ID, h.Name, n.Name())
// Create nsenterEvent to initiate interaction with container namespaces.
nss := h.Service.NSenterService()
event := nss.NewEvent(
req.Pid,
&domain.AllNSsButMount,
&domain.NSenterMessage{
Type: domain.ReadDirRequest,
Payload: &domain.ReadDirPayload{
Dir: n.Path(),
},
},
nil,
false,
)
// Launch nsenter-event.
err := nss.SendRequestEvent(event)
if err != nil {
return nil, err
}
// Obtain nsenter-event response.
responseMsg := nss.ReceiveResponseEvent(event)
if responseMsg.Type == domain.ErrorResponse {
return nil, responseMsg.Payload.(error)
}
var osFileEntries = make([]os.FileInfo, 0)
// Transform the event-response payload into a FileInfo slice. Notice that to
// convert a []T1 to a []T2, we must iterate through the elements and do the
// conversion one element at a time.
dirEntries := responseMsg.Payload.([]domain.FileInfo)
for _, v := range dirEntries {
osFileEntries = append(osFileEntries, v)
}
return osFileEntries, nil
}
func (h *PassThrough) Setattr(
n domain.IOnodeIface,
req *domain.HandlerRequest) error {
logrus.Debugf("Executing Setattr() for req-id: %#x, handler: %s, resource: %s",
req.ID, h.Name, n.Name())
// Create nsenterEvent to initiate interaction with container namespaces.
nss := h.Service.NSenterService()
event := nss.NewEvent(
req.Pid,
&domain.AllNSsButMount,
&domain.NSenterMessage{
Type: domain.OpenFileRequest,
Payload: &domain.OpenFilePayload{
File: n.Path(),
Flags: strconv.Itoa(n.OpenFlags()),
Mode: strconv.Itoa(int(n.OpenMode())),
},
},
nil,
false,
)
// Launch nsenter-event.
err := nss.SendRequestEvent(event)
if err != nil {
return err
}
// Obtain nsenter-event response.
responseMsg := nss.ReceiveResponseEvent(event)
if responseMsg.Type == domain.ErrorResponse {
return responseMsg.Payload.(error)
}
return nil
}
// Auxiliary method to fetch the content of any given file within a container.
func (h *PassThrough) fetchFile(
n domain.IOnodeIface,
process domain.ProcessIface) (string, error) {
// Create nsenterEvent to initiate interaction with container namespaces.
nss := h.Service.NSenterService()
event := nss.NewEvent(
process.Pid(),
&domain.AllNSsButMount,
&domain.NSenterMessage{
Type: domain.ReadFileRequest,
Payload: &domain.ReadFilePayload{
File: n.Path(),
},
},
nil,
false,
)
// Launch nsenter-event to obtain file state within container
// namespaces.
err := nss.SendRequestEvent(event)
if err != nil {
return "", err
}
// Obtain nsenter-event response.
responseMsg := nss.ReceiveResponseEvent(event)
if responseMsg.Type == domain.ErrorResponse {
return "", responseMsg.Payload.(error)
}
info := responseMsg.Payload.(string)
return info, nil
}
// Auxiliary method to inject content into any given file within a container.
func (h *PassThrough) pushFile(
n domain.IOnodeIface,
process domain.ProcessIface,
s string) error {
// Create nsenterEvent to initiate interaction with container namespaces.
nss := h.Service.NSenterService()
event := nss.NewEvent(
process.Pid(),
&domain.AllNSsButMount,
&domain.NSenterMessage{
Type: domain.WriteFileRequest,
Payload: &domain.WriteFilePayload{
File: n.Path(),
Content: s,
},
},
nil,
false,
)
// Launch nsenter-event to write file state within container
// namespaces.
err := nss.SendRequestEvent(event)
if err != nil {
return err
}
// Obtain nsenter-event response.
responseMsg := nss.ReceiveResponseEvent(event)
if responseMsg.Type == domain.ErrorResponse {
return responseMsg.Payload.(error)
}
return nil
}
func (h *PassThrough) GetName() string {
return h.Name
}
func (h *PassThrough) GetPath() string {
return h.Path
}
func (h *PassThrough) GetService() domain.HandlerServiceIface {
return h.Service
}
func (h *PassThrough) GetEnabled() bool {
return h.Enabled
}
func (h *PassThrough) SetEnabled(b bool) {
h.Enabled = b
}
func (h *PassThrough) GetResourcesList() []string {
var resources []string
for resourceKey, resource := range h.EmuResourceMap {
resource.Mutex.Lock()
if !resource.Enabled {
resource.Mutex.Unlock()
continue
}
resource.Mutex.Unlock()
resources = append(resources, filepath.Join(h.GetPath(), resourceKey))
}
return resources
}
func (h *PassThrough) GetResourceMutex(n domain.IOnodeIface) *sync.Mutex {
resource, ok := h.EmuResourceMap[n.Name()]
if !ok {
return nil
}
return &resource.Mutex
}
func (h *PassThrough) SetService(hs domain.HandlerServiceIface) {
h.Service = hs
}
| Open | identifier_name |
passThrough.go | //
// Copyright 2019-2020 Nestybox, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
package implementations
import (
"io"
"os"
"path/filepath"
"strconv"
"strings"
"sync"
"github.com/nestybox/sysbox-fs/domain"
"github.com/sirupsen/logrus"
)
//
// Pass-through handler
//
// Handler for all non-emulated resources. It does a simple "passthrough" of
// the access: it enters all the namespaces of the process doing the I/O
// operation and performs the operation on its behalf.
//
// Currently, this handler serves non-emulated resources within the /proc/sys
// subtree, but there's nothing specific to this path in the handler's
// implementation (note that the Path attribute is set to "*"), so it could
// be used for pass-through operations in other subtrees too.
//
type PassThrough struct {
domain.HandlerBase
}
var PassThrough_Handler = &PassThrough{
domain.HandlerBase{
Name: "PassThrough",
Path: "*",
Enabled: true,
},
}
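// Illustration only (not this project's actual lookup code): because Path is
// set to "*", a dispatcher that resolves handlers by path can fall back to
// this handler when no emulated resource claims the node. handlerMap and the
// domain.HandlerIface name below are assumptions made for the sketch.
//
//	func lookupHandler(handlerMap map[string]domain.HandlerIface, path string) domain.HandlerIface {
//	    if h, ok := handlerMap[path]; ok {
//	        return h // an emulated handler claims this exact path
//	    }
//	    return handlerMap["*"] // fall back to the pass-through handler
//	}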
func (h *PassThrough) Lookup(
n domain.IOnodeIface,
req *domain.HandlerRequest) (os.FileInfo, error) {
logrus.Debugf("Executing Lookup() for req-id: %#x, handler: %s, resource: %s",
req.ID, h.Name, n.Name())
// Create nsenterEvent to initiate interaction with container namespaces.
nss := h.Service.NSenterService()
event := nss.NewEvent(
req.Pid,
&domain.AllNSsButMount,
&domain.NSenterMessage{
Type: domain.LookupRequest,
Payload: &domain.LookupPayload{
Entry: n.Path(),
},
},
nil,
false,
)
// Launch nsenter-event.
err := nss.SendRequestEvent(event)
if err != nil {
return nil, err
}
// Obtain nsenter-event response.
responseMsg := nss.ReceiveResponseEvent(event)
if responseMsg.Type == domain.ErrorResponse {
return nil, responseMsg.Payload.(error)
}
info := responseMsg.Payload.(domain.FileInfo)
return info, nil
}
func (h *PassThrough) Open(
n domain.IOnodeIface,
req *domain.HandlerRequest) error {
logrus.Debugf("Executing Open() for req-id: %#x, handler: %s, resource: %s",
req.ID, h.Name, n.Name())
// Create nsenterEvent to initiate interaction with container namespaces.
nss := h.Service.NSenterService()
event := nss.NewEvent(
req.Pid,
&domain.AllNSsButMount,
&domain.NSenterMessage{
Type: domain.OpenFileRequest,
Payload: &domain.OpenFilePayload{
File: n.Path(),
Flags: strconv.Itoa(n.OpenFlags()),
Mode: strconv.Itoa(int(n.OpenMode())),
},
},
nil,
false,
)
// Launch nsenter-event.
err := nss.SendRequestEvent(event)
if err != nil {
return err
}
// Obtain nsenter-event response.
responseMsg := nss.ReceiveResponseEvent(event)
if responseMsg.Type == domain.ErrorResponse {
return responseMsg.Payload.(error)
}
return nil
}
func (h *PassThrough) Read(
n domain.IOnodeIface,
req *domain.HandlerRequest) (int, error) {
var resource = n.Name()
logrus.Debugf("Executing Read() for req-id: %#x, handler: %s, resource: %s",
req.ID, h.Name, resource)
if req.Offset > 0 {
return 0, io.EOF
}
var (
data string
ok bool
err error
)
path := n.Path()
prs := h.Service.ProcessService()
process := prs.ProcessCreate(req.Pid, req.Uid, req.Gid)
cntr := req.Container
//
// Caching here improves performance by avoiding dispatching the nsenter agent. But
// note that caching only helps processes at the sys container level, not in inner
// containers or unshared namespaces. To enable caching for those, we would need to
// have a cache per namespace, and this is expensive; plus we would also need to
// know when the namespace ceases to exist in order to destroy the cache associated
// with it.
//
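// For example (illustrative values): the first read of
// /proc/sys/net/ipv4/ip_forward by a process that fully matches the sys
// container's namespaces misses the cache, fetches the value (say "1") from
// the host FS via the nsenter agent, and stores it under (path, resource);
// later reads of the same node return the cached "1" without re-entering the
// container's namespaces.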
if domain.ProcessNsMatch(process, cntr.InitProc()) {
// If this resource is cached, return its data; otherwise fetch its data from the
// host FS and store it in the cache.
cntr.Lock()
data, ok = cntr.Data(path, resource)
if !ok {
data, err = h.fetchFile(n, process)
if err != nil {
cntr.Unlock()
return 0, err
}
cntr.SetData(path, resource, data)
}
cntr.Unlock()
} else {
data, err = h.fetchFile(n, process)
if err != nil {
return 0, err
}
}
data += "\n"
return copyResultBuffer(req.Data, []byte(data))
}
func (h *PassThrough) Write(
n domain.IOnodeIface,
req *domain.HandlerRequest) (int, error) {
var resource = n.Name()
logrus.Debugf("Executing Write() for req-id: %#x, handler: %s, resource: %s",
req.ID, h.Name, resource)
path := n.Path()
cntr := req.Container
newContent := strings.TrimSpace(string(req.Data))
prs := h.Service.ProcessService()
process := prs.ProcessCreate(req.Pid, req.Uid, req.Gid)
// If write op is originated by a process within a registered sys-container
// (it fully matches its namespaces) then store the data in the cache and do
// a write-through to the host FS. Otherwise just do the write-through.
if domain.ProcessNsMatch(process, cntr.InitProc()) {
cntr.Lock()
if err := h.pushFile(n, process, newContent); err != nil {
cntr.Unlock()
return 0, err
}
cntr.SetData(path, resource, newContent)
cntr.Unlock()
} else {
if err := h.pushFile(n, process, newContent); err != nil {
return 0, err
}
}
return len(req.Data), nil
}
func (h *PassThrough) ReadDirAll(
n domain.IOnodeIface,
req *domain.HandlerRequest) ([]os.FileInfo, error) {
logrus.Debugf("Executing ReadDirAll() for req-id: %#x, handler: %s, resource: %s",
req.ID, h.Name, n.Name())
// Create nsenterEvent to initiate interaction with container namespaces.
nss := h.Service.NSenterService()
event := nss.NewEvent(
req.Pid,
&domain.AllNSsButMount,
&domain.NSenterMessage{
Type: domain.ReadDirRequest,
Payload: &domain.ReadDirPayload{
Dir: n.Path(),
},
},
nil,
false,
)
// Launch nsenter-event.
err := nss.SendRequestEvent(event)
if err != nil {
return nil, err
}
// Obtain nsenter-event response.
responseMsg := nss.ReceiveResponseEvent(event)
if responseMsg.Type == domain.ErrorResponse {
return nil, responseMsg.Payload.(error)
}
var osFileEntries = make([]os.FileInfo, 0)
// Transform the event-response payload into a FileInfo slice. Note that to
// convert a []T1 to a []T2, we must iterate through each element and do the
// conversion one element at a time.
dirEntries := responseMsg.Payload.([]domain.FileInfo)
for _, v := range dirEntries {
osFileEntries = append(osFileEntries, v)
}
return osFileEntries, nil
}
func (h *PassThrough) Setattr(
n domain.IOnodeIface,
req *domain.HandlerRequest) error {
logrus.Debugf("Executing Setattr() for req-id: %#x, handler: %s, resource: %s",
req.ID, h.Name, n.Name())
// Create nsenterEvent to initiate interaction with container namespaces.
nss := h.Service.NSenterService()
event := nss.NewEvent(
req.Pid,
&domain.AllNSsButMount,
&domain.NSenterMessage{
Type: domain.OpenFileRequest,
Payload: &domain.OpenFilePayload{
File: n.Path(),
Flags: strconv.Itoa(n.OpenFlags()),
Mode: strconv.Itoa(int(n.OpenMode())),
},
},
nil,
false,
)
// Launch nsenter-event.
err := nss.SendRequestEvent(event)
if err != nil {
return err
}
// Obtain nsenter-event response.
responseMsg := nss.ReceiveResponseEvent(event)
if responseMsg.Type == domain.ErrorResponse {
return responseMsg.Payload.(error)
}
return nil
}
// Auxiliary method to fetch the content of any given file within a container.
func (h *PassThrough) fetchFile(
n domain.IOnodeIface,
process domain.ProcessIface) (string, error) {
// Create nsenterEvent to initiate interaction with container namespaces.
nss := h.Service.NSenterService()
event := nss.NewEvent(
process.Pid(),
&domain.AllNSsButMount,
&domain.NSenterMessage{
Type: domain.ReadFileRequest,
Payload: &domain.ReadFilePayload{
File: n.Path(),
},
},
nil,
false,
)
// Launch nsenter-event to obtain file state within container
// namespaces.
err := nss.SendRequestEvent(event)
if err != nil {
return "", err
}
// Obtain nsenter-event response.
responseMsg := nss.ReceiveResponseEvent(event)
if responseMsg.Type == domain.ErrorResponse {
return "", responseMsg.Payload.(error)
}
info := responseMsg.Payload.(string)
return info, nil
}
// Auxiliary method to inject content into any given file within a container.
func (h *PassThrough) pushFile(
n domain.IOnodeIface,
process domain.ProcessIface,
s string) error {
// Create nsenterEvent to initiate interaction with container namespaces.
nss := h.Service.NSenterService()
event := nss.NewEvent(
process.Pid(),
&domain.AllNSsButMount,
&domain.NSenterMessage{
Type: domain.WriteFileRequest,
Payload: &domain.WriteFilePayload{
File: n.Path(),
Content: s,
},
},
nil,
false,
)
// Launch nsenter-event to write file state within container
// namespaces.
err := nss.SendRequestEvent(event)
if err != nil {
return err
}
// Obtain nsenter-event response.
responseMsg := nss.ReceiveResponseEvent(event)
if responseMsg.Type == domain.ErrorResponse {
return responseMsg.Payload.(error)
}
return nil
}
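// The send/receive sequence above repeats across Lookup, Open, Read, Write,
// ReadDirAll and Setattr. A sketch of how it could be factored out (not the
// project's code; the event parameter type name is an assumption):
//
//	func (h *PassThrough) roundTrip(event domain.EventIface) (interface{}, error) {
//	    nss := h.Service.NSenterService()
//	    if err := nss.SendRequestEvent(event); err != nil {
//	        return nil, err
//	    }
//	    responseMsg := nss.ReceiveResponseEvent(event)
//	    if responseMsg.Type == domain.ErrorResponse {
//	        return nil, responseMsg.Payload.(error)
//	    }
//	    return responseMsg.Payload, nil
//	}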
func (h *PassThrough) GetName() string {
return h.Name
}
func (h *PassThrough) GetPath() string {
return h.Path
}
func (h *PassThrough) GetService() domain.HandlerServiceIface {
return h.Service
}
func (h *PassThrough) GetEnabled() bool {
return h.Enabled
}
func (h *PassThrough) SetEnabled(b bool) {
h.Enabled = b
}
func (h *PassThrough) GetResourcesList() []string {
var resources []string
for resourceKey, resource := range h.EmuResourceMap {
resource.Mutex.Lock()
if !resource.Enabled {
resource.Mutex.Unlock()
continue
}
resource.Mutex.Unlock()
resources = append(resources, filepath.Join(h.GetPath(), resourceKey))
}
return resources
}
func (h *PassThrough) GetResourceMutex(n domain.IOnodeIface) *sync.Mutex {
resource, ok := h.EmuResourceMap[n.Name()]
if !ok {
return nil
}
return &resource.Mutex
}
func (h *PassThrough) SetService(hs domain.HandlerServiceIface) {
h.Service = hs
}
| identifier_body |
utils.go | /*
Copyright 2021 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package utils
import (
"context"
"math"
"strconv"
cnstypes "github.com/vmware/govmomi/cns/types"
"google.golang.org/grpc/codes"
cnsvolume "sigs.k8s.io/vsphere-csi-driver/v3/pkg/common/cns-lib/volume"
cnsvsphere "sigs.k8s.io/vsphere-csi-driver/v3/pkg/common/cns-lib/vsphere"
"sigs.k8s.io/vsphere-csi-driver/v3/pkg/csi/service/logger"
)
const (
// DefaultQuerySnapshotLimit constant is already present in pkg/csi/service/common/constants.go
// However, using that constant creates an import cycle.
// TODO: Refactor to move all the constants into a top level directory.
DefaultQuerySnapshotLimit = int64(128)
)
// QueryVolumeUtil helps to invoke query volume API based on the feature
// state set for using query async volume. If useQueryVolumeAsync is set to
// true, the function invokes CNS QueryVolumeAsync, otherwise it invokes
// synchronous QueryVolume API. The function also takes a volume manager
// instance, query filters, and a query selection as params. Returns queryResult when the query
// volume succeeds, otherwise returns appropriate errors.
func QueryVolumeUtil(ctx context.Context, m cnsvolume.Manager, queryFilter cnstypes.CnsQueryFilter,
querySelection *cnstypes.CnsQuerySelection, useQueryVolumeAsync bool) (*cnstypes.CnsQueryResult, error) {
log := logger.GetLogger(ctx)
var queryAsyncNotSupported bool
var queryResult *cnstypes.CnsQueryResult
var err error
if useQueryVolumeAsync {
// AsyncQueryVolume feature switch is enabled.
queryResult, err = m.QueryVolumeAsync(ctx, queryFilter, querySelection)
if err != nil {
if err.Error() == cnsvsphere.ErrNotSupported.Error() {
log.Warn("QueryVolumeAsync is not supported. Invoking QueryVolume API")
queryAsyncNotSupported = true
} else { // Return for any other failures.
return nil, logger.LogNewErrorCodef(log, codes.Internal,
"queryVolumeAsync failed for queryFilter: %v. Err=%+v", queryFilter, err.Error())
}
}
}
if !useQueryVolumeAsync || queryAsyncNotSupported {
queryResult, err = m.QueryVolume(ctx, queryFilter)
if err != nil {
return nil, logger.LogNewErrorCodef(log, codes.Internal,
"queryVolume failed for queryFilter: %+v. Err=%+v", queryFilter, err.Error())
}
}
return queryResult, nil
}
// QuerySnapshotsUtil helps invoke CNS QuerySnapshot API. The method takes in a snapshotQueryFilter that represents
// the criteria to retrieve the snapshots. The maxEntries represents the max number of results that the caller of this
// method can handle.
func QuerySnapshotsUtil(ctx context.Context, m cnsvolume.Manager, snapshotQueryFilter cnstypes.CnsSnapshotQueryFilter,
maxEntries int64) ([]cnstypes.CnsSnapshotQueryResultEntry, string, error) {
log := logger.GetLogger(ctx)
var allQuerySnapshotResults []cnstypes.CnsSnapshotQueryResultEntry
var snapshotQuerySpec cnstypes.CnsSnapshotQuerySpec
var batchSize int64
maxIteration := int64(1)
isMaxIterationSet := false
if snapshotQueryFilter.SnapshotQuerySpecs == nil {
log.Infof("Attempting to retrieve all the Snapshots available in the vCenter inventory.")
} else {
snapshotQuerySpec = snapshotQueryFilter.SnapshotQuerySpecs[0]
log.Infof("Invoking QuerySnapshots with spec: %+v", snapshotQuerySpec)
}
// Check if cursor is specified, if not set a default cursor.
if snapshotQueryFilter.Cursor == nil {
// Setting the default limit(128) explicitly.
snapshotQueryFilter = cnstypes.CnsSnapshotQueryFilter{
Cursor: &cnstypes.CnsCursor{
Offset: 0,
Limit: DefaultQuerySnapshotLimit,
},
}
batchSize = DefaultQuerySnapshotLimit
} else {
batchSize = snapshotQueryFilter.Cursor.Limit
}
iteration := int64(1)
for {
if iteration > maxIteration {
// Exceeds the max number of results that can be handled by callers.
nextToken := strconv.FormatInt(snapshotQueryFilter.Cursor.Offset, 10)
log.Infof("the number of results: %d approached max-entries: %d for "+
"limit: %d in iteration: %d, returning with next-token: %s",
len(allQuerySnapshotResults), maxEntries, batchSize, iteration, nextToken)
return allQuerySnapshotResults, nextToken, nil
}
log.Infof("invoking QuerySnapshots in iteration: %d with offset: %d and limit: %d, current total "+
"results: %d", iteration, snapshotQueryFilter.Cursor.Offset, snapshotQueryFilter.Cursor.Limit,
len(allQuerySnapshotResults))
snapshotQueryResult, err := m.QuerySnapshots(ctx, snapshotQueryFilter)
if err != nil {
log.Errorf("querySnapshots failed for snapshotQueryFilter: %v. Err=%+v", snapshotQueryFilter, err)
return nil, "", err
}
if snapshotQueryResult == nil {
log.Infof("Observed empty SnapshotQueryResult")
break
}
if len(snapshotQueryResult.Entries) == 0 {
log.Infof("QuerySnapshots retrieved no results for the spec: %+v", snapshotQuerySpec)
}
// Update the max iteration.
// isMaxIterationSet ensures that the max iteration is set only once, so that
// the number of results stays lower than the max entries supported by the
// caller in a busy system with an increasing number of total records.
if !isMaxIterationSet {
if snapshotQueryResult.Cursor.TotalRecords < maxEntries {
// If the total number of records is less than max entries supported by caller then
// all results can be retrieved in a loop, when the results are returned no next-token is expected to be set.
// Example:
// maxEntries=200, totalRecords=150, batchSize=128
// maxIteration=2
// iteration-1: 128 results, iteration-2: 22 results
// total results returned: 150
// offset=0
maxRecords := snapshotQueryResult.Cursor.TotalRecords
numOfIterationsForAllResults := float64(maxRecords) / float64(batchSize)
maxIteration = int64(math.Ceil(numOfIterationsForAllResults))
log.Infof("setting max iteration to %d for total records count: %d", maxIteration, maxRecords)
} else {
// All results cannot be returned to caller, in this case the expectation is return as many results with a
// nextToken.
// Example:
// maxEntries=150, totalRecords=200, batchSize=128
// maxIteration=1
// iteration-1: 128 results
// total results returned: 128
// offset= 1, callers are expected to call with new offset as next token.
maxRecords := maxEntries
numOfIterationsForAllResults := float64(maxRecords) / float64(batchSize)
maxIteration = int64(math.Floor(numOfIterationsForAllResults))
log.Infof("setting max iteration to %d for total records count: %d and max limit: %d",
maxIteration, snapshotQueryResult.Cursor.TotalRecords, maxRecords)
}
isMaxIterationSet = true
}
allQuerySnapshotResults = append(allQuerySnapshotResults, snapshotQueryResult.Entries...)
log.Infof("%d more snapshots to be queried",
snapshotQueryResult.Cursor.TotalRecords-snapshotQueryResult.Cursor.Offset)
if snapshotQueryResult.Cursor.Offset == snapshotQueryResult.Cursor.TotalRecords {
log.Infof("QuerySnapshots retrieved all records (%d) for the SnapshotQuerySpec: %+v in %d iterations",
snapshotQueryResult.Cursor.TotalRecords, snapshotQuerySpec, iteration)
break
}
iteration++
snapshotQueryFilter.Cursor = &snapshotQueryResult.Cursor
}
return allQuerySnapshotResults, "", nil
}
type CnsVolumeDetails struct {
VolumeID string
SizeInMB int64
DatastoreUrl string
VolumeType string
}
// QueryVolumeDetailsUtil queries Capacity in MB and datastore URL for the source volume with expected volume type.
func QueryVolumeDetailsUtil(ctx context.Context, m cnsvolume.Manager, volumeIds []cnstypes.CnsVolumeId) (
map[string]*CnsVolumeDetails, error) {
log := logger.GetLogger(ctx)
volumeDetailsMap := make(map[string]*CnsVolumeDetails)
// Select only the backing object details, volume type and datastore.
querySelection := &cnstypes.CnsQuerySelection{
Names: []string{
string(cnstypes.QuerySelectionNameTypeBackingObjectDetails),
string(cnstypes.QuerySelectionNameTypeVolumeType),
string(cnstypes.QuerySelectionNameTypeDataStoreUrl),
},
}
queryFilter := cnstypes.CnsQueryFilter{
VolumeIds: volumeIds,
}
log.Infof("Invoking QueryAllVolumeUtil with Filter: %+v, Selection: %+v", queryFilter, *querySelection)
allQueryResults, err := m.QueryAllVolume(ctx, queryFilter, *querySelection)
if err != nil {
log.Errorf("failed to retrieve the volume size and datastore, err: %+v", err)
return volumeDetailsMap, logger.LogNewErrorCodef(log, codes.Internal,
"failed to retrieve the volume sizes: %+v", err)
}
log.Infof("Number of results from QueryAllVolumeUtil: %d", len(allQueryResults.Volumes))
for _, res := range allQueryResults.Volumes {
volumeId := res.VolumeId
datastoreUrl := res.DatastoreUrl
volumeCapacityInMB := res.BackingObjectDetails.GetCnsBackingObjectDetails().CapacityInMb
volumeType := res.VolumeType
log.Debugf("VOLUME: %s, TYPE: %s, DATASTORE: %s, CAPACITY: %d", volumeId, volumeType, datastoreUrl,
volumeCapacityInMB)
volumeDetails := &CnsVolumeDetails{
VolumeID: volumeId.Id,
SizeInMB: volumeCapacityInMB,
DatastoreUrl: datastoreUrl,
VolumeType: volumeType,
}
volumeDetailsMap[volumeId.Id] = volumeDetails
}
return volumeDetailsMap, nil
}
// LogoutAllvCenterSessions logs out all vCenter sessions and disconnects the vCenter clients
func LogoutAllvCenterSessions(ctx context.Context) {
log := logger.GetLogger(ctx)
log.Info("Logging out all vCenter sessions")
virtualcentermanager := cnsvsphere.GetVirtualCenterManager(ctx)
vCenters := virtualcentermanager.GetAllVirtualCenters()
managerInstanceMap := cnsvolume.GetAllManagerInstances(ctx)
for _, vc := range vCenters {
if vc.Client == nil {
continue
}
log.Info("Closing idle vCenter session")
vc.Client.CloseIdleConnections()
// logout vCenter session for list-view
mgr, ok := managerInstanceMap[vc.Config.Host]
if ok && mgr != nil {
err := mgr.LogoutListViewVCSession(ctx)
if err != nil {
continue
}
}
log.Infof("Disconnecting vCenter client for host %s", vc.Config.Host)
err := vc.Disconnect(ctx)
if err != nil {
log.Errorf("Error while disconnect vCenter client for host %s. Error: %+v", vc.Config.Host, err)
continue
}
log.Infof("Disconnected vCenter client for host %s", vc.Config.Host)
}
log.Info("Successfully logged out vCenter sessions")
}
| identifier_body |
utils.go | /*
Copyright 2021 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package utils
import (
"context"
"math"
"strconv"
cnstypes "github.com/vmware/govmomi/cns/types"
"google.golang.org/grpc/codes"
cnsvolume "sigs.k8s.io/vsphere-csi-driver/v3/pkg/common/cns-lib/volume"
cnsvsphere "sigs.k8s.io/vsphere-csi-driver/v3/pkg/common/cns-lib/vsphere"
"sigs.k8s.io/vsphere-csi-driver/v3/pkg/csi/service/logger"
)
const (
// DefaultQuerySnapshotLimit constant is already present in pkg/csi/service/common/constants.go
// However, using that constant creates an import cycle.
// TODO: Refactor to move all the constants into a top level directory.
DefaultQuerySnapshotLimit = int64(128)
)
// QueryVolumeUtil helps to invoke query volume API based on the feature
// state set for using query async volume. If useQueryVolumeAsync is set to
// true, the function invokes CNS QueryVolumeAsync, otherwise it invokes
// synchronous QueryVolume API. The function also takes a volume manager
// instance, query filters, and a query selection as params. Returns queryResult when the query
// volume succeeds, otherwise returns appropriate errors.
func QueryVolumeUtil(ctx context.Context, m cnsvolume.Manager, queryFilter cnstypes.CnsQueryFilter,
querySelection *cnstypes.CnsQuerySelection, useQueryVolumeAsync bool) (*cnstypes.CnsQueryResult, error) {
log := logger.GetLogger(ctx)
var queryAsyncNotSupported bool
var queryResult *cnstypes.CnsQueryResult
var err error
if useQueryVolumeAsync {
// AsyncQueryVolume feature switch is enabled.
queryResult, err = m.QueryVolumeAsync(ctx, queryFilter, querySelection)
if err != nil {
if err.Error() == cnsvsphere.ErrNotSupported.Error() {
log.Warn("QueryVolumeAsync is not supported. Invoking QueryVolume API")
queryAsyncNotSupported = true
} else { // Return for any other failures.
return nil, logger.LogNewErrorCodef(log, codes.Internal,
"queryVolumeAsync failed for queryFilter: %v. Err=%+v", queryFilter, err.Error())
}
}
}
if !useQueryVolumeAsync || queryAsyncNotSupported {
queryResult, err = m.QueryVolume(ctx, queryFilter)
if err != nil {
return nil, logger.LogNewErrorCodef(log, codes.Internal,
"queryVolume failed for queryFilter: %+v. Err=%+v", queryFilter, err.Error())
}
}
return queryResult, nil
}
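// A minimal usage sketch (the volume ID and manager value are placeholders):
// query one volume by ID with the async path preferred; the helper falls back
// to the synchronous API when QueryVolumeAsync is unsupported.
//
//	queryFilter := cnstypes.CnsQueryFilter{
//	    VolumeIds: []cnstypes.CnsVolumeId{{Id: "example-volume-id"}},
//	}
//	querySelection := &cnstypes.CnsQuerySelection{
//	    Names: []string{string(cnstypes.QuerySelectionNameTypeVolumeType)},
//	}
//	res, err := QueryVolumeUtil(ctx, volumeManager, queryFilter, querySelection, true)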
// QuerySnapshotsUtil helps invoke CNS QuerySnapshot API. The method takes in a snapshotQueryFilter that represents
// the criteria to retrieve the snapshots. The maxEntries represents the max number of results that the caller of this
// method can handle.
func QuerySnapshotsUtil(ctx context.Context, m cnsvolume.Manager, snapshotQueryFilter cnstypes.CnsSnapshotQueryFilter,
maxEntries int64) ([]cnstypes.CnsSnapshotQueryResultEntry, string, error) {
log := logger.GetLogger(ctx)
var allQuerySnapshotResults []cnstypes.CnsSnapshotQueryResultEntry
var snapshotQuerySpec cnstypes.CnsSnapshotQuerySpec
var batchSize int64
maxIteration := int64(1)
isMaxIterationSet := false
if snapshotQueryFilter.SnapshotQuerySpecs == nil {
log.Infof("Attempting to retrieve all the Snapshots available in the vCenter inventory.")
} else {
snapshotQuerySpec = snapshotQueryFilter.SnapshotQuerySpecs[0]
log.Infof("Invoking QuerySnapshots with spec: %+v", snapshotQuerySpec)
}
// Check if cursor is specified, if not set a default cursor.
if snapshotQueryFilter.Cursor == nil {
// Setting the default limit(128) explicitly.
snapshotQueryFilter = cnstypes.CnsSnapshotQueryFilter{
Cursor: &cnstypes.CnsCursor{
Offset: 0,
Limit: DefaultQuerySnapshotLimit,
},
}
batchSize = DefaultQuerySnapshotLimit
} else {
batchSize = snapshotQueryFilter.Cursor.Limit
}
iteration := int64(1)
for {
if iteration > maxIteration {
// Exceeds the max number of results that can be handled by callers.
nextToken := strconv.FormatInt(snapshotQueryFilter.Cursor.Offset, 10)
log.Infof("the number of results: %d approached max-entries: %d for "+
"limit: %d in iteration: %d, returning with next-token: %s",
len(allQuerySnapshotResults), maxEntries, batchSize, iteration, nextToken)
return allQuerySnapshotResults, nextToken, nil
}
log.Infof("invoking QuerySnapshots in iteration: %d with offset: %d and limit: %d, current total "+
"results: %d", iteration, snapshotQueryFilter.Cursor.Offset, snapshotQueryFilter.Cursor.Limit,
len(allQuerySnapshotResults))
snapshotQueryResult, err := m.QuerySnapshots(ctx, snapshotQueryFilter)
if err != nil {
log.Errorf("querySnapshots failed for snapshotQueryFilter: %v. Err=%+v", snapshotQueryFilter, err)
return nil, "", err
}
if snapshotQueryResult == nil {
log.Infof("Observed empty SnapshotQueryResult")
break
}
if len(snapshotQueryResult.Entries) == 0 {
log.Infof("QuerySnapshots retrieved no results for the spec: %+v", snapshotQuerySpec)
}
// Update the max iteration.
// isMaxIterationSet ensures that the max iteration is set only once, so that
// the number of results stays lower than the max entries supported by the
// caller in a busy system with an increasing number of total records.
if !isMaxIterationSet {
if snapshotQueryResult.Cursor.TotalRecords < maxEntries {
// If the total number of records is less than max entries supported by caller then
// all results can be retrieved in a loop, when the results are returned no next-token is expected to be set.
// Example:
// maxEntries=200, totalRecords=150, batchSize=128
// maxIteration=2
// iteration-1: 128 results, iteration-2: 22 results
// total results returned: 150
// offset=0
maxRecords := snapshotQueryResult.Cursor.TotalRecords
numOfIterationsForAllResults := float64(maxRecords) / float64(batchSize)
maxIteration = int64(math.Ceil(numOfIterationsForAllResults))
log.Infof("setting max iteration to %d for total records count: %d", maxIteration, maxRecords)
} else {
// All results cannot be returned to caller, in this case the expectation is return as many results with a
// nextToken.
// Example:
// maxEntries=150, totalRecords=200, batchSize=128
// maxIteration=1
// iteration-1: 128 results
// total results returned: 128
// offset= 1, callers are expected to call with new offset as next token.
maxRecords := maxEntries
numOfIterationsForAllResults := float64(maxRecords) / float64(batchSize)
maxIteration = int64(math.Floor(numOfIterationsForAllResults))
log.Infof("setting max iteration to %d for total records count: %d and max limit: %d",
maxIteration, snapshotQueryResult.Cursor.TotalRecords, maxRecords)
}
isMaxIterationSet = true
}
allQuerySnapshotResults = append(allQuerySnapshotResults, snapshotQueryResult.Entries...)
log.Infof("%d more snapshots to be queried",
snapshotQueryResult.Cursor.TotalRecords-snapshotQueryResult.Cursor.Offset)
if snapshotQueryResult.Cursor.Offset == snapshotQueryResult.Cursor.TotalRecords {
log.Infof("QuerySnapshots retrieved all records (%d) for the SnapshotQuerySpec: %+v in %d iterations",
snapshotQueryResult.Cursor.TotalRecords, snapshotQuerySpec, iteration)
break
}
iteration++
snapshotQueryFilter.Cursor = &snapshotQueryResult.Cursor
}
return allQuerySnapshotResults, "", nil
}
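// A minimal paging sketch (maxEntries of 150 is illustrative): keep calling
// with the returned next-token (an offset rendered by strconv.FormatInt) as
// the new cursor offset until the token comes back empty.
//
//	var all []cnstypes.CnsSnapshotQueryResultEntry
//	filter := cnstypes.CnsSnapshotQueryFilter{} // nil cursor -> default limit of 128
//	for {
//	    entries, nextToken, err := QuerySnapshotsUtil(ctx, volumeManager, filter, 150)
//	    if err != nil {
//	        break
//	    }
//	    all = append(all, entries...)
//	    if nextToken == "" {
//	        break
//	    }
//	    offset, _ := strconv.ParseInt(nextToken, 10, 64)
//	    filter.Cursor = &cnstypes.CnsCursor{Offset: offset, Limit: DefaultQuerySnapshotLimit}
//	}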
type CnsVolumeDetails struct {
VolumeID string
SizeInMB int64
DatastoreUrl string
VolumeType string
}
// QueryVolumeDetailsUtil queries Capacity in MB and datastore URL for the source volume with expected volume type.
func QueryVolumeDetailsUtil(ctx context.Context, m cnsvolume.Manager, volumeIds []cnstypes.CnsVolumeId) (
map[string]*CnsVolumeDetails, error) {
log := logger.GetLogger(ctx)
volumeDetailsMap := make(map[string]*CnsVolumeDetails)
// Select only the backing object details, volume type and datastore.
querySelection := &cnstypes.CnsQuerySelection{
Names: []string{
string(cnstypes.QuerySelectionNameTypeBackingObjectDetails),
string(cnstypes.QuerySelectionNameTypeVolumeType),
string(cnstypes.QuerySelectionNameTypeDataStoreUrl),
},
}
queryFilter := cnstypes.CnsQueryFilter{
VolumeIds: volumeIds,
}
log.Infof("Invoking QueryAllVolumeUtil with Filter: %+v, Selection: %+v", queryFilter, *querySelection)
allQueryResults, err := m.QueryAllVolume(ctx, queryFilter, *querySelection)
if err != nil {
log.Errorf("failed to retrieve the volume size and datastore, err: %+v", err)
return volumeDetailsMap, logger.LogNewErrorCodef(log, codes.Internal,
"failed to retrieve the volume sizes: %+v", err)
}
log.Infof("Number of results from QueryAllVolumeUtil: %d", len(allQueryResults.Volumes))
for _, res := range allQueryResults.Volumes {
volumeId := res.VolumeId
datastoreUrl := res.DatastoreUrl
volumeCapacityInMB := res.BackingObjectDetails.GetCnsBackingObjectDetails().CapacityInMb
volumeType := res.VolumeType
log.Debugf("VOLUME: %s, TYPE: %s, DATASTORE: %s, CAPACITY: %d", volumeId, volumeType, datastoreUrl,
volumeCapacityInMB)
volumeDetails := &CnsVolumeDetails{
VolumeID: volumeId.Id,
SizeInMB: volumeCapacityInMB,
DatastoreUrl: datastoreUrl,
VolumeType: volumeType,
}
volumeDetailsMap[volumeId.Id] = volumeDetails
}
return volumeDetailsMap, nil
}
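// A minimal usage sketch (volume IDs are placeholders): batch-fetch details
// and index the returned map by volume ID.
//
//	ids := []cnstypes.CnsVolumeId{{Id: "vol-1"}, {Id: "vol-2"}}
//	details, err := QueryVolumeDetailsUtil(ctx, volumeManager, ids)
//	if err == nil {
//	    if d, ok := details["vol-1"]; ok {
//	        log.Infof("vol-1: %d MB, type %s, on %s", d.SizeInMB, d.VolumeType, d.DatastoreUrl)
//	    }
//	}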
// LogoutAllvCenterSessions logs out all vCenter sessions and disconnects the vCenter clients
func LogoutAllvCenterSessions(ctx context.Context) {
log := logger.GetLogger(ctx)
log.Info("Logging out all vCenter sessions")
virtualcentermanager := cnsvsphere.GetVirtualCenterManager(ctx)
vCenters := virtualcentermanager.GetAllVirtualCenters()
managerInstanceMap := cnsvolume.GetAllManagerInstances(ctx)
for _, vc := range vCenters {
if vc.Client == nil {
continue
}
log.Info("Closing idle vCenter session")
vc.Client.CloseIdleConnections()
// logout vCenter session for list-view
mgr, ok := managerInstanceMap[vc.Config.Host]
if ok && mgr != nil {
err := mgr.LogoutListViewVCSession(ctx)
if err != nil {
continue
}
}
log.Infof("Disconnecting vCenter client for host %s", vc.Config.Host)
err := vc.Disconnect(ctx)
if err != nil {
log.Errorf("Error while disconnect vCenter client for host %s. Error: %+v", vc.Config.Host, err)
continue
}
log.Infof("Disconnected vCenter client for host %s", vc.Config.Host)
}
log.Info("Successfully logged out vCenter sessions")
}
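// Typically called once during driver shutdown; a sketch assuming a
// top-level context:
//
//	ctx := context.Background()
//	defer LogoutAllvCenterSessions(ctx)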
| QuerySnapshotsUtil | identifier_name |
utils.go | /*
Copyright 2021 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package utils
import (
"context"
"math"
"strconv"
cnstypes "github.com/vmware/govmomi/cns/types"
"google.golang.org/grpc/codes"
cnsvolume "sigs.k8s.io/vsphere-csi-driver/v3/pkg/common/cns-lib/volume"
cnsvsphere "sigs.k8s.io/vsphere-csi-driver/v3/pkg/common/cns-lib/vsphere"
"sigs.k8s.io/vsphere-csi-driver/v3/pkg/csi/service/logger"
)
const (
// DefaultQuerySnapshotLimit constant is already present in pkg/csi/service/common/constants.go
// However, using that constant creates an import cycle.
// TODO: Refactor to move all the constants into a top level directory.
DefaultQuerySnapshotLimit = int64(128)
)
// QueryVolumeUtil helps to invoke query volume API based on the feature
// state set for using query async volume. If useQueryVolumeAsync is set to
// true, the function invokes CNS QueryVolumeAsync, otherwise it invokes
// synchronous QueryVolume API. The function also takes a volume manager
// instance, query filters, and a query selection as params. Returns queryResult when the query
// volume succeeds, otherwise returns appropriate errors.
func QueryVolumeUtil(ctx context.Context, m cnsvolume.Manager, queryFilter cnstypes.CnsQueryFilter,
querySelection *cnstypes.CnsQuerySelection, useQueryVolumeAsync bool) (*cnstypes.CnsQueryResult, error) {
log := logger.GetLogger(ctx)
var queryAsyncNotSupported bool
var queryResult *cnstypes.CnsQueryResult
var err error
if useQueryVolumeAsync {
// AsyncQueryVolume feature switch is enabled.
queryResult, err = m.QueryVolumeAsync(ctx, queryFilter, querySelection)
if err != nil {
if err.Error() == cnsvsphere.ErrNotSupported.Error() {
log.Warn("QueryVolumeAsync is not supported. Invoking QueryVolume API")
queryAsyncNotSupported = true
} else { // Return for any other failures.
return nil, logger.LogNewErrorCodef(log, codes.Internal,
"queryVolumeAsync failed for queryFilter: %v. Err=%+v", queryFilter, err.Error())
}
}
}
if !useQueryVolumeAsync || queryAsyncNotSupported {
queryResult, err = m.QueryVolume(ctx, queryFilter)
if err != nil {
return nil, logger.LogNewErrorCodef(log, codes.Internal,
"queryVolume failed for queryFilter: %+v. Err=%+v", queryFilter, err.Error())
}
}
return queryResult, nil
}
// QuerySnapshotsUtil helps invoke CNS QuerySnapshot API. The method takes in a snapshotQueryFilter that represents
// the criteria to retrieve the snapshots. The maxEntries represents the max number of results that the caller of this
// method can handle.
func QuerySnapshotsUtil(ctx context.Context, m cnsvolume.Manager, snapshotQueryFilter cnstypes.CnsSnapshotQueryFilter,
maxEntries int64) ([]cnstypes.CnsSnapshotQueryResultEntry, string, error) {
log := logger.GetLogger(ctx)
var allQuerySnapshotResults []cnstypes.CnsSnapshotQueryResultEntry
var snapshotQuerySpec cnstypes.CnsSnapshotQuerySpec
var batchSize int64
maxIteration := int64(1)
isMaxIterationSet := false
if snapshotQueryFilter.SnapshotQuerySpecs == nil {
log.Infof("Attempting to retrieve all the Snapshots available in the vCenter inventory.")
} else {
snapshotQuerySpec = snapshotQueryFilter.SnapshotQuerySpecs[0]
log.Infof("Invoking QuerySnapshots with spec: %+v", snapshotQuerySpec)
}
// Check if cursor is specified, if not set a default cursor.
if snapshotQueryFilter.Cursor == nil {
// Setting the default limit(128) explicitly.
snapshotQueryFilter = cnstypes.CnsSnapshotQueryFilter{
Cursor: &cnstypes.CnsCursor{
Offset: 0,
Limit: DefaultQuerySnapshotLimit,
},
}
batchSize = DefaultQuerySnapshotLimit
} else {
batchSize = snapshotQueryFilter.Cursor.Limit
}
iteration := int64(1)
for {
if iteration > maxIteration {
// Exceeds the max number of results that can be handled by callers.
nextToken := strconv.FormatInt(snapshotQueryFilter.Cursor.Offset, 10)
log.Infof("the number of results: %d approached max-entries: %d for "+
"limit: %d in iteration: %d, returning with next-token: %s",
len(allQuerySnapshotResults), maxEntries, batchSize, iteration, nextToken)
return allQuerySnapshotResults, nextToken, nil
}
log.Infof("invoking QuerySnapshots in iteration: %d with offset: %d and limit: %d, current total "+
"results: %d", iteration, snapshotQueryFilter.Cursor.Offset, snapshotQueryFilter.Cursor.Limit,
len(allQuerySnapshotResults))
snapshotQueryResult, err := m.QuerySnapshots(ctx, snapshotQueryFilter)
if err != nil {
log.Errorf("querySnapshots failed for snapshotQueryFilter: %v. Err=%+v", snapshotQueryFilter, err)
return nil, "", err
}
if snapshotQueryResult == nil {
log.Infof("Observed empty SnapshotQueryResult")
break
}
if len(snapshotQueryResult.Entries) == 0 {
log.Infof("QuerySnapshots retrieved no results for the spec: %+v", snapshotQuerySpec)
}
// Update the max iteration.
// isMaxIterationSet ensures that the max iteration is set only once, so that
// the number of results stays lower than the max entries supported by the
// caller in a busy system with an increasing number of total records.
if !isMaxIterationSet {
if snapshotQueryResult.Cursor.TotalRecords < maxEntries {
// If the total number of records is less than max entries supported by caller then
// all results can be retrieved in a loop, when the results are returned no next-token is expected to be set.
// Example:
// maxEntries=200, totalRecords=150, batchSize=128
// maxIteration=2
// iteration-1: 128 results, iteration-2: 22 results
// total results returned: 150
// offset=0
maxRecords := snapshotQueryResult.Cursor.TotalRecords
numOfIterationsForAllResults := float64(maxRecords) / float64(batchSize)
maxIteration = int64(math.Ceil(numOfIterationsForAllResults))
log.Infof("setting max iteration to %d for total records count: %d", maxIteration, maxRecords)
} else {
// All results cannot be returned to caller, in this case the expectation is return as many results with a
// nextToken.
// Example:
// maxEntries=150, totalRecords=200, batchSize=128
// maxIteration=1
// iteration-1: 128 results
// total results returned: 128
// offset= 1, callers are expected to call with new offset as next token.
maxRecords := maxEntries
numOfIterationsForAllResults := float64(maxRecords) / float64(batchSize)
maxIteration = int64(math.Floor(numOfIterationsForAllResults))
log.Infof("setting max iteration to %d for total records count: %d and max limit: %d",
maxIteration, snapshotQueryResult.Cursor.TotalRecords, maxRecords)
}
isMaxIterationSet = true
}
allQuerySnapshotResults = append(allQuerySnapshotResults, snapshotQueryResult.Entries...)
log.Infof("%d more snapshots to be queried",
snapshotQueryResult.Cursor.TotalRecords-snapshotQueryResult.Cursor.Offset)
if snapshotQueryResult.Cursor.Offset == snapshotQueryResult.Cursor.TotalRecords {
log.Infof("QuerySnapshots retrieved all records (%d) for the SnapshotQuerySpec: %+v in %d iterations",
snapshotQueryResult.Cursor.TotalRecords, snapshotQuerySpec, iteration)
break
}
iteration++
snapshotQueryFilter.Cursor = &snapshotQueryResult.Cursor
}
return allQuerySnapshotResults, "", nil
}
type CnsVolumeDetails struct {
VolumeID string
SizeInMB int64
DatastoreUrl string
VolumeType string
}
// QueryVolumeDetailsUtil queries Capacity in MB and datastore URL for the source volume with expected volume type.
func QueryVolumeDetailsUtil(ctx context.Context, m cnsvolume.Manager, volumeIds []cnstypes.CnsVolumeId) (
map[string]*CnsVolumeDetails, error) {
log := logger.GetLogger(ctx)
volumeDetailsMap := make(map[string]*CnsVolumeDetails)
// Select only the backing object details, volume type and datastore.
querySelection := &cnstypes.CnsQuerySelection{
Names: []string{
string(cnstypes.QuerySelectionNameTypeBackingObjectDetails),
string(cnstypes.QuerySelectionNameTypeVolumeType),
string(cnstypes.QuerySelectionNameTypeDataStoreUrl),
},
}
queryFilter := cnstypes.CnsQueryFilter{
VolumeIds: volumeIds,
}
log.Infof("Invoking QueryAllVolumeUtil with Filter: %+v, Selection: %+v", queryFilter, *querySelection)
allQueryResults, err := m.QueryAllVolume(ctx, queryFilter, *querySelection)
if err != nil {
log.Errorf("failed to retrieve the volume size and datastore, err: %+v", err)
return volumeDetailsMap, logger.LogNewErrorCodef(log, codes.Internal,
"failed to retrieve the volume sizes: %+v", err)
}
log.Infof("Number of results from QueryAllVolumeUtil: %d", len(allQueryResults.Volumes))
for _, res := range allQueryResults.Volumes {
volumeId := res.VolumeId
datastoreUrl := res.DatastoreUrl
volumeCapacityInMB := res.BackingObjectDetails.GetCnsBackingObjectDetails().CapacityInMb
volumeType := res.VolumeType
log.Debugf("VOLUME: %s, TYPE: %s, DATASTORE: %s, CAPACITY: %d", volumeId, volumeType, datastoreUrl,
volumeCapacityInMB)
volumeDetails := &CnsVolumeDetails{
VolumeID: volumeId.Id,
SizeInMB: volumeCapacityInMB,
DatastoreUrl: datastoreUrl,
VolumeType: volumeType,
}
volumeDetailsMap[volumeId.Id] = volumeDetails
}
return volumeDetailsMap, nil
}
// LogoutAllvCenterSessions logs out all vCenter sessions and disconnects the vCenter clients
func LogoutAllvCenterSessions(ctx context.Context) {
log := logger.GetLogger(ctx)
log.Info("Logging out all vCenter sessions")
virtualcentermanager := cnsvsphere.GetVirtualCenterManager(ctx)
vCenters := virtualcentermanager.GetAllVirtualCenters()
managerInstanceMap := cnsvolume.GetAllManagerInstances(ctx)
for _, vc := range vCenters {
if vc.Client == nil {
continue
}
log.Info("Closing idle vCenter session")
vc.Client.CloseIdleConnections()
// logout vCenter session for list-view
mgr, ok := managerInstanceMap[vc.Config.Host]
if ok && mgr != nil {
err := mgr.LogoutListViewVCSession(ctx)
if err != nil {
continue
}
}
log.Infof("Disconnecting vCenter client for host %s", vc.Config.Host)
err := vc.Disconnect(ctx)
if err != nil {
log.Errorf("Error while disconnecting vCenter client for host %s. Error: %+v", vc.Config.Host, err)
continue
}
log.Infof("Disconnected vCenter client for host %s", vc.Config.Host)
}
log.Info("Successfully logged out vCenter sessions")
}
| random_line_split |
utils.go | /*
Copyright 2021 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package utils
import (
"context"
"math"
"strconv"
cnstypes "github.com/vmware/govmomi/cns/types"
"google.golang.org/grpc/codes"
cnsvolume "sigs.k8s.io/vsphere-csi-driver/v3/pkg/common/cns-lib/volume"
cnsvsphere "sigs.k8s.io/vsphere-csi-driver/v3/pkg/common/cns-lib/vsphere"
"sigs.k8s.io/vsphere-csi-driver/v3/pkg/csi/service/logger"
)
const (
// DefaultQuerySnapshotLimit constant is already present in pkg/csi/service/common/constants.go
// However, using that constant creates an import cycle.
// TODO: Refactor to move all the constants into a top level directory.
DefaultQuerySnapshotLimit = int64(128)
)
// QueryVolumeUtil helps to invoke query volume API based on the feature
// state set for using query async volume. If useQueryVolumeAsync is set to
// true, the function invokes CNS QueryVolumeAsync, otherwise it invokes
// synchronous QueryVolume API. The function also takes a volume manager
// instance, query filters, and a query selection as params. Returns queryResult when the query
// volume succeeds, otherwise returns appropriate errors.
func QueryVolumeUtil(ctx context.Context, m cnsvolume.Manager, queryFilter cnstypes.CnsQueryFilter,
querySelection *cnstypes.CnsQuerySelection, useQueryVolumeAsync bool) (*cnstypes.CnsQueryResult, error) {
log := logger.GetLogger(ctx)
var queryAsyncNotSupported bool
var queryResult *cnstypes.CnsQueryResult
var err error
if useQueryVolumeAsync {
// AsyncQueryVolume feature switch is enabled.
queryResult, err = m.QueryVolumeAsync(ctx, queryFilter, querySelection)
if err != nil {
if err.Error() == cnsvsphere.ErrNotSupported.Error() {
log.Warn("QueryVolumeAsync is not supported. Invoking QueryVolume API")
queryAsyncNotSupported = true
} else { // Return for any other failures.
return nil, logger.LogNewErrorCodef(log, codes.Internal,
"queryVolumeAsync failed for queryFilter: %v. Err=%+v", queryFilter, err.Error())
}
}
}
if !useQueryVolumeAsync || queryAsyncNotSupported {
queryResult, err = m.QueryVolume(ctx, queryFilter)
if err != nil {
return nil, logger.LogNewErrorCodef(log, codes.Internal,
"queryVolume failed for queryFilter: %+v. Err=%+v", queryFilter, err.Error())
}
}
return queryResult, nil
}
// QuerySnapshotsUtil helps invoke CNS QuerySnapshot API. The method takes in a snapshotQueryFilter that represents
// the criteria to retrieve the snapshots. The maxEntries represents the max number of results that the caller of this
// method can handle.
func QuerySnapshotsUtil(ctx context.Context, m cnsvolume.Manager, snapshotQueryFilter cnstypes.CnsSnapshotQueryFilter,
maxEntries int64) ([]cnstypes.CnsSnapshotQueryResultEntry, string, error) {
log := logger.GetLogger(ctx)
var allQuerySnapshotResults []cnstypes.CnsSnapshotQueryResultEntry
var snapshotQuerySpec cnstypes.CnsSnapshotQuerySpec
var batchSize int64
maxIteration := int64(1)
isMaxIterationSet := false
if snapshotQueryFilter.SnapshotQuerySpecs == nil {
log.Infof("Attempting to retrieve all the Snapshots available in the vCenter inventory.")
} else {
snapshotQuerySpec = snapshotQueryFilter.SnapshotQuerySpecs[0]
log.Infof("Invoking QuerySnapshots with spec: %+v", snapshotQuerySpec)
}
// Check if cursor is specified, if not set a default cursor.
if snapshotQueryFilter.Cursor == nil {
// Setting the default limit(128) explicitly.
snapshotQueryFilter = cnstypes.CnsSnapshotQueryFilter{
Cursor: &cnstypes.CnsCursor{
Offset: 0,
Limit: DefaultQuerySnapshotLimit,
},
}
batchSize = DefaultQuerySnapshotLimit
} else {
batchSize = snapshotQueryFilter.Cursor.Limit
}
iteration := int64(1)
for {
if iteration > maxIteration {
// Exceeds the max number of results that can be handled by callers.
nextToken := strconv.FormatInt(snapshotQueryFilter.Cursor.Offset, 10)
log.Infof("the number of results: %d approached max-entries: %d for "+
"limit: %d in iteration: %d, returning with next-token: %s",
len(allQuerySnapshotResults), maxEntries, batchSize, iteration, nextToken)
return allQuerySnapshotResults, nextToken, nil
}
log.Infof("invoking QuerySnapshots in iteration: %d with offset: %d and limit: %d, current total "+
"results: %d", iteration, snapshotQueryFilter.Cursor.Offset, snapshotQueryFilter.Cursor.Limit,
len(allQuerySnapshotResults))
snapshotQueryResult, err := m.QuerySnapshots(ctx, snapshotQueryFilter)
if err != nil {
log.Errorf("querySnapshots failed for snapshotQueryFilter: %v. Err=%+v", snapshotQueryFilter, err)
return nil, "", err
}
if snapshotQueryResult == nil {
log.Infof("Observed empty SnapshotQueryResult")
break
}
if len(snapshotQueryResult.Entries) == 0 {
log.Infof("QuerySnapshots retrieved no results for the spec: %+v", snapshotQuerySpec)
}
// Update the max iteration.
// isMaxIterationSet ensures that the max iteration is set only once, so that
// the number of results stays lower than the max entries supported by the
// caller in a busy system with an increasing number of total records.
if !isMaxIterationSet {
if snapshotQueryResult.Cursor.TotalRecords < maxEntries {
// If the total number of records is less than max entries supported by caller then
// all results can be retrieved in a loop, when the results are returned no next-token is expected to be set.
// Example:
// maxEntries=200, totalRecords=150, batchSize=128
// maxIteration=2
// iteration-1: 128 results, iteration-2: 22 results
// total results returned: 150
// offset=0
maxRecords := snapshotQueryResult.Cursor.TotalRecords
numOfIterationsForAllResults := float64(maxRecords) / float64(batchSize)
maxIteration = int64(math.Ceil(numOfIterationsForAllResults))
log.Infof("setting max iteration to %d for total records count: %d", maxIteration, maxRecords)
} else {
// All results cannot be returned to caller, in this case the expectation is return as many results with a
// nextToken.
// Example:
// maxEntries=150, totalRecords=200, batchSize=128
// maxIteration=1
// iteration-1: 128 results
// total results returned: 128
// offset= 1, callers are expected to call with new offset as next token.
maxRecords := maxEntries
numOfIterationsForAllResults := float64(maxRecords) / float64(batchSize)
maxIteration = int64(math.Floor(numOfIterationsForAllResults))
log.Infof("setting max iteration to %d for total records count: %d and max limit: %d",
maxIteration, snapshotQueryResult.Cursor.TotalRecords, maxRecords)
}
isMaxIterationSet = true
}
allQuerySnapshotResults = append(allQuerySnapshotResults, snapshotQueryResult.Entries...)
log.Infof("%d more snapshots to be queried",
snapshotQueryResult.Cursor.TotalRecords-snapshotQueryResult.Cursor.Offset)
if snapshotQueryResult.Cursor.Offset == snapshotQueryResult.Cursor.TotalRecords {
log.Infof("QuerySnapshots retrieved all records (%d) for the SnapshotQuerySpec: %+v in %d iterations",
snapshotQueryResult.Cursor.TotalRecords, snapshotQuerySpec, iteration)
break
}
iteration++
snapshotQueryFilter.Cursor = &snapshotQueryResult.Cursor
}
return allQuerySnapshotResults, "", nil
}
type CnsVolumeDetails struct {
VolumeID string
SizeInMB int64
DatastoreUrl string
VolumeType string
}
// QueryVolumeDetailsUtil queries Capacity in MB and datastore URL for the source volume with expected volume type.
func QueryVolumeDetailsUtil(ctx context.Context, m cnsvolume.Manager, volumeIds []cnstypes.CnsVolumeId) (
map[string]*CnsVolumeDetails, error) {
log := logger.GetLogger(ctx)
volumeDetailsMap := make(map[string]*CnsVolumeDetails)
// Select only the backing object details, volume type and datastore.
querySelection := &cnstypes.CnsQuerySelection{
Names: []string{
string(cnstypes.QuerySelectionNameTypeBackingObjectDetails),
string(cnstypes.QuerySelectionNameTypeVolumeType),
string(cnstypes.QuerySelectionNameTypeDataStoreUrl),
},
}
queryFilter := cnstypes.CnsQueryFilter{
VolumeIds: volumeIds,
}
log.Infof("Invoking QueryAllVolumeUtil with Filter: %+v, Selection: %+v", queryFilter, *querySelection)
allQueryResults, err := m.QueryAllVolume(ctx, queryFilter, *querySelection)
if err != nil {
log.Errorf("failed to retrieve the volume size and datastore, err: %+v", err)
return volumeDetailsMap, logger.LogNewErrorCodef(log, codes.Internal,
"failed to retrieve the volume sizes: %+v", err)
}
log.Infof("Number of results from QueryAllVolumeUtil: %d", len(allQueryResults.Volumes))
for _, res := range allQueryResults.Volumes {
volumeId := res.VolumeId
datastoreUrl := res.DatastoreUrl
volumeCapacityInMB := res.BackingObjectDetails.GetCnsBackingObjectDetails().CapacityInMb
volumeType := res.VolumeType
log.Debugf("VOLUME: %s, TYPE: %s, DATASTORE: %s, CAPACITY: %d", volumeId, volumeType, datastoreUrl,
volumeCapacityInMB)
volumeDetails := &CnsVolumeDetails{
VolumeID: volumeId.Id,
SizeInMB: volumeCapacityInMB,
DatastoreUrl: datastoreUrl,
VolumeType: volumeType,
}
volumeDetailsMap[volumeId.Id] = volumeDetails
}
return volumeDetailsMap, nil
}
// LogoutAllvCenterSessions logs out all vCenter sessions and disconnects the vCenter clients
func LogoutAllvCenterSessions(ctx context.Context) {
log := logger.GetLogger(ctx)
log.Info("Logging out all vCenter sessions")
virtualcentermanager := cnsvsphere.GetVirtualCenterManager(ctx)
vCenters := virtualcentermanager.GetAllVirtualCenters()
managerInstanceMap := cnsvolume.GetAllManagerInstances(ctx)
for _, vc := range vCenters {
if vc.Client == nil {
continue
}
log.Info("Closing idle vCenter session")
vc.Client.CloseIdleConnections()
// logout vCenter session for list-view
mgr, ok := managerInstanceMap[vc.Config.Host]
if ok && mgr != nil {
err := mgr.LogoutListViewVCSession(ctx)
if err != nil {
continue
}
}
log.Infof("Disconnecting vCenter client for host %s", vc.Config.Host)
err := vc.Disconnect(ctx)
if err != nil {
log.Errorf("Error while disconnect vCenter client for host %s. Error: %+v", vc.Config.Host, err)
continue
}
log.Infof("Disconnected vCenter client for host %s", vc.Config.Host)
}
log.Info("Successfully logged out vCenter sessions")
}
| conditional_block |
block.go | package rpc
import (
"encoding/hex"
"fmt"
"net/http"
"strconv"
"time"
"github.com/gorilla/mux"
"github.com/orientwalt/htdf/client"
"github.com/orientwalt/htdf/client/context"
"github.com/orientwalt/htdf/codec"
sdk "github.com/orientwalt/htdf/types"
"github.com/orientwalt/htdf/types/rest"
sdkRest "github.com/orientwalt/htdf/types/rest"
"github.com/orientwalt/htdf/utils/unit_convert"
"github.com/orientwalt/htdf/x/auth"
htdfservice "github.com/orientwalt/htdf/x/core"
"github.com/spf13/cobra"
"github.com/spf13/viper"
tmliteProxy "github.com/tendermint/tendermint/lite/proxy"
ctypes "github.com/tendermint/tendermint/rpc/core/types"
tmTypes "github.com/tendermint/tendermint/types"
)
// BlockCommand returns the verified block data for a given height
func BlockCommand() *cobra.Command {
cmd := &cobra.Command{
Use: "block [height]",
Short: "Get verified data for a the block at given height",
Args: cobra.MaximumNArgs(1),
RunE: printBlock,
}
cmd.Flags().StringP(client.FlagNode, "n", "tcp://localhost:26657", "Node to connect to")
viper.BindPFlag(client.FlagNode, cmd.Flags().Lookup(client.FlagNode))
cmd.Flags().Bool(client.FlagTrustNode, false, "Trust connected full node (don't verify proofs for responses)")
viper.BindPFlag(client.FlagTrustNode, cmd.Flags().Lookup(client.FlagTrustNode))
return cmd
}
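// A minimal wiring sketch ("hscli" and rootCmd are placeholders for the
// actual CLI binary and root command):
//
//	rootCmd := &cobra.Command{Use: "hscli"}
//	rootCmd.AddCommand(BlockCommand())
//	// e.g.: hscli block 100 --node tcp://localhost:26657 --trust-node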
func getBlock(cliCtx context.CLIContext, height *int64) ([]byte, error) {
// get the node
node, err := cliCtx.GetNode()
if err != nil {
return nil, err
}
// header -> BlockchainInfo
// header, tx -> Block
// results -> BlockResults
res, err := node.Block(height)
if err != nil {
return nil, err
}
if !cliCtx.TrustNode {
check, err := cliCtx.Verify(res.Block.Height)
if err != nil {
return nil, err
}
err = tmliteProxy.ValidateBlockMeta(res.BlockMeta, check)
if err != nil {
return nil, err
}
err = tmliteProxy.ValidateBlock(res.Block, check)
if err != nil {
return nil, err
}
}
if cliCtx.Indent {
return cdc.MarshalJSONIndent(res, "", " ")
}
return cdc.MarshalJSON(res)
}
// GetChainHeight returns the current blockchain height
func GetChainHeight(cliCtx context.CLIContext) (int64, error) {
node, err := cliCtx.GetNode()
if err != nil {
return -1, err
}
status, err := node.Status()
if err != nil {
return -1, err
}
height := status.SyncInfo.LatestBlockHeight
return height, nil
}
// CMD
func printBlock(cmd *cobra.Command, args []string) error {
var height *int64
// optional height
if len(args) > 0 {
h, err := strconv.Atoi(args[0])
if err != nil {
return err
}
if h > 0 {
tmp := int64(h)
height = &tmp
}
}
output, err := getBlock(context.NewCLIContext(), height)
if err != nil {
return err
}
fmt.Println(string(output))
return nil
}
// REST
// REST handler to get a block
func BlockRequestHandlerFn(cliCtx context.CLIContext) http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
vars := mux.Vars(r)
height, err := strconv.ParseInt(vars["height"], 10, 64)
if err != nil {
rest.WriteErrorResponse(w, http.StatusBadRequest,
"ERROR: Couldn't parse block height. Assumed format is '/block/{height}'.")
return
}
chainHeight, err := GetChainHeight(cliCtx)
if err != nil {
rest.WriteErrorResponse(w, http.StatusInternalServerError, err.Error())
return
}
if height > chainHeight {
rest.WriteErrorResponse(w, http.StatusNotFound,
"ERROR: Requested block height is bigger than the current chain height.")
return
}
output, err := getBlock(cliCtx, &height)
if err != nil {
rest.WriteErrorResponse(w, http.StatusInternalServerError, err.Error())
return
}
rest.PostProcessResponse(w, cdc, output, cliCtx.Indent)
}
}
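// A minimal sketch (not part of the original file) of mounting the handlers
// above on a mux router. The "/block/{height}" path is assumed from the
// error message in BlockRequestHandlerFn, the "/latest-block" path is
// illustrative, and the real route registration may live elsewhere.
func registerBlockRoutes(cliCtx context.CLIContext, r *mux.Router) {
	r.HandleFunc("/block/{height}", BlockRequestHandlerFn(cliCtx)).Methods("GET")
	r.HandleFunc("/latest-block", LatestBlockRequestHandlerFn(cliCtx)).Methods("GET")
}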
// REST handler to get the latest block
func LatestBlockRequestHandlerFn(cliCtx context.CLIContext) http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
height, err := GetChainHeight(cliCtx)
if err != nil {
rest.WriteErrorResponse(w, http.StatusInternalServerError, err.Error())
return
}
output, err := getBlock(cliCtx, &height)
if err != nil {
rest.WriteErrorResponse(w, http.StatusInternalServerError, err.Error())
return
}
rest.PostProcessResponse(w, cdc, output, cliCtx.Indent)
}
}
//BlockDetails struct
type DisplayTx struct {
From sdk.AccAddress
To sdk.AccAddress
Amount []sdk.BigCoin
Hash string
Memo string
Data string
}
type DisplayBlock struct {
Txs []DisplayTx `json:"txs"`
Evidence tmTypes.EvidenceData `json:"evidence"`
LastCommit *tmTypes.Commit `json:"last_commit"`
}
type BlockInfo struct {
BlockMeta *tmTypes.BlockMeta `json:"block_meta"`
Block DisplayBlock `json:"block"`
Time string `json:"time"`
}
type DisplayFee struct {
// Amount []sdk.BigCoin `json:"amount"` // junying-todo, 2019-12-07
GasWanted string `json:"gas_wanted"`
GasPrice string `json:"gas_price"`
}
type StdTx struct {
Msgs []DisplayTx `json:"msg"`
Fee DisplayFee `json:"fee"`
Signatures []auth.StdSignature `json:"signatures"`
Memo string `json:"memo"`
}
type GetTxResponse struct {
Height int64 `json:"height"`
TxHash string `json:"txhash"`
Code uint32 `json:"code,omitempty"`
Log sdk.ABCIMessageLogs `json:"log,omitempty"`
Info string `json:"info,omitempty"`
GasWanted int64 `json:"gas_wanted,omitempty"`
GasUsed int64 `json:"gas_used,omitempty"`
Tags sdk.StringTags `json:"tags,omitempty"`
Codespace string `json:"codespace,omitempty"`
Tx StdTx `json:"tx,omitempty"`
// Data string `json:"data,omitempty"`
}
// GetBlockDetailFn
func GetBlockDetailFn(cliCtx context.CLIContext, cdc *codec.Codec) http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
vars := mux.Vars(r)
height, err := strconv.ParseInt(vars["height"], 10, 64)
if err != nil {
w.WriteHeader(http.StatusBadRequest)
w.Write([]byte("ERROR: Couldn't parse block height. Assumed format is '/block/{height}'."))
return
}
chainHeight, err := GetChainHeight(cliCtx)
if err != nil {
w.WriteHeader(http.StatusInternalServerError)
w.Write([]byte(err.Error()))
return
}
if height > chainHeight {
w.WriteHeader(http.StatusNotFound)
w.Write([]byte("ERROR: Requested block height is bigger than the current chain height."))
return
}
// get the node
node, err := cliCtx.GetNode()
if err != nil {
fmt.Printf("getNode error|err=%s\n", err)
sdkRest.WriteErrorResponse(w, http.StatusInternalServerError, err.Error())
return
}
var blockInfo BlockInfo
//get Block info
resultBlock, err := node.Block(&height)
if err != nil {
fmt.Printf("get block error|err=%s\n", err)
sdkRest.WriteErrorResponse(w, http.StatusInternalServerError, err.Error())
return
}
blockInfo.BlockMeta = resultBlock.BlockMeta
blockInfo.Block.Evidence = resultBlock.Block.Evidence
blockInfo.Block.LastCommit = resultBlock.Block.LastCommit
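// Note: "2006-01-02 15:04:05" below is Go's reference-time layout string,
// i.e. a datetime format specifier, not a hard-coded date.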
blockInfo.Time = resultBlock.BlockMeta.Header.Time.Local().Format("2006-01-02 15:04:05")
for _, tx := range resultBlock.Block.Data.Txs {
sdkTx, err := parseTx(cdc, tx)
if err != nil {
fmt.Printf("parseTx error|err=%s\n", err)
sdkRest.WriteErrorResponse(w, http.StatusInternalServerError, err.Error())
return
}
switch currTx := sdkTx.(type) {
case auth.StdTx:
var displayTx DisplayTx
for _, msg := range currTx.GetMsgs() {
//fmt.Printf("msg|route=%s|type=%s\n", msg.Route(), msg.Type())
switch msg := msg.(type) {
case htdfservice.MsgSend:
displayTx.From = msg.From
displayTx.To = msg.To
displayTx.Hash = hex.EncodeToString(tx.Hash())
displayTx.Amount = unit_convert.DefaultCoinsToBigCoins(msg.Amount)
displayTx.Memo = currTx.Memo
blockInfo.Block.Txs = append(blockInfo.Block.Txs, displayTx)
//fmt.Printf("msg|from=%s|to=%s\n", msg.From, msg.To)
default:
fmt.Printf("ignore type|type=%s|route=%s\n", msg.Type(), msg.Route())
continue
}
}
default:
fmt.Printf("unknown type: %+v\n", currTx)
}
}
sdkRest.PostProcessResponse(w, cdc, &blockInfo, cliCtx.Indent)
}
}
func parseTx(cdc *codec.Codec, txBytes []byte) (sdk.Tx, error) {
var tx auth.StdTx
err := cdc.UnmarshalBinaryLengthPrefixed(txBytes, &tx)
if err != nil {
return nil, err
} | func formatTxResult(cdc *codec.Codec, res *ctypes.ResultTx, resBlock *ctypes.ResultBlock) (sdk.TxResponse, error) {
tx, err := parseTx(cdc, res.Tx)
if err != nil {
return sdk.TxResponse{}, err
}
return sdk.NewResponseResultTx(res, tx, resBlock.Block.Time.Format(time.RFC3339)), nil
}
//
func GetTxFn(cdc *codec.Codec, cliCtx context.CLIContext) http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
vars := mux.Vars(r)
hashHexStr := vars["hash"]
hash, err := hex.DecodeString(hashHexStr)
if err != nil {
sdkRest.WriteErrorResponse(w, http.StatusInternalServerError, err.Error())
return
}
// get the node
node, err := cliCtx.GetNode()
if err != nil {
sdkRest.WriteErrorResponse(w, http.StatusInternalServerError, err.Error())
return
}
resultTx, err := node.Tx(hash, !cliCtx.TrustNode)
if err != nil {
sdkRest.WriteErrorResponse(w, http.StatusInternalServerError, err.Error())
return
}
resBlocks, err := getBlocksForTxResults(cliCtx, []*ctypes.ResultTx{resultTx})
if err != nil {
sdkRest.WriteErrorResponse(w, http.StatusInternalServerError, err.Error())
return
}
txResp, err := formatTxResult(cdc, resultTx, resBlocks[resultTx.Height])
if err != nil {
sdkRest.WriteErrorResponse(w, http.StatusInternalServerError, err.Error())
return
}
var getTxResponse GetTxResponse
getTxResponse.Height = txResp.Height
getTxResponse.TxHash = txResp.TxHash
getTxResponse.Code = txResp.Code
// getTxResponse.Data = txResp.Data
getTxResponse.Log = txResp.Logs
getTxResponse.Info = txResp.Info
getTxResponse.GasWanted = txResp.GasWanted
getTxResponse.GasUsed = txResp.GasUsed
getTxResponse.Tags = txResp.Tags
getTxResponse.Codespace = txResp.Codespace
// switch currTx := txResp.Tx.(type) {
// case auth.StdTx:
// // getTxResponse.Tx.Fee.Amount = unit_convert.DefaultCoinsToBigCoins(currTx.Fee.Amount())
// // getTxResponse.Tx.Fee.GasWanted = unit_convert.DefaultAmoutToBigAmount(strconv.FormatUint(currTx.Fee.GasWanted, 10))
// getTxResponse.Tx.Signatures = currTx.Signatures
// getTxResponse.Tx.Memo = currTx.Memo
// var displayTx DisplayTx
// for _, msg := range currTx.GetMsgs() {
// //fmt.Printf("msg|route=%s|type=%s\n", msg.Route(), msg.Type())
// switch msg := msg.(type) {
// case htdfservice.MsgSend:
// displayTx.From = msg.From
// displayTx.To = msg.To
// displayTx.Amount = unit_convert.DefaultCoinsToBigCoins(msg.Amount)
// getTxResponse.Tx.Msgs = append(getTxResponse.Tx.Msgs, displayTx)
// default:
// fmt.Printf("ignore type|type=%s|route=%s\n", msg.Type(), msg.Route())
// continue
// }
// }
// default:
// fmt.Printf("unknown type: %+v\n", currTx)
// }
sdkRest.PostProcessResponse(w, cdc, getTxResponse, cliCtx.Indent)
}
}
func getBlocksForTxResults(cliCtx context.CLIContext, resTxs []*ctypes.ResultTx) (map[int64]*ctypes.ResultBlock, error) {
node, err := cliCtx.GetNode()
if err != nil {
return nil, err
}
resBlocks := make(map[int64]*ctypes.ResultBlock)
for _, resTx := range resTxs {
if _, ok := resBlocks[resTx.Height]; !ok {
resBlock, err := node.Block(&resTx.Height)
if err != nil {
return nil, err
}
resBlocks[resTx.Height] = resBlock
}
}
return resBlocks, nil
} |
return tx, nil
}
| random_line_split |
block.go | package rpc
import (
"encoding/hex"
"fmt"
"net/http"
"strconv"
"time"
"github.com/gorilla/mux"
"github.com/orientwalt/htdf/client"
"github.com/orientwalt/htdf/client/context"
"github.com/orientwalt/htdf/codec"
sdk "github.com/orientwalt/htdf/types"
"github.com/orientwalt/htdf/types/rest"
sdkRest "github.com/orientwalt/htdf/types/rest"
"github.com/orientwalt/htdf/utils/unit_convert"
"github.com/orientwalt/htdf/x/auth"
htdfservice "github.com/orientwalt/htdf/x/core"
"github.com/spf13/cobra"
"github.com/spf13/viper"
tmliteProxy "github.com/tendermint/tendermint/lite/proxy"
ctypes "github.com/tendermint/tendermint/rpc/core/types"
tmTypes "github.com/tendermint/tendermint/types"
)
//BlockCommand returns the verified block data for a given height
func BlockCommand() *cobra.Command {
cmd := &cobra.Command{
Use: "block [height]",
Short: "Get verified data for a the block at given height",
Args: cobra.MaximumNArgs(1),
RunE: printBlock,
}
cmd.Flags().StringP(client.FlagNode, "n", "tcp://localhost:26657", "Node to connect to")
viper.BindPFlag(client.FlagNode, cmd.Flags().Lookup(client.FlagNode))
cmd.Flags().Bool(client.FlagTrustNode, false, "Trust connected full node (don't verify proofs for responses)")
viper.BindPFlag(client.FlagTrustNode, cmd.Flags().Lookup(client.FlagTrustNode))
return cmd
}
func getBlock(cliCtx context.CLIContext, height *int64) ([]byte, error) {
// get the node
node, err := cliCtx.GetNode()
if err != nil {
return nil, err
}
// header -> BlockchainInfo
// header, tx -> Block
// results -> BlockResults
res, err := node.Block(height)
if err != nil {
return nil, err
}
if !cliCtx.TrustNode {
check, err := cliCtx.Verify(res.Block.Height)
if err != nil {
return nil, err
}
err = tmliteProxy.ValidateBlockMeta(res.BlockMeta, check)
if err != nil {
return nil, err
}
err = tmliteProxy.ValidateBlock(res.Block, check)
if err != nil {
return nil, err
}
}
if cliCtx.Indent {
return cdc.MarshalJSONIndent(res, "", " ")
}
return cdc.MarshalJSON(res)
}
// get the current blockchain height
func GetChainHeight(cliCtx context.CLIContext) (int64, error) |
// CMD
func printBlock(cmd *cobra.Command, args []string) error {
var height *int64
// optional height
if len(args) > 0 {
h, err := strconv.Atoi(args[0])
if err != nil {
return err
}
if h > 0 {
tmp := int64(h)
height = &tmp
}
}
output, err := getBlock(context.NewCLIContext(), height)
if err != nil {
return err
}
fmt.Println(string(output))
return nil
}
// REST
// REST handler to get a block
func BlockRequestHandlerFn(cliCtx context.CLIContext) http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
vars := mux.Vars(r)
height, err := strconv.ParseInt(vars["height"], 10, 64)
if err != nil {
rest.WriteErrorResponse(w, http.StatusBadRequest,
"ERROR: Couldn't parse block height. Assumed format is '/block/{height}'.")
return
}
chainHeight, err := GetChainHeight(cliCtx)
if err != nil {
rest.WriteErrorResponse(w, http.StatusInternalServerError, err.Error())
return
}
if height > chainHeight {
rest.WriteErrorResponse(w, http.StatusNotFound,
"ERROR: Requested block height is bigger than the current chain height.")
return
}
output, err := getBlock(cliCtx, &height)
if err != nil {
rest.WriteErrorResponse(w, http.StatusInternalServerError, err.Error())
return
}
rest.PostProcessResponse(w, cdc, output, cliCtx.Indent)
}
}
// REST handler to get the latest block
func LatestBlockRequestHandlerFn(cliCtx context.CLIContext) http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
height, err := GetChainHeight(cliCtx)
if err != nil {
rest.WriteErrorResponse(w, http.StatusInternalServerError, err.Error())
return
}
output, err := getBlock(cliCtx, &height)
if err != nil {
rest.WriteErrorResponse(w, http.StatusInternalServerError, err.Error())
return
}
rest.PostProcessResponse(w, cdc, output, cliCtx.Indent)
}
}
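// A minimal sketch (not part of the original file) of exercising the handler
// above in a unit test; it assumes "net/http/httptest" is added to the
// imports, which this file does not currently include.
//
// func exampleLatestBlock(cliCtx context.CLIContext) []byte {
//     rec := httptest.NewRecorder()
//     req, _ := http.NewRequest("GET", "/latest-block", nil)
//     LatestBlockRequestHandlerFn(cliCtx)(rec, req)
//     return rec.Body.Bytes()
// }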
//BlockDetails struct
type DisplayTx struct {
From sdk.AccAddress
To sdk.AccAddress
Amount []sdk.BigCoin
Hash string
Memo string
Data string
}
type DisplayBlock struct {
Txs []DisplayTx `json:"txs"`
Evidence tmTypes.EvidenceData `json:"evidence"`
LastCommit *tmTypes.Commit `json:"last_commit"`
}
type BlockInfo struct {
BlockMeta *tmTypes.BlockMeta `json:"block_meta"`
Block DisplayBlock `json:"block"`
Time string `json:"time"`
}
type DisplayFee struct {
// Amount []sdk.BigCoin `json:"amount"` // junying-todo, 2019-12-07
GasWanted string `json:"gas_wanted"`
GasPrice string `json:"gas_price"`
}
type StdTx struct {
Msgs []DisplayTx `json:"msg"`
Fee DisplayFee `json:"fee"`
Signatures []auth.StdSignature `json:"signatures"`
Memo string `json:"memo"`
}
type GetTxResponse struct {
Height int64 `json:"height"`
TxHash string `json:"txhash"`
Code uint32 `json:"code,omitempty"`
Log sdk.ABCIMessageLogs `json:"log,omitempty"`
Info string `json:"info,omitempty"`
GasWanted int64 `json:"gas_wanted,omitempty"`
GasUsed int64 `json:"gas_used,omitempty"`
Tags sdk.StringTags `json:"tags,omitempty"`
Codespace string `json:"codespace,omitempty"`
Tx StdTx `json:"tx,omitempty"`
// Data string `json:"data,omitempty"`
}
// GetBlockDetailFn
func GetBlockDetailFn(cliCtx context.CLIContext, cdc *codec.Codec) http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
vars := mux.Vars(r)
height, err := strconv.ParseInt(vars["height"], 10, 64)
if err != nil {
w.WriteHeader(http.StatusBadRequest)
w.Write([]byte("ERROR: Couldn't parse block height. Assumed format is '/block/{height}'."))
return
}
chainHeight, err := GetChainHeight(cliCtx)
if err != nil {
w.WriteHeader(http.StatusInternalServerError)
w.Write([]byte(err.Error()))
return
}
if height > chainHeight {
w.WriteHeader(http.StatusNotFound)
w.Write([]byte("ERROR: Requested block height is bigger than the current chain height."))
return
}
// get the node
node, err := cliCtx.GetNode()
if err != nil {
fmt.Printf("getNode error|err=%s\n", err)
sdkRest.WriteErrorResponse(w, http.StatusInternalServerError, err.Error())
return
}
var blockInfo BlockInfo
//get Block info
resultBlock, err := node.Block(&height)
if err != nil {
fmt.Printf("get block error|err=%s\n", err)
sdkRest.WriteErrorResponse(w, http.StatusInternalServerError, err.Error())
return
}
blockInfo.BlockMeta = resultBlock.BlockMeta
blockInfo.Block.Evidence = resultBlock.Block.Evidence
blockInfo.Block.LastCommit = resultBlock.Block.LastCommit
blockInfo.Time = resultBlock.BlockMeta.Header.Time.Local().Format("2006-01-02 15:04:05")
for _, tx := range resultBlock.Block.Data.Txs {
sdkTx, err := parseTx(cdc, tx)
if err != nil {
fmt.Printf("parseTx error|err=%s\n", err)
sdkRest.WriteErrorResponse(w, http.StatusInternalServerError, err.Error())
return
}
switch currTx := sdkTx.(type) {
case auth.StdTx:
var displayTx DisplayTx
for _, msg := range currTx.GetMsgs() {
//fmt.Printf("msg|route=%s|type=%s\n", msg.Route(), msg.Type())
switch msg := msg.(type) {
case htdfservice.MsgSend:
displayTx.From = msg.From
displayTx.To = msg.To
displayTx.Hash = hex.EncodeToString(tx.Hash())
displayTx.Amount = unit_convert.DefaultCoinsToBigCoins(msg.Amount)
displayTx.Memo = currTx.Memo
blockInfo.Block.Txs = append(blockInfo.Block.Txs, displayTx)
//fmt.Printf("msg|from=%s|to=%s\n", msg.From, msg.To)
default:
fmt.Printf("ignore type|type=%s|route=%s\n", msg.Type(), msg.Route())
continue
}
}
default:
fmt.Printf("unknown type: %+v\n", currTx)
}
}
sdkRest.PostProcessResponse(w, cdc, &blockInfo, cliCtx.Indent)
}
}
func parseTx(cdc *codec.Codec, txBytes []byte) (sdk.Tx, error) {
var tx auth.StdTx
err := cdc.UnmarshalBinaryLengthPrefixed(txBytes, &tx)
if err != nil {
return nil, err
}
return tx, nil
}
func formatTxResult(cdc *codec.Codec, res *ctypes.ResultTx, resBlock *ctypes.ResultBlock) (sdk.TxResponse, error) {
tx, err := parseTx(cdc, res.Tx)
if err != nil {
return sdk.TxResponse{}, err
}
return sdk.NewResponseResultTx(res, tx, resBlock.Block.Time.Format(time.RFC3339)), nil
}
//
func GetTxFn(cdc *codec.Codec, cliCtx context.CLIContext) http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
vars := mux.Vars(r)
hashHexStr := vars["hash"]
hash, err := hex.DecodeString(hashHexStr)
if err != nil {
sdkRest.WriteErrorResponse(w, http.StatusInternalServerError, err.Error())
return
}
// get the node
node, err := cliCtx.GetNode()
if err != nil {
sdkRest.WriteErrorResponse(w, http.StatusInternalServerError, err.Error())
return
}
resultTx, err := node.Tx(hash, !cliCtx.TrustNode)
if err != nil {
sdkRest.WriteErrorResponse(w, http.StatusInternalServerError, err.Error())
return
}
resBlocks, err := getBlocksForTxResults(cliCtx, []*ctypes.ResultTx{resultTx})
if err != nil {
sdkRest.WriteErrorResponse(w, http.StatusInternalServerError, err.Error())
return
}
txResp, err := formatTxResult(cdc, resultTx, resBlocks[resultTx.Height])
if err != nil {
sdkRest.WriteErrorResponse(w, http.StatusInternalServerError, err.Error())
return
}
var getTxResponse GetTxResponse
getTxResponse.Height = txResp.Height
getTxResponse.TxHash = txResp.TxHash
getTxResponse.Code = txResp.Code
// getTxResponse.Data = txResp.Data
getTxResponse.Log = txResp.Logs
getTxResponse.Info = txResp.Info
getTxResponse.GasWanted = txResp.GasWanted
getTxResponse.GasUsed = txResp.GasUsed
getTxResponse.Tags = txResp.Tags
getTxResponse.Codespace = txResp.Codespace
// switch currTx := txResp.Tx.(type) {
// case auth.StdTx:
// // getTxResponse.Tx.Fee.Amount = unit_convert.DefaultCoinsToBigCoins(currTx.Fee.Amount())
// // getTxResponse.Tx.Fee.GasWanted = unit_convert.DefaultAmoutToBigAmount(strconv.FormatUint(currTx.Fee.GasWanted, 10))
// getTxResponse.Tx.Signatures = currTx.Signatures
// getTxResponse.Tx.Memo = currTx.Memo
// var displayTx DisplayTx
// for _, msg := range currTx.GetMsgs() {
// //fmt.Printf("msg|route=%s|type=%s\n", msg.Route(), msg.Type())
// switch msg := msg.(type) {
// case htdfservice.MsgSend:
// displayTx.From = msg.From
// displayTx.To = msg.To
// displayTx.Amount = unit_convert.DefaultCoinsToBigCoins(msg.Amount)
// getTxResponse.Tx.Msgs = append(getTxResponse.Tx.Msgs, displayTx)
// default:
// fmt.Printf("ignore type|type=%s|route=%s\n", msg.Type(), msg.Route())
// continue
// }
// }
// default:
// fmt.Printf("unknown type: %+v\n", currTx)
// }
sdkRest.PostProcessResponse(w, cdc, getTxResponse, cliCtx.Indent)
}
}
func getBlocksForTxResults(cliCtx context.CLIContext, resTxs []*ctypes.ResultTx) (map[int64]*ctypes.ResultBlock, error) {
node, err := cliCtx.GetNode()
if err != nil {
return nil, err
}
resBlocks := make(map[int64]*ctypes.ResultBlock)
for _, resTx := range resTxs {
if _, ok := resBlocks[resTx.Height]; !ok {
resBlock, err := node.Block(&resTx.Height)
if err != nil {
return nil, err
}
resBlocks[resTx.Height] = resBlock
}
}
return resBlocks, nil
}
| {
node, err := cliCtx.GetNode()
if err != nil {
return -1, err
}
status, err := node.Status()
if err != nil {
return -1, err
}
height := status.SyncInfo.LatestBlockHeight
return height, nil
} | identifier_body |
block.go | package rpc
import (
"encoding/hex"
"fmt"
"net/http"
"strconv"
"time"
"github.com/gorilla/mux"
"github.com/orientwalt/htdf/client"
"github.com/orientwalt/htdf/client/context"
"github.com/orientwalt/htdf/codec"
sdk "github.com/orientwalt/htdf/types"
"github.com/orientwalt/htdf/types/rest"
sdkRest "github.com/orientwalt/htdf/types/rest"
"github.com/orientwalt/htdf/utils/unit_convert"
"github.com/orientwalt/htdf/x/auth"
htdfservice "github.com/orientwalt/htdf/x/core"
"github.com/spf13/cobra"
"github.com/spf13/viper"
tmliteProxy "github.com/tendermint/tendermint/lite/proxy"
ctypes "github.com/tendermint/tendermint/rpc/core/types"
tmTypes "github.com/tendermint/tendermint/types"
)
//BlockCommand returns the verified block data for a given height
func BlockCommand() *cobra.Command {
cmd := &cobra.Command{
Use: "block [height]",
Short: "Get verified data for a the block at given height",
Args: cobra.MaximumNArgs(1),
RunE: printBlock,
}
cmd.Flags().StringP(client.FlagNode, "n", "tcp://localhost:26657", "Node to connect to")
viper.BindPFlag(client.FlagNode, cmd.Flags().Lookup(client.FlagNode))
cmd.Flags().Bool(client.FlagTrustNode, false, "Trust connected full node (don't verify proofs for responses)")
viper.BindPFlag(client.FlagTrustNode, cmd.Flags().Lookup(client.FlagTrustNode))
return cmd
}
func getBlock(cliCtx context.CLIContext, height *int64) ([]byte, error) {
// get the node
node, err := cliCtx.GetNode()
if err != nil {
return nil, err
}
// header -> BlockchainInfo
// header, tx -> Block
// results -> BlockResults
res, err := node.Block(height)
if err != nil {
return nil, err
}
if !cliCtx.TrustNode {
check, err := cliCtx.Verify(res.Block.Height)
if err != nil {
return nil, err
}
err = tmliteProxy.ValidateBlockMeta(res.BlockMeta, check)
if err != nil {
return nil, err
}
err = tmliteProxy.ValidateBlock(res.Block, check)
if err != nil {
return nil, err
}
}
if cliCtx.Indent {
return cdc.MarshalJSONIndent(res, "", " ")
}
return cdc.MarshalJSON(res)
}
// get the current blockchain height
func GetChainHeight(cliCtx context.CLIContext) (int64, error) {
node, err := cliCtx.GetNode()
if err != nil {
return -1, err
}
status, err := node.Status()
if err != nil {
return -1, err
}
height := status.SyncInfo.LatestBlockHeight
return height, nil
}
// CMD
func printBlock(cmd *cobra.Command, args []string) error {
var height *int64
// optional height
if len(args) > 0 {
h, err := strconv.Atoi(args[0])
if err != nil {
return err
}
if h > 0 {
tmp := int64(h)
height = &tmp
}
}
output, err := getBlock(context.NewCLIContext(), height)
if err != nil {
return err
}
fmt.Println(string(output))
return nil
}
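// A minimal sketch (not part of the original file) of wiring BlockCommand
// into an application's root cobra command; "rootCmd" is illustrative.
func exampleAddBlockCommand(rootCmd *cobra.Command) {
	rootCmd.AddCommand(BlockCommand())
}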
// REST
// REST handler to get a block
func | (cliCtx context.CLIContext) http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
vars := mux.Vars(r)
height, err := strconv.ParseInt(vars["height"], 10, 64)
if err != nil {
rest.WriteErrorResponse(w, http.StatusBadRequest,
"ERROR: Couldn't parse block height. Assumed format is '/block/{height}'.")
return
}
chainHeight, err := GetChainHeight(cliCtx)
if err != nil {
rest.WriteErrorResponse(w, http.StatusInternalServerError, err.Error())
return
}
if height > chainHeight {
rest.WriteErrorResponse(w, http.StatusNotFound,
"ERROR: Requested block height is bigger than the current chain height.")
return
}
output, err := getBlock(cliCtx, &height)
if err != nil {
rest.WriteErrorResponse(w, http.StatusInternalServerError, err.Error())
return
}
rest.PostProcessResponse(w, cdc, output, cliCtx.Indent)
}
}
// REST handler to get the latest block
func LatestBlockRequestHandlerFn(cliCtx context.CLIContext) http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
height, err := GetChainHeight(cliCtx)
if err != nil {
rest.WriteErrorResponse(w, http.StatusInternalServerError, err.Error())
return
}
output, err := getBlock(cliCtx, &height)
if err != nil {
rest.WriteErrorResponse(w, http.StatusInternalServerError, err.Error())
return
}
rest.PostProcessResponse(w, cdc, output, cliCtx.Indent)
}
}
//BlockDetails struct
type DisplayTx struct {
From sdk.AccAddress
To sdk.AccAddress
Amount []sdk.BigCoin
Hash string
Memo string
Data string
}
type DisplayBlock struct {
Txs []DisplayTx `json:"txs"`
Evidence tmTypes.EvidenceData `json:"evidence"`
LastCommit *tmTypes.Commit `json:"last_commit"`
}
type BlockInfo struct {
BlockMeta *tmTypes.BlockMeta `json:"block_meta"`
Block DisplayBlock `json:"block"`
Time string `json:"time"`
}
type DisplayFee struct {
// Amount []sdk.BigCoin `json:"amount"` // junying-todo, 2019-12-07
GasWanted string `json:"gas_wanted"`
GasPrice string `json:"gas_price"`
}
type StdTx struct {
Msgs []DisplayTx `json:"msg"`
Fee DisplayFee `json:"fee"`
Signatures []auth.StdSignature `json:"signatures"`
Memo string `json:"memo"`
}
type GetTxResponse struct {
Height int64 `json:"height"`
TxHash string `json:"txhash"`
Code uint32 `json:"code,omitempty"`
Log sdk.ABCIMessageLogs `json:"log,omitempty"`
Info string `json:"info,omitempty"`
GasWanted int64 `json:"gas_wanted,omitempty"`
GasUsed int64 `json:"gas_used,omitempty"`
Tags sdk.StringTags `json:"tags,omitempty"`
Codespace string `json:"codespace,omitempty"`
Tx StdTx `json:"tx,omitempty"`
// Data string `json:"data,omitempty"`
}
// GetBlockDetailFn
func GetBlockDetailFn(cliCtx context.CLIContext, cdc *codec.Codec) http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
vars := mux.Vars(r)
height, err := strconv.ParseInt(vars["height"], 10, 64)
if err != nil {
w.WriteHeader(http.StatusBadRequest)
w.Write([]byte("ERROR: Couldn't parse block height. Assumed format is '/block/{height}'."))
return
}
chainHeight, err := GetChainHeight(cliCtx)
if err != nil {
w.WriteHeader(http.StatusInternalServerError)
w.Write([]byte(err.Error()))
return
}
if height > chainHeight {
w.WriteHeader(http.StatusNotFound)
w.Write([]byte("ERROR: Requested block height is bigger than the current chain height."))
return
}
// get the node
node, err := cliCtx.GetNode()
if err != nil {
fmt.Printf("getNode error|err=%s\n", err)
sdkRest.WriteErrorResponse(w, http.StatusInternalServerError, err.Error())
return
}
var blockInfo BlockInfo
//get Block info
resultBlock, err := node.Block(&height)
if err != nil {
fmt.Printf("get block error|err=%s\n", err)
sdkRest.WriteErrorResponse(w, http.StatusInternalServerError, err.Error())
return
}
blockInfo.BlockMeta = resultBlock.BlockMeta
blockInfo.Block.Evidence = resultBlock.Block.Evidence
blockInfo.Block.LastCommit = resultBlock.Block.LastCommit
blockInfo.Time = resultBlock.BlockMeta.Header.Time.Local().Format("2006-01-02 15:04:05")
for _, tx := range resultBlock.Block.Data.Txs {
sdkTx, err := parseTx(cdc, tx)
if err != nil {
fmt.Printf("parseTx error|err=%s\n", err)
sdkRest.WriteErrorResponse(w, http.StatusInternalServerError, err.Error())
return
}
switch currTx := sdkTx.(type) {
case auth.StdTx:
var displayTx DisplayTx
for _, msg := range currTx.GetMsgs() {
//fmt.Printf("msg|route=%s|type=%s\n", msg.Route(), msg.Type())
switch msg := msg.(type) {
case htdfservice.MsgSend:
displayTx.From = msg.From
displayTx.To = msg.To
displayTx.Hash = hex.EncodeToString(tx.Hash())
displayTx.Amount = unit_convert.DefaultCoinsToBigCoins(msg.Amount)
displayTx.Memo = currTx.Memo
blockInfo.Block.Txs = append(blockInfo.Block.Txs, displayTx)
//fmt.Printf("msg|from=%s|to=%s\n", msg.From, msg.To)
default:
fmt.Printf("ignore type|type=%s|route=%s\n", msg.Type(), msg.Route())
continue
}
}
default:
fmt.Printf("unknown type: %+v\n", currTx)
}
}
sdkRest.PostProcessResponse(w, cdc, &blockInfo, cliCtx.Indent)
}
}
func parseTx(cdc *codec.Codec, txBytes []byte) (sdk.Tx, error) {
var tx auth.StdTx
err := cdc.UnmarshalBinaryLengthPrefixed(txBytes, &tx)
if err != nil {
return nil, err
}
return tx, nil
}
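// A minimal sketch (not part of the original file) of feeding parseTx with a
// hex-encoded transaction, e.g. one copied from a block explorer; the helper
// name and the idea of taking a hex string are illustrative.
func exampleParseHexTx(cdc *codec.Codec, txHex string) (sdk.Tx, error) {
	raw, err := hex.DecodeString(txHex)
	if err != nil {
		return nil, err
	}
	return parseTx(cdc, raw)
}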
func formatTxResult(cdc *codec.Codec, res *ctypes.ResultTx, resBlock *ctypes.ResultBlock) (sdk.TxResponse, error) {
tx, err := parseTx(cdc, res.Tx)
if err != nil {
return sdk.TxResponse{}, err
}
return sdk.NewResponseResultTx(res, tx, resBlock.Block.Time.Format(time.RFC3339)), nil
}
//
func GetTxFn(cdc *codec.Codec, cliCtx context.CLIContext) http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
vars := mux.Vars(r)
hashHexStr := vars["hash"]
hash, err := hex.DecodeString(hashHexStr)
if err != nil {
sdkRest.WriteErrorResponse(w, http.StatusInternalServerError, err.Error())
return
}
// get the node
node, err := cliCtx.GetNode()
if err != nil {
sdkRest.WriteErrorResponse(w, http.StatusInternalServerError, err.Error())
return
}
resultTx, err := node.Tx(hash, !cliCtx.TrustNode)
if err != nil {
sdkRest.WriteErrorResponse(w, http.StatusInternalServerError, err.Error())
return
}
resBlocks, err := getBlocksForTxResults(cliCtx, []*ctypes.ResultTx{resultTx})
if err != nil {
sdkRest.WriteErrorResponse(w, http.StatusInternalServerError, err.Error())
return
}
txResp, err := formatTxResult(cdc, resultTx, resBlocks[resultTx.Height])
if err != nil {
sdkRest.WriteErrorResponse(w, http.StatusInternalServerError, err.Error())
return
}
var getTxResponse GetTxResponse
getTxResponse.Height = txResp.Height
getTxResponse.TxHash = txResp.TxHash
getTxResponse.Code = txResp.Code
// getTxResponse.Data = txResp.Data
getTxResponse.Log = txResp.Logs
getTxResponse.Info = txResp.Info
getTxResponse.GasWanted = txResp.GasWanted
getTxResponse.GasUsed = txResp.GasUsed
getTxResponse.Tags = txResp.Tags
getTxResponse.Codespace = txResp.Codespace
// switch currTx := txResp.Tx.(type) {
// case auth.StdTx:
// // getTxResponse.Tx.Fee.Amount = unit_convert.DefaultCoinsToBigCoins(currTx.Fee.Amount())
// // getTxResponse.Tx.Fee.GasWanted = unit_convert.DefaultAmoutToBigAmount(strconv.FormatUint(currTx.Fee.GasWanted, 10))
// getTxResponse.Tx.Signatures = currTx.Signatures
// getTxResponse.Tx.Memo = currTx.Memo
// var displayTx DisplayTx
// for _, msg := range currTx.GetMsgs() {
// //fmt.Printf("msg|route=%s|type=%s\n", msg.Route(), msg.Type())
// switch msg := msg.(type) {
// case htdfservice.MsgSend:
// displayTx.From = msg.From
// displayTx.To = msg.To
// displayTx.Amount = unit_convert.DefaultCoinsToBigCoins(msg.Amount)
// getTxResponse.Tx.Msgs = append(getTxResponse.Tx.Msgs, displayTx)
// default:
// fmt.Printf("ignore type|type=%s|route=%s\n", msg.Type(), msg.Route())
// continue
// }
// }
// default:
// fmt.Printf("unknown type: %+v\n", currTx)
// }
sdkRest.PostProcessResponse(w, cdc, getTxResponse, cliCtx.Indent)
}
}
func getBlocksForTxResults(cliCtx context.CLIContext, resTxs []*ctypes.ResultTx) (map[int64]*ctypes.ResultBlock, error) {
node, err := cliCtx.GetNode()
if err != nil {
return nil, err
}
resBlocks := make(map[int64]*ctypes.ResultBlock)
for _, resTx := range resTxs {
if _, ok := resBlocks[resTx.Height]; !ok {
resBlock, err := node.Block(&resTx.Height)
if err != nil {
return nil, err
}
resBlocks[resTx.Height] = resBlock
}
}
return resBlocks, nil
}
| BlockRequestHandlerFn | identifier_name |
block.go | package rpc
import (
"encoding/hex"
"fmt"
"net/http"
"strconv"
"time"
"github.com/gorilla/mux"
"github.com/orientwalt/htdf/client"
"github.com/orientwalt/htdf/client/context"
"github.com/orientwalt/htdf/codec"
sdk "github.com/orientwalt/htdf/types"
"github.com/orientwalt/htdf/types/rest"
sdkRest "github.com/orientwalt/htdf/types/rest"
"github.com/orientwalt/htdf/utils/unit_convert"
"github.com/orientwalt/htdf/x/auth"
htdfservice "github.com/orientwalt/htdf/x/core"
"github.com/spf13/cobra"
"github.com/spf13/viper"
tmliteProxy "github.com/tendermint/tendermint/lite/proxy"
ctypes "github.com/tendermint/tendermint/rpc/core/types"
tmTypes "github.com/tendermint/tendermint/types"
)
//BlockCommand returns the verified block data for a given height
func BlockCommand() *cobra.Command {
cmd := &cobra.Command{
Use: "block [height]",
Short: "Get verified data for a the block at given height",
Args: cobra.MaximumNArgs(1),
RunE: printBlock,
}
cmd.Flags().StringP(client.FlagNode, "n", "tcp://localhost:26657", "Node to connect to")
viper.BindPFlag(client.FlagNode, cmd.Flags().Lookup(client.FlagNode))
cmd.Flags().Bool(client.FlagTrustNode, false, "Trust connected full node (don't verify proofs for responses)")
viper.BindPFlag(client.FlagTrustNode, cmd.Flags().Lookup(client.FlagTrustNode))
return cmd
}
func getBlock(cliCtx context.CLIContext, height *int64) ([]byte, error) {
// get the node
node, err := cliCtx.GetNode()
if err != nil {
return nil, err
}
// header -> BlockchainInfo
// header, tx -> Block
// results -> BlockResults
res, err := node.Block(height)
if err != nil {
return nil, err
}
if !cliCtx.TrustNode {
check, err := cliCtx.Verify(res.Block.Height)
if err != nil {
return nil, err
}
err = tmliteProxy.ValidateBlockMeta(res.BlockMeta, check)
if err != nil {
return nil, err
}
err = tmliteProxy.ValidateBlock(res.Block, check)
if err != nil {
return nil, err
}
}
if cliCtx.Indent {
return cdc.MarshalJSONIndent(res, "", " ")
}
return cdc.MarshalJSON(res)
}
// get the current blockchain height
func GetChainHeight(cliCtx context.CLIContext) (int64, error) {
node, err := cliCtx.GetNode()
if err != nil {
return -1, err
}
status, err := node.Status()
if err != nil {
return -1, err
}
height := status.SyncInfo.LatestBlockHeight
return height, nil
}
// CMD
func printBlock(cmd *cobra.Command, args []string) error {
var height *int64
// optional height
if len(args) > 0 {
h, err := strconv.Atoi(args[0])
if err != nil {
return err
}
if h > 0 {
tmp := int64(h)
height = &tmp
}
}
output, err := getBlock(context.NewCLIContext(), height)
if err != nil {
return err
}
fmt.Println(string(output))
return nil
}
// REST
// REST handler to get a block
func BlockRequestHandlerFn(cliCtx context.CLIContext) http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
vars := mux.Vars(r)
height, err := strconv.ParseInt(vars["height"], 10, 64)
if err != nil {
rest.WriteErrorResponse(w, http.StatusBadRequest,
"ERROR: Couldn't parse block height. Assumed format is '/block/{height}'.")
return
}
chainHeight, err := GetChainHeight(cliCtx)
if err != nil {
rest.WriteErrorResponse(w, http.StatusInternalServerError, err.Error())
return
}
if height > chainHeight {
rest.WriteErrorResponse(w, http.StatusNotFound,
"ERROR: Requested block height is bigger than the current chain height.")
return
}
output, err := getBlock(cliCtx, &height)
if err != nil {
rest.WriteErrorResponse(w, http.StatusInternalServerError, err.Error())
return
}
rest.PostProcessResponse(w, cdc, output, cliCtx.Indent)
}
}
// REST handler to get the latest block
func LatestBlockRequestHandlerFn(cliCtx context.CLIContext) http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
height, err := GetChainHeight(cliCtx)
if err != nil {
rest.WriteErrorResponse(w, http.StatusInternalServerError, err.Error())
return
}
output, err := getBlock(cliCtx, &height)
if err != nil {
rest.WriteErrorResponse(w, http.StatusInternalServerError, err.Error())
return
}
rest.PostProcessResponse(w, cdc, output, cliCtx.Indent)
}
}
//BlockDetails struct
type DisplayTx struct {
From sdk.AccAddress
To sdk.AccAddress
Amount []sdk.BigCoin
Hash string
Memo string
Data string
}
type DisplayBlock struct {
Txs []DisplayTx `json:"txs"`
Evidence tmTypes.EvidenceData `json:"evidence"`
LastCommit *tmTypes.Commit `json:"last_commit"`
}
type BlockInfo struct {
BlockMeta *tmTypes.BlockMeta `json:"block_meta"`
Block DisplayBlock `json:"block"`
Time string `json:"time"`
}
type DisplayFee struct {
// Amount []sdk.BigCoin `json:"amount"` // junying-todo, 2019-12-07
GasWanted string `json:"gas_wanted"`
GasPrice string `json:"gas_price"`
}
type StdTx struct {
Msgs []DisplayTx `json:"msg"`
Fee DisplayFee `json:"fee"`
Signatures []auth.StdSignature `json:"signatures"`
Memo string `json:"memo"`
}
type GetTxResponse struct {
Height int64 `json:"height"`
TxHash string `json:"txhash"`
Code uint32 `json:"code,omitempty"`
Log sdk.ABCIMessageLogs `json:"log,omitempty"`
Info string `json:"info,omitempty"`
GasWanted int64 `json:"gas_wanted,omitempty"`
GasUsed int64 `json:"gas_used,omitempty"`
Tags sdk.StringTags `json:"tags,omitempty"`
Codespace string `json:"codespace,omitempty"`
Tx StdTx `json:"tx,omitempty"`
// Data string `json:"data,omitempty"`
}
// GetBlockDetailFn
func GetBlockDetailFn(cliCtx context.CLIContext, cdc *codec.Codec) http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
vars := mux.Vars(r)
height, err := strconv.ParseInt(vars["height"], 10, 64)
if err != nil {
w.WriteHeader(http.StatusBadRequest)
w.Write([]byte("ERROR: Couldn't parse block height. Assumed format is '/block/{height}'."))
return
}
chainHeight, err := GetChainHeight(cliCtx)
if err != nil {
w.WriteHeader(http.StatusInternalServerError)
w.Write([]byte(err.Error()))
return
}
if height > chainHeight {
w.WriteHeader(http.StatusNotFound)
w.Write([]byte("ERROR: Requested block height is bigger than the current chain height."))
return
}
// get the node
node, err := cliCtx.GetNode()
if err != nil {
fmt.Printf("getNode error|err=%s\n", err)
sdkRest.WriteErrorResponse(w, http.StatusInternalServerError, err.Error())
return
}
var blockInfo BlockInfo
//get Block info
resultBlock, err := node.Block(&height)
if err != nil {
fmt.Printf("get block error|err=%s\n", err)
sdkRest.WriteErrorResponse(w, http.StatusInternalServerError, err.Error())
return
}
blockInfo.BlockMeta = resultBlock.BlockMeta
blockInfo.Block.Evidence = resultBlock.Block.Evidence
blockInfo.Block.LastCommit = resultBlock.Block.LastCommit
blockInfo.Time = resultBlock.BlockMeta.Header.Time.Local().Format("2006-01-02 15:04:05")
for _, tx := range resultBlock.Block.Data.Txs {
sdkTx, err := parseTx(cdc, tx)
if err != nil {
fmt.Printf("parseTx error|err=%s\n", err)
sdkRest.WriteErrorResponse(w, http.StatusInternalServerError, err.Error())
return
}
switch currTx := sdkTx.(type) {
case auth.StdTx:
var displayTx DisplayTx
for _, msg := range currTx.GetMsgs() |
default:
fmt.Printf("unknown type: %+v\n", currTx)
}
}
sdkRest.PostProcessResponse(w, cdc, &blockInfo, cliCtx.Indent)
}
}
func parseTx(cdc *codec.Codec, txBytes []byte) (sdk.Tx, error) {
var tx auth.StdTx
err := cdc.UnmarshalBinaryLengthPrefixed(txBytes, &tx)
if err != nil {
return nil, err
}
return tx, nil
}
func formatTxResult(cdc *codec.Codec, res *ctypes.ResultTx, resBlock *ctypes.ResultBlock) (sdk.TxResponse, error) {
tx, err := parseTx(cdc, res.Tx)
if err != nil {
return sdk.TxResponse{}, err
}
return sdk.NewResponseResultTx(res, tx, resBlock.Block.Time.Format(time.RFC3339)), nil
}
//
func GetTxFn(cdc *codec.Codec, cliCtx context.CLIContext) http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
vars := mux.Vars(r)
hashHexStr := vars["hash"]
hash, err := hex.DecodeString(hashHexStr)
if err != nil {
sdkRest.WriteErrorResponse(w, http.StatusInternalServerError, err.Error())
return
}
// get the node
node, err := cliCtx.GetNode()
if err != nil {
sdkRest.WriteErrorResponse(w, http.StatusInternalServerError, err.Error())
return
}
resultTx, err := node.Tx(hash, !cliCtx.TrustNode)
if err != nil {
sdkRest.WriteErrorResponse(w, http.StatusInternalServerError, err.Error())
return
}
resBlocks, err := getBlocksForTxResults(cliCtx, []*ctypes.ResultTx{resultTx})
if err != nil {
sdkRest.WriteErrorResponse(w, http.StatusInternalServerError, err.Error())
return
}
txResp, err := formatTxResult(cdc, resultTx, resBlocks[resultTx.Height])
if err != nil {
sdkRest.WriteErrorResponse(w, http.StatusInternalServerError, err.Error())
return
}
var getTxResponse GetTxResponse
getTxResponse.Height = txResp.Height
getTxResponse.TxHash = txResp.TxHash
getTxResponse.Code = txResp.Code
// getTxResponse.Data = txResp.Data
getTxResponse.Log = txResp.Logs
getTxResponse.Info = txResp.Info
getTxResponse.GasWanted = txResp.GasWanted
getTxResponse.GasUsed = txResp.GasUsed
getTxResponse.Tags = txResp.Tags
getTxResponse.Codespace = txResp.Codespace
// switch currTx := txResp.Tx.(type) {
// case auth.StdTx:
// // getTxResponse.Tx.Fee.Amount = unit_convert.DefaultCoinsToBigCoins(currTx.Fee.Amount())
// // getTxResponse.Tx.Fee.GasWanted = unit_convert.DefaultAmoutToBigAmount(strconv.FormatUint(currTx.Fee.GasWanted, 10))
// getTxResponse.Tx.Signatures = currTx.Signatures
// getTxResponse.Tx.Memo = currTx.Memo
// var displayTx DisplayTx
// for _, msg := range currTx.GetMsgs() {
// //fmt.Printf("msg|route=%s|type=%s\n", msg.Route(), msg.Type())
// switch msg := msg.(type) {
// case htdfservice.MsgSend:
// displayTx.From = msg.From
// displayTx.To = msg.To
// displayTx.Amount = unit_convert.DefaultCoinsToBigCoins(msg.Amount)
// getTxResponse.Tx.Msgs = append(getTxResponse.Tx.Msgs, displayTx)
// default:
// fmt.Printf("ignore type|type=%s|route=%s\n", msg.Type(), msg.Route())
// continue
// }
// }
// default:
// fmt.Printf("unknown type: %+v\n", currTx)
// }
sdkRest.PostProcessResponse(w, cdc, getTxResponse, cliCtx.Indent)
}
}
func getBlocksForTxResults(cliCtx context.CLIContext, resTxs []*ctypes.ResultTx) (map[int64]*ctypes.ResultBlock, error) {
node, err := cliCtx.GetNode()
if err != nil {
return nil, err
}
resBlocks := make(map[int64]*ctypes.ResultBlock)
for _, resTx := range resTxs {
if _, ok := resBlocks[resTx.Height]; !ok {
resBlock, err := node.Block(&resTx.Height)
if err != nil {
return nil, err
}
resBlocks[resTx.Height] = resBlock
}
}
return resBlocks, nil
}
| {
//fmt.Printf("msg|route=%s|type=%s\n", msg.Route(), msg.Type())
switch msg := msg.(type) {
case htdfservice.MsgSend:
displayTx.From = msg.From
displayTx.To = msg.To
displayTx.Hash = hex.EncodeToString(tx.Hash())
displayTx.Amount = unit_convert.DefaultCoinsToBigCoins(msg.Amount)
displayTx.Memo = currTx.Memo
blockInfo.Block.Txs = append(blockInfo.Block.Txs, displayTx)
//fmt.Printf("msg|from=%s|to=%s\n", msg.From, msg.To)
default:
fmt.Printf("ignore type|type=%s|route=%s\n", msg.Type(), msg.Route())
continue
}
} | conditional_block |
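// A minimal sketch (not part of the file above) showing why
// getBlocksForTxResults caches blocks in a map: many transaction results
// from the same height trigger only a single Block RPC. The helper name and
// the resTxs input are illustrative.
//
// func exampleTimestamps(cdc *codec.Codec, cliCtx context.CLIContext,
//     resTxs []*ctypes.ResultTx) ([]sdk.TxResponse, error) {
//     blocks, err := getBlocksForTxResults(cliCtx, resTxs)
//     if err != nil {
//         return nil, err
//     }
//     out := make([]sdk.TxResponse, 0, len(resTxs))
//     for _, rt := range resTxs {
//         resp, err := formatTxResult(cdc, rt, blocks[rt.Height])
//         if err != nil {
//             return nil, err
//         }
//         out = append(out, resp)
//     }
//     return out, nil
// }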
benchmarking.py | """
This module performs benchmarking to compare the computational costs of
generating predictions using the ligpy model, trained neural nets for the
full set of outputs and collections of individual outputs, and the trained
decision tree estimator.
"""
import sys
sys.path.append('../../../ligpy/ligpy')
import os
import copy
from subprocess import call
import numpy as np
import cPickle as pickle
import ligpy_utils as utils
import ddasac_utils as ddasac
from analysis_tools import load_results, generate_report
from constants import MW
from lignet_utils import gen_train_test, load_nets
from create_and_train import EarlyStopping
# Pre-load the testing data and machine learning estimators
global y_scaler
x_train, x_test, y_train, y_test, x_scaler, y_scaler = gen_train_test()
global transform_mat
transform_mat = np.zeros((1, 30))
nets = load_nets('trained_networks/final*')
full_net = load_nets('trained_networks/full*')
with open('trained_networks/decision_tree.pkl', 'rb') as pkl:
dtr_full = pickle.load(pkl)[0]
with open('ligpy_benchmarking_files/ligpy_args.txt', 'rb') as args:
ligpy_args = args.readlines()
ligpy_args = ligpy_args[1:]
# reset the random seed generator
np.random.seed()
rand_sample = np.random.randint(0, 199999)
# the row of input data to use in tests
rand_input = x_train[rand_sample:rand_sample+1, :]
def predict_full_net(input_data=rand_input, net=full_net['all']):
"""
Predict the output measures using the network trained on all 30
output measures at once.
Parameters
----------
input_data : numpy.ndarray, optional
an array of input values to predict. This can be a single
row or many rows.
net : nolearn.lasagne.base.NeuralNet, optional
the trained neural net for all 30 output measures
Returns
-------
predicted : numpy.ndarray
an array of the predicted values
"""
return y_scaler.inverse_transform(net.predict(input_data))
def predict_single_net(input_data=rand_input, net=nets[5]):
"""
Predict the value for a single output measure.
Parameters
----------
input_data : numpy.ndarray, optional
an array of input values to predict. This can be a single
row or many rows.
net : nolearn.lasagne.base.NeuralNet, optional
a trained neural net for a single output measure
Returns
-------
predicted : numpy.ndarray
an array of the predicted values
"""
pred = net.predict(input_data)
transform_mat[:, 5] = pred.ravel()
return y_scaler.inverse_transform(transform_mat)[:, 5]
def predict_30_single_nets(input_data=rand_input, nets=nets):
"""
Predict the output measures using 30 individually trained neural nets.
Parameters
----------
input_data : numpy.ndarray, optional
an array of input values to predict. This can be a single
row or many rows.
nets : dict, optional
dictionary with the trained neural nets for all 30 output
measures
Returns
-------
predicted : numpy.ndarray
an array of the predicted values
"""
predicted = np.zeros((input_data.shape[0], 30))
for i in nets.keys():
predicted[:, i] = nets[i].predict(input_data).ravel()
return y_scaler.inverse_transform(predicted)
def predict_decision_tree(input_data=rand_input, tree=dtr_full):
"""
Predict the output measures using a decision tree trained on all 30
output measures at once.
Parameters
----------
input_data : numpy.ndarray, optional
an array of input values to predict. This can be a single
row or many rows.
tree : sklearn.tree.tree.DecisionTreeRegressor, optional
the trained decision tree for all 30 output measures
Returns
-------
predicted : numpy.ndarray
an array of the predicted values
"""
return y_scaler.inverse_transform(tree.predict(input_data))
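def compare_estimators(input_data=rand_input):
    """
    Minimal sketch (not part of the original module) that collects the
    predictions of the estimators above for one input row, so their
    outputs can be compared side by side.
    """
    return {'full_net': predict_full_net(input_data),
            'single_nets': predict_30_single_nets(input_data),
            'decision_tree': predict_decision_tree(input_data)}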
def get_random_ligpy_args():
"""
Get the arguments from a random row of ligpy_args to pass to
predict_ligpy.
Returns
-------
end_time
output_time_step
cool_time
initial_T
heat_rate
maximum_T
plant
"""
rand_index = np.random.randint(0, 249999)
args = ligpy_args[rand_index]
end_time = float(args.split(' ')[0])
output_time_step = float(args.split(' ')[1])
cool_time = int(args.split(' ')[2])
initial_T = float(args.split(' ')[3])
heat_rate = float(args.split(' ')[4])
maximum_T = float(args.split(' ')[5])
plant = str(args.split(' ')[8]).rstrip()
return (end_time, output_time_step, cool_time, initial_T, heat_rate,
maximum_T, plant)
# these need to be defined before the next function, but they will be
# overwritten
(end_time, output_time_step, cool_time, initial_T, heat_rate,
maximum_T, plant) = get_random_ligpy_args()
def setup_predict_ligpy(end_time=end_time, output_time_step=output_time_step,
cool_time=cool_time, initial_T=initial_T,
heat_rate=heat_rate, maximum_T=maximum_T, plant=plant):
|
def teardown_predict_ligpy():
"""
Clean up after running predict_ligpy().
Parameters
----------
None
Returns
-------
None
"""
call('rm -rf bsub.c bsub.o ddat.in fort.11 f.out greg10.in jacobian.c '
'jacobian.o model.c model.o net_rates.def parest rates.def '
'results_dir/', shell=True)
# These must be defined to load the module
(file_completereactionlist, kmatrix, working_directory,
y0_ddasac, specieslist, absolute_tolerance,
relative_tolerance, initial_T, heat_rate, end_time,
maximum_T, output_time_step, cool_time) = setup_predict_ligpy()
def predict_ligpy(file_completereactionlist=file_completereactionlist,
kmatrix=kmatrix, working_directory=working_directory,
y0_ddasac=y0_ddasac, specieslist=specieslist,
absolute_tolerance=absolute_tolerance,
relative_tolerance=relative_tolerance,
initial_T=initial_T, heat_rate=heat_rate, end_time=end_time,
maximum_T=maximum_T, output_time_step=output_time_step,
cool_time=cool_time):
"""
This function is a modified version of `ligpy.py` in the `ligpy` package.
It sets up and solves the ODE model for lignin pyrolysis, then calculates
the set of outputs that are predicted by the machine learning models
developed in `lignet`.
Parameters
----------
standard arguments passed to `ligpy.py`
Returns
-------
None
"""
# Solve the model with DDASAC
ddasac.run_ddasac(file_completereactionlist, kmatrix, working_directory,
y0_ddasac, specieslist, absolute_tolerance,
relative_tolerance, initial_T, heat_rate, end_time,
maximum_T, output_time_step, cool_time)
# Load the program parameters and results from the selected folder
(end_time, output_time_step, initial_T, heating_rate, max_T, atol, rtol,
plant, cool_time, y, t, T, specieslist, speciesindices,
indices_to_species) = load_results('.')
# create a new matrix of mass fractions (instead of concentrations)
m = copy.deepcopy(y)
for species in specieslist:
# make an array of mass concentration (g/L)
m[:, speciesindices[species]] = (y[:, speciesindices[species]] *
MW[species][0])
generate_report(speciesindices, specieslist, y, m, t, 'temp_result')
if __name__ == '__main__':
import timeit
tot_time = (timeit.timeit('predict_full_net()',
setup='from __main__ import predict_full_net',
number=1000))
print('predict_full_net: %s sec per call' % (tot_time/1000))
tot_time = (timeit.timeit('predict_single_net()',
setup='from __main__ import predict_single_net',
number=1000))
print('predict_single_net: %s sec per call' % (tot_time/1000))
tot_time = (timeit.timeit('predict_30_single_nets()',
setup='from __main__ import predict_30_single_nets',
number=1000))
print('predict_30_single_nets: %s sec per call' % (tot_time/1000))
tot_time = (timeit.timeit('predict_decision_tree()',
setup='from __main__ import predict_decision_tree',
number=1000))
print('predict_decision_tree: %s sec per call' % (tot_time/1000))
num_runs = 1000
times = np.zeros(num_runs)
for i in range(0, num_runs):
(end_time, output_time_step, cool_time, initial_T, heat_rate,
maximum_T, plant) = get_random_ligpy_args()
(file_completereactionlist, kmatrix, working_directory,
y0_ddasac, specieslist, absolute_tolerance,
relative_tolerance, initial_T, heat_rate, end_time,
maximum_T, output_time_step, cool_time) = setup_predict_ligpy()
tot_time = (timeit.timeit('predict_ligpy()',
setup='from __main__ import predict_ligpy',
number=1))
times[i] = tot_time
teardown_predict_ligpy()
print('predict_ligpy: %s sec per call' % times.mean())
| """
Create the proper environment to run predict_ligpy() and set up the
kinetic model.
Parameters
----------
standard arguments passed to `ligpy.py`
Returns
-------
standard arguments for `ddasac.run_ddasac()`
"""
call('cp ligpy_benchmarking_files/sa_compositionlist.dat '
'../../../ligpy/ligpy/data/compositionlist.dat;', shell=True
)
absolute_tolerance = float(1e-10)
relative_tolerance = float(1e-8)
# These are the files and paths that will be referenced in this program:
(file_completereactionlist, file_completerateconstantlist,
file_compositionlist) = utils.set_paths()
working_directory = 'results_dir'
if not os.path.exists(working_directory):
os.makedirs(working_directory)
# pickle the arguments used for this program to reference during analysis
prog_params = [end_time, output_time_step, initial_T, heat_rate, maximum_T,
absolute_tolerance, relative_tolerance, plant, cool_time]
with open('%s/prog_params.pkl' % working_directory, 'wb') as pkl:
pickle.dump(prog_params, pkl)
# Get lists of all the species in the kinetic scheme and their indices
specieslist = utils.get_specieslist(file_completereactionlist)
# Get kmatrix
kmatrix = utils.build_k_matrix(file_completerateconstantlist)
# Set the initial composition of the lignin polymer
PLIGC_0, PLIGH_0, PLIGO_0 = utils.define_initial_composition(
file_compositionlist, plant)
# Set the initial conditions for the DDASAC solver
y0_ddasac = np.zeros(len(specieslist))
y0_ddasac[:3] = [PLIGC_0, PLIGH_0, PLIGO_0]
return (file_completereactionlist, kmatrix, working_directory,
y0_ddasac, specieslist, absolute_tolerance,
relative_tolerance, initial_T, heat_rate, end_time,
maximum_T, output_time_step, cool_time) | identifier_body |
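# A minimal sketch (not part of the module above) of reporting timing spread
# instead of a single mean; timeit.repeat is the standard-library option:
#
#     import timeit
#     reps = timeit.repeat('predict_full_net()',
#                          setup='from __main__ import predict_full_net',
#                          repeat=5, number=1000)
#     print('best: %s sec per call' % (min(reps) / 1000))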
benchmarking.py | """
This module performs benchmarking to compare the computational costs of
generating predictions using the ligpy model, trained neural nets for the
full set of outputs and collections of individual outputs, and the trained
decision tree estimator.
"""
import sys
sys.path.append('../../../ligpy/ligpy')
import os
import copy
from subprocess import call
import numpy as np
import cPickle as pickle
import ligpy_utils as utils
import ddasac_utils as ddasac
from analysis_tools import load_results, generate_report
from constants import MW
from lignet_utils import gen_train_test, load_nets
from create_and_train import EarlyStopping
# Pre-load the testing data and machine learning estimators
global y_scaler
x_train, x_test, y_train, y_test, x_scaler, y_scaler = gen_train_test()
global transform_mat
transform_mat = np.zeros((1, 30))
nets = load_nets('trained_networks/final*')
full_net = load_nets('trained_networks/full*')
with open('trained_networks/decision_tree.pkl', 'rb') as pkl:
dtr_full = pickle.load(pkl)[0]
with open('ligpy_benchmarking_files/ligpy_args.txt', 'rb') as args:
ligpy_args = args.readlines()
ligpy_args = ligpy_args[1:]
# reset the random seed generator
np.random.seed()
rand_sample = np.random.randint(0, 199999)
# the row of input data to use in tests
rand_input = x_train[rand_sample:rand_sample+1, :]
def predict_full_net(input_data=rand_input, net=full_net['all']):
"""
Predict the output measures using the network trained on all 30
output measures at once.
Parameters
----------
input_data : numpy.ndarray, optional
an array of input values to predict. This can be a single
row or many rows.
net : nolearn.lasagne.base.NeuralNet, optional
the trained neural net for all 30 output measures
Returns
-------
predicted : numpy.ndarray
an array of the predicted values
"""
return y_scaler.inverse_transform(net.predict(input_data))
def predict_single_net(input_data=rand_input, net=nets[5]):
"""
Predict the value for a single output measure.
Parameters
----------
input_data : numpy.ndarray, optional
an array of input values to predict. This can be a single
row or many rows.
net : nolearn.lasagne.base.NeuralNet, optional
a trained neural net for a single output measure
Returns
-------
predicted : numpy.ndarray
an array of the predicted values
"""
pred = net.predict(input_data)
transform_mat[:, 5] = pred.ravel()
return y_scaler.inverse_transform(transform_mat)[:, 5]
def predict_30_single_nets(input_data=rand_input, nets=nets):
"""
Predict the output measures using 30 individually trained neural nets.
Parameters
----------
input_data : numpy.ndarray, optional
an array of input values to predict. This can be a single
row or many rows.
nets : dict, optional
dictionary with the trained neural nets for all 30 output
measures
Returns
-------
predicted : numpy.ndarray
an array of the predicted values
"""
predicted = np.zeros((input_data.shape[0], 30))
for i in nets.keys():
|
return y_scaler.inverse_transform(predicted)
def predict_decision_tree(input_data=rand_input, tree=dtr_full):
"""
Predict the output measures using a decision tree trained on all 30
output measures at once.
Parameters
----------
input_data : numpy.ndarray, optional
an array of input values to predict. This can be a single
row or many rows.
tree : sklearn.tree.tree.DecisionTreeRegressor, optional
the trained decision tree for all 30 output measures
Returns
-------
predicted : numpy.ndarray
an array of the predicted values
"""
return y_scaler.inverse_transform(tree.predict(input_data))
def get_random_ligpy_args():
"""
Get the arguments from a random row of ligpy_args to pass to
predict_ligpy.
Returns
-------
end_time
output_time_step
cool_time
initial_T
heat_rate
maximum_T
plant
"""
rand_index = np.random.randint(0, 249999)
args = ligpy_args[rand_index]
end_time = float(args.split(' ')[0])
output_time_step = float(args.split(' ')[1])
cool_time = int(args.split(' ')[2])
initial_T = float(args.split(' ')[3])
heat_rate = float(args.split(' ')[4])
maximum_T = float(args.split(' ')[5])
plant = str(args.split(' ')[8]).rstrip()
return (end_time, output_time_step, cool_time, initial_T, heat_rate,
maximum_T, plant)
# these need to be defined before the next function, but they will be
# overwritten
(end_time, output_time_step, cool_time, initial_T, heat_rate,
maximum_T, plant) = get_random_ligpy_args()
def setup_predict_ligpy(end_time=end_time, output_time_step=output_time_step,
cool_time=cool_time, initial_T=initial_T,
heat_rate=heat_rate, maximum_T=maximum_T, plant=plant):
"""
Create the proper environment to run predict_ligpy() and set up the
kinetic model.
Parameters
----------
standard arguments passed to `ligpy.py`
Returns
-------
standard arguments for `ddasac.run_ddasac()`
"""
call('cp ligpy_benchmarking_files/sa_compositionlist.dat '
'../../../ligpy/ligpy/data/compositionlist.dat;', shell=True
)
absolute_tolerance = float(1e-10)
relative_tolerance = float(1e-8)
# These are the files and paths that will be referenced in this program:
(file_completereactionlist, file_completerateconstantlist,
file_compositionlist) = utils.set_paths()
working_directory = 'results_dir'
if not os.path.exists(working_directory):
os.makedirs(working_directory)
# pickle the arguments used for this program to reference during analysis
prog_params = [end_time, output_time_step, initial_T, heat_rate, maximum_T,
absolute_tolerance, relative_tolerance, plant, cool_time]
with open('%s/prog_params.pkl' % working_directory, 'wb') as pkl:
pickle.dump(prog_params, pkl)
# Get lists of all the species in the kinetic scheme and their indices
specieslist = utils.get_specieslist(file_completereactionlist)
# Get kmatrix
kmatrix = utils.build_k_matrix(file_completerateconstantlist)
# Set the initial composition of the lignin polymer
PLIGC_0, PLIGH_0, PLIGO_0 = utils.define_initial_composition(
file_compositionlist, plant)
# Set the initial conditions for the DDASAC solver
y0_ddasac = np.zeros(len(specieslist))
y0_ddasac[:3] = [PLIGC_0, PLIGH_0, PLIGO_0]
return (file_completereactionlist, kmatrix, working_directory,
y0_ddasac, specieslist, absolute_tolerance,
relative_tolerance, initial_T, heat_rate, end_time,
maximum_T, output_time_step, cool_time)
def teardown_predict_ligpy():
"""
Clean up after running predict_ligpy().
Parameters
----------
None
Returns
-------
None
"""
call('rm -rf bsub.c bsub.o ddat.in fort.11 f.out greg10.in jacobian.c '
'jacobian.o model.c model.o net_rates.def parest rates.def '
'results_dir/', shell=True)
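def example_single_ligpy_run():
    """
    Minimal sketch (not part of the original module) of one complete
    setup -> solve -> teardown cycle using the helpers above; the tuples
    returned by get_random_ligpy_args() and setup_predict_ligpy() line up
    positionally with the parameters they feed, so they unpack directly.
    """
    args = setup_predict_ligpy(*get_random_ligpy_args())
    try:
        predict_ligpy(*args)
    finally:
        teardown_predict_ligpy()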
# These must be defined to load the module
(file_completereactionlist, kmatrix, working_directory,
y0_ddasac, specieslist, absolute_tolerance,
relative_tolerance, initial_T, heat_rate, end_time,
maximum_T, output_time_step, cool_time) = setup_predict_ligpy()
def predict_ligpy(file_completereactionlist=file_completereactionlist,
kmatrix=kmatrix, working_directory=working_directory,
y0_ddasac=y0_ddasac, specieslist=specieslist,
absolute_tolerance=absolute_tolerance,
relative_tolerance=relative_tolerance,
initial_T=initial_T, heat_rate=heat_rate, end_time=end_time,
maximum_T=maximum_T, output_time_step=output_time_step,
cool_time=cool_time):
"""
This function is a modified version of `ligpy.py` in the `ligpy` package.
It sets up and solves the ODE model for lignin pyrolysis, then calculates
the set of outputs that are predicted by the machine learning models
developed in `lignet`.
Parameters
----------
standard arguments passed to `ligpy.py`
Returns
-------
None
"""
# Solve the model with DDASAC
ddasac.run_ddasac(file_completereactionlist, kmatrix, working_directory,
y0_ddasac, specieslist, absolute_tolerance,
relative_tolerance, initial_T, heat_rate, end_time,
maximum_T, output_time_step, cool_time)
# Load the program parameters and results from the selected folder
(end_time, output_time_step, initial_T, heating_rate, max_T, atol, rtol,
plant, cool_time, y, t, T, specieslist, speciesindices,
indices_to_species) = load_results('.')
# create a new matrix of mass concentrations (g/L) from molar concentrations
m = copy.deepcopy(y)
for species in specieslist:
# make an array of mass concentration (g/L)
m[:, speciesindices[species]] = (y[:, speciesindices[species]] *
MW[species][0])
generate_report(speciesindices, specieslist, y, m, t, 'temp_result')
if __name__ == '__main__':
import timeit
tot_time = (timeit.timeit('predict_full_net()',
setup='from __main__ import predict_full_net',
number=1000))
print('predict_full_net: %s sec per call' % (tot_time/1000))
tot_time = (timeit.timeit('predict_single_net()',
setup='from __main__ import predict_single_net',
number=1000))
print('predict_single_net: %s sec per call' % (tot_time/1000))
tot_time = (timeit.timeit('predict_30_single_nets()',
setup='from __main__ import predict_30_single_nets',
number=1000))
print('predict_30_single_nets: %s sec per call' % (tot_time/1000))
tot_time = (timeit.timeit('predict_decision_tree()',
setup='from __main__ import predict_decision_tree',
number=1000))
print('predict_decision_tree: %s sec per call' % (tot_time/1000))
num_runs = 1000
times = np.zeros(num_runs)
for i in range(0, num_runs):
(end_time, output_time_step, cool_time, initial_T, heat_rate,
maximum_T, plant) = get_random_ligpy_args()
# pass the drawn arguments explicitly; calling setup_predict_ligpy() bare
# would reuse the defaults bound at definition time for every run
(file_completereactionlist, kmatrix, working_directory,
y0_ddasac, specieslist, absolute_tolerance,
relative_tolerance, initial_T, heat_rate, end_time,
maximum_T, output_time_step, cool_time) = setup_predict_ligpy(
end_time, output_time_step, cool_time, initial_T, heat_rate,
maximum_T, plant)
# time a direct callable so the fresh arguments (not stale defaults) are used
tot_time = timeit.timeit(
lambda: predict_ligpy(file_completereactionlist, kmatrix,
working_directory, y0_ddasac, specieslist, absolute_tolerance,
relative_tolerance, initial_T, heat_rate, end_time, maximum_T,
output_time_step, cool_time),
number=1)
times[i] = tot_time
teardown_predict_ligpy()
print('predict_ligpy: %s sec per call' % times.mean())
| predicted[:, i] = nets[i].predict(input_data).ravel() | conditional_block |
benchmarking.py | """
This module performs benchmarking to compare the computational costs of
generating predictions using the ligpy model, trained neural nets for the
full set of outputs and collections of individual outputs, and the trained
decision tree estimator.
"""
import sys
sys.path.append('../../../ligpy/ligpy')
import os
import copy
from subprocess import call
import numpy as np
import cPickle as pickle
import ligpy_utils as utils
import ddasac_utils as ddasac
from analysis_tools import load_results, generate_report
from constants import MW
from lignet_utils import gen_train_test, load_nets
from create_and_train import EarlyStopping
# Pre-load the testing data and machine learning estimators
# y_scaler and transform_mat are read (and transform_mat mutated in place)
# by the predict_* functions below; module-level `global` statements are
# no-ops, so they are omitted
x_train, x_test, y_train, y_test, x_scaler, y_scaler = gen_train_test()
transform_mat = np.zeros((1, 30))
nets = load_nets('trained_networks/final*')
full_net = load_nets('trained_networks/full*')
with open('trained_networks/decision_tree.pkl', 'rb') as pkl:
dtr_full = pickle.load(pkl)[0]
with open('ligpy_benchmarking_files/ligpy_args.txt', 'rb') as args:
ligpy_args = args.readlines()
ligpy_args = ligpy_args[1:]
# re-seed the random number generator from OS entropy
np.random.seed()
rand_sample = np.random.randint(0, 199999)
# the row of input data to use in tests
rand_input = x_train[rand_sample:rand_sample+1, :]
def predict_full_net(input_data=rand_input, net=full_net['all']):
"""
Predict the output measures using the network trained on all 30
output measures at once.
Parameters
----------
input_data : numpy.ndarray, optional
an array of input values to predict. This can be a single
row or many rows.
net : nolearn.lasagne.base.NeuralNet, optional
the trained neural net for all 30 output measures
Returns
-------
predicted : numpy.ndarray
an array of the predicted values
"""
return y_scaler.inverse_transform(net.predict(input_data))
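# Usage sketch (illustrative; assumes y has 30 output columns, as elsewhere
# in this module): predict_full_net(x_test[:5]) should return a (5, 30)
# array of output measures in their original, inverse-transformed units.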
def predict_single_net(input_data=rand_input, net=nets[5]):
"""
Predict the value for a single output measure.
Parameters
----------
input_data : numpy.ndarray, optional
an array of input values to predict. This can be a single
row or many rows.
net : nolearn.lasagne.base.NeuralNet, optional
a trained neural net for a single output measure
Returns
-------
predicted : numpy.ndarray
an array of the predicted values
"""
pred = net.predict(input_data)
transform_mat[:, 5] = pred.ravel()
return y_scaler.inverse_transform(transform_mat)[:, 5]
def predict_30_single_nets(input_data=rand_input, nets=nets):
"""
Predict the output measures using 30 individually trained neural nets.
Parameters
----------
input_data : numpy.ndarray, optional
an array of input values to predict. This can be a single
row or many rows.
nets : dict, optional
dictionary with the trained neural nets for all 30 output
measures
Returns
-------
predicted : numpy.ndarray
an array of the predicted values
"""
predicted = np.zeros((input_data.shape[0], 30))
for i in nets.keys():
predicted[:, i] = nets[i].predict(input_data).ravel()
return y_scaler.inverse_transform(predicted)
def predict_decision_tree(input_data=rand_input, tree=dtr_full):
"""
Predict the output measures using a decision tree trained on all 30
output measures at once.
Parameters
----------
input_data : numpy.ndarray, optional
an array of input values to predict. This can be a single
row or many rows.
tree : sklearn.tree.tree.DecisionTreeRegressor, optional
the trained decision tree for all 30 output measures
Returns
-------
predicted : numpy.ndarray
an array of the predicted values
"""
return y_scaler.inverse_transform(tree.predict(input_data))
def get_random_ligpy_args():
"""
Get the arguments from a random row of ligpy_args to pass to
predict_ligpy.
Returns
-------
end_time
output_time_step
cool_time
initial_T
heat_rate
maximum_T
plant
"""
rand_index = np.random.randint(0, 249999)
args = ligpy_args[rand_index]
end_time = float(args.split(' ')[0])
output_time_step = float(args.split(' ')[1])
cool_time = int(args.split(' ')[2])
initial_T = float(args.split(' ')[3])
heat_rate = float(args.split(' ')[4])
maximum_T = float(args.split(' ')[5])
plant = str(args.split(' ')[8]).rstrip()
return (end_time, output_time_step, cool_time, initial_T, heat_rate,
maximum_T, plant)
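# Illustrative row layout (an assumption about ligpy_args.txt; columns 6 and
# 7 exist in the file but are unused here):
#     '5.0 0.01 300 773.0 10.0 873.0 <unused> <unused> ECN\n'
# parses to end_time=5.0, output_time_step=0.01, cool_time=300,
# initial_T=773.0, heat_rate=10.0, maximum_T=873.0, plant='ECN'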
# these need to be defined before the next function, but they will be
# overwritten
(end_time, output_time_step, cool_time, initial_T, heat_rate,
maximum_T, plant) = get_random_ligpy_args()
def setup_predict_ligpy(end_time=end_time, output_time_step=output_time_step,
cool_time=cool_time, initial_T=initial_T,
heat_rate=heat_rate, maximum_T=maximum_T, plant=plant):
"""
Create the proper environment to run predict_ligpy() and set up the
kinetic model.
Parameters
----------
standard arguments passed to `ligpy.py`
Returns
-------
standard arguments for `ddasac.run_ddasac()`
"""
call('cp ligpy_benchmarking_files/sa_compositionlist.dat '
'../../../ligpy/ligpy/data/compositionlist.dat;', shell=True
)
absolute_tolerance = float(1e-10)
relative_tolerance = float(1e-8)
# These are the files and paths that will be referenced in this program:
(file_completereactionlist, file_completerateconstantlist,
file_compositionlist) = utils.set_paths()
working_directory = 'results_dir'
if not os.path.exists(working_directory):
os.makedirs(working_directory)
# pickle the arguments used for this program to reference during analysis
prog_params = [end_time, output_time_step, initial_T, heat_rate, maximum_T,
absolute_tolerance, relative_tolerance, plant, cool_time]
with open('%s/prog_params.pkl' % working_directory, 'wb') as pkl:
pickle.dump(prog_params, pkl)
# Get lists of all the species in the kinetic scheme and their indices
specieslist = utils.get_specieslist(file_completereactionlist)
# Get kmatrix
kmatrix = utils.build_k_matrix(file_completerateconstantlist)
# Set the initial composition of the lignin polymer
PLIGC_0, PLIGH_0, PLIGO_0 = utils.define_initial_composition(
file_compositionlist, plant)
# Set the initial conditions for the DDASAC solver
y0_ddasac = np.zeros(len(specieslist))
y0_ddasac[:3] = [PLIGC_0, PLIGH_0, PLIGO_0]
return (file_completereactionlist, kmatrix, working_directory,
y0_ddasac, specieslist, absolute_tolerance,
relative_tolerance, initial_T, heat_rate, end_time,
maximum_T, output_time_step, cool_time)
def | ():
"""
Clean up after running predict_ligpy().
Parameters
----------
None
Returns
-------
None
"""
call('rm -rf bsub.c bsub.o ddat.in fort.11 f.out greg10.in jacobian.c '
'jacobian.o model.c model.o net_rates.def parest rates.def '
'results_dir/', shell=True)
# These must be defined to load the module
(file_completereactionlist, kmatrix, working_directory,
y0_ddasac, specieslist, absolute_tolerance,
relative_tolerance, initial_T, heat_rate, end_time,
maximum_T, output_time_step, cool_time) = setup_predict_ligpy()
def predict_ligpy(file_completereactionlist=file_completereactionlist,
kmatrix=kmatrix, working_directory=working_directory,
y0_ddasac=y0_ddasac, specieslist=specieslist,
absolute_tolerance=absolute_tolerance,
relative_tolerance=relative_tolerance,
initial_T=initial_T, heat_rate=heat_rate, end_time=end_time,
maximum_T=maximum_T, output_time_step=output_time_step,
cool_time=cool_time):
"""
This function is a modified version of `ligpy.py` in the `ligpy` package.
It sets up and solves the ODE model for lignin pyrolysis, then calculates
the set of outputs that are predicted by the machine learning models
developed in `lignet`.
Parameters
----------
standard arguments passed to `ligpy.py`
Returns
-------
None
"""
# Solve the model with DDASAC
ddasac.run_ddasac(file_completereactionlist, kmatrix, working_directory,
y0_ddasac, specieslist, absolute_tolerance,
relative_tolerance, initial_T, heat_rate, end_time,
maximum_T, output_time_step, cool_time)
# Load the program parameters and results from the selected folder
(end_time, output_time_step, initial_T, heating_rate, max_T, atol, rtol,
plant, cool_time, y, t, T, specieslist, speciesindices,
indices_to_species) = load_results('.')
# create a new matrix of mass concentrations (g/L) from molar concentrations
m = copy.deepcopy(y)
for species in specieslist:
# make an array of mass concentration (g/L)
m[:, speciesindices[species]] = (y[:, speciesindices[species]] *
MW[species][0])
generate_report(speciesindices, specieslist, y, m, t, 'temp_result')
if __name__ == '__main__':
import timeit
tot_time = (timeit.timeit('predict_full_net()',
setup='from __main__ import predict_full_net',
number=1000))
print('predict_full_net: %s sec per call' % (tot_time/1000))
tot_time = (timeit.timeit('predict_single_net()',
setup='from __main__ import predict_single_net',
number=1000))
print('predict_single_net: %s sec per call' % (tot_time/1000))
tot_time = (timeit.timeit('predict_30_single_nets()',
setup='from __main__ import predict_30_single_nets',
number=1000))
print('predict_30_single_nets: %s sec per call' % (tot_time/1000))
tot_time = (timeit.timeit('predict_decision_tree()',
setup='from __main__ import predict_decision_tree',
number=1000))
print('predict_decision_tree: %s sec per call' % (tot_time/1000))
num_runs = 1000
times = np.zeros(num_runs)
for i in range(0, num_runs):
(end_time, output_time_step, cool_time, initial_T, heat_rate,
maximum_T, plant) = get_random_ligpy_args()
# pass the drawn arguments explicitly; calling setup_predict_ligpy() bare
# would reuse the defaults bound at definition time for every run
(file_completereactionlist, kmatrix, working_directory,
y0_ddasac, specieslist, absolute_tolerance,
relative_tolerance, initial_T, heat_rate, end_time,
maximum_T, output_time_step, cool_time) = setup_predict_ligpy(
end_time, output_time_step, cool_time, initial_T, heat_rate,
maximum_T, plant)
# time a direct callable so the fresh arguments (not stale defaults) are used
tot_time = timeit.timeit(
lambda: predict_ligpy(file_completereactionlist, kmatrix,
working_directory, y0_ddasac, specieslist, absolute_tolerance,
relative_tolerance, initial_T, heat_rate, end_time, maximum_T,
output_time_step, cool_time),
number=1)
times[i] = tot_time
teardown_predict_ligpy()
print('predict_ligpy: %s sec per call' % times.mean())
| teardown_predict_ligpy | identifier_name |
benchmarking.py | """
This module performs benchmarking to compare the computational costs of
generating predictions using the ligpy model, trained neural nets for the
full set of outputs and collections of individual outputs, and the trained
decision tree estimator.
"""
import sys
sys.path.append('../../../ligpy/ligpy')
import os
import copy
from subprocess import call
import numpy as np
import cPickle as pickle
import ligpy_utils as utils
import ddasac_utils as ddasac
from analysis_tools import load_results, generate_report
from constants import MW
from lignet_utils import gen_train_test, load_nets
from create_and_train import EarlyStopping
# Pre-load the testing data and machine learning estimators
# y_scaler and transform_mat are read (and transform_mat mutated in place)
# by the predict_* functions below; module-level `global` statements are
# no-ops, so they are omitted
x_train, x_test, y_train, y_test, x_scaler, y_scaler = gen_train_test()
transform_mat = np.zeros((1, 30))
nets = load_nets('trained_networks/final*')
full_net = load_nets('trained_networks/full*')
with open('trained_networks/decision_tree.pkl', 'rb') as pkl:
dtr_full = pickle.load(pkl)[0]
with open('ligpy_benchmarking_files/ligpy_args.txt', 'rb') as args:
ligpy_args = args.readlines()
ligpy_args = ligpy_args[1:]
# re-seed the random number generator from OS entropy
np.random.seed()
rand_sample = np.random.randint(0, 199999)
# the row of input data to use in tests
rand_input = x_train[rand_sample:rand_sample+1, :]
def predict_full_net(input_data=rand_input, net=full_net['all']):
"""
Predict the output measures using the network trained on all 30
output measures at once.
Parameters
----------
input_data : numpy.ndarray, optional
an array of input values to predict. This can be a single
row or many rows.
net : nolearn.lasagne.base.NeuralNet, optional
the trained neural net for all 30 output measures
Returns
-------
predicted : numpy.ndarray
an array of the predicted values
"""
return y_scaler.inverse_transform(net.predict(input_data))
def predict_single_net(input_data=rand_input, net=nets[5]):
"""
Predict the value for a single output measure.
Parameters
----------
input_data : numpy.ndarray, optional
an array of input values to predict. This can be a single
row or many rows.
net : nolearn.lasagne.base.NeuralNet, optional
a trained neural net for a single output measure
Returns
-------
predicted : numpy.ndarray
an array of the predicted values
"""
pred = net.predict(input_data)
transform_mat[:, 5] = pred.ravel()
return y_scaler.inverse_transform(transform_mat)[:, 5]
def predict_30_single_nets(input_data=rand_input, nets=nets):
"""
Predict the output measures using 30 individually trained neural nets.
Parameters
----------
input_data : numpy.ndarray, optional
an array of input values to predict. This can be a single
row or many rows.
nets : dict, optional
dictionary with the trained neural nets for all 30 output
measures
Returns
-------
predicted : numpy.ndarray
an array of the predicted values
"""
predicted = np.zeros((input_data.shape[0], 30))
for i in nets.keys():
predicted[:, i] = nets[i].predict(input_data).ravel()
return y_scaler.inverse_transform(predicted)
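# Note: the loop above fills one column per single-output net before applying
# the inverse transform once, so nets.keys() is assumed to be the integer
# output indices 0-29.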
def predict_decision_tree(input_data=rand_input, tree=dtr_full):
"""
Predict the output measures using a decision tree trained on all 30
output measures at once.
Parameters
----------
input_data : numpy.ndarray, optional
an array of input values to predict. This can be a single
row or many rows.
tree : sklearn.tree.tree.DecisionTreeRegressor, optional
the trained decision tree for all 30 output measures
Returns
-------
predicted : numpy.ndarray
an array of the predicted values
"""
return y_scaler.inverse_transform(tree.predict(input_data))
def get_random_ligpy_args():
"""
Get the arguments from a random row of ligpy_args to pass to
predict_ligpy.
Returns
-------
end_time
output_time_step
cool_time
initial_T
heat_rate
maximum_T
plant
"""
rand_index = np.random.randint(0, 249999)
args = ligpy_args[rand_index]
end_time = float(args.split(' ')[0])
output_time_step = float(args.split(' ')[1])
cool_time = int(args.split(' ')[2])
initial_T = float(args.split(' ')[3])
heat_rate = float(args.split(' ')[4])
maximum_T = float(args.split(' ')[5])
plant = str(args.split(' ')[8]).rstrip()
return (end_time, output_time_step, cool_time, initial_T, heat_rate,
maximum_T, plant)
# these need to be defined before the next function, but they will be
# overwritten
(end_time, output_time_step, cool_time, initial_T, heat_rate,
maximum_T, plant) = get_random_ligpy_args()
def setup_predict_ligpy(end_time=end_time, output_time_step=output_time_step,
cool_time=cool_time, initial_T=initial_T,
heat_rate=heat_rate, maximum_T=maximum_T, plant=plant):
"""
Create the proper environment to run predict_ligpy() and set up the
kinetic model.
Parameters
----------
standard arguments passed to `ligpy.py`
Returns
-------
standard arguments for `ddasac.run_ddasac()`
"""
call('cp ligpy_benchmarking_files/sa_compositionlist.dat ' | relative_tolerance = float(1e-8)
# These are the files and paths that will be referenced in this program:
(file_completereactionlist, file_completerateconstantlist,
file_compositionlist) = utils.set_paths()
working_directory = 'results_dir'
if not os.path.exists(working_directory):
os.makedirs(working_directory)
# pickle the arguments used for this program to reference during analysis
prog_params = [end_time, output_time_step, initial_T, heat_rate, maximum_T,
absolute_tolerance, relative_tolerance, plant, cool_time]
with open('%s/prog_params.pkl' % working_directory, 'wb') as pkl:
pickle.dump(prog_params, pkl)
# Get lists of all the species in the kinetic scheme and their indices
specieslist = utils.get_specieslist(file_completereactionlist)
# Get kmatrix
kmatrix = utils.build_k_matrix(file_completerateconstantlist)
# Set the initial composition of the lignin polymer
PLIGC_0, PLIGH_0, PLIGO_0 = utils.define_initial_composition(
file_compositionlist, plant)
# Set the initial conditions for the DDASAC solver
y0_ddasac = np.zeros(len(specieslist))
y0_ddasac[:3] = [PLIGC_0, PLIGH_0, PLIGO_0]
return (file_completereactionlist, kmatrix, working_directory,
y0_ddasac, specieslist, absolute_tolerance,
relative_tolerance, initial_T, heat_rate, end_time,
maximum_T, output_time_step, cool_time)
def teardown_predict_ligpy():
"""
Clean up after running predict_ligpy().
Parameters
----------
None
Returns
-------
None
"""
call('rm -rf bsub.c bsub.o ddat.in fort.11 f.out greg10.in jacobian.c '
'jacobian.o model.c model.o net_rates.def parest rates.def '
'results_dir/', shell=True)
# These must be defined to load the module
(file_completereactionlist, kmatrix, working_directory,
y0_ddasac, specieslist, absolute_tolerance,
relative_tolerance, initial_T, heat_rate, end_time,
maximum_T, output_time_step, cool_time) = setup_predict_ligpy()
def predict_ligpy(file_completereactionlist=file_completereactionlist,
kmatrix=kmatrix, working_directory=working_directory,
y0_ddasac=y0_ddasac, specieslist=specieslist,
absolute_tolerance=absolute_tolerance,
relative_tolerance=relative_tolerance,
initial_T=initial_T, heat_rate=heat_rate, end_time=end_time,
maximum_T=maximum_T, output_time_step=output_time_step,
cool_time=cool_time):
"""
This function is a modified version of `ligpy.py` in the `ligpy` package.
It sets up and solves the ODE model for lignin pyrolysis, then calculates
the set of outputs that are predicted by the machine learning models
developed in `lignet`.
Parameters
----------
standard arguments passed to `ligpy.py`
Returns
-------
None
"""
# Solve the model with DDASAC
ddasac.run_ddasac(file_completereactionlist, kmatrix, working_directory,
y0_ddasac, specieslist, absolute_tolerance,
relative_tolerance, initial_T, heat_rate, end_time,
maximum_T, output_time_step, cool_time)
# Load the program parameters and results from the selected folder
(end_time, output_time_step, initial_T, heating_rate, max_T, atol, rtol,
plant, cool_time, y, t, T, specieslist, speciesindices,
indices_to_species) = load_results('.')
# create a new matrix of mass concentrations (g/L) from molar concentrations
m = copy.deepcopy(y)
for species in specieslist:
# make an array of mass concentration (g/L)
m[:, speciesindices[species]] = (y[:, speciesindices[species]] *
MW[species][0])
generate_report(speciesindices, specieslist, y, m, t, 'temp_result')
if __name__ == '__main__':
import timeit
tot_time = (timeit.timeit('predict_full_net()',
setup='from __main__ import predict_full_net',
number=1000))
print('predict_full_net: %s sec per call' % (tot_time/1000))
tot_time = (timeit.timeit('predict_single_net()',
setup='from __main__ import predict_single_net',
number=1000))
print('predict_single_net: %s sec per call' % (tot_time/1000))
tot_time = (timeit.timeit('predict_30_single_nets()',
setup='from __main__ import predict_30_single_nets',
number=1000))
print('predict_30_single_nets: %s sec per call' % (tot_time/1000))
tot_time = (timeit.timeit('predict_decision_tree()',
setup='from __main__ import predict_decision_tree',
number=1000))
print('predict_decision_tree: %s sec per call' % (tot_time/1000))
num_runs = 1000
times = np.zeros(num_runs)
for i in range(0, num_runs):
(end_time, output_time_step, cool_time, initial_T, heat_rate,
maximum_T, plant) = get_random_ligpy_args()
# pass the drawn arguments explicitly; calling setup_predict_ligpy() bare
# would reuse the defaults bound at definition time for every run
(file_completereactionlist, kmatrix, working_directory,
y0_ddasac, specieslist, absolute_tolerance,
relative_tolerance, initial_T, heat_rate, end_time,
maximum_T, output_time_step, cool_time) = setup_predict_ligpy(
end_time, output_time_step, cool_time, initial_T, heat_rate,
maximum_T, plant)
# time a direct callable so the fresh arguments (not stale defaults) are used
tot_time = timeit.timeit(
lambda: predict_ligpy(file_completereactionlist, kmatrix,
working_directory, y0_ddasac, specieslist, absolute_tolerance,
relative_tolerance, initial_T, heat_rate, end_time, maximum_T,
output_time_step, cool_time),
number=1)
times[i] = tot_time
teardown_predict_ligpy()
print('predict_ligpy: %s sec per call' % times.mean()) | '../../../ligpy/ligpy/data/compositionlist.dat;', shell=True
)
absolute_tolerance = float(1e-10) | random_line_split |
main.py | import time
# import math
import gym
# from gym import spaces, logger
# from gym.utils import seeding
from gym.envs.classic_control import rendering
import random
import math
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
import torchvision.transforms as T
import numpy as np
from collections import namedtuple
from itertools import count
class DQN(nn.Module):
def __init__(self, h, w, outputs):
super(DQN, self).__init__()
self.conv1 = nn.Conv2d(3, 16, kernel_size=5, stride=2)
self.bn1 = nn.BatchNorm2d(16)
self.conv2 = nn.Conv2d(16, 32, kernel_size=5, stride=2)
self.bn2 = nn.BatchNorm2d(32)
self.conv3 = nn.Conv2d(32, 32, kernel_size=5, stride=2)
self.bn3 = nn.BatchNorm2d(32)
# Number of Linear input connections depends on output of conv2d layers
# and therefore the input image size, so compute it.
def conv2d_size_out(size, kernel_size = 5, stride = 2):
return (size - (kernel_size - 1) - 1) // stride + 1
convw = conv2d_size_out(conv2d_size_out(conv2d_size_out(w)))
convh = conv2d_size_out(conv2d_size_out(conv2d_size_out(h)))
linear_input_size = convw * convh * 32
self.head = nn.Linear(linear_input_size, outputs)
# Called with either one element to determine next action, or a batch
# during optimization. Returns tensor([[left0exp,right0exp]...]).
def forward(self, x):
x = F.relu(self.bn1(self.conv1(x)))
x = F.relu(self.bn2(self.conv2(x)))
x = F.relu(self.bn3(self.conv3(x)))
return self.head(x.view(x.size(0), -1))
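# Worked size example (an 84x84 input is an assumption; nothing in this file
# fixes the screen size): conv2d_size_out(84) = (84 - 4 - 1) // 2 + 1 = 40,
# then 40 -> 18 -> 7, so linear_input_size = 7 * 7 * 32 = 1568.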
Transition = namedtuple('Transition', ('state', 'action', 'next_state', 'reward'))
class ReplayMemory(object):
def __init__(self, capacity):
self.capacity = capacity
self.memory = []
self.position = 0
def push(self, *args):
"""Saves a transition."""
if len(self.memory) < self.capacity:
self.memory.append(None)
self.memory[self.position] = Transition(*args)
self.position = (self.position + 1) % self.capacity
def sample(self, batch_size):
return random.sample(self.memory, batch_size)
def __len__(self):
return len(self.memory)
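# Minimal usage sketch (s, a, s2, r are hypothetical tensors):
#     memory = ReplayMemory(10000)
#     memory.push(s, a, s2, r)
#     if len(memory) >= BATCH_SIZE:
#         batch = Transition(*zip(*memory.sample(BATCH_SIZE)))
# The zip(*...) transposes a list of Transitions into one Transition of lists.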
class Track:
def __init__(self, a, b, track_length):
self.begins_at = a
self.ends_at = b
self.track_length = track_length
def __eq__(self, other):
return self.begins_at == other.begins_at and self.ends_at == other.ends_at
def __hash__(self):
return hash((self.begins_at.__hash__(), self.ends_at.__hash__()))
def get_delta(self):
delta_x = self.ends_at.x - self.begins_at.x
delta_y = self.ends_at.y - self.begins_at.y
return Node(x=delta_x, y=delta_y)
def get_angle(self):
delta_x = self.ends_at.x - self.begins_at.x
delta_y = self.ends_at.y - self.begins_at.y
# arctan2 handles vertical tracks (delta_x == 0) without dividing by zero
return np.arctan2(delta_y, delta_x)
def __repr__(self):
return f'Track: {self.begins_at} -> {self.ends_at}'
def geom(self):
geom = rendering.Line(self.begins_at.arr(), self.ends_at.arr())
geom.set_color(0, 0, 0)
return geom
def __contains__(self, station):
return station in (self.begins_at, self.ends_at)
class Node:
def __init__(self, x, y, name=None):
self.name = name
# self.p = pos
self.x = x
self.y = y
self._tracks = None
self._nodes = None
def __eq__(self, other):
return np.isclose(self.x, other.x) and np.isclose(self.y, other.y)
def __hash__(self):
return hash((self.x, self.y))
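# Caveat: __eq__ uses np.isclose while __hash__ hashes the exact coordinates,
# so two nodes that compare equal may hash differently. The linear scans used
# below (e.g. in Train.curr_station) sidestep this, but set/dict lookups
# keyed on Node would not be reliable.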
def tracks(self):
if not self._tracks:
self._tracks = {x for x in World.tracks if x.begins_at == self or x.ends_at == self}
return self._tracks
def arr(self):
return self.x, self.y
def geom(self):
geom = rendering.make_capsule(length=20, width=20)
geom.set_color(1, 0, 0)
geom.add_attr(rendering.Transform(translation=self.arr(), rotation=0.0, scale=(1, 1)))
geom.add_attr(rendering.Transform(translation=(-10.0, 0), rotation=0.0, scale=(1, 1))) # center the pill
return geom
def is_next_to(self, station):
# despite the old parameter name, the argument is a station (Node)
for t in self.tracks():
if station in (t.begins_at, t.ends_at):
return True
return False
def __repr__(self):
return f'Node: {self.name} ({self.x}, {self.y})'
class Train:
def __init__(self, track, dist, direction, name):
self.on_track = track
self.dist_on_track = dist
self.direction = direction # -1 = from end to beginning, 1 = from beginning to end
self.name = name
self.speed = 0.1
width = 45.0
height = 15.0
lef, rig, top, bot = -width / 2, width / 2, height / 2, -height / 2
self.geom = rendering.FilledPolygon([(lef, bot), (lef, top), (rig, top), (rig, bot)])
self.geom.set_color(0, 1, 0)
self.translation = rendering.Transform()
self.geom.add_attr(self.translation)
def go_to(self, station):
pass
def step(self, direction, next_track, speed):
if next_track in self.on_track.begins_at.tracks() or next_track in self.on_track.ends_at.tracks():
if self.pos() == next_track.begins_at:
self.dist_on_track = 0
self.on_track = next_track
if self.pos() == next_track.ends_at:
self.dist_on_track = next_track.track_length
self.on_track = next_track
self.dist_on_track += direction * speed
if self.dist_on_track < 0.0:
self.dist_on_track = 0.0
if self.dist_on_track > self.on_track.track_length:
self.dist_on_track = self.on_track.track_length
# if self.pos() == self.on_track.begins_at:
# if next_track in self.on_track.begins_at.tracks():
#
# if self.pos() == next_track.begins_at:
# self.dist_on_track = 0
# else:
# self.dist_on_track = next_track.track_length
# self.on_track = next_track
# if self.pos() == self.on_track.ends_at:
# if next_track in self.on_track.ends_at.tracks():
# if self.pos() == next_track.begins_at:
# self.dist_on_track = 0
# else:
# self.dist_on_track = next_track.track_length
# self.on_track = next_track
def | (self) -> Node:
delta = self.on_track.get_delta()
progress = self.dist_on_track / self.on_track.track_length
x = self.on_track.begins_at.x + progress * delta.x
y = self.on_track.begins_at.y + progress * delta.y
return Node(x=x, y=y)
def curr_station(self):
stations = [x for x in World.nodes if x == self.pos()]
if stations:
return stations[0]
else:
return None
def __eq__(self, other):
return self.name == other.name
def __hash__(self):
return self.name.__hash__()
class World(gym.Env):
metadata = {
'render.modes': ['human', 'rgb_array'],
'video.frames_per_second': 1
}
from typing import List, Set
tracks: Set[Track] = set()
nodes: List[Node] = []
points = set()
def __init__(self):
self.age = 0
self.viewer = None
self.state = None
self.done = False
self.scr_wid = 900
self.scr_hgt = 700
self.train = None
self.train_trans = None
self.origin = None
self.destination = None
def step(self, next_station: Node):
if self.done:
raise Exception('episode is done; call reset() before stepping again.')
curr_station = [x for x in World.nodes if x == self.train.pos()][0]
if curr_station.is_next_to(next_station):
track = [x for x in World.tracks if curr_station in x and next_station in x][0]
if track.ends_at == next_station:
direction = 1 # renamed from `dir` to avoid shadowing the builtin
else:
direction = -1
self.train.step(direction, track, track.track_length)
self.state = {
'current_station': next_station
}
# self.train.step(direction=action['direction'], next_track=action['next_track'], speed=action['speed'])
self.age += 1
self.done = self.destination == self.train.pos()
if self.done:
reward = 0.0
else:
# reward = 1.0 / self.age
reward = -1.0
return self.state['current_station'], reward, self.done, {}
def reset(self):
track = list(self.origin.tracks())[0]
if track.ends_at == self.origin:
self.train = Train(track=track, dist=track.track_length, direction=1, name='bob')
else:
self.train = Train(track=track, dist=0.0, direction=1, name='bob')
self.state = {
'current_station': self.origin
}
self.done = False
self.close()
self.render()
return self.state
def render(self, mode='human'):
if self.viewer is None:
self.viewer = rendering.Viewer(self.scr_wid, self.scr_hgt)
for track in World.tracks:
self.viewer.add_geom(track.geom())
for n in World.nodes:
self.viewer.add_geom(n.geom())
self.viewer.add_geom(self.train.geom)
self.train.translation.set_rotation(self.train.on_track.get_angle())
self.train.translation.set_translation(self.train.pos().x, self.train.pos().y)
return self.viewer.render(return_rgb_array=mode == 'rgb_array')
def close(self):
if self.viewer:
self.viewer.close()
self.viewer = None
gym.envs.registration.register(
id='world-v0',
entry_point='main:World',
)
if __name__ == '__main__':
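# re-import so the 'main:World' entry point resolves; note this executes the
# module a second time, which some gym versions reject at register()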
from main import *
import gym
import numpy as np
from torch.optim.rmsprop import RMSprop
na = Node(name='A', x=304.0, y=256.0)
nb = Node(name='B', x=539.0, y=365.0)
tb = Node(name='Bravo', x=841.0, y=154.0)
tc = Node(name='Charlie', x=204.0, y=526.0)
td = Node(name='Dingo', x=786.0, y=617.0)
tf = Node(name='Foxtrot', x=56.0, y=285.0)
tt = Node(name='Tango', x=89.0, y=66.0)
tw = Node(name='Whiskey', x=249.0, y=64.0)
ra = Track(tc, td, 5)
rb = Track(td, tb, 5)
rc = Track(tb, nb, 3)
rd = Track(td, nb, 3)
re = Track(tc, nb, 3)
rf = Track(na, nb, 3)
rg = Track(na, tf, 2)
rh = Track(na, tt, 2)
ri = Track(na, tw, 2)
World.nodes = [na, nb, tb, tc, td, tf, tt, tw]
World.tracks = {ra, rb, rc, rd, re, rf, rg, rh, ri}
world: World = gym.make('world-v0')
world.origin = tw
world.destination = td
world.reset()
world.render()
# =================================================
RENDER_FPS = 20
N_INPUTS = len(World.nodes) * 2 # which station now
N_OUTPUTS = len(World.nodes) # which station next
LEARNING_RATE = 0.1
N_EPISODES = 20 # number of training iterations
N_MAX_STEPS = 1000 # max steps per episode
N_GAMES_PER_EPISODE = 3 # games rolled out per policy update
# BATCH_SIZE = 128
# GAMMA = 0.999
EPS_START = 0.9
EPS_END = 0.05
EPS_DECAY = 200
# TARGET_UPDATE = 10
# save_iterations = 10 # save the model every 10 training iterations
DISCOUNT_RATE = 0.95
BATCH_SIZE = 128
GAMMA = 0.999
# 2. Build the neural network
class NextStationNet(nn.Module):
def __init__(self):
super(NextStationNet, self).__init__()
self.input_layer = nn.Linear(N_INPUTS, 32)
self.hidden_layer = nn.Linear(32, 32)
# self.hidden_layer.weight = torch.nn.Parameter(torch.tensor([[1.58]]))
# self.hidden_layer.bias = torch.nn.Parameter(torch.tensor([-0.14]))
self.output_layer = nn.Linear(32, N_OUTPUTS)
# self.output_layer.weight = torch.nn.Parameter(torch.tensor([[2.45]]))
# self.output_layer.bias = torch.nn.Parameter(torch.tensor([-0.11]))
def forward(self, x):
x = torch.sigmoid(self.input_layer(x))
x = torch.sigmoid(self.hidden_layer(x))
x = torch.sigmoid(self.output_layer(x))
return x
policy_net: NextStationNet = NextStationNet()
target_net: NextStationNet = NextStationNet() # DQN(screen_height, screen_width, n_actions).to(device)
target_net.load_state_dict(policy_net.state_dict())
target_net.eval()
optimizer: RMSprop = RMSprop(policy_net.parameters())
steps_done = 0 # super global variable
def select_action(policy_net, from_station, to_station):
global steps_done
# track instance to one hot
in_arr = torch.tensor([0.0] * N_INPUTS)
in_arr[World.nodes.index(from_station)] = 1.0
in_arr[len(World.nodes) + World.nodes.index(to_station)] = 1.0
# global steps_done
eps_threshold = EPS_END + (EPS_START - EPS_END) * math.exp(-1. * steps_done / EPS_DECAY)
steps_done += 1
with torch.no_grad():
prediction = policy_net(in_arr)
if random.random() > eps_threshold:
max_idx = prediction.max(0)[1].item() # .item() yields a plain int index
else:
max_idx = random.randrange(N_OUTPUTS)
target = torch.tensor([0.0] * N_OUTPUTS)
target[max_idx] = 1.0
station = World.nodes[max_idx]
return station, prediction, target
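# Epsilon-greedy schedule for reference (EPS_START=0.9, EPS_END=0.05,
# EPS_DECAY=200):
#     steps_done = 0    -> eps_threshold = 0.90
#     steps_done = 200  -> 0.05 + 0.85 * exp(-1) ~= 0.363
#     steps_done = 1000 -> 0.05 + 0.85 * exp(-5) ~= 0.056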
print(f"network topology: {policy_net}")
# # run input data forward through network
# # track instance to one hot
# input_data = torch.tensor([0.0] * N_INPUTS)
# input_data[World.nodes.index(world.train.curr_station())] = 1.0 # start at node A
#
# output = policy_net(input_data)
#
#
# # backpropagate gradient
#
# target = torch.tensor([0] * N_OUTPUTS)
# target[World.nodes.index(td)] = 1 # I want to go to station dingo
# # target = torch.tensor([0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0])
#
# criterion = nn.MSELoss()
# loss = criterion(output, target)
# policy_net.zero_grad()
# loss.backward()
#
# # update weights and biases
# optimizer = optim.SGD(policy_net.parameters(), lr=0.1)
# optimizer.step()
#
# output = policy_net(input_data)
# print(f"updated_a_l2 = {round(output.item(), 4)}")
def discount_rewards(reward, discount_rate):
discounted_rewards = np.empty(len(reward))
cumulative_rewards = 0
for step in reversed(range(len(reward))):
cumulative_rewards = reward[step] + cumulative_rewards * discount_rate
discounted_rewards[step] = cumulative_rewards
return discounted_rewards
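# Worked example with discount_rate = 0.95:
#     discount_rewards([-1.0, -1.0, 0.0], 0.95)
#     step 2: 0.0; step 1: -1.0 + 0.95 * 0.0 = -1.0;
#     step 0: -1.0 + 0.95 * -1.0 = -1.95  ->  array([-1.95, -1.0, 0.0])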
def discount_and_normalize_rewards(all_rewards, discount_rate):
all_discounted_rewards = [discount_rewards(reward, discount_rate) for reward in all_rewards]
flat_rewards = np.concatenate(all_discounted_rewards)
reward_mean = flat_rewards.mean()
reward_std = flat_rewards.std()
return [(discounted_rewards - reward_mean) / reward_std for discounted_rewards in all_discounted_rewards]
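# Note: reward_std is zero when every game earns an identical reward
# sequence, which would make this normalization divide by zero; adding a
# small epsilon to the denominator would be safer.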
# exploration policy. Try something new or stick with the known.
for episode in range(N_EPISODES):
print(f"episode: {episode}")
all_rewards = [] # all sequences of raw rewards for each episode
all_gradients = [] # gradients saved at each step of each episode
# all_entropy = []
for game in range(N_GAMES_PER_EPISODE):
print(f" game: {game}")
curr_rewards = [] # all raw rewards from the current episode
curr_gradients = [] # all gradients from the current episode
world.reset()
# world.render()
# time.sleep(1 / RENDER_FPS)
for step in range(N_MAX_STEPS):
next_station, prediction, target = select_action(policy_net, from_station=world.train.curr_station(), to_station=world.destination)
print(next_station)
curr_station, reward, done, info = world.step(next_station=next_station)
world.render()
time.sleep(1 / RENDER_FPS)
curr_rewards += [reward]
# curr_gradients += [val_grads]
if done:
break
# loss = F.smooth_l1_loss(prediction, target)
# Optimize the model
# optimizer.zero_grad()
# loss.backward()
# for param in policy_net.parameters():
# param.grad.data.clamp_(-1, 1)
# optimizer.step()
all_rewards += [curr_rewards]
print(f" reward: {len(curr_rewards)}")
all_gradients += [curr_gradients]
# At this point we have run the policy for N_GAMES_PER_EPISODE games, and we
# are ready for a policy update using the algorithm described earlier.
all_rewards_normalized = discount_and_normalize_rewards(all_rewards, DISCOUNT_RATE)
# NOTE: the update below came from an earlier TensorFlow version of this
# script; `car_a` and `sess` are never defined here (and curr_gradients is
# never populated), so it is commented out to keep the module runnable.
# feed_dict = {}
# for idx, grad_and_var in enumerate(car_a['GradientsAndVariables']):
#     # multiply the gradients by the action scores, and compute the mean
#     mean_gradients = np.mean(
#         [reward * all_gradients[game_index][step][idx]
#          for game_index, rewards in enumerate(all_rewards_normalized)
#          for step, reward in enumerate(rewards)], axis=0)
#     feed_dict[grad_and_var[0]] = mean_gradients
# sess.run(car_a['Trainer'], feed_dict=feed_dict)
world.step(next_station=na)
world.render()
time.sleep(1)
world.step(next_station=nb)
world.render()
time.sleep(1)
world.step(next_station=td)
world.render()
time.sleep(1) | pos | identifier_name |
main.py | import time
# import math
import gym
# from gym import spaces, logger
# from gym.utils import seeding
from gym.envs.classic_control import rendering
import random
import math
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
import torchvision.transforms as T
import numpy as np
from collections import namedtuple
from itertools import count
class DQN(nn.Module):
def __init__(self, h, w, outputs):
super(DQN, self).__init__()
self.conv1 = nn.Conv2d(3, 16, kernel_size=5, stride=2)
self.bn1 = nn.BatchNorm2d(16)
self.conv2 = nn.Conv2d(16, 32, kernel_size=5, stride=2)
self.bn2 = nn.BatchNorm2d(32)
self.conv3 = nn.Conv2d(32, 32, kernel_size=5, stride=2)
self.bn3 = nn.BatchNorm2d(32)
# Number of Linear input connections depends on output of conv2d layers
# and therefore the input image size, so compute it.
def conv2d_size_out(size, kernel_size = 5, stride = 2):
return (size - (kernel_size - 1) - 1) // stride + 1
convw = conv2d_size_out(conv2d_size_out(conv2d_size_out(w)))
convh = conv2d_size_out(conv2d_size_out(conv2d_size_out(h)))
linear_input_size = convw * convh * 32
self.head = nn.Linear(linear_input_size, outputs)
# Called with either one element to determine next action, or a batch
# during optimization. Returns tensor([[left0exp,right0exp]...]).
def forward(self, x):
x = F.relu(self.bn1(self.conv1(x)))
x = F.relu(self.bn2(self.conv2(x)))
x = F.relu(self.bn3(self.conv3(x)))
return self.head(x.view(x.size(0), -1))
Transition = namedtuple('Transition', ('state', 'action', 'next_state', 'reward'))
class ReplayMemory(object):
def __init__(self, capacity):
self.capacity = capacity
self.memory = []
self.position = 0
def push(self, *args):
"""Saves a transition."""
if len(self.memory) < self.capacity:
self.memory.append(None)
self.memory[self.position] = Transition(*args)
self.position = (self.position + 1) % self.capacity
def sample(self, batch_size):
return random.sample(self.memory, batch_size)
def __len__(self):
return len(self.memory)
class Track:
def __init__(self, a, b, track_length):
self.begins_at = a
self.ends_at = b
self.track_length = track_length
def __eq__(self, other):
return self.begins_at == other.begins_at and self.ends_at == other.ends_at
def __hash__(self):
return hash((self.begins_at.__hash__(), self.ends_at.__hash__()))
def get_delta(self):
delta_x = self.ends_at.x - self.begins_at.x
delta_y = self.ends_at.y - self.begins_at.y
return Node(x=delta_x, y=delta_y)
def get_angle(self):
delta_x = self.ends_at.x - self.begins_at.x
delta_y = self.ends_at.y - self.begins_at.y
# arctan2 handles vertical tracks (delta_x == 0) without dividing by zero
return np.arctan2(delta_y, delta_x)
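# Example: a track from (0, 0) to (0, 5) is vertical; np.arctan2(5, 0)
# returns pi/2, whereas the old arctan(5 / 0) raised ZeroDivisionError.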
def __repr__(self):
return f'Track: {self.begins_at} -> {self.ends_at}'
def geom(self):
geom = rendering.Line(self.begins_at.arr(), self.ends_at.arr())
geom.set_color(0, 0, 0)
return geom
def __contains__(self, station):
return station in (self.begins_at, self.ends_at)
class Node:
def __init__(self, x, y, name=None):
self.name = name
# self.p = pos
self.x = x
self.y = y
self._tracks = None
self._nodes = None
def __eq__(self, other):
return np.isclose(self.x, other.x) and np.isclose(self.y, other.y)
def __hash__(self):
return hash((self.x, self.y))
def tracks(self):
if not self._tracks:
self._tracks = {x for x in World.tracks if x.begins_at == self or x.ends_at == self}
return self._tracks
def arr(self):
return self.x, self.y
def geom(self):
geom = rendering.make_capsule(length=20, width=20)
geom.set_color(1, 0, 0)
geom.add_attr(rendering.Transform(translation=self.arr(), rotation=0.0, scale=(1, 1)))
geom.add_attr(rendering.Transform(translation=(-10.0, 0), rotation=0.0, scale=(1, 1))) # center the pill
return geom
def is_next_to(self, station):
# despite the old parameter name, the argument is a station (Node)
for t in self.tracks():
if station in (t.begins_at, t.ends_at):
return True
return False
def __repr__(self):
return f'Node: {self.name} ({self.x}, {self.y})'
class Train:
def __init__(self, track, dist, direction, name):
self.on_track = track
self.dist_on_track = dist
self.direction = direction # -1 = from end to beginning, 1 = from beginning to end
self.name = name
self.speed = 0.1
width = 45.0
height = 15.0
lef, rig, top, bot = -width / 2, width / 2, height / 2, -height / 2
self.geom = rendering.FilledPolygon([(lef, bot), (lef, top), (rig, top), (rig, bot)])
self.geom.set_color(0, 1, 0)
self.translation = rendering.Transform()
self.geom.add_attr(self.translation)
def go_to(self, station):
pass
def step(self, direction, next_track, speed):
if next_track in self.on_track.begins_at.tracks() or next_track in self.on_track.ends_at.tracks():
if self.pos() == next_track.begins_at:
self.dist_on_track = 0
self.on_track = next_track
if self.pos() == next_track.ends_at:
self.dist_on_track = next_track.track_length
self.on_track = next_track
self.dist_on_track += direction * speed
if self.dist_on_track < 0.0:
self.dist_on_track = 0.0
if self.dist_on_track > self.on_track.track_length:
self.dist_on_track = self.on_track.track_length
# if self.pos() == self.on_track.begins_at:
# if next_track in self.on_track.begins_at.tracks():
#
# if self.pos() == next_track.begins_at:
# self.dist_on_track = 0
# else:
# self.dist_on_track = next_track.track_length
# self.on_track = next_track
# if self.pos() == self.on_track.ends_at:
# if next_track in self.on_track.ends_at.tracks():
# if self.pos() == next_track.begins_at:
# self.dist_on_track = 0
# else:
# self.dist_on_track = next_track.track_length
# self.on_track = next_track
def pos(self) -> Node:
delta = self.on_track.get_delta()
progress = self.dist_on_track / self.on_track.track_length
x = self.on_track.begins_at.x + progress * delta.x
y = self.on_track.begins_at.y + progress * delta.y
return Node(x=x, y=y)
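# Example: on a track from (0, 0) to (10, 0) with track_length 5 and
# dist_on_track 2.5, progress is 0.5 and pos() returns Node(x=5.0, y=0.0);
# note track_length is a logical length, independent of the drawn geometry.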
def curr_station(self):
stations = [x for x in World.nodes if x == self.pos()]
if stations:
return stations[0]
else:
return None
def __eq__(self, other):
return self.name == other.name
def __hash__(self):
return self.name.__hash__()
class World(gym.Env):
metadata = {
'render.modes': ['human', 'rgb_array'],
'video.frames_per_second': 1
}
from typing import List, Set
tracks: Set[Track] = set()
nodes: List[Node] = []
points = set()
def __init__(self):
self.age = 0
self.viewer = None
self.state = None
self.done = False
self.scr_wid = 900
self.scr_hgt = 700
self.train = None
self.train_trans = None
self.origin = None
self.destination = None
def step(self, next_station: Node):
if self.done:
raise Exception('episode is done; call reset() before stepping again.')
curr_station = [x for x in World.nodes if x == self.train.pos()][0]
if curr_station.is_next_to(next_station):
track = [x for x in World.tracks if curr_station in x and next_station in x][0]
if track.ends_at == next_station:
direction = 1 # renamed from `dir` to avoid shadowing the builtin
else:
direction = -1
self.train.step(direction, track, track.track_length)
self.state = {
'current_station': next_station
}
# self.train.step(direction=action['direction'], next_track=action['next_track'], speed=action['speed'])
self.age += 1
self.done = self.destination == self.train.pos()
if self.done:
reward = 0.0
else:
# reward = 1.0 / self.age
reward = -1.0
return self.state['current_station'], reward, self.done, {}
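# Reward sketch: every non-terminal step yields -1 and the arrival step
# yields 0, so an episode's undiscounted return is minus the number of steps
# taken before reaching the destination; shorter routes score higher.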
def reset(self):
track = list(self.origin.tracks())[0]
if track.ends_at == self.origin:
self.train = Train(track=track, dist=track.track_length, direction=1, name='bob')
else:
self.train = Train(track=track, dist=0.0, direction=1, name='bob')
self.state = {
'current_station': self.origin
}
self.done = False
self.close()
self.render()
return self.state
def render(self, mode='human'):
if self.viewer is None:
self.viewer = rendering.Viewer(self.scr_wid, self.scr_hgt)
for track in World.tracks:
self.viewer.add_geom(track.geom())
for n in World.nodes:
self.viewer.add_geom(n.geom())
self.viewer.add_geom(self.train.geom)
self.train.translation.set_rotation(self.train.on_track.get_angle())
self.train.translation.set_translation(self.train.pos().x, self.train.pos().y)
return self.viewer.render(return_rgb_array=mode == 'rgb_array')
def close(self):
if self.viewer:
self.viewer.close()
self.viewer = None
gym.envs.registration.register(
id='world-v0',
entry_point='main:World',
)
if __name__ == '__main__':
from main import *
import gym
import numpy as np
from torch.optim.rmsprop import RMSprop
na = Node(name='A', x=304.0, y=256.0)
nb = Node(name='B', x=539.0, y=365.0)
tb = Node(name='Bravo', x=841.0, y=154.0)
tc = Node(name='Charlie', x=204.0, y=526.0)
td = Node(name='Dingo', x=786.0, y=617.0)
tf = Node(name='Foxtrot', x=56.0, y=285.0)
tt = Node(name='Tango', x=89.0, y=66.0)
tw = Node(name='Whiskey', x=249.0, y=64.0)
ra = Track(tc, td, 5)
rb = Track(td, tb, 5)
rc = Track(tb, nb, 3)
rd = Track(td, nb, 3)
re = Track(tc, nb, 3)
rf = Track(na, nb, 3)
rg = Track(na, tf, 2)
rh = Track(na, tt, 2)
ri = Track(na, tw, 2)
World.nodes = [na, nb, tb, tc, td, tf, tt, tw]
World.tracks = {ra, rb, rc, rd, re, rf, rg, rh, ri}
world: World = gym.make('world-v0')
world.origin = tw
world.destination = td
world.reset()
world.render()
# =================================================
RENDER_FPS = 20
N_INPUTS = len(World.nodes) * 2 # which station now
N_OUTPUTS = len(World.nodes) # which station next
LEARNING_RATE = 0.1
N_EPISODES = 20 # number of training iterations
N_MAX_STEPS = 1000 # max steps per episode
N_GAMES_PER_EPISODE = 3 # games rolled out per policy update
# BATCH_SIZE = 128
# GAMMA = 0.999
EPS_START = 0.9
EPS_END = 0.05
EPS_DECAY = 200
# TARGET_UPDATE = 10
# save_iterations = 10 # save the model every 10 training iterations
DISCOUNT_RATE = 0.95
BATCH_SIZE = 128
GAMMA = 0.999
# 2. Build the neural network
class NextStationNet(nn.Module):
def __init__(self):
super(NextStationNet, self).__init__()
self.input_layer = nn.Linear(N_INPUTS, 32)
self.hidden_layer = nn.Linear(32, 32)
# self.hidden_layer.weight = torch.nn.Parameter(torch.tensor([[1.58]]))
# self.hidden_layer.bias = torch.nn.Parameter(torch.tensor([-0.14]))
self.output_layer = nn.Linear(32, N_OUTPUTS)
# self.output_layer.weight = torch.nn.Parameter(torch.tensor([[2.45]]))
# self.output_layer.bias = torch.nn.Parameter(torch.tensor([-0.11]))
def forward(self, x):
x = torch.sigmoid(self.input_layer(x))
x = torch.sigmoid(self.hidden_layer(x))
x = torch.sigmoid(self.output_layer(x))
return x
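# Input encoding sketch: with 8 stations N_INPUTS is 16; a train at
# World.nodes[0] heading for World.nodes[4] becomes a 16-vector with ones at
# positions 0 and 8 + 4 = 12 and zeros elsewhere (see select_action below).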
policy_net: NextStationNet = NextStationNet()
target_net: NextStationNet = NextStationNet() # DQN(screen_height, screen_width, n_actions).to(device)
target_net.load_state_dict(policy_net.state_dict())
target_net.eval()
optimizer: RMSprop = RMSprop(policy_net.parameters())
steps_done = 0 # super global variable
def select_action(policy_net, from_station, to_station):
global steps_done
# track instance to one hot
in_arr = torch.tensor([0.0] * N_INPUTS)
in_arr[World.nodes.index(from_station)] = 1.0
in_arr[len(World.nodes) + World.nodes.index(to_station)] = 1.0
# global steps_done
eps_threshold = EPS_END + (EPS_START - EPS_END) * math.exp(-1. * steps_done / EPS_DECAY)
steps_done += 1
with torch.no_grad():
prediction = policy_net(in_arr)
if random.random() > eps_threshold:
max_idx = prediction.max(0)[1].item() # .item() yields a plain int index
else:
max_idx = random.randrange(N_OUTPUTS)
target = torch.tensor([0.0] * N_OUTPUTS)
target[max_idx] = 1.0
station = World.nodes[max_idx]
return station, prediction, target
print(f"network topology: {policy_net}")
# # run input data forward through network
# # track instance to one hot
# input_data = torch.tensor([0.0] * N_INPUTS)
# input_data[World.nodes.index(world.train.curr_station())] = 1.0 # start at node A
#
# output = policy_net(input_data)
#
#
# # backpropagate gradient
#
# target = torch.tensor([0] * N_OUTPUTS)
# target[World.nodes.index(td)] = 1 # I want to go to station dingo
# # target = torch.tensor([0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0])
#
# criterion = nn.MSELoss()
# loss = criterion(output, target)
# policy_net.zero_grad()
# loss.backward()
#
# # update weights and biases
# optimizer = optim.SGD(policy_net.parameters(), lr=0.1)
# optimizer.step()
#
# output = policy_net(input_data)
# print(f"updated_a_l2 = {round(output.item(), 4)}")
def discount_rewards(reward, discount_rate):
discounted_rewards = np.empty(len(reward))
cumulative_rewards = 0
for step in reversed(range(len(reward))):
cumulative_rewards = reward[step] + cumulative_rewards * discount_rate
discounted_rewards[step] = cumulative_rewards
return discounted_rewards
def discount_and_normalize_rewards(all_rewards, discount_rate):
all_discounted_rewards = [discount_rewards(reward, discount_rate) for reward in all_rewards]
flat_rewards = np.concatenate(all_discounted_rewards)
reward_mean = flat_rewards.mean()
reward_std = flat_rewards.std()
return [(discounted_rewards - reward_mean) / reward_std for discounted_rewards in all_discounted_rewards]
# exploration policy. Try something new or stick with the known.
for episode in range(N_EPISODES):
print(f"episode: {episode}")
all_rewards = [] # all sequences of raw rewards for each episode
all_gradients = [] # gradients saved at each step of each episode
# all_entropy = []
for game in range(N_GAMES_PER_EPISODE):
|
# At this point we have run the policy for N_GAMES_PER_EPISODE games, and we
# are ready for a policy update using the algorithm described earlier.
all_rewards_normalized = discount_and_normalize_rewards(all_rewards, DISCOUNT_RATE)
# NOTE: the update below came from an earlier TensorFlow version of this
# script; `car_a` and `sess` are never defined here (and curr_gradients is
# never populated), so it is commented out to keep the module runnable.
# feed_dict = {}
# for idx, grad_and_var in enumerate(car_a['GradientsAndVariables']):
#     # multiply the gradients by the action scores, and compute the mean
#     mean_gradients = np.mean(
#         [reward * all_gradients[game_index][step][idx]
#          for game_index, rewards in enumerate(all_rewards_normalized)
#          for step, reward in enumerate(rewards)], axis=0)
#     feed_dict[grad_and_var[0]] = mean_gradients
# sess.run(car_a['Trainer'], feed_dict=feed_dict)
world.step(next_station=na)
world.render()
time.sleep(1)
world.step(next_station=nb)
world.render()
time.sleep(1)
world.step(next_station=td)
world.render()
time.sleep(1) | print(f" game: {game}")
curr_rewards = [] # all raw rewards from the current episode
curr_gradients = [] # all gradients from the current episode
world.reset()
# world.render()
# time.sleep(1 / RENDER_FPS)
for step in range(N_MAX_STEPS):
next_station, prediction, target = select_action(policy_net, from_station=world.train.curr_station(), to_station=world.destination)
print(next_station)
curr_station, reward, done, info = world.step(next_station=next_station)
world.render()
time.sleep(1 / RENDER_FPS)
curr_rewards += [reward]
# curr_gradients += [val_grads]
if done:
break
# loss = F.smooth_l1_loss(prediction, target)
# Optimize the model
# optimizer.zero_grad()
# loss.backward()
# for param in policy_net.parameters():
# param.grad.data.clamp_(-1, 1)
# optimizer.step()
all_rewards += [curr_rewards]
print(f" reward: {len(curr_rewards)}")
all_gradients += [curr_gradients] | conditional_block |
main.py | import time
# import math
import gym
# from gym import spaces, logger
# from gym.utils import seeding
from gym.envs.classic_control import rendering
import random
import math
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
import torchvision.transforms as T
import numpy as np
from collections import namedtuple
from itertools import count
class DQN(nn.Module):
def __init__(self, h, w, outputs):
super(DQN, self).__init__()
self.conv1 = nn.Conv2d(3, 16, kernel_size=5, stride=2)
self.bn1 = nn.BatchNorm2d(16)
self.conv2 = nn.Conv2d(16, 32, kernel_size=5, stride=2)
self.bn2 = nn.BatchNorm2d(32)
self.conv3 = nn.Conv2d(32, 32, kernel_size=5, stride=2)
self.bn3 = nn.BatchNorm2d(32)
# Number of Linear input connections depends on output of conv2d layers
# and therefore the input image size, so compute it.
def conv2d_size_out(size, kernel_size = 5, stride = 2):
return (size - (kernel_size - 1) - 1) // stride + 1
convw = conv2d_size_out(conv2d_size_out(conv2d_size_out(w)))
convh = conv2d_size_out(conv2d_size_out(conv2d_size_out(h)))
linear_input_size = convw * convh * 32
self.head = nn.Linear(linear_input_size, outputs)
# Called with either one element to determine next action, or a batch
# during optimization. Returns tensor([[left0exp,right0exp]...]).
def forward(self, x):
x = F.relu(self.bn1(self.conv1(x)))
x = F.relu(self.bn2(self.conv2(x)))
x = F.relu(self.bn3(self.conv3(x)))
return self.head(x.view(x.size(0), -1))
Transition = namedtuple('Transition', ('state', 'action', 'next_state', 'reward'))
class ReplayMemory(object):
def __init__(self, capacity):
self.capacity = capacity
self.memory = []
self.position = 0
def push(self, *args):
"""Saves a transition."""
if len(self.memory) < self.capacity:
self.memory.append(None)
self.memory[self.position] = Transition(*args)
self.position = (self.position + 1) % self.capacity
def sample(self, batch_size):
return random.sample(self.memory, batch_size)
def __len__(self):
return len(self.memory)
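# Note: DQN, Transition and ReplayMemory are defined but never instantiated
# by the policy-gradient training code below; they appear to be scaffolding
# for a DQN variant that was not wired up.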
class Track:
def __init__(self, a, b, track_length):
self.begins_at = a
self.ends_at = b
self.track_length = track_length
def __eq__(self, other):
return self.begins_at == other.begins_at and self.ends_at == other.ends_at
def __hash__(self):
return hash((self.begins_at.__hash__(), self.ends_at.__hash__()))
def get_delta(self):
delta_x = self.ends_at.x - self.begins_at.x | return Node(x=delta_x, y=delta_y)
def get_angle(self):
delta_x = self.ends_at.x - self.begins_at.x
delta_y = self.ends_at.y - self.begins_at.y
# np.arctan(-1) / np.pi
return np.arctan(delta_y / delta_x)
def __repr__(self):
return f'Track: {self.begins_at} -> {self.ends_at}'
def geom(self):
geom = rendering.Line(self.begins_at.arr(), self.ends_at.arr())
geom.set_color(0, 0, 0)
return geom
def __contains__(self, station):
return station in (self.begins_at, self.ends_at)
class Node:
def __init__(self, x, y, name=None):
self.name = name
# self.p = pos
self.x = x
self.y = y
self._tracks = None
self._nodes = None
def __eq__(self, other):
return np.isclose(self.x, other.x) and np.isclose(self.y, other.y)
def __hash__(self):
return hash((self.x, self.y))
def tracks(self):
if not self._tracks:
self._tracks = {x for x in World.tracks if x.begins_at == self or x.ends_at == self}
return self._tracks
def arr(self):
return self.x, self.y
def geom(self):
geom = rendering.make_capsule(length=20, width=20)
geom.set_color(1, 0, 0)
geom.add_attr(rendering.Transform(translation=self.arr(), rotation=0.0, scale=(1, 1)))
geom.add_attr(rendering.Transform(translation=(-10.0, 0), rotation=0.0, scale=(1, 1))) # center the pill
return geom
def is_next_to(self, station):
# despite the old parameter name, the argument is a station (Node)
for t in self.tracks():
if station in (t.begins_at, t.ends_at):
return True
return False
def __repr__(self):
return f'Node: {self.name} ({self.x}, {self.y})'
class Train:
def __init__(self, track, dist, direction, name):
self.on_track = track
self.dist_on_track = dist
self.direction = direction # -1 = from end to beginning, 1 = from beginning to end
self.name = name
self.speed = 0.1
width = 45.0
height = 15.0
lef, rig, top, bot = -width / 2, width / 2, height / 2, -height / 2
self.geom = rendering.FilledPolygon([(lef, bot), (lef, top), (rig, top), (rig, bot)])
self.geom.set_color(0, 1, 0)
self.translation = rendering.Transform()
self.geom.add_attr(self.translation)
def go_to(self, station):
pass
def step(self, direction, next_track, speed):
if next_track in self.on_track.begins_at.tracks() or next_track in self.on_track.ends_at.tracks():
if self.pos() == next_track.begins_at:
self.dist_on_track = 0
self.on_track = next_track
if self.pos() == next_track.ends_at:
self.dist_on_track = next_track.track_length
self.on_track = next_track
self.dist_on_track += direction * speed
if self.dist_on_track < 0.0:
self.dist_on_track = 0.0
if self.dist_on_track > self.on_track.track_length:
self.dist_on_track = self.on_track.track_length
# if self.pos() == self.on_track.begins_at:
# if next_track in self.on_track.begins_at.tracks():
#
# if self.pos() == next_track.begins_at:
# self.dist_on_track = 0
# else:
# self.dist_on_track = next_track.track_length
# self.on_track = next_track
# if self.pos() == self.on_track.ends_at:
# if next_track in self.on_track.ends_at.tracks():
# if self.pos() == next_track.begins_at:
# self.dist_on_track = 0
# else:
# self.dist_on_track = next_track.track_length
# self.on_track = next_track
def pos(self) -> Node:
delta = self.on_track.get_delta()
progress = self.dist_on_track / self.on_track.track_length
x = self.on_track.begins_at.x + progress * delta.x
y = self.on_track.begins_at.y + progress * delta.y
return Node(x=x, y=y)
def curr_station(self):
stations = [x for x in World.nodes if x == self.pos()]
if stations:
return stations[0]
else:
return None
def __eq__(self, other):
return self.name == other.name
def __hash__(self):
return self.name.__hash__()
class World(gym.Env):
metadata = {
'render.modes': ['human', 'rgb_array'],
'video.frames_per_second': 1
}
from typing import List, Set
tracks: Set[Track] = set()
nodes: List[Node] = []
points = set()
def __init__(self):
self.age = 0
self.viewer = None
self.state = None
self.done = False
self.scr_wid = 900
self.scr_hgt = 700
self.train = None
self.train_trans = None
self.origin = None
self.destination = None
def step(self, next_station: Node):
        if self.done:
            raise Exception('episode is done; call reset() before stepping again.')
curr_station = [x for x in World.nodes if x == self.train.pos()][0]
if curr_station.is_next_to(next_station):
track = [x for x in World.tracks if curr_station in x and next_station in x][0]
            if track.ends_at == next_station:
                direction = 1
            else:
                direction = -1
            self.train.step(direction, track, track.track_length)
self.state = {
'current_station': next_station
}
# self.train.step(direction=action['direction'], next_track=action['next_track'], speed=action['speed'])
self.age += 1
self.done = self.destination == self.train.pos()
if self.done:
reward = 0.0
else:
# reward = 1.0 / self.age
reward = -1.0
return self.state['current_station'], reward, self.done, {}
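    # step() follows the classic gym API and returns (observation, reward, done,
    # info); newer gymnasium releases split `done` into terminated/truncated,
    # but this environment targets the old interface.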
def reset(self):
        track = list(self.origin.tracks())[0]  # set iteration order is arbitrary; any track touching the origin works
if track.ends_at == self.origin:
self.train = Train(track=track, dist=track.track_length, direction=1, name='bob')
else:
self.train = Train(track=track, dist=0.0, direction=1, name='bob')
self.state = {
'current_station': self.origin
}
self.done = False
self.close()
self.render()
return self.state
def render(self, mode='human'):
if self.viewer is None:
self.viewer = rendering.Viewer(self.scr_wid, self.scr_hgt)
for track in World.tracks:
self.viewer.add_geom(track.geom())
for n in World.nodes:
self.viewer.add_geom(n.geom())
self.viewer.add_geom(self.train.geom)
self.train.translation.set_rotation(self.train.on_track.get_angle())
self.train.translation.set_translation(self.train.pos().x, self.train.pos().y)
return self.viewer.render(return_rgb_array=mode == 'rgb_array')
def close(self):
if self.viewer:
self.viewer.close()
self.viewer = None
gym.envs.registration.register(
id='world-v0',
entry_point='main:World',
)
if __name__ == '__main__':
from main import *
import gym
import numpy as np
from torch.optim.rmsprop import RMSprop
na = Node(name='A', x=304.0, y=256.0)
nb = Node(name='B', x=539.0, y=365.0)
tb = Node(name='Bravo', x=841.0, y=154.0)
tc = Node(name='Charlie', x=204.0, y=526.0)
td = Node(name='Dingo', x=786.0, y=617.0)
tf = Node(name='Foxtrot', x=56.0, y=285.0)
tt = Node(name='Tango', x=89.0, y=66.0)
tw = Node(name='Whiskey', x=249.0, y=64.0)
ra = Track(tc, td, 5)
rb = Track(td, tb, 5)
rc = Track(tb, nb, 3)
rd = Track(td, nb, 3)
re = Track(tc, nb, 3)
rf = Track(na, nb, 3)
rg = Track(na, tf, 2)
rh = Track(na, tt, 2)
ri = Track(na, tw, 2)
World.nodes = [na, nb, tb, tc, td, tf, tt, tw]
World.tracks = {ra, rb, rc, rd, re, rf, rg, rh, ri}
world: World = gym.make('world-v0')
world.origin = tw
world.destination = td
world.reset()
world.render()
# =================================================
RENDER_FPS = 20
    N_INPUTS = len(World.nodes) * 2  # one-hot current station + one-hot destination
N_OUTPUTS = len(World.nodes) # which station next
LEARNING_RATE = 0.1
N_EPISODES = 20 # number of training iterations
N_MAX_STEPS = 1000 # max steps per episode
    N_GAMES_PER_EPISODE = 3  # games played before each policy update
# BATCH_SIZE = 128
# GAMMA = 0.999
EPS_START = 0.9
EPS_END = 0.05
EPS_DECAY = 200
# TARGET_UPDATE = 10
# save_iterations = 10 # save the model every 10 training iterations
DISCOUNT_RATE = 0.95
BATCH_SIZE = 128
GAMMA = 0.999
# 2. Build the neural network
class NextStationNet(nn.Module):
def __init__(self):
super(NextStationNet, self).__init__()
self.input_layer = nn.Linear(N_INPUTS, 32)
self.hidden_layer = nn.Linear(32, 32)
# self.hidden_layer.weight = torch.nn.Parameter(torch.tensor([[1.58]]))
# self.hidden_layer.bias = torch.nn.Parameter(torch.tensor([-0.14]))
self.output_layer = nn.Linear(32, N_OUTPUTS)
# self.output_layer.weight = torch.nn.Parameter(torch.tensor([[2.45]]))
# self.output_layer.bias = torch.nn.Parameter(torch.tensor([-0.11]))
def forward(self, x):
x = torch.sigmoid(self.input_layer(x))
x = torch.sigmoid(self.hidden_layer(x))
x = torch.sigmoid(self.output_layer(x))
return x
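    # Note: sigmoid on the output layer yields independent per-station scores
    # rather than a probability distribution. A softmax head would be the usual
    # choice if the outputs are meant to be action probabilities -- a sketch of
    # that variant (an assumption, not what was trained here):
    #
    #     def forward(self, x):
    #         x = torch.sigmoid(self.input_layer(x))
    #         x = torch.sigmoid(self.hidden_layer(x))
    #         return torch.softmax(self.output_layer(x), dim=-1)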
policy_net: NextStationNet = NextStationNet()
target_net: NextStationNet = NextStationNet() # DQN(screen_height, screen_width, n_actions).to(device)
target_net.load_state_dict(policy_net.state_dict())
target_net.eval()
optimizer: RMSprop = RMSprop(policy_net.parameters())
    steps_done = 0  # global step counter that drives the epsilon decay
def select_action(policy_net, from_station, to_station):
global steps_done
# track instance to one hot
in_arr = torch.tensor([0.0] * N_INPUTS)
in_arr[World.nodes.index(from_station)] = 1.0
in_arr[len(World.nodes) + World.nodes.index(to_station)] = 1.0
# global steps_done
eps_threshold = EPS_END + (EPS_START - EPS_END) * math.exp(-1. * steps_done / EPS_DECAY)
steps_done += 1
with torch.no_grad():
prediction = policy_net(in_arr)
if random.random() > eps_threshold:
            max_idx = prediction.max(0)[1].item()  # plain int, usable as a list index
else:
max_idx = random.randrange(N_OUTPUTS)
target = torch.tensor([0.0] * N_OUTPUTS)
target[max_idx] = 1.0
station = World.nodes[max_idx]
return station, prediction, target
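    # Rough shape of the epsilon schedule used above (values computed straight
    # from the formula, not measured from a run):
    #   steps_done =    0 -> eps = 0.05 + 0.85 * e**0  ~= 0.900
    #   steps_done =  200 -> eps = 0.05 + 0.85 * e**-1 ~= 0.363
    #   steps_done = 1000 -> eps = 0.05 + 0.85 * e**-5 ~= 0.056
    # so the agent starts ~90% random and settles near 5% exploration.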
print(f"network topology: {policy_net}")
# # run input data forward through network
# # track instance to one hot
# input_data = torch.tensor([0.0] * N_INPUTS)
# input_data[World.nodes.index(world.train.curr_station())] = 1.0 # start at node A
#
# output = policy_net(input_data)
#
#
# # backpropagate gradient
#
# target = torch.tensor([0] * N_OUTPUTS)
# target[World.nodes.index(td)] = 1 # I want to go to station dingo
# # target = torch.tensor([0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0])
#
# criterion = nn.MSELoss()
# loss = criterion(output, target)
# policy_net.zero_grad()
# loss.backward()
#
# # update weights and biases
# optimizer = optim.SGD(policy_net.parameters(), lr=0.1)
# optimizer.step()
#
# output = policy_net(input_data)
# print(f"updated_a_l2 = {round(output.item(), 4)}")
def discount_rewards(reward, discount_rate):
discounted_rewards = np.empty(len(reward))
cumulative_rewards = 0
for step in reversed(range(len(reward))):
cumulative_rewards = reward[step] + cumulative_rewards * discount_rate
discounted_rewards[step] = cumulative_rewards
return discounted_rewards
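    # Worked example with discount_rate = 0.95 and rewards [-1, -1, 0]
    # (two moves, then arrival), filled in from the back:
    #   step 2: 0
    #   step 1: -1 + 0.95 * 0      = -1.0
    #   step 0: -1 + 0.95 * (-1.0) = -1.95
    # discount_rewards([-1, -1, 0], 0.95) -> array([-1.95, -1.0, 0.0])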
def discount_and_normalize_rewards(all_rewards, discount_rate):
all_discounted_rewards = [discount_rewards(reward, discount_rate) for reward in all_rewards]
flat_rewards = np.concatenate(all_discounted_rewards)
reward_mean = flat_rewards.mean()
reward_std = flat_rewards.std()
return [(discounted_rewards - reward_mean) / reward_std for discounted_rewards in all_discounted_rewards]
# exploration policy. Try something new or stick with the known.
for episode in range(N_EPISODES):
print(f"episode: {episode}")
all_rewards = [] # all sequences of raw rewards for each episode
all_gradients = [] # gradients saved at each step of each episode
# all_entropy = []
for game in range(N_GAMES_PER_EPISODE):
print(f" game: {game}")
curr_rewards = [] # all raw rewards from the current episode
curr_gradients = [] # all gradients from the current episode
world.reset()
# world.render()
# time.sleep(1 / RENDER_FPS)
for step in range(N_MAX_STEPS):
next_station, prediction, target = select_action(policy_net, from_station=world.train.curr_station(), to_station=world.destination)
print(next_station)
curr_station, reward, done, info = world.step(next_station=next_station)
world.render()
time.sleep(1 / RENDER_FPS)
curr_rewards += [reward]
# curr_gradients += [val_grads]
if done:
break
# loss = F.smooth_l1_loss(prediction, target)
# Optimize the model
# optimizer.zero_grad()
# loss.backward()
# for param in policy_net.parameters():
# param.grad.data.clamp_(-1, 1)
# optimizer.step()
all_rewards += [curr_rewards]
print(f" reward: {len(curr_rewards)}")
all_gradients += [curr_gradients]
# At this point we have run the policy for 10 episodes, and we are
# ready for a policy update using the algorithm described earlier.
        all_rewards_normalized = discount_and_normalize_rewards(all_rewards, DISCOUNT_RATE)
        # NOTE: the update below is leftover TensorFlow-1.x-style policy-gradient
        # code: `car_a` and `sess` are never defined in this file and
        # `all_gradients` is never filled in, so executing it would raise
        # NameError. It is kept commented out to document the intended rule:
        # scale each saved gradient by its normalized reward, average, then apply.
        # feed_dict = {}
        # for idx, grad_and_var in enumerate(car_a['GradientsAndVariables']):
        #     # multiply the gradients by the action scores, and compute the mean
        #     scaled = []
        #     for game_index, rewards in enumerate(all_rewards_normalized):
        #         for step, reward in enumerate(rewards):
        #             scaled += [reward * all_gradients[game_index][step][idx]]
        #     mean_gradients = np.mean(scaled, axis=0)
        #     feed_dict[grad_and_var[0]] = mean_gradients
        # # train here
        # sess.run(car_a['Trainer'], feed_dict=feed_dict)
    world.reset()  # start a fresh episode; the last training game may have ended done
    world.step(next_station=na)
world.render()
time.sleep(1)
world.step(next_station=nb)
world.render()
time.sleep(1)
world.step(next_station=td)
world.render()
time.sleep(1) | delta_y = self.ends_at.y - self.begins_at.y
| random_line_split |
main.py | import time
# import math
import gym
# from gym import spaces, logger
# from gym.utils import seeding
from gym.envs.classic_control import rendering
import random
import math
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
import torchvision.transforms as T
import numpy as np
from collections import namedtuple
from itertools import count
class DQN(nn.Module):
def __init__(self, h, w, outputs):
|
# Called with either one element to determine next action, or a batch
# during optimization. Returns tensor([[left0exp,right0exp]...]).
def forward(self, x):
x = F.relu(self.bn1(self.conv1(x)))
x = F.relu(self.bn2(self.conv2(x)))
x = F.relu(self.bn3(self.conv3(x)))
return self.head(x.view(x.size(0), -1))
Transition = namedtuple('Transition', ('state', 'action', 'next_state', 'reward'))
class ReplayMemory(object):
def __init__(self, capacity):
self.capacity = capacity
self.memory = []
self.position = 0
def push(self, *args):
"""Saves a transition."""
if len(self.memory) < self.capacity:
self.memory.append(None)
self.memory[self.position] = Transition(*args)
self.position = (self.position + 1) % self.capacity
def sample(self, batch_size):
return random.sample(self.memory, batch_size)
def __len__(self):
return len(self.memory)
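# Illustrative ReplayMemory usage (the buffer is defined but unused below):
#
#     memory = ReplayMemory(10000)
#     memory.push(state, action, next_state, reward)  # stores one Transition
#     if len(memory) >= 128:
#         batch = memory.sample(128)                  # 128 random Transitions
#         states, actions, next_states, rewards = zip(*batch)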
class Track:
def __init__(self, a, b, track_length):
self.begins_at = a
self.ends_at = b
self.track_length = track_length
def __eq__(self, other):
return self.begins_at == other.begins_at and self.ends_at == other.ends_at
def __hash__(self):
return hash((self.begins_at.__hash__(), self.ends_at.__hash__()))
def get_delta(self):
delta_x = self.ends_at.x - self.begins_at.x
delta_y = self.ends_at.y - self.begins_at.y
return Node(x=delta_x, y=delta_y)
    def get_angle(self):
        delta_x = self.ends_at.x - self.begins_at.x
        delta_y = self.ends_at.y - self.begins_at.y
        # arctan2 handles vertical tracks (delta_x == 0) without dividing by zero
        return np.arctan2(delta_y, delta_x)
def __repr__(self):
return f'Track: {self.begins_at} -> {self.ends_at}'
def geom(self):
geom = rendering.Line(self.begins_at.arr(), self.ends_at.arr())
geom.set_color(0, 0, 0)
return geom
def __contains__(self, station):
return station in (self.begins_at, self.ends_at)
class Node:
def __init__(self, x, y, name=None):
self.name = name
# self.p = pos
self.x = x
self.y = y
self._tracks = None
self._nodes = None
def __eq__(self, other):
return np.isclose(self.x, other.x) and np.isclose(self.y, other.y)
def __hash__(self):
return hash((self.x, self.y))
    def tracks(self):
        # compare against None so the result is cached even when it is empty
        # ("not set()" is truthy and would recompute on every call)
        if self._tracks is None:
            self._tracks = {x for x in World.tracks if x.begins_at == self or x.ends_at == self}
        return self._tracks
def arr(self):
return self.x, self.y
def geom(self):
geom = rendering.make_capsule(length=20, width=20)
geom.set_color(1, 0, 0)
geom.add_attr(rendering.Transform(translation=self.arr(), rotation=0.0, scale=(1, 1)))
geom.add_attr(rendering.Transform(translation=(-10.0, 0), rotation=0.0, scale=(1, 1))) # center the pill
return geom
    def is_next_to(self, other_station):
        # True when some track at this node has other_station as an endpoint
        for t in self.tracks():
            if other_station in (t.begins_at, t.ends_at):
                return True
        return False
def __repr__(self):
return f'Node: {self.name} ({self.x}, {self.y})'
class Train:
def __init__(self, track, dist, direction, name):
self.on_track = track
self.dist_on_track = dist
self.direction = direction # -1 = from end to beginning, 1 = from beginning to end
self.name = name
self.speed = 0.1
width = 45.0
height = 15.0
lef, rig, top, bot = -width / 2, width / 2, height / 2, -height / 2
self.geom = rendering.FilledPolygon([(lef, bot), (lef, top), (rig, top), (rig, bot)])
self.geom.set_color(0, 1, 0)
self.translation = rendering.Transform()
self.geom.add_attr(self.translation)
def go_to(self, station):
pass
def step(self, direction, next_track, speed):
if next_track in self.on_track.begins_at.tracks() or next_track in self.on_track.ends_at.tracks():
if self.pos() == next_track.begins_at:
self.dist_on_track = 0
self.on_track = next_track
if self.pos() == next_track.ends_at:
self.dist_on_track = next_track.track_length
self.on_track = next_track
self.dist_on_track += direction * speed
if self.dist_on_track < 0.0:
self.dist_on_track = 0.0
if self.dist_on_track > self.on_track.track_length:
self.dist_on_track = self.on_track.track_length
# if self.pos() == self.on_track.begins_at:
# if next_track in self.on_track.begins_at.tracks():
#
# if self.pos() == next_track.begins_at:
# self.dist_on_track = 0
# else:
# self.dist_on_track = next_track.track_length
# self.on_track = next_track
# if self.pos() == self.on_track.ends_at:
# if next_track in self.on_track.ends_at.tracks():
# if self.pos() == next_track.begins_at:
# self.dist_on_track = 0
# else:
# self.dist_on_track = next_track.track_length
# self.on_track = next_track
def pos(self) -> Node:
delta = self.on_track.get_delta()
progress = self.dist_on_track / self.on_track.track_length
x = self.on_track.begins_at.x + progress * delta.x
y = self.on_track.begins_at.y + progress * delta.y
return Node(x=x, y=y)
def curr_station(self):
stations = [x for x in World.nodes if x == self.pos()]
if stations:
return stations[0]
else:
return None
def __eq__(self, other):
return self.name == other.name
def __hash__(self):
return self.name.__hash__()
class World(gym.Env):
metadata = {
'render.modes': ['human', 'rgb_array'],
'video.frames_per_second': 1
}
from typing import List, Set
tracks: Set[Track] = set()
nodes: List[Node] = []
points = set()
def __init__(self):
self.age = 0
self.viewer = None
self.state = None
self.done = False
self.scr_wid = 900
self.scr_hgt = 700
self.train = None
self.train_trans = None
self.origin = None
self.destination = None
def step(self, next_station: Node):
        if self.done:
            raise Exception('episode is done; call reset() before stepping again.')
curr_station = [x for x in World.nodes if x == self.train.pos()][0]
if curr_station.is_next_to(next_station):
track = [x for x in World.tracks if curr_station in x and next_station in x][0]
            if track.ends_at == next_station:
                direction = 1
            else:
                direction = -1
            self.train.step(direction, track, track.track_length)
self.state = {
'current_station': next_station
}
# self.train.step(direction=action['direction'], next_track=action['next_track'], speed=action['speed'])
self.age += 1
self.done = self.destination == self.train.pos()
if self.done:
reward = 0.0
else:
# reward = 1.0 / self.age
reward = -1.0
return self.state['current_station'], reward, self.done, {}
def reset(self):
        track = list(self.origin.tracks())[0]  # set iteration order is arbitrary; any track touching the origin works
if track.ends_at == self.origin:
self.train = Train(track=track, dist=track.track_length, direction=1, name='bob')
else:
self.train = Train(track=track, dist=0.0, direction=1, name='bob')
self.state = {
'current_station': self.origin
}
self.done = False
self.close()
self.render()
return self.state
def render(self, mode='human'):
if self.viewer is None:
self.viewer = rendering.Viewer(self.scr_wid, self.scr_hgt)
for track in World.tracks:
self.viewer.add_geom(track.geom())
for n in World.nodes:
self.viewer.add_geom(n.geom())
self.viewer.add_geom(self.train.geom)
self.train.translation.set_rotation(self.train.on_track.get_angle())
self.train.translation.set_translation(self.train.pos().x, self.train.pos().y)
return self.viewer.render(return_rgb_array=mode == 'rgb_array')
def close(self):
if self.viewer:
self.viewer.close()
self.viewer = None
gym.envs.registration.register(
id='world-v0',
entry_point='main:World',
)
if __name__ == '__main__':
from main import *
import gym
import numpy as np
from torch.optim.rmsprop import RMSprop
na = Node(name='A', x=304.0, y=256.0)
nb = Node(name='B', x=539.0, y=365.0)
tb = Node(name='Bravo', x=841.0, y=154.0)
tc = Node(name='Charlie', x=204.0, y=526.0)
td = Node(name='Dingo', x=786.0, y=617.0)
tf = Node(name='Foxtrot', x=56.0, y=285.0)
tt = Node(name='Tango', x=89.0, y=66.0)
tw = Node(name='Whiskey', x=249.0, y=64.0)
ra = Track(tc, td, 5)
rb = Track(td, tb, 5)
rc = Track(tb, nb, 3)
rd = Track(td, nb, 3)
re = Track(tc, nb, 3)
rf = Track(na, nb, 3)
rg = Track(na, tf, 2)
rh = Track(na, tt, 2)
ri = Track(na, tw, 2)
World.nodes = [na, nb, tb, tc, td, tf, tt, tw]
World.tracks = {ra, rb, rc, rd, re, rf, rg, rh, ri}
world: World = gym.make('world-v0')
world.origin = tw
world.destination = td
world.reset()
world.render()
# =================================================
RENDER_FPS = 20
    N_INPUTS = len(World.nodes) * 2  # one-hot current station + one-hot destination
N_OUTPUTS = len(World.nodes) # which station next
LEARNING_RATE = 0.1
N_EPISODES = 20 # number of training iterations
N_MAX_STEPS = 1000 # max steps per episode
    N_GAMES_PER_EPISODE = 3  # games played before each policy update
# BATCH_SIZE = 128
# GAMMA = 0.999
EPS_START = 0.9
EPS_END = 0.05
EPS_DECAY = 200
# TARGET_UPDATE = 10
# save_iterations = 10 # save the model every 10 training iterations
DISCOUNT_RATE = 0.95
BATCH_SIZE = 128
GAMMA = 0.999
# 2. Build the neural network
class NextStationNet(nn.Module):
def __init__(self):
super(NextStationNet, self).__init__()
self.input_layer = nn.Linear(N_INPUTS, 32)
self.hidden_layer = nn.Linear(32, 32)
# self.hidden_layer.weight = torch.nn.Parameter(torch.tensor([[1.58]]))
# self.hidden_layer.bias = torch.nn.Parameter(torch.tensor([-0.14]))
self.output_layer = nn.Linear(32, N_OUTPUTS)
# self.output_layer.weight = torch.nn.Parameter(torch.tensor([[2.45]]))
# self.output_layer.bias = torch.nn.Parameter(torch.tensor([-0.11]))
def forward(self, x):
x = torch.sigmoid(self.input_layer(x))
x = torch.sigmoid(self.hidden_layer(x))
x = torch.sigmoid(self.output_layer(x))
return x
policy_net: NextStationNet = NextStationNet()
target_net: NextStationNet = NextStationNet() # DQN(screen_height, screen_width, n_actions).to(device)
target_net.load_state_dict(policy_net.state_dict())
target_net.eval()
optimizer: RMSprop = RMSprop(policy_net.parameters())
    steps_done = 0  # global step counter that drives the epsilon decay
def select_action(policy_net, from_station, to_station):
global steps_done
# track instance to one hot
in_arr = torch.tensor([0.0] * N_INPUTS)
in_arr[World.nodes.index(from_station)] = 1.0
in_arr[len(World.nodes) + World.nodes.index(to_station)] = 1.0
# global steps_done
eps_threshold = EPS_END + (EPS_START - EPS_END) * math.exp(-1. * steps_done / EPS_DECAY)
steps_done += 1
with torch.no_grad():
prediction = policy_net(in_arr)
if random.random() > eps_threshold:
            max_idx = prediction.max(0)[1].item()  # plain int, usable as a list index
else:
max_idx = random.randrange(N_OUTPUTS)
target = torch.tensor([0.0] * N_OUTPUTS)
target[max_idx] = 1.0
station = World.nodes[max_idx]
return station, prediction, target
print(f"network topology: {policy_net}")
# # run input data forward through network
# # track instance to one hot
# input_data = torch.tensor([0.0] * N_INPUTS)
# input_data[World.nodes.index(world.train.curr_station())] = 1.0 # start at node A
#
# output = policy_net(input_data)
#
#
# # backpropagate gradient
#
# target = torch.tensor([0] * N_OUTPUTS)
# target[World.nodes.index(td)] = 1 # I want to go to station dingo
# # target = torch.tensor([0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0])
#
# criterion = nn.MSELoss()
# loss = criterion(output, target)
# policy_net.zero_grad()
# loss.backward()
#
# # update weights and biases
# optimizer = optim.SGD(policy_net.parameters(), lr=0.1)
# optimizer.step()
#
# output = policy_net(input_data)
# print(f"updated_a_l2 = {round(output.item(), 4)}")
def discount_rewards(reward, discount_rate):
discounted_rewards = np.empty(len(reward))
cumulative_rewards = 0
for step in reversed(range(len(reward))):
cumulative_rewards = reward[step] + cumulative_rewards * discount_rate
discounted_rewards[step] = cumulative_rewards
return discounted_rewards
def discount_and_normalize_rewards(all_rewards, discount_rate):
all_discounted_rewards = [discount_rewards(reward, discount_rate) for reward in all_rewards]
flat_rewards = np.concatenate(all_discounted_rewards)
reward_mean = flat_rewards.mean()
reward_std = flat_rewards.std()
return [(discounted_rewards - reward_mean) / reward_std for discounted_rewards in all_discounted_rewards]
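    # Worked example (computed by hand): all_rewards = [[1.0, 0.0]] discounts to
    # [1.0, 0.0] whatever the rate (the later reward is zero); the flattened
    # mean is 0.5 and the population std is 0.5, so the normalized result is
    # [array([ 1.0, -1.0])].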
# exploration policy. Try something new or stick with the known.
for episode in range(N_EPISODES):
print(f"episode: {episode}")
all_rewards = [] # all sequences of raw rewards for each episode
all_gradients = [] # gradients saved at each step of each episode
# all_entropy = []
for game in range(N_GAMES_PER_EPISODE):
print(f" game: {game}")
curr_rewards = [] # all raw rewards from the current episode
curr_gradients = [] # all gradients from the current episode
world.reset()
# world.render()
# time.sleep(1 / RENDER_FPS)
for step in range(N_MAX_STEPS):
next_station, prediction, target = select_action(policy_net, from_station=world.train.curr_station(), to_station=world.destination)
print(next_station)
curr_station, reward, done, info = world.step(next_station=next_station)
world.render()
time.sleep(1 / RENDER_FPS)
curr_rewards += [reward]
# curr_gradients += [val_grads]
if done:
break
# loss = F.smooth_l1_loss(prediction, target)
# Optimize the model
# optimizer.zero_grad()
# loss.backward()
# for param in policy_net.parameters():
# param.grad.data.clamp_(-1, 1)
# optimizer.step()
all_rewards += [curr_rewards]
print(f" reward: {len(curr_rewards)}")
all_gradients += [curr_gradients]
# At this point we have run the policy for 10 episodes, and we are
# ready for a policy update using the algorithm described earlier.
        all_rewards_normalized = discount_and_normalize_rewards(all_rewards, DISCOUNT_RATE)
        # NOTE: the update below is leftover TensorFlow-1.x-style policy-gradient
        # code: `car_a` and `sess` are never defined in this file and
        # `all_gradients` is never filled in, so executing it would raise
        # NameError. It is kept commented out to document the intended rule:
        # scale each saved gradient by its normalized reward, average, then apply.
        # feed_dict = {}
        # for idx, grad_and_var in enumerate(car_a['GradientsAndVariables']):
        #     # multiply the gradients by the action scores, and compute the mean
        #     scaled = []
        #     for game_index, rewards in enumerate(all_rewards_normalized):
        #         for step, reward in enumerate(rewards):
        #             scaled += [reward * all_gradients[game_index][step][idx]]
        #     mean_gradients = np.mean(scaled, axis=0)
        #     feed_dict[grad_and_var[0]] = mean_gradients
        # # train here
        # sess.run(car_a['Trainer'], feed_dict=feed_dict)
    world.reset()  # start a fresh episode; the last training game may have ended done
    world.step(next_station=na)
world.render()
time.sleep(1)
world.step(next_station=nb)
world.render()
time.sleep(1)
world.step(next_station=td)
world.render()
time.sleep(1) | super(DQN, self).__init__()
self.conv1 = nn.Conv2d(3, 16, kernel_size=5, stride=2)
self.bn1 = nn.BatchNorm2d(16)
self.conv2 = nn.Conv2d(16, 32, kernel_size=5, stride=2)
self.bn2 = nn.BatchNorm2d(32)
self.conv3 = nn.Conv2d(32, 32, kernel_size=5, stride=2)
self.bn3 = nn.BatchNorm2d(32)
# Number of Linear input connections depends on output of conv2d layers
# and therefore the input image size, so compute it.
def conv2d_size_out(size, kernel_size = 5, stride = 2):
return (size - (kernel_size - 1) - 1) // stride + 1
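        # e.g. an 84x84 input shrinks 84 -> 40 -> 18 -> 7 through the three
        # stride-2, kernel-5 convolutions (each application is (size - 5) // 2 + 1)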
convw = conv2d_size_out(conv2d_size_out(conv2d_size_out(w)))
convh = conv2d_size_out(conv2d_size_out(conv2d_size_out(h)))
linear_input_size = convw * convh * 32
self.head = nn.Linear(linear_input_size, outputs) | identifier_body |
test_expand.rs | use super::utils::check;
use hex_literal::hex;
#[test]
fn aes128_expand_key_test() {
use super::aes128::expand_key;
let keys = [0x00; 16];
check(
unsafe { &expand_key(&keys) },
&[
[0x0000000000000000, 0x0000000000000000],
[0x6263636362636363, 0x6263636362636363],
[0x9b9898c9f9fbfbaa, 0x9b9898c9f9fbfbaa],
[0x90973450696ccffa, 0xf2f457330b0fac99],
[0xee06da7b876a1581, 0x759e42b27e91ee2b],
[0x7f2e2b88f8443e09, 0x8dda7cbbf34b9290],
[0xec614b851425758c, 0x99ff09376ab49ba7],
[0x217517873550620b, 0xacaf6b3cc61bf09b],
[0x0ef903333ba96138, 0x97060a04511dfa9f],
[0xb1d4d8e28a7db9da, 0x1d7bb3de4c664941],
[0xb4ef5bcb3e92e211, 0x23e951cf6f8f188e],
],
);
let keys = [0xff; 16];
check(
unsafe { &expand_key(&keys) },
&[
[0xffffffffffffffff, 0xffffffffffffffff],
[0xe8e9e9e917161616, 0xe8e9e9e917161616],
[0xadaeae19bab8b80f, 0x525151e6454747f0],
[0x090e2277b3b69a78, 0xe1e7cb9ea4a08c6e],
[0xe16abd3e52dc2746, 0xb33becd8179b60b6],
[0xe5baf3ceb766d488, 0x045d385013c658e6],
[0x71d07db3c6b6a93b, 0xc2eb916bd12dc98d],
[0xe90d208d2fbb89b6, 0xed5018dd3c7dd150],
[0x96337366b988fad0, 0x54d8e20d68a5335d],
[0x8bf03f233278c5f3, 0x66a027fe0e0514a3],
[0xd60a3588e472f07b, 0x82d2d7858cd7c326],
],
);
let keys = hex!("000102030405060708090a0b0c0d0e0f");
check(
unsafe { &expand_key(&keys) },
&[
[0x0001020304050607, 0x08090a0b0c0d0e0f],
[0xd6aa74fdd2af72fa, 0xdaa678f1d6ab76fe],
[0xb692cf0b643dbdf1, 0xbe9bc5006830b3fe],
[0xb6ff744ed2c2c9bf, 0x6c590cbf0469bf41],
[0x47f7f7bc95353e03, 0xf96c32bcfd058dfd],
[0x3caaa3e8a99f9deb, 0x50f3af57adf622aa],
[0x5e390f7df7a69296, 0xa7553dc10aa31f6b],
[0x14f9701ae35fe28c, 0x440adf4d4ea9c026],
[0x47438735a41c65b9, 0xe016baf4aebf7ad2],
[0x549932d1f0855768, 0x1093ed9cbe2c974e],
[0x13111d7fe3944a17, 0xf307a78b4d2b30c5], | ],
);
let keys = hex!("6920e299a5202a6d656e636869746f2a");
check(
unsafe { &expand_key(&keys) },
&[
[0x6920e299a5202a6d, 0x656e636869746f2a],
[0xfa8807605fa82d0d, 0x3ac64e6553b2214f],
[0xcf75838d90ddae80, 0xaa1be0e5f9a9c1aa],
[0x180d2f1488d08194, 0x22cb6171db62a0db],
[0xbaed96ad323d1739, 0x10f67648cb94d693],
[0x881b4ab2ba265d8b, 0xaad02bc36144fd50],
[0xb34f195d096944d6, 0xa3b96f15c2fd9245],
[0xa7007778ae6933ae, 0x0dd05cbbcf2dcefe],
[0xff8bccf251e2ff5c, 0x5c32a3e7931f6d19],
[0x24b7182e7555e772, 0x29674495ba78298c],
[0xae127cdadb479ba8, 0xf220df3d4858f6b1],
],
);
let keys = hex!("2b7e151628aed2a6abf7158809cf4f3c");
check(
unsafe { &expand_key(&keys) },
&[
[0x2b7e151628aed2a6, 0xabf7158809cf4f3c],
[0xa0fafe1788542cb1, 0x23a339392a6c7605],
[0xf2c295f27a96b943, 0x5935807a7359f67f],
[0x3d80477d4716fe3e, 0x1e237e446d7a883b],
[0xef44a541a8525b7f, 0xb671253bdb0bad00],
[0xd4d1c6f87c839d87, 0xcaf2b8bc11f915bc],
[0x6d88a37a110b3efd, 0xdbf98641ca0093fd],
[0x4e54f70e5f5fc9f3, 0x84a64fb24ea6dc4f],
[0xead27321b58dbad2, 0x312bf5607f8d292f],
[0xac7766f319fadc21, 0x28d12941575c006e],
[0xd014f9a8c9ee2589, 0xe13f0cc8b6630ca6],
],
);
}
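// `check` comes from super::utils and is not shown in this file; a minimal
// helper consistent with these call sites (the signature is an assumption --
// the real one likely converts hardware round-key registers to u64 words
// first) could look like:
//
//     pub fn check(keys: &[[u64; 2]], expected: &[[u64; 2]]) {
//         for (i, (k, e)) in keys.iter().zip(expected.iter()).enumerate() {
//             assert_eq!(k, e, "round key {} mismatch", i);
//         }
//     }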
#[test]
fn aes192_expand_key_test() {
use super::aes192::expand_key;
let keys = [0x00; 24];
check(
unsafe { &expand_key(&keys) },
&[
[0x0000000000000000, 0x0000000000000000],
[0x0000000000000000, 0x6263636362636363],
[0x6263636362636363, 0x6263636362636363],
[0x9b9898c9f9fbfbaa, 0x9b9898c9f9fbfbaa],
[0x9b9898c9f9fbfbaa, 0x90973450696ccffa],
[0xf2f457330b0fac99, 0x90973450696ccffa],
[0xc81d19a9a171d653, 0x53858160588a2df9],
[0xc81d19a9a171d653, 0x7bebf49bda9a22c8],
[0x891fa3a8d1958e51, 0x198897f8b8f941ab],
[0xc26896f718f2b43f, 0x91ed1797407899c6],
[0x59f00e3ee1094f95, 0x83ecbc0f9b1e0830],
[0x0af31fa74a8b8661, 0x137b885ff272c7ca],
[0x432ac886d834c0b6, 0xd2c7df11984c5970],
],
);
let keys = [0xff; 24];
check(
unsafe { &expand_key(&keys) },
&[
[0xffffffffffffffff, 0xffffffffffffffff],
[0xffffffffffffffff, 0xe8e9e9e917161616],
[0xe8e9e9e917161616, 0xe8e9e9e917161616],
[0xadaeae19bab8b80f, 0x525151e6454747f0],
[0xadaeae19bab8b80f, 0xc5c2d8ed7f7a60e2],
[0x2d2b3104686c76f4, 0xc5c2d8ed7f7a60e2],
[0x1712403f686820dd, 0x454311d92d2f672d],
[0xe8edbfc09797df22, 0x8f8cd3b7e7e4f36a],
[0xa2a7e2b38f88859e, 0x67653a5ef0f2e57c],
[0x2655c33bc1b13051, 0x6316d2e2ec9e577c],
[0x8bfb6d227b09885e, 0x67919b1aa620ab4b],
[0xc53679a929a82ed5, 0xa25343f7d95acba9],
[0x598e482fffaee364, 0x3a989acd1330b418],
],
);
let keys = hex!("000102030405060708090a0b0c0d0e0f1011121314151617");
check(
unsafe { &expand_key(&keys) },
&[
[0x0001020304050607, 0x08090a0b0c0d0e0f],
[0x1011121314151617, 0x5846f2f95c43f4fe],
[0x544afef55847f0fa, 0x4856e2e95c43f4fe],
[0x40f949b31cbabd4d, 0x48f043b810b7b342],
[0x58e151ab04a2a555, 0x7effb5416245080c],
[0x2ab54bb43a02f8f6, 0x62e3a95d66410c08],
[0xf501857297448d7e, 0xbdf1c6ca87f33e3c],
[0xe510976183519b69, 0x34157c9ea351f1e0],
[0x1ea0372a99530916, 0x7c439e77ff12051e],
[0xdd7e0e887e2fff68, 0x608fc842f9dcc154],
[0x859f5f237a8d5a3d, 0xc0c02952beefd63a],
[0xde601e7827bcdf2c, 0xa223800fd8aeda32],
[0xa4970a331a78dc09, 0xc418c271e3a41d5d],
],
);
let keys = hex!("8e73b0f7da0e6452c810f32b809079e562f8ead2522c6b7b");
check(
unsafe { &expand_key(&keys) },
&[
[0x8e73b0f7da0e6452, 0xc810f32b809079e5],
[0x62f8ead2522c6b7b, 0xfe0c91f72402f5a5],
[0xec12068e6c827f6b, 0x0e7a95b95c56fec2],
[0x4db7b4bd69b54118, 0x85a74796e92538fd],
[0xe75fad44bb095386, 0x485af05721efb14f],
[0xa448f6d94d6dce24, 0xaa326360113b30e6],
[0xa25e7ed583b1cf9a, 0x27f939436a94f767],
[0xc0a69407d19da4e1, 0xec1786eb6fa64971],
[0x485f703222cb8755, 0xe26d135233f0b7b3],
[0x40beeb282f18a259, 0x6747d26b458c553e],
[0xa7e1466c9411f1df, 0x821f750aad07d753],
[0xca4005388fcc5006, 0x282d166abc3ce7b5],
[0xe98ba06f448c773c, 0x8ecc720401002202],
],
);
}
#[test]
fn aes256_expand_key_test() {
use super::aes256::expand_key;
let keys = [0x00; 32];
check(
unsafe { &expand_key(&keys) },
&[
[0x0000000000000000, 0x0000000000000000],
[0x0000000000000000, 0x0000000000000000],
[0x6263636362636363, 0x6263636362636363],
[0xaafbfbfbaafbfbfb, 0xaafbfbfbaafbfbfb],
[0x6f6c6ccf0d0f0fac, 0x6f6c6ccf0d0f0fac],
[0x7d8d8d6ad7767691, 0x7d8d8d6ad7767691],
[0x5354edc15e5be26d, 0x31378ea23c38810e],
[0x968a81c141fcf750, 0x3c717a3aeb070cab],
[0x9eaa8f28c0f16d45, 0xf1c6e3e7cdfe62e9],
[0x2b312bdf6acddc8f, 0x56bca6b5bdbbaa1e],
[0x6406fd52a4f79017, 0x553173f098cf1119],
[0x6dbba90b07767584, 0x51cad331ec71792f],
[0xe7b0e89c4347788b, 0x16760b7b8eb91a62],
[0x74ed0ba1739b7e25, 0x2251ad14ce20d43b],
[0x10f80a1753bf729c, 0x45c979e7cb706385],
],
);
let keys = [0xff; 32];
check(
unsafe { &expand_key(&keys) },
&[
[0xffffffffffffffff, 0xffffffffffffffff],
[0xffffffffffffffff, 0xffffffffffffffff],
[0xe8e9e9e917161616, 0xe8e9e9e917161616],
[0x0fb8b8b8f0474747, 0x0fb8b8b8f0474747],
[0x4a4949655d5f5f73, 0xb5b6b69aa2a0a08c],
[0x355858dcc51f1f9b, 0xcaa7a7233ae0e064],
[0xafa80ae5f2f75596, 0x4741e30ce5e14380],
[0xeca0421129bf5d8a, 0xe318faa9d9f81acd],
[0xe60ab7d014fde246, 0x53bc014ab65d42ca],
[0xa2ec6e658b5333ef, 0x684bc946b1b3d38b],
[0x9b6c8a188f91685e, 0xdc2d69146a702bde],
[0xa0bd9f782beeac97, 0x43a565d1f216b65a],
[0xfc22349173b35ccf, 0xaf9e35dbc5ee1e05],
[0x0695ed132d7b4184, 0x6ede24559cc8920f],
[0x546d424f27de1e80, 0x88402b5b4dae355e],
],
);
let keys = hex!("000102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f");
check(
unsafe { &expand_key(&keys) },
&[
[0x0001020304050607, 0x08090a0b0c0d0e0f],
[0x1011121314151617, 0x18191a1b1c1d1e1f],
[0xa573c29fa176c498, 0xa97fce93a572c09c],
[0x1651a8cd0244beda, 0x1a5da4c10640bade],
[0xae87dff00ff11b68, 0xa68ed5fb03fc1567],
[0x6de1f1486fa54f92, 0x75f8eb5373b8518d],
[0xc656827fc9a79917, 0x6f294cec6cd5598b],
[0x3de23a75524775e7, 0x27bf9eb45407cf39],
[0x0bdc905fc27b0948, 0xad5245a4c1871c2f],
[0x45f5a66017b2d387, 0x300d4d33640a820a],
[0x7ccff71cbeb4fe54, 0x13e6bbf0d261a7df],
[0xf01afafee7a82979, 0xd7a5644ab3afe640],
[0x2541fe719bf50025, 0x8813bbd55a721c0a],
[0x4e5a6699a9f24fe0, 0x7e572baacdf8cdea],
[0x24fc79ccbf0979e9, 0x371ac23c6d68de36],
],
);
let keys = hex!("603deb1015ca71be2b73aef0857d77811f352c073b6108d72d9810a30914dff4");
check(
unsafe { &expand_key(&keys) },
&[
[0x603deb1015ca71be, 0x2b73aef0857d7781],
[0x1f352c073b6108d7, 0x2d9810a30914dff4],
[0x9ba354118e6925af, 0xa51a8b5f2067fcde],
[0xa8b09c1a93d194cd, 0xbe49846eb75d5b9a],
[0xd59aecb85bf3c917, 0xfee94248de8ebe96],
[0xb5a9328a2678a647, 0x983122292f6c79b3],
[0x812c81addadf48ba, 0x24360af2fab8b464],
[0x98c5bfc9bebd198e, 0x268c3ba709e04214],
[0x68007bacb2df3316, 0x96e939e46c518d80],
[0xc814e20476a9fb8a, 0x5025c02d59c58239],
[0xde1369676ccc5a71, 0xfa2563959674ee15],
[0x5886ca5d2e2f31d7, 0x7e0af1fa27cf73c3],
[0x749c47ab18501dda, 0xe2757e4f7401905a],
[0xcafaaae3e4d59b34, 0x9adf6acebd10190d],
[0xfe4890d1e6188d0b, 0x046df344706c631e],
],
);
} | random_line_split | |
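// Aside: all of the vectors above follow the FIPS-197 key schedule, whose
// round constants are successive powers of x in GF(2^8). A tiny, standalone
// generator (illustrative only, not part of the crate under test):
//
//     fn rcon(i: usize) -> u8 {
//         let mut rc: u8 = 1; // RC[1] = 0x01
//         for _ in 1..i {
//             // multiply by x, reducing by the AES polynomial when the high bit spills
//             rc = (rc << 1) ^ if rc & 0x80 != 0 { 0x1b } else { 0 };
//         }
//         rc
//     }
//
// rcon(1)..rcon(10) yields 01 02 04 08 10 20 40 80 1b 36; XORing rcon(1) into
// SubWord(RotWord(0)) = 0x63636363 is what produces the 0x62636363... words in
// the all-zero AES-128 expansion above.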
test_expand.rs | use super::utils::check;
use hex_literal::hex;
#[test]
fn aes128_expand_key_test() {
use super::aes128::expand_key;
let keys = [0x00; 16];
check(
unsafe { &expand_key(&keys) },
&[
[0x0000000000000000, 0x0000000000000000],
[0x6263636362636363, 0x6263636362636363],
[0x9b9898c9f9fbfbaa, 0x9b9898c9f9fbfbaa],
[0x90973450696ccffa, 0xf2f457330b0fac99],
[0xee06da7b876a1581, 0x759e42b27e91ee2b],
[0x7f2e2b88f8443e09, 0x8dda7cbbf34b9290],
[0xec614b851425758c, 0x99ff09376ab49ba7],
[0x217517873550620b, 0xacaf6b3cc61bf09b],
[0x0ef903333ba96138, 0x97060a04511dfa9f],
[0xb1d4d8e28a7db9da, 0x1d7bb3de4c664941],
[0xb4ef5bcb3e92e211, 0x23e951cf6f8f188e],
],
);
let keys = [0xff; 16];
check(
unsafe { &expand_key(&keys) },
&[
[0xffffffffffffffff, 0xffffffffffffffff],
[0xe8e9e9e917161616, 0xe8e9e9e917161616],
[0xadaeae19bab8b80f, 0x525151e6454747f0],
[0x090e2277b3b69a78, 0xe1e7cb9ea4a08c6e],
[0xe16abd3e52dc2746, 0xb33becd8179b60b6],
[0xe5baf3ceb766d488, 0x045d385013c658e6],
[0x71d07db3c6b6a93b, 0xc2eb916bd12dc98d],
[0xe90d208d2fbb89b6, 0xed5018dd3c7dd150],
[0x96337366b988fad0, 0x54d8e20d68a5335d],
[0x8bf03f233278c5f3, 0x66a027fe0e0514a3],
[0xd60a3588e472f07b, 0x82d2d7858cd7c326],
],
);
let keys = hex!("000102030405060708090a0b0c0d0e0f");
check(
unsafe { &expand_key(&keys) },
&[
[0x0001020304050607, 0x08090a0b0c0d0e0f],
[0xd6aa74fdd2af72fa, 0xdaa678f1d6ab76fe],
[0xb692cf0b643dbdf1, 0xbe9bc5006830b3fe],
[0xb6ff744ed2c2c9bf, 0x6c590cbf0469bf41],
[0x47f7f7bc95353e03, 0xf96c32bcfd058dfd],
[0x3caaa3e8a99f9deb, 0x50f3af57adf622aa],
[0x5e390f7df7a69296, 0xa7553dc10aa31f6b],
[0x14f9701ae35fe28c, 0x440adf4d4ea9c026],
[0x47438735a41c65b9, 0xe016baf4aebf7ad2],
[0x549932d1f0855768, 0x1093ed9cbe2c974e],
[0x13111d7fe3944a17, 0xf307a78b4d2b30c5],
],
);
let keys = hex!("6920e299a5202a6d656e636869746f2a");
check(
unsafe { &expand_key(&keys) },
&[
[0x6920e299a5202a6d, 0x656e636869746f2a],
[0xfa8807605fa82d0d, 0x3ac64e6553b2214f],
[0xcf75838d90ddae80, 0xaa1be0e5f9a9c1aa],
[0x180d2f1488d08194, 0x22cb6171db62a0db],
[0xbaed96ad323d1739, 0x10f67648cb94d693],
[0x881b4ab2ba265d8b, 0xaad02bc36144fd50],
[0xb34f195d096944d6, 0xa3b96f15c2fd9245],
[0xa7007778ae6933ae, 0x0dd05cbbcf2dcefe],
[0xff8bccf251e2ff5c, 0x5c32a3e7931f6d19],
[0x24b7182e7555e772, 0x29674495ba78298c],
[0xae127cdadb479ba8, 0xf220df3d4858f6b1],
],
);
let keys = hex!("2b7e151628aed2a6abf7158809cf4f3c");
check(
unsafe { &expand_key(&keys) },
&[
[0x2b7e151628aed2a6, 0xabf7158809cf4f3c],
[0xa0fafe1788542cb1, 0x23a339392a6c7605],
[0xf2c295f27a96b943, 0x5935807a7359f67f],
[0x3d80477d4716fe3e, 0x1e237e446d7a883b],
[0xef44a541a8525b7f, 0xb671253bdb0bad00],
[0xd4d1c6f87c839d87, 0xcaf2b8bc11f915bc],
[0x6d88a37a110b3efd, 0xdbf98641ca0093fd],
[0x4e54f70e5f5fc9f3, 0x84a64fb24ea6dc4f],
[0xead27321b58dbad2, 0x312bf5607f8d292f],
[0xac7766f319fadc21, 0x28d12941575c006e],
[0xd014f9a8c9ee2589, 0xe13f0cc8b6630ca6],
],
);
}
#[test]
fn aes192_expand_key_test() |
#[test]
fn aes256_expand_key_test() {
use super::aes256::expand_key;
let keys = [0x00; 32];
check(
unsafe { &expand_key(&keys) },
&[
[0x0000000000000000, 0x0000000000000000],
[0x0000000000000000, 0x0000000000000000],
[0x6263636362636363, 0x6263636362636363],
[0xaafbfbfbaafbfbfb, 0xaafbfbfbaafbfbfb],
[0x6f6c6ccf0d0f0fac, 0x6f6c6ccf0d0f0fac],
[0x7d8d8d6ad7767691, 0x7d8d8d6ad7767691],
[0x5354edc15e5be26d, 0x31378ea23c38810e],
[0x968a81c141fcf750, 0x3c717a3aeb070cab],
[0x9eaa8f28c0f16d45, 0xf1c6e3e7cdfe62e9],
[0x2b312bdf6acddc8f, 0x56bca6b5bdbbaa1e],
[0x6406fd52a4f79017, 0x553173f098cf1119],
[0x6dbba90b07767584, 0x51cad331ec71792f],
[0xe7b0e89c4347788b, 0x16760b7b8eb91a62],
[0x74ed0ba1739b7e25, 0x2251ad14ce20d43b],
[0x10f80a1753bf729c, 0x45c979e7cb706385],
],
);
let keys = [0xff; 32];
check(
unsafe { &expand_key(&keys) },
&[
[0xffffffffffffffff, 0xffffffffffffffff],
[0xffffffffffffffff, 0xffffffffffffffff],
[0xe8e9e9e917161616, 0xe8e9e9e917161616],
[0x0fb8b8b8f0474747, 0x0fb8b8b8f0474747],
[0x4a4949655d5f5f73, 0xb5b6b69aa2a0a08c],
[0x355858dcc51f1f9b, 0xcaa7a7233ae0e064],
[0xafa80ae5f2f75596, 0x4741e30ce5e14380],
[0xeca0421129bf5d8a, 0xe318faa9d9f81acd],
[0xe60ab7d014fde246, 0x53bc014ab65d42ca],
[0xa2ec6e658b5333ef, 0x684bc946b1b3d38b],
[0x9b6c8a188f91685e, 0xdc2d69146a702bde],
[0xa0bd9f782beeac97, 0x43a565d1f216b65a],
[0xfc22349173b35ccf, 0xaf9e35dbc5ee1e05],
[0x0695ed132d7b4184, 0x6ede24559cc8920f],
[0x546d424f27de1e80, 0x88402b5b4dae355e],
],
);
let keys = hex!("000102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f");
check(
unsafe { &expand_key(&keys) },
&[
[0x0001020304050607, 0x08090a0b0c0d0e0f],
[0x1011121314151617, 0x18191a1b1c1d1e1f],
[0xa573c29fa176c498, 0xa97fce93a572c09c],
[0x1651a8cd0244beda, 0x1a5da4c10640bade],
[0xae87dff00ff11b68, 0xa68ed5fb03fc1567],
[0x6de1f1486fa54f92, 0x75f8eb5373b8518d],
[0xc656827fc9a79917, 0x6f294cec6cd5598b],
[0x3de23a75524775e7, 0x27bf9eb45407cf39],
[0x0bdc905fc27b0948, 0xad5245a4c1871c2f],
[0x45f5a66017b2d387, 0x300d4d33640a820a],
[0x7ccff71cbeb4fe54, 0x13e6bbf0d261a7df],
[0xf01afafee7a82979, 0xd7a5644ab3afe640],
[0x2541fe719bf50025, 0x8813bbd55a721c0a],
[0x4e5a6699a9f24fe0, 0x7e572baacdf8cdea],
[0x24fc79ccbf0979e9, 0x371ac23c6d68de36],
],
);
let keys = hex!("603deb1015ca71be2b73aef0857d77811f352c073b6108d72d9810a30914dff4");
check(
unsafe { &expand_key(&keys) },
&[
[0x603deb1015ca71be, 0x2b73aef0857d7781],
[0x1f352c073b6108d7, 0x2d9810a30914dff4],
[0x9ba354118e6925af, 0xa51a8b5f2067fcde],
[0xa8b09c1a93d194cd, 0xbe49846eb75d5b9a],
[0xd59aecb85bf3c917, 0xfee94248de8ebe96],
[0xb5a9328a2678a647, 0x983122292f6c79b3],
[0x812c81addadf48ba, 0x24360af2fab8b464],
[0x98c5bfc9bebd198e, 0x268c3ba709e04214],
[0x68007bacb2df3316, 0x96e939e46c518d80],
[0xc814e20476a9fb8a, 0x5025c02d59c58239],
[0xde1369676ccc5a71, 0xfa2563959674ee15],
[0x5886ca5d2e2f31d7, 0x7e0af1fa27cf73c3],
[0x749c47ab18501dda, 0xe2757e4f7401905a],
[0xcafaaae3e4d59b34, 0x9adf6acebd10190d],
[0xfe4890d1e6188d0b, 0x046df344706c631e],
],
);
}
| {
use super::aes192::expand_key;
let keys = [0x00; 24];
check(
unsafe { &expand_key(&keys) },
&[
[0x0000000000000000, 0x0000000000000000],
[0x0000000000000000, 0x6263636362636363],
[0x6263636362636363, 0x6263636362636363],
[0x9b9898c9f9fbfbaa, 0x9b9898c9f9fbfbaa],
[0x9b9898c9f9fbfbaa, 0x90973450696ccffa],
[0xf2f457330b0fac99, 0x90973450696ccffa],
[0xc81d19a9a171d653, 0x53858160588a2df9],
[0xc81d19a9a171d653, 0x7bebf49bda9a22c8],
[0x891fa3a8d1958e51, 0x198897f8b8f941ab],
[0xc26896f718f2b43f, 0x91ed1797407899c6],
[0x59f00e3ee1094f95, 0x83ecbc0f9b1e0830],
[0x0af31fa74a8b8661, 0x137b885ff272c7ca],
[0x432ac886d834c0b6, 0xd2c7df11984c5970],
],
);
let keys = [0xff; 24];
check(
unsafe { &expand_key(&keys) },
&[
[0xffffffffffffffff, 0xffffffffffffffff],
[0xffffffffffffffff, 0xe8e9e9e917161616],
[0xe8e9e9e917161616, 0xe8e9e9e917161616],
[0xadaeae19bab8b80f, 0x525151e6454747f0],
[0xadaeae19bab8b80f, 0xc5c2d8ed7f7a60e2],
[0x2d2b3104686c76f4, 0xc5c2d8ed7f7a60e2],
[0x1712403f686820dd, 0x454311d92d2f672d],
[0xe8edbfc09797df22, 0x8f8cd3b7e7e4f36a],
[0xa2a7e2b38f88859e, 0x67653a5ef0f2e57c],
[0x2655c33bc1b13051, 0x6316d2e2ec9e577c],
[0x8bfb6d227b09885e, 0x67919b1aa620ab4b],
[0xc53679a929a82ed5, 0xa25343f7d95acba9],
[0x598e482fffaee364, 0x3a989acd1330b418],
],
);
let keys = hex!("000102030405060708090a0b0c0d0e0f1011121314151617");
check(
unsafe { &expand_key(&keys) },
&[
[0x0001020304050607, 0x08090a0b0c0d0e0f],
[0x1011121314151617, 0x5846f2f95c43f4fe],
[0x544afef55847f0fa, 0x4856e2e95c43f4fe],
[0x40f949b31cbabd4d, 0x48f043b810b7b342],
[0x58e151ab04a2a555, 0x7effb5416245080c],
[0x2ab54bb43a02f8f6, 0x62e3a95d66410c08],
[0xf501857297448d7e, 0xbdf1c6ca87f33e3c],
[0xe510976183519b69, 0x34157c9ea351f1e0],
[0x1ea0372a99530916, 0x7c439e77ff12051e],
[0xdd7e0e887e2fff68, 0x608fc842f9dcc154],
[0x859f5f237a8d5a3d, 0xc0c02952beefd63a],
[0xde601e7827bcdf2c, 0xa223800fd8aeda32],
[0xa4970a331a78dc09, 0xc418c271e3a41d5d],
],
);
let keys = hex!("8e73b0f7da0e6452c810f32b809079e562f8ead2522c6b7b");
check(
unsafe { &expand_key(&keys) },
&[
[0x8e73b0f7da0e6452, 0xc810f32b809079e5],
[0x62f8ead2522c6b7b, 0xfe0c91f72402f5a5],
[0xec12068e6c827f6b, 0x0e7a95b95c56fec2],
[0x4db7b4bd69b54118, 0x85a74796e92538fd],
[0xe75fad44bb095386, 0x485af05721efb14f],
[0xa448f6d94d6dce24, 0xaa326360113b30e6],
[0xa25e7ed583b1cf9a, 0x27f939436a94f767],
[0xc0a69407d19da4e1, 0xec1786eb6fa64971],
[0x485f703222cb8755, 0xe26d135233f0b7b3],
[0x40beeb282f18a259, 0x6747d26b458c553e],
[0xa7e1466c9411f1df, 0x821f750aad07d753],
[0xca4005388fcc5006, 0x282d166abc3ce7b5],
[0xe98ba06f448c773c, 0x8ecc720401002202],
],
);
} | identifier_body |
test_expand.rs | use super::utils::check;
use hex_literal::hex;
#[test]
fn aes128_expand_key_test() {
use super::aes128::expand_key;
let keys = [0x00; 16];
check(
unsafe { &expand_key(&keys) },
&[
[0x0000000000000000, 0x0000000000000000],
[0x6263636362636363, 0x6263636362636363],
[0x9b9898c9f9fbfbaa, 0x9b9898c9f9fbfbaa],
[0x90973450696ccffa, 0xf2f457330b0fac99],
[0xee06da7b876a1581, 0x759e42b27e91ee2b],
[0x7f2e2b88f8443e09, 0x8dda7cbbf34b9290],
[0xec614b851425758c, 0x99ff09376ab49ba7],
[0x217517873550620b, 0xacaf6b3cc61bf09b],
[0x0ef903333ba96138, 0x97060a04511dfa9f],
[0xb1d4d8e28a7db9da, 0x1d7bb3de4c664941],
[0xb4ef5bcb3e92e211, 0x23e951cf6f8f188e],
],
);
let keys = [0xff; 16];
check(
unsafe { &expand_key(&keys) },
&[
[0xffffffffffffffff, 0xffffffffffffffff],
[0xe8e9e9e917161616, 0xe8e9e9e917161616],
[0xadaeae19bab8b80f, 0x525151e6454747f0],
[0x090e2277b3b69a78, 0xe1e7cb9ea4a08c6e],
[0xe16abd3e52dc2746, 0xb33becd8179b60b6],
[0xe5baf3ceb766d488, 0x045d385013c658e6],
[0x71d07db3c6b6a93b, 0xc2eb916bd12dc98d],
[0xe90d208d2fbb89b6, 0xed5018dd3c7dd150],
[0x96337366b988fad0, 0x54d8e20d68a5335d],
[0x8bf03f233278c5f3, 0x66a027fe0e0514a3],
[0xd60a3588e472f07b, 0x82d2d7858cd7c326],
],
);
let keys = hex!("000102030405060708090a0b0c0d0e0f");
check(
unsafe { &expand_key(&keys) },
&[
[0x0001020304050607, 0x08090a0b0c0d0e0f],
[0xd6aa74fdd2af72fa, 0xdaa678f1d6ab76fe],
[0xb692cf0b643dbdf1, 0xbe9bc5006830b3fe],
[0xb6ff744ed2c2c9bf, 0x6c590cbf0469bf41],
[0x47f7f7bc95353e03, 0xf96c32bcfd058dfd],
[0x3caaa3e8a99f9deb, 0x50f3af57adf622aa],
[0x5e390f7df7a69296, 0xa7553dc10aa31f6b],
[0x14f9701ae35fe28c, 0x440adf4d4ea9c026],
[0x47438735a41c65b9, 0xe016baf4aebf7ad2],
[0x549932d1f0855768, 0x1093ed9cbe2c974e],
[0x13111d7fe3944a17, 0xf307a78b4d2b30c5],
],
);
let keys = hex!("6920e299a5202a6d656e636869746f2a");
check(
unsafe { &expand_key(&keys) },
&[
[0x6920e299a5202a6d, 0x656e636869746f2a],
[0xfa8807605fa82d0d, 0x3ac64e6553b2214f],
[0xcf75838d90ddae80, 0xaa1be0e5f9a9c1aa],
[0x180d2f1488d08194, 0x22cb6171db62a0db],
[0xbaed96ad323d1739, 0x10f67648cb94d693],
[0x881b4ab2ba265d8b, 0xaad02bc36144fd50],
[0xb34f195d096944d6, 0xa3b96f15c2fd9245],
[0xa7007778ae6933ae, 0x0dd05cbbcf2dcefe],
[0xff8bccf251e2ff5c, 0x5c32a3e7931f6d19],
[0x24b7182e7555e772, 0x29674495ba78298c],
[0xae127cdadb479ba8, 0xf220df3d4858f6b1],
],
);
let keys = hex!("2b7e151628aed2a6abf7158809cf4f3c");
check(
unsafe { &expand_key(&keys) },
&[
[0x2b7e151628aed2a6, 0xabf7158809cf4f3c],
[0xa0fafe1788542cb1, 0x23a339392a6c7605],
[0xf2c295f27a96b943, 0x5935807a7359f67f],
[0x3d80477d4716fe3e, 0x1e237e446d7a883b],
[0xef44a541a8525b7f, 0xb671253bdb0bad00],
[0xd4d1c6f87c839d87, 0xcaf2b8bc11f915bc],
[0x6d88a37a110b3efd, 0xdbf98641ca0093fd],
[0x4e54f70e5f5fc9f3, 0x84a64fb24ea6dc4f],
[0xead27321b58dbad2, 0x312bf5607f8d292f],
[0xac7766f319fadc21, 0x28d12941575c006e],
[0xd014f9a8c9ee2589, 0xe13f0cc8b6630ca6],
],
);
}
#[test]
fn aes192_expand_key_test() {
use super::aes192::expand_key;
let keys = [0x00; 24];
check(
unsafe { &expand_key(&keys) },
&[
[0x0000000000000000, 0x0000000000000000],
[0x0000000000000000, 0x6263636362636363],
[0x6263636362636363, 0x6263636362636363],
[0x9b9898c9f9fbfbaa, 0x9b9898c9f9fbfbaa],
[0x9b9898c9f9fbfbaa, 0x90973450696ccffa],
[0xf2f457330b0fac99, 0x90973450696ccffa],
[0xc81d19a9a171d653, 0x53858160588a2df9],
[0xc81d19a9a171d653, 0x7bebf49bda9a22c8],
[0x891fa3a8d1958e51, 0x198897f8b8f941ab],
[0xc26896f718f2b43f, 0x91ed1797407899c6],
[0x59f00e3ee1094f95, 0x83ecbc0f9b1e0830],
[0x0af31fa74a8b8661, 0x137b885ff272c7ca],
[0x432ac886d834c0b6, 0xd2c7df11984c5970],
],
);
let keys = [0xff; 24];
check(
unsafe { &expand_key(&keys) },
&[
[0xffffffffffffffff, 0xffffffffffffffff],
[0xffffffffffffffff, 0xe8e9e9e917161616],
[0xe8e9e9e917161616, 0xe8e9e9e917161616],
[0xadaeae19bab8b80f, 0x525151e6454747f0],
[0xadaeae19bab8b80f, 0xc5c2d8ed7f7a60e2],
[0x2d2b3104686c76f4, 0xc5c2d8ed7f7a60e2],
[0x1712403f686820dd, 0x454311d92d2f672d],
[0xe8edbfc09797df22, 0x8f8cd3b7e7e4f36a],
[0xa2a7e2b38f88859e, 0x67653a5ef0f2e57c],
[0x2655c33bc1b13051, 0x6316d2e2ec9e577c],
[0x8bfb6d227b09885e, 0x67919b1aa620ab4b],
[0xc53679a929a82ed5, 0xa25343f7d95acba9],
[0x598e482fffaee364, 0x3a989acd1330b418],
],
);
let keys = hex!("000102030405060708090a0b0c0d0e0f1011121314151617");
check(
unsafe { &expand_key(&keys) },
&[
[0x0001020304050607, 0x08090a0b0c0d0e0f],
[0x1011121314151617, 0x5846f2f95c43f4fe],
[0x544afef55847f0fa, 0x4856e2e95c43f4fe],
[0x40f949b31cbabd4d, 0x48f043b810b7b342],
[0x58e151ab04a2a555, 0x7effb5416245080c],
[0x2ab54bb43a02f8f6, 0x62e3a95d66410c08],
[0xf501857297448d7e, 0xbdf1c6ca87f33e3c],
[0xe510976183519b69, 0x34157c9ea351f1e0],
[0x1ea0372a99530916, 0x7c439e77ff12051e],
[0xdd7e0e887e2fff68, 0x608fc842f9dcc154],
[0x859f5f237a8d5a3d, 0xc0c02952beefd63a],
[0xde601e7827bcdf2c, 0xa223800fd8aeda32],
[0xa4970a331a78dc09, 0xc418c271e3a41d5d],
],
);
let keys = hex!("8e73b0f7da0e6452c810f32b809079e562f8ead2522c6b7b");
check(
unsafe { &expand_key(&keys) },
&[
[0x8e73b0f7da0e6452, 0xc810f32b809079e5],
[0x62f8ead2522c6b7b, 0xfe0c91f72402f5a5],
[0xec12068e6c827f6b, 0x0e7a95b95c56fec2],
[0x4db7b4bd69b54118, 0x85a74796e92538fd],
[0xe75fad44bb095386, 0x485af05721efb14f],
[0xa448f6d94d6dce24, 0xaa326360113b30e6],
[0xa25e7ed583b1cf9a, 0x27f939436a94f767],
[0xc0a69407d19da4e1, 0xec1786eb6fa64971],
[0x485f703222cb8755, 0xe26d135233f0b7b3],
[0x40beeb282f18a259, 0x6747d26b458c553e],
[0xa7e1466c9411f1df, 0x821f750aad07d753],
[0xca4005388fcc5006, 0x282d166abc3ce7b5],
[0xe98ba06f448c773c, 0x8ecc720401002202],
],
);
}
#[test]
fn | () {
use super::aes256::expand_key;
let keys = [0x00; 32];
check(
unsafe { &expand_key(&keys) },
&[
[0x0000000000000000, 0x0000000000000000],
[0x0000000000000000, 0x0000000000000000],
[0x6263636362636363, 0x6263636362636363],
[0xaafbfbfbaafbfbfb, 0xaafbfbfbaafbfbfb],
[0x6f6c6ccf0d0f0fac, 0x6f6c6ccf0d0f0fac],
[0x7d8d8d6ad7767691, 0x7d8d8d6ad7767691],
[0x5354edc15e5be26d, 0x31378ea23c38810e],
[0x968a81c141fcf750, 0x3c717a3aeb070cab],
[0x9eaa8f28c0f16d45, 0xf1c6e3e7cdfe62e9],
[0x2b312bdf6acddc8f, 0x56bca6b5bdbbaa1e],
[0x6406fd52a4f79017, 0x553173f098cf1119],
[0x6dbba90b07767584, 0x51cad331ec71792f],
[0xe7b0e89c4347788b, 0x16760b7b8eb91a62],
[0x74ed0ba1739b7e25, 0x2251ad14ce20d43b],
[0x10f80a1753bf729c, 0x45c979e7cb706385],
],
);
let keys = [0xff; 32];
check(
unsafe { &expand_key(&keys) },
&[
[0xffffffffffffffff, 0xffffffffffffffff],
[0xffffffffffffffff, 0xffffffffffffffff],
[0xe8e9e9e917161616, 0xe8e9e9e917161616],
[0x0fb8b8b8f0474747, 0x0fb8b8b8f0474747],
[0x4a4949655d5f5f73, 0xb5b6b69aa2a0a08c],
[0x355858dcc51f1f9b, 0xcaa7a7233ae0e064],
[0xafa80ae5f2f75596, 0x4741e30ce5e14380],
[0xeca0421129bf5d8a, 0xe318faa9d9f81acd],
[0xe60ab7d014fde246, 0x53bc014ab65d42ca],
[0xa2ec6e658b5333ef, 0x684bc946b1b3d38b],
[0x9b6c8a188f91685e, 0xdc2d69146a702bde],
[0xa0bd9f782beeac97, 0x43a565d1f216b65a],
[0xfc22349173b35ccf, 0xaf9e35dbc5ee1e05],
[0x0695ed132d7b4184, 0x6ede24559cc8920f],
[0x546d424f27de1e80, 0x88402b5b4dae355e],
],
);
let keys = hex!("000102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f");
check(
unsafe { &expand_key(&keys) },
&[
[0x0001020304050607, 0x08090a0b0c0d0e0f],
[0x1011121314151617, 0x18191a1b1c1d1e1f],
[0xa573c29fa176c498, 0xa97fce93a572c09c],
[0x1651a8cd0244beda, 0x1a5da4c10640bade],
[0xae87dff00ff11b68, 0xa68ed5fb03fc1567],
[0x6de1f1486fa54f92, 0x75f8eb5373b8518d],
[0xc656827fc9a79917, 0x6f294cec6cd5598b],
[0x3de23a75524775e7, 0x27bf9eb45407cf39],
[0x0bdc905fc27b0948, 0xad5245a4c1871c2f],
[0x45f5a66017b2d387, 0x300d4d33640a820a],
[0x7ccff71cbeb4fe54, 0x13e6bbf0d261a7df],
[0xf01afafee7a82979, 0xd7a5644ab3afe640],
[0x2541fe719bf50025, 0x8813bbd55a721c0a],
[0x4e5a6699a9f24fe0, 0x7e572baacdf8cdea],
[0x24fc79ccbf0979e9, 0x371ac23c6d68de36],
],
);
let keys = hex!("603deb1015ca71be2b73aef0857d77811f352c073b6108d72d9810a30914dff4");
check(
unsafe { &expand_key(&keys) },
&[
[0x603deb1015ca71be, 0x2b73aef0857d7781],
[0x1f352c073b6108d7, 0x2d9810a30914dff4],
[0x9ba354118e6925af, 0xa51a8b5f2067fcde],
[0xa8b09c1a93d194cd, 0xbe49846eb75d5b9a],
[0xd59aecb85bf3c917, 0xfee94248de8ebe96],
[0xb5a9328a2678a647, 0x983122292f6c79b3],
[0x812c81addadf48ba, 0x24360af2fab8b464],
[0x98c5bfc9bebd198e, 0x268c3ba709e04214],
[0x68007bacb2df3316, 0x96e939e46c518d80],
[0xc814e20476a9fb8a, 0x5025c02d59c58239],
[0xde1369676ccc5a71, 0xfa2563959674ee15],
[0x5886ca5d2e2f31d7, 0x7e0af1fa27cf73c3],
[0x749c47ab18501dda, 0xe2757e4f7401905a],
[0xcafaaae3e4d59b34, 0x9adf6acebd10190d],
[0xfe4890d1e6188d0b, 0x046df344706c631e],
],
);
}
| aes256_expand_key_test | identifier_name |
day11.rs | use std::{collections::{HashSet, VecDeque}, io::Write};
use itertools::Itertools;
use snafu::Snafu;
type Result<T> = std::result::Result<T, Error>;
#[derive(Hash, PartialEq, Eq, PartialOrd, Ord, Clone)]
enum Item {
Chip(char),
Generator(char),
ChipAndGenerator,
}
impl std::str::FromStr for Item {
type Err = Error;
fn from_str(s: &str) -> Result<Self> |
}
impl std::fmt::Debug for Item {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match self {
Item::Chip(id) => write!(f, "{}M", id),
Item::Generator(id) => write!(f, "{}G", id),
Item::ChipAndGenerator => write!(f, "<>"),
}
}
}
// PartialEq/Eq are hand-written below (over the canonical form) so that the
// HashSet of seen states actually merges pair-symmetric states
#[derive(Debug, Clone)]
struct State {
elevator: usize,
floors: Vec<HashSet<Item>>,
}
impl std::fmt::Display for State {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
for (i, floor) in self.floors.iter().enumerate() {
            writeln!(
                f,
                "{} {}: {:?}",
                i,
                if self.elevator == i { "E" } else { " " },
                floor
            )?;
}
Ok(())
}
}
fn would_fry(items: &HashSet<Item>) -> bool {
for item in items {
match item {
Item::Chip(id) => {
if items.contains(&Item::Generator(*id)) {
// chip is protected by generator
continue;
}
                for other_item in items {
                    if let Item::Generator(_) = other_item {
                        // this chip's own generator is absent (checked above), so
                        // any generator here belongs to another element and fries it
                        return true;
                    }
                }
}
Item::Generator(_) => {}
Item::ChipAndGenerator => {}
}
}
false
}
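// Sanity examples for the frying rule (hypothetical test, mirroring the puzzle
// statement): a chip next to a foreign generator is only safe when its own
// generator is present too.
//
//     #[test]
//     fn would_fry_examples() {
//         let fried: HashSet<Item> =
//             [Item::Chip('h'), Item::Generator('l')].into_iter().collect();
//         assert!(would_fry(&fried));
//         let shielded: HashSet<Item> =
//             [Item::Chip('h'), Item::Generator('h'), Item::Generator('l')]
//                 .into_iter().collect();
//         assert!(!would_fry(&shielded));
//     }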
impl State {
fn score(&self) -> usize {
self.floors
.iter()
.enumerate()
.map(|(i, f)| f.len() * (i + 1) * 10)
.sum::<usize>()
}
fn is_success(&self) -> bool {
for floor in &self.floors[..self.floors.len() - 1] {
if !floor.is_empty() {
return false;
}
}
true
}
fn get_neighbors(&self) -> Vec<State> {
let mut out = Vec::new();
// calculate valid floors that the elevator can move to
let mut valid_destinations = Vec::new();
if self.elevator > 0 {
valid_destinations.push(self.elevator - 1);
};
if self.elevator < self.floors.len() - 1 {
valid_destinations.push(self.elevator + 1);
}
for num_items in 1..=2 {
// generate sets of items that can be taken from current floor - none, one, or two
for moved_items in self.floors[self.elevator].iter().combinations(num_items) {
let moved_items: HashSet<Item> = moved_items.into_iter().cloned().collect();
for destination in &valid_destinations {
// take moved_items from self.elevator to destination
let current_floor: HashSet<Item> = self.floors[self.elevator]
.difference(&moved_items)
.cloned()
.collect();
let destination_floor: HashSet<Item> = self.floors[*destination]
.union(&moved_items)
.cloned()
.collect();
// do not perform invalid moves
if would_fry(¤t_floor) || would_fry(&destination_floor) {
continue;
}
let mut new_state: State = self.clone();
new_state.floors[self.elevator] = current_floor;
new_state.floors[*destination] = destination_floor;
new_state.elevator = *destination;
out.push(new_state);
}
}
}
out
}
}
impl State {
    /// Canonical form shared by Hash and Eq: a chip that sits on the same
    /// floor as its own generator is folded into an interchangeable
    /// ChipAndGenerator token, so states differing only by swapping whole
    /// pairs between elements compare equal and are pruned as duplicates.
    /// (With a derived PartialEq the canonical Hash alone would not prune
    /// anything, because HashSet also checks equality.)
    fn canonical(&self) -> (usize, Vec<Vec<Item>>) {
        let floors: Vec<Vec<Item>> = self
            .floors
            .iter()
            .map(|items| {
                // build list of ids for which both Chip and Generator are seen
                let merge: Vec<char> = items
                    .iter()
                    .filter_map(|i| {
                        if let Item::Chip(id) = i {
                            if items.contains(&Item::Generator(*id)) {
                                return Some(*id);
                            }
                        }
                        None
                    })
                    .collect();
                let mut items = items.clone();
                for id in &merge {
                    items.remove(&Item::Chip(*id));
                    items.remove(&Item::Generator(*id));
                }
                let mut items: Vec<Item> = items.into_iter().collect();
                for _ in &merge {
                    items.push(Item::ChipAndGenerator);
                }
                items.sort();
                items
            })
            .collect();
        (self.elevator, floors)
    }
}

impl PartialEq for State {
    fn eq(&self, other: &Self) -> bool {
        self.canonical() == other.canonical()
    }
}

impl Eq for State {}

impl std::hash::Hash for State {
    fn hash<H: std::hash::Hasher>(&self, state: &mut H) {
        self.canonical().hash(state);
    }
}
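// Illustration (hypothetical states): floors [[hM,hG],[lM,lG]] and
// [[lM,lG],[hM,hG]] both canonicalize to [[<>],[<>]] with the same elevator,
// so the search's `seen` set keeps only one of them.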
#[derive(Debug, Snafu)]
enum Error {
#[snafu(display("I/O error: {}", source))]
Io { source: std::io::Error },
#[snafu(display("Int format error for '{}': {}", data, source))]
ParseInt {
data: String,
source: std::num::ParseIntError,
},
#[snafu(display("Invalid item: '{}'", data))]
ParseItem { data: String },
}
fn solve(input: &[&str]) -> Result<usize> {
// -> Result<Vec<State>> {
let start = State {
elevator: 0,
floors: input
.iter()
.map(|l| {
if l.trim().is_empty() {
Ok(HashSet::new())
} else {
l.split(",")
.map(|i| i.parse())
.collect::<Result<HashSet<Item>>>()
}
})
.collect::<Result<_>>()?,
};
    //let mut queue = vec![(0, Vec::new(), start.clone())];
    // VecDeque gives O(1) pops from the front; Vec::remove(0) is O(n) per pop
    let mut queue: VecDeque<(usize, State)> = VecDeque::new();
    queue.push_back((0, start.clone()));
    let mut seen: HashSet<State> = HashSet::new();
    seen.insert(start.clone());
let mut max_steps = 0;
let mut best_score = 0;
    //let (steps, path, state) = queue.remove(0);
    while let Some((steps, state)) = queue.pop_front() {
//println!("{}\n{}", steps, state);
if max_steps < steps {
max_steps = steps;
print!(".");
std::io::stdout().flush().unwrap();
}
for next_state in state.get_neighbors() {
if seen.contains(&next_state) {
continue;
}
let score = next_state.score();
if score > best_score {
best_score = score;
}
            // dirty heuristic: don't explore very bad states
            // (saturating_sub avoids a usize underflow panic while best_score < 40)
            if score < best_score.saturating_sub(40) {
continue;
}
//let mut next_path = path.clone();
//next_path.push(next_state.clone());
seen.insert(next_state.clone());
            //queue.push((steps + 1, next_path, next_state));
            queue.push_back((steps + 1, next_state));
}
if state.is_success() {
//return Ok(path);
return Ok(steps);
}
}
panic!("No solution")
}
fn main() -> Result<()> {
//let input = vec![
// "HM,LM", // The first floor contains a hydrogen-compatible microchip and a lithium-compatible microchip.
// "HG", // The second floor contains a hydrogen generator.
// "LG", // The third floor contains a lithium generator.
// "", // The fourth floor contains nothing relevant.
//];
// p plutonium
// P promethium
// r ruthenium
// s strontium
// t thulium
let input1 = vec![
"tG,tM,pG,sG", // The first floor contains a thulium generator, a thulium-compatible microchip, a plutonium generator, and a strontium generator.
"pM,sM", // The second floor contains a plutonium-compatible microchip and a strontium-compatible microchip.
"PG,PM,rG,rM", // The third floor contains a promethium generator, a promethium-compatible microchip, a ruthenium generator, and a ruthenium-compatible microchip.
"", // The fourth floor contains nothing relevant.
];
if let Ok(path) = solve(&input1[..]) {
//println!("\npart 1: solution in {} steps", path.len());
println!("\npart 1: solution in {} steps", path);
//for (i, step) in path.into_iter().enumerate() {
// println!("STEP {}:\n{}\n", i, step);
//}
}
// d dilithium
// e elerium
// p plutonium
// P promethium
// r ruthenium
// s strontium
// t thulium
let input2 = vec!["dG,dM,eG,eM,tG,tM,pG,sG", "pM,sM", "PG,PM,rG,rM", ""];
if let Ok(path) = solve(&input2[..]) {
//println!("\npart 2: solution in {} steps", path.len());
println!("\npart 2: solution in {} steps", path);
//for (i, step) in path.into_iter().enumerate() {
// println!("STEP {}:\n{}\n", i, step);
//}
}
Ok(())
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn it_works() -> Result<()> {
Ok(())
}
}
| {
let s: Vec<char> = s.chars().collect();
        if s.len() != 2 {
            // report the whole token; indexing s[0] would panic on empty input
            return Err(Error::ParseItem {
                data: s.iter().collect(),
            });
        }
match s[1] {
'M' => Ok(Item::Chip(s[0])),
'G' => Ok(Item::Generator(s[0])),
_ => Err(Error::ParseItem {
data: s[1].to_string(),
}),
}
} | identifier_body |
day11.rs | use std::{collections::HashSet, io::Write};
use itertools::Itertools;
use snafu::Snafu;
type Result<T> = std::result::Result<T, Error>;
#[derive(Hash, PartialEq, Eq, PartialOrd, Ord, Clone)]
enum Item {
Chip(char),
Generator(char),
ChipAndGenerator,
}
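// Items parse from two-character tokens such as "HG" (hydrogen generator)
// or "HM" (hydrogen-compatible microchip). ChipAndGenerator never appears in
// the input; it exists only for the canonical hashing implemented below.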
impl std::str::FromStr for Item {
type Err = Error;
fn from_str(s: &str) -> Result<Self> {
let s: Vec<char> = s.chars().collect();
if s.len() != 2 {
return Err(Error::ParseItem {
data: s.iter().collect(), // s[0] would panic on an empty token
});
}
match s[1] {
'M' => Ok(Item::Chip(s[0])),
'G' => Ok(Item::Generator(s[0])),
_ => Err(Error::ParseItem {
data: s[1].to_string(),
}),
}
}
}
impl std::fmt::Debug for Item {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match self {
Item::Chip(id) => write!(f, "{}M", id),
Item::Generator(id) => write!(f, "{}G", id),
Item::ChipAndGenerator => write!(f, "<>"),
}
}
}
#[derive(Debug, PartialEq, Eq, Clone)]
struct State {
elevator: usize,
floors: Vec<HashSet<Item>>,
}
impl std::fmt::Display for State {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
for (i, floor) in self.floors.iter().enumerate() {
write!(
f,
"{} {}: {:?}\n",
i,
if self.elevator == i { "E" } else { " " },
floor
)?;
}
Ok(())
}
}
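// A chip on a floor gets fried when another element's generator is present
// and the chip's own (shielding) generator is not. Generators are never
// damaged, so only Chip variants need to be checked.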
fn would_fry(items: &HashSet<Item>) -> bool {
for item in items {
match item {
Item::Chip(id) => {
if items.contains(&Item::Generator(*id)) {
// chip is protected by generator
continue;
}
for other_item in items {
if let Item::Generator(other_id) = other_item {
// TODO: this check is likely redundant: the chip's own generator was handled above
if other_id != id {
// chip gets fried by another generator
return true;
}
}
}
}
Item::Generator(_) => {}
Item::ChipAndGenerator => {}
}
}
false
}
impl State {
fn | (&self) -> usize {
self.floors
.iter()
.enumerate()
.map(|(i, f)| f.len() * (i + 1) * 10)
.sum::<usize>()
}
fn is_success(&self) -> bool {
for floor in &self.floors[..self.floors.len() - 1] {
if !floor.is_empty() {
return false;
}
}
true
}
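// Enumerates every legal successor state: the elevator moves one floor up or
// down carrying one or two items, and any move that would fry a chip on
// either affected floor is discarded.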
fn get_neighbors(&self) -> Vec<State> {
let mut out = Vec::new();
// calculate valid floors that the elevator can move to
let mut valid_destinations = Vec::new();
if self.elevator > 0 {
valid_destinations.push(self.elevator - 1);
};
if self.elevator < self.floors.len() - 1 {
valid_destinations.push(self.elevator + 1);
}
for num_items in 1..=2 {
// generate the sets of items that can be taken from the current floor - one or two
for moved_items in self.floors[self.elevator].iter().combinations(num_items) {
let moved_items: HashSet<Item> = moved_items.into_iter().cloned().collect();
for destination in &valid_destinations {
// take moved_items from self.elevator to destination
let current_floor: HashSet<Item> = self.floors[self.elevator]
.difference(&moved_items)
.cloned()
.collect();
let destination_floor: HashSet<Item> = self.floors[*destination]
.union(&moved_items)
.cloned()
.collect();
// do not perform invalid moves
if would_fry(&current_floor) || would_fry(&destination_floor) {
continue;
}
let mut new_state: State = self.clone();
new_state.floors[self.elevator] = current_floor;
new_state.floors[*destination] = destination_floor;
new_state.elevator = *destination;
out.push(new_state);
}
}
}
out
}
}
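// Canonical hashing: element identities are interchangeable, so states that
// differ only in which element is paired are equivalent. Matched
// chip+generator pairs collapse into ChipAndGenerator before hashing. Note
// that PartialEq stays derived, so such isomorphic states still compare
// unequal; they share a hash bucket but `seen` does not deduplicate them.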
impl std::hash::Hash for State {
fn hash<H: std::hash::Hasher>(&self, state: &mut H) {
self.elevator.hash(state);
let floors: Vec<Vec<Item>> = self
.floors
.iter()
.map(|items| {
// build list of ids for which both Chip and Generator are seen
let merge: Vec<char> = items
.iter()
.filter_map(|i| {
if let Item::Chip(id) = i {
if items.contains(&Item::Generator(*id)) {
return Some(*id);
}
}
None
})
.collect();
let mut items = items.clone();
for id in &merge {
items.remove(&Item::Chip(*id));
items.remove(&Item::Generator(*id));
}
let mut items: Vec<Item> = items.into_iter().collect();
for _ in &merge {
items.push(Item::ChipAndGenerator);
}
items.sort();
items
})
.collect();
floors.hash(state);
}
}
#[derive(Debug, Snafu)]
enum Error {
#[snafu(display("I/O error: {}", source))]
Io { source: std::io::Error },
#[snafu(display("Int format error for '{}': {}", data, source))]
ParseInt {
data: String,
source: std::num::ParseIntError,
},
#[snafu(display("Invalid item: '{}'", data))]
ParseItem { data: String },
}
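// Breadth-first search over states: the queue is FIFO and step counts only
// grow, so the first popped state passing is_success() is reached in a
// minimal number of moves. The score-based pruning below is a heuristic
// speed-up that is not guaranteed to preserve optimality, and Vec::remove(0)
// is O(n); a std::collections::VecDeque would be the idiomatic queue here.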
fn solve(input: &[&str]) -> Result<usize> {
// -> Result<Vec<State>> {
let start = State {
elevator: 0,
floors: input
.iter()
.map(|l| {
if l.trim().is_empty() {
Ok(HashSet::new())
} else {
l.split(",")
.map(|i| i.parse())
.collect::<Result<HashSet<Item>>>()
}
})
.collect::<Result<_>>()?,
};
//let mut queue = vec![(0, Vec::new(), start.clone())];
let mut queue = vec![(0, start.clone())];
let mut seen: HashSet<State> = HashSet::new();
let mut max_steps = 0;
let mut best_score = 0;
while !queue.is_empty() {
//let (steps, path, state) = queue.remove(0);
let (steps, state) = queue.remove(0);
//println!("{}\n{}", steps, state);
if max_steps < steps {
max_steps = steps;
print!(".");
std::io::stdout().flush().unwrap();
}
for next_state in state.get_neighbors() {
if seen.contains(&next_state) {
continue;
}
let score = next_state.score();
if score > best_score {
best_score = score;
}
// dirty heuristic: don't explore very bad states
// written as an addition so the usize subtraction cannot underflow
if score + 40 < best_score {
continue;
}
//let mut next_path = path.clone();
//next_path.push(next_state.clone());
seen.insert(next_state.clone());
//queue.push((steps + 1, next_path, next_state));
queue.push((steps + 1, next_state));
}
if state.is_success() {
//return Ok(path);
return Ok(steps);
}
}
panic!("No solution")
}
fn main() -> Result<()> {
//let input = vec![
// "HM,LM", // The first floor contains a hydrogen-compatible microchip and a lithium-compatible microchip.
// "HG", // The second floor contains a hydrogen generator.
// "LG", // The third floor contains a lithium generator.
// "", // The fourth floor contains nothing relevant.
//];
// p plutonium
// P promethium
// r ruthenium
// s strontium
// t thulium
let input1 = vec![
"tG,tM,pG,sG", // The first floor contains a thulium generator, a thulium-compatible microchip, a plutonium generator, and a strontium generator.
"pM,sM", // The second floor contains a plutonium-compatible microchip and a strontium-compatible microchip.
"PG,PM,rG,rM", // The third floor contains a promethium generator, a promethium-compatible microchip, a ruthenium generator, and a ruthenium-compatible microchip.
"", // The fourth floor contains nothing relevant.
];
if let Ok(path) = solve(&input1[..]) {
//println!("\npart 1: solution in {} steps", path.len());
println!("\npart 1: solution in {} steps", path);
//for (i, step) in path.into_iter().enumerate() {
// println!("STEP {}:\n{}\n", i, step);
//}
}
// d dilithium
// e elerium
// p plutonium
// P promethium
// r ruthenium
// s strontium
// t thulium
let input2 = vec!["dG,dM,eG,eM,tG,tM,pG,sG", "pM,sM", "PG,PM,rG,rM", ""];
if let Ok(path) = solve(&input2[..]) {
//println!("\npart 2: solution in {} steps", path.len());
println!("\npart 2: solution in {} steps", path);
//for (i, step) in path.into_iter().enumerate() {
// println!("STEP {}:\n{}\n", i, step);
//}
}
Ok(())
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn it_works() -> Result<()> {
Ok(())
}
}
| score | identifier_name |
day11.rs | use std::{collections::HashSet, io::Write};
use itertools::Itertools;
use snafu::Snafu;
type Result<T> = std::result::Result<T, Error>;
#[derive(Hash, PartialEq, Eq, PartialOrd, Ord, Clone)]
enum Item {
Chip(char),
Generator(char),
ChipAndGenerator,
}
impl std::str::FromStr for Item {
type Err = Error;
fn from_str(s: &str) -> Result<Self> {
let s: Vec<char> = s.chars().collect();
if s.len() != 2 {
return Err(Error::ParseItem {
data: s.iter().collect(),
});
}
match s[1] {
'M' => Ok(Item::Chip(s[0])),
'G' => Ok(Item::Generator(s[0])),
_ => Err(Error::ParseItem {
data: s[1].to_string(),
}),
}
}
}
impl std::fmt::Debug for Item {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match self {
Item::Chip(id) => write!(f, "{}M", id),
Item::Generator(id) => write!(f, "{}G", id),
Item::ChipAndGenerator => write!(f, "<>"),
}
}
}
#[derive(Debug, PartialEq, Eq, Clone)]
struct State {
elevator: usize,
floors: Vec<HashSet<Item>>,
}
impl std::fmt::Display for State {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
for (i, floor) in self.floors.iter().enumerate() {
write!(
f,
"{} {}: {:?}\n",
i,
if self.elevator == i { "E" } else { " " },
floor
)?;
}
Ok(())
}
}
fn would_fry(items: &HashSet<Item>) -> bool {
for item in items {
match item {
Item::Chip(id) => {
if items.contains(&Item::Generator(*id)) {
// chip is protected by generator
continue;
}
for other_item in items {
if let Item::Generator(other_id) = other_item {
// TODO: this check is likely redundant: the chip's own generator was handled above
if other_id != id {
// chip gets fried by another generator
return true;
}
}
}
}
Item::Generator(_) => {}
Item::ChipAndGenerator => {}
}
}
false
}
impl State {
fn score(&self) -> usize {
self.floors
.iter()
.enumerate() | for floor in &self.floors[..self.floors.len() - 1] {
if !floor.is_empty() {
return false;
}
}
true
}
fn get_neighbors(&self) -> Vec<State> {
let mut out = Vec::new();
// calculate valid floors that the elevator can move to
let mut valid_destinations = Vec::new();
if self.elevator > 0 {
valid_destinations.push(self.elevator - 1);
};
if self.elevator < self.floors.len() - 1 {
valid_destinations.push(self.elevator + 1);
}
for num_items in 1..=2 {
// generate the sets of items that can be taken from the current floor - one or two
for moved_items in self.floors[self.elevator].iter().combinations(num_items) {
let moved_items: HashSet<Item> = moved_items.into_iter().cloned().collect();
for destination in &valid_destinations {
// take moved_items from self.elevator to destination
let current_floor: HashSet<Item> = self.floors[self.elevator]
.difference(&moved_items)
.cloned()
.collect();
let destination_floor: HashSet<Item> = self.floors[*destination]
.union(&moved_items)
.cloned()
.collect();
// do not perform invalid moves
if would_fry(&current_floor) || would_fry(&destination_floor) {
continue;
}
let mut new_state: State = self.clone();
new_state.floors[self.elevator] = current_floor;
new_state.floors[*destination] = destination_floor;
new_state.elevator = *destination;
out.push(new_state);
}
}
}
out
}
}
impl std::hash::Hash for State {
fn hash<H: std::hash::Hasher>(&self, state: &mut H) {
self.elevator.hash(state);
let floors: Vec<Vec<Item>> = self
.floors
.iter()
.map(|items| {
// build list of ids for which both Chip and Generator are seen
let merge: Vec<char> = items
.iter()
.filter_map(|i| {
if let Item::Chip(id) = i {
if items.contains(&Item::Generator(*id)) {
return Some(*id);
}
}
None
})
.collect();
let mut items = items.clone();
for id in &merge {
items.remove(&Item::Chip(*id));
items.remove(&Item::Generator(*id));
}
let mut items: Vec<Item> = items.into_iter().collect();
for _ in &merge {
items.push(Item::ChipAndGenerator);
}
items.sort();
items
})
.collect();
floors.hash(state);
}
}
#[derive(Debug, Snafu)]
enum Error {
#[snafu(display("I/O error: {}", source))]
Io { source: std::io::Error },
#[snafu(display("Int format error for '{}': {}", data, source))]
ParseInt {
data: String,
source: std::num::ParseIntError,
},
#[snafu(display("Invalid item: '{}'", data))]
ParseItem { data: String },
}
fn solve(input: &[&str]) -> Result<usize> {
// -> Result<Vec<State>> {
let start = State {
elevator: 0,
floors: input
.iter()
.map(|l| {
if l.trim().is_empty() {
Ok(HashSet::new())
} else {
l.split(",")
.map(|i| i.parse())
.collect::<Result<HashSet<Item>>>()
}
})
.collect::<Result<_>>()?,
};
//let mut queue = vec![(0, Vec::new(), start.clone())];
let mut queue = vec![(0, start.clone())];
let mut seen: HashSet<State> = HashSet::new();
let mut max_steps = 0;
let mut best_score = 0;
while !queue.is_empty() {
//let (steps, path, state) = queue.remove(0);
let (steps, state) = queue.remove(0);
//println!("{}\n{}", steps, state);
if max_steps < steps {
max_steps = steps;
print!(".");
std::io::stdout().flush().unwrap();
}
for next_state in state.get_neighbors() {
if seen.contains(&next_state) {
continue;
}
let score = next_state.score();
if score > best_score {
best_score = score;
}
// dirty heuristic: don't explore very bad states
if score + 40 < best_score {
continue;
}
//let mut next_path = path.clone();
//next_path.push(next_state.clone());
seen.insert(next_state.clone());
//queue.push((steps + 1, next_path, next_state));
queue.push((steps + 1, next_state));
}
if state.is_success() {
//return Ok(path);
return Ok(steps);
}
}
panic!("No solution")
}
fn main() -> Result<()> {
//let input = vec![
// "HM,LM", // The first floor contains a hydrogen-compatible microchip and a lithium-compatible microchip.
// "HG", // The second floor contains a hydrogen generator.
// "LG", // The third floor contains a lithium generator.
// "", // The fourth floor contains nothing relevant.
//];
// p plutonium
// P promethium
// r ruthenium
// s strontium
// t thulium
let input1 = vec![
"tG,tM,pG,sG", // The first floor contains a thulium generator, a thulium-compatible microchip, a plutonium generator, and a strontium generator.
"pM,sM", // The second floor contains a plutonium-compatible microchip and a strontium-compatible microchip.
"PG,PM,rG,rM", // The third floor contains a promethium generator, a promethium-compatible microchip, a ruthenium generator, and a ruthenium-compatible microchip.
"", // The fourth floor contains nothing relevant.
];
if let Ok(path) = solve(&input1[..]) {
//println!("\npart 1: solution in {} steps", path.len());
println!("\npart 1: solution in {} steps", path);
//for (i, step) in path.into_iter().enumerate() {
// println!("STEP {}:\n{}\n", i, step);
//}
}
// d dilithium
// e elerium
// p plutonium
// P promethium
// r ruthenium
// s strontium
// t thulium
let input2 = vec!["dG,dM,eG,eM,tG,tM,pG,sG", "pM,sM", "PG,PM,rG,rM", ""];
if let Ok(path) = solve(&input2[..]) {
//println!("\npart 2: solution in {} steps", path.len());
println!("\npart 2: solution in {} steps", path);
//for (i, step) in path.into_iter().enumerate() {
// println!("STEP {}:\n{}\n", i, step);
//}
}
Ok(())
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn it_works() -> Result<()> {
Ok(())
}
} | .map(|(i, f)| f.len() * (i + 1) * 10)
.sum::<usize>()
}
fn is_success(&self) -> bool { | random_line_split |
smpl.tpl.js | if (typeof define !== 'function') {var define = require('amdefine')(module)}
/**
* @module smpl
* @submodule smpl.tpl
* @class smpl.tpl
* @static
*/
define(['./smpl.string', './smpl.utils'], function(smpl) {
'use strict';
var MESSAGES = {
wrongClosed: 'Incorrect element closed <{0}.{1}>. Opened one was <{2}.{3}>.',
wrongParsed: 'Template <{0}>: tried to parse non-existing {1} <{2}>.',
duplicateBlock: 'Template <{0}>: Block name <{1}> was used twice.'
};
var REGEX = {
block: /^\s*(\/?BLOCK):\s*([-\w]+)\s*$/i,
widget: /^\s*(\/?WIDGET):\s*([-\w]+)(?:@([-\w]+):([-:\w]+))?\s*([-\w]+=".*?"+\s*)*(\/?\s*)$/i
};
smpl.tpl = {};
smpl.tpl.Template = function(name, blocks, parents) {
this.__name = name || 'anonymous';
this.__blocks = blocks;
this.__widgets = {};
this.__parents = parents;
};
smpl.tpl.Template.prototype.getInstance = function() {
var tpl = Object.create(this);
tpl.__data = null;
return tpl;
};
smpl.tpl.Template.prototype.init = function(html, partial) {
/* jshint evil: true */
if (typeof html === 'string') {
smpl.tpl.utils.make(this, html);
}
if (!partial && typeof this.__blocks[smpl.tpl.utils.MAIN] === 'string') {
for (var blkId in this.__blocks) {
if (typeof this.__blocks[blkId] === 'string') {
this.__blocks[blkId] = new Function('smpl', '$', '"use strict";' + this.__blocks[blkId]);
}
}
}
if (smpl.tpl.globalObj) {
this.__globalKey = this.__name + '_' + smpl.utils.uniq();
smpl.tpl.globalObj[this.__globalKey] = this;
this.__globalKey = smpl.tpl.globalKey + '["' + this.__globalKey + '"]';
}
};
smpl.tpl.Template.prototype.set = function(block, key, value) {
if (arguments.length === 2) {
value = key;
key = block;
block = smpl.tpl.utils.MAIN;
}
this.getData(block)[key] = value;
};
smpl.tpl.Template.prototype.getData = function(blockId) {
if (this.__data[blockId]) return this.__data[blockId];
var parent = this.getData(this.__parents[blockId]);
var data = Object.create(parent);
this.__data[blockId] = data;
return data;
};
smpl.tpl.Template.prototype.retrieve = function(blkId) {
blkId = blkId || smpl.tpl.utils.MAIN;
var blk = this.__parsedBlocks[blkId];
if (blk) {
delete this.__parsedBlocks[blkId];
return blk.join('');
} else {
return '';
}
};
smpl.tpl.Template.prototype.reset = function() {
if (!this.__data) {
this.init();
}
this.__data = {};
this.__data[smpl.tpl.utils.MAIN] = {
me: this.__globalKey
};
this.__parsedBlocks = {};
return this;
};
smpl.tpl.Template.prototype.parseBlock = function(blkId, config) {
if (this.__blocks[blkId]) {
var str = this.__blocks[blkId].call(this, smpl, this.getData(blkId));
delete this.__data[blkId];
var widget = this.__widgets[blkId];
if (widget) {
var lib = smpl.tpl.utils.libraries[widget.lib];
widget = lib.retrieveWidget(widget.widget, config, str);
str = widget.html;
}
this.__parsedBlocks[blkId] = this.__parsedBlocks[blkId] || [];
this.__parsedBlocks[blkId].push(str);
return widget && widget.widget;
} else {
throw new Error(smpl.string.supplant(MESSAGES.wrongParsed, [this.__name, 'block', blkId]));
}
};
smpl.tpl.Template.prototype.parseWidget = function(widgetId, config) {
if (this.__widgets[widgetId]) {
var widget = this.__widgets[widgetId];
config = config || {};
if (widget.args) {
for (var key in widget.args) {
config[key] = widget.args[key];
}
//TODO: Support token in config
}
return this.parseBlock(widgetId, config);
} else {
throw new Error(smpl.string.supplant(MESSAGES.wrongParsed, [this.__name, 'widget', widgetId]));
}
};
smpl.tpl.Template.prototype.parse = function() {
this.reset();
this.onParse.apply(this, arguments);
this.parseBlock(smpl.tpl.utils.MAIN);
return this;
};
smpl.tpl.Template.prototype.load = function(container, display) {
/* jshint browser: true */
if (typeof container === 'string') {
container = document.getElementById(container);
}
if (container) {
display = display || container.style.display;
container.style.display = 'none';
container.innerHTML = this.retrieve();
if (this.onLoad) {
this.onLoad(container, display);
}
container.style.display = display;
}
};
smpl.tpl.Template.prototype.onParse = function(pfx, obj) {
this.set(pfx, obj);
};
smpl.tpl.globalRepo = {};
smpl.tpl.enableGlobal = function(key, obj) {
if (key) {
smpl.tpl.globalKey = key;
smpl.tpl.globalObj = obj;
} else if (smpl.global.smpl === smpl) {
smpl.tpl.globalKey = 'smpl.tpl.globalRepo';
smpl.tpl.globalObj = smpl.tpl.globalRepo;
}
return !!smpl.tpl.globalKey;
};
smpl.tpl.utils = {};
smpl.tpl.utils.libraries = {};
smpl.tpl.utils.registerLibrary = function(libName, library) {
smpl.tpl.utils.libraries[libName] = library;
};
smpl.tpl.utils.MAIN = '_main';
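// make(): single pass over the template source. HTML comments of the form
// <!-- BLOCK: name --> / <!-- /BLOCK: name --> (and the WIDGET variants)
// delimit nested blocks, `{...}` spans are handed to jsTokenize/compileJs as
// inline expressions, and everything else becomes literal HTML tokens.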
smpl.tpl.utils.make = function(tpl, txt) {
var l = txt.length,
pos = 0,
startPos = 0,
stack = [],
newpos;
tpl.__blocks = {};
tpl.__parents = {};
this.processToken(tpl, stack, {type: 'block', name: this.MAIN});
while (pos < l) {
var chr = txt.charAt(pos++);
var nextChar = txt.charAt(pos);
if (chr === '\\' && (nextChar === '\\' || nextChar === '{')) { // skip escaped \ and {
++pos;
} else if (chr === '<' && nextChar === '!' && txt.charAt(pos + 1) === '-' && txt.charAt(pos + 2) === '-') {
// html comment. search for block or widget
newpos = txt.indexOf('-->', pos + 3);
if (newpos !== -1) {
var m = REGEX.block.exec(txt.slice(pos + 3, newpos));
if (!m) {
m = REGEX.widget.exec(txt.slice(pos + 3, newpos));
}
if (m) {
this.processToken(tpl, stack, {type: 'html', txt: txt.slice(startPos, pos - 1)});
this.processToken(tpl, stack, {
type: m[1].toLowerCase(),
name: m[2],
lib: m[3],
widget: m[4],
args: m[5] && this.parseArgs(m[5])
});
if (m[6]) { //Autoclose widget (`/` at the end of the block)
this.processToken(tpl, stack, {type: '/' + m[1].toLowerCase(), name: m[2]});
}
pos = newpos + 3;
startPos = pos;
}
}
} else if (chr === '{') {
newpos = this.jsTokenize(txt, pos);
if (newpos !== pos) {
var tokenTxt = txt.slice(startPos, pos - 1);
this.processToken(tpl, stack, {type: 'html', txt: tokenTxt, beforeJs: true});
startPos = pos;
this.processToken(tpl, stack, {type: 'js', txt: txt.slice(pos, newpos)});
startPos = pos = newpos + 1; //skip closing }
}
}
}
this.processToken(tpl, stack, {type: 'html', txt: txt.slice(startPos, pos)});
this.processToken(tpl, stack, {type: '/block', name: this.MAIN});
};
smpl.tpl.utils.parseArgs = function(argsText) {
var args = {};
if (argsText) {
var re = /([-\w]+)="(.*?)"/g;
var arg;
while ((arg = re.exec(argsText)) !== null) {
args[arg[1]] = smpl.string.unescapeHTML(arg[2]);
}
}
return args;
};
smpl.tpl.utils.processToken = function(tpl, stack, token) {
var processed;
switch (token.type) {
case 'block':
case 'widget':
stack.push(token);
if (tpl.__blocks[token.name]) {
throw new Error(smpl.string.supplant(MESSAGES.duplicateBlock, [tpl.__name, token.name]));
}
tpl.__blocks[token.name] = [];
if (token.type === 'widget') {
tpl.__widgets[token.name] = {
lib: token.lib,
widget: token.widget,
args: token.args
};
}
break;
case '/widget':
case '/block':
var closed = stack.pop();
if (closed.name !== token.name || closed.type !== token.type.slice(1)) {
throw new Error(smpl.string.supplant(MESSAGES.wrongClosed,
[token.type.slice(1), token.name, closed.type, closed.name]));
}
tpl.__blocks[closed.name] = 'return ' + (tpl.__blocks[closed.name].join(' + ') || '""');
var parent = stack[stack.length - 1];
if (parent) {
tpl.__parents[closed.name] = parent.name;
}
processed = "(this.retrieve('" + closed.name + "') || '')";
break;
case 'html':
token.txt = token.txt.replace(token.beforeJs ? /(\\*)\1\\(\{|$)/g : /(\\*)\1\\(\{)/g, '$1$2');
processed = "'" + smpl.string.escapeJs(token.txt) + "'";
break;
case 'js':
processed = this.compileJs(token.txt);
break;
}
if (processed && stack.length) {
var activeToken = stack[stack.length - 1];
tpl.__blocks[activeToken.name].push(processed);
}
};
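// jsTokenize(): a minimal JavaScript scanner that locates the `}` closing an
// inline `{...}` expression. It tracks (), [] and {} nesting and skips over
// string, regexp and (for `@`-prefixed expressions) comment contents so that
// braces inside them do not end the token early. It returns the initial
// position unchanged when the span is rejected, e.g. on a newline in a
// non-`@` expression.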
smpl.tpl.utils.jsTokenize = function(input, pos) {
var l = input.length,
context = [],
closingChar = {
'(': ')',
'[': ']',
'{': '}'
},
lastchar = '',
newLine = /[\u000A\u000D\u2028\u2029]/,
at = input.charAt(pos) === '@',
initialPos = pos;
while (pos < l) {
var chr = input.charAt(pos++);
if (newLine.test(chr)) { | } else if (chr === '/' && input.charAt(pos) === '*') { //Multi line comment
if (!at) return initialPos;
var newpos = input.indexOf('*/', pos + 1);
pos = (newpos === -1) ? l : newpos + 2;
} else {
if (chr === '"' || chr === "'") { //String
pos = this.findUnescaped(input, chr, pos);
} else if (chr === '/' && '(,=:[!&|?{};'.indexOf(lastchar) !== -1) { //Regexp literal
while (pos < l) {
chr = input.charAt(pos++);
if (chr === '\\') {
++pos;
} else if (chr === '[') {
pos = this.findUnescaped(input, ']', pos);
} else if (chr === '/') {
break;
}
}
} else if (chr === '{' || chr === '[' || chr === '(') { //new context opening
context.push(closingChar[chr]);
} else if (chr === '}' || chr === ']' || chr === ')') { //closing context
var lastContext = context.pop();
if (lastContext !== chr) {
--pos;
break;
}
}
if (!/\s/.test(chr)) {
lastchar = chr;
}
}
}
return pos;
};
smpl.tpl.utils.findUnescaped = function(input, character, pos) {
var l = input.length;
while (pos < l) {
var chr = input.charAt(pos++);
if (chr === '\\') {
++pos;
} else if (chr === character) {
break;
}
}
return pos;
};
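// compileJs(): turns an inline expression into generated JavaScript. An outer
// `{...}` wrapper suppresses HTML escaping, `$` is shorthand for the template
// data object (rewritten to `$.`), and a leading `$ ` or `$@` opts out of
// that rewrite; otherwise the result is wrapped in smpl.string.escapeHTML().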
smpl.tpl.utils.compileJs = function(input) {
var noEscape;
if (input.charAt(0) === '{' && input.charAt(input.length - 1) === '}') {
noEscape = true;
input = input.slice(1, -1);
}
var noDolar = /^\$[@\s]/.test(input);
var at = /^\$?@/.test(input);
input = input.slice(+noDolar + at);
if (!noDolar) {
input = input.replace(/\$(?!\.)/g, '$.');
}
if (noEscape) {
input = '((' + input + ")||'')";
} else {
input = 'smpl.string.escapeHTML(' + input + "||'')";
}
return input;
};
smpl.tpl.utils.precompile = function(html) {
var tpl = new smpl.tpl.Template();
tpl.init(html, true);
var js = 'define(["module", "smpl/smpl.tpl"], function(module, smpl) {';
js += 'return new smpl.tpl.Template(module.id,';
js += JSON.stringify(tpl.__blocks);
js += ',';
js += JSON.stringify(tpl.__parents);
js += ');});';
return js;
};
return smpl;
}); | if (!at) return initialPos;
} else if (chr === '/' && input.charAt(pos) === '/') { //Single line comment
if (!at) return initialPos;
while (!newLine.test(input.charAt(++pos)));
++pos; | random_line_split |
smpl.tpl.js | if (typeof define !== 'function') {var define = require('amdefine')(module)}
/**
* @module smpl
* @submodule smpl.tpl
* @class smpl.tpl
* @static
*/
define(['./smpl.string', './smpl.utils'], function(smpl) {
'use strict';
var MESSAGES = {
wrongClosed: 'Incorrect element closed <{0}.{1}>. Opened one was <{2}.{3}>.',
wrongParsed: 'Template <{0}>: tried to parse non-existing {1} <{2}>.',
duplicateBlock: 'Template <{0}>: Block name <{1}> was used twice.'
};
var REGEX = {
block: /^\s*(\/?BLOCK):\s*([-\w]+)\s*$/i,
widget: /^\s*(\/?WIDGET):\s*([-\w]+)(?:@([-\w]+):([-:\w]+))?\s*([-\w]+=".*?"+\s*)*(\/?\s*)$/i
};
smpl.tpl = {};
smpl.tpl.Template = function(name, blocks, parents) {
this.__name = name || 'anonymous';
this.__blocks = blocks;
this.__widgets = {};
this.__parents = parents;
};
smpl.tpl.Template.prototype.getInstance = function() {
var tpl = Object.create(this);
tpl.__data = null;
return tpl;
};
smpl.tpl.Template.prototype.init = function(html, partial) {
/* jshint evil: true */
if (typeof html === 'string') {
smpl.tpl.utils.make(this, html);
}
if (!partial && typeof this.__blocks[smpl.tpl.utils.MAIN] === 'string') {
for (var blkId in this.__blocks) {
if (typeof this.__blocks[blkId] === 'string') |
}
}
if (smpl.tpl.globalObj) {
this.__globalKey = this.__name + '_' + smpl.utils.uniq();
smpl.tpl.globalObj[this.__globalKey] = this;
this.__globalKey = smpl.tpl.globalKey + '["' + this.__globalKey + '"]';
}
};
smpl.tpl.Template.prototype.set = function(block, key, value) {
if (arguments.length === 2) {
value = key;
key = block;
block = smpl.tpl.utils.MAIN;
}
this.getData(block)[key] = value;
};
smpl.tpl.Template.prototype.getData = function(blockId) {
if (this.__data[blockId]) return this.__data[blockId];
var parent = this.getData(this.__parents[blockId]);
var data = Object.create(parent);
this.__data[blockId] = data;
return data;
};
smpl.tpl.Template.prototype.retrieve = function(blkId) {
blkId = blkId || smpl.tpl.utils.MAIN;
var blk = this.__parsedBlocks[blkId];
if (blk) {
delete this.__parsedBlocks[blkId];
return blk.join('');
} else {
return '';
}
};
smpl.tpl.Template.prototype.reset = function() {
if (!this.__data) {
this.init();
}
this.__data = {};
this.__data[smpl.tpl.utils.MAIN] = {
me: this.__globalKey
};
this.__parsedBlocks = {};
return this;
};
smpl.tpl.Template.prototype.parseBlock = function(blkId, config) {
if (this.__blocks[blkId]) {
var str = this.__blocks[blkId].call(this, smpl, this.getData(blkId));
delete this.__data[blkId];
var widget = this.__widgets[blkId];
if (widget) {
var lib = smpl.tpl.utils.libraries[widget.lib];
widget = lib.retrieveWidget(widget.widget, config, str);
str = widget.html;
}
this.__parsedBlocks[blkId] = this.__parsedBlocks[blkId] || [];
this.__parsedBlocks[blkId].push(str);
return widget && widget.widget;
} else {
throw new Error(smpl.string.supplant(MESSAGES.wrongParsed, [this.__name, 'block', blkId]));
}
};
smpl.tpl.Template.prototype.parseWidget = function(widgetId, config) {
if (this.__widgets[widgetId]) {
var widget = this.__widgets[widgetId];
config = config || {};
if (widget.args) {
for (var key in widget.args) {
config[key] = widget.args[key];
}
//TODO: Support token in config
}
return this.parseBlock(widgetId, config);
} else {
throw new Error(smpl.string.supplant(MESSAGES.wrongParsed, [this.__name, 'widget', widgetId]));
}
};
smpl.tpl.Template.prototype.parse = function() {
this.reset();
this.onParse.apply(this, arguments);
this.parseBlock(smpl.tpl.utils.MAIN);
return this;
};
smpl.tpl.Template.prototype.load = function(container, display) {
/* jshint browser: true */
if (typeof container === 'string') {
container = document.getElementById(container);
}
if (container) {
display = display || container.style.display;
container.style.display = 'none';
container.innerHTML = this.retrieve();
if (this.onLoad) {
this.onLoad(container, display);
}
container.style.display = display;
}
};
smpl.tpl.Template.prototype.onParse = function(pfx, obj) {
this.set(pfx, obj);
};
smpl.tpl.globalRepo = {};
smpl.tpl.enableGlobal = function(key, obj) {
if (key) {
smpl.tpl.globalKey = key;
smpl.tpl.globalObj = obj;
} else if (smpl.global.smpl === smpl) {
smpl.tpl.globalKey = 'smpl.tpl.globalRepo';
smpl.tpl.globalObj = smpl.tpl.globalRepo;
}
return !!smpl.tpl.globalKey;
};
smpl.tpl.utils = {};
smpl.tpl.utils.libraries = {};
smpl.tpl.utils.registerLibrary = function(libName, library) {
smpl.tpl.utils.libraries[libName] = library;
};
smpl.tpl.utils.MAIN = '_main';
smpl.tpl.utils.make = function(tpl, txt) {
var l = txt.length,
pos = 0,
startPos = 0,
stack = [],
newpos;
tpl.__blocks = {};
tpl.__parents = {};
this.processToken(tpl, stack, {type: 'block', name: this.MAIN});
while (pos < l) {
var chr = txt.charAt(pos++);
var nextChar = txt.charAt(pos);
if (chr === '\\' && (nextChar === '\\' || nextChar === '{')) { // skip escaped \ and {
++pos;
} else if (chr === '<' && nextChar === '!' && txt.charAt(pos + 1) === '-' && txt.charAt(pos + 2) === '-') {
// html comment. search for block or widget
newpos = txt.indexOf('-->', pos + 3);
if (newpos !== -1) {
var m = REGEX.block.exec(txt.slice(pos + 3, newpos));
if (!m) {
m = REGEX.widget.exec(txt.slice(pos + 3, newpos));
}
if (m) {
this.processToken(tpl, stack, {type: 'html', txt: txt.slice(startPos, pos - 1)});
this.processToken(tpl, stack, {
type: m[1].toLowerCase(),
name: m[2],
lib: m[3],
widget: m[4],
args: m[5] && this.parseArgs(m[5])
});
if (m[6]) { //Autoclose widget (`/` at the end of the block)
this.processToken(tpl, stack, {type: '/' + m[1].toLowerCase(), name: m[2]});
}
pos = newpos + 3;
startPos = pos;
}
}
} else if (chr === '{') {
newpos = this.jsTokenize(txt, pos);
if (newpos !== pos) {
var tokenTxt = txt.slice(startPos, pos - 1);
this.processToken(tpl, stack, {type: 'html', txt: tokenTxt, beforeJs: true});
startPos = pos;
this.processToken(tpl, stack, {type: 'js', txt: txt.slice(pos, newpos)});
startPos = pos = newpos + 1; //skip closing }
}
}
}
this.processToken(tpl, stack, {type: 'html', txt: txt.slice(startPos, pos)});
this.processToken(tpl, stack, {type: '/block', name: this.MAIN});
};
smpl.tpl.utils.parseArgs = function(argsText) {
var args = {};
if (argsText) {
var re = /([-\w]+)="(.*?)"/g;
var arg;
while ((arg = re.exec(argsText)) !== null) {
args[arg[1]] = smpl.string.unescapeHTML(arg[2]);
}
}
return args;
};
smpl.tpl.utils.processToken = function(tpl, stack, token) {
var processed;
switch (token.type) {
case 'block':
case 'widget':
stack.push(token);
if (tpl.__blocks[token.name]) {
throw new Error(smpl.string.supplant(MESSAGES.duplicateBlock, [tpl.__name, token.name]));
}
tpl.__blocks[token.name] = [];
if (token.type === 'widget') {
tpl.__widgets[token.name] = {
lib: token.lib,
widget: token.widget,
args: token.args
};
}
break;
case '/widget':
case '/block':
var closed = stack.pop();
if (closed.name !== token.name || closed.type !== token.type.slice(1)) {
throw new Error(smpl.string.supplant(MESSAGES.wrongClosed,
[token.type.slice(1), token.name, closed.type, closed.name]));
}
tpl.__blocks[closed.name] = 'return ' + (tpl.__blocks[closed.name].join(' + ') || '""');
var parent = stack[stack.length - 1];
if (parent) {
tpl.__parents[closed.name] = parent.name;
}
processed = "(this.retrieve('" + closed.name + "') || '')";
break;
case 'html':
token.txt = token.txt.replace(token.beforeJs ? /(\\*)\1\\(\{|$)/g : /(\\*)\1\\(\{)/g, '$1$2');
processed = "'" + smpl.string.escapeJs(token.txt) + "'";
break;
case 'js':
processed = this.compileJs(token.txt);
break;
}
if (processed && stack.length) {
var activeToken = stack[stack.length - 1];
tpl.__blocks[activeToken.name].push(processed);
}
};
smpl.tpl.utils.jsTokenize = function(input, pos) {
var l = input.length,
context = [],
closingChar = {
'(': ')',
'[': ']',
'{': '}'
},
lastchar = '',
newLine = /[\u000A\u000D\u2028\u2029]/,
at = input.charAt(pos) === '@',
initialPos = pos;
while (pos < l) {
var chr = input.charAt(pos++);
if (newLine.test(chr)) {
if (!at) return initialPos;
} else if (chr === '/' && input.charAt(pos) === '/') { //Single line comment
if (!at) return initialPos;
while (!newLine.test(input.charAt(++pos)));
++pos;
} else if (chr === '/' && input.charAt(pos) === '*') { //Multi line comment
if (!at) return initialPos;
var newpos = input.indexOf('*/', pos + 1);
pos = (newpos === -1) ? l : newpos + 2;
} else {
if (chr === '"' || chr === "'") { //String
pos = this.findUnescaped(input, chr, pos);
} else if (chr === '/' && '(,=:[!&|?{};'.indexOf(lastchar) !== -1) { //Regexp literal
while (pos < l) {
chr = input.charAt(pos++);
if (chr === '\\') {
++pos;
} else if (chr === '[') {
pos = this.findUnescaped(input, ']', pos);
} else if (chr === '/') {
break;
}
}
} else if (chr === '{' || chr === '[' || chr === '(') { //new context opening
context.push(closingChar[chr]);
} else if (chr === '}' || chr === ']' || chr === ')') { //closing context
var lastContext = context.pop();
if (lastContext !== chr) {
--pos;
break;
}
}
if (!/\s/.test(chr)) {
lastchar = chr;
}
}
}
return pos;
};
smpl.tpl.utils.findUnescaped = function(input, character, pos) {
var l = input.length;
while (pos < l) {
var chr = input.charAt(pos++);
if (chr === '\\') {
++pos;
} else if (chr === character) {
break;
}
}
return pos;
};
smpl.tpl.utils.compileJs = function(input) {
var noEscape;
if (input.charAt(0) === '{' && input.charAt(input.length - 1) === '}') {
noEscape = true;
input = input.slice(1, -1);
}
var noDolar = /^\$[@\s]/.test(input);
var at = /^\$?@/.test(input);
input = input.slice(+noDolar + at);
if (!noDolar) {
input = input.replace(/\$(?!\.)/g, '$.');
}
if (noEscape) {
input = '((' + input + ")||'')";
} else {
input = 'smpl.string.escapeHTML(' + input + "||'')";
}
return input;
};
smpl.tpl.utils.precompile = function(html) {
var tpl = new smpl.tpl.Template();
tpl.init(html, true);
var js = 'define(["module", "smpl/smpl.tpl"], function(module, smpl) {';
js += 'return new smpl.tpl.Template(module.id,';
js += JSON.stringify(tpl.__blocks);
js += ',';
js += JSON.stringify(tpl.__parents);
js += ');});';
return js;
};
return smpl;
});
| {
this.__blocks[blkId] = new Function('smpl', '$', '"use strict";' + this.__blocks[blkId]);
} | conditional_block |
cart.component.ts | import { Component, OnInit } from '@angular/core';
import { AuthenticationService } from '../services/authentication.service';
import { ProductService } from '../services/product.service';
import { ToastrService } from '../toast.service';
import {Router} from '@angular/router';
import { environment } from '../../environments/environment';
import { OrderService } from '../services/orders.service';
import { DomSanitizer } from '@angular/platform-browser';
import { CountriesService } from '../services/countries.service';
import { CartService } from '../core/cart/cart.service';
declare var jQuery:any;
@Component({
selector: 'app-cart',
templateUrl: './cart.component.html',
styleUrls: ['./cart.component.scss']
})
export class CartComponent implements OnInit {
itemToDelete:any;
buyerId:any;
products:any = [];
empty:boolean;
showLoading:boolean=true;
total:any;
shoppingEnpoint:any = 'shoppingcart/items';
shoppingCartId:string;
API:any = environment.apiURLImg;
countries:any = [];
shipping:any = 0;
cart:any;
/******** Other fees ***********/
lastMilteCost = 0;
sfsMargin = 0;
uaeTaxes = 0;
customs= 0;
firstMileCost = 0;
handlingFees=0;
totalCustoms;
totalSFSMargin= 0;
totalUAETaxes=0;
totalFirstMileCost=0;
totalLastMileCost=0;
totalOtherFees:any = 0;
totalHandlingFees=0;
/******** END Other fees ***********/
otherFees:number = 0;
totalWithShipping:any;
index:any;
userinfo:any;
imageCart: any = [];
preparataion:any =[];
taxesPer:any;
staticField: any;
showSnackBar:boolean = false;
itemsDeleted: any = [];
constructor(private auth: AuthenticationService, private productService: ProductService,
private toast:ToastrService, private router:Router, private cartService:OrderService,
private sanitizer: DomSanitizer, private countriesService: CountriesService, private cService: CartService) { }
async ngOnInit() {
// this.getCart();
//this.getItems()
this.userinfo = this.auth.getLoginData();
this.buyerId = this.userinfo['id'];
await this.getCountries();
await this.getPreparation();
await this.validateCart();
this.getTotal();
}
//VALIDATING CART
async validateCart(){
await new Promise((resolve, reject) => {
this.cartService.validateCart(this.userinfo['id']).subscribe(val =>{
console.log("Cart Validation", val);
if(val['items'].length > 0){
this.itemsDeleted = val['items'];
this.showSnackBar = true;
}
resolve();
}, error =>{
reject();
})
});
}
getTotal(){
this.cartService.getCart( this.buyerId )
.subscribe(
cart=> {
console.log("Cart", cart);
if(cart && cart.hasOwnProperty('items')){
console.log("Si existe");
if(cart['items'].length > 0){
console.log("Si es mayor a cero");
this.cart = cart;
this.shoppingCartId=cart['id']
this.products=cart['items'];
this.lastMilteCost = cart['lastMileCost'];
this.firstMileCost = cart['firstMileCosts'];
this.sfsMargin = cart['sfsMargin'];
this.uaeTaxes = cart['uaeTaxes'];
this.customs = cart['customs'];
this.total= cart['subTotal'];
this.shipping = cart['shipping'];
this.taxesPer = cart['currentCharges']['uaeTaxes'];
this.totalOtherFees = cart['totalOtherFees'];
this.totalWithShipping = cart['total'];
this.products.forEach((data, index) => {
if (data.fish.imagePrimary && data.fish.imagePrimary != '') {
this.imageCart[index] = this.sanitizer.bypassSecurityTrustStyle(`url(${this.API}${data.fish.imagePrimary})`);
}
else if (data.images && data.images.length > 0) {
let src = data['images'][0].src ? data['images'][0].src : data['images'][0];
this.imageCart[index] = this.sanitizer.bypassSecurityTrustStyle(`url(${this.API}${src})`);
}
else {
this.imageCart[index] = this.sanitizer.bypassSecurityTrustStyle('url(../../assets/default-img-product.jpg)');
}
});
this.hideLoader();
this.empty = false;
}else |
} else{
this.hideLoader();
}
},
error=> {
console.log( error );
}
)
}
getTextWidth(text, font) {
// re-use canvas object for better performance
var canvas = document.createElement("canvas");
var context = canvas.getContext("2d");
context.font = font;
var metrics = context.measureText(text);
return metrics.width;
}
public isVacio(id, idSuffix) {
let element = document.querySelector('#' + id) as HTMLInputElement;
if (element === null) return true;
//unit measure
const suffixElement = document.getElementById(idSuffix);
const width = this.getTextWidth(element.value, 'Josefin Sans, sans-serif');
if (suffixElement !== null)
suffixElement.style.left = ($(element).width() / 2) + width + 'px';
return element.value === '';
}
public getTag(perBox, id, unitOfSale){
let element = document.querySelector('#' + id) as HTMLInputElement;
if (element === null) return '';
try{
let val = Number(element.value);
if(val <= 1 && perBox === true) {
return 'box';
}
return perBox === true ? 'boxes' : (unitOfSale).toLowerCase();
}
catch(e){
console.log(e);
return '';
}
}
//GET COUNTRIES
async getCountries() {
await new Promise((resolve, reject) => {
this.countriesService.getCountries().subscribe(
result => {
this.countries = result;
resolve();
},
error => {
console.log( error );
reject();
}
);
})
}
//FUNCTION TO GET ONLY THE TOTALS WHEN CHANGING QTY OF A PRODUCT
getTotalPricing(){
//this.showLoading=true;
this.cartService.getCart( this.userinfo['id'] )
.subscribe(
cart=> {
this.showLoading = false;
if(cart && cart.hasOwnProperty('items')){
if(cart['items'].length > 0){
this.products=cart['items'];
this.products.forEach((data, index) => {
setTimeout(() => {
console.log( jQuery('#range-' + data.fish.id), data.quantity.value);
jQuery('#range-' + data.fish.id).val(data.quantity.value);
this.moveBubble(data.fish.id);
}, 100);
});
this.lastMilteCost = cart['lastMileCost'];
this.firstMileCost = cart['firstMileCosts'];
this.sfsMargin = cart['sfsMargin'];
this.uaeTaxes = cart['totalUAETaxes'];
this.customs = cart['customs'];
this.total= cart['subTotal'];
this.shipping = cart['shipping'];
this.totalOtherFees = cart['totalOtherFees']+cart['totalUAETaxes'];
this.totalWithShipping = cart['total'];
}
}
},
error=> {
//this.showLoading = false;
console.log( error );
}
)
}
hideLoader(){
this.showLoading=false;
this.empty = true;
}
getItems(){
let cart = {
"buyer": this.userinfo['id']
}
this.productService.saveData("shoppingcart", cart).subscribe(result => {
this.cService.setCart(result);
console.log('recalculating totals', result );
},e=>{console.log(e)})
}
getTotalxItem(count, price){
return count*price;
}
deleteItem(i, id){
this.productService.deleteData(`itemshopping/${id}`).subscribe(
result=>{
this.products.splice(i, 1);
console.log("Borrando item..", result, this.products);
this.getItems();
jQuery('#confirmDelete').modal('hide');
this.getAllProductsCount();
if(this.products.length == 0){
this.empty = true;
}
},
e=>{
this.toast.error("Error deleting item!", "Error",{positionClass:"toast-top-right"} );
console.log(e)
}
)
}
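// Rebuilds the { items: [...] } payload from the local product list and, once
// the last entry has been collected, PUTs the whole cart in a single request
// so the backend recomputes pricing; the length check inside forEach keeps
// the update from firing before the array is complete.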
getAllProductsCount(){
var items:any = {"items": []};
this.products.forEach((element, index) => {
let item = {
"id": element['id'],
"quantity": {
"type": element['quantity'].type,
"value": element['quantity'].value
}
}
items['items'].push(item);
console.log( 'get Product Counts', item );
if (items['items'].length == this.products.length){
this.updatecart(items);
}
});
}
updatecart(items){
this.productService.updateData(this.shoppingEnpoint, items).subscribe(result => {
console.log( 'result', result );
this.getTotalPricing();
}, error => {
this.toast.error("Error updating cart!", "Error",{positionClass:"toast-top-right"} );
})
}
checkout(){
localStorage.setItem('shippingCost', this.shipping);
localStorage.setItem('shoppingTotal', this.totalWithShipping);
localStorage.setItem('shoppingCartId', this.shoppingCartId);
localStorage.setItem('totalOtherFees', this.totalOtherFees);
//this.router.navigate(['/checkout'], {queryParams: {shoppingCartId: this.shoppingCartId}});
this.router.navigate(['/reviewcart'], {queryParams: {shoppingCartId: this.shoppingCartId}});
}
findCountryName(value) {
for (var i = 0; i < this.countries.length; i++) {
if (this.countries[i]['code'] === value) {
return this.countries[i].name;
}
}
return null;
}
//FIND NAME OF PREPARATION
findPreparationName(id){
for (var i = 0; i < this.preparataion.length; i++) {
if (this.preparataion[i]['id'] === id) {
return this.preparataion[i].name;
}
}
return null;
}
showConfirmModal(itemID:string, index){
console.log("Product modal ID", itemID, index);
this.itemToDelete = itemID;
this.index = index;
jQuery('#confirmDelete').modal('show');
}
validateMax(i){
console.log(this.products[i].quantity.value);
if(this.products[i].quantity.value > this.products[i].fish.maximumOrder){
this.products[i].quantity.value = this.products[i].fish.maximumOrder;
this.getAllProductsCount();
}else{
this.getAllProductsCount();
}
}
//Function to hide span
hideMe(id) {
const span = document.getElementById('qty-kg-' + id);
const input = document.getElementById('edit-qty-' + id);
(span as HTMLElement).style.display = 'none';
(input as HTMLElement).style.display = 'inline-block';
input.focus();
}
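// Poor man's debounce: remember the latest value and only commit it via
// manualInput() if it is still unchanged one second later, so an update is
// not issued on every keystroke.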
handleInput($event, id, i, max, min, boxweight){
console.log("ON INput", $event.srcElement.value);
let val = $event.srcElement.value;
this.staticField = $event.srcElement.value;
var that = this;
setTimeout(() => {
if(that.staticField == val){
console.log("El valor no ha cambiado en un segundo");
this.manualInput(id, i, max, min, boxweight);
}
}, 1000);
}
//Function to enter a manual kg quantity
manualInput(id, i, max, min, boxweight = 1) {
console.log("Box weight", boxweight);
let val: any = jQuery('#edit-qty-' + id).val();
val = Number(val); // normalize the input string to a number before clamping
console.log("minimo y maximo", min, max, val);
if (val > parseInt(max)) {
val = parseInt(max);
}else if(val < parseInt(min)){
val = parseInt(min);
}
this.products[i].quantity.value = val * boxweight;
// jQuery('#range-' + id).val(val);
// this.moveBubble(id);
this.getAllProductsCount();
}
//Function to hide input and show span
showSpan(id) {
const span = document.getElementById('qty-kg-' + id);
const input = document.getElementById('edit-qty-' + id);
(input as HTMLElement).style.display = 'none';
(span as HTMLElement).style.display = 'block';
}
//JAVASCRIPT FOR SLIDES
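// Positions the quantity bubble over the range slider: newPoint is the
// thumb's fractional position, (value - min) / (max - min), and the bubble's
// margin-left becomes that fraction of the slider width (plus a small
// offset), clamped to the track edges for out-of-range values.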
moveBubble(id){
console.log("Id", id);
var el, newPoint, newPlace, offset;
jQuery('#range-' + id).on('input', function () {
console.log("input");
jQuery(this).trigger('change');
});
// Select all range inputs, watch for change
jQuery('#range-' + id).change(function() {
console.log("Changing");
// Cache this for efficiency
el = jQuery(this);
// Measure width of range input
var width = el.width();
console.log("Width", width);
// Figure out placement percentage between left and right of input
newPoint = (el.val() - el.attr("min")) / (el.attr("max") - el.attr("min"));
console.log("Move Bubble", parseInt(el.val()), el.attr("max"), el.attr("min"));
offset = -1;
// Prevent bubble from going beyond left or right (unsupported browsers)
if (newPoint < 0) { newPlace = 0; }
else if (newPoint > 1) { newPlace = width; }
else { newPlace = width * newPoint + offset; offset -= newPoint; }
// Move bubble
jQuery('#qty-kg-'+id).css('margin-left', newPlace);
jQuery('#edit-qty-'+id).css('margin-left', newPlace);
})
// Fake a change to position bubble at page load
.trigger('change');
}
//GET RANGE VALUE ON CHANGE FOR EACH PRODUCT
getRange(id, i) {
console.log(id, i);
let val: any = jQuery('#range-' + id).val();
console.log("Range Val", val);
this.products[i].quantity.value = val;
this.moveBubble(id);
console.log("Product in array", this.products[i]);
this.getAllProductsCount();
}
showRangeVal(id, i){
let val: any = jQuery('#range-' + id).val();
this.products[i].quantity.value = val;
}
//get preparation
async getPreparation(){
await new Promise((resolve, reject) => {
this.productService.getData(`fishPreparation`).subscribe(res=> {
console.log("Prep", res);
this.preparataion = res;
resolve();
}, error =>{reject()})
})
}
closeSnackBar(){
this.showSnackBar = false;
}
public getFixedNumber(number) {
// if (number !== null && Math.round(number) !== number) {
// number = number.toFixed(2);
// }
return parseInt(number);
}
}
| {
this.hideLoader();
} | conditional_block |
cart.component.ts | import { Component, OnInit } from '@angular/core';
import { AuthenticationService } from '../services/authentication.service';
import { ProductService } from '../services/product.service';
import { ToastrService } from '../toast.service';
import {Router} from '@angular/router';
import { environment } from '../../environments/environment';
import { OrderService } from '../services/orders.service';
import { DomSanitizer } from '@angular/platform-browser';
import { CountriesService } from '../services/countries.service';
import { CartService } from '../core/cart/cart.service';
declare var jQuery:any;
@Component({
selector: 'app-cart',
templateUrl: './cart.component.html',
styleUrls: ['./cart.component.scss']
})
export class CartComponent implements OnInit {
itemToDelete:any;
buyerId:any;
products:any = [];
empty:boolean;
showLoading:boolean=true;
total:any;
shoppingEnpoint:any = 'shoppingcart/items';
shoppingCartId:string;
API:any = environment.apiURLImg;
countries:any = [];
shipping:any = 0;
cart:any;
/******** Other fees ***********/
lastMilteCost = 0;
sfsMargin = 0;
uaeTaxes = 0;
customs= 0;
firstMileCost = 0;
handlingFees=0;
totalCustoms;
totalSFSMargin= 0;
totalUAETaxes=0;
totalFirstMileCost=0;
totalLastMileCost=0;
totalOtherFees:any = 0;
totalHandlingFees=0;
/******** END Other fees ***********/
otherFees:number = 0;
totalWithShipping:any;
index:any;
userinfo:any;
imageCart: any = [];
preparataion:any =[];
taxesPer:any;
staticField: any;
showSnackBar:boolean = false;
itemsDeleted: any = [];
constructor(private auth: AuthenticationService, private productService: ProductService,
private toast:ToastrService, private router:Router, private cartService:OrderService,
private sanitizer: DomSanitizer, private countriesService: CountriesService, private cService: CartService) { }
async ngOnInit() {
// this.getCart();
//this.getItems()
this.userinfo = this.auth.getLoginData();
this.buyerId = this.userinfo['id'];
await this.getCountries();
await this.getPreparation();
await this.validateCart();
this.getTotal();
}
//VALIDATING CART
async validateCart(){
await new Promise((resolve, reject) => {
this.cartService.validateCart(this.userinfo['id']).subscribe(val =>{
console.log("Cart Validation", val);
if(val['items'].length > 0){
this.itemsDeleted = val['items'];
this.showSnackBar = true;
}
resolve();
}, error =>{
reject();
})
});
}
getTotal(){
this.cartService.getCart( this.buyerId )
.subscribe(
cart=> {
console.log("Cart", cart);
if(cart && cart.hasOwnProperty('items')){
console.log("Si existe");
if(cart['items'].length > 0){
console.log("Si es mayor a cero");
this.cart = cart;
this.shoppingCartId=cart['id']
this.products=cart['items'];
this.lastMilteCost = cart['lastMileCost'];
this.firstMileCost = cart['firstMileCosts'];
this.sfsMargin = cart['sfsMargin'];
this.uaeTaxes = cart['uaeTaxes'];
this.customs = cart['customs'];
this.total= cart['subTotal'];
this.shipping = cart['shipping'];
this.taxesPer = cart['currentCharges']['uaeTaxes'];
this.totalOtherFees = cart['totalOtherFees'];
this.totalWithShipping = cart['total'];
this.products.forEach((data, index) => {
if (data.fish.imagePrimary && data.fish.imagePrimary != '') {
this.imageCart[index] = this.sanitizer.bypassSecurityTrustStyle(`url(${this.API}${data.fish.imagePrimary})`);
}
else if (data.images && data.images.length > 0) {
let src = data['images'][0].src ? data['images'][0].src : data['images'][0];
this.imageCart[index] = this.sanitizer.bypassSecurityTrustStyle(`url(${this.API}${src})`);
}
else {
this.imageCart[index] = this.sanitizer.bypassSecurityTrustStyle('url(../../assets/default-img-product.jpg)');
}
});
this.hideLoader();
this.empty = false;
}else{
this.hideLoader();
}
} else{
this.hideLoader();
}
},
error=> {
console.log( error );
}
)
}
getTextWidth(text, font) {
// re-use canvas object for better performance
var canvas = document.createElement("canvas");
var context = canvas.getContext("2d");
context.font = font;
var metrics = context.measureText(text);
return metrics.width;
}
public isVacio(id, idSuffix) {
let element = document.querySelector('#' + id) as HTMLInputElement;
if (element === null) return true;
//unit measure
const suffixElement = document.getElementById(idSuffix);
const width = this.getTextWidth(element.value, 'Josefin Sans, sans-serif');
if (suffixElement !== null)
suffixElement.style.left = ($(element).width() / 2) + width + 'px';
return element.value === '';
}
public getTag(perBox, id, unitOfSale){
let element = document.querySelector('#' + id) as HTMLInputElement;
if (element === null) return '';
try{
let val = Number(element.value);
if(val <= 1 && perBox === true) {
return 'box';
}
return perBox === true ? 'boxes' : (unitOfSale).toLowerCase();
}
catch(e){
console.log(e);
return '';
}
}
//GET COUNTRIES
async getCountries() {
await new Promise((resolve, reject) => {
this.countriesService.getCountries().subscribe(
result => {
this.countries = result;
resolve();
},
error => {
console.log( error );
reject();
}
);
})
}
//FUNCTION TO GET ONLY THE TOTALS WHEN CHANGING QTY OF A PRODUCT
getTotalPricing(){
//this.showLoading=true;
this.cartService.getCart( this.userinfo['id'] )
.subscribe(
cart=> {
this.showLoading = false;
if(cart && cart.hasOwnProperty('items')){
if(cart['items'].length > 0){
this.products=cart['items'];
this.products.forEach((data, index) => {
setTimeout(() => {
console.log( jQuery('#range-' + data.fish.id), data.quantity.value);
jQuery('#range-' + data.fish.id).val(data.quantity.value);
this.moveBubble(data.fish.id);
}, 100);
});
this.lastMilteCost = cart['lastMileCost'];
this.firstMileCost = cart['firstMileCosts'];
this.sfsMargin = cart['sfsMargin'];
this.uaeTaxes = cart['totalUAETaxes'];
this.customs = cart['customs'];
this.total= cart['subTotal'];
this.shipping = cart['shipping'];
this.totalOtherFees = cart['totalOtherFees']+cart['totalUAETaxes'];
this.totalWithShipping = cart['total'];
}
}
},
error=> {
//this.showLoading = false;
console.log( error );
}
)
}
hideLoader(){
this.showLoading=false;
this.empty = true;
}
getItems(){
let cart = {
"buyer": this.userinfo['id']
}
this.productService.saveData("shoppingcart", cart).subscribe(result => {
this.cService.setCart(result);
console.log('recalculating totals', result );
},e=>{console.log(e)})
}
getTotalxItem(count, price){
return count*price;
}
deleteItem(i, id){
this.productService.deleteData(`itemshopping/${id}`).subscribe(
result=>{
this.products.splice(i, 1);
console.log("Borrando item..", result, this.products);
this.getItems();
jQuery('#confirmDelete').modal('hide');
this.getAllProductsCount();
if(this.products.length == 0){
this.empty = true;
}
},
e=>{
this.toast.error("Error deleting item!", "Error",{positionClass:"toast-top-right"} );
console.log(e)
}
)
}
getAllProductsCount(){
var items:any = {"items": []};
this.products.forEach((element, index) => {
let item = {
"id": element['id'],
"quantity": {
"type": element['quantity'].type,
"value": element['quantity'].value
}
}
items['items'].push(item);
console.log( 'get Product Counts', item );
if (items['items'].length == this.products.length){
this.updatecart(items);
}
});
}
| (items){
this.productService.updateData(this.shoppingEnpoint, items).subscribe(result => {
console.log( 'result', result );
this.getTotalPricing();
}, error => {
this.toast.error("Error updating cart!", "Error",{positionClass:"toast-top-right"} );
})
}
checkout(){
localStorage.setItem('shippingCost', this.shipping);
localStorage.setItem('shoppingTotal', this.totalWithShipping);
localStorage.setItem('shoppingCartId', this.shoppingCartId);
localStorage.setItem('totalOtherFees', this.totalOtherFees);
//this.router.navigate(['/checkout'], {queryParams: {shoppingCartId: this.shoppingCartId}});
this.router.navigate(['/reviewcart'], {queryParams: {shoppingCartId: this.shoppingCartId}});
}
findCountryName(value) {
for (var i = 0; i < this.countries.length; i++) {
if (this.countries[i]['code'] === value) {
return this.countries[i].name;
}
}
return null;
}
//FIND NAME OF PREPARATION
findPreparationName(id){
for (var i = 0; i < this.preparataion.length; i++) {
if (this.preparataion[i]['id'] === id) {
return this.preparataion[i].name;
}
}
return null;
}
showConfirmModal(itemID:string, index){
console.log("Product modal ID", itemID, index);
this.itemToDelete = itemID;
this.index = index;
jQuery('#confirmDelete').modal('show');
}
validateMax(i){
console.log(this.products[i].quantity.value);
if(this.products[i].quantity.value > this.products[i].fish.maximumOrder){
this.products[i].quantity.value = this.products[i].fish.maximumOrder;
this.getAllProductsCount();
}else{
this.getAllProductsCount();
}
}
//Function to hide span
hideMe(id) {
const span = document.getElementById('qty-kg-' + id);
const input = document.getElementById('edit-qty-' + id);
(span as HTMLElement).style.display = 'none';
(input as HTMLElement).style.display = 'inline-block';
input.focus();
}
handleInput($event, id, i, max, min, boxweight){
console.log("ON INput", $event.srcElement.value);
let val = $event.srcElement.value;
this.staticField = $event.srcElement.value;
var that = this;
setTimeout(() => {
if(that.staticField == val){
console.log("El valor no ha cambiado en un segundo");
this.manualInput(id, i, max, min, boxweight);
}
}, 1000);
}
//Function to enter a manual kg quantity
manualInput(id, i, max, min, boxweight = 1) {
console.log("Box weight", boxweight);
let val: any = jQuery('#edit-qty-' + id).val();
val = Number(val); // normalize the input string to a number before clamping
console.log("minimo y maximo", min, max, val);
if (val > parseInt(max)) {
val = parseInt(max);
}else if(val < parseInt(min)){
val = parseInt(min);
}
this.products[i].quantity.value = val * boxweight;
// jQuery('#range-' + id).val(val);
// this.moveBubble(id);
this.getAllProductsCount();
}
//Function to hide input and show span
showSpan(id) {
const span = document.getElementById('qty-kg-' + id);
const input = document.getElementById('edit-qty-' + id);
(input as HTMLElement).style.display = 'none';
(span as HTMLElement).style.display = 'block';
}
//JAVASCRIPT FOR SLIDES
moveBubble(id){
console.log("Id", id);
var el, newPoint, newPlace, offset;
jQuery('#range-' + id).on('input', function () {
console.log("input");
jQuery(this).trigger('change');
});
// Select all range inputs, watch for change
jQuery('#range-' + id).change(function() {
console.log("Changing");
// Cache this for efficiency
el = jQuery(this);
// Measure width of range input
var width = el.width();
console.log("Width", width);
// Figure out placement percentage between left and right of input
newPoint = (el.val() - el.attr("min")) / (el.attr("max") - el.attr("min"));
console.log("Move Bubble", parseInt(el.val()), el.attr("max"), el.attr("min"));
offset = -1;
// Prevent bubble from going beyond left or right (unsupported browsers)
if (newPoint < 0) { newPlace = 0; }
else if (newPoint > 1) { newPlace = width; }
else { newPlace = width * newPoint + offset; offset -= newPoint; }
// Move bubble
jQuery('#qty-kg-'+id).css('margin-left', newPlace);
jQuery('#edit-qty-'+id).css('margin-left', newPlace);
})
// Fake a change to position bubble at page load
.trigger('change');
}
//GET RANGE VALUE ON CHANGE FOR EACH PRODUCT
getRange(id, i) {
console.log(id, i);
let val: any = jQuery('#range-' + id).val();
console.log("Range Val", val);
this.products[i].quantity.value = val;
this.moveBubble(id);
console.log("Product in array", this.products[i]);
this.getAllProductsCount();
}
showRangeVal(id, i){
let val: any = jQuery('#range-' + id).val();
this.products[i].quantity.value = val;
}
//get preparation
async getPreparation(){
await new Promise((resolve, reject) => {
this.productService.getData(`fishPreparation`).subscribe(res=> {
console.log("Prep", res);
this.preparataion = res;
resolve();
}, error =>{reject()})
})
}
closeSnackBar(){
this.showSnackBar = false;
}
public getFixedNumber(number) {
// if (number !== null && Math.round(number) !== number) {
// number = number.toFixed(2);
// }
return parseInt(number);
}
}
| updatecart | identifier_name |
cart.component.ts | import { Component, OnInit } from '@angular/core';
import { AuthenticationService } from '../services/authentication.service';
import { ProductService } from '../services/product.service';
import { ToastrService } from '../toast.service';
import {Router} from '@angular/router';
import { environment } from '../../environments/environment';
import { OrderService } from '../services/orders.service';
import { DomSanitizer } from '@angular/platform-browser';
import { CountriesService } from '../services/countries.service';
import { CartService } from '../core/cart/cart.service';
declare var jQuery:any;
@Component({
selector: 'app-cart',
templateUrl: './cart.component.html',
styleUrls: ['./cart.component.scss']
})
export class CartComponent implements OnInit {
itemToDelete:any;
buyerId:any;
products:any = [];
empty:boolean;
showLoading:boolean=true;
total:any;
shoppingEnpoint:any = 'shoppingcart/items';
shoppingCartId:string;
API:any = environment.apiURLImg;
countries:any = [];
shipping:any = 0;
cart:any;
/******** Other fees ***********/
lastMilteCost = 0;
sfsMargin = 0;
uaeTaxes = 0;
customs= 0;
firstMileCost = 0;
handlingFees=0;
totalCustoms;
totalSFSMargin= 0;
totalUAETaxes=0;
totalFirstMileCost=0;
totalLastMileCost=0;
totalOtherFees:any = 0;
totalHandlingFees=0;
/******** END Other fees ***********/
otherFees:number = 0;
totalWithShipping:any;
index:any;
userinfo:any;
imageCart: any = [];
preparataion:any =[];
taxesPer:any;
staticField: any;
showSnackBar:boolean = false;
itemsDeleted: any = [];
constructor(private auth: AuthenticationService, private productService: ProductService,
private toast:ToastrService, private router:Router, private cartService:OrderService,
private sanitizer: DomSanitizer, private countriesService: CountriesService, private cService: CartService) { }
async ngOnInit() {
// this.getCart();
//this.getItems()
this.userinfo = this.auth.getLoginData();
this.buyerId = this.userinfo['id'];
await this.getCountries();
await this.getPreparation();
await this.validateCart();
this.getTotal();
}
//VALIDATING CART
async validateCart(){
await new Promise((resolve, reject) => {
this.cartService.validateCart(this.userinfo['id']).subscribe(val =>{
console.log("Cart Validation", val);
if(val['items'].length > 0){
this.itemsDeleted = val['items'];
this.showSnackBar = true;
}
resolve();
}, error =>{
reject();
})
});
}
getTotal(){
this.cartService.getCart( this.buyerId )
.subscribe(
cart=> {
console.log("Cart", cart);
if(cart && cart.hasOwnProperty('items')){
console.log("Si existe");
if(cart['items'].length > 0){
console.log("Si es mayor a cero");
this.cart = cart;
this.shoppingCartId=cart['id']
this.products=cart['items'];
this.lastMilteCost = cart['lastMileCost'];
this.firstMileCost = cart['firstMileCosts'];
this.sfsMargin = cart['sfsMargin'];
this.uaeTaxes = cart['uaeTaxes'];
this.customs = cart['customs'];
this.total= cart['subTotal'];
this.shipping = cart['shipping'];
this.taxesPer = cart['currentCharges']['uaeTaxes'];
this.totalOtherFees = cart['totalOtherFees'];
this.totalWithShipping = cart['total'];
this.products.forEach((data, index) => {
if (data.fish.imagePrimary && data.fish.imagePrimary != '') {
this.imageCart[index] = this.sanitizer.bypassSecurityTrustStyle(`url(${this.API}${data.fish.imagePrimary})`);
}
else if (data.images && data.images.length > 0) {
let src = data['images'][0].src ? data['images'][0].src : data['images'][0];
this.imageCart[index] = this.sanitizer.bypassSecurityTrustStyle(`url(${this.API}${src})`);
}
else {
this.imageCart[index] = this.sanitizer.bypassSecurityTrustStyle('url(../../assets/default-img-product.jpg)');
}
});
this.hideLoader();
this.empty = false;
}else{
this.hideLoader();
}
} else{
this.hideLoader();
}
},
error=> {
console.log( error );
}
)
}
getTextWidth(text, font) {
// use an off-screen canvas to measure the text
var canvas = document.createElement("canvas");
var context = canvas.getContext("2d");
context.font = font;
var metrics = context.measureText(text);
return metrics.width;
}
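// Illustrative usage (not from the original): measure the rendered width of a
// quantity string in the component font, as isVacio() does below to position
// the unit suffix right after the typed digits.
//   const w = this.getTextWidth('12.5', 'Josefin Sans, sans-serif');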
public isVacio(id, idSuffix) {
let element = document.querySelector('#' + id) as HTMLInputElement;
if (element === null) return true;
//unit measure
const suffixElement = document.getElementById(idSuffix);
const width = this.getTextWidth(element.value, 'Josefin Sans, sans-serif');
if (suffixElement !== null)
suffixElement.style.left = (jQuery(element).width() / 2) + width + 'px';
return element.value === '';
}
public getTag(perBox, id, unitOfSale){
let element = document.querySelector('#' + id) as HTMLInputElement;
if (element === null) return '';
try{
let val = Number(element.value);
if(val <= 1 && perBox === true) {
return 'box';
}
return perBox === true ? 'boxes' : (unitOfSale).toLowerCase();
}
catch(e){
console.log(e);
return '';
}
}
//GET COUNTRIES
async getCountries() {
await new Promise((resolve, reject) => {
this.countriesService.getCountries().subscribe(
result => {
this.countries = result;
resolve();
},
error => {
console.log( error );
reject();
}
);
})
}
//FUNCTION TO GET ONLY THE TOTALS WHEN CHANGING QTY OF A PRODUCT
getTotalPricing(){
//this.showLoading=true;
this.cartService.getCart( this.userinfo['id'] )
.subscribe(
cart=> {
this.showLoading = false;
if(cart && cart.hasOwnProperty('items')){
if(cart['items'].length > 0){
this.products=cart['items'];
this.products.forEach((data, index) => {
setTimeout(() => {
console.log( jQuery('#range-' + data.fish.id), data.quantity.value);
jQuery('#range-' + data.fish.id).val(data.quantity.value);
this.moveBubble(data.fish.id);
}, 100);
});
this.lastMilteCost = cart['lastMileCost'];
this.firstMileCost = cart['firstMileCosts'];
this.sfsMargin = cart['sfsMargin'];
this.uaeTaxes = cart['totalUAETaxes'];
this.customs = cart['customs'];
this.total= cart['subTotal'];
this.shipping = cart['shipping'];
this.totalOtherFees = cart['totalOtherFees']+cart['totalUAETaxes'];
this.totalWithShipping = cart['total'];
}
}
},
error=> {
//this.showLoading = false;
console.log( error );
}
)
}
hideLoader(){
this.showLoading=false;
this.empty = true;
}
getItems(){
let cart = {
"buyer": this.userinfo['id']
}
this.productService.saveData("shoppingcart", cart).subscribe(result => {
this.cService.setCart(result);
console.log('calculate totals', result );
},e=>{console.log(e)})
}
getTotalxItem(count, price){
return count*price;
}
deleteItem(i, id){
this.productService.deleteData(`itemshopping/${id}`).subscribe(
result=>{
this.products.splice(i, 1);
console.log("Borrando item..", result, this.products);
this.getItems();
jQuery('#confirmDelete').modal('hide');
this.getAllProductsCount();
if(this.products.length == 0){
this.empty = true;
}
},
e=>{
this.toast.error("Error deleting item!", "Error",{positionClass:"toast-top-right"} );
console.log(e)
}
)
}
getAllProductsCount(){
var items:any = {"items": []};
this.products.forEach((element, index) => {
let item = {
"id": element['id'],
"quantity": {
"type": element['quantity'].type,
"value": element['quantity'].value
}
}
items['items'].push(item);
console.log( 'get Product Counts', item );
if (items['items'].length == this.products.length){
this.updatecart(items);
}
});
}
updatecart(items){
this.productService.updateData(this.shoppingEnpoint, items).subscribe(result => {
console.log( 'result', result );
this.getTotalPricing();
}, error => {
this.toast.error("Error updating cart!", "Error",{positionClass:"toast-top-right"} );
})
}
checkout(){
localStorage.setItem('shippingCost', this.shipping);
localStorage.setItem('shoppingTotal', this.totalWithShipping);
localStorage.setItem('shoppingCartId', this.shoppingCartId);
localStorage.setItem('totalOtherFees', this.totalOtherFees);
//this.router.navigate(['/checkout'], {queryParams: {shoppingCartId: this.shoppingCartId}});
this.router.navigate(['/reviewcart'], {queryParams: {shoppingCartId: this.shoppingCartId}});
}
findCountryName(value) {
for (var i = 0; i < this.countries.length; i++) {
if (this.countries[i]['code'] === value) {
return this.countries[i].name;
}
}
return null;
}
//FIND NAME OF PREPARATION
findPreparationName(id){
for (var i = 0; i < this.preparataion.length; i++) {
if (this.preparataion[i]['id'] === id) {
return this.preparataion[i].name;
}
}
return null;
}
showConfirmModal(itemID:string, index) |
validateMax(i){
console.log(this.products[i].quantity.value);
if(this.products[i].quantity.value > this.products[i].fish.maximumOrder){
this.products[i].quantity.value = this.products[i].fish.maximumOrder;
this.getAllProductsCount();
}else{
this.getAllProductsCount();
}
}
//Function to hide span
hideMe(id) {
const span = document.getElementById('qty-kg-' + id);
const input = document.getElementById('edit-qty-' + id);
(span as HTMLElement).style.display = 'none';
(input as HTMLElement).style.display = 'inline-block';
input.focus();
}
handleInput($event, id, i, max, min, boxweight){
console.log("ON INput", $event.srcElement.value);
let val = $event.srcElement.value;
this.staticField = $event.srcElement.value;
var that = this;
setTimeout(() => {
if(that.staticField == val){
console.log("El valor no ha cambiado en un segundo");
this.manualInput(id, i, max, min, boxweight);
}
}, 1000);
}
//Function to enter manual kg
manualInput(id, i, max, min, boxweight = 1) {
console.log("BOx Weight", boxweight);
let val: any = jQuery('#edit-qty-' + id).val();
console.log("minimo y maximo", min, max, val);
if (val > parseInt(max)) {
val = parseInt(max);
}else if(val < parseInt(min)){
val = parseInt(min);
}
this.products[i].quantity.value = val * boxweight;
// jQuery('#range-' + id).val(val);
// this.moveBubble(id);
this.getAllProductsCount();
}
//Function to hide input and show span
showSpan(id) {
const span = document.getElementById('qty-kg-' + id);
const input = document.getElementById('edit-qty-' + id);
(input as HTMLElement).style.display = 'none';
(span as HTMLElement).style.display = 'block';
}
//JAVASCRIPT FOR SLIDES
moveBubble(id){
console.log("Id", id);
var el, newPoint, newPlace, offset;
jQuery('#range-' + id).on('input', function () {
console.log("input");
jQuery(this).trigger('change');
});
// Select all range inputs, watch for change
jQuery('#range-' + id).change(function() {
console.log("Changing");
// Cache this for efficiency
el = jQuery(this);
// Measure width of range input
var width = el.width();
console.log("Width", width);
// Figure out placement percentage between left and right of input
newPoint = (el.val() - el.attr("min")) / (el.attr("max") - el.attr("min"));
console.log("Move Bubble", parseInt(el.val()), el.attr("max"), el.attr("min"));
offset = -1;
// Prevent bubble from going beyond left or right (unsupported browsers)
if (newPoint < 0) { newPlace = 0; }
else if (newPoint > 1) { newPlace = width; }
else { newPlace = width * newPoint + offset; offset -= newPoint; }
// Move bubble
jQuery('#qty-kg-'+id).css('margin-left', newPlace);
jQuery('#edit-qty-'+id).css('margin-left', newPlace);
})
// Fake a change to position bubble at page load
.trigger('change');
}
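// Worked example of the bubble math above (illustrative numbers): with
// min="1", max="10" and a current value of 5.5, newPoint is
// (5.5 - 1) / (10 - 1) = 0.5, so the bubble lands at roughly half the
// slider's width plus the small offset correction.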
//GET RANGE VALUE ON CHANGE FOR EACH PRODUCT
getRange(id, i) {
console.log(id, i);
let val: any = jQuery('#range-' + id).val();
console.log("Range Val", val);
this.products[i].quantity.value = val;
this.moveBubble(id);
console.log("Product in array", this.products[i]);
this.getAllProductsCount();
}
showRangeVal(id, i){
let val: any = jQuery('#range-' + id).val();
this.products[i].quantity.value = val;
}
//get preparation
async getPreparation(){
await new Promise((resolve, reject) => {
this.productService.getData(`fishPreparation`).subscribe(res=> {
console.log("Prep", res);
this.preparataion = res;
resolve();
}, error =>{reject()})
})
}
closeSnackBar(){
this.showSnackBar = false;
}
public getFixedNumber(number) {
// if (number !== null && Math.round(number) !== number) {
// number = number.toFixed(2);
// }
return parseInt(number);
}
}
| {
console.log("Product modal ID", itemID, index);
this.itemToDelete = itemID;
this.index = index;
jQuery('#confirmDelete').modal('show');
} | identifier_body |
cart.component.ts | import { Component, OnInit } from '@angular/core';
import { AuthenticationService } from '../services/authentication.service';
import { ProductService } from '../services/product.service';
import { ToastrService } from '../toast.service';
import {Router} from '@angular/router';
import { environment } from '../../environments/environment';
import { OrderService } from '../services/orders.service';
import { DomSanitizer } from '@angular/platform-browser';
import { CountriesService } from '../services/countries.service';
import { CartService } from '../core/cart/cart.service';
declare var jQuery:any;
@Component({
selector: 'app-cart',
templateUrl: './cart.component.html',
styleUrls: ['./cart.component.scss']
})
export class CartComponent implements OnInit {
itemToDelete:any;
buyerId:any;
products:any = [];
empty:boolean;
showLoading:boolean=true;
total:any;
shoppingEnpoint:any = 'shoppingcart/items';
shoppingCartId:string;
API:any = environment.apiURLImg;
countries:any = [];
shipping:any = 0;
cart:any;
/******** Other fees ***********/
lastMilteCost = 0;
sfsMargin = 0;
uaeTaxes = 0;
customs= 0;
firstMileCost = 0;
handlingFees=0;
totalCustoms;
totalSFSMargin= 0;
totalUAETaxes=0;
totalFirstMileCost=0;
totalLastMileCost=0;
totalOtherFees:any = 0;
totalHandlingFees=0;
/******** END Other fees ***********/
otherFees:number = 0;
totalWithShipping:any;
index:any;
userinfo:any;
imageCart: any = [];
preparataion:any =[];
taxesPer:any;
staticField: any;
showSnackBar:boolean = false;
itemsDeleted: any = [];
constructor(private auth: AuthenticationService, private productService: ProductService,
private toast:ToastrService, private router:Router, private cartService:OrderService,
private sanitizer: DomSanitizer, private countriesService: CountriesService, private cService: CartService) { }
async ngOnInit() {
// this.getCart();
//this.getItems()
this.userinfo = this.auth.getLoginData();
this.buyerId = this.userinfo['id'];
await this.getCountries();
await this.getPreparation();
await this.validateCart();
this.getTotal();
}
//VALIDATING CART
async validateCart(){
await new Promise((resolve, reject) => {
this.cartService.validateCart(this.userinfo['id']).subscribe(val =>{
console.log("Cart Validation", val);
if(val['items'].length > 0){
this.itemsDeleted = val['items'];
this.showSnackBar = true;
}
resolve();
}, error =>{
reject();
})
});
}
getTotal(){
this.cartService.getCart( this.buyerId )
.subscribe(
cart=> {
console.log("Cart", cart);
if(cart && cart.hasOwnProperty('items')){
console.log("Si existe");
if(cart['items'].length > 0){
console.log("Si es mayor a cero");
this.cart = cart;
this.shoppingCartId=cart['id']
this.products=cart['items'];
this.lastMilteCost = cart['lastMileCost'];
this.firstMileCost = cart['firstMileCosts'];
this.sfsMargin = cart['sfsMargin'];
this.uaeTaxes = cart['uaeTaxes'];
this.customs = cart['customs'];
this.total= cart['subTotal'];
this.shipping = cart['shipping'];
this.taxesPer = cart['currentCharges']['uaeTaxes'];
this.totalOtherFees = cart['totalOtherFees'];
this.totalWithShipping = cart['total'];
this.products.forEach((data, index) => {
if (data.fish.imagePrimary && data.fish.imagePrimary != '') {
this.imageCart[index] = this.sanitizer.bypassSecurityTrustStyle(`url(${this.API}${data.fish.imagePrimary})`);
}
else if (data.images && data.images.length > 0) {
let src = data['images'][0].src ? data['images'][0].src : data['images'][0];
this.imageCart[index] = this.sanitizer.bypassSecurityTrustStyle(`url(${this.API}${src})`);
}
else {
this.imageCart[index] = this.sanitizer.bypassSecurityTrustStyle('url(../../assets/default-img-product.jpg)');
}
});
this.hideLoader();
this.empty = false;
}else{
this.hideLoader();
}
} else{
this.hideLoader();
}
},
error=> {
console.log( error );
}
)
}
getTextWidth(text, font) {
// re-use canvas object for better performance | var context = canvas.getContext("2d");
context.font = font;
var metrics = context.measureText(text);
return metrics.width;
}
public isVacio(id, idSuffix) {
let element = document.querySelector('#' + id) as HTMLInputElement;
if (element === null) return true;
//unit measure
const suffixElement = document.getElementById(idSuffix);
const width = this.getTextWidth(element.value, 'Josefin Sans, sans-serif');
if (suffixElement !== null)
suffixElement.style.left = (jQuery(element).width() / 2) + width + 'px';
return element.value === '';
}
public getTag(perBox, id, unitOfSale){
let element = document.querySelector('#' + id) as HTMLInputElement;
if (element === null) return '';
try{
let val = Number(element.value);
if(val <= 1 && perBox === true) {
return 'box';
}
return perBox === true ? 'boxes' : (unitOfSale).toLowerCase();
}
catch(e){
console.log(e);
return '';
}
}
//GET COUNTRIES
async getCountries() {
await new Promise((resolve, reject) => {
this.countriesService.getCountries().subscribe(
result => {
this.countries = result;
resolve();
},
error => {
console.log( error );
reject();
}
);
})
}
//FUNCTION TO GET ONLY THE TOTALS WHEN CHANGING QTY OF A PRODUCT
getTotalPricing(){
//this.showLoading=true;
this.cartService.getCart( this.userinfo['id'] )
.subscribe(
cart=> {
this.showLoading = false;
if(cart && cart.hasOwnProperty('items')){
if(cart['items'].length > 0){
this.products=cart['items'];
this.products.forEach((data, index) => {
setTimeout(() => {
console.log( jQuery('#range-' + data.fish.id), data.quantity.value);
jQuery('#range-' + data.fish.id).val(data.quantity.value);
this.moveBubble(data.fish.id);
}, 100);
});
this.lastMilteCost = cart['lastMileCost'];
this.firstMileCost = cart['firstMileCosts'];
this.sfsMargin = cart['sfsMargin'];
this.uaeTaxes = cart['totalUAETaxes'];
this.customs = cart['customs'];
this.total= cart['subTotal'];
this.shipping = cart['shipping'];
this.totalOtherFees = cart['totalOtherFees']+cart['totalUAETaxes'];
this.totalWithShipping = cart['total'];
}
}
},
error=> {
//this.showLoading = false;
console.log( error );
}
)
}
hideLoader(){
this.showLoading=false;
this.empty = true;
}
getItems(){
let cart = {
"buyer": this.userinfo['id']
}
this.productService.saveData("shoppingcart", cart).subscribe(result => {
this.cService.setCart(result);
console.log('calculate totals', result );
},e=>{console.log(e)})
}
getTotalxItem(count, price){
return count*price;
}
deleteItem(i, id){
this.productService.deleteData(`itemshopping/${id}`).subscribe(
result=>{
this.products.splice(i, 1);
console.log("Borrando item..", result, this.products);
this.getItems();
jQuery('#confirmDelete').modal('hide');
this.getAllProductsCount();
if(this.products.length == 0){
this.empty = true;
}
},
e=>{
this.toast.error("Error deleting item!", "Error",{positionClass:"toast-top-right"} );
console.log(e)
}
)
}
getAllProductsCount(){
var items:any = {"items": []};
this.products.forEach((element, index) => {
let item = {
"id": element['id'],
"quantity": {
"type": element['quantity'].type,
"value": element['quantity'].value
}
}
items['items'].push(item);
console.log( 'get Product Counts', item );
if (items['items'].length == this.products.length){
this.updatecart(items);
}
});
}
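// Shape of the payload handed to updatecart() (illustrative values; the real
// ids and quantity types come from the cart items):
//   { "items": [ { "id": "item-1", "quantity": { "type": "kg", "value": 3 } } ] }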
updatecart(items){
this.productService.updateData(this.shoppingEnpoint, items).subscribe(result => {
console.log( 'result', result );
this.getTotalPricing();
}, error => {
this.toast.error("Error updating cart!", "Error",{positionClass:"toast-top-right"} );
})
}
checkout(){
localStorage.setItem('shippingCost', this.shipping);
localStorage.setItem('shoppingTotal', this.totalWithShipping);
localStorage.setItem('shoppingCartId', this.shoppingCartId);
localStorage.setItem('totalOtherFees', this.totalOtherFees);
//this.router.navigate(['/checkout'], {queryParams: {shoppingCartId: this.shoppingCartId}});
this.router.navigate(['/reviewcart'], {queryParams: {shoppingCartId: this.shoppingCartId}});
}
findCountryName(value) {
for (var i = 0; i < this.countries.length; i++) {
if (this.countries[i]['code'] === value) {
return this.countries[i].name;
}
}
return null;
}
//FIND NAME OF PREPARATION
findPreparationName(id){
for (var i = 0; i < this.preparataion.length; i++) {
if (this.preparataion[i]['id'] === id) {
return this.preparataion[i].name;
}
}
return null;
}
showConfirmModal(itemID:string, index){
console.log("Product modal ID", itemID, index);
this.itemToDelete = itemID;
this.index = index;
jQuery('#confirmDelete').modal('show');
}
validateMax(i){
console.log(this.products[i].quantity.value);
if(this.products[i].quantity.value > this.products[i].fish.maximumOrder){
this.products[i].quantity.value = this.products[i].fish.maximumOrder;
this.getAllProductsCount();
}else{
this.getAllProductsCount();
}
}
//Function to hide span
hideMe(id) {
const span = document.getElementById('qty-kg-' + id);
const input = document.getElementById('edit-qty-' + id);
(span as HTMLElement).style.display = 'none';
(input as HTMLElement).style.display = 'inline-block';
input.focus();
}
handleInput($event, id, i, max, min, boxweight){
console.log("ON INput", $event.srcElement.value);
let val = $event.srcElement.value;
this.staticField = $event.srcElement.value;
var that = this;
setTimeout(() => {
if(that.staticField == val){
console.log("El valor no ha cambiado en un segundo");
this.manualInput(id, i, max, min, boxweight);
}
}, 1000);
}
//Function to enter manual kg
manualInput(id, i, max, min, boxweight = 1) {
console.log("BOx Weight", boxweight);
let val: any = jQuery('#edit-qty-' + id).val();
console.log("minimo y maximo", min, max, val);
if (val > parseInt(max)) {
val = parseInt(max);
}else if(val < parseInt(min)){
val = parseInt(min);
}
this.products[i].quantity.value = val * boxweight;
// jQuery('#range-' + id).val(val);
// this.moveBubble(id);
this.getAllProductsCount();
}
//Function to hide input and show span
showSpan(id) {
const span = document.getElementById('qty-kg-' + id);
const input = document.getElementById('edit-qty-' + id);
(input as HTMLElement).style.display = 'none';
(span as HTMLElement).style.display = 'block';
}
//JAVASCRIPT FOR SLIDES
moveBubble(id){
console.log("Id", id);
var el, newPoint, newPlace, offset;
jQuery('#range-' + id).on('input', function () {
console.log("input");
jQuery(this).trigger('change');
});
// Select all range inputs, watch for change
jQuery('#range-' + id).change(function() {
console.log("Changing");
// Cache this for efficiency
el = jQuery(this);
// Measure width of range input
var width = el.width();
console.log("Width", width);
// Figure out placement percentage between left and right of input
newPoint = (el.val() - el.attr("min")) / (el.attr("max") - el.attr("min"));
console.log("Move Bubble", parseInt(el.val()), el.attr("max"), el.attr("min"));
offset = -1;
// Prevent bubble from going beyond left or right (unsupported browsers)
if (newPoint < 0) { newPlace = 0; }
else if (newPoint > 1) { newPlace = width; }
else { newPlace = width * newPoint + offset; offset -= newPoint; }
// Move bubble
jQuery('#qty-kg-'+id).css('margin-left', newPlace);
jQuery('#edit-qty-'+id).css('margin-left', newPlace);
})
// Fake a change to position bubble at page load
.trigger('change');
}
//GET RANGE VALUE ON CHANGE FOR EACH PRODUCT
getRange(id, i) {
console.log(id, i);
let val: any = jQuery('#range-' + id).val();
console.log("Range Val", val);
this.products[i].quantity.value = val;
this.moveBubble(id);
console.log("Product in array", this.products[i]);
this.getAllProductsCount();
}
showRangeVal(id, i){
let val: any = jQuery('#range-' + id).val();
this.products[i].quantity.value = val;
}
//get preparation
async getPreparation(){
await new Promise((resolve, reject) => {
this.productService.getData(`fishPreparation`).subscribe(res=> {
console.log("Prep", res);
this.preparataion = res;
resolve();
}, error =>{reject()})
})
}
closeSnackBar(){
this.showSnackBar = false;
}
public getFixedNumber(number) {
// if (number !== null && Math.round(number) !== number) {
// number = number.toFixed(2);
// }
return parseInt(number);
}
} | var canvas = document.createElement("canvas"); | random_line_split |
sink.rs | use anyhow::{anyhow, Context, Result};
use byte_unit::n_mib_bytes;
use fuzztruction_shared::util::try_get_child_exit_reason;
use log::error;
use std::env::set_current_dir;
use std::os::unix::prelude::AsRawFd;
use std::{
convert::TryFrom,
ffi::CString,
fs::{File, OpenOptions},
io::{Seek, SeekFrom, Write},
ops::*,
path::PathBuf,
};
use std::{fs, io, mem};
use std::{os::raw::c_char, time::Duration};
use thiserror::Error;
use libc::SIGKILL;
use mktemp::Temp;
use nix::sys::signal::Signal;
use crate::config::Config;
use crate::io_channels::InputChannel;
use crate::sink_bitmap::{Bitmap, BITMAP_DEFAULT_MAP_SIZE};
use filedescriptor;
const FS_OPT_MAPSIZE: u32 = 0x40000000;
// FDs used by the forkserver to communicate with us.
// Hardcoded in AFLs config.h.
const FORKSRV_FD: i32 = 198;
const AFL_READ_FROM_PARENT_FD: i32 = FORKSRV_FD;
const AFL_WRITE_TO_PARENT_FD: i32 = FORKSRV_FD + 1;
const AFL_SHM_ENV_VAR_NAME: &str = "__AFL_SHM_ID";
const AFL_DEFAULT_TIMEOUT: Duration = Duration::from_millis(10000);
fn repeat_on_interrupt<F, R>(f: F) -> R
where
F: Fn() -> R,
R: TryInto<libc::c_int> + Clone,
{
loop {
let ret = f();
if ret.clone().try_into().unwrap_or(0) != -libc::EINTR {
return ret;
} else {
log::trace!("Repeating call because of EINTR");
}
}
}
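// Illustrative usage, mirroring the calls in AflSink::run() below: retry a raw
// libc write until it returns something other than -EINTR. `send_fd` and
// `buf_ptr` are placeholder names here.
//
//     let ret = repeat_on_interrupt(|| unsafe { libc::write(send_fd, buf_ptr, 4) });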
/// Type used to represent error conditions of the source.
#[derive(Error, Debug)]
pub enum SinkError {
#[error("The workdir '{0}' already exists.")]
WorkdirExists(String),
#[error("Fatal error occurred: {0}")]
FatalError(String),
#[error("Exceeded timeout while waiting for data: {0}")]
CommunicationTimeoutError(String),
#[error(transparent)]
Other(#[from] anyhow::Error),
}
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
pub enum RunResult {
Terminated(i32),
Signalled(Signal),
TimedOut,
}
#[derive(Debug)]
pub struct AflSink {
/// That file system path to the target binary.
path: PathBuf,
/// The arguments passed to the binary.
args: Vec<String>,
/// Workdir
#[allow(unused)]
workdir: PathBuf,
/// Description of how the target binary consumes fuzzing input.
input_channel: InputChannel,
/// The file that is used to pass input to the target.
input_file: (File, PathBuf),
/// The session id of the forkserver we are communicating with.
forkserver_sid: Option<i32>,
/// The bitmap used to compute coverage.
bitmap: Bitmap,
/// The fd used to send data to the forkserver.
send_fd: Option<i32>,
/// Non blocking fd used to receive data from the forkserver.
receive_fd: Option<i32>,
#[allow(unused)]
stdout_file: Option<(File, PathBuf)>,
#[allow(unused)]
stderr_file: Option<(File, PathBuf)>,
/// Whether to log the output written to stdout. If false, the output is discarded.
log_stdout: bool,
/// Whether to log the output written to stderr. If false, the output is discarded.
log_stderr: bool,
config: Option<Config>,
bitmap_was_resize: bool,
}
impl AflSink {
pub fn new(
path: PathBuf,
mut args: Vec<String>,
mut workdir: PathBuf,
input_channel: InputChannel,
config: Option<&Config>,
log_stdout: bool,
log_stderr: bool,
) -> Result<AflSink> {
workdir.push("sink");
// Create the file into which we write input data before execution.
fs::create_dir_all(&workdir)?;
set_current_dir(&workdir)?;
let tmpfile_path = Temp::new_file_in(&workdir).unwrap().to_path_buf();
let mut input_file_path = String::from(tmpfile_path.to_str().unwrap());
input_file_path.push_str("_input");
let input_file_path = PathBuf::from(input_file_path);
let input_file = OpenOptions::new()
.read(true)
.write(true)
.create(true)
.truncate(true)
.open(&input_file_path)?;
// Replace the @@ marker in the args with the actual file path (if input type is File).
if input_channel == InputChannel::File {
if let Some(elem) = args.iter_mut().find(|e| **e == "@@") {
*elem = input_file_path.to_str().unwrap().to_owned();
} else {
return Err(anyhow!(format!("No @@ marker in args, even though the input channel is defined as file. args: {:#?}", args)));
}
}
let mut stdout_file = None;
if log_stdout {
// Setup file for stdout logging.
let mut path = workdir.clone();
path.push("stdout");
let file = OpenOptions::new()
.read(true)
.write(true)
.create(true)
.truncate(true)
.open(&path)
.unwrap();
stdout_file = Some((file, path));
}
let mut stderr_file = None;
if log_stderr {
// Setup file for stderr logging.
let mut path = workdir.clone();
path.push("stderr");
let file = OpenOptions::new()
.read(true)
.write(true)
.create(true)
.truncate(true)
.open(&path)
.unwrap();
stderr_file = Some((file, path));
}
Ok(AflSink {
path,
args,
workdir,
input_channel,
input_file: (input_file, input_file_path),
forkserver_sid: None,
bitmap: Bitmap::new_in_shm(BITMAP_DEFAULT_MAP_SIZE, 0x00),
send_fd: None,
receive_fd: None,
log_stdout,
log_stderr,
stdout_file,
stderr_file,
config: config.cloned(),
bitmap_was_resize: false,
})
}
pub fn from_config(config: &Config, id: Option<usize>) -> Result<AflSink> {
let config_new = config.clone();
let mut workdir = config_new.general.work_dir.clone();
workdir.push(
id.map(|id| id.to_string())
.unwrap_or_else(|| "0".to_owned()),
);
let sink = AflSink::new(
config_new.sink.bin_path,
config_new.sink.arguments,
workdir,
config_new.sink.input_type,
Some(config),
config.sink.log_stdout,
config.sink.log_stderr,
)?;
Ok(sink)
}
/// Wait for the given duration for the forkserver read fd to become ready.
/// Returns Ok(()) if data becomes ready during the given `timeout`, else
/// a `CommunicationTimeoutError`.
///
/// # Error
///
/// Returns an Error if an unexpected error occurs.
fn wait_for_data(&self, timeout: Duration) -> Result<()> {
let pollfd = filedescriptor::pollfd {
fd: self.receive_fd.unwrap(),
events: filedescriptor::POLLIN,
revents: 0,
};
let mut pollfds = [pollfd];
let nready = filedescriptor::poll(&mut pollfds, Some(timeout));
match nready {
Ok(1) => Ok(()),
Ok(0) => Err(SinkError::CommunicationTimeoutError(format!(
"Did not received data after {:?}",
timeout
))
.into()),
Ok(n) => {
unreachable!("Unexpected return value: {}", n);
}
Err(ref err) => {
if let filedescriptor::Error::Poll(err) = err {
if err.kind() == io::ErrorKind::Interrupted {
return self.wait_for_data(timeout);
}
}
Err(SinkError::FatalError(format!("Failed to poll fd: {:#?}", err)).into())
}
}
}
pub fn start(&mut self) -> Result<()> {
// send_pipe[1](we) -> send_pipe[0](forkserver).
let send_pipe = [0i32; 2];
// receive_pipe[1](forkserver) -> receive_pipe[0](we).
let receive_pipe = [0i32; 2];
// Create pipe for communicating with the forkserver.
unsafe {
let ret = libc::pipe(send_pipe.as_ptr() as *mut i32);
assert_eq!(ret, 0);
let ret = libc::pipe(receive_pipe.as_ptr() as *mut i32);
assert_eq!(ret, 0);
}
self.send_fd = Some(send_pipe[1]);
let child_receive_fd = send_pipe[0];
self.receive_fd = Some(receive_pipe[0]);
let child_send_fd = receive_pipe[1];
let child_pid = unsafe { libc::fork() };
match child_pid {
-1 => return Err(anyhow!("Fork failed!")),
0 => {
/*
Child
Be aware that we are forking a potentially multithreaded application
here. Since fork() only copies the calling thread, the environment
might be left in a dirty state because of, e.g., mutexes that were
locked at the time fork was called.
Because of this it is only safe to call async-signal-safe functions
(https://man7.org/linux/man-pages/man7/signal-safety.7.html).
Note that logging functions (debug!, ...) often internally use mutexes
to lock the output buffer, thus using logging here is forbidden
and likely causes deadlocks.
*/
let map_shm_id = self.bitmap.shm_id();
unsafe {
let ret = libc::setsid();
assert!(ret >= 0);
}
// Setup args
let path =
self.path.to_str().map(|s| s.to_owned()).ok_or_else(|| {
SinkError::Other(anyhow!("Invalid UTF-8 character in path"))
})?;
let mut args = self.args.clone();
args.insert(0, path.clone());
let argv_nonref: Vec<CString> = args
.iter()
.map(|arg| CString::new(arg.as_bytes()).unwrap())
.collect();
let mut argv: Vec<*const c_char> =
argv_nonref.iter().map(|arg| arg.as_ptr()).collect();
argv.push(std::ptr::null());
// Setup environment
let mut envp: Vec<*const c_char> = Vec::new();
let shm_env_var =
CString::new(format!("{}={}", AFL_SHM_ENV_VAR_NAME, map_shm_id)).unwrap();
envp.push(shm_env_var.as_ptr());
let mut env_from_config = Vec::new();
if let Some(cfg) = self.config.as_ref() {
cfg.sink.env.iter().for_each(|var| {
env_from_config
.push(CString::new(format!("{}={}", var.0, var.1).as_bytes()).unwrap())
})
}
let afl_maps_size =
CString::new(format!("AFL_MAP_SIZE={}", self.bitmap().size())).unwrap();
envp.push(afl_maps_size.as_bytes().as_ptr() as *const i8);
env_from_config.iter().for_each(|e| {
envp.push(e.as_bytes().as_ptr() as *const i8);
});
envp.push(std::ptr::null());
let dev_null_fd = unsafe {
let path = CString::new("/dev/null".as_bytes()).unwrap();
libc::open(path.as_ptr(), libc::O_RDONLY)
};
if dev_null_fd < 0 {
panic!("Failed to open /dev/null");
}
match self.input_channel {
InputChannel::Stdin => unsafe {
libc::dup2(self.input_file.0.as_raw_fd(), 0);
},
_ => unsafe {
libc::dup2(dev_null_fd, 0);
},
}
if self.log_stdout {
// unsafe {
// let fd = self.stdout_file.as_ref().unwrap().0.as_raw_fd();
// libc::dup2(fd, libc::STDOUT_FILENO);
// libc::close(fd);
// }
} else {
unsafe {
libc::dup2(dev_null_fd, libc::STDOUT_FILENO);
}
}
if self.log_stderr | else {
unsafe {
libc::dup2(dev_null_fd, libc::STDERR_FILENO);
}
}
unsafe {
libc::close(dev_null_fd);
}
unsafe {
// Close the pipe ends used by our parent.
libc::close(self.receive_fd.unwrap());
libc::close(self.send_fd.unwrap());
// Remap fds to the ones used by the forkserver.
// The fds might have by chance the correct value, in this case
// dup2 & close would actually cause us to close the fd we intended to pass.
if child_receive_fd != AFL_READ_FROM_PARENT_FD {
let ret = libc::dup2(child_receive_fd, AFL_READ_FROM_PARENT_FD);
assert!(ret >= 0);
libc::close(child_receive_fd);
}
if child_send_fd != AFL_WRITE_TO_PARENT_FD {
let ret = libc::dup2(child_send_fd, AFL_WRITE_TO_PARENT_FD);
assert!(ret >= 0);
libc::close(child_send_fd);
}
}
unsafe {
if !self.log_stdout && !self.log_stderr {
// if we log stderr or stdout, the limit will cause our
// fuzzer to fail after some time.
let mut rlim: libc::rlimit = std::mem::zeroed();
rlim.rlim_cur = n_mib_bytes!(512).try_into().unwrap();
rlim.rlim_max = n_mib_bytes!(512).try_into().unwrap();
let ret = libc::setrlimit(libc::RLIMIT_FSIZE, &rlim as *const libc::rlimit);
assert_eq!(ret, 0);
}
// Disable core dumps
let limit_val: libc::rlimit = std::mem::zeroed();
let ret = libc::setrlimit(libc::RLIMIT_CORE, &limit_val);
assert_eq!(ret, 0);
// Max AS size.
let mut rlim: libc::rlimit = std::mem::zeroed();
rlim.rlim_cur = n_mib_bytes!(512).try_into().unwrap();
rlim.rlim_max = n_mib_bytes!(512).try_into().unwrap();
let ret = libc::setrlimit(libc::RLIMIT_AS, &rlim as *const libc::rlimit);
assert_eq!(ret, 0);
let ret = libc::personality(libc::ADDR_NO_RANDOMIZE as u64);
assert_eq!(ret, 0);
}
if let Err(err) = self.drop_privileges() {
log::error!("Failed to drop privileges: {:#?}", err);
panic!();
}
// Make sure that UID == EUID, since if this is not the case,
// ld will ignore LD_PRELOAD which we need to use for targets
// that normally load instrumented libraries during runtime.
assert_eq!(nix::unistd::getuid(), nix::unistd::geteuid());
assert_eq!(nix::unistd::getgid(), nix::unistd::getegid());
let prog = CString::new(path.as_bytes()).unwrap();
unsafe {
libc::execve(prog.as_ptr(), argv.as_ptr(), envp.as_ptr());
}
unreachable!("Failed to call execve on '{}'", path);
}
_ => { /* The parent */ }
}
/* The parent */
log::info!("Forkserver has pid {}", child_pid);
// Note the sid so that we can kill the child later.
// This is a sid since the child calls setsid().
self.forkserver_sid = Some(child_pid);
// Close the pipe ends used by the child.
unsafe {
libc::close(child_receive_fd);
libc::close(child_send_fd);
}
unsafe {
libc::fcntl(self.send_fd.unwrap(), libc::F_SETFD, libc::FD_CLOEXEC);
libc::fcntl(self.receive_fd.unwrap(), libc::F_SETFD, libc::FD_CLOEXEC);
}
// Wait for the hello from the child.
self.wait_for_data(AFL_DEFAULT_TIMEOUT)
.context("Timeout while waiting for forkserver to come up.")?;
// Read the available data.
let buffer = [0u8; 4];
unsafe {
let ret = libc::read(
self.receive_fd.unwrap(),
buffer.as_ptr() as *mut libc::c_void,
4,
);
if ret != 4 {
return Err(anyhow!(format!(
"Failed to do handshake with forkserver. ret={}",
ret
)));
}
// Process extended attributes used by AFL++.
// See src/afl-forkserver.c:689 (afl_fsrv_start)
let status = u32::from_ne_bytes(buffer);
log::info!("Forkserver status: 0x{:x}", status);
if status & FS_OPT_MAPSIZE == FS_OPT_MAPSIZE {
log::info!("Got extended option FS_OPT_MAPSIZE from forkserver");
let new_map_size = ((status & 0x00fffffe) >> 1) + 1;
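// Worked example (illustrative): a status word of 0x40000800 sets
// FS_OPT_MAPSIZE and encodes ((0x40000800 & 0x00fffffe) >> 1) + 1 = 1025
// bytes, which is rounded up to the next power of two (2048) below.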
log::info!("Target requests a map of size {} bytes", new_map_size);
log::info!("Current map size is {} bytes", self.bitmap().size());
if self.bitmap_was_resize {
log::info!("Already resized, skipping....");
return Ok(());
}
let new_map_size = new_map_size.next_power_of_two() as usize;
if new_map_size > self.bitmap().size() {
log::info!("Resizing bitmap to {} bytes", new_map_size);
self.stop();
let new_map = Bitmap::new_in_shm(new_map_size, 0x00);
let _ = mem::replace(self.bitmap(), new_map);
self.bitmap_was_resize = true;
return self.start();
}
}
}
// if self.stdout_file.is_some() {
// // Take the the stdout file thus its fd gets dropped.
// self.stdout_file.take();
// }
// if self.stderr_file.is_some() {
// // Take the the stderr file thus its fd gets dropped.
// self.stderr_file.take();
// }
// We are ready to fuzz!
Ok(())
}
fn drop_privileges(&mut self) -> Result<()> {
let uid_gid = self
.config
.as_ref()
.map(|config| config.general.jail_uid_gid())
.unwrap_or(None);
if uid_gid.is_some() {
jail::acquire_privileges()?;
}
if let Some((uid, gid)) = uid_gid {
jail::drop_privileges(uid, gid, true)?;
}
Ok(())
}
/// Stops the forkserver. Must be called before calling start() again.
/// It is safe to call this function multiple times.
pub fn stop(&mut self) {
if let Some(sid) = self.forkserver_sid.take() {
unsafe {
libc::close(self.send_fd.unwrap());
libc::close(self.receive_fd.unwrap());
let ret = libc::killpg(sid, SIGKILL);
assert!(ret == 0);
// reap it
libc::waitpid(sid, std::ptr::null_mut() as *mut libc::c_int, 0);
}
}
}
/// Write the given bytes into the sink's input channel. This function
/// is only allowed to be called on sinks with InputChannel::Stdin or InputChannel::File
/// input channel.
pub fn write(&mut self, data: &[u8]) {
debug_assert!(
self.input_channel == InputChannel::Stdin || self.input_channel == InputChannel::File
);
self.input_file.0.seek(SeekFrom::Start(0)).unwrap();
self.input_file.0.set_len(0).unwrap();
self.input_file.0.write_all(data).unwrap();
self.input_file.0.seek(SeekFrom::Start(0)).unwrap();
self.input_file.0.sync_all().unwrap();
}
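// Typical call sequence (illustrative, assuming a started sink):
//
//     sink.write(b"fuzzing input");
//     let result = sink.run(Duration::from_millis(500))?;
//     assert!(matches!(result, RunResult::Terminated(_)));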
pub fn run(&mut self, timeout: Duration) -> Result<RunResult> {
self.bitmap().reset();
let buffer = [0u8; 4];
let buf_ptr = buffer.as_ptr() as *mut libc::c_void;
// Tell the forkserver to fork.
log::trace!("Requesting fork");
let ret = repeat_on_interrupt(|| unsafe { libc::write(self.send_fd.unwrap(), buf_ptr, 4) });
if ret != 4 {
error!("Fork request failed");
return Err(anyhow!("Failed to write to send_fd: {}", ret));
}
log::trace!("Waiting for child pid");
self.wait_for_data(AFL_DEFAULT_TIMEOUT)
.context("Failed to retrive child pid from forkserver")?;
let ret =
repeat_on_interrupt(|| unsafe { libc::read(self.receive_fd.unwrap(), buf_ptr, 4) });
if ret != 4 {
error!("Failed to retrive child pid");
return Err(anyhow!("Failed to read from receive_non_blocking_fd"));
}
let child_pid = i32::from_le_bytes(buffer);
log::trace!("Got child pid {}", child_pid);
if child_pid <= 0 {
log::error!("Child pid '{}' is invalid", child_pid);
return Err(anyhow!(
"Failed to parse child_pid. child_pid={}, bytes={:?}",
child_pid,
buffer
));
}
log::trace!("Waiting for child termination");
match self.wait_for_data(timeout) {
Ok(_) => (),
Err(err) => {
log::trace!("Child timed out: {:#?}", err);
// Kill the child since it appears to have timed out.
let kill_ret = nix::sys::signal::kill(
nix::unistd::Pid::from_raw(child_pid),
nix::sys::signal::SIGKILL,
);
if let Err(ref err) = kill_ret {
// This might just be caused by the fact that the child won the race
// and terminated before we killed it.
log::trace!("Failed to kill child: {:#?}", err);
}
if let Err(err) = self
.wait_for_data(AFL_DEFAULT_TIMEOUT)
.context("Child did not acknowledge termination request")
{
let reason = try_get_child_exit_reason(self.forkserver_sid.unwrap());
log::error!(
"Exit reason: {:#?}, child_pid={:?}, kill_ret={:?}",
reason,
child_pid,
kill_ret
);
return Err(err.context(format!("child_exit_reason={:#?}", reason)));
}
// Consume exit status.
let ret = unsafe { libc::read(self.receive_fd.unwrap(), buf_ptr, 4) };
if ret != 4 {
log::error!("Expected {} != 4", ret);
}
return Ok(RunResult::TimedOut);
}
}
log::trace!("Child terminated, getting exit status");
let ret =
repeat_on_interrupt(|| unsafe { libc::read(self.receive_fd.unwrap(), buf_ptr, 4) });
if ret != 4 {
error!("Failed to get exit status");
return Err(anyhow!("Failed to read from receive_non_blocking_fd"));
}
let exit_status = i32::from_le_bytes(buffer);
log::trace!("Child status is {}", exit_status);
if libc::WIFEXITED(exit_status) {
Ok(RunResult::Terminated(libc::WEXITSTATUS(exit_status)))
} else if libc::WIFSIGNALED(exit_status) {
let signal = libc::WTERMSIG(exit_status);
let signal = match Signal::try_from(signal) {
Ok(s) => s,
Err(e) => {
error!(
"Failed to parse signal code {}. Error: {:?}. Using dummy signal SIGUSR2",
signal, e
);
// Some dummy signal type.
Signal::SIGUSR2
}
};
Ok(RunResult::Signalled(signal))
} else {
unreachable!();
}
}
pub fn bitmap(&mut self) -> &mut Bitmap {
&mut self.bitmap
}
}
impl Drop for AflSink {
fn drop(&mut self) {
self.stop();
}
}
| {
//unsafe {
// let fd = self.stderr_file.as_ref().unwrap().0.as_raw_fd();
// libc::dup2(fd, libc::STDERR_FILENO);
// libc::close(fd);
//}
} | conditional_block |
sink.rs | use anyhow::{anyhow, Context, Result};
use byte_unit::n_mib_bytes;
use fuzztruction_shared::util::try_get_child_exit_reason;
use log::error;
use std::env::set_current_dir;
use std::os::unix::prelude::AsRawFd;
use std::{
convert::TryFrom,
ffi::CString,
fs::{File, OpenOptions},
io::{Seek, SeekFrom, Write},
ops::*,
path::PathBuf,
};
use std::{fs, io, mem};
use std::{os::raw::c_char, time::Duration};
use thiserror::Error;
use libc::SIGKILL;
use mktemp::Temp;
use nix::sys::signal::Signal;
use crate::config::Config;
use crate::io_channels::InputChannel;
use crate::sink_bitmap::{Bitmap, BITMAP_DEFAULT_MAP_SIZE};
use filedescriptor;
const FS_OPT_MAPSIZE: u32 = 0x40000000;
// FDs used by the forkserver to communicate with us.
// Hardcoded in AFLs config.h.
const FORKSRV_FD: i32 = 198;
const AFL_READ_FROM_PARENT_FD: i32 = FORKSRV_FD;
const AFL_WRITE_TO_PARENT_FD: i32 = FORKSRV_FD + 1;
const AFL_SHM_ENV_VAR_NAME: &str = "__AFL_SHM_ID";
const AFL_DEFAULT_TIMEOUT: Duration = Duration::from_millis(10000);
fn repeat_on_interrupt<F, R>(f: F) -> R
where
F: Fn() -> R,
R: TryInto<libc::c_int> + Clone,
{
loop {
let ret = f();
if ret.clone().try_into().unwrap_or(0) != -libc::EINTR {
return ret;
} else {
log::trace!("Repeating call because of EINTR");
}
}
}
/// Type used to represent error conditions of the source.
#[derive(Error, Debug)]
pub enum SinkError {
#[error("The workdir '{0}' already exists.")]
WorkdirExists(String),
#[error("Fatal error occurred: {0}")]
FatalError(String),
#[error("Exceeded timeout while waiting for data: {0}")]
CommunicationTimeoutError(String),
#[error(transparent)]
Other(#[from] anyhow::Error),
}
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
pub enum RunResult {
Terminated(i32),
Signalled(Signal),
TimedOut,
}
#[derive(Debug)]
pub struct AflSink {
/// That file system path to the target binary.
path: PathBuf,
/// The arguments passed to the binary.
args: Vec<String>,
/// Workdir
#[allow(unused)]
workdir: PathBuf,
/// Description of how the target binary consumes fuzzing input.
input_channel: InputChannel,
/// The file that is used to pass input to the target.
input_file: (File, PathBuf),
/// The session id of the forkserver we are communicating with.
forkserver_sid: Option<i32>,
/// The bitmap used to compute coverage.
bitmap: Bitmap,
/// The fd used to send data to the forkserver.
send_fd: Option<i32>,
/// Non blocking fd used to receive data from the forkserver.
receive_fd: Option<i32>,
#[allow(unused)]
stdout_file: Option<(File, PathBuf)>,
#[allow(unused)]
stderr_file: Option<(File, PathBuf)>,
/// Whether to log the output written to stdout. If false, the output is discarded.
log_stdout: bool,
/// Whether to log the output written to stderr. If false, the output is discarded.
log_stderr: bool,
config: Option<Config>,
bitmap_was_resize: bool,
}
impl AflSink {
pub fn new(
path: PathBuf,
mut args: Vec<String>,
mut workdir: PathBuf,
input_channel: InputChannel,
config: Option<&Config>,
log_stdout: bool,
log_stderr: bool,
) -> Result<AflSink> {
workdir.push("sink");
// Create the file into which we write input data before execution.
fs::create_dir_all(&workdir)?;
set_current_dir(&workdir)?;
let tmpfile_path = Temp::new_file_in(&workdir).unwrap().to_path_buf();
let mut input_file_path = String::from(tmpfile_path.to_str().unwrap());
input_file_path.push_str("_input");
let input_file_path = PathBuf::from(input_file_path);
let input_file = OpenOptions::new()
.read(true)
.write(true)
.create(true)
.truncate(true)
.open(&input_file_path)?;
// Replace the @@ marker in the args with the actual file path (if input type is File).
if input_channel == InputChannel::File {
if let Some(elem) = args.iter_mut().find(|e| **e == "@@") {
*elem = input_file_path.to_str().unwrap().to_owned();
} else {
return Err(anyhow!(format!("No @@ marker in args, even though the input channel is defined as file. args: {:#?}", args)));
}
}
let mut stdout_file = None;
if log_stdout {
// Setup file for stdout logging.
let mut path = workdir.clone();
path.push("stdout");
let file = OpenOptions::new()
.read(true)
.write(true)
.create(true)
.truncate(true)
.open(&path)
.unwrap();
stdout_file = Some((file, path));
}
let mut stderr_file = None;
if log_stderr {
// Setup file for stderr logging.
let mut path = workdir.clone();
path.push("stderr");
let file = OpenOptions::new()
.read(true)
.write(true)
.create(true)
.truncate(true)
.open(&path)
.unwrap();
stderr_file = Some((file, path));
}
Ok(AflSink {
path,
args,
workdir,
input_channel,
input_file: (input_file, input_file_path),
forkserver_sid: None,
bitmap: Bitmap::new_in_shm(BITMAP_DEFAULT_MAP_SIZE, 0x00),
send_fd: None,
receive_fd: None,
log_stdout,
log_stderr,
stdout_file,
stderr_file,
config: config.cloned(),
bitmap_was_resize: false,
})
}
pub fn from_config(config: &Config, id: Option<usize>) -> Result<AflSink> {
let config_new = config.clone();
let mut workdir = config_new.general.work_dir.clone();
workdir.push(
id.map(|id| id.to_string())
.unwrap_or_else(|| "0".to_owned()),
);
let sink = AflSink::new(
config_new.sink.bin_path,
config_new.sink.arguments,
workdir,
config_new.sink.input_type,
Some(config),
config.sink.log_stdout,
config.sink.log_stderr,
)?;
Ok(sink)
}
/// Wait for the given duration for the forkserver read fd to become ready.
/// Returns Ok(()) if data becomes ready during the given `timeout`, else
/// a `CommunicationTimeoutError`.
///
/// # Error
///
/// Returns an Error if an unexpected error occurs.
fn wait_for_data(&self, timeout: Duration) -> Result<()> {
let pollfd = filedescriptor::pollfd {
fd: self.receive_fd.unwrap(),
events: filedescriptor::POLLIN,
revents: 0,
};
let mut pollfds = [pollfd];
let nready = filedescriptor::poll(&mut pollfds, Some(timeout));
match nready {
Ok(1) => Ok(()),
Ok(0) => Err(SinkError::CommunicationTimeoutError(format!(
"Did not received data after {:?}",
timeout
))
.into()),
Ok(n) => {
unreachable!("Unexpected return value: {}", n);
}
Err(ref err) => {
if let filedescriptor::Error::Poll(err) = err {
if err.kind() == io::ErrorKind::Interrupted {
return self.wait_for_data(timeout);
}
}
Err(SinkError::FatalError(format!("Failed to poll fd: {:#?}", err)).into())
}
}
}
pub fn start(&mut self) -> Result<()> {
// send_pipe[1](we) -> send_pipe[0](forkserver).
let send_pipe = [0i32; 2];
// receive_pipe[1](forkserver) -> receive_pipe[0](we).
let receive_pipe = [0i32; 2];
// Create pipe for communicating with the forkserver.
unsafe {
let ret = libc::pipe(send_pipe.as_ptr() as *mut i32);
assert_eq!(ret, 0);
let ret = libc::pipe(receive_pipe.as_ptr() as *mut i32);
assert_eq!(ret, 0);
}
self.send_fd = Some(send_pipe[1]);
let child_receive_fd = send_pipe[0];
self.receive_fd = Some(receive_pipe[0]);
let child_send_fd = receive_pipe[1];
let child_pid = unsafe { libc::fork() };
match child_pid {
-1 => return Err(anyhow!("Fork failed!")),
0 => {
/*
Child
Be aware that we are forking a potentially multithreaded application
here. Since fork() only copies the calling thread, the environment
might be left in a dirty state because of, e.g., mutexes that were
locked at the time fork was called.
Because of this it is only safe to call async-signal-safe functions
(https://man7.org/linux/man-pages/man7/signal-safety.7.html).
Note that logging functions (debug!, ...) often internally use mutexes
to lock the output buffer, thus using logging here is forbidden
and likely causes deadlocks.
*/
let map_shm_id = self.bitmap.shm_id();
unsafe {
let ret = libc::setsid();
assert!(ret >= 0);
}
// Setup args
let path =
self.path.to_str().map(|s| s.to_owned()).ok_or_else(|| {
SinkError::Other(anyhow!("Invalid UTF-8 character in path"))
})?;
let mut args = self.args.clone();
args.insert(0, path.clone());
let argv_nonref: Vec<CString> = args
.iter()
.map(|arg| CString::new(arg.as_bytes()).unwrap())
.collect();
let mut argv: Vec<*const c_char> =
argv_nonref.iter().map(|arg| arg.as_ptr()).collect();
argv.push(std::ptr::null());
// Setup environment
let mut envp: Vec<*const c_char> = Vec::new();
let shm_env_var =
CString::new(format!("{}={}", AFL_SHM_ENV_VAR_NAME, map_shm_id)).unwrap();
envp.push(shm_env_var.as_ptr());
let mut env_from_config = Vec::new();
if let Some(cfg) = self.config.as_ref() {
cfg.sink.env.iter().for_each(|var| {
env_from_config
.push(CString::new(format!("{}={}", var.0, var.1).as_bytes()).unwrap())
})
}
let afl_maps_size =
CString::new(format!("AFL_MAP_SIZE={}", self.bitmap().size())).unwrap();
envp.push(afl_maps_size.as_bytes().as_ptr() as *const i8);
env_from_config.iter().for_each(|e| {
envp.push(e.as_bytes().as_ptr() as *const i8);
});
envp.push(std::ptr::null());
let dev_null_fd = unsafe {
let path = CString::new("/dev/null".as_bytes()).unwrap();
libc::open(path.as_ptr(), libc::O_RDONLY)
};
if dev_null_fd < 0 {
panic!("Failed to open /dev/null");
}
match self.input_channel {
InputChannel::Stdin => unsafe {
libc::dup2(self.input_file.0.as_raw_fd(), 0);
},
_ => unsafe {
libc::dup2(dev_null_fd, 0);
},
}
if self.log_stdout {
// unsafe {
// let fd = self.stdout_file.as_ref().unwrap().0.as_raw_fd();
// libc::dup2(fd, libc::STDOUT_FILENO);
// libc::close(fd);
// }
} else {
unsafe {
libc::dup2(dev_null_fd, libc::STDOUT_FILENO);
}
}
if self.log_stderr {
//unsafe {
// let fd = self.stderr_file.as_ref().unwrap().0.as_raw_fd();
// libc::dup2(fd, libc::STDERR_FILENO);
// libc::close(fd);
//}
} else {
unsafe {
libc::dup2(dev_null_fd, libc::STDERR_FILENO);
}
}
unsafe {
libc::close(dev_null_fd);
}
unsafe {
// Close the pipe ends used by our parent.
libc::close(self.receive_fd.unwrap());
libc::close(self.send_fd.unwrap());
// Remap fds to the ones used by the forkserver.
// The fds might have by chance the correct value, in this case
// dup2 & close would actually cause us to close the fd we intended to pass.
if child_receive_fd != AFL_READ_FROM_PARENT_FD {
let ret = libc::dup2(child_receive_fd, AFL_READ_FROM_PARENT_FD);
assert!(ret >= 0);
libc::close(child_receive_fd);
}
if child_send_fd != AFL_WRITE_TO_PARENT_FD {
let ret = libc::dup2(child_send_fd, AFL_WRITE_TO_PARENT_FD);
assert!(ret >= 0);
libc::close(child_send_fd);
}
}
unsafe {
if !self.log_stdout && !self.log_stderr {
// if we log stderr or stdout, the limit will cause our
// fuzzer to fail after some time.
let mut rlim: libc::rlimit = std::mem::zeroed();
rlim.rlim_cur = n_mib_bytes!(512).try_into().unwrap();
rlim.rlim_max = n_mib_bytes!(512).try_into().unwrap();
let ret = libc::setrlimit(libc::RLIMIT_FSIZE, &rlim as *const libc::rlimit);
assert_eq!(ret, 0);
}
// Disable core dumps
let limit_val: libc::rlimit = std::mem::zeroed();
let ret = libc::setrlimit(libc::RLIMIT_CORE, &limit_val);
assert_eq!(ret, 0);
// Max AS size.
let mut rlim: libc::rlimit = std::mem::zeroed();
rlim.rlim_cur = n_mib_bytes!(512).try_into().unwrap();
rlim.rlim_max = n_mib_bytes!(512).try_into().unwrap();
let ret = libc::setrlimit(libc::RLIMIT_AS, &rlim as *const libc::rlimit);
assert_eq!(ret, 0);
let ret = libc::personality(libc::ADDR_NO_RANDOMIZE as u64);
assert_eq!(ret, 0);
}
if let Err(err) = self.drop_privileges() {
log::error!("Failed to drop privileges: {:#?}", err);
panic!();
}
// Make sure that UID == EUID, since if this is not the case,
// ld will ignore LD_PRELOAD which we need to use for targets
// that normally load instrumented libraries during runtime.
assert_eq!(nix::unistd::getuid(), nix::unistd::geteuid());
assert_eq!(nix::unistd::getgid(), nix::unistd::getegid());
let prog = CString::new(path.as_bytes()).unwrap();
unsafe {
libc::execve(prog.as_ptr(), argv.as_ptr(), envp.as_ptr());
}
unreachable!("Failed to call execve on '{}'", path);
}
_ => { /* The parent */ }
}
/* The parent */
log::info!("Forkserver has pid {}", child_pid);
// Note the sid so that we can kill the child later.
// This is a sid since the child calls setsid().
self.forkserver_sid = Some(child_pid);
// Close the pipe ends used by the child.
unsafe {
libc::close(child_receive_fd);
libc::close(child_send_fd);
}
unsafe {
libc::fcntl(self.send_fd.unwrap(), libc::F_SETFD, libc::FD_CLOEXEC);
libc::fcntl(self.receive_fd.unwrap(), libc::F_SETFD, libc::FD_CLOEXEC);
}
// Wait for the hello from the child.
self.wait_for_data(AFL_DEFAULT_TIMEOUT)
.context("Timeout while waiting for forkserver to come up.")?;
// Read the available data.
let buffer = [0u8; 4];
unsafe {
let ret = libc::read(
self.receive_fd.unwrap(),
buffer.as_ptr() as *mut libc::c_void,
4,
);
if ret != 4 {
return Err(anyhow!(format!(
"Failed to do handshake with forkserver. ret={}",
ret
)));
}
// Process extended attributes used by AFL++.
// See src/afl-forkserver.c:689 (afl_fsrv_start)
let status = u32::from_ne_bytes(buffer);
log::info!("Forkserver status: 0x{:x}", status);
if status & FS_OPT_MAPSIZE == FS_OPT_MAPSIZE {
log::info!("Got extended option FS_OPT_MAPSIZE from forkserver");
let new_map_size = ((status & 0x00fffffe) >> 1) + 1;
log::info!("Target requests a map of size {} bytes", new_map_size);
log::info!("Current map size is {} bytes", self.bitmap().size());
if self.bitmap_was_resize {
log::info!("Already resized, skipping....");
return Ok(());
}
let new_map_size = new_map_size.next_power_of_two() as usize;
if new_map_size > self.bitmap().size() {
log::info!("Resizing bitmap to {} bytes", new_map_size);
self.stop();
let new_map = Bitmap::new_in_shm(new_map_size, 0x00);
let _ = mem::replace(self.bitmap(), new_map);
self.bitmap_was_resize = true;
return self.start();
}
}
}
// if self.stdout_file.is_some() {
// // Take the the stdout file thus its fd gets dropped.
// self.stdout_file.take();
// }
// if self.stderr_file.is_some() {
// // Take the the stderr file thus its fd gets dropped.
// self.stderr_file.take();
// }
// We are ready to fuzz!
Ok(())
}
fn drop_privileges(&mut self) -> Result<()> {
let uid_gid = self
.config
.as_ref()
.map(|config| config.general.jail_uid_gid())
.unwrap_or(None);
if uid_gid.is_some() {
jail::acquire_privileges()?;
}
if let Some((uid, gid)) = uid_gid {
jail::drop_privileges(uid, gid, true)?;
}
Ok(())
}
/// Stops the forkserver. Must be called before calling start() again.
/// It is safe to call this function multiple times.
pub fn stop(&mut self) {
if let Some(sid) = self.forkserver_sid.take() {
unsafe {
libc::close(self.send_fd.unwrap());
libc::close(self.receive_fd.unwrap());
let ret = libc::killpg(sid, SIGKILL);
assert!(ret == 0);
// reap it
libc::waitpid(sid, std::ptr::null_mut() as *mut libc::c_int, 0);
}
}
}
/// Write the given bytes into the sinks input channel. This function
/// is only allowed to be called on sinks with InputChannel::Stdin or InputChannel::File
/// input channel.
pub fn | (&mut self, data: &[u8]) {
debug_assert!(
self.input_channel == InputChannel::Stdin || self.input_channel == InputChannel::File
);
self.input_file.0.seek(SeekFrom::Start(0)).unwrap();
self.input_file.0.set_len(0).unwrap();
self.input_file.0.write_all(data).unwrap();
self.input_file.0.seek(SeekFrom::Start(0)).unwrap();
self.input_file.0.sync_all().unwrap();
}
pub fn run(&mut self, timeout: Duration) -> Result<RunResult> {
self.bitmap().reset();
let mut buffer = [0u8; 4];
let buf_ptr = buffer.as_mut_ptr() as *mut libc::c_void;
// Tell the forkserver to fork.
log::trace!("Requesting fork");
let ret = repeat_on_interrupt(|| unsafe { libc::write(self.send_fd.unwrap(), buf_ptr, 4) });
if ret != 4 {
error!("Fork request failed");
return Err(anyhow!("Failed to write to send_fd: {}", ret));
}
log::trace!("Waiting for child pid");
self.wait_for_data(AFL_DEFAULT_TIMEOUT)
.context("Failed to retrive child pid from forkserver")?;
let ret =
repeat_on_interrupt(|| unsafe { libc::read(self.receive_fd.unwrap(), buf_ptr, 4) });
if ret != 4 {
error!("Failed to retrive child pid");
return Err(anyhow!("Failed to read from receive_non_blocking_fd"));
}
let child_pid = i32::from_le_bytes(buffer);
log::trace!("Got child pid {}", child_pid);
if child_pid <= 0 {
log::error!("Child pid '{}' is invalid", child_pid);
return Err(anyhow!(
"Failed to parse child_pid. child_pid={}, bytes={:?}",
child_pid,
buffer
));
}
log::trace!("Waiting for child termination");
match self.wait_for_data(timeout) {
Ok(_) => (),
Err(err) => {
log::trace!("Child timed out: {:#?}", err);
// Kill the child since it appears to have timed out.
let kill_ret = nix::sys::signal::kill(
nix::unistd::Pid::from_raw(child_pid),
nix::sys::signal::SIGKILL,
);
if let Err(ref err) = kill_ret {
// This might just be caused by the fact that the child won the race
// and terminated before we killed it.
log::trace!("Failed to kill child: {:#?}", err);
}
if let Err(err) = self
.wait_for_data(AFL_DEFAULT_TIMEOUT)
.context("Child did not acknowledge termination request")
{
let reason = try_get_child_exit_reason(self.forkserver_sid.unwrap());
log::error!(
"Exit reason: {:#?}, child_pid={:?}, kill_ret={:?}",
reason,
child_pid,
kill_ret
);
return Err(err.context(format!("child_exit_reason={:#?}", reason)));
}
// Consume exit status.
let ret = unsafe { libc::read(self.receive_fd.unwrap(), buf_ptr, 4) };
if ret != 4 {
log::error!("Expected {} != 4", ret);
}
return Ok(RunResult::TimedOut);
}
}
log::trace!("Child terminated, getting exit status");
let ret =
repeat_on_interrupt(|| unsafe { libc::read(self.receive_fd.unwrap(), buf_ptr, 4) });
if ret != 4 {
error!("Failed to get exit status");
return Err(anyhow!("Failed to read from receive_non_blocking_fd"));
}
let exit_status = i32::from_le_bytes(buffer);
log::trace!("Child status is {}", exit_status);
if libc::WIFEXITED(exit_status) {
Ok(RunResult::Terminated(libc::WEXITSTATUS(exit_status)))
} else if libc::WIFSIGNALED(exit_status) {
let signal = libc::WTERMSIG(exit_status);
let signal = match Signal::try_from(signal) {
Ok(s) => s,
Err(e) => {
error!(
"Failed to parse signal code {}. Error: {:?}. Using dummy signal SIGUSR2",
signal, e
);
// Some dummy signal type.
Signal::SIGUSR2
}
};
Ok(RunResult::Signalled(signal))
} else {
unreachable!();
}
}
pub fn bitmap(&mut self) -> &mut Bitmap {
&mut self.bitmap
}
}
impl Drop for AflSink {
fn drop(&mut self) {
self.stop();
}
}
| write | identifier_name |
sink.rs | use anyhow::{anyhow, Context, Result};
use byte_unit::n_mib_bytes;
use fuzztruction_shared::util::try_get_child_exit_reason;
use log::error;
use std::env::set_current_dir;
use std::os::unix::prelude::AsRawFd;
use std::{
convert::TryFrom,
ffi::CString,
fs::{File, OpenOptions},
io::{Seek, SeekFrom, Write},
ops::*,
path::PathBuf,
};
use std::{fs, io, mem};
use std::{os::raw::c_char, time::Duration};
use thiserror::Error;
use libc::SIGKILL;
use mktemp::Temp;
use nix::sys::signal::Signal;
use crate::config::Config;
use crate::io_channels::InputChannel;
use crate::sink_bitmap::{Bitmap, BITMAP_DEFAULT_MAP_SIZE};
use filedescriptor;
const FS_OPT_MAPSIZE: u32 = 0x40000000;
// FDs used by the forkserver to communicate with us.
// Hardcoded in AFLs config.h.
const FORKSRV_FD: i32 = 198;
const AFL_READ_FROM_PARENT_FD: i32 = FORKSRV_FD;
const AFL_WRITE_TO_PARENT_FD: i32 = FORKSRV_FD + 1;
const AFL_SHM_ENV_VAR_NAME: &str = "__AFL_SHM_ID";
const AFL_DEFAULT_TIMEOUT: Duration = Duration::from_millis(10000);
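/// Repeats the syscall wrapped by `f` while it reports EINTR, i.e. while the
/// call was interrupted by a signal before completing and can be re-issued.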
fn repeat_on_interrupt<F, R>(f: F) -> R
where
F: Fn() -> R,
R: TryInto<libc::c_int> + Clone,
{
loop {
let ret = f();
if ret.clone().try_into().unwrap_or(0) != -libc::EINTR {
return ret;
} else {
log::trace!("Repeating call because of EINTR");
}
}
}
/// Type used to represent error conditions of the source.
#[derive(Error, Debug)]
pub enum SinkError {
#[error("The workdir '{0}' already exists.")]
WorkdirExists(String),
#[error("Fatal error occurred: {0}")]
FatalError(String),
#[error("Exceeded timeout while waiting for data: {0}")]
CommunicationTimeoutError(String),
#[error(transparent)]
Other(#[from] anyhow::Error),
}
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
pub enum RunResult {
Terminated(i32),
Signalled(Signal),
TimedOut,
}
#[derive(Debug)]
pub struct AflSink {
/// That file system path to the target binary.
path: PathBuf,
/// The arguments passed to the binary.
args: Vec<String>,
/// Workdir
#[allow(unused)]
workdir: PathBuf,
/// Description of how the target binary consumes fuzzing input.
input_channel: InputChannel,
/// The file that is used to pass input to the target.
input_file: (File, PathBuf),
/// The session id of the forkserver we are communicating with.
forkserver_sid: Option<i32>,
/// The bitmap used to compute coverage.
bitmap: Bitmap,
/// The fd used to send data to the forkserver.
send_fd: Option<i32>,
/// Non blocking fd used to receive data from the forkserver.
receive_fd: Option<i32>,
#[allow(unused)]
stdout_file: Option<(File, PathBuf)>,
#[allow(unused)]
stderr_file: Option<(File, PathBuf)>,
/// Whether to log the output written to stdout. If false, the output is discarded.
log_stdout: bool,
/// Whether to log the output written to stderr. If false, the output is discarded.
log_stderr: bool,
config: Option<Config>,
bitmap_was_resize: bool,
}
impl AflSink {
pub fn new(
path: PathBuf,
mut args: Vec<String>,
mut workdir: PathBuf,
input_channel: InputChannel,
config: Option<&Config>,
log_stdout: bool,
log_stderr: bool,
) -> Result<AflSink> {
workdir.push("sink");
// Create the file into which we write input data before execution.
fs::create_dir_all(&workdir)?;
set_current_dir(&workdir)?;
let tmpfile_path = Temp::new_file_in(&workdir).unwrap().to_path_buf();
let mut input_file_path = String::from(tmpfile_path.to_str().unwrap());
input_file_path.push_str("_input");
let input_file_path = PathBuf::from(input_file_path);
let input_file = OpenOptions::new()
.read(true)
.write(true)
.create(true)
.truncate(true)
.open(&input_file_path)?;
// Replace the @@ marker in the args with the actual file path (if input type is File).
if input_channel == InputChannel::File {
if let Some(elem) = args.iter_mut().find(|e| **e == "@@") {
*elem = input_file_path.to_str().unwrap().to_owned();
} else {
return Err(anyhow!(format!("No @@ marker in args, even though the input channel is defined as file. args: {:#?}", args)));
}
}
let mut stdout_file = None;
if log_stdout {
// Setup file for stdout logging.
let mut path = workdir.clone();
path.push("stdout");
let file = OpenOptions::new()
.read(true)
.write(true)
.create(true)
.truncate(true)
.open(&path)
.unwrap();
stdout_file = Some((file, path));
}
let mut stderr_file = None;
if log_stderr {
// Setup file for stderr logging.
let mut path = workdir.clone();
path.push("stderr");
let file = OpenOptions::new()
.read(true)
.write(true)
.create(true)
.truncate(true)
.open(&path)
.unwrap();
stderr_file = Some((file, path));
}
Ok(AflSink {
path,
args,
workdir,
input_channel,
input_file: (input_file, input_file_path),
forkserver_sid: None,
bitmap: Bitmap::new_in_shm(BITMAP_DEFAULT_MAP_SIZE, 0x00),
send_fd: None,
receive_fd: None,
log_stdout,
log_stderr,
stdout_file,
stderr_file,
config: config.cloned(),
bitmap_was_resize: false,
})
}
pub fn from_config(config: &Config, id: Option<usize>) -> Result<AflSink> {
let config_new = config.clone();
let mut workdir = config_new.general.work_dir.clone();
workdir.push(
id.map(|id| id.to_string())
.unwrap_or_else(|| "0".to_owned()),
);
let sink = AflSink::new(
config_new.sink.bin_path,
config_new.sink.arguments,
workdir,
config_new.sink.input_type,
Some(config),
config.sink.log_stdout,
config.sink.log_stderr,
)?;
Ok(sink)
}
/// Wait for the given duration for the forkserver read fd to become ready.
/// Returns Ok(true) if data becomes ready during the given `timeout`, else
/// Ok(false).
///
/// # Error
///
/// Returns an Error if an unexpected error occurs.
fn wait_for_data(&self, timeout: Duration) -> Result<()> {
let pollfd = filedescriptor::pollfd {
fd: self.receive_fd.unwrap(),
events: filedescriptor::POLLIN,
revents: 0,
};
let mut pollfds = [pollfd];
let nready = filedescriptor::poll(&mut pollfds, Some(timeout));
match nready {
Ok(1) => Ok(()),
Ok(0) => Err(SinkError::CommunicationTimeoutError(format!(
"Did not received data after {:?}",
timeout
))
.into()),
Ok(n) => {
unreachable!("Unexpected return value: {}", n);
}
Err(ref err) => {
if let filedescriptor::Error::Poll(err) = err {
if err.kind() == io::ErrorKind::Interrupted {
return self.wait_for_data(timeout);
}
}
Err(SinkError::FatalError(format!("Failed to poll fd: {:#?}", err)).into())
}
}
}
pub fn start(&mut self) -> Result<()> {
// send_pipe[1](we) -> send_pipe[0](forkserver).
let send_pipe = [0i32; 2];
// receive_pipe[1](forkserver) -> receive_pipe[0](we).
let receive_pipe = [0i32; 2];
// Create pipe for communicating with the forkserver.
unsafe {
let ret = libc::pipe(send_pipe.as_ptr() as *mut i32);
assert_eq!(ret, 0);
let ret = libc::pipe(receive_pipe.as_ptr() as *mut i32);
assert_eq!(ret, 0);
}
self.send_fd = Some(send_pipe[1]);
let child_receive_fd = send_pipe[0];
self.receive_fd = Some(receive_pipe[0]);
let child_send_fd = receive_pipe[1];
let child_pid = unsafe { libc::fork() };
match child_pid {
-1 => return Err(anyhow!("Fork failed!")),
0 => {
/*
Child
Be aware that we are forking a potentially multithreaded application
here. Since fork() only copies the calling thread, the environment
might be left in a dirty state because of, e.g., mutexes that were
locked at the time fork was called.
Because of this it is only safe to call async-signal-safe functions
(https://man7.org/linux/man-pages/man7/signal-safety.7.html).
Note that logging functions (debug!, ...) often internally use mutexes
to lock the output buffer, thus using logging here is forbidden
and likely causes deadlocks.
*/
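// (Per signal-safety(7): write(2), dup2(2), close(2), execve(2) and _exit(2)
// are async-signal-safe, while malloc(3) and stdio are not.)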
let map_shm_id = self.bitmap.shm_id();
unsafe {
let ret = libc::setsid();
assert!(ret >= 0);
}
// Setup args
let path =
self.path.to_str().map(|s| s.to_owned()).ok_or_else(|| {
SinkError::Other(anyhow!("Invalid UTF-8 character in path"))
})?;
let mut args = self.args.clone();
args.insert(0, path.clone());
let argv_nonref: Vec<CString> = args
.iter()
.map(|arg| CString::new(arg.as_bytes()).unwrap())
.collect();
let mut argv: Vec<*const c_char> =
argv_nonref.iter().map(|arg| arg.as_ptr()).collect();
argv.push(std::ptr::null());
// Setup environment
let mut envp: Vec<*const c_char> = Vec::new();
let shm_env_var =
CString::new(format!("{}={}", AFL_SHM_ENV_VAR_NAME, map_shm_id)).unwrap();
envp.push(shm_env_var.as_ptr());
let mut env_from_config = Vec::new();
if let Some(cfg) = self.config.as_ref() {
cfg.sink.env.iter().for_each(|var| {
env_from_config
.push(CString::new(format!("{}={}", var.0, var.1).as_bytes()).unwrap())
})
}
let afl_maps_size =
CString::new(format!("AFL_MAP_SIZE={}", self.bitmap().size())).unwrap();
envp.push(afl_maps_size.as_bytes().as_ptr() as *const i8);
env_from_config.iter().for_each(|e| {
envp.push(e.as_bytes().as_ptr() as *const i8);
});
envp.push(std::ptr::null());
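// execve(2) expects argv and envp to be NULL-terminated arrays, hence the
// trailing null pointers pushed above.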
let dev_null_fd = unsafe {
let path = CString::new("/dev/null".as_bytes()).unwrap();
libc::open(path.as_ptr(), libc::O_RDONLY)
};
if dev_null_fd < 0 {
panic!("Failed to open /dev/null");
}
match self.input_channel {
InputChannel::Stdin => unsafe {
libc::dup2(self.input_file.0.as_raw_fd(), 0);
},
_ => unsafe {
libc::dup2(dev_null_fd, 0);
},
}
if self.log_stdout {
// unsafe {
// let fd = self.stdout_file.as_ref().unwrap().0.as_raw_fd();
// libc::dup2(fd, libc::STDOUT_FILENO);
// libc::close(fd);
// }
} else {
unsafe {
libc::dup2(dev_null_fd, libc::STDOUT_FILENO);
}
}
if self.log_stderr {
//unsafe {
// let fd = self.stderr_file.as_ref().unwrap().0.as_raw_fd();
// libc::dup2(fd, libc::STDERR_FILENO);
// libc::close(fd);
//}
} else {
unsafe {
libc::dup2(dev_null_fd, libc::STDERR_FILENO);
}
}
unsafe {
libc::close(dev_null_fd);
}
unsafe {
// Close the pipe ends used by our parent.
libc::close(self.receive_fd.unwrap());
libc::close(self.send_fd.unwrap());
// Remap fds to the ones used by the forkserver.
// The fds might by chance already have the correct value; in that case,
// dup2 & close would actually cause us to close the fd we intended to pass.
if child_receive_fd != AFL_READ_FROM_PARENT_FD {
let ret = libc::dup2(child_receive_fd, AFL_READ_FROM_PARENT_FD);
assert!(ret >= 0);
libc::close(child_receive_fd);
}
if child_send_fd != AFL_WRITE_TO_PARENT_FD {
let ret = libc::dup2(child_send_fd, AFL_WRITE_TO_PARENT_FD);
assert!(ret >= 0);
libc::close(child_send_fd);
}
}
unsafe {
if !self.log_stdout && !self.log_stderr {
// if we log stderr or stdout, the limit will cause our
// fuzzer to fail after some time.
let mut rlim: libc::rlimit = std::mem::zeroed();
rlim.rlim_cur = n_mib_bytes!(512).try_into().unwrap();
rlim.rlim_max = n_mib_bytes!(512).try_into().unwrap();
let ret = libc::setrlimit(libc::RLIMIT_FSIZE, &rlim as *const libc::rlimit);
assert_eq!(ret, 0);
}
// Disable core dumps
let limit_val: libc::rlimit = std::mem::zeroed();
let ret = libc::setrlimit(libc::RLIMIT_CORE, &limit_val);
assert_eq!(ret, 0);
// Max AS size.
let mut rlim: libc::rlimit = std::mem::zeroed();
rlim.rlim_cur = n_mib_bytes!(512).try_into().unwrap();
rlim.rlim_max = n_mib_bytes!(512).try_into().unwrap();
let ret = libc::setrlimit(libc::RLIMIT_AS, &rlim as *const libc::rlimit);
assert_eq!(ret, 0);
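// Disabling ASLR keeps the target's address layout stable across executions,
// which makes coverage and crash addresses reproducible.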
let ret = libc::personality(libc::ADDR_NO_RANDOMIZE as u64);
assert_eq!(ret, 0);
}
if let Err(err) = self.drop_privileges() {
log::error!("Failed to drop privileges: {:#?}", err);
panic!();
}
// Make sure that UID == EUID, since if this is not the case,
// ld will ignore LD_PRELOAD which we need to use for targets
// that normally load instrumented libraries during runtime.
assert_eq!(nix::unistd::getuid(), nix::unistd::geteuid());
assert_eq!(nix::unistd::getgid(), nix::unistd::getegid());
let prog = CString::new(path.as_bytes()).unwrap();
unsafe {
libc::execve(prog.as_ptr(), argv.as_ptr(), envp.as_ptr());
}
unreachable!("Failed to call execve on '{}'", path);
}
_ => { /* The parent */ }
}
/* The parent */
log::info!("Forkserver has pid {}", child_pid);
// Note the sid so we can kill the child later.
// This is a sid since the child calls setsid().
self.forkserver_sid = Some(child_pid);
// Close the pipe ends used by the child.
unsafe {
libc::close(child_receive_fd);
libc::close(child_send_fd);
}
unsafe {
libc::fcntl(self.send_fd.unwrap(), libc::F_SETFD, libc::FD_CLOEXEC);
libc::fcntl(self.receive_fd.unwrap(), libc::F_SETFD, libc::FD_CLOEXEC);
}
// Wait for the hello from the child.
self.wait_for_data(AFL_DEFAULT_TIMEOUT)
.context("Timeout while waiting for forkserver to come up.")?;
// Read the available data.
let mut buffer = [0u8; 4];
unsafe {
let ret = libc::read(
self.receive_fd.unwrap(),
buffer.as_mut_ptr() as *mut libc::c_void,
4,
);
if ret != 4 {
return Err(anyhow!(format!(
"Failed to do handshake with forkserver. ret={}",
ret
)));
}
// Process extended attributes used by AFL++.
// See src/afl-forkserver.c:689 (afl_fsrv_start)
let status = u32::from_ne_bytes(buffer);
log::info!("Forkserver status: 0x{:x}", status);
if status & FS_OPT_MAPSIZE == FS_OPT_MAPSIZE {
log::info!("Got extended option FS_OPT_MAPSIZE from forkserver");
let new_map_size = ((status & 0x00fffffe) >> 1) + 1;
log::info!("Target requests a map of size {} bytes", new_map_size);
log::info!("Current map size is {} bytes", self.bitmap().size());
if self.bitmap_was_resize {
log::info!("Already resized, skipping....");
return Ok(());
}
let new_map_size = new_map_size.next_power_of_two() as usize;
if new_map_size > self.bitmap().size() {
log::info!("Resizing bitmap to {} bytes", new_map_size);
self.stop();
let new_map = Bitmap::new_in_shm(new_map_size, 0x00);
let _ = mem::replace(self.bitmap(), new_map);
self.bitmap_was_resize = true;
return self.start();
}
}
}
// if self.stdout_file.is_some() {
// // Take the stdout file so its fd gets dropped.
// self.stdout_file.take();
// }
// if self.stderr_file.is_some() {
// // Take the stderr file so its fd gets dropped.
// self.stderr_file.take();
// }
// We are ready to fuzz!
Ok(())
}
fn drop_privileges(&mut self) -> Result<()> {
let uid_gid = self
.config
.as_ref()
.map(|config| config.general.jail_uid_gid())
.unwrap_or(None);
if uid_gid.is_some() {
jail::acquire_privileges()?;
}
if let Some((uid, gid)) = uid_gid {
jail::drop_privileges(uid, gid, true)?;
}
Ok(())
}
/// Stops the forkserver. Must be called before calling start() again.
/// It is safe to call this function multiple times.
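/// Since the child called setsid(), killpg() on the stored sid takes down the
/// forkserver together with everything it spawned.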
pub fn stop(&mut self) {
if let Some(sid) = self.forkserver_sid.take() {
unsafe {
libc::close(self.send_fd.unwrap());
libc::close(self.receive_fd.unwrap());
let ret = libc::killpg(sid, SIGKILL);
assert!(ret == 0);
// reap it
libc::waitpid(sid, std::ptr::null_mut() as *mut libc::c_int, 0);
}
}
}
/// Write the given bytes into the sinks input channel. This function
/// is only allowed to be called on sinks with InputChannel::Stdin or InputChannel::File
/// input channel.
pub fn write(&mut self, data: &[u8]) {
debug_assert!(
self.input_channel == InputChannel::Stdin || self.input_channel == InputChannel::File
);
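// Rewind, truncate, write the new input, rewind again for the target, and
// sync so the data is visible before the next execution.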
self.input_file.0.seek(SeekFrom::Start(0)).unwrap();
self.input_file.0.set_len(0).unwrap();
self.input_file.0.write_all(data).unwrap();
self.input_file.0.seek(SeekFrom::Start(0)).unwrap();
self.input_file.0.sync_all().unwrap();
}
pub fn run(&mut self, timeout: Duration) -> Result<RunResult> {
self.bitmap().reset();
let mut buffer = [0u8; 4];
let buf_ptr = buffer.as_mut_ptr() as *mut libc::c_void;
// Tell the forkserver to fork.
log::trace!("Requesting fork");
let ret = repeat_on_interrupt(|| unsafe { libc::write(self.send_fd.unwrap(), buf_ptr, 4) });
if ret != 4 {
error!("Fork request failed");
return Err(anyhow!("Failed to write to send_fd: {}", ret));
}
log::trace!("Waiting for child pid");
self.wait_for_data(AFL_DEFAULT_TIMEOUT)
.context("Failed to retrive child pid from forkserver")?;
let ret =
repeat_on_interrupt(|| unsafe { libc::read(self.receive_fd.unwrap(), buf_ptr, 4) });
if ret != 4 {
error!("Failed to retrive child pid");
return Err(anyhow!("Failed to read from receive_non_blocking_fd"));
}
let child_pid = i32::from_le_bytes(buffer);
log::trace!("Got child pid {}", child_pid);
if child_pid <= 0 {
log::error!("Child pid '{}' is invalid", child_pid);
return Err(anyhow!(
"Failed to parse child_pid. child_pid={}, bytes={:?}",
child_pid,
buffer
));
}
log::trace!("Waiting for child termination");
match self.wait_for_data(timeout) {
Ok(_) => (),
Err(err) => {
log::trace!("Child timed out: {:#?}", err);
// Kill the child since it appears to have timed out.
let kill_ret = nix::sys::signal::kill(
nix::unistd::Pid::from_raw(child_pid),
nix::sys::signal::SIGKILL,
);
if let Err(ref err) = kill_ret {
// This might just be caused by the fact that the child won the race
// and terminated before we killed it.
log::trace!("Failed to kill child: {:#?}", err);
}
if let Err(err) = self
.wait_for_data(AFL_DEFAULT_TIMEOUT)
.context("Child did not acknowledge termination request")
{
let reason = try_get_child_exit_reason(self.forkserver_sid.unwrap());
log::error!(
"Exit reason: {:#?}, child_pid={:?}, kill_ret={:?}",
reason,
child_pid,
kill_ret
);
return Err(err.context(format!("child_exit_reason={:#?}", reason)));
}
// Consume exit status.
let ret = unsafe { libc::read(self.receive_fd.unwrap(), buf_ptr, 4) };
if ret != 4 {
log::error!("Expected {} != 4", ret);
}
return Ok(RunResult::TimedOut);
}
} | let ret =
repeat_on_interrupt(|| unsafe { libc::read(self.receive_fd.unwrap(), buf_ptr, 4) });
if ret != 4 {
error!("Failed to get exit status");
return Err(anyhow!("Failed to read from receive_non_blocking_fd"));
}
let exit_status = i32::from_le_bytes(buffer);
log::trace!("Child status is {}", exit_status);
if libc::WIFEXITED(exit_status) {
Ok(RunResult::Terminated(libc::WEXITSTATUS(exit_status)))
} else if libc::WIFSIGNALED(exit_status) {
let signal = libc::WTERMSIG(exit_status);
let signal = match Signal::try_from(signal) {
Ok(s) => s,
Err(e) => {
error!(
"Failed to parse signal code {}. Error: {:?}. Using dummy signal SIGUSR2",
signal, e
);
// Some dummy signal type.
Signal::SIGUSR2
}
};
Ok(RunResult::Signalled(signal))
} else {
unreachable!();
}
}
pub fn bitmap(&mut self) -> &mut Bitmap {
&mut self.bitmap
}
}
impl Drop for AflSink {
fn drop(&mut self) {
self.stop();
}
} |
log::trace!("Child terminated, getting exit status"); | random_line_split |
app_store_connect.py | """Tasks for managing Debug Information Files from Apple App Store Connect.
Users can instruct Sentry to download dSYMs from App Store Connect and put them into Sentry's
debug files. These tasks enable this functionality.
"""
import logging
import pathlib
import tempfile
from typing import List, Mapping, Tuple
import requests
import sentry_sdk
from django.utils import timezone
from sentry.lang.native import appconnect
from sentry.models import (
AppConnectBuild,
LatestAppConnectBuildsCheck,
Project,
ProjectOption,
debugfile,
)
from sentry.tasks.base import instrumented_task
from sentry.utils import json, metrics, sdk
from sentry.utils.appleconnect import appstore_connect as appstoreconnect_api
logger = logging.getLogger(__name__) |
# Sadly this decorator makes this entire function untyped for now as it does not itself have
# typing annotations. So we do all the work outside of the decorated task function to work
# around this.
# Since all these args must be pickled we keep them to built-in types as well.
@instrumented_task(name="sentry.tasks.app_store_connect.dsym_download", queue="appstoreconnect", ignore_result=True) # type: ignore
def dsym_download(project_id: int, config_id: str) -> None:
inner_dsym_download(project_id=project_id, config_id=config_id)
def inner_dsym_download(project_id: int, config_id: str) -> None:
"""Downloads the dSYMs from App Store Connect and stores them in the Project's debug files."""
with sdk.configure_scope() as scope:
scope.set_tag("project", project_id)
scope.set_tag("config_id", config_id)
project = Project.objects.get(pk=project_id)
config = appconnect.AppStoreConnectConfig.from_project_config(project, config_id)
client = appconnect.AppConnectClient.from_config(config)
listed_builds = client.list_builds()
builds = process_builds(project=project, config=config, to_process=listed_builds)
if not builds:
return
for i, (build, build_state) in enumerate(builds):
with sdk.configure_scope() as scope:
scope.set_context("dsym_downloads", {"total": len(builds), "completed": i})
with tempfile.NamedTemporaryFile() as dsyms_zip:
try:
client.download_dsyms(build, pathlib.Path(dsyms_zip.name))
# If there are no dSYMs, let the build be marked as fetched so it's not
# repeatedly re-checked every time this task is run.
except appconnect.NoDsymsError:
logger.debug("No dSYMs for build %s", build)
# Move on to the next build so we don't mark it as fetched. This url will
# eventuallyTM be populated, so revisit it at a later time.
except appconnect.PendingDsymsError:
logger.debug("dSYM url currently unavailable for build %s", build)
continue
# Early-return on unauthorized and forbidden errors to avoid trying all the other
# builds as well, since an expired token will error for all of them.
# The error is also swallowed unreported because this is an expected and actionable
# error.
except appstoreconnect_api.UnauthorizedError:
sentry_sdk.capture_message(
"Not authorized to download dSYM using current App Store Connect credentials",
level="info",
)
return
except appstoreconnect_api.ForbiddenError:
sentry_sdk.capture_message(
"Forbidden from downloading dSYM using current App Store Connect credentials",
level="info",
)
return
# Don't let malformed URLs abort all pending downloads in case it's an isolated instance
except ValueError as e:
sdk.capture_exception(e)
continue
# Assume request errors are a server side issue and do not abort all the
# pending downloads.
except appstoreconnect_api.RequestError as e:
sdk.capture_exception(e)
continue
except requests.RequestException as e:
sdk.capture_exception(e)
continue
else:
create_difs_from_dsyms_zip(dsyms_zip.name, project)
logger.debug("Uploaded dSYMs for build %s", build)
metrics.incr("tasks.app_store_connect.builds_ingested", sample_rate=1)
build_state.fetched = True
build_state.save()
def create_difs_from_dsyms_zip(dsyms_zip: str, project: Project) -> None:
with sentry_sdk.start_span(op="dsym-difs", description="Extract difs dSYM zip"):
with open(dsyms_zip, "rb") as fp:
created = debugfile.create_files_from_dif_zip(fp, project, accept_unknown=True)
for proj_debug_file in created:
logger.debug("Created %r for project %s", proj_debug_file, project.id)
def get_or_create_persisted_build(
project: Project, config: appconnect.AppStoreConnectConfig, build: appconnect.BuildInfo
) -> AppConnectBuild:
"""Fetches the sentry-internal :class:`AppConnectBuild`.
The build corresponds to the :class:`appconnect.BuildInfo` as returned by the
AppStore Connect API. If no build exists yet, a new "pending" build is created.
"""
try:
build_state = AppConnectBuild.objects.get(
project=project,
app_id=build.app_id,
platform=build.platform,
bundle_short_version=build.version,
bundle_version=build.build_number,
)
except AppConnectBuild.DoesNotExist:
build_state = AppConnectBuild(
project=project,
app_id=build.app_id,
bundle_id=config.bundleId,
platform=build.platform,
bundle_short_version=build.version,
bundle_version=build.build_number,
uploaded_to_appstore=build.uploaded_date,
first_seen=timezone.now(),
fetched=False,
)
build_state.save()
return build_state
def process_builds(
project: Project,
config: appconnect.AppStoreConnectConfig,
to_process: List[appconnect.BuildInfo],
) -> List[Tuple[appconnect.BuildInfo, AppConnectBuild]]:
"""Returns a list of builds whose dSYMs need to be updated or fetched.
This will create a new "pending" :class:`AppConnectBuild` for any :class:`appconnect.BuildInfo`
that cannot be found in the DB. These pending :class:`AppConnectBuild`s are immediately saved
upon creation.
"""
pending_builds = []
with sentry_sdk.start_span(
op="appconnect-update-builds", description="Update AppStoreConnect builds in database"
):
for build in to_process:
build_state = get_or_create_persisted_build(project, config, build)
if not build_state.fetched:
pending_builds.append((build, build_state))
LatestAppConnectBuildsCheck.objects.create_or_update(
project=project, source_id=config.id, values={"last_checked": timezone.now()}
)
return pending_builds
# Untyped decorator would stop type-checking of entire function, split into an inner
# function instead which can be type checked.
@instrumented_task( # type: ignore
name="sentry.tasks.app_store_connect.refresh_all_builds",
queue="appstoreconnect",
ignore_result=True,
)
def refresh_all_builds() -> None:
inner_refresh_all_builds()
def inner_refresh_all_builds() -> None:
"""Refreshes all AppStoreConnect builds for all projects.
This iterates over all the projects configured in Sentry and, for any that have an
AppStoreConnect symbol source configured, polls the AppStoreConnect API to check if
there are new builds.
"""
# We have no way to query for AppStore Connect symbol sources directly, but
# getting all of the project options that have custom symbol sources
# configured is a reasonable compromise, as the number of those should be
# low enough to traverse every hour.
# Another alternative would be to get a list of projects that have had a
# previous successful import, as indicated by existing `AppConnectBuild`
# objects. But that would miss projects that have a valid AppStore Connect
# setup, but have not yet published any kind of build to AppStore.
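# (Illustrative only: option.value holds a JSON list of symbol source configs,
# roughly '[{"id": "<config id>", "type": "appStoreConnect", ...}]'; only the
# "id" and "type" keys are read below.)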
options = ProjectOption.objects.filter(key=appconnect.SYMBOL_SOURCES_PROP_NAME)
count = 0
for option in options:
with sdk.push_scope() as scope:
scope.set_tag("project", option.project_id)
try:
if not option.value:
# An empty string set as option value, the UI does this when deleting
# all sources. This is not valid JSON.
continue
# We are parsing JSON thus all types are Any, so give the type-checker some
# extra help. We are maybe slightly lying about the type, but the
# attributes we do access are all string values.
all_sources: List[Mapping[str, str]] = json.loads(option.value)
for source in all_sources:
try:
source_id = source["id"]
source_type = source["type"]
except KeyError:
logger.exception("Malformed symbol source")
continue
if source_type == appconnect.SYMBOL_SOURCE_TYPE_NAME:
dsym_download.apply_async(
kwargs={
"project_id": option.project_id,
"config_id": source_id,
}
)
count += 1
except Exception:
logger.exception("Failed to refresh AppStoreConnect builds")
metrics.gauge("tasks.app_store_connect.refreshed", count, sample_rate=1) | random_line_split | |
app_store_connect.py | """Tasks for managing Debug Information Files from Apple App Store Connect.
Users can instruct Sentry to download dSYMs from App Store Connect and put them into Sentry's
debug files. These tasks enable this functionality.
"""
import logging
import pathlib
import tempfile
from typing import List, Mapping, Tuple
import requests
import sentry_sdk
from django.utils import timezone
from sentry.lang.native import appconnect
from sentry.models import (
AppConnectBuild,
LatestAppConnectBuildsCheck,
Project,
ProjectOption,
debugfile,
)
from sentry.tasks.base import instrumented_task
from sentry.utils import json, metrics, sdk
from sentry.utils.appleconnect import appstore_connect as appstoreconnect_api
logger = logging.getLogger(__name__)
# Sadly this decorator makes this entire function untyped for now as it does not itself have
# typing annotations. So we do all the work outside of the decorated task function to work
# around this.
# Since all these args must be pickled we keep them to built-in types as well.
@instrumented_task(name="sentry.tasks.app_store_connect.dsym_download", queue="appstoreconnect", ignore_result=True) # type: ignore
def dsym_download(project_id: int, config_id: str) -> None:
inner_dsym_download(project_id=project_id, config_id=config_id)
def inner_dsym_download(project_id: int, config_id: str) -> None:
"""Downloads the dSYMs from App Store Connect and stores them in the Project's debug files."""
with sdk.configure_scope() as scope:
scope.set_tag("project", project_id)
scope.set_tag("config_id", config_id)
project = Project.objects.get(pk=project_id)
config = appconnect.AppStoreConnectConfig.from_project_config(project, config_id)
client = appconnect.AppConnectClient.from_config(config)
listed_builds = client.list_builds()
builds = process_builds(project=project, config=config, to_process=listed_builds)
if not builds:
return
for i, (build, build_state) in enumerate(builds):
|
def create_difs_from_dsyms_zip(dsyms_zip: str, project: Project) -> None:
with sentry_sdk.start_span(op="dsym-difs", description="Extract difs dSYM zip"):
with open(dsyms_zip, "rb") as fp:
created = debugfile.create_files_from_dif_zip(fp, project, accept_unknown=True)
for proj_debug_file in created:
logger.debug("Created %r for project %s", proj_debug_file, project.id)
def get_or_create_persisted_build(
project: Project, config: appconnect.AppStoreConnectConfig, build: appconnect.BuildInfo
) -> AppConnectBuild:
"""Fetches the sentry-internal :class:`AppConnectBuild`.
The build corresponds to the :class:`appconnect.BuildInfo` as returned by the
AppStore Connect API. If no build exists yet, a new "pending" build is created.
"""
try:
build_state = AppConnectBuild.objects.get(
project=project,
app_id=build.app_id,
platform=build.platform,
bundle_short_version=build.version,
bundle_version=build.build_number,
)
except AppConnectBuild.DoesNotExist:
build_state = AppConnectBuild(
project=project,
app_id=build.app_id,
bundle_id=config.bundleId,
platform=build.platform,
bundle_short_version=build.version,
bundle_version=build.build_number,
uploaded_to_appstore=build.uploaded_date,
first_seen=timezone.now(),
fetched=False,
)
build_state.save()
return build_state
def process_builds(
project: Project,
config: appconnect.AppStoreConnectConfig,
to_process: List[appconnect.BuildInfo],
) -> List[Tuple[appconnect.BuildInfo, AppConnectBuild]]:
"""Returns a list of builds whose dSYMs need to be updated or fetched.
This will create a new "pending" :class:`AppConnectBuild` for any :class:`appconnect.BuildInfo`
that cannot be found in the DB. These pending :class:`AppConnectBuild`s are immediately saved
upon creation.
"""
pending_builds = []
with sentry_sdk.start_span(
op="appconnect-update-builds", description="Update AppStoreConnect builds in database"
):
for build in to_process:
build_state = get_or_create_persisted_build(project, config, build)
if not build_state.fetched:
pending_builds.append((build, build_state))
LatestAppConnectBuildsCheck.objects.create_or_update(
project=project, source_id=config.id, values={"last_checked": timezone.now()}
)
return pending_builds
# Untyped decorator would stop type-checking of entire function, split into an inner
# function instead which can be type checked.
@instrumented_task( # type: ignore
name="sentry.tasks.app_store_connect.refresh_all_builds",
queue="appstoreconnect",
ignore_result=True,
)
def refresh_all_builds() -> None:
inner_refresh_all_builds()
def inner_refresh_all_builds() -> None:
"""Refreshes all AppStoreConnect builds for all projects.
This iterates over all the projects configured in Sentry and, for any that have an
AppStoreConnect symbol source configured, polls the AppStoreConnect API to check if
there are new builds.
"""
# We have no way to query for AppStore Connect symbol sources directly, but
# getting all of the project options that have custom symbol sources
# configured is a reasonable compromise, as the number of those should be
# low enough to traverse every hour.
# Another alternative would be to get a list of projects that have had a
# previous successful import, as indicated by existing `AppConnectBuild`
# objects. But that would miss projects that have a valid AppStore Connect
# setup, but have not yet published any kind of build to AppStore.
options = ProjectOption.objects.filter(key=appconnect.SYMBOL_SOURCES_PROP_NAME)
count = 0
for option in options:
with sdk.push_scope() as scope:
scope.set_tag("project", option.project_id)
try:
if not option.value:
# An empty string set as option value, the UI does this when deleting
# all sources. This is not valid JSON.
continue
# We are parsing JSON thus all types are Any, so give the type-checker some
# extra help. We are maybe slightly lying about the type, but the
# attributes we do access are all string values.
all_sources: List[Mapping[str, str]] = json.loads(option.value)
for source in all_sources:
try:
source_id = source["id"]
source_type = source["type"]
except KeyError:
logger.exception("Malformed symbol source")
continue
if source_type == appconnect.SYMBOL_SOURCE_TYPE_NAME:
dsym_download.apply_async(
kwargs={
"project_id": option.project_id,
"config_id": source_id,
}
)
count += 1
except Exception:
logger.exception("Failed to refresh AppStoreConnect builds")
metrics.gauge("tasks.app_store_connect.refreshed", count, sample_rate=1)
| with sdk.configure_scope() as scope:
scope.set_context("dsym_downloads", {"total": len(builds), "completed": i})
with tempfile.NamedTemporaryFile() as dsyms_zip:
try:
client.download_dsyms(build, pathlib.Path(dsyms_zip.name))
# If there are no dSYMs, let the build be marked as fetched so it's not
# repeatedly re-checked every time this task is run.
except appconnect.NoDsymsError:
logger.debug("No dSYMs for build %s", build)
# Move on to the next build so we don't mark it as fetched. This url will
# eventuallyTM be populated, so revisit it at a later time.
except appconnect.PendingDsymsError:
logger.debug("dSYM url currently unavailable for build %s", build)
continue
# Early-return on unauthorized and forbidden errors to avoid trying all the other
# builds as well, since an expired token will error for all of them.
# The error is also swallowed unreported because this is an expected and actionable
# error.
except appstoreconnect_api.UnauthorizedError:
sentry_sdk.capture_message(
"Not authorized to download dSYM using current App Store Connect credentials",
level="info",
)
return
except appstoreconnect_api.ForbiddenError:
sentry_sdk.capture_message(
"Forbidden from downloading dSYM using current App Store Connect credentials",
level="info",
)
return
# Don't let malformed URLs abort all pending downloads in case it's an isolated instance
except ValueError as e:
sdk.capture_exception(e)
continue
# Assume request errors are a server side issue and do not abort all the
# pending downloads.
except appstoreconnect_api.RequestError as e:
sdk.capture_exception(e)
continue
except requests.RequestException as e:
sdk.capture_exception(e)
continue
else:
create_difs_from_dsyms_zip(dsyms_zip.name, project)
logger.debug("Uploaded dSYMs for build %s", build)
metrics.incr("tasks.app_store_connect.builds_ingested", sample_rate=1)
build_state.fetched = True
build_state.save() | conditional_block |
app_store_connect.py | """Tasks for managing Debug Information Files from Apple App Store Connect.
Users can instruct Sentry to download dSYMs from App Store Connect and put them into Sentry's
debug files. These tasks enable this functionality.
"""
import logging
import pathlib
import tempfile
from typing import List, Mapping, Tuple
import requests
import sentry_sdk
from django.utils import timezone
from sentry.lang.native import appconnect
from sentry.models import (
AppConnectBuild,
LatestAppConnectBuildsCheck,
Project,
ProjectOption,
debugfile,
)
from sentry.tasks.base import instrumented_task
from sentry.utils import json, metrics, sdk
from sentry.utils.appleconnect import appstore_connect as appstoreconnect_api
logger = logging.getLogger(__name__)
# Sadly this decorator makes this entire function untyped for now as it does not itself have
# typing annotations. So we do all the work outside of the decorated task function to work
# around this.
# Since all these args must be pickled we keep them to built-in types as well.
@instrumented_task(name="sentry.tasks.app_store_connect.dsym_download", queue="appstoreconnect", ignore_result=True) # type: ignore
def dsym_download(project_id: int, config_id: str) -> None:
inner_dsym_download(project_id=project_id, config_id=config_id)
def inner_dsym_download(project_id: int, config_id: str) -> None:
"""Downloads the dSYMs from App Store Connect and stores them in the Project's debug files."""
with sdk.configure_scope() as scope:
scope.set_tag("project", project_id)
scope.set_tag("config_id", config_id)
project = Project.objects.get(pk=project_id)
config = appconnect.AppStoreConnectConfig.from_project_config(project, config_id)
client = appconnect.AppConnectClient.from_config(config)
listed_builds = client.list_builds()
builds = process_builds(project=project, config=config, to_process=listed_builds)
if not builds:
return
for i, (build, build_state) in enumerate(builds):
with sdk.configure_scope() as scope:
scope.set_context("dsym_downloads", {"total": len(builds), "completed": i})
with tempfile.NamedTemporaryFile() as dsyms_zip:
try:
client.download_dsyms(build, pathlib.Path(dsyms_zip.name))
# If there are no dSYMs, let the build be marked as fetched so it's not
# repeatedly re-checked every time this task is run.
except appconnect.NoDsymsError:
logger.debug("No dSYMs for build %s", build)
# Move on to the next build so we don't mark it as fetched. This url will
# eventuallyTM be populated, so revisit it at a later time.
except appconnect.PendingDsymsError:
logger.debug("dSYM url currently unavailable for build %s", build)
continue
# Early-return on unauthorized and forbidden errors to avoid trying all the other
# builds as well, since an expired token will error for all of them.
# The error is also swallowed unreported because this is an expected and actionable
# error.
except appstoreconnect_api.UnauthorizedError:
sentry_sdk.capture_message(
"Not authorized to download dSYM using current App Store Connect credentials",
level="info",
)
return
except appstoreconnect_api.ForbiddenError:
sentry_sdk.capture_message(
"Forbidden from downloading dSYM using current App Store Connect credentials",
level="info",
)
return
# Don't let malformed URLs abort all pending downloads in case it's an isolated instance
except ValueError as e:
sdk.capture_exception(e)
continue
# Assume request errors are a server side issue and do not abort all the
# pending downloads.
except appstoreconnect_api.RequestError as e:
sdk.capture_exception(e)
continue
except requests.RequestException as e:
sdk.capture_exception(e)
continue
else:
create_difs_from_dsyms_zip(dsyms_zip.name, project)
logger.debug("Uploaded dSYMs for build %s", build)
metrics.incr("tasks.app_store_connect.builds_ingested", sample_rate=1)
build_state.fetched = True
build_state.save()
def create_difs_from_dsyms_zip(dsyms_zip: str, project: Project) -> None:
|
def get_or_create_persisted_build(
project: Project, config: appconnect.AppStoreConnectConfig, build: appconnect.BuildInfo
) -> AppConnectBuild:
"""Fetches the sentry-internal :class:`AppConnectBuild`.
The build corresponds to the :class:`appconnect.BuildInfo` as returned by the
AppStore Connect API. If no build exists yet, a new "pending" build is created.
"""
try:
build_state = AppConnectBuild.objects.get(
project=project,
app_id=build.app_id,
platform=build.platform,
bundle_short_version=build.version,
bundle_version=build.build_number,
)
except AppConnectBuild.DoesNotExist:
build_state = AppConnectBuild(
project=project,
app_id=build.app_id,
bundle_id=config.bundleId,
platform=build.platform,
bundle_short_version=build.version,
bundle_version=build.build_number,
uploaded_to_appstore=build.uploaded_date,
first_seen=timezone.now(),
fetched=False,
)
build_state.save()
return build_state
def process_builds(
project: Project,
config: appconnect.AppStoreConnectConfig,
to_process: List[appconnect.BuildInfo],
) -> List[Tuple[appconnect.BuildInfo, AppConnectBuild]]:
"""Returns a list of builds whose dSYMs need to be updated or fetched.
This will create a new "pending" :class:`AppConnectBuild` for any :class:`appconnect.BuildInfo`
that cannot be found in the DB. These pending :class:`AppConnectBuild`s are immediately saved
upon creation.
"""
pending_builds = []
with sentry_sdk.start_span(
op="appconnect-update-builds", description="Update AppStoreConnect builds in database"
):
for build in to_process:
build_state = get_or_create_persisted_build(project, config, build)
if not build_state.fetched:
pending_builds.append((build, build_state))
LatestAppConnectBuildsCheck.objects.create_or_update(
project=project, source_id=config.id, values={"last_checked": timezone.now()}
)
return pending_builds
# Untyped decorator would stop type-checking of entire function, split into an inner
# function instead which can be type checked.
@instrumented_task( # type: ignore
name="sentry.tasks.app_store_connect.refresh_all_builds",
queue="appstoreconnect",
ignore_result=True,
)
def refresh_all_builds() -> None:
inner_refresh_all_builds()
def inner_refresh_all_builds() -> None:
"""Refreshes all AppStoreConnect builds for all projects.
This iterates over all the projects configured in Sentry and, for any that have an
AppStoreConnect symbol source configured, polls the AppStoreConnect API to check if
there are new builds.
"""
# We have no way to query for AppStore Connect symbol sources directly, but
# getting all of the project options that have custom symbol sources
# configured is a reasonable compromise, as the number of those should be
# low enough to traverse every hour.
# Another alternative would be to get a list of projects that have had a
# previous successful import, as indicated by existing `AppConnectBuild`
# objects. But that would miss projects that have a valid AppStore Connect
# setup, but have not yet published any kind of build to AppStore.
options = ProjectOption.objects.filter(key=appconnect.SYMBOL_SOURCES_PROP_NAME)
count = 0
for option in options:
with sdk.push_scope() as scope:
scope.set_tag("project", option.project_id)
try:
if not option.value:
# An empty string set as option value, the UI does this when deleting
# all sources. This is not valid JSON.
continue
# We are parsing JSON thus all types are Any, so give the type-checker some
# extra help. We are maybe slightly lying about the type, but the
# attributes we do access are all string values.
all_sources: List[Mapping[str, str]] = json.loads(option.value)
for source in all_sources:
try:
source_id = source["id"]
source_type = source["type"]
except KeyError:
logger.exception("Malformed symbol source")
continue
if source_type == appconnect.SYMBOL_SOURCE_TYPE_NAME:
dsym_download.apply_async(
kwargs={
"project_id": option.project_id,
"config_id": source_id,
}
)
count += 1
except Exception:
logger.exception("Failed to refresh AppStoreConnect builds")
metrics.gauge("tasks.app_store_connect.refreshed", count, sample_rate=1)
| with sentry_sdk.start_span(op="dsym-difs", description="Extract difs dSYM zip"):
with open(dsyms_zip, "rb") as fp:
created = debugfile.create_files_from_dif_zip(fp, project, accept_unknown=True)
for proj_debug_file in created:
logger.debug("Created %r for project %s", proj_debug_file, project.id) | identifier_body |
app_store_connect.py | """Tasks for managing Debug Information Files from Apple App Store Connect.
Users can instruct Sentry to download dSYMs from App Store Connect and put them into Sentry's
debug files. These tasks enable this functionality.
"""
import logging
import pathlib
import tempfile
from typing import List, Mapping, Tuple
import requests
import sentry_sdk
from django.utils import timezone
from sentry.lang.native import appconnect
from sentry.models import (
AppConnectBuild,
LatestAppConnectBuildsCheck,
Project,
ProjectOption,
debugfile,
)
from sentry.tasks.base import instrumented_task
from sentry.utils import json, metrics, sdk
from sentry.utils.appleconnect import appstore_connect as appstoreconnect_api
logger = logging.getLogger(__name__)
# Sadly this decorator makes this entire function untyped for now as it does not itself have
# typing annotations. So we do all the work outside of the decorated task function to work
# around this.
# Since all these args must be pickled we keep them to built-in types as well.
@instrumented_task(name="sentry.tasks.app_store_connect.dsym_download", queue="appstoreconnect", ignore_result=True) # type: ignore
def dsym_download(project_id: int, config_id: str) -> None:
inner_dsym_download(project_id=project_id, config_id=config_id)
def inner_dsym_download(project_id: int, config_id: str) -> None:
"""Downloads the dSYMs from App Store Connect and stores them in the Project's debug files."""
with sdk.configure_scope() as scope:
scope.set_tag("project", project_id)
scope.set_tag("config_id", config_id)
project = Project.objects.get(pk=project_id)
config = appconnect.AppStoreConnectConfig.from_project_config(project, config_id)
client = appconnect.AppConnectClient.from_config(config)
listed_builds = client.list_builds()
builds = process_builds(project=project, config=config, to_process=listed_builds)
if not builds:
return
for i, (build, build_state) in enumerate(builds):
with sdk.configure_scope() as scope:
scope.set_context("dsym_downloads", {"total": len(builds), "completed": i})
with tempfile.NamedTemporaryFile() as dsyms_zip:
try:
client.download_dsyms(build, pathlib.Path(dsyms_zip.name))
# If there are no dSYMs, let the build be marked as fetched so it's not
# repeatedly re-checked every time this task is run.
except appconnect.NoDsymsError:
logger.debug("No dSYMs for build %s", build)
# Move on to the next build so we don't mark it as fetched. This url will
# eventuallyTM be populated, so revisit it at a later time.
except appconnect.PendingDsymsError:
logger.debug("dSYM url currently unavailable for build %s", build)
continue
# Early-return on unauthorized and forbidden errors to avoid trying all the other
# builds as well, since an expired token will error for all of them.
# The error is also swallowed unreported because this is an expected and actionable
# error.
except appstoreconnect_api.UnauthorizedError:
sentry_sdk.capture_message(
"Not authorized to download dSYM using current App Store Connect credentials",
level="info",
)
return
except appstoreconnect_api.ForbiddenError:
sentry_sdk.capture_message(
"Forbidden from downloading dSYM using current App Store Connect credentials",
level="info",
)
return
# Don't let malformed URLs abort all pending downloads in case it's an isolated instance
except ValueError as e:
sdk.capture_exception(e)
continue
# Assume request errors are a server side issue and do not abort all the
# pending downloads.
except appstoreconnect_api.RequestError as e:
sdk.capture_exception(e)
continue
except requests.RequestException as e:
sdk.capture_exception(e)
continue
else:
create_difs_from_dsyms_zip(dsyms_zip.name, project)
logger.debug("Uploaded dSYMs for build %s", build)
metrics.incr("tasks.app_store_connect.builds_ingested", sample_rate=1)
build_state.fetched = True
build_state.save()
def create_difs_from_dsyms_zip(dsyms_zip: str, project: Project) -> None:
with sentry_sdk.start_span(op="dsym-difs", description="Extract difs dSYM zip"):
with open(dsyms_zip, "rb") as fp:
created = debugfile.create_files_from_dif_zip(fp, project, accept_unknown=True)
for proj_debug_file in created:
logger.debug("Created %r for project %s", proj_debug_file, project.id)
def get_or_create_persisted_build(
project: Project, config: appconnect.AppStoreConnectConfig, build: appconnect.BuildInfo
) -> AppConnectBuild:
"""Fetches the sentry-internal :class:`AppConnectBuild`.
The build corresponds to the :class:`appconnect.BuildInfo` as returned by the
AppStore Connect API. If no build exists yet, a new "pending" build is created.
"""
try:
build_state = AppConnectBuild.objects.get(
project=project,
app_id=build.app_id,
platform=build.platform,
bundle_short_version=build.version,
bundle_version=build.build_number,
)
except AppConnectBuild.DoesNotExist:
build_state = AppConnectBuild(
project=project,
app_id=build.app_id,
bundle_id=config.bundleId,
platform=build.platform,
bundle_short_version=build.version,
bundle_version=build.build_number,
uploaded_to_appstore=build.uploaded_date,
first_seen=timezone.now(),
fetched=False,
)
build_state.save()
return build_state
def process_builds(
project: Project,
config: appconnect.AppStoreConnectConfig,
to_process: List[appconnect.BuildInfo],
) -> List[Tuple[appconnect.BuildInfo, AppConnectBuild]]:
"""Returns a list of builds whose dSYMs need to be updated or fetched.
This will create a new "pending" :class:`AppConnectBuild` for any :class:`appconnect.BuildInfo`
that cannot be found in the DB. These pending :class:`AppConnectBuild`s are immediately saved
upon creation.
"""
pending_builds = []
with sentry_sdk.start_span(
op="appconnect-update-builds", description="Update AppStoreConnect builds in database"
):
for build in to_process:
build_state = get_or_create_persisted_build(project, config, build)
if not build_state.fetched:
pending_builds.append((build, build_state))
LatestAppConnectBuildsCheck.objects.create_or_update(
project=project, source_id=config.id, values={"last_checked": timezone.now()}
)
return pending_builds
# Untyped decorator would stop type-checking of entire function, split into an inner
# function instead which can be type checked.
@instrumented_task( # type: ignore
name="sentry.tasks.app_store_connect.refresh_all_builds",
queue="appstoreconnect",
ignore_result=True,
)
def refresh_all_builds() -> None:
inner_refresh_all_builds()
def | () -> None:
"""Refreshes all AppStoreConnect builds for all projects.
This iterates over all the projects configured in Sentry and, for any that have an
AppStoreConnect symbol source configured, polls the AppStoreConnect API to check if
there are new builds.
"""
# We have no way to query for AppStore Connect symbol sources directly, but
# getting all of the project options that have custom symbol sources
# configured is a reasonable compromise, as the number of those should be
# low enough to traverse every hour.
# Another alternative would be to get a list of projects that have had a
# previous successful import, as indicated by existing `AppConnectBuild`
# objects. But that would miss projects that have a valid AppStore Connect
# setup, but have not yet published any kind of build to AppStore.
options = ProjectOption.objects.filter(key=appconnect.SYMBOL_SOURCES_PROP_NAME)
count = 0
for option in options:
with sdk.push_scope() as scope:
scope.set_tag("project", option.project_id)
try:
if not option.value:
# An empty string set as option value, the UI does this when deleting
# all sources. This is not valid JSON.
continue
# We are parsing JSON thus all types are Any, so give the type-checker some
# extra help. We are maybe slightly lying about the type, but the
# attributes we do access are all string values.
all_sources: List[Mapping[str, str]] = json.loads(option.value)
for source in all_sources:
try:
source_id = source["id"]
source_type = source["type"]
except KeyError:
logger.exception("Malformed symbol source")
continue
if source_type == appconnect.SYMBOL_SOURCE_TYPE_NAME:
dsym_download.apply_async(
kwargs={
"project_id": option.project_id,
"config_id": source_id,
}
)
count += 1
except Exception:
logger.exception("Failed to refresh AppStoreConnect builds")
metrics.gauge("tasks.app_store_connect.refreshed", count, sample_rate=1)
| inner_refresh_all_builds | identifier_name |
player.rs | use std::collections::HashMap;
use std::convert::TryInto;
use std::path::PathBuf;
use std::sync::mpsc::Sender;
use std::time::{Duration, Instant};
use dbus::arg::RefArg;
use dbus::blocking::stdintf::org_freedesktop_dbus::Properties;
use dbus::blocking::BlockingSender;
use dbus::blocking::{Connection, Proxy};
use dbus::{arg, Message};
use log::{debug, info, warn};
use url::Url;
const MPRIS2_PREFIX: &str = "org.mpris.MediaPlayer2.";
const MPRIS2_PATH: &str = "/org/mpris/MediaPlayer2";
type DbusStringMap = HashMap<String, arg::Variant<Box<dyn arg::RefArg>>>;
pub type ConnectionProxy<'a> = Proxy<'a, &'a Connection>;
#[derive(Debug, PartialEq, Eq, Copy, Clone)]
pub enum PlaybackStatus {
Playing,
Paused,
Stopped,
}
#[derive(Clone, Debug)]
pub struct Metadata {
album: Option<String>,
title: String,
artists: Option<Vec<String>>,
file_path: PathBuf,
length: i64,
}
impl Metadata {
#[allow(dead_code)]
pub fn album(&self) -> &Option<String> {
&self.album
}
#[allow(dead_code)]
pub fn title(&self) -> &String {
&self.title
}
#[allow(dead_code)]
pub fn artists(&self) -> &Option<Vec<String>> {
&self.artists
}
pub fn file_path(&self) -> &PathBuf {
&self.file_path
}
#[allow(dead_code)]
pub fn length(&self) -> i64 {
self.length
}
}
#[derive(Debug)]
pub enum Event {
PlayerShutDown,
PlaybackStatusChange(PlaybackStatus),
Seeked { position: Duration },
MetadataChange(Option<Metadata>),
}
#[derive(Debug)]
pub struct Progress {
/// If player is stopped, metadata will be None
metadata: Option<Metadata>,
playback_status: PlaybackStatus,
/// When this Progress was constructed, in order to calculate how old it is.
instant: Instant,
/// Position at the time of construction
position: Duration,
}
impl Progress {
pub fn new(
playback_status: PlaybackStatus,
position: Duration,
metadata: Option<Metadata>,
) -> Progress {
Progress {
metadata,
playback_status,
instant: Instant::now(),
position,
}
}
pub fn metadata(&self) -> &Option<Metadata> {
&self.metadata
}
pub fn take_metadata(self) -> Option<Metadata> {
self.metadata
}
pub fn playback_status(&self) -> PlaybackStatus {
self.playback_status
}
pub fn instant(&self) -> Instant {
self.instant
}
pub fn position(&self) -> Duration {
self.position
}
}
fn query_player_property<T>(p: &ConnectionProxy, name: &str) -> Result<T, String>
where
for<'b> T: dbus::arg::Get<'b>,
{
p.get("org.mpris.MediaPlayer2.Player", name)
.map_err(|e| e.to_string())
}
pub fn query_player_position(p: &ConnectionProxy) -> Result<Duration, String> {
let v = query_player_property::<i64>(p, "Position")?;
if v < 0 {
panic!("Wrong position value");
}
Ok(Duration::from_micros(v.try_into().unwrap()))
}
fn query_player_playback_status(p: &ConnectionProxy) -> Result<PlaybackStatus, String> {
query_player_property::<String>(p, "PlaybackStatus").map(|v| parse_playback_status(&v))
}
fn parse_player_metadata<T: arg::RefArg>(
metadata_map: HashMap<String, T>,
) -> Result<Option<Metadata>, String> {
debug!("metadata_map = {:?}", metadata_map);
let file_path_encoded = match metadata_map.get("xesam:url") {
Some(url) => url
.as_str()
.ok_or("url metadata should be string")?
.to_string(),
// If playlist has reached end, new metadata event is sent,
// but it doesn't contain any of the following keys
None => return Ok(None),
};
let file_path_url = Url::parse(&file_path_encoded)
.map_err(|e| format!("invalid format of url metadata: {}", e.to_string()))?;
let file_path = file_path_url
.to_file_path()
.map_err(|_| format!("invalid format of url metadata: {}", file_path_url))?;
let album = metadata_map
.get("xesam:album")
.map(|v| {
v.as_str()
.ok_or("album metadata should be string")
.map(|x| x.to_string())
})
.transpose()?;
let title = metadata_map["xesam:title"]
.as_str()
.ok_or("title metadata should be string")?
.to_string();
let length = metadata_map["mpris:length"]
.as_i64()
.ok_or("length metadata should be i64")?;
let artists = metadata_map
.get("xesam:artist")
.map(|v| {
v.as_iter()
.ok_or("artist metadata should be iterator")?
.next()
.ok_or("artist metadata should contain at least one entry")?
.as_iter()
.ok_or("artist metadata should have nested iterator")?
.map(|x| {
Ok(x.as_str()
.ok_or("artist metadata values should be string")?
.to_string())
})
.collect::<Result<Vec<String>, &'static str>>()
})
.transpose()?;
Ok(Some(Metadata {
album,
title,
artists,
file_path,
length,
}))
}
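// For illustration, a typical MPRIS metadata map this parser accepts (the
// values arrive as D-Bus variants; the keys shown are the ones read above):
//   "xesam:url"    -> "file:///home/user/music/track.flac"
//   "xesam:title"  -> "Track Title"
//   "xesam:album"  -> "Album Name"
//   "xesam:artist" -> ["Artist One", "Artist Two"]
//   "mpris:length" -> 215000000 (microseconds)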
fn query_player_metadata(p: &ConnectionProxy) -> Result<Option<Metadata>, String> {
query_player_property::<DbusStringMap>(p, "Metadata").and_then(parse_player_metadata)
}
pub fn query_progress(p: &ConnectionProxy) -> Result<Progress, String> {
let playback_status = query_player_playback_status(p)?;
let position = query_player_position(p)?;
let instant = Instant::now();
let metadata = if playback_status != PlaybackStatus::Stopped {
query_player_metadata(p)?
} else {
None
};
Ok(Progress {
metadata,
playback_status,
instant,
position,
})
}
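// Minimal usage sketch (assumes a session-bus connection and an mpsc channel;
// the player name "vlc" is illustrative only):
//
// let (sender, receiver) = std::sync::mpsc::channel();
// let c = Connection::new_session().map_err(|e| e.to_string())?;
// let owner = subscribe(&c, "vlc", &sender)?;
// let p = get_connection_proxy(&c, &owner);
// let progress = query_progress(&p)?;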
fn parse_playback_status(playback_status: &str) -> PlaybackStatus {
match playback_status {
"Playing" => PlaybackStatus::Playing,
"Paused" => PlaybackStatus::Paused,
"Stopped" => PlaybackStatus::Stopped,
_ => panic!(""),
}
}
fn query_unique_owner_name<S: Into<String>>(c: &Connection, bus_name: S) -> Result<String, String> {
let get_name_owner = Message::new_method_call(
"org.freedesktop.DBus",
"/",
"org.freedesktop.DBus",
"GetNameOwner",
)
.map_err(|e| e.to_string())?
.append1(bus_name.into());
| .map(|reply| {
reply
.get1()
.expect("GetNameOwner must have name as first member")
})
}
fn query_all_player_buses(c: &Connection) -> Result<Vec<String>, String> {
let list_names = Message::new_method_call(
"org.freedesktop.DBus",
"/",
"org.freedesktop.DBus",
"ListNames",
)?;
let reply = c
.send_with_reply_and_block(list_names, Duration::from_millis(500))
.map_err(|e| e.to_string())?;
let names: arg::Array<&str, _> = reply.read1().map_err(|e| e.to_string())?;
Ok(names
.filter(|name| name.starts_with(MPRIS2_PREFIX))
.map(|str_ref| str_ref.to_owned())
.collect())
}
fn get_message_item_dict(
a: &arg::Variant<Box<dyn arg::RefArg>>,
) -> HashMap<String, Box<&dyn arg::RefArg>> {
let mut it = a.as_iter().unwrap();
let d_variant = it.next().unwrap();
let d_it = d_variant.as_iter().unwrap();
let v = d_it.collect::<Vec<_>>();
v.chunks(2)
.map(|c| {
let key = c[0].as_str().unwrap();
(key.to_string(), Box::new(c[1]))
})
.collect()
}
#[derive(Debug)]
pub struct DbusPropertiesChangedHappened {
pub interface_name: String,
pub changed_properties: DbusStringMap,
pub invalidated_properties: Vec<String>,
}
impl dbus::message::SignalArgs for DbusPropertiesChangedHappened {
const NAME: &'static str = "PropertiesChanged";
const INTERFACE: &'static str = "org.freedesktop.DBus.Properties";
}
impl arg::ReadAll for DbusPropertiesChangedHappened {
fn read(i: &mut arg::Iter) -> Result<Self, arg::TypeMismatchError> {
Ok(Self {
interface_name: i.read()?,
changed_properties: i.read()?,
invalidated_properties: i.read()?,
})
}
}
#[derive(Debug)]
pub struct MediaPlayer2SeekedHappened {
pub position_us: i64,
}
impl dbus::message::SignalArgs for MediaPlayer2SeekedHappened {
const NAME: &'static str = "Seeked";
const INTERFACE: &'static str = "org.mpris.MediaPlayer2.Player";
}
impl arg::ReadAll for MediaPlayer2SeekedHappened {
fn read(i: &mut arg::Iter) -> Result<Self, arg::TypeMismatchError> {
Ok(Self {
position_us: i.read()?,
})
}
}
#[derive(Debug)]
pub struct DbusNameOwnedChanged {
pub name: String,
pub old_owner: String,
pub new_owner: String,
}
impl dbus::message::SignalArgs for DbusNameOwnedChanged {
const NAME: &'static str = "NameOwnerChanged";
const INTERFACE: &'static str = "org.freedesktop.DBus";
}
impl arg::ReadAll for DbusNameOwnedChanged {
fn read(i: &mut arg::Iter) -> Result<Self, arg::TypeMismatchError> {
Ok(Self {
// NameOwnerChanged carries (name, old_owner, new_owner) in wire order,
// so read the fields in that order to keep old and new from being swapped.
name: i.read()?,
old_owner: i.read()?,
new_owner: i.read()?,
})
}
}
pub fn get_connection_proxy<'a>(
c: &'a Connection,
player_owner_name: &'a str,
) -> ConnectionProxy<'a> {
c.with_proxy(player_owner_name, MPRIS2_PATH, Duration::from_millis(5000))
}
fn get_mediaplayer2_seeked_handler(
sender: Sender<Event>,
) -> impl Fn(MediaPlayer2SeekedHappened, &Connection) -> bool {
move |e: MediaPlayer2SeekedHappened, _: &Connection| {
debug!("Seek happened: {:?}", e);
if e.position_us < 0 {
panic!(
"Position value must be positive number, found {}",
e.position_us
);
}
sender
.send(Event::Seeked {
position: Duration::from_micros(e.position_us as u64),
})
.unwrap();
true
}
}
fn get_dbus_properties_changed_handler(
sender: Sender<Event>,
) -> impl Fn(DbusPropertiesChangedHappened, &Connection) -> bool {
move |e: DbusPropertiesChangedHappened, _: &Connection| {
debug!("DBus.Properties happened: {:?}", e);
if e.interface_name == "org.mpris.MediaPlayer2.Player" {
for (k, v) in &e.changed_properties {
match k.as_ref() {
"PlaybackStatus" => {
let playback_status = v.as_str().unwrap();
debug!("playback_status = {:?}", playback_status);
sender
.send(Event::PlaybackStatusChange(parse_playback_status(
&playback_status,
)))
.unwrap();
}
"Metadata" => {
let metadata_map = get_message_item_dict(v);
debug!("metadata_map = {:?}", metadata_map);
let metadata = parse_player_metadata(metadata_map).unwrap();
sender.send(Event::MetadataChange(metadata)).unwrap();
}
_ => {
warn!("Unknown PropertiesChanged event:");
for p in &e.changed_properties {
warn!(" changed_property = {:?}", p);
}
warn!(
" invalidated_properties = {:?}",
e.invalidated_properties
);
}
}
}
}
true
}
}
fn get_dbus_name_owned_changed_handler(
sender: Sender<Event>,
player_owner_name: String,
) -> impl Fn(DbusNameOwnedChanged, &Connection) -> bool {
move |e: DbusNameOwnedChanged, _: &Connection| {
debug!("DbusNameOwnedChanged happened: {:?}", e);
// The player's unique name lost its owner (new owner is empty), so the
// player has shut down.
if e.name == player_owner_name && e.old_owner == player_owner_name && e.new_owner.is_empty()
{
sender.send(Event::PlayerShutDown).unwrap();
}
true
}
}
pub fn subscribe<'a>(
c: &'a Connection,
player: &str,
sender: &Sender<Event>,
) -> Result<String, String> {
let all_player_buses = query_all_player_buses(&c)?;
let player_bus = format!("{}{}", MPRIS2_PREFIX, player);
if !all_player_buses.contains(&player_bus) {
info!("all players = {:?}", all_player_buses);
return Err("Player not running".to_owned());
}
let player_owner_name = query_unique_owner_name(&c, player_bus)?;
debug!("player_owner_name = {:?}", player_owner_name);
let p = get_connection_proxy(c, &player_owner_name);
p.match_signal(get_dbus_properties_changed_handler(sender.clone()))
.map_err(|e| e.to_string())?;
p.match_signal(get_mediaplayer2_seeked_handler(sender.clone()))
.map_err(|e| e.to_string())?;
// p.match_signal(|_: MediaPlayer2TrackListChangeHappened, _: &Connection| {
// debug!("TrackList happened");
// true
// }).map_err(|e| e.to_string())?;
let proxy_generic_dbus = c.with_proxy(
"org.freedesktop.DBus",
"/org/freedesktop/DBus",
Duration::from_millis(5000),
);
proxy_generic_dbus
.match_signal(get_dbus_name_owned_changed_handler(
sender.clone(),
player_owner_name.clone(),
))
.map_err(|e| e.to_string())?;
Ok(player_owner_name)
} | c.send_with_reply_and_block(get_name_owner, Duration::from_millis(100))
.map_err(|e| e.to_string()) | random_line_split |
player.rs | use std::collections::HashMap;
use std::convert::TryInto;
use std::path::PathBuf;
use std::sync::mpsc::Sender;
use std::time::{Duration, Instant};
use dbus::arg::RefArg;
use dbus::blocking::stdintf::org_freedesktop_dbus::Properties;
use dbus::blocking::BlockingSender;
use dbus::blocking::{Connection, Proxy};
use dbus::{arg, Message};
use log::{debug, info, warn};
use url::Url;
const MPRIS2_PREFIX: &str = "org.mpris.MediaPlayer2.";
const MPRIS2_PATH: &str = "/org/mpris/MediaPlayer2";
type DbusStringMap = HashMap<String, arg::Variant<Box<dyn arg::RefArg>>>;
pub type ConnectionProxy<'a> = Proxy<'a, &'a Connection>;
#[derive(Debug, PartialEq, Eq, Copy, Clone)]
pub enum PlaybackStatus {
Playing,
Paused,
Stopped,
}
#[derive(Clone, Debug)]
pub struct Metadata {
album: Option<String>,
title: String,
artists: Option<Vec<String>>,
file_path: PathBuf,
length: i64,
}
impl Metadata {
#[allow(dead_code)]
pub fn album(&self) -> &Option<String> {
&self.album
}
#[allow(dead_code)]
pub fn title(&self) -> &String {
&self.title
}
#[allow(dead_code)]
pub fn artists(&self) -> &Option<Vec<String>> {
&self.artists
}
pub fn file_path(&self) -> &PathBuf {
&self.file_path
}
#[allow(dead_code)]
pub fn length(&self) -> i64 {
self.length
}
}
#[derive(Debug)]
pub enum Event {
PlayerShutDown,
PlaybackStatusChange(PlaybackStatus),
Seeked { position: Duration },
MetadataChange(Option<Metadata>),
}
#[derive(Debug)]
pub struct Progress {
/// If player is stopped, metadata will be None
metadata: Option<Metadata>,
playback_status: PlaybackStatus,
/// When this Progress was constructed, in order to calculate how old it is.
instant: Instant,
/// Position at the time of construction
position: Duration,
}
impl Progress {
pub fn new(
playback_status: PlaybackStatus,
position: Duration,
metadata: Option<Metadata>,
) -> Progress {
Progress {
metadata,
playback_status,
instant: Instant::now(),
position,
}
}
pub fn metadata(&self) -> &Option<Metadata> {
&self.metadata
}
pub fn take_metadata(self) -> Option<Metadata> {
self.metadata
}
pub fn playback_status(&self) -> PlaybackStatus {
self.playback_status
}
pub fn instant(&self) -> Instant {
self.instant
}
pub fn position(&self) -> Duration {
self.position
}
}
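// Sketch: estimating the current position from a Progress snapshot while
// playing (this is exactly what the `instant` field exists for):
//
// let now_pos = if progress.playback_status() == PlaybackStatus::Playing {
//     progress.position() + progress.instant().elapsed()
// } else {
//     progress.position()
// };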
fn query_player_property<T>(p: &ConnectionProxy, name: &str) -> Result<T, String>
where
for<'b> T: dbus::arg::Get<'b>,
{
p.get("org.mpris.MediaPlayer2.Player", name)
.map_err(|e| e.to_string())
}
pub fn query_player_position(p: &ConnectionProxy) -> Result<Duration, String> {
let v = query_player_property::<i64>(p, "Position")?;
if v < 0 {
panic!("Wrong position value");
}
Ok(Duration::from_micros(v.try_into().unwrap()))
}
fn query_player_playback_status(p: &ConnectionProxy) -> Result<PlaybackStatus, String> |
fn parse_player_metadata<T: arg::RefArg>(
metadata_map: HashMap<String, T>,
) -> Result<Option<Metadata>, String> {
debug!("metadata_map = {:?}", metadata_map);
let file_path_encoded = match metadata_map.get("xesam:url") {
Some(url) => url
.as_str()
.ok_or("url metadata should be string")?
.to_string(),
// If playlist has reached end, new metadata event is sent,
// but it doesn't contain any of the following keys
None => return Ok(None),
};
let file_path_url = Url::parse(&file_path_encoded)
.map_err(|e| format!("invalid format of url metadata: {}", e.to_string()))?;
let file_path = file_path_url
.to_file_path()
.map_err(|_| format!("invalid format of url metadata: {}", file_path_url))?;
let album = metadata_map
.get("xesam:album")
.map(|v| {
v.as_str()
.ok_or("album metadata should be string")
.map(|x| x.to_string())
})
.transpose()?;
let title = metadata_map["xesam:title"]
.as_str()
.ok_or("title metadata should be string")?
.to_string();
let length = metadata_map["mpris:length"]
.as_i64()
.ok_or("length metadata should be i64")?;
let artists = metadata_map
.get("xesam:artist")
.map(|v| {
v.as_iter()
.ok_or("artist metadata should be iterator")?
.next()
.ok_or("artist metadata should contain at least one entry")?
.as_iter()
.ok_or("artist metadata should have nested iterator")?
.map(|x| {
Ok(x.as_str()
.ok_or("artist metadata values should be string")?
.to_string())
})
.collect::<Result<Vec<String>, &'static str>>()
})
.transpose()?;
Ok(Some(Metadata {
album,
title,
artists,
file_path,
length,
}))
}
fn query_player_metadata(p: &ConnectionProxy) -> Result<Option<Metadata>, String> {
query_player_property::<DbusStringMap>(p, "Metadata").and_then(parse_player_metadata)
}
pub fn query_progress(p: &ConnectionProxy) -> Result<Progress, String> {
let playback_status = query_player_playback_status(p)?;
let position = query_player_position(p)?;
let instant = Instant::now();
let metadata = if playback_status != PlaybackStatus::Stopped {
query_player_metadata(p)?
} else {
None
};
Ok(Progress {
metadata,
playback_status,
instant,
position,
})
}
fn parse_playback_status(playback_status: &str) -> PlaybackStatus {
match playback_status {
"Playing" => PlaybackStatus::Playing,
"Paused" => PlaybackStatus::Paused,
"Stopped" => PlaybackStatus::Stopped,
_ => panic!(""),
}
}
fn query_unique_owner_name<S: Into<String>>(c: &Connection, bus_name: S) -> Result<String, String> {
let get_name_owner = Message::new_method_call(
"org.freedesktop.DBus",
"/",
"org.freedesktop.DBus",
"GetNameOwner",
)
.map_err(|e| e.to_string())?
.append1(bus_name.into());
c.send_with_reply_and_block(get_name_owner, Duration::from_millis(100))
.map_err(|e| e.to_string())
.map(|reply| {
reply
.get1()
.expect("GetNameOwner must have name as first member")
})
}
fn query_all_player_buses(c: &Connection) -> Result<Vec<String>, String> {
let list_names = Message::new_method_call(
"org.freedesktop.DBus",
"/",
"org.freedesktop.DBus",
"ListNames",
)?;
let reply = c
.send_with_reply_and_block(list_names, Duration::from_millis(500))
.map_err(|e| e.to_string())?;
let names: arg::Array<&str, _> = reply.read1().map_err(|e| e.to_string())?;
Ok(names
.filter(|name| name.starts_with(MPRIS2_PREFIX))
.map(|str_ref| str_ref.to_owned())
.collect())
}
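// Example: with VLC and Spotify running, ListNames would typically include
// "org.mpris.MediaPlayer2.vlc" and "org.mpris.MediaPlayer2.spotify", so this
// returns those two bus names (all non-MPRIS names are filtered out).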
fn get_message_item_dict(
a: &arg::Variant<Box<dyn arg::RefArg>>,
) -> HashMap<String, Box<&dyn arg::RefArg>> {
let mut it = a.as_iter().unwrap();
let d_variant = it.next().unwrap();
let d_it = d_variant.as_iter().unwrap();
let v = d_it.collect::<Vec<_>>();
v.chunks(2)
.map(|c| {
let key = c[0].as_str().unwrap();
(key.to_string(), Box::new(c[1]))
})
.collect()
}
#[derive(Debug)]
pub struct DbusPropertiesChangedHappened {
pub interface_name: String,
pub changed_properties: DbusStringMap,
pub invalidated_properties: Vec<String>,
}
impl dbus::message::SignalArgs for DbusPropertiesChangedHappened {
const NAME: &'static str = "PropertiesChanged";
const INTERFACE: &'static str = "org.freedesktop.DBus.Properties";
}
impl arg::ReadAll for DbusPropertiesChangedHappened {
fn read(i: &mut arg::Iter) -> Result<Self, arg::TypeMismatchError> {
Ok(Self {
interface_name: i.read()?,
changed_properties: i.read()?,
invalidated_properties: i.read()?,
})
}
}
#[derive(Debug)]
pub struct MediaPlayer2SeekedHappened {
pub position_us: i64,
}
impl dbus::message::SignalArgs for MediaPlayer2SeekedHappened {
const NAME: &'static str = "Seeked";
const INTERFACE: &'static str = "org.mpris.MediaPlayer2.Player";
}
impl arg::ReadAll for MediaPlayer2SeekedHappened {
fn read(i: &mut arg::Iter) -> Result<Self, arg::TypeMismatchError> {
Ok(Self {
position_us: i.read()?,
})
}
}
#[derive(Debug)]
pub struct DbusNameOwnedChanged {
pub name: String,
pub old_owner: String,
pub new_owner: String,
}
impl dbus::message::SignalArgs for DbusNameOwnedChanged {
const NAME: &'static str = "NameOwnerChanged";
const INTERFACE: &'static str = "org.freedesktop.DBus";
}
impl arg::ReadAll for DbusNameOwnedChanged {
fn read(i: &mut arg::Iter) -> Result<Self, arg::TypeMismatchError> {
Ok(Self {
// NameOwnerChanged carries (name, old_owner, new_owner) in wire order,
// so read the fields in that order to keep old and new from being swapped.
name: i.read()?,
old_owner: i.read()?,
new_owner: i.read()?,
})
}
}
pub fn get_connection_proxy<'a>(
c: &'a Connection,
player_owner_name: &'a str,
) -> ConnectionProxy<'a> {
c.with_proxy(player_owner_name, MPRIS2_PATH, Duration::from_millis(5000))
}
fn get_mediaplayer2_seeked_handler(
sender: Sender<Event>,
) -> impl Fn(MediaPlayer2SeekedHappened, &Connection) -> bool {
move |e: MediaPlayer2SeekedHappened, _: &Connection| {
debug!("Seek happened: {:?}", e);
if e.position_us < 0 {
panic!(
"Position value must be positive number, found {}",
e.position_us
);
}
sender
.send(Event::Seeked {
position: Duration::from_micros(e.position_us as u64),
})
.unwrap();
true
}
}
fn get_dbus_properties_changed_handler(
sender: Sender<Event>,
) -> impl Fn(DbusPropertiesChangedHappened, &Connection) -> bool {
move |e: DbusPropertiesChangedHappened, _: &Connection| {
debug!("DBus.Properties happened: {:?}", e);
if e.interface_name == "org.mpris.MediaPlayer2.Player" {
for (k, v) in &e.changed_properties {
match k.as_ref() {
"PlaybackStatus" => {
let playback_status = v.as_str().unwrap();
debug!("playback_status = {:?}", playback_status);
sender
.send(Event::PlaybackStatusChange(parse_playback_status(
&playback_status,
)))
.unwrap();
}
"Metadata" => {
let metadata_map = get_message_item_dict(v);
debug!("metadata_map = {:?}", metadata_map);
let metadata = parse_player_metadata(metadata_map).unwrap();
sender.send(Event::MetadataChange(metadata)).unwrap();
}
_ => {
warn!("Unknown PropertiesChanged event:");
for p in &e.changed_properties {
warn!(" changed_property = {:?}", p);
}
warn!(
" invalidated_properties = {:?}",
e.invalidated_properties
);
}
}
}
}
true
}
}
fn get_dbus_name_owned_changed_handler(
sender: Sender<Event>,
player_owner_name: String,
) -> impl Fn(DbusNameOwnedChanged, &Connection) -> bool {
move |e: DbusNameOwnedChanged, _: &Connection| {
debug!("DbusNameOwnedChanged happened: {:?}", e);
// The player's unique name lost its owner (new owner is empty), so the
// player has shut down.
if e.name == player_owner_name && e.old_owner == player_owner_name && e.new_owner.is_empty()
{
sender.send(Event::PlayerShutDown).unwrap();
}
true
}
}
pub fn subscribe<'a>(
c: &'a Connection,
player: &str,
sender: &Sender<Event>,
) -> Result<String, String> {
let all_player_buses = query_all_player_buses(&c)?;
let player_bus = format!("{}{}", MPRIS2_PREFIX, player);
if !all_player_buses.contains(&player_bus) {
info!("all players = {:?}", all_player_buses);
return Err("Player not running".to_owned());
}
let player_owner_name = query_unique_owner_name(&c, player_bus)?;
debug!("player_owner_name = {:?}", player_owner_name);
let p = get_connection_proxy(c, &player_owner_name);
p.match_signal(get_dbus_properties_changed_handler(sender.clone()))
.map_err(|e| e.to_string())?;
p.match_signal(get_mediaplayer2_seeked_handler(sender.clone()))
.map_err(|e| e.to_string())?;
// p.match_signal(|_: MediaPlayer2TrackListChangeHappened, _: &Connection| {
// debug!("TrackList happened");
// true
// }).map_err(|e| e.to_string())?;
let proxy_generic_dbus = c.with_proxy(
"org.freedesktop.DBus",
"/org/freedesktop/DBus",
Duration::from_millis(5000),
);
proxy_generic_dbus
.match_signal(get_dbus_name_owned_changed_handler(
sender.clone(),
player_owner_name.clone(),
))
.map_err(|e| e.to_string())?;
Ok(player_owner_name)
}
| {
query_player_property::<String>(p, "PlaybackStatus").map(|v| parse_playback_status(&v))
} | identifier_body |
player.rs | use std::collections::HashMap;
use std::convert::TryInto;
use std::path::PathBuf;
use std::sync::mpsc::Sender;
use std::time::{Duration, Instant};
use dbus::arg::RefArg;
use dbus::blocking::stdintf::org_freedesktop_dbus::Properties;
use dbus::blocking::BlockingSender;
use dbus::blocking::{Connection, Proxy};
use dbus::{arg, Message};
use log::{debug, info, warn};
use url::Url;
const MPRIS2_PREFIX: &str = "org.mpris.MediaPlayer2.";
const MPRIS2_PATH: &str = "/org/mpris/MediaPlayer2";
type DbusStringMap = HashMap<String, arg::Variant<Box<dyn arg::RefArg>>>;
pub type ConnectionProxy<'a> = Proxy<'a, &'a Connection>;
#[derive(Debug, PartialEq, Eq, Copy, Clone)]
pub enum PlaybackStatus {
Playing,
Paused,
Stopped,
}
#[derive(Clone, Debug)]
pub struct Metadata {
album: Option<String>,
title: String,
artists: Option<Vec<String>>,
file_path: PathBuf,
length: i64,
}
impl Metadata {
#[allow(dead_code)]
pub fn album(&self) -> &Option<String> {
&self.album
}
#[allow(dead_code)]
pub fn title(&self) -> &String {
&self.title
}
#[allow(dead_code)]
pub fn artists(&self) -> &Option<Vec<String>> {
&self.artists
}
pub fn file_path(&self) -> &PathBuf {
&self.file_path
}
#[allow(dead_code)]
pub fn length(&self) -> i64 {
self.length
}
}
#[derive(Debug)]
pub enum Event {
PlayerShutDown,
PlaybackStatusChange(PlaybackStatus),
Seeked { position: Duration },
MetadataChange(Option<Metadata>),
}
#[derive(Debug)]
pub struct Progress {
/// If player is stopped, metadata will be None
metadata: Option<Metadata>,
playback_status: PlaybackStatus,
/// When this Progress was constructed, in order to calculate how old it is.
instant: Instant,
/// Position at the time of construction
position: Duration,
}
impl Progress {
pub fn new(
playback_status: PlaybackStatus,
position: Duration,
metadata: Option<Metadata>,
) -> Progress {
Progress {
metadata,
playback_status,
instant: Instant::now(),
position,
}
}
pub fn metadata(&self) -> &Option<Metadata> {
&self.metadata
}
pub fn take_metadata(self) -> Option<Metadata> {
self.metadata
}
pub fn playback_status(&self) -> PlaybackStatus {
self.playback_status
}
pub fn instant(&self) -> Instant {
self.instant
}
pub fn | (&self) -> Duration {
self.position
}
}
fn query_player_property<T>(p: &ConnectionProxy, name: &str) -> Result<T, String>
where
for<'b> T: dbus::arg::Get<'b>,
{
p.get("org.mpris.MediaPlayer2.Player", name)
.map_err(|e| e.to_string())
}
pub fn query_player_position(p: &ConnectionProxy) -> Result<Duration, String> {
let v = query_player_property::<i64>(p, "Position")?;
if v < 0 {
panic!("Wrong position value");
}
Ok(Duration::from_micros(v.try_into().unwrap()))
}
fn query_player_playback_status(p: &ConnectionProxy) -> Result<PlaybackStatus, String> {
query_player_property::<String>(p, "PlaybackStatus").map(|v| parse_playback_status(&v))
}
fn parse_player_metadata<T: arg::RefArg>(
metadata_map: HashMap<String, T>,
) -> Result<Option<Metadata>, String> {
debug!("metadata_map = {:?}", metadata_map);
let file_path_encoded = match metadata_map.get("xesam:url") {
Some(url) => url
.as_str()
.ok_or("url metadata should be string")?
.to_string(),
// If playlist has reached end, new metadata event is sent,
// but it doesn't contain any of the following keys
None => return Ok(None),
};
let file_path_url = Url::parse(&file_path_encoded)
.map_err(|e| format!("invalid format of url metadata: {}", e.to_string()))?;
let file_path = file_path_url
.to_file_path()
.map_err(|_| format!("invalid format of url metadata: {}", file_path_url))?;
let album = metadata_map
.get("xesam:album")
.map(|v| {
v.as_str()
.ok_or("album metadata should be string")
.map(|x| x.to_string())
})
.transpose()?;
let title = metadata_map["xesam:title"]
.as_str()
.ok_or("title metadata should be string")?
.to_string();
let length = metadata_map["mpris:length"]
.as_i64()
.ok_or("length metadata should be i64")?;
let artists = metadata_map
.get("xesam:artist")
.map(|v| {
v.as_iter()
.ok_or("artist metadata should be iterator")?
.next()
.ok_or("artist metadata should contain at least one entry")?
.as_iter()
.ok_or("artist metadata should have nested iterator")?
.map(|x| {
Ok(x.as_str()
.ok_or("artist metadata values should be string")?
.to_string())
})
.collect::<Result<Vec<String>, &'static str>>()
})
.transpose()?;
Ok(Some(Metadata {
album,
title,
artists,
file_path,
length,
}))
}
fn query_player_metadata(p: &ConnectionProxy) -> Result<Option<Metadata>, String> {
query_player_property::<DbusStringMap>(p, "Metadata").and_then(parse_player_metadata)
}
pub fn query_progress(p: &ConnectionProxy) -> Result<Progress, String> {
let playback_status = query_player_playback_status(p)?;
let position = query_player_position(p)?;
let instant = Instant::now();
let metadata = if playback_status != PlaybackStatus::Stopped {
query_player_metadata(p)?
} else {
None
};
Ok(Progress {
metadata,
playback_status,
instant,
position,
})
}
fn parse_playback_status(playback_status: &str) -> PlaybackStatus {
match playback_status {
"Playing" => PlaybackStatus::Playing,
"Paused" => PlaybackStatus::Paused,
"Stopped" => PlaybackStatus::Stopped,
_ => panic!(""),
}
}
fn query_unique_owner_name<S: Into<String>>(c: &Connection, bus_name: S) -> Result<String, String> {
let get_name_owner = Message::new_method_call(
"org.freedesktop.DBus",
"/",
"org.freedesktop.DBus",
"GetNameOwner",
)
.map_err(|e| e.to_string())?
.append1(bus_name.into());
c.send_with_reply_and_block(get_name_owner, Duration::from_millis(100))
.map_err(|e| e.to_string())
.map(|reply| {
reply
.get1()
.expect("GetNameOwner must have name as first member")
})
}
fn query_all_player_buses(c: &Connection) -> Result<Vec<String>, String> {
let list_names = Message::new_method_call(
"org.freedesktop.DBus",
"/",
"org.freedesktop.DBus",
"ListNames",
)?;
let reply = c
.send_with_reply_and_block(list_names, Duration::from_millis(500))
.map_err(|e| e.to_string())?;
let names: arg::Array<&str, _> = reply.read1().map_err(|e| e.to_string())?;
Ok(names
.filter(|name| name.starts_with(MPRIS2_PREFIX))
.map(|str_ref| str_ref.to_owned())
.collect())
}
fn get_message_item_dict(
a: &arg::Variant<Box<dyn arg::RefArg>>,
) -> HashMap<String, Box<&dyn arg::RefArg>> {
let mut it = a.as_iter().unwrap();
let d_variant = it.next().unwrap();
let d_it = d_variant.as_iter().unwrap();
let v = d_it.collect::<Vec<_>>();
v.chunks(2)
.map(|c| {
let key = c[0].as_str().unwrap();
(key.to_string(), Box::new(c[1]))
})
.collect()
}
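// For illustration: the variant wraps a dict that iterates as a flat
// [key1, value1, key2, value2, ...] sequence, so chunks(2) re-pairs it, e.g.
// ["xesam:title", Variant("Song"), "mpris:length", Variant(215000000)]
// becomes {"xesam:title": ..., "mpris:length": ...}.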
#[derive(Debug)]
pub struct DbusPropertiesChangedHappened {
pub interface_name: String,
pub changed_properties: DbusStringMap,
pub invalidated_properties: Vec<String>,
}
impl dbus::message::SignalArgs for DbusPropertiesChangedHappened {
const NAME: &'static str = "PropertiesChanged";
const INTERFACE: &'static str = "org.freedesktop.DBus.Properties";
}
impl arg::ReadAll for DbusPropertiesChangedHappened {
fn read(i: &mut arg::Iter) -> Result<Self, arg::TypeMismatchError> {
Ok(Self {
interface_name: i.read()?,
changed_properties: i.read()?,
invalidated_properties: i.read()?,
})
}
}
#[derive(Debug)]
pub struct MediaPlayer2SeekedHappened {
pub position_us: i64,
}
impl dbus::message::SignalArgs for MediaPlayer2SeekedHappened {
const NAME: &'static str = "Seeked";
const INTERFACE: &'static str = "org.mpris.MediaPlayer2.Player";
}
impl arg::ReadAll for MediaPlayer2SeekedHappened {
fn read(i: &mut arg::Iter) -> Result<Self, arg::TypeMismatchError> {
Ok(Self {
position_us: i.read()?,
})
}
}
#[derive(Debug)]
pub struct DbusNameOwnedChanged {
pub name: String,
pub old_owner: String,
pub new_owner: String,
}
impl dbus::message::SignalArgs for DbusNameOwnedChanged {
const NAME: &'static str = "NameOwnerChanged";
const INTERFACE: &'static str = "org.freedesktop.DBus";
}
impl arg::ReadAll for DbusNameOwnedChanged {
fn read(i: &mut arg::Iter) -> Result<Self, arg::TypeMismatchError> {
Ok(Self {
// NameOwnerChanged carries (name, old_owner, new_owner) in wire order,
// so read the fields in that order to keep old and new from being swapped.
name: i.read()?,
old_owner: i.read()?,
new_owner: i.read()?,
})
}
}
pub fn get_connection_proxy<'a>(
c: &'a Connection,
player_owner_name: &'a str,
) -> ConnectionProxy<'a> {
c.with_proxy(player_owner_name, MPRIS2_PATH, Duration::from_millis(5000))
}
fn get_mediaplayer2_seeked_handler(
sender: Sender<Event>,
) -> impl Fn(MediaPlayer2SeekedHappened, &Connection) -> bool {
move |e: MediaPlayer2SeekedHappened, _: &Connection| {
debug!("Seek happened: {:?}", e);
if e.position_us < 0 {
panic!(
"Position value must be positive number, found {}",
e.position_us
);
}
sender
.send(Event::Seeked {
position: Duration::from_micros(e.position_us as u64),
})
.unwrap();
true
}
}
fn get_dbus_properties_changed_handler(
sender: Sender<Event>,
) -> impl Fn(DbusPropertiesChangedHappened, &Connection) -> bool {
move |e: DbusPropertiesChangedHappened, _: &Connection| {
debug!("DBus.Properties happened: {:?}", e);
if e.interface_name == "org.mpris.MediaPlayer2.Player" {
for (k, v) in &e.changed_properties {
match k.as_ref() {
"PlaybackStatus" => {
let playback_status = v.as_str().unwrap();
debug!("playback_status = {:?}", playback_status);
sender
.send(Event::PlaybackStatusChange(parse_playback_status(
&playback_status,
)))
.unwrap();
}
"Metadata" => {
let metadata_map = get_message_item_dict(v);
debug!("metadata_map = {:?}", metadata_map);
let metadata = parse_player_metadata(metadata_map).unwrap();
sender.send(Event::MetadataChange(metadata)).unwrap();
}
_ => {
warn!("Unknown PropertiesChanged event:");
for p in &e.changed_properties {
warn!(" changed_property = {:?}", p);
}
warn!(
" invalidated_properties = {:?}",
e.invalidated_properties
);
}
}
}
}
true
}
}
fn get_dbus_name_owned_changed_handler(
sender: Sender<Event>,
player_owner_name: String,
) -> impl Fn(DbusNameOwnedChanged, &Connection) -> bool {
move |e: DbusNameOwnedChanged, _: &Connection| {
debug!("DbusNameOwnedChanged happened: {:?}", e);
// The player's unique name lost its owner (new owner is empty), so the
// player has shut down.
if e.name == player_owner_name && e.old_owner == player_owner_name && e.new_owner.is_empty()
{
sender.send(Event::PlayerShutDown).unwrap();
}
true
}
}
pub fn subscribe<'a>(
c: &'a Connection,
player: &str,
sender: &Sender<Event>,
) -> Result<String, String> {
let all_player_buses = query_all_player_buses(&c)?;
let player_bus = format!("{}{}", MPRIS2_PREFIX, player);
if !all_player_buses.contains(&player_bus) {
info!("all players = {:?}", all_player_buses);
return Err("Player not running".to_owned());
}
let player_owner_name = query_unique_owner_name(&c, player_bus)?;
debug!("player_owner_name = {:?}", player_owner_name);
let p = get_connection_proxy(c, &player_owner_name);
p.match_signal(get_dbus_properties_changed_handler(sender.clone()))
.map_err(|e| e.to_string())?;
p.match_signal(get_mediaplayer2_seeked_handler(sender.clone()))
.map_err(|e| e.to_string())?;
// p.match_signal(|_: MediaPlayer2TrackListChangeHappened, _: &Connection| {
// debug!("TrackList happened");
// true
// }).map_err(|e| e.to_string())?;
let proxy_generic_dbus = c.with_proxy(
"org.freedesktop.DBus",
"/org/freedesktop/DBus",
Duration::from_millis(5000),
);
proxy_generic_dbus
.match_signal(get_dbus_name_owned_changed_handler(
sender.clone(),
player_owner_name.clone(),
))
.map_err(|e| e.to_string())?;
Ok(player_owner_name)
}
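// Sketch: after subscribing, the caller must pump the connection so the
// registered handlers actually run (timeout and player name are illustrative):
//
// let owner = subscribe(&c, "vlc", &sender)?;
// loop {
//     c.process(Duration::from_millis(200)).map_err(|e| e.to_string())?;
//     while let Ok(event) = receiver.try_recv() {
//         debug!("event = {:?}", event);
//     }
// }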
| position | identifier_name |
initialize.py | #!/usr/bin/env python3
#############################################################
#
# Author: John Turner
# Version: 1.0
# Last Updated: 2/18/2015
#
# This file contains the database initialization script for
# the Recruiter Connect Database. This file will drop, recreate
# and migrate the db, and then insert the data necessary to
# start working, including:
# - Creating a SuperUser
# - Adding the Groups
# - Adding Users to the Groups
#
#############################################################
import random
import os
import datetime
import sys
# Initialize the django environment, import the models
os.environ['DJANGO_SETTINGS_MODULE'] = 'chef.settings'
import django
django.setup()
# Models must be imported after django.setup() so the app registry is ready.
import base_app.models as mod
# grab models that I'll need
from django.contrib.auth.models import Group, Permission, ContentType
from django.db import connection
import subprocess
# Drop database, recreate it, migrate it
cursor = connection.cursor()
cursor.execute('DROP SCHEMA PUBLIC CASCADE')
cursor.execute('CREATE SCHEMA PUBLIC')
subprocess.call([sys.executable, 'manage.py', 'migrate'])
#############################################################################
############################## GROUPS/USERS #################################
#############################################################################
Group.objects.all().delete()
############################# ADMINISTRATORS ################################
# Administrators - have full rights to all system
# - add superuser to this group
group = Group()
group.name= "Administrator"
group.save()
# Add all permissions to Admin group
permissions = Permission.objects.all()
for permission in permissions:
group.permissions.add(permission)
mod.User.objects.all().delete()
# Create superuser with the following credentials:
# - username = admin
# - password: password
# - email = slowe89@me.com
# First create a new address and add it to the superuser
address = mod.Address()
address.address1= '1100 BYU Lane'
address.city = 'Provo'
address.state = 'UT'
address.ZIP = '84606'
address.save()
user = mod.User.objects.create_superuser( username='admin', email='slowe89@me.com', password='password' )
user.first_name= 'Spencer'
user.last_name= 'Lowe'
user.phone= '7073303952'
user.security_question= ''
user.security_answer= ''
user.address = address
user.save()
group.user_set.add(user)
################################# MANAGERS ###################################
# Managers - can add/edit/delete the following:
# - Events
# - Areas
# - Inventory
# - Transactions
group = Group()
group.name = 'Manager'
group.save()
# Add permissions for the Managers
# Get content types first
content_types = ContentType.objects.exclude(app_label='auth').exclude(app_label='admin')
# QuerySet methods return new querysets, so the result must be re-assigned
# (the original call discarded it); ContentType.model stores the lowercased
# model name, hence 'user'.
content_types = content_types.exclude(app_label='base_app', model='user').exclude(app_label='contenttypes').exclude(app_label='sessions')
for content_type in content_types:
permissions = Permission.objects.filter(content_type=content_type)
for permission in permissions:
group.permissions.add(permission)
################################## GUESTS ####################################
# Guests have no permissions; they are used when creating new users
group = Group()
group.name = 'Guest'
group.save()
address = mod.Address()
address.address1 = 'Nowhere'
address.address2 = 'Nowhere'
address.city = 'Nowhere'
address.state = 'UT'
address.ZIP = '84606'
address.save()
address2 = mod.Address()
address2.address1 = '123 Lollipop Ln.'
address2.address2 = ''
address2.city = 'Candy Land'
address2.state = 'UT'
address2.ZIP = '84601'
address2.save()
# Add a couple of guests
for data in [
{'first_name':'Joseph', 'last_name':'Townson', 'email':'easybuttonsales@gmail.com', 'address':address, 'phone':'7134088245', 'security_question':'What is your name?', 'security_answer':'Joseph', 'username':'jobro1', },
{'first_name':'Sarah', 'last_name':'Townson', 'email':'spencer@colonialheritage.info', 'address':address, 'phone':'7134088245', 'security_question':'What is your name?', 'security_answer':'Joseph', 'username':'sarahbro1', }
]:
user = mod.User()
for key in data:
setattr(user, key, data[key])
user.set_password('password')
user.save()
group.user_set.add(user)
#############################################################################
################################ DUMMY DATA #################################
#############################################################################
################################## VENUES ###################################
venue = mod.Venue()
venue.name = "The Park"
venue.address = address2
venue.save()
################################## EVENTS ###################################
event_template = mod.PublicEvent()
event_template.name = "Fun Event"
event_template.description = "A roudy get together for families in the community. We will do things, participate in things, experience things, and generally have a good time. Come one, come all to see the amount of fun you can have when you gather for no real reason."
event_template.save()
event = mod.Event()
event.venue = venue
event.event_template = event_template
event.start_date = "2016-01-01"
event.end_date = "2016-01-03"
event.event_map = "Map goes here"
event.save()
################################## AREAS ####################################
area = mod.Area()
area.name = "Vendors"
area.event = event
area.description = "This is where vendors will be located."
area.place_number = 1
area.coordinator = user
area.supervisor = user
area.save()
################################ INVENTORY ##################################
# Bulk product - musket balls
photo = mod.Photograph()
photo.place_taken = "Colonial Heritage Festival"
photo.image = "products/media/product_images/musket_balls.jpg"
photo.description = "Musket balls for sale!"
photo.photographer = user
photo.save()
specs = mod.ProductSpecification()
specs.name = 'Musket Balls'
specs.price = 2.50
specs.description = 'Made to the exact specifications to match what was shot during the Revolutionary War!'
specs.manufacturer = 'Test manufacturer'
specs.average_cost = 2.50
specs.sku = '111'
specs.order_form_name = 'Test order form'
specs.production_time = 'Test production time'
specs.photograph = photo
specs.save()
inventory = mod.Inventory()
inventory.quantity_on_hand = 400
inventory.shelf_location = 'Corner'
inventory.order_file = 'Test File'
inventory.condition = 'Old'
inventory.specs = specs
inventory.save()
sale_item = mod.ExpectedSaleItem()
sale_item.product_specification = specs
sale_item.high_price = '25.00'
sale_item.low_price = '1.00'
sale_item.area = area
sale_item.save()
############################ SERIALIZED PRODUCT #############################
# Broom
photo = mod.Photograph()
photo.place_taken = "Colonial Heritage Fsetival"
photo.image = "products/media/product_images/broom.jpg"
photo.description = "Broom made by one of our very own artisans!"
photo.photographer = user
photo.save()
specs = mod.ProductSpecification()
specs.name = 'Broom'
specs.price = 23.99
specs.description = 'Bring a colonial flair to your normal chores!'
specs.manufacturer = 'Artisan Allen'
specs.average_cost = 23.99
specs.sku = '111'
specs.order_form_name = 'Test order form'
specs.production_time = 'Test production time'
specs.photograph = photo
specs.save()
product = mod.SerializedProduct()
product.quantity_on_hand = 1
product.shelf_location = 'Back Corner'
product.order_file = 'Test File'
product.condition = 'Ancient'
product.specs = specs
product.serial_number = '2222222'
product.cost = 20.00
product.status = 'Good'
product.save()
################################## ITEM #####################################
# Item for rent
photo = mod.Photograph()
photo.place_taken = "Colonial Heritage Fsetival"
photo.image = "rentals/media/canon.jpg"
photo.description = "A cannon that really fires!"
photo.photographer = user
photo.save()
specs = mod.ProductSpecification()
specs.name = 'Cannon'
specs.price = 9.99
specs.description = 'Cannon lent to us for rent by the Smithsonian'
specs.manufacturer = 'Test manufacturer'
specs.average_cost = 9.99
specs.sku = '111'
specs.order_form_name = 'Test order form'
specs.production_time = 'Test production time'
specs.photograph = photo
specs.save()
rental_item = mod.Item()
rental_item.quantity_on_hand= 1
rental_item.shelf_location = 'Front Corner'
rental_item.order_file = 'Test File'
rental_item.condition = 'New'
rental_item.specs = specs
rental_item.standard_rental_price= 9.99
rental_item.times_rented = 2
rental_item.price_per_day = 9.99
rental_item.replacement_price = 190.00
rental_item.save()
# Item not for rent
specs = mod.ProductSpecification()
specs.name = 'Full-Sized Replica of the Liberty Bell'
specs.price = 20000.0
specs.description = 'Made to the exact dimensions as the actual Liberty Bell. For display only.'
specs.manufacturer = 'Test manufacturer'
specs.average_cost = 2.0
specs.sku = '111'
specs.order_form_name= 'Test order form'
specs.production_time= 'Test production time'
specs.save()
item = mod.Item()
item.quantity_on_hand= 1
item.shelf_location = 'Front Corner'
item.order_file = 'Test File'
item.condition = 'New'
item.specs = specs
item.save()
############################## WARDROBE ITEM ################################
# Man's Jacket
photo = mod.Photograph()
photo.place_taken = "Colonial Heritage Fsetival"
photo.image = "rentals/media/mans_jacket.jpg"
photo.description = "Colonial era man's jacket!"
photo.photographer = user
photo.save()
specs = mod.ProductSpecification()
specs.name = 'Jacket'
specs.price = 4.90
specs.description = "Man's jacket from the 1600's"
specs.manufacturer = 'H&M'
specs.average_cost = 4.90
specs.sku = '111'
specs.order_form_name = 'Test order form'
specs.production_time = 'Test production time'
specs.photograph = photo
specs.save()
wardrobe_item = mod.WardrobeItem()
wardrobe_item.quantity_on_hand= 1
wardrobe_item.shelf_location = 'Front Right Corner'
wardrobe_item.order_file = 'Test File'
wardrobe_item.condition = 'Newest'
wardrobe_item.specs = specs
wardrobe_item.standard_rental_price= 4.90
wardrobe_item.times_rented = 2
wardrobe_item.price_per_day = 4.90
wardrobe_item.replacement_price = 27.99
wardrobe_item.size = 38
wardrobe_item.size_modifier= 'S'
wardrobe_item.gender = 'M'
wardrobe_item.color = 'White'
wardrobe_item.pattern = 'Paisley'
wardrobe_item.start_year = '1677-1-1'
wardrobe_item.end_year = '1678-1-1'
wardrobe_item.save()
# Man's shirt
photo = mod.Photograph()
photo.place_taken = "Colonial Heritage Fsetival"
photo.image = "rentals/media/mans_shirt.jpg"
photo.description = "Colonial era man's shirt!"
photo.photographer = user
photo.save()
specs = mod.ProductSpecification()
specs.name = 'Shirt'
specs.price = 2.98
specs.description = "Man's dress shirt from the 1600's"
specs.manufacturer = 'Banana Republic'
specs.average_cost = 2.98
specs.sku = '111'
specs.order_form_name = 'Test order form'
specs.production_time = 'Test production time'
specs.photograph = photo
specs.save()
wardrobe_item = mod.WardrobeItem()
wardrobe_item.quantity_on_hand= 1
wardrobe_item.shelf_location = 'Front Right Corner'
wardrobe_item.order_file = 'Test File'
wardrobe_item.condition = 'Newest'
wardrobe_item.specs = specs
wardrobe_item.standard_rental_price= 2.98
wardrobe_item.times_rented = 3
wardrobe_item.price_per_day = 2.98
wardrobe_item.replacement_price = 32.50
wardrobe_item.size = 38
wardrobe_item.size_modifier= 'L'
wardrobe_item.gender = 'M'
wardrobe_item.color = 'White'
wardrobe_item.pattern = 'Paisley'
wardrobe_item.start_year = '1677-1-1'
wardrobe_item.end_year = '1678-1-1'
wardrobe_item.save()
#############################################################################
############################## TRANSACTIONS #################################
#############################################################################
for data in [
{'customer': user}
]:
transaction = mod.Transaction() |
for key in data:
setattr(transaction, key, data[key])
transaction.save()
################################# RENTAL #####################################
for data in [
{'date_out':'2000-01-01 00:00:00', 'due_date': '2001-01-01', 'item':item, 'transaction':transaction, 'amount':40.87},
{'date_out':'2014-01-01 00:00:00', 'due_date': '2015-02-19', 'item':wardrobe_item, 'transaction':transaction, 'amount':27.29},
{'date_out':'2014-01-01 00:00:00', 'due_date': '2015-01-20', 'item':wardrobe_item, 'transaction':transaction, 'amount':280.14},
]:
rental = mod.RentalItem()
for key in data:
setattr(rental, key, data[key])
rental.save() | random_line_split | |
initialize.py | #!/usr/bin/env python3
#############################################################
#
# Author: John Turner
# Version: 1.0
# Last Updated: 2/18/2015
#
# This file contains the database initialization script for
# the Recruiter Connect Database. This file will drop, recreate
# and migrate the db, and then insert the data necessary to
# start working, including:
# - Creating a SuperUser
# - Adding the Groups
# - Adding Users to the Groups
#
#############################################################
import random
import os
import datetime
import sys
# Initialize the django environment, import the models
os.environ['DJANGO_SETTINGS_MODULE'] = 'chef.settings'
import django
django.setup()
# Models must be imported after django.setup() so the app registry is ready.
import base_app.models as mod
# grab models that I'll need
from django.contrib.auth.models import Group, Permission, ContentType
from django.db import connection
import subprocess
# Drop database, recreate it, migrate it
cursor = connection.cursor()
cursor.execute('DROP SCHEMA PUBLIC CASCADE')
cursor.execute('CREATE SCHEMA PUBLIC')
subprocess.call([sys.executable, 'manage.py', 'migrate'])
#############################################################################
############################## GROUPS/USERS #################################
#############################################################################
Group.objects.all().delete()
############################# ADMINISTRATORS ################################
# Administrators - have full rights to all system
# - add superuser to this group
group = Group()
group.name= "Administrator"
group.save()
# Add all permissions to Admin group
permissions = Permission.objects.all()
for permission in permissions:
group.permissions.add(permission)
mod.User.objects.all().delete()
# Create superuser with the following credentials:
# - username = admin
# - password: password
# - email = slowe89@me.com
# First create a new address and add it to the superuser
address = mod.Address()
address.address1= '1100 BYU Lane'
address.city = 'Provo'
address.state = 'UT'
address.ZIP = '84606'
address.save()
user = mod.User.objects.create_superuser( username='admin', email='slowe89@me.com', password='password' )
user.first_name= 'Spencer'
user.last_name= 'Lowe'
user.phone= '7073303952'
user.security_question= ''
user.security_answer= ''
user.address = address
user.save()
group.user_set.add(user)
################################# MANAGERS ###################################
# Managers - can add/edit/delete the following:
# - Events
# - Areas
# - Inventory
# - Transactions
group = Group()
group.name = 'Manager'
group.save()
# Add permissions for the Managers
# Get content types first
content_types = ContentType.objects.exclude(app_label='auth').exclude(app_label='admin')
# QuerySet methods return new querysets, so the result must be re-assigned
# (the original call discarded it); ContentType.model stores the lowercased
# model name, hence 'user'.
content_types = content_types.exclude(app_label='base_app', model='user').exclude(app_label='contenttypes').exclude(app_label='sessions')
for content_type in content_types:
permissions = Permission.objects.filter(content_type=content_type)
for permission in permissions:
group.permissions.add(permission)
################################## GUESTS ####################################
# Guests have no permissions; they are used when creating new users
group = Group()
group.name = 'Guest'
group.save()
address = mod.Address()
address.address1 = 'Nowhere'
address.address2 = 'Nowhere'
address.city = 'Nowhere'
address.state = 'UT'
address.ZIP = '84606'
address.save()
address2 = mod.Address()
address2.address1 = '123 Lollipop Ln.'
address2.address2 = ''
address2.city = 'Candy Land'
address2.state = 'UT'
address2.ZIP = '84601'
address2.save()
# Add a couple of guests
for data in [
{'first_name':'Joseph', 'last_name':'Townson', 'email':'easybuttonsales@gmail.com', 'address':address, 'phone':'7134088245', 'security_question':'What is your name?', 'security_answer':'Joseph', 'username':'jobro1', },
{'first_name':'Sarah', 'last_name':'Townson', 'email':'spencer@colonialheritage.info', 'address':address, 'phone':'7134088245', 'security_question':'What is your name?', 'security_answer':'Joseph', 'username':'sarahbro1', }
]:
user = mod.User()
for key in data:
setattr(user, key, data[key])
user.set_password('password')
user.save()
group.user_set.add(user)
#############################################################################
################################ DUMMY DATA #################################
#############################################################################
################################## VENUES ###################################
venue = mod.Venue()
venue.name = "The Park"
venue.address = address2
venue.save()
################################## EVENTS ###################################
event_template = mod.PublicEvent()
event_template.name = "Fun Event"
event_template.description = "A roudy get together for families in the community. We will do things, participate in things, experience things, and generally have a good time. Come one, come all to see the amount of fun you can have when you gather for no real reason."
event_template.save()
event = mod.Event()
event.venue = venue
event.event_template = event_template
event.start_date = "2016-01-01"
event.end_date = "2016-01-03"
event.event_map = "Map goes here"
event.save()
################################## AREAS ####################################
area = mod.Area()
area.name = "Vendors"
area.event = event
area.description = "This is where vendors will be located."
area.place_number = 1
area.coordinator = user
area.supervisor = user
area.save()
################################ INVENTORY ##################################
# Bulk product - musket balls
photo = mod.Photograph()
photo.place_taken = "Colonial Heritage Festival"
photo.image = "products/media/product_images/musket_balls.jpg"
photo.description = "Musket balls for sale!"
photo.photographer = user
photo.save()
specs = mod.ProductSpecification()
specs.name = 'Musket Balls'
specs.price = 2.50
specs.description = 'Made to the exact specifications to match what was shot during the Revolutionary War!'
specs.manufacturer = 'Test manufacturer'
specs.average_cost = 2.50
specs.sku = '111'
specs.order_form_name = 'Test order form'
specs.production_time = 'Test production time'
specs.photograph = photo
specs.save()
inventory = mod.Inventory()
inventory.quantity_on_hand = 400
inventory.shelf_location = 'Corner'
inventory.order_file = 'Test File'
inventory.condition = 'Old'
inventory.specs = specs
inventory.save()
sale_item = mod.ExpectedSaleItem()
sale_item.product_specification = specs
sale_item.high_price = '25.00'
sale_item.low_price = '1.00'
sale_item.area = area
sale_item.save()
############################ SERIALIZED PRODUCT #############################
# Broom
photo = mod.Photograph()
photo.place_taken = "Colonial Heritage Fsetival"
photo.image = "products/media/product_images/broom.jpg"
photo.description = "Broom made by one of our very own artisans!"
photo.photographer = user
photo.save()
specs = mod.ProductSpecification()
specs.name = 'Broom'
specs.price = 23.99
specs.description = 'Bring a colonial flair to your normal chores!'
specs.manufacturer = 'Artisan Allen'
specs.average_cost = 23.99
specs.sku = '111'
specs.order_form_name = 'Test order form'
specs.production_time = 'Test production time'
specs.photograph = photo
specs.save()
product = mod.SerializedProduct()
product.quantity_on_hand = 1
product.shelf_location = 'Back Corner'
product.order_file = 'Test File'
product.condition = 'Ancient'
product.specs = specs
product.serial_number = '2222222'
product.cost = 20.00
product.status = 'Good'
product.save()
################################## ITEM #####################################
# Item for rent
photo = mod.Photograph()
photo.place_taken = "Colonial Heritage Fsetival"
photo.image = "rentals/media/canon.jpg"
photo.description = "A cannon that really fires!"
photo.photographer = user
photo.save()
specs = mod.ProductSpecification()
specs.name = 'Cannon'
specs.price = 9.99
specs.description = 'Cannon lent to us for rent by the Smithsonian'
specs.manufacturer = 'Test manufacturer'
specs.average_cost = 9.99
specs.sku = '111'
specs.order_form_name = 'Test order form'
specs.production_time = 'Test production time'
specs.photograph = photo
specs.save()
rental_item = mod.Item()
rental_item.quantity_on_hand= 1
rental_item.shelf_location = 'Front Corner'
rental_item.order_file = 'Test File'
rental_item.condition = 'New'
rental_item.specs = specs
rental_item.standard_rental_price= 9.99
rental_item.times_rented = 2
rental_item.price_per_day = 9.99
rental_item.replacement_price = 190.00
rental_item.save()
# Item not for rent
specs = mod.ProductSpecification()
specs.name = 'Full-Sized Replica of the Liberty Bell'
specs.price = 20000.0
specs.description = 'Made to the exact dimensions as the actual Liberty Bell. For display only.'
specs.manufacturer = 'Test manufacturer'
specs.average_cost = 2.0
specs.sku = '111'
specs.order_form_name= 'Test order form'
specs.production_time= 'Test production time'
specs.save()
item = mod.Item()
item.quantity_on_hand= 1
item.shelf_location = 'Front Corner'
item.order_file = 'Test File'
item.condition = 'New'
item.specs = specs
item.save()
############################## WARDROBE ITEM ################################
# Man's Jacket
photo = mod.Photograph()
photo.place_taken = "Colonial Heritage Fsetival"
photo.image = "rentals/media/mans_jacket.jpg"
photo.description = "Colonial era man's jacket!"
photo.photographer = user
photo.save()
specs = mod.ProductSpecification()
specs.name = 'Jacket'
specs.price = 4.90
specs.description = "Man's jacket from the 1600's"
specs.manufacturer = 'H&M'
specs.average_cost = 4.90
specs.sku = '111'
specs.order_form_name = 'Test order form'
specs.production_time = 'Test production time'
specs.photograph = photo
specs.save()
wardrobe_item = mod.WardrobeItem()
wardrobe_item.quantity_on_hand= 1
wardrobe_item.shelf_location = 'Front Right Corner'
wardrobe_item.order_file = 'Test File'
wardrobe_item.condition = 'Newest'
wardrobe_item.specs = specs
wardrobe_item.standard_rental_price= 4.90
wardrobe_item.times_rented = 2
wardrobe_item.price_per_day = 4.90
wardrobe_item.replacement_price = 27.99
wardrobe_item.size = 38
wardrobe_item.size_modifier= 'S'
wardrobe_item.gender = 'M'
wardrobe_item.color = 'White'
wardrobe_item.pattern = 'Paisley'
wardrobe_item.start_year = '1677-1-1'
wardrobe_item.end_year = '1678-1-1'
wardrobe_item.save()
# Man's shirt
photo = mod.Photograph()
photo.place_taken = "Colonial Heritage Fsetival"
photo.image = "rentals/media/mans_shirt.jpg"
photo.description = "Colonial era man's shirt!"
photo.photographer = user
photo.save()
specs = mod.ProductSpecification()
specs.name = 'Shirt'
specs.price = 2.98
specs.description = "Man's dress shirt from the 1600's"
specs.manufacturer = 'Banana Republic'
specs.average_cost = 2.98
specs.sku = '111'
specs.order_form_name = 'Test order form'
specs.production_time = 'Test production time'
specs.photograph = photo
specs.save()
wardrobe_item = mod.WardrobeItem()
wardrobe_item.quantity_on_hand= 1
wardrobe_item.shelf_location = 'Front Right Corner'
wardrobe_item.order_file = 'Test File'
wardrobe_item.condition = 'Newest'
wardrobe_item.specs = specs
wardrobe_item.standard_rental_price= 2.98
wardrobe_item.times_rented = 3
wardrobe_item.price_per_day = 2.98
wardrobe_item.replacement_price = 32.50
wardrobe_item.size = 38
wardrobe_item.size_modifier= 'L'
wardrobe_item.gender = 'M'
wardrobe_item.color = 'White'
wardrobe_item.pattern = 'Paisley'
wardrobe_item.start_year = '1677-1-1'
wardrobe_item.end_year = '1678-1-1'
wardrobe_item.save()
#############################################################################
############################## TRANSACTIONS #################################
#############################################################################
for data in [
{'customer': user}
]:
transaction = mod.Transaction()
for key in data:
setattr(transaction, key, data[key])
transaction.save()
################################# RENTAL #####################################
for data in [
{'date_out':'2000-01-01 00:00:00', 'due_date': '2001-01-01', 'item':item, 'transaction':transaction, 'amount':40.87},
{'date_out':'2014-01-01 00:00:00', 'due_date': '2015-02-19', 'item':wardrobe_item, 'transaction':transaction, 'amount':27.29},
{'date_out':'2014-01-01 00:00:00', 'due_date': '2015-01-20', 'item':wardrobe_item, 'transaction':transaction, 'amount':280.14},
]:
rental = mod.RentalItem()
for key in data:
|
rental.save() | setattr(rental, key, data[key]) | conditional_block |
pharmacie-details.ts | import { Component, ViewChild, ElementRef, EventEmitter, ApplicationRef, NgZone } from '@angular/core';
import { ModalController, NavController, NavParams, ToastController, Platform, LoadingController } from 'ionic-angular';
import { SocialSharing, Push, GoogleAnalytics } from 'ionic-native';
// Importantion du provider de Pharmacies
import {PharmaciesProvider} from '../../providers/pharmacies/pharmacies';
// Import the pharmacy opinions provider
import {OpinionsProvider} from '../../providers/opinions/opinions';
// Import the Subscriber provider | import {Pharmacie} from '../../models/pharmacie';
// Import the pharmacy opinion page
import {OpinionPage} from '../opinion/opinion';
// Import the pharmacy opening-hours entry page
import {HoursPage} from '../hours/hours';
declare var google;
declare var $;
declare var _;
declare var moment;
/*
Generated class for the PharmacieDetailsPage page.
See http://ionicframework.com/docs/v2/components/#navigation for more info on
Ionic pages and navigation.
*/
@Component({
templateUrl: 'build/pages/pharmacie-details/pharmacie-details.html',
// Register the Pharmacies and Opinions providers
providers: [PharmaciesProvider, OpinionsProvider, SubscriberProvider]
})
export class PharmacieDetailsPage {
@ViewChild('map') mapElement: ElementRef;
map: any;
id: string;
rs: string;
pharmacie: Pharmacie = new Pharmacie;
loader: any;
options: string;
moment: any;
hours: { mo: string, tu: string, we: string, th: string, fr: string, sa: string, su: string};
constructor(
public modalCtrl: ModalController,
private platform: Platform,
public nav: NavController,
private loadingController: LoadingController,
private navParams: NavParams,
private pharmaciesProvider: PharmaciesProvider,
private opinionsProvider: OpinionsProvider,
private subscriberProvider: SubscriberProvider,
private applicationRef: ApplicationRef,
private zone: NgZone,
private toastController: ToastController) {
this.moment = moment;
this.platform = platform;
this.platform.ready().then(() => {
GoogleAnalytics.trackView('pharmacie-details');
});
// Read the id and business name (rs) from the navigation parameters
this.id = navParams.get('id');
this.rs = navParams.get('rs');
// Show the loading indicator
this.loader = this.loadingController.create({
content: 'Chargement en cours...'
});
this.loader.present();
// Default to the Map segment so the map is shown first (instead of the comments)
this.options = 'map';
// Fetch the pharmacy's details from the API.
pharmaciesProvider.loadDetails(this.id)
.then( pharmacie => {
this.pharmacie = pharmacie;
this.pharmacie.isFavorite = this.isFavorite();
this.pharmacie.favoriteIcon = this.isFavorite() ? 'star' : 'star-outline' ;
this.fetchOpinions(); // Fetch the opinions to get the pharmacy's average rating
this.formatHours();
this.loader.dismiss(); // Hide the loading indicator
this.displayMap(); // Show the map with the pharmacy's location
})
}
// Format the opening hours as HH:mm times
formatHours() {
if (this.pharmacie.hours) {
function addZero(i) {
if (i < 10) {
i = "0" + i;
}
return i;
}
function formatDay(amo, amc, pmo, pmc) {
if (amo == 0 && pmc == 1440)
return 'Ouvert 24h/24';
// No morning hours
if (amo == 0 && amc == 0) {
// No afternoon hours
if (pmo == 0 || pmc == 0)
return 'Fermé';
else
return `${addZero(Math.trunc(pmo/60))}:${addZero(pmo%60)}-${addZero(Math.trunc(pmc/60))}:${addZero(pmc%60)}`;
// Morning hours present
} else {
let hours = `${addZero(Math.trunc(amo/60))}:${addZero(amo%60)}-${addZero(Math.trunc(amc/60))}:${addZero(amc%60)}`;
// Afternoon hours
if (pmo != 0 && pmc != 0)
hours += ` ${addZero(Math.trunc(pmo/60))}:${addZero(pmo%60)}-${addZero(Math.trunc(pmc/60))}:${addZero(pmc%60)}`;
return hours;
}
};
this.hours = {
mo: formatDay(this.pharmacie.hours.mo.amo, this.pharmacie.hours.mo.amc, this.pharmacie.hours.mo.pmo, this.pharmacie.hours.mo.pmc),
tu: formatDay(this.pharmacie.hours.tu.amo, this.pharmacie.hours.tu.amc, this.pharmacie.hours.tu.pmo, this.pharmacie.hours.tu.pmc),
we: formatDay(this.pharmacie.hours.we.amo, this.pharmacie.hours.we.amc, this.pharmacie.hours.we.pmo, this.pharmacie.hours.we.pmc),
th: formatDay(this.pharmacie.hours.th.amo, this.pharmacie.hours.th.amc, this.pharmacie.hours.th.pmo, this.pharmacie.hours.th.pmc),
fr: formatDay(this.pharmacie.hours.fr.amo, this.pharmacie.hours.fr.amc, this.pharmacie.hours.fr.pmo, this.pharmacie.hours.fr.pmc),
sa: formatDay(this.pharmacie.hours.sa.amo, this.pharmacie.hours.sa.amc, this.pharmacie.hours.sa.pmo, this.pharmacie.hours.sa.pmc),
su: formatDay(this.pharmacie.hours.su.amo, this.pharmacie.hours.su.amc, this.pharmacie.hours.su.pmo, this.pharmacie.hours.su.pmc)
};
}
}
// Check localStorage to see whether the pharmacy is one of the favorites
isFavorite(){
if (!localStorage.getItem('favorites'))
return false;
let favorites = JSON.parse(localStorage.getItem('favorites'));
return (favorites.indexOf(this.pharmacie._id) !== -1);
}
// Toggle the pharmacy in the favorites list
toggleFavorite() {
this.platform.ready().then(() => {
GoogleAnalytics.trackEvent('pharmacie-details', 'toggleFavorite', 'Ajout / Retirer la pharmacie des favoris', 1);
});
let $favorites = $('ion-icon#favorites');
let isFavorites = $favorites.attr('class') === 'ion-md-star',
classIcon,
msgToast,
favorites;
// If the pharmacy is already a favorite
if (isFavorites) {
// Unsubscribe the user from the pharmacy's comments
this.subscriberProvider.unsubscribe(this.pharmacie._id);
classIcon = 'ion-md-star-outline';
msgToast = 'Pharmacie retirée des favoris';
// Update the favorites list
favorites = JSON.parse(localStorage.getItem('favorites'));
favorites = favorites.filter((item) => item != this.pharmacie._id);
// If the pharmacy is not yet a favorite
} else {
// Subscribe the user to the pharmacy's comments
this.subscriberProvider.subscribe(this.pharmacie._id);
classIcon = 'ion-md-star';
msgToast = 'Pharmacie ajoutée aux favoris';
if (localStorage.getItem('favorites')) {
favorites = JSON.parse(localStorage.getItem('favorites'));
favorites.push(this.pharmacie._id);
} else {
favorites = [this.pharmacie._id];
}
}
// Save the favorites list to localStorage
localStorage.setItem('favorites', JSON.stringify(favorites));
$favorites.attr('class', classIcon);
let toast = this.toastController.create({
message: msgToast,
duration: 3000,
position: 'middle'
});
toast.present();
}
// Open one of the phone's GPS apps (Google Maps, Waze) to show the route to the pharmacy.
navigateToPharmacie() {
let destination = `${this.pharmacie.loc[1]},${this.pharmacie.loc[0]}`;
if (this.platform.is('ios')){
window.open('maps://?q=' + destination, '_system');
} else {
let label = encodeURI(`${this.pharmacie.numvoie} ${this.pharmacie.typvoie} ${this.pharmacie.voie}, ${this.pharmacie.cpville}`);
window.open('geo:0,0?q=' + destination + '(' + label + ')', '_system');
}
}
// Share the pharmacy's details through an external app
share() {
SocialSharing.share(
`${this.pharmacie.rs}
Adresse : ${this.pharmacie.numvoie} ${this.pharmacie.typvoie} ${this.pharmacie.voie}, ${this.pharmacie.cpville}
Tél: ${this.pharmacie.telephone}
Fax: ${this.pharmacie.fax}`,
this.pharmacie.rs,
'',
''
).then( result => {
console.log(result);
}).catch(err => {
console.log(err);
});
}
// Load the Google Maps view centered on the pharmacy
displayMap() {
this.platform.ready().then(() => {
GoogleAnalytics.trackEvent('pharmacie-details', 'displayMap', 'Affichage de la carte', 1);
});
setTimeout(function() {
// Geographic coordinates of the pharmacy
let latLng = new google.maps.LatLng(this.pharmacie.loc[1], this.pharmacie.loc[0]);
// Map options
let mapOptions = {
center: latLng,
zoom: 14,
mapTypeId: google.maps.MapTypeId.ROADMAP
}
// Create the map
this.map = new google.maps.Map(this.mapElement.nativeElement, mapOptions);
// Add a marker at the pharmacy's location on the map
let marker = new google.maps.Marker({
map: this.map,
//animation: google.maps.Animation.BOUNCE,
//animation: google.maps.Animation.DROP,
icon: './img/marker.png',
position: latLng
});
}.bind(this),0);
}
// Fetch the pharmacy's opinions and compute the average rating.
fetchOpinions() {
this.platform.ready().then(() => {
GoogleAnalytics.trackEvent('pharmacie-details', 'fetchOpinions', 'Afficher la liste des avis', 1);
});
// Fetch the opinions from the API via the Opinions provider's load function.
this.opinionsProvider.load(this.pharmacie._id)
// Assign the promise result to pharmacie.opinions.
.then(opinions => {
// Compute the pharmacy's average rating from the ratings of all opinions.
this.pharmacie.rate = !_.isEmpty(opinions) ? (_.reduce(opinions, (result, item) => {return parseInt(result) + parseInt(item.rate);}, 0 ) / _.size(opinions)) : 0;
this.pharmacie.opinions = opinions;
});
}
displayHours(event) {
}
// Open the form page for adding an opinion on the pharmacy.
openModalNewOpinion(event) {
let modal = this.modalCtrl.create(OpinionPage, { pharmacieId: this.pharmacie._id});
modal.present();
}
// Open the form page for editing the pharmacy's opening hours.
openModalUpdateHours(event) {
let leftEvent = new EventEmitter();
leftEvent.subscribe(data => {
this.pharmaciesProvider.loadDetails(this.id)
.then( pharmacie => {
this.pharmacie = pharmacie;
this.pharmacie.isFavorite = this.isFavorite();
this.pharmacie.favoriteIcon = this.isFavorite() ? 'star' : 'star-outline' ;
this.fetchOpinions(); // Fetch the opinions to get the pharmacy's average rating
this.formatHours();
this.displayMap(); // Show the map with the pharmacy's location
this.applicationRef.tick();
this.zone.run(() => {console.log('enabled time travel');})
})
});
let modal = this.modalCtrl.create(HoursPage, { pharmacie: this.pharmacie, event: leftEvent});
modal.present();
}
} | import {SubscriberProvider} from '../../providers/subscriber/subscriber';
// Import the Pharmacie data model | random_line_split
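The toggleFavorite/isFavorite pair in the row above round-trips an array of IDs through localStorage as JSON. A minimal Python sketch of the same pattern, assuming a plain dict stands in for localStorage; toggle_favorite is a hypothetical helper, not part of the app:

import json

def toggle_favorite(store: dict, pharmacy_id: str) -> str:
    # store emulates localStorage: string keys mapped to string values
    favorites = json.loads(store.get('favorites', '[]'))
    if pharmacy_id in favorites:
        favorites.remove(pharmacy_id)           # same as favorites.filter(item => item != id)
        message = 'removed from favorites'
    else:
        favorites.append(pharmacy_id)
        message = 'added to favorites'
    store['favorites'] = json.dumps(favorites)  # persist the updated list as JSON
    return message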
pharmacie-details.ts | import { Component, ViewChild, ElementRef, EventEmitter, ApplicationRef, NgZone } from '@angular/core';
import { ModalController, NavController, NavParams, ToastController, Platform, LoadingController } from 'ionic-angular';
import { SocialSharing, Push, GoogleAnalytics } from 'ionic-native';
// Import the Pharmacies provider
import {PharmaciesProvider} from '../../providers/pharmacies/pharmacies';
// Import the pharmacy opinions provider
import {OpinionsProvider} from '../../providers/opinions/opinions';
// Import the Subscriber provider
import {SubscriberProvider} from '../../providers/subscriber/subscriber';
// Import the Pharmacie data model
import {Pharmacie} from '../../models/pharmacie';
// Import the pharmacy opinion page
import {OpinionPage} from '../opinion/opinion';
// Import the pharmacy opening-hours entry page
import {HoursPage} from '../hours/hours';
declare var google;
declare var $;
declare var _;
declare var moment;
/*
Generated class for the PharmacieDetailsPage page.
See http://ionicframework.com/docs/v2/components/#navigation for more info on
Ionic pages and navigation.
*/
@Component({
templateUrl: 'build/pages/pharmacie-details/pharmacie-details.html',
// Register the Pharmacies and Opinions providers
providers: [PharmaciesProvider, OpinionsProvider, SubscriberProvider]
})
export class PharmacieDetailsPage {
@ViewChild('map') mapElement: ElementRef;
map: any;
id: string;
rs: string;
pharmacie: Pharmacie = new Pharmacie;
loader: any;
options: string;
moment: any;
hours: { mo: string, tu: string, we: string, th: string, fr: string, sa: string, su: string};
constructor(
public modalCtrl: ModalController,
private platform: Platform,
public nav: NavController,
private loadingController: LoadingController,
private navParams: NavParams,
private pharmaciesProvider: PharmaciesProvider,
private opinionsProvider: OpinionsProvider,
private subscriberProvider: SubscriberProvider,
private applicationRef: ApplicationRef,
private zone: NgZone,
private toastController: ToastController) {
this.moment = moment;
this.platform = platform;
this.platform.ready().then(() => {
GoogleAnalytics.trackView('pharmacie-details');
});
// Read the id and business name (rs) from the navigation parameters
this.id = navParams.get('id');
this.rs = navParams.get('rs');
// Show the loading indicator
this.loader = this.loadingController.create({
content: 'Chargement en cours...'
});
this.loader.present();
// Default to the Map segment so the map is shown first (instead of the comments)
this.options = 'map';
// Fetch the pharmacy's details from the API.
pharmaciesProvider.loadDetails(this.id)
.then( pharmacie => {
this.pharmacie = pharmacie;
this.pharmacie.isFavorite = this.isFavorite();
this.pharmacie.favoriteIcon = this.isFavorite() ? 'star' : 'star-outline' ;
this.fetchOpinions(); // Fetch the opinions to get the pharmacy's average rating
this.formatHours();
this.loader.dismiss(); // Hide the loading indicator
this.displayMap(); // Show the map with the pharmacy's location
})
}
// Format the opening hours as HH:mm times
formatHours() {
if (this.pharmacie.hours) {
function addZero(i) {
if (i < 10) {
i = "0" + i;
}
return i;
}
function formatDay(amo, amc, pmo, pmc) {
if (amo == 0 && pmc == 1440)
return 'Ouvert 24h/24';
// No morning hours
if (amo == 0 && amc == 0) {
// No afternoon hours
if (pmo == 0 || pmc == 0)
return 'Fermé';
else
return `${addZero(Math.trunc(pmo/60))}:${addZero(pmo%60)}-${addZero(Math.trunc(pmc/60))}:${addZero(pmc%60)}`;
// Morning hours present
} else {
let hours = `${addZero(Math.trunc(amo/60))}:${addZero(amo%60)}-${addZero(Math.trunc(amc/60))}:${addZero(amc%60)}`;
// Afternoon hours
if (pmo != 0 && pmc != 0)
hours += ` ${addZero(Math.trunc(pmo/60))}:${addZero(pmo%60)}-${addZero(Math.trunc(pmc/60))}:${addZero(pmc%60)}`;
return hours;
}
};
this.hours = {
mo: formatDay(this.pharmacie.hours.mo.amo, this.pharmacie.hours.mo.amc, this.pharmacie.hours.mo.pmo, this.pharmacie.hours.mo.pmc),
tu: formatDay(this.pharmacie.hours.tu.amo, this.pharmacie.hours.tu.amc, this.pharmacie.hours.tu.pmo, this.pharmacie.hours.tu.pmc),
we: formatDay(this.pharmacie.hours.we.amo, this.pharmacie.hours.we.amc, this.pharmacie.hours.we.pmo, this.pharmacie.hours.we.pmc),
th: formatDay(this.pharmacie.hours.th.amo, this.pharmacie.hours.th.amc, this.pharmacie.hours.th.pmo, this.pharmacie.hours.th.pmc),
fr: formatDay(this.pharmacie.hours.fr.amo, this.pharmacie.hours.fr.amc, this.pharmacie.hours.fr.pmo, this.pharmacie.hours.fr.pmc),
sa: formatDay(this.pharmacie.hours.sa.amo, this.pharmacie.hours.sa.amc, this.pharmacie.hours.sa.pmo, this.pharmacie.hours.sa.pmc),
su: formatDay(this.pharmacie.hours.su.amo, this.pharmacie.hours.su.amc, this.pharmacie.hours.su.pmo, this.pharmacie.hours.su.pmc)
};
}
}
// Check localStorage to see whether the pharmacy is one of the favorites
isFavorite(){
if (!localStorage.getItem('favorites'))
return false;
let favorites = JSON.parse(localStorage.getItem('favorites'));
return (favorites.indexOf(this.pharmacie._id) !== -1);
}
// Toggle the pharmacy in the favorites list
toggleFavorite() {
this.platform.ready().then(() => {
GoogleAnalytics.trackEvent('pharmacie-details', 'toggleFavorite', 'Ajout / Retirer la pharmacie des favoris', 1);
});
let $favorites = $('ion-icon#favorites');
let isFavorites = $favorites.attr('class') === 'ion-md-star',
classIcon,
msgToast,
favorites;
// If the pharmacy is already a favorite
if (isFavorites) {
// On désa | abonne l'utilisateur aux commentaires de la pharmacie
this.subscriberProvider.subscribe(this.pharmacie._id);
classIcon = 'ion-md-star';
msgToast = 'Pharmacie ajoutée aux favoris';
if (localStorage.getItem('favorites')) {
favorites = JSON.parse(localStorage.getItem('favorites'));
favorites.push(this.pharmacie._id);
} else {
favorites = [this.pharmacie._id];
}
}
// Save the favorites list to localStorage
localStorage.setItem('favorites', JSON.stringify(favorites));
$favorites.attr('class', classIcon);
let toast = this.toastController.create({
message: msgToast,
duration: 3000,
position: 'middle'
});
toast.present();
}
// Open one of the phone's GPS apps (Google Maps, Waze) to show the route to the pharmacy.
navigateToPharmacie() {
let destination = `${this.pharmacie.loc[1]},${this.pharmacie.loc[0]}`;
if (this.platform.is('ios')){
window.open('maps://?q=' + destination, '_system');
} else {
let label = encodeURI(`${this.pharmacie.numvoie} ${this.pharmacie.typvoie} ${this.pharmacie.voie}, ${this.pharmacie.cpville}`);
window.open('geo:0,0?q=' + destination + '(' + label + ')', '_system');
}
}
// Share the pharmacy's details through an external app
share() {
SocialSharing.share(
`${this.pharmacie.rs}
Adresse : ${this.pharmacie.numvoie} ${this.pharmacie.typvoie} ${this.pharmacie.voie}, ${this.pharmacie.cpville}
Tél: ${this.pharmacie.telephone}
Fax: ${this.pharmacie.fax}`,
this.pharmacie.rs,
'',
''
).then( result => {
console.log(result);
}).catch(err => {
console.log(err);
});
}
// Load the Google Maps view centered on the pharmacy
displayMap() {
this.platform.ready().then(() => {
GoogleAnalytics.trackEvent('pharmacie-details', 'displayMap', 'Affichage de la carte', 1);
});
setTimeout(function() {
// Geographic coordinates of the pharmacy
let latLng = new google.maps.LatLng(this.pharmacie.loc[1], this.pharmacie.loc[0]);
// Map options
let mapOptions = {
center: latLng,
zoom: 14,
mapTypeId: google.maps.MapTypeId.ROADMAP
}
// Create the map
this.map = new google.maps.Map(this.mapElement.nativeElement, mapOptions);
// Add a marker at the pharmacy's location on the map
let marker = new google.maps.Marker({
map: this.map,
//animation: google.maps.Animation.BOUNCE,
//animation: google.maps.Animation.DROP,
icon: './img/marker.png',
position: latLng
});
}.bind(this),0);
}
// Fetch the pharmacy's opinions and compute the average rating.
fetchOpinions() {
this.platform.ready().then(() => {
GoogleAnalytics.trackEvent('pharmacie-details', 'fetchOpinions', 'Afficher la liste des avis', 1);
});
// Fetch the opinions from the API via the Opinions provider's load function.
this.opinionsProvider.load(this.pharmacie._id)
// Assign the promise result to pharmacie.opinions.
.then(opinions => {
// Compute the pharmacy's average rating from the ratings of all opinions.
this.pharmacie.rate = !_.isEmpty(opinions) ? (_.reduce(opinions, (result, item) => {return parseInt(result) + parseInt(item.rate);}, 0 ) / _.size(opinions)) : 0;
this.pharmacie.opinions = opinions;
});
}
displayHours(event) {
}
// Open the form page for adding an opinion on the pharmacy.
openModalNewOpinion(event) {
let modal = this.modalCtrl.create(OpinionPage, { pharmacieId: this.pharmacie._id});
modal.present();
}
// Open the form page for editing the pharmacy's opening hours.
openModalUpdateHours(event) {
let leftEvent = new EventEmitter();
leftEvent.subscribe(data => {
this.pharmaciesProvider.loadDetails(this.id)
.then( pharmacie => {
this.pharmacie = pharmacie;
this.pharmacie.isFavorite = this.isFavorite();
this.pharmacie.favoriteIcon = this.isFavorite() ? 'star' : 'star-outline' ;
this.fetchOpinions(); // Fetch the opinions to get the pharmacy's average rating
this.formatHours();
this.displayMap(); // Show the map with the pharmacy's location
this.applicationRef.tick();
this.zone.run(() => {console.log('enabled time travel');})
})
});
let modal = this.modalCtrl.create(HoursPage, { pharmacie: this.pharmacie, event: leftEvent});
modal.present();
}
}
| bonne l'utilisateur aux commentaires de la pharmacie
this.subscriberProvider.unsubscribe(this.pharmacie._id);
classIcon = 'ion-md-star-outline';
msgToast = 'Pharmacie retirée des favoris';
// Update the favorites list
favorites = JSON.parse(localStorage.getItem('favorites'));
favorites = favorites.filter((item) => item != this.pharmacie._id);
// If the pharmacy is not yet a favorite
} else {
// On | conditional_block |
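The formatDay helper in the row above treats amo/amc/pmo/pmc as minutes since midnight (0-1440); that semantics is inferred from the code, not documented in the source. A Python port of the same branching, with the special cases kept:

def format_minutes(m: int) -> str:
    # 480 -> "08:00"; mirrors the addZero() zero-padding helper
    return f"{m // 60:02d}:{m % 60:02d}"

def format_day(amo: int, amc: int, pmo: int, pmc: int) -> str:
    if amo == 0 and pmc == 1440:
        return 'open 24h'
    if amo == 0 and amc == 0:                     # no morning hours
        if pmo == 0 or pmc == 0:
            return 'closed'
        return f"{format_minutes(pmo)}-{format_minutes(pmc)}"
    hours = f"{format_minutes(amo)}-{format_minutes(amc)}"
    if pmo != 0 and pmc != 0:                     # afternoon hours too
        hours += f" {format_minutes(pmo)}-{format_minutes(pmc)}"
    return hours

# e.g. format_day(480, 720, 840, 1140) == '08:00-12:00 14:00-19:00'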
pharmacie-details.ts | import { Component, ViewChild, ElementRef, EventEmitter, ApplicationRef, NgZone } from '@angular/core';
import { ModalController, NavController, NavParams, ToastController, Platform, LoadingController } from 'ionic-angular';
import { SocialSharing, Push, GoogleAnalytics } from 'ionic-native';
// Import the Pharmacies provider
import {PharmaciesProvider} from '../../providers/pharmacies/pharmacies';
// Import the pharmacy opinions provider
import {OpinionsProvider} from '../../providers/opinions/opinions';
// Import the Subscriber provider
import {SubscriberProvider} from '../../providers/subscriber/subscriber';
// Import the Pharmacie data model
import {Pharmacie} from '../../models/pharmacie';
// Import the pharmacy opinion page
import {OpinionPage} from '../opinion/opinion';
// Import the pharmacy opening-hours entry page
import {HoursPage} from '../hours/hours';
declare var google;
declare var $;
declare var _;
declare var moment;
/*
Generated class for the PharmacieDetailsPage page.
See http://ionicframework.com/docs/v2/components/#navigation for more info on
Ionic pages and navigation.
*/
@Component({
templateUrl: 'build/pages/pharmacie-details/pharmacie-details.html',
// Register the Pharmacies and Opinions providers
providers: [PharmaciesProvider, OpinionsProvider, SubscriberProvider]
})
export class PharmacieDetailsPage {
@ViewChild('map') mapElement: ElementRef;
map: any;
id: string;
rs: string;
pharmacie: Pharmacie = new Pharmacie;
loader: any;
options: string;
moment: any;
hours: { mo: string, tu: string, we: string, th: string, fr: string, sa: string, su: string};
constructor(
public modalCtrl: ModalController,
private platform: Platform,
public nav: NavController,
private loadingController: LoadingController,
private navParams: NavParams,
private pharmaciesProvider: PharmaciesProvider,
private opinionsProvider: OpinionsProvider,
private subscriberProvider: SubscriberProvider,
private applicationRef: ApplicationRef,
private zone: NgZone,
private toastController: ToastController) {
this.moment = moment;
this.platform = platform;
this.platform.ready().then(() => {
GoogleAnalytics.trackView('pharmacie-details');
});
// Read the id and business name (rs) from the navigation parameters
this.id = navParams.get('id');
this.rs = navParams.get('rs');
// Show the loading indicator
this.loader = this.loadingController.create({
content: 'Chargement en cours...'
});
this.loader.present();
// Default to the Map segment so the map is shown first (instead of the comments)
this.options = 'map';
// Fetch the pharmacy's details from the API.
pharmaciesProvider.loadDetails(this.id)
.then( pharmacie => {
this.pharmacie = pharmacie;
this.pharmacie.isFavorite = this.isFavorite();
this.pharmacie.favoriteIcon = this.isFavorite() ? 'star' : 'star-outline' ;
this.fetchOpinions(); // Fetch the opinions to get the pharmacy's average rating
this.formatHours();
this.loader.dismiss(); // Hide the loading indicator
this.displayMap(); // Show the map with the pharmacy's location
})
}
// Format the opening hours as HH:mm times
formatHours() | his.pharmacie.hours) {
function addZero(i) {
if (i < 10) {
i = "0" + i;
}
return i;
}
function formatDay(amo, amc, pmo, pmc) {
if (amo == 0 && pmc == 1440)
return 'Ouvert 24h/24';
// No morning hours
if (amo == 0 && amc == 0) {
// No afternoon hours
if (pmo == 0 || pmc == 0)
return 'Fermé';
else
return `${addZero(Math.trunc(pmo/60))}:${addZero(pmo%60)}-${addZero(Math.trunc(pmc/60))}:${addZero(pmc%60)}`;
// Morning hours present
} else {
let hours = `${addZero(Math.trunc(amo/60))}:${addZero(amo%60)}-${addZero(Math.trunc(amc/60))}:${addZero(amc%60)}`;
// Afternoon hours
if (pmo != 0 && pmc != 0)
hours += ` ${addZero(Math.trunc(pmo/60))}:${addZero(pmo%60)}-${addZero(Math.trunc(pmc/60))}:${addZero(pmc%60)}`;
return hours;
}
};
this.hours = {
mo: formatDay(this.pharmacie.hours.mo.amo, this.pharmacie.hours.mo.amc, this.pharmacie.hours.mo.pmo, this.pharmacie.hours.mo.pmc),
tu: formatDay(this.pharmacie.hours.tu.amo, this.pharmacie.hours.tu.amc, this.pharmacie.hours.tu.pmo, this.pharmacie.hours.tu.pmc),
we: formatDay(this.pharmacie.hours.we.amo, this.pharmacie.hours.we.amc, this.pharmacie.hours.we.pmo, this.pharmacie.hours.we.pmc),
th: formatDay(this.pharmacie.hours.th.amo, this.pharmacie.hours.th.amc, this.pharmacie.hours.th.pmo, this.pharmacie.hours.th.pmc),
fr: formatDay(this.pharmacie.hours.fr.amo, this.pharmacie.hours.fr.amc, this.pharmacie.hours.fr.pmo, this.pharmacie.hours.fr.pmc),
sa: formatDay(this.pharmacie.hours.sa.amo, this.pharmacie.hours.sa.amc, this.pharmacie.hours.sa.pmo, this.pharmacie.hours.sa.pmc),
su: formatDay(this.pharmacie.hours.su.amo, this.pharmacie.hours.su.amc, this.pharmacie.hours.su.pmo, this.pharmacie.hours.su.pmc)
};
}
}
// Check localStorage to see whether the pharmacy is one of the favorites
isFavorite(){
if (!localStorage.getItem('favorites'))
return false;
let favorites = JSON.parse(localStorage.getItem('favorites'));
return (favorites.indexOf(this.pharmacie._id) !== -1);
}
// Toggle the pharmacy in the favorites list
toggleFavorite() {
this.platform.ready().then(() => {
GoogleAnalytics.trackEvent('pharmacie-details', 'toggleFavorite', 'Ajout / Retirer la pharmacie des favoris', 1);
});
let $favorites = $('ion-icon#favorites');
let isFavorites = $favorites.attr('class') === 'ion-md-star',
classIcon,
msgToast,
favorites;
// If the pharmacy is already a favorite
if (isFavorites) {
// Unsubscribe the user from the pharmacy's comments
this.subscriberProvider.unsubscribe(this.pharmacie._id);
classIcon = 'ion-md-star-outline';
msgToast = 'Pharmacie retirée des favoris';
// Update the favorites list
favorites = JSON.parse(localStorage.getItem('favorites'));
favorites = favorites.filter((item) => item != this.pharmacie._id);
// If the pharmacy is not yet a favorite
} else {
// Subscribe the user to the pharmacy's comments
this.subscriberProvider.subscribe(this.pharmacie._id);
classIcon = 'ion-md-star';
msgToast = 'Pharmacie ajoutée aux favoris';
if (localStorage.getItem('favorites')) {
favorites = JSON.parse(localStorage.getItem('favorites'));
favorites.push(this.pharmacie._id);
} else {
favorites = [this.pharmacie._id];
}
}
// Save the favorites list to localStorage
localStorage.setItem('favorites', JSON.stringify(favorites));
$favorites.attr('class', classIcon);
let toast = this.toastController.create({
message: msgToast,
duration: 3000,
position: 'middle'
});
toast.present();
}
// Open one of the phone's GPS apps (Google Maps, Waze) to show the route to the pharmacy.
navigateToPharmacie() {
let destination = `${this.pharmacie.loc[1]},${this.pharmacie.loc[0]}`;
if (this.platform.is('ios')){
window.open('maps://?q=' + destination, '_system');
} else {
let label = encodeURI(`${this.pharmacie.numvoie} ${this.pharmacie.typvoie} ${this.pharmacie.voie}, ${this.pharmacie.cpville}`);
window.open('geo:0,0?q=' + destination + '(' + label + ')', '_system');
}
}
// Share the pharmacy's details through an external app
share() {
SocialSharing.share(
`${this.pharmacie.rs}
Adresse : ${this.pharmacie.numvoie} ${this.pharmacie.typvoie} ${this.pharmacie.voie}, ${this.pharmacie.cpville}
Tél: ${this.pharmacie.telephone}
Fax: ${this.pharmacie.fax}`,
this.pharmacie.rs,
'',
''
).then( result => {
console.log(result);
}).catch(err => {
console.log(err);
});
}
// Load the Google Maps view centered on the pharmacy
displayMap() {
this.platform.ready().then(() => {
GoogleAnalytics.trackEvent('pharmacie-details', 'displayMap', 'Affichage de la carte', 1);
});
setTimeout(function() {
// Geographic coordinates of the pharmacy
let latLng = new google.maps.LatLng(this.pharmacie.loc[1], this.pharmacie.loc[0]);
// Map options
let mapOptions = {
center: latLng,
zoom: 14,
mapTypeId: google.maps.MapTypeId.ROADMAP
}
// Create the map
this.map = new google.maps.Map(this.mapElement.nativeElement, mapOptions);
// Add a marker at the pharmacy's location on the map
let marker = new google.maps.Marker({
map: this.map,
//animation: google.maps.Animation.BOUNCE,
//animation: google.maps.Animation.DROP,
icon: './img/marker.png',
position: latLng
});
}.bind(this),0);
}
// Fetch the pharmacy's opinions and compute the average rating.
fetchOpinions() {
this.platform.ready().then(() => {
GoogleAnalytics.trackEvent('pharmacie-details', 'fetchOpinions', 'Afficher la liste des avis', 1);
});
// Fetch the opinions from the API via the Opinions provider's load function.
this.opinionsProvider.load(this.pharmacie._id)
// Assign the promise result to pharmacie.opinions.
.then(opinions => {
// Compute the pharmacy's average rating from the ratings of all opinions.
this.pharmacie.rate = !_.isEmpty(opinions) ? (_.reduce(opinions, (result, item) => {return parseInt(result) + parseInt(item.rate);}, 0 ) / _.size(opinions)) : 0;
this.pharmacie.opinions = opinions;
});
}
displayHours(event) {
}
// Open the form page for adding an opinion on the pharmacy.
openModalNewOpinion(event) {
let modal = this.modalCtrl.create(OpinionPage, { pharmacieId: this.pharmacie._id});
modal.present();
}
// Open the form page for editing the pharmacy's opening hours.
openModalUpdateHours(event) {
let leftEvent = new EventEmitter();
leftEvent.subscribe(data => {
this.pharmaciesProvider.loadDetails(this.id)
.then( pharmacie => {
this.pharmacie = pharmacie;
this.pharmacie.isFavorite = this.isFavorite();
this.pharmacie.favoriteIcon = this.isFavorite() ? 'star' : 'star-outline' ;
this.fetchOpinions(); // Fetch the opinions to get the pharmacy's average rating
this.formatHours();
this.displayMap(); // Show the map with the pharmacy's location
this.applicationRef.tick();
this.zone.run(() => {console.log('enabled time travel');})
})
});
let modal = this.modalCtrl.create(HoursPage, { pharmacie: this.pharmacie, event: leftEvent});
modal.present();
}
}
| {
if (t | identifier_name |
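fetchOpinions in the rows above computes the average rating with a reduce over parseInt(item.rate), falling back to 0 when there are no opinions. The same computation in Python, assuming each opinion carries a string 'rate' field as the parseInt calls imply:

def average_rate(opinions: list) -> float:
    # Mirrors the _.reduce(...) / _.size(...) expression in fetchOpinions
    if not opinions:
        return 0
    return sum(int(o['rate']) for o in opinions) / len(opinions)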
did.rs | #[cfg(feature = "alloc")]
use alloc::string::String;
#[cfg(feature = "alloc")]
use alloc::string::ToString as _;
use core::cmp::Ordering;
use core::convert::TryFrom;
use core::fmt::Debug;
use core::fmt::Display;
use core::fmt::Formatter;
use core::fmt::Result as FmtResult;
use core::hash::Hash;
use core::hash::Hasher;
use core::str::FromStr;
use crate::core::Core;
use crate::error::Error;
use crate::error::Result;
#[derive(Clone, Copy)]
pub struct Inspect<'a>(&'a DID);
impl Debug for Inspect<'_> {
fn fmt(&self, f: &mut Formatter) -> FmtResult {
f.debug_struct("DID")
.field("method", &self.0.method())
.field("method_id", &self.0.method_id())
.field("path", &self.0.path())
.field("query", &self.0.query())
.field("fragment", &self.0.fragment())
.finish()
}
}
/// A Decentralized Identifier (DID).
///
/// [More Info (W3C DID Core)](https://www.w3.org/TR/did-core/)
#[derive(Clone)]
#[cfg_attr(feature = "serde", derive(Deserialize, Serialize))]
#[cfg_attr(feature = "serde", serde(into = "String", try_from = "String"))]
pub struct DID {
data: String,
core: Core,
}
impl DID {
/// The URL scheme for Decentralized Identifiers.
pub const SCHEME: &'static str = "did";
/// Parses a [`DID`] from the provided `input`.
///
/// # Errors
///
/// Returns `Err` if any DID segments are invalid.
pub fn parse(input: impl AsRef<str>) -> Result<Self> {
Ok(Self {
data: input.as_ref().to_string(),
core: Core::parse(input)?,
})
}
/// Returns a wrapped `DID` with a more detailed `Debug` implementation.
#[inline]
pub const fn inspect(&self) -> Inspect {
Inspect(self)
}
/// Returns the serialized [`DID`].
///
/// This is fast since the serialized value is stored in the [`DID`].
#[inline]
pub fn as_str(&self) -> &str {
&*self.data
}
/// Consumes the [`DID`] and returns the serialization.
#[cfg(feature = "alloc")]
#[inline]
pub fn into_string(self) -> String {
self.data
}
/// Returns the [`DID`] scheme. See [`DID::SCHEME`].
#[inline]
pub const fn scheme(&self) -> &'static str {
DID::SCHEME
}
/// Returns the [`DID`] authority.
#[inline]
pub fn authority(&self) -> &str {
self.core.authority(self.as_str())
}
/// Returns the [`DID`] method name.
#[inline]
pub fn method(&self) -> &str {
self.core.method(self.as_str())
}
/// Returns the [`DID`] method-specific ID.
#[inline]
pub fn method_id(&self) -> &str {
self.core.method_id(self.as_str())
}
/// Returns the [`DID`] path.
#[inline]
pub fn path(&self) -> &str {
self.core.path(self.as_str())
}
/// Returns the [`DID`] method query, if any.
#[inline]
pub fn query(&self) -> Option<&str> {
self.core.query(self.as_str())
}
/// Returns the [`DID`] method fragment, if any.
#[inline]
pub fn fragment(&self) -> Option<&str> {
self.core.fragment(self.as_str())
}
/// Parses the [`DID`] query and returns an iterator of (key, value) pairs.
#[inline]
pub fn query_pairs(&self) -> form_urlencoded::Parse {
self.core.query_pairs(self.as_str())
}
/// Change the method of the [`DID`].
#[inline]
pub fn set_method(&mut self, value: impl AsRef<str>) {
self.core.set_method(&mut self.data, value.as_ref());
}
/// Change the method-specific-id of the [`DID`].
#[inline]
pub fn set_method_id(&mut self, value: impl AsRef<str>) {
self.core.set_method_id(&mut self.data, value.as_ref());
}
/// Change the path of the [`DID`].
#[inline]
pub fn set_path(&mut self, value: impl AsRef<str>) {
self.core.set_path(&mut self.data, value.as_ref());
}
/// Change the query of the [`DID`].
///
/// No serialization is performed.
#[inline]
pub fn set_query(&mut self, value: Option<&str>) {
self.core.set_query(&mut self.data, value);
}
/// Change the fragment of the [`DID`].
///
/// No serialization is performed.
#[inline]
pub fn set_fragment(&mut self, value: Option<&str>) {
self.core.set_fragment(&mut self.data, value);
}
/// Creates a new [`DID`] by joining `self` with the relative DID `other`.
///
/// # Errors
///
/// Returns `Err` if any base or relative DID segments are invalid.
#[cfg(feature = "alloc")]
pub fn join(&self, other: impl AsRef<str>) -> Result<Self> {
let data: &str = other.as_ref();
let core: Core = Core::parse_relative(data)?;
resolution::transform_references(self, (data, &core))
}
}
impl Hash for DID {
fn hash<H>(&self, hasher: &mut H)
where
H: Hasher,
{
self.as_str().hash(hasher)
}
}
impl PartialEq for DID {
fn eq(&self, other: &Self) -> bool {
self.as_str() == other.as_str()
}
}
impl Eq for DID {}
impl PartialOrd for DID {
fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
self.as_str().partial_cmp(other.as_str())
}
}
impl Ord for DID {
fn cmp(&self, other: &Self) -> Ordering {
self.as_str().cmp(other.as_str())
}
}
impl PartialEq<str> for DID {
fn eq(&self, other: &str) -> bool {
self.as_str() == other
}
}
impl PartialEq<&'_ str> for DID {
fn eq(&self, other: &&'_ str) -> bool {
self == *other
}
}
impl Debug for DID {
fn fmt(&self, f: &mut Formatter) -> FmtResult {
f.write_fmt(format_args!("{:?}", self.as_str()))
}
}
impl Display for DID {
fn fmt(&self, f: &mut Formatter) -> FmtResult {
f.write_fmt(format_args!("{}", self.as_str()))
}
}
impl AsRef<str> for DID {
fn as_ref(&self) -> &str {
self.data.as_ref()
}
}
impl FromStr for DID {
type Err = Error;
fn from_str(string: &str) -> Result<Self, Self::Err> {
Self::parse(string)
}
}
#[cfg(feature = "alloc")]
impl TryFrom<String> for DID {
type Error = Error;
fn try_from(other: String) -> Result<Self, Self::Error> {
Self::parse(other)
}
}
#[cfg(feature = "alloc")]
impl From<DID> for String {
fn from(other: DID) -> Self {
other.into_string()
}
}
// =============================================================================
// Reference Resolution
// See RFC 3986 - https://tools.ietf.org/html/rfc3986#section-5
// =============================================================================
#[cfg(feature = "alloc")]
mod resolution {
use alloc::borrow::Cow;
use core::fmt::Display;
use core::fmt::Formatter;
use core::fmt::Result as FmtResult;
use core::str::from_utf8_unchecked;
use crate::core::Core;
use crate::did::DID;
use crate::error::Error;
use crate::error::Result;
#[derive(Debug)]
#[repr(transparent)]
pub struct Path<'a>(Cow<'a, str>);
impl<'a> Path<'a> {
pub const fn new() -> Self {
Self(Cow::Borrowed(""))
}
pub fn push(&mut self, value: impl AsRef<[u8]>) {
self
.0
.to_mut()
.push_str(unsafe { from_utf8_unchecked(value.as_ref()) });
}
pub fn pop(&mut self) {
if self.0.is_empty() {
return;
}
if let Some(index) = self.0.rfind('/') {
self.0.to_mut().replace_range(index.., "");
}
}
}
impl<'a> From<Path<'a>> for Cow<'a, str> {
fn from(other: Path<'a>) -> Self {
other.0
}
}
impl Display for Path<'_> {
fn fmt(&self, f: &mut Formatter) -> FmtResult {
Display::fmt(&self.0, f)
}
}
/// Transform References.
///
/// Transforms a DID reference into its target DID.
///
/// [More Info](https://tools.ietf.org/html/rfc3986#section-5.2.2)
#[allow(non_snake_case)]
pub fn transform_references(base: &DID, (data, core): (&str, &Core)) -> Result<DID> {
let P: &str = core.path(data);
let Q: Option<&str> = core.query(data);
let mut T: DID = base.clone();
if P.is_empty() {
T.set_path(base.path());
T.set_query(Q.or_else(|| base.query()));
} else {
if P.starts_with('/') {
T.set_path(remove_dot_segments(P));
} else {
T.set_path(remove_dot_segments(&merge_paths(base, P)?));
}
T.set_query(Q);
}
T.set_method(base.method()); // TODO: Remove? This is inherited via clone
T.set_method_id(base.method_id()); // TODO: Remove? This is inherited via clone
T.set_fragment(core.fragment(data));
Ok(T)
}
/// Merge Paths.
///
/// Merges a relative-path reference with the path of the base DID.
///
/// [More Info](https://tools.ietf.org/html/rfc3986#section-5.2.3)
pub fn merge_paths<'a>(base: &'a DID, data: &'a str) -> Result<Cow<'a, str>> {
// Ensure the base DID has an authority component.
//
// The DID authority is `<method>:<method-specific-id>` so it should always
// be present for non-relative DIDs.
if base.method().is_empty() || base.method_id().is_empty() {
return Err(Error::InvalidAuthority);
}
// 1. If the base URI has a defined authority component and an empty
// path, then return a string consisting of "/" concatenated with the
// reference's path.
if base.path().is_empty() {
return Ok(data.into());
}
// 2. Return a string consisting of the reference's path component
// appended to all but the last segment of the base URI's path (i.e.,
// excluding any characters after the right-most "/" in the base URI
// path, or excluding the entire base URI path if it does not contain
// any "/" characters).
let mut path: &str = base.path();
if let Some(index) = path.rfind('/') {
path = &path[..=index];
}
Ok([path, data].join("").into())
}
/// Remove Dot Segments.
///
/// [More Info](https://tools.ietf.org/html/rfc3986#section-5.2.4)
pub fn remove_dot_segments(path: &str) -> Cow<str> {
fn next_segment(input: impl AsRef<[u8]>) -> Option<usize> {
match input.as_ref() {
[b'/', input @ ..] => next_segment(input).map(|index| index + 1),
input => input.iter().position(|byte| *byte == b'/'),
}
}
let mut output: Path = Path::new();
let mut input: &[u8] = path.as_bytes();
loop {
match input {
// Remove prefix ../
[b'.', b'.', b'/', ..] => {
input = &input[3..];
}
// Remove prefix ./
[b'.', b'/', ..] => {
input = &input[2..];
}
// Replace prefix /./
[b'/', b'.', b'/', ..] => |
// Replace prefix /.
[b'/', b'.'] => {
input = &input[..1];
}
// Replace prefix /../
[b'/', b'.', b'.', b'/', ..] => {
input = &input[3..];
output.pop();
}
// Replace prefix /..
[b'/', b'.', b'.'] => {
input = &input[..2];
output.pop();
}
// Remove .
[b'.'] => {
input = &input[1..];
}
// Remove ..
[b'.', b'.'] => {
input = &input[2..];
}
_ => {
if let Some(index) = next_segment(input) {
output.push(&input[..index]);
input = &input[index..];
} else {
output.push(input);
break;
}
}
}
}
output.into()
}
}
| {
input = &input[2..];
} | conditional_block |
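The match-based loop in remove_dot_segments above is RFC 3986 section 5.2.4. A compact Python rendering of the same algorithm, as one possible sketch, handy for spot-checking cases such as the RFC's '/a/b/c/./../../g', which reduces to '/a/g':

def remove_dot_segments(path: str) -> str:
    output = []
    while path:
        if path.startswith('../'):       # remove prefix ../
            path = path[3:]
        elif path.startswith('./'):      # remove prefix ./
            path = path[2:]
        elif path.startswith('/./'):     # replace prefix /./ with /
            path = '/' + path[3:]
        elif path == '/.':
            path = '/'
        elif path.startswith('/../'):    # replace prefix /../ with / and pop
            path = '/' + path[4:]
            if output:
                output.pop()
        elif path == '/..':
            path = '/'
            if output:
                output.pop()
        elif path in ('.', '..'):        # bare . or .. disappears
            path = ''
        else:                            # move the first segment to the output
            cut = path.find('/', 1)
            if cut == -1:
                output.append(path)
                path = ''
            else:
                output.append(path[:cut])
                path = path[cut:]
    return ''.join(output)

# e.g. remove_dot_segments('/a/b/c/./../../g') == '/a/g'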
did.rs | #[cfg(feature = "alloc")]
use alloc::string::String;
#[cfg(feature = "alloc")]
use alloc::string::ToString as _;
use core::cmp::Ordering;
use core::convert::TryFrom;
use core::fmt::Debug;
use core::fmt::Display;
use core::fmt::Formatter;
use core::fmt::Result as FmtResult;
use core::hash::Hash;
use core::hash::Hasher;
use core::str::FromStr;
use crate::core::Core;
use crate::error::Error;
use crate::error::Result;
#[derive(Clone, Copy)]
pub struct Inspect<'a>(&'a DID);
impl Debug for Inspect<'_> {
fn fmt(&self, f: &mut Formatter) -> FmtResult {
f.debug_struct("DID")
.field("method", &self.0.method())
.field("method_id", &self.0.method_id())
.field("path", &self.0.path())
.field("query", &self.0.query())
.field("fragment", &self.0.fragment())
.finish()
}
}
/// A Decentralized Identifier (DID).
///
/// [More Info (W3C DID Core)](https://www.w3.org/TR/did-core/)
#[derive(Clone)]
#[cfg_attr(feature = "serde", derive(Deserialize, Serialize))]
#[cfg_attr(feature = "serde", serde(into = "String", try_from = "String"))]
pub struct DID {
data: String,
core: Core,
}
impl DID {
/// The URL scheme for Decentralized Identifiers.
pub const SCHEME: &'static str = "did";
/// Parses a [`DID`] from the provided `input`.
///
/// # Errors
///
/// Returns `Err` if any DID segments are invalid.
pub fn parse(input: impl AsRef<str>) -> Result<Self> {
Ok(Self {
data: input.as_ref().to_string(),
core: Core::parse(input)?,
})
}
/// Returns a wrapped `DID` with a more detailed `Debug` implementation.
#[inline]
pub const fn inspect(&self) -> Inspect {
Inspect(self)
}
/// Returns the serialized [`DID`].
///
/// This is fast since the serialized value is stored in the [`DID`].
#[inline]
pub fn as_str(&self) -> &str {
&*self.data
}
/// Consumes the [`DID`] and returns the serialization.
#[cfg(feature = "alloc")]
#[inline]
pub fn into_string(self) -> String {
self.data
}
/// Returns the [`DID`] scheme. See [`DID::SCHEME`].
#[inline]
pub const fn scheme(&self) -> &'static str {
DID::SCHEME
}
/// Returns the [`DID`] authority.
#[inline]
pub fn authority(&self) -> &str {
self.core.authority(self.as_str())
}
/// Returns the [`DID`] method name.
#[inline]
pub fn method(&self) -> &str {
self.core.method(self.as_str())
}
/// Returns the [`DID`] method-specific ID.
#[inline]
pub fn method_id(&self) -> &str {
self.core.method_id(self.as_str())
}
/// Returns the [`DID`] path.
#[inline]
pub fn path(&self) -> &str {
self.core.path(self.as_str())
}
/// Returns the [`DID`] method query, if any.
#[inline]
pub fn query(&self) -> Option<&str> {
self.core.query(self.as_str())
}
/// Returns the [`DID`] method fragment, if any.
#[inline]
pub fn fragment(&self) -> Option<&str> {
self.core.fragment(self.as_str())
}
/// Parses the [`DID`] query and returns an iterator of (key, value) pairs.
#[inline]
pub fn query_pairs(&self) -> form_urlencoded::Parse {
self.core.query_pairs(self.as_str())
}
/// Change the method of the [`DID`].
#[inline]
pub fn set_method(&mut self, value: impl AsRef<str>) {
self.core.set_method(&mut self.data, value.as_ref());
}
/// Change the method-specific-id of the [`DID`].
#[inline]
pub fn set_method_id(&mut self, value: impl AsRef<str>) {
self.core.set_method_id(&mut self.data, value.as_ref());
}
/// Change the path of the [`DID`].
#[inline]
pub fn set_path(&mut self, value: impl AsRef<str>) {
self.core.set_path(&mut self.data, value.as_ref());
}
/// Change the query of the [`DID`].
///
/// No serialization is performed.
#[inline]
pub fn set_query(&mut self, value: Option<&str>) {
self.core.set_query(&mut self.data, value);
}
/// Change the fragment of the [`DID`].
///
/// No serialization is performed.
#[inline]
pub fn set_fragment(&mut self, value: Option<&str>) {
self.core.set_fragment(&mut self.data, value);
}
/// Creates a new [`DID`] by joining `self` with the relative DID `other`.
///
/// # Errors
///
/// Returns `Err` if any base or relative DID segments are invalid.
#[cfg(feature = "alloc")]
pub fn join(&self, other: impl AsRef<str>) -> Result<Self> {
let data: &str = other.as_ref();
let core: Core = Core::parse_relative(data)?;
resolution::transform_references(self, (data, &core))
}
}
impl Hash for DID {
fn hash<H>(&self, hasher: &mut H)
where
H: Hasher,
{
self.as_str().hash(hasher)
}
}
impl PartialEq for DID {
fn eq(&self, other: &Self) -> bool {
self.as_str() == other.as_str()
}
}
impl Eq for DID {}
impl PartialOrd for DID {
fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
self.as_str().partial_cmp(other.as_str())
}
}
impl Ord for DID {
fn cmp(&self, other: &Self) -> Ordering {
self.as_str().cmp(other.as_str())
}
}
impl PartialEq<str> for DID {
fn eq(&self, other: &str) -> bool {
self.as_str() == other
}
}
impl PartialEq<&'_ str> for DID {
fn eq(&self, other: &&'_ str) -> bool {
self == *other
}
}
impl Debug for DID {
fn fmt(&self, f: &mut Formatter) -> FmtResult |
}
impl Display for DID {
fn fmt(&self, f: &mut Formatter) -> FmtResult {
f.write_fmt(format_args!("{}", self.as_str()))
}
}
impl AsRef<str> for DID {
fn as_ref(&self) -> &str {
self.data.as_ref()
}
}
impl FromStr for DID {
type Err = Error;
fn from_str(string: &str) -> Result<Self, Self::Err> {
Self::parse(string)
}
}
#[cfg(feature = "alloc")]
impl TryFrom<String> for DID {
type Error = Error;
fn try_from(other: String) -> Result<Self, Self::Error> {
Self::parse(other)
}
}
#[cfg(feature = "alloc")]
impl From<DID> for String {
fn from(other: DID) -> Self {
other.into_string()
}
}
// =============================================================================
// Reference Resolution
// See RFC 3986 - https://tools.ietf.org/html/rfc3986#section-5
// =============================================================================
#[cfg(feature = "alloc")]
mod resolution {
use alloc::borrow::Cow;
use core::fmt::Display;
use core::fmt::Formatter;
use core::fmt::Result as FmtResult;
use core::str::from_utf8_unchecked;
use crate::core::Core;
use crate::did::DID;
use crate::error::Error;
use crate::error::Result;
#[derive(Debug)]
#[repr(transparent)]
pub struct Path<'a>(Cow<'a, str>);
impl<'a> Path<'a> {
pub const fn new() -> Self {
Self(Cow::Borrowed(""))
}
pub fn push(&mut self, value: impl AsRef<[u8]>) {
self
.0
.to_mut()
.push_str(unsafe { from_utf8_unchecked(value.as_ref()) });
}
pub fn pop(&mut self) {
if self.0.is_empty() {
return;
}
if let Some(index) = self.0.rfind('/') {
self.0.to_mut().replace_range(index.., "");
}
}
}
impl<'a> From<Path<'a>> for Cow<'a, str> {
fn from(other: Path<'a>) -> Self {
other.0
}
}
impl Display for Path<'_> {
fn fmt(&self, f: &mut Formatter) -> FmtResult {
Display::fmt(&self.0, f)
}
}
/// Transform References.
///
/// Transforms a DID reference into its target DID.
///
/// [More Info](https://tools.ietf.org/html/rfc3986#section-5.2.2)
#[allow(non_snake_case)]
pub fn transform_references(base: &DID, (data, core): (&str, &Core)) -> Result<DID> {
let P: &str = core.path(data);
let Q: Option<&str> = core.query(data);
let mut T: DID = base.clone();
if P.is_empty() {
T.set_path(base.path());
T.set_query(Q.or_else(|| base.query()));
} else {
if P.starts_with('/') {
T.set_path(remove_dot_segments(P));
} else {
T.set_path(remove_dot_segments(&merge_paths(base, P)?));
}
T.set_query(Q);
}
T.set_method(base.method()); // TODO: Remove? This is inherited via clone
T.set_method_id(base.method_id()); // TODO: Remove? This is inherited via clone
T.set_fragment(core.fragment(data));
Ok(T)
}
/// Merge Paths.
///
/// Merges a relative-path reference with the path of the base DID.
///
/// [More Info](https://tools.ietf.org/html/rfc3986#section-5.2.3)
pub fn merge_paths<'a>(base: &'a DID, data: &'a str) -> Result<Cow<'a, str>> {
// Ensure the base DID has an authority component.
//
// The DID authority is `<method>:<method-specific-id>` so it should always
// be present for non-relative DIDs.
if base.method().is_empty() || base.method_id().is_empty() {
return Err(Error::InvalidAuthority);
}
// 1. If the base URI has a defined authority component and an empty
// path, then return a string consisting of "/" concatenated with the
// reference's path.
if base.path().is_empty() {
return Ok(data.into());
}
// 2. Return a string consisting of the reference's path component
// appended to all but the last segment of the base URI's path (i.e.,
// excluding any characters after the right-most "/" in the base URI
// path, or excluding the entire base URI path if it does not contain
// any "/" characters).
let mut path: &str = base.path();
if let Some(index) = path.rfind('/') {
path = &path[..=index];
}
Ok([path, data].join("").into())
}
/// Remove Dot Segments.
///
/// [More Info](https://tools.ietf.org/html/rfc3986#section-5.2.4)
pub fn remove_dot_segments(path: &str) -> Cow<str> {
fn next_segment(input: impl AsRef<[u8]>) -> Option<usize> {
match input.as_ref() {
[b'/', input @ ..] => next_segment(input).map(|index| index + 1),
input => input.iter().position(|byte| *byte == b'/'),
}
}
let mut output: Path = Path::new();
let mut input: &[u8] = path.as_bytes();
loop {
match input {
// Remove prefix ../
[b'.', b'.', b'/', ..] => {
input = &input[3..];
}
// Remove prefix ./
[b'.', b'/', ..] => {
input = &input[2..];
}
// Replace prefix /./
[b'/', b'.', b'/', ..] => {
input = &input[2..];
}
// Replace prefix /.
[b'/', b'.'] => {
input = &input[..1];
}
// Replace prefix /../
[b'/', b'.', b'.', b'/', ..] => {
input = &input[3..];
output.pop();
}
// Replace prefix /..
[b'/', b'.', b'.'] => {
input = &input[..2];
output.pop();
}
// Remove .
[b'.'] => {
input = &input[1..];
}
// Remove ..
[b'.', b'.'] => {
input = &input[2..];
}
_ => {
if let Some(index) = next_segment(input) {
output.push(&input[..index]);
input = &input[index..];
} else {
output.push(input);
break;
}
}
}
}
output.into()
}
}
| {
f.write_fmt(format_args!("{:?}", self.as_str()))
} | identifier_body |
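merge_paths above implements RFC 3986 section 5.2.3 for DIDs. A Python mirror of the code as written; note that, unlike the RFC text quoted in its comments, the Rust code neither prepends '/' in step 1 nor drops a base path that contains no '/':

def merge_paths(base_path: str, ref_path: str) -> str:
    # Step 1: with a DID authority present and an empty base path,
    # the Rust code returns the reference path unchanged.
    if base_path == '':
        return ref_path
    # Step 2: keep the base path up to and including its right-most '/',
    # then append the reference path.
    cut = base_path.rfind('/')
    head = base_path[:cut + 1] if cut != -1 else base_path
    return head + ref_path

# e.g. merge_paths('/path/to/resource', 'other') == '/path/to/other'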
did.rs | #[cfg(feature = "alloc")]
use alloc::string::String;
#[cfg(feature = "alloc")]
use alloc::string::ToString as _;
use core::cmp::Ordering;
use core::convert::TryFrom;
use core::fmt::Debug;
use core::fmt::Display;
use core::fmt::Formatter;
use core::fmt::Result as FmtResult;
use core::hash::Hash;
use core::hash::Hasher;
use core::str::FromStr;
use crate::core::Core;
use crate::error::Error;
use crate::error::Result;
#[derive(Clone, Copy)]
pub struct Inspect<'a>(&'a DID);
impl Debug for Inspect<'_> {
fn fmt(&self, f: &mut Formatter) -> FmtResult {
f.debug_struct("DID")
.field("method", &self.0.method())
.field("method_id", &self.0.method_id())
.field("path", &self.0.path())
.field("query", &self.0.query())
.field("fragment", &self.0.fragment())
.finish()
}
}
/// A Decentralized Identifier (DID).
///
/// [More Info (W3C DID Core)](https://www.w3.org/TR/did-core/)
#[derive(Clone)]
#[cfg_attr(feature = "serde", derive(Deserialize, Serialize))]
#[cfg_attr(feature = "serde", serde(into = "String", try_from = "String"))]
pub struct DID {
data: String,
core: Core,
}
impl DID {
/// The URL scheme for Decentralized Identifiers.
pub const SCHEME: &'static str = "did";
/// Parses a [`DID`] from the provided `input`.
///
/// # Errors
///
/// Returns `Err` if any DID segments are invalid.
pub fn parse(input: impl AsRef<str>) -> Result<Self> {
Ok(Self {
data: input.as_ref().to_string(),
core: Core::parse(input)?,
})
}
/// Returns a wrapped `DID` with a more detailed `Debug` implementation.
#[inline]
pub const fn inspect(&self) -> Inspect {
Inspect(self)
}
/// Returns the serialized [`DID`].
///
/// This is fast since the serialized value is stored in the [`DID`].
#[inline]
pub fn as_str(&self) -> &str {
&*self.data
}
/// Consumes the [`DID`] and returns the serialization.
#[cfg(feature = "alloc")]
#[inline]
pub fn into_string(self) -> String {
self.data
}
/// Returns the [`DID`] scheme. See [`DID::SCHEME`].
#[inline]
pub const fn scheme(&self) -> &'static str {
DID::SCHEME
}
/// Returns the [`DID`] authority.
#[inline]
pub fn authority(&self) -> &str {
self.core.authority(self.as_str())
}
/// Returns the [`DID`] method name.
#[inline]
pub fn method(&self) -> &str {
self.core.method(self.as_str())
}
/// Returns the [`DID`] method-specific ID.
#[inline]
pub fn method_id(&self) -> &str {
self.core.method_id(self.as_str())
}
/// Returns the [`DID`] path. | self.core.path(self.as_str())
}
/// Returns the [`DID`] method query, if any.
#[inline]
pub fn query(&self) -> Option<&str> {
self.core.query(self.as_str())
}
/// Returns the [`DID`] method fragment, if any.
#[inline]
pub fn fragment(&self) -> Option<&str> {
self.core.fragment(self.as_str())
}
/// Parses the [`DID`] query and returns an iterator of (key, value) pairs.
#[inline]
pub fn query_pairs(&self) -> form_urlencoded::Parse {
self.core.query_pairs(self.as_str())
}
/// Change the method of the [`DID`].
#[inline]
pub fn set_method(&mut self, value: impl AsRef<str>) {
self.core.set_method(&mut self.data, value.as_ref());
}
/// Change the method-specific-id of the [`DID`].
#[inline]
pub fn set_method_id(&mut self, value: impl AsRef<str>) {
self.core.set_method_id(&mut self.data, value.as_ref());
}
/// Change the path of the [`DID`].
#[inline]
pub fn set_path(&mut self, value: impl AsRef<str>) {
self.core.set_path(&mut self.data, value.as_ref());
}
/// Change the query of the [`DID`].
///
/// No serialization is performed.
#[inline]
pub fn set_query(&mut self, value: Option<&str>) {
self.core.set_query(&mut self.data, value);
}
/// Change the fragment of the [`DID`].
///
/// No serialization is performed.
#[inline]
pub fn set_fragment(&mut self, value: Option<&str>) {
self.core.set_fragment(&mut self.data, value);
}
/// Creates a new [`DID`] by joining `self` with the relative DID `other`.
///
/// # Errors
///
/// Returns `Err` if any base or relative DID segments are invalid.
#[cfg(feature = "alloc")]
pub fn join(&self, other: impl AsRef<str>) -> Result<Self> {
let data: &str = other.as_ref();
let core: Core = Core::parse_relative(data)?;
resolution::transform_references(self, (data, &core))
}
}
impl Hash for DID {
fn hash<H>(&self, hasher: &mut H)
where
H: Hasher,
{
self.as_str().hash(hasher)
}
}
impl PartialEq for DID {
fn eq(&self, other: &Self) -> bool {
self.as_str() == other.as_str()
}
}
impl Eq for DID {}
impl PartialOrd for DID {
fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
self.as_str().partial_cmp(other.as_str())
}
}
impl Ord for DID {
fn cmp(&self, other: &Self) -> Ordering {
self.as_str().cmp(other.as_str())
}
}
impl PartialEq<str> for DID {
fn eq(&self, other: &str) -> bool {
self.as_str() == other
}
}
impl PartialEq<&'_ str> for DID {
fn eq(&self, other: &&'_ str) -> bool {
self == *other
}
}
impl Debug for DID {
fn fmt(&self, f: &mut Formatter) -> FmtResult {
f.write_fmt(format_args!("{:?}", self.as_str()))
}
}
impl Display for DID {
fn fmt(&self, f: &mut Formatter) -> FmtResult {
f.write_fmt(format_args!("{}", self.as_str()))
}
}
impl AsRef<str> for DID {
fn as_ref(&self) -> &str {
self.data.as_ref()
}
}
impl FromStr for DID {
type Err = Error;
fn from_str(string: &str) -> Result<Self, Self::Err> {
Self::parse(string)
}
}
#[cfg(feature = "alloc")]
impl TryFrom<String> for DID {
type Error = Error;
fn try_from(other: String) -> Result<Self, Self::Error> {
Self::parse(other)
}
}
#[cfg(feature = "alloc")]
impl From<DID> for String {
fn from(other: DID) -> Self {
other.into_string()
}
}
// =============================================================================
// Reference Resolution
// See RFC 3986 - https://tools.ietf.org/html/rfc3986#section-5
// =============================================================================
#[cfg(feature = "alloc")]
mod resolution {
use alloc::borrow::Cow;
use core::fmt::Display;
use core::fmt::Formatter;
use core::fmt::Result as FmtResult;
use core::str::from_utf8_unchecked;
use crate::core::Core;
use crate::did::DID;
use crate::error::Error;
use crate::error::Result;
#[derive(Debug)]
#[repr(transparent)]
pub struct Path<'a>(Cow<'a, str>);
impl<'a> Path<'a> {
pub const fn new() -> Self {
Self(Cow::Borrowed(""))
}
pub fn push(&mut self, value: impl AsRef<[u8]>) {
self
.0
.to_mut()
.push_str(unsafe { from_utf8_unchecked(value.as_ref()) });
}
pub fn pop(&mut self) {
if self.0.is_empty() {
return;
}
if let Some(index) = self.0.rfind('/') {
self.0.to_mut().replace_range(index.., "");
}
}
}
impl<'a> From<Path<'a>> for Cow<'a, str> {
fn from(other: Path<'a>) -> Self {
other.0
}
}
impl Display for Path<'_> {
fn fmt(&self, f: &mut Formatter) -> FmtResult {
Display::fmt(&self.0, f)
}
}
/// Transform References.
///
/// Transforms a DID reference into its target DID.
///
/// [More Info](https://tools.ietf.org/html/rfc3986#section-5.2.2)
#[allow(non_snake_case)]
pub fn transform_references(base: &DID, (data, core): (&str, &Core)) -> Result<DID> {
let P: &str = core.path(data);
let Q: Option<&str> = core.query(data);
let mut T: DID = base.clone();
if P.is_empty() {
T.set_path(base.path());
T.set_query(Q.or_else(|| base.query()));
} else {
if P.starts_with('/') {
T.set_path(remove_dot_segments(P));
} else {
T.set_path(remove_dot_segments(&merge_paths(base, P)?));
}
T.set_query(Q);
}
T.set_method(base.method()); // TODO: Remove? This is inherited via clone
T.set_method_id(base.method_id()); // TODO: Remove? This is inherited via clone
T.set_fragment(core.fragment(data));
Ok(T)
}
/// Merge Paths.
///
/// Merges a relative-path reference with the path of the base DID.
///
/// [More Info](https://tools.ietf.org/html/rfc3986#section-5.2.3)
pub fn merge_paths<'a>(base: &'a DID, data: &'a str) -> Result<Cow<'a, str>> {
// Ensure the base DID has an authority component.
//
// The DID authority is `<method>:<method-specific-id>` so it should always
// be present for non-relative DIDs.
if base.method().is_empty() || base.method_id().is_empty() {
return Err(Error::InvalidAuthority);
}
// 1. If the base URI has a defined authority component and an empty
// path, then return a string consisting of "/" concatenated with the
// reference's path.
if base.path().is_empty() {
return Ok(data.into());
}
// 2. Return a string consisting of the reference's path component
// appended to all but the last segment of the base URI's path (i.e.,
// excluding any characters after the right-most "/" in the base URI
// path, or excluding the entire base URI path if it does not contain
// any "/" characters).
let mut path: &str = base.path();
if let Some(index) = path.rfind('/') {
path = &path[..=index];
}
Ok([path, data].join("").into())
}
/// Remove Dot Segments.
///
/// [More Info](https://tools.ietf.org/html/rfc3986#section-5.2.4)
pub fn remove_dot_segments(path: &str) -> Cow<str> {
fn next_segment(input: impl AsRef<[u8]>) -> Option<usize> {
match input.as_ref() {
[b'/', input @ ..] => next_segment(input).map(|index| index + 1),
input => input.iter().position(|byte| *byte == b'/'),
}
}
let mut output: Path = Path::new();
let mut input: &[u8] = path.as_bytes();
loop {
match input {
// Remove prefix ../
[b'.', b'.', b'/', ..] => {
input = &input[3..];
}
// Remove prefix ./
[b'.', b'/', ..] => {
input = &input[2..];
}
// Replace prefix /./
[b'/', b'.', b'/', ..] => {
input = &input[2..];
}
// Replace prefix /.
[b'/', b'.'] => {
input = &input[..1];
}
// Replace prefix /../
[b'/', b'.', b'.', b'/', ..] => {
input = &input[3..];
output.pop();
}
// Replace prefix /..
[b'/', b'.', b'.'] => {
input = &input[..2];
output.pop();
}
// Remove .
[b'.'] => {
input = &input[1..];
}
// Remove ..
[b'.', b'.'] => {
input = &input[2..];
}
_ => {
if let Some(index) = next_segment(input) {
output.push(&input[..index]);
input = &input[index..];
} else {
output.push(input);
break;
}
}
}
}
output.into()
}
} | #[inline]
pub fn path(&self) -> &str { | random_line_split |