text stringlengths 8 4.13M |
|---|
//! Contains the types of results returned by CRUD operations.
use std::collections::{HashMap, VecDeque};
use crate::{
bson::{serde_helpers, Bson, Document},
change_stream::event::ResumeToken,
db::options::CreateCollectionOptions,
serde_util,
Namespace,
};
use bson::{Binary, RawDocumentBuf};
use serde::{Deserialize, Serialize};
/// The result of a [`Collection::insert_one`](../struct.Collection.html#method.insert_one)
/// operation.
#[derive(Debug, Serialize)]
#[serde(rename_all = "camelCase")]
#[non_exhaustive]
pub struct InsertOneResult {
    /// The `_id` field of the document inserted.
    ///
    /// If the document did not specify an `_id`, this holds the
    /// driver/server-generated one.
    pub inserted_id: Bson,
}
impl InsertOneResult {
    /// Builds an `InsertOneResult` from a bulk-insert result by taking the
    /// `_id` recorded for the first (index 0) document.
    ///
    /// Consumes `result`, so the `_id` is moved out of the map with `remove`
    /// rather than cloned. Falls back to `Bson::Null` when no id was recorded
    /// at index 0.
    pub(crate) fn from_insert_many_result(mut result: InsertManyResult) -> Self {
        Self {
            // `remove` avoids an unnecessary clone since `result` is owned here.
            inserted_id: result.inserted_ids.remove(&0).unwrap_or(Bson::Null),
        }
    }
}
/// The result of a [`Collection::insert_many`](../struct.Collection.html#method.insert_many)
/// operation.
#[derive(Debug, Serialize)]
#[serde(rename_all = "camelCase")]
#[non_exhaustive]
pub struct InsertManyResult {
    /// The `_id` field of the documents inserted.
    ///
    /// Keyed by the index of each document within the original insert request.
    pub inserted_ids: HashMap<usize, Bson>,
}

impl InsertManyResult {
    /// Creates an empty result; ids are added as documents are inserted.
    pub(crate) fn new() -> Self {
        InsertManyResult {
            inserted_ids: HashMap::new(),
        }
    }
}
/// The result of a [`Collection::update_one`](../struct.Collection.html#method.update_one) or
/// [`Collection::update_many`](../struct.Collection.html#method.update_many) operation.
#[derive(Debug, Serialize)]
#[serde(rename_all = "camelCase")]
#[non_exhaustive]
pub struct UpdateResult {
    /// The number of documents that matched the filter.
    // Serialized as i64 because BSON has no unsigned 64-bit integer type.
    #[serde(serialize_with = "crate::bson::serde_helpers::serialize_u64_as_i64")]
    pub matched_count: u64,
    /// The number of documents that were modified by the operation.
    #[serde(serialize_with = "crate::bson::serde_helpers::serialize_u64_as_i64")]
    pub modified_count: u64,
    /// The `_id` field of the upserted document.
    ///
    /// `None` unless the operation performed an upsert.
    pub upserted_id: Option<Bson>,
}

/// The result of a [`Collection::delete_one`](../struct.Collection.html#method.delete_one) or
/// [`Collection::delete_many`](../struct.Collection.html#method.delete_many) operation.
#[derive(Debug, Serialize)]
#[serde(rename_all = "camelCase")]
#[non_exhaustive]
pub struct DeleteResult {
    /// The number of documents deleted by the operation.
    // Serialized as i64 because BSON has no unsigned 64-bit integer type.
    #[serde(serialize_with = "crate::bson::serde_helpers::serialize_u64_as_i64")]
    pub deleted_count: u64,
}
/// Information about the index created as a result of a
/// [`Collection::create_index`](../struct.Collection.html#method.create_index).
#[derive(Debug, Clone, PartialEq)]
#[non_exhaustive]
pub struct CreateIndexResult {
    /// The name of the index created in the `createIndex` command.
    pub index_name: String,
}

/// Information about the indexes created as a result of a
/// [`Collection::create_indexes`](../struct.Collection.html#method.create_indexes).
#[derive(Debug, Clone, PartialEq)]
#[non_exhaustive]
pub struct CreateIndexesResult {
    /// The list containing the names of all indexes created in the `createIndexes` command.
    pub index_names: Vec<String>,
}

impl CreateIndexesResult {
    /// Converts this multi-index result into a single-index result by taking
    /// the first index name.
    ///
    /// # Panics
    ///
    /// Panics if `index_names` is empty; callers must only invoke this when
    /// the `createIndexes` command created at least one index.
    pub(crate) fn into_create_index_result(self) -> CreateIndexResult {
        CreateIndexResult {
            index_name: self
                .index_names
                .into_iter()
                .next()
                // `expect` documents the invariant instead of a bare unwrap.
                .expect("createIndexes should return at least one index name"),
        }
    }
}
// Internal result of a `getMore` command on an open cursor.
#[derive(Debug, Clone)]
pub(crate) struct GetMoreResult {
    // The batch of raw documents returned by this `getMore`.
    pub(crate) batch: VecDeque<RawDocumentBuf>,
    // Whether the cursor is finished — presumably set when the server reports
    // the cursor as closed; confirm at the construction site.
    pub(crate) exhausted: bool,
    // Change-stream resume token accompanying the batch, if any.
    pub(crate) post_batch_resume_token: Option<ResumeToken>,
    // The namespace (database + collection) the cursor iterates.
    pub(crate) ns: Namespace,
    // The server-side cursor id.
    pub(crate) id: i64,
}
/// Describes the type of data store returned when executing
/// [`Database::list_collections`](../struct.Database.html#method.list_collections).
// Serialized in camelCase to match the server's `type` field values.
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
#[serde(rename_all = "camelCase")]
#[non_exhaustive]
pub enum CollectionType {
    /// Indicates that the data store is a view.
    View,
    /// Indicates that the data store is a collection.
    Collection,
    /// Indicates that the data store is a timeseries.
    Timeseries,
}
/// Info about the collection that is contained in the `CollectionSpecification::info` field of a
/// specification returned from
/// [`Database::list_collections`](../struct.Database.html#method.list_collections).
///
/// See the MongoDB [manual](https://www.mongodb.com/docs/manual/reference/command/listCollections/#listCollections.cursor)
/// for more information.
#[derive(Debug, Clone, Deserialize, Serialize)]
#[serde(rename_all = "camelCase")]
#[non_exhaustive]
pub struct CollectionSpecificationInfo {
    /// Indicates whether or not the data store is read-only.
    pub read_only: bool,
    /// The collection's UUID - once established, this does not change and remains the same across
    /// replica set members and shards in a sharded cluster. If the data store is a view, this
    /// field is `None`.
    pub uuid: Option<Binary>,
}

/// Information about a collection as reported by
/// [`Database::list_collections`](../struct.Database.html#method.list_collections).
#[derive(Debug, Clone, Deserialize, Serialize)]
#[serde(rename_all = "camelCase")]
#[non_exhaustive]
pub struct CollectionSpecification {
    /// The name of the collection.
    pub name: String,
    /// Type of the data store.
    // The server reports this field as `type`, which is a Rust keyword.
    #[serde(rename = "type")]
    pub collection_type: CollectionType,
    /// The options used to create the collection.
    pub options: CreateCollectionOptions,
    /// Additional info pertaining to the collection.
    pub info: CollectionSpecificationInfo,
    /// Provides information on the _id index for the collection
    /// For views, this is `None`.
    pub id_index: Option<Document>,
}
/// A struct modeling the information about an individual database returned from
/// [`Client::list_databases`](../struct.Client.html#method.list_databases).
#[derive(Debug, Clone, Deserialize, Serialize, PartialEq)]
#[serde(rename_all = "camelCase")]
#[non_exhaustive]
pub struct DatabaseSpecification {
    /// The name of the database.
    pub name: String,
    /// The amount of disk space in bytes that is consumed by the database.
    // BSON has no u64, so custom helpers bridge to/from the server's numeric
    // representation (deserialized from any BSON number, serialized as i64).
    #[serde(
        deserialize_with = "serde_util::deserialize_u64_from_bson_number",
        serialize_with = "serde_helpers::serialize_u64_as_i64"
    )]
    pub size_on_disk: u64,
    /// Whether the database has any data.
    pub empty: bool,
    /// For sharded clusters, this field includes a document which maps each shard to the size in
    /// bytes of the database on disk on that shard. For non sharded environments, this field
    /// is `None`.
    pub shards: Option<Document>,
}
|
use chrono::{DateTime, NaiveDate, NaiveDateTime, NaiveTime, Utc};
use postgres::types::Type;
use rust_decimal::Decimal;
use serde_json::Value;
use std::collections::HashMap;
use uuid::Uuid;
// Type system enumerating the Postgres column types this connector can read.
//
// NOTE(review): every variant is constructed with `true` in the
// `From<&Type>` impl below; the bool payload presumably encodes nullability
// (as is common in connectorx type systems) — confirm against the
// `impl_typesystem!` macro definition.
#[derive(Copy, Clone, Debug)]
pub enum PostgresTypeSystem {
    Bool(bool),
    Float4(bool),
    Float8(bool),
    Numeric(bool),
    Int2(bool),
    Int4(bool),
    Int8(bool),
    Float4Array(bool),
    Float8Array(bool),
    NumericArray(bool),
    Int2Array(bool),
    Int4Array(bool),
    Int8Array(bool),
    Date(bool),
    Char(bool),
    BpChar(bool),
    VarChar(bool),
    Text(bool),
    ByteA(bool),
    Time(bool),
    Timestamp(bool),
    TimestampTz(bool),
    UUID(bool),
    JSON(bool),
    JSONB(bool),
    Enum(bool),
    HSTORE(bool),
}
// Maps each type-system variant to the Rust type produced when reading a
// value of that Postgres type. Several textual types share `&'r str`.
impl_typesystem! {
    system = PostgresTypeSystem,
    mappings = {
        { Int2 => i16 }
        { Int4 => i32 }
        { Int8 => i64 }
        { Float4 => f32 }
        { Float8 => f64 }
        { Numeric => Decimal }
        { Int2Array => Vec<i16> }
        { Int4Array => Vec<i32> }
        { Int8Array => Vec<i64> }
        { Float4Array => Vec<f32> }
        { Float8Array => Vec<f64> }
        { NumericArray => Vec<Decimal> }
        { Bool => bool }
        { Char => i8 }
        { Text | BpChar | VarChar | Enum => &'r str }
        { ByteA => Vec<u8> }
        { Time => NaiveTime }
        { Timestamp => NaiveDateTime }
        { TimestampTz => DateTime<Utc> }
        { Date => NaiveDate }
        { UUID => Uuid }
        { JSON | JSONB => Value }
        { HSTORE => HashMap<String, Option<String>> }
    }
}
// Classifies a `postgres::types::Type` (by its lowercase name) into this
// connector's type system. `_`-prefixed names are array types.
impl<'a> From<&'a Type> for PostgresTypeSystem {
    fn from(ty: &'a Type) -> PostgresTypeSystem {
        use PostgresTypeSystem::*;
        match ty.name() {
            "int2" => Int2(true),
            "int4" => Int4(true),
            "int8" => Int8(true),
            "float4" => Float4(true),
            "float8" => Float8(true),
            "numeric" => Numeric(true),
            "_int2" => Int2Array(true),
            "_int4" => Int4Array(true),
            "_int8" => Int8Array(true),
            "_float4" => Float4Array(true),
            "_float8" => Float8Array(true),
            "_numeric" => NumericArray(true),
            "bool" => Bool(true),
            "char" => Char(true),
            // Extension text-like types are all read as plain text.
            "text" | "citext" | "ltree" | "lquery" | "ltxtquery" => Text(true),
            "bpchar" => BpChar(true),
            "varchar" => VarChar(true),
            "bytea" => ByteA(true),
            "time" => Time(true),
            "timestamp" => Timestamp(true),
            "timestamptz" => TimestampTz(true),
            "date" => Date(true),
            "uuid" => UUID(true),
            "json" => JSON(true),
            "jsonb" => JSONB(true),
            "hstore" => HSTORE(true),
            // User-defined enum types have arbitrary names, so fall back to
            // the type's kind; anything else is unsupported.
            _ => match ty.kind() {
                postgres::types::Kind::Enum(_) => Enum(true),
                _ => unimplemented!("{}", ty.name()),
            },
        }
    }
}
pub struct PostgresTypePairs<'a>(pub &'a Type, pub &'a PostgresTypeSystem);
// Link (postgres::Type, connectorx::PostgresTypes) back to the one defined by the postgres crate.
impl<'a> From<PostgresTypePairs<'a>> for Type {
    // Picks the wire type used when fetching: enums and hstore are rewritten
    // to TEXT, everything else keeps its original postgres type.
    fn from(ty: PostgresTypePairs) -> Type {
        use PostgresTypeSystem::*;
        match ty.1 {
            // Enums have user-defined OIDs, so fetch them as text.
            Enum(_) => Type::TEXT,
            HSTORE(_) => Type::TEXT, // hstore is not supported in binary protocol (since no corresponding inner TYPE)
            _ => ty.0.clone(),
        }
    }
}
|
use std::io;
/// Interactive entry point: keeps prompting until the user picks a valid
/// conversion (1 = from Celsius, 2 = from Fahrenheit), then runs it once.
fn main() {
    loop {
        println!("Enter 1 to convert from Celcius or 2 to convert from Farenheit:");

        let mut choice = String::new();
        io::stdin()
            .read_line(&mut choice)
            .expect("Failed to read line");

        // Non-numeric input simply re-prompts.
        let choice: u32 = match choice.trim().parse() {
            Ok(num) => num,
            Err(_) => continue,
        };

        match choice {
            1 => {
                to_farenheit();
                break;
            }
            2 => {
                to_celcius();
                break;
            }
            _ => println!("Please enter 1 or 2!"),
        }
    }
}
/// Reads a Fahrenheit temperature from stdin and prints it in Celsius.
/// Unparseable input falls back to 0.0 (same as the original behavior).
fn to_celcius() {
    println!("How many degrees farenheit?");

    let mut raw = String::new();
    io::stdin()
        .read_line(&mut raw)
        .expect("Failed to read line");

    let degrees: f32 = raw.trim().parse().unwrap_or(0.0);

    let celcius = (degrees - 32.0) / 1.8;
    println!("{} fareneheit in celcius is {}.", degrees, celcius);
}
/// Reads a Celsius temperature from stdin and prints it in Fahrenheit.
/// Unparseable input falls back to 0.0 (same as the original behavior).
fn to_farenheit() {
    println!("How many degrees celcius?");

    let mut raw = String::new();
    io::stdin()
        .read_line(&mut raw)
        .expect("Failed to read line");

    let degrees: f32 = raw.trim().parse().unwrap_or(0.0);

    let farenheit = degrees * 1.8 + 32.0;
    println!("{} celcius in farenheit is {}.", degrees, farenheit);
}
|
extern crate hyper;
use hyper::Client;
use hyper::body::HttpBody as _;
/// Minimal OpenWeatherMap client.
pub struct OpenWeatherApi{
    /// API key interpolated into every request URL.
    pub api_key: String
}
impl OpenWeatherApi {
pub async fn get_json(&self, city : String) -> String {
let client = Client::new();
let url: String = String::from(format!("http://api.openweathermap.org/data/2.5/weather?q={}&APPID={}&units=metric", city, self.api_key));
let parse_to_uri = url.parse().unwrap();
let mut res = client.get(parse_to_uri).await.unwrap();
return String::from(format!("{:?}", res.body_mut().data().await));
}
pub fn print_data(&self, value : serde_json::Value){
println!("Name: {}", value["name"].to_string());
println!("Country: {}", value["sys"]["country"].to_string());
println!("Lon: {}", value["coord"]["lon"].to_string());
println!("Lat: {}", value["coord"]["lat"].to_string());
println!("Weather");
println!("Main: {}", value["weather"][0]["main"].to_string().replace("\\", "").replace("\"", ""));
println!("Description: {}", value["weather"][0]["description"].to_string().replace("\\", "").replace("\"", ""));
println!("Temp: {}", value["main"]["temp"].to_string());
println!("Feels like: {}", value["main"]["feels_like"].to_string());
println!("Temp min: {}", value["main"]["temp_min"].to_string());
println!("Temp max: {}", value["main"]["temp_max"].to_string());
println!("Pressure: {}", value["main"]["pressure"].to_string());
println!("Humidity: {}", value["main"]["humidity"].to_string());
println!("Sea level: {}", value["main"]["sea_level"].to_string());
println!("Ground level: {}", value["main"]["grnd_level"].to_string());
}
} |
//!
//! very simple program to demonstrate color selection
//!
//! ```cargo run --example color-selection```
//!
use cv::prelude::*;
use std::{
env,
error::Error,
path::PathBuf,
sync::{Arc, RwLock},
time::Duration,
};
fn main() -> Result<(), Box<dyn Error>> {
    // Resolve the image path: first CLI arg (default "colors.png") under
    // <crate root>/assets.
    let image_path = {
        let name = env::args().nth(1).unwrap_or("colors.png".to_string());
        let base = PathBuf::from(env!("CARGO_MANIFEST_DIR"));
        base.join("assets").join(name)
    };
    let gui = cv::GUI::new("color selection");
    // Shared HSV offsets, updated by slider callbacks on a background thread.
    let offsets = color_range_offsets(&gui);
    let click_events = gui.mouse_events_for::<cv::MouseLeftBtnDown>();
    let mut color = cv::HSV::unsafe_new(0, 255, 51);
    // Render loop: runs until the process is killed or an operation errors.
    loop {
        let mut image = cv::imread(&image_path)?;
        // Each left-click picks the color under the cursor as the new target.
        while let Ok(event) = click_events.try_recv() {
            color = image.at(&event.point())?;
            println!("HSV: {}, RGB: {}", color, cv::RGB::from(color));
        }
        let offsets = offsets.read().unwrap();
        let color_range = cv::HSVRange::from_hsv(&color, *offsets)?;
        // Mask pixels in range around the selected color and outline them.
        let mut masked = image.convert_color().in_range(&color_range);
        let contours = masked.find_contours();
        image.draw_contours(&contours, cv::RGB::red(), 2);
        gui.show_for(&image, Duration::from_millis(100))?;
    }
}
/// Spawns a background thread that mirrors three GUI sliders (H, S, V) into a
/// shared offset triple, and returns a handle to that triple.
fn color_range_offsets(gui: &cv::GUI) -> Arc<RwLock<(i32, i32, i32)>> {
    let offsets = Arc::new(RwLock::new((1, 1, 1)));
    std::thread::spawn({
        let h = gui.slider("Farbwert (H)", 1, 255);
        let s = gui.slider("Sättingung (S)", 1, 255);
        let v = gui.slider("Dunkelstufe (V)", 1, 255);
        let shared = offsets.clone();
        move || loop {
            // Drain each slider channel in turn, keeping only the newest value.
            while let Ok(value) = h.recv_timeout(Duration::from_millis(100)) {
                shared.write().unwrap().0 = value;
            }
            while let Ok(value) = s.recv_timeout(Duration::from_millis(100)) {
                shared.write().unwrap().1 = value;
            }
            while let Ok(value) = v.recv_timeout(Duration::from_millis(100)) {
                shared.write().unwrap().2 = value;
            }
        }
    });
    offsets
}
|
extern crate env_logger;
extern crate hyper;
extern crate hubcaps;
extern crate hyper_native_tls;
use hyper::Client;
use hyper::net::HttpsConnector;
use hyper_native_tls::NativeTlsClient;
use hubcaps::{Credentials, Github};
use std::env;
/// Example: fetches the blob contents of README.md from the hubcaps repo
/// using a GITHUB_TOKEN from the environment.
fn main() {
    env_logger::init().unwrap();
    match env::var("GITHUB_TOKEN").ok() {
        Some(token) => {
            let github =
                Github::new(
                    format!("hubcaps/{}", env!("CARGO_PKG_VERSION")),
                    Client::with_connector(HttpsConnector::new(NativeTlsClient::new().unwrap())),
                    Credentials::Token(token),
                );
            // `find` yields an Option, so this `for` body runs at most once —
            // only when README.md is present in the recursive tree listing.
            for file in github
                .repo("softprops", "hubcaps")
                .git()
                .tree("master", true)
                .unwrap()
                .tree
                .iter()
                .find(|file| file.path == "README.md")
            {
                let blob = github
                    .repo("softprops", "hubcaps")
                    .git()
                    .blob(file.sha.clone())
                    .unwrap();
                println!("readme {:#?}", blob);
            }
        }
        _ => println!("example missing GITHUB_TOKEN"),
    }
}
|
use crate::{Runtime, RuntimeCall, RuntimeConfigs, Subspace, Sudo};
use codec::{Decode, Encode};
use scale_info::TypeInfo;
use sp_runtime::traits::{DispatchInfoOf, SignedExtension};
use sp_runtime::transaction_validity::{
InvalidTransaction, TransactionValidity, TransactionValidityError, ValidTransaction,
};
use sp_std::prelude::*;
/// Controls non-root access to feeds and object store
#[derive(Debug, Encode, Decode, Clone, Eq, PartialEq, Default, TypeInfo)]
pub struct CheckStorageAccess;

impl SignedExtension for CheckStorageAccess {
    const IDENTIFIER: &'static str = "CheckStorageAccess";
    type AccountId = <Runtime as frame_system::Config>::AccountId;
    type Call = <Runtime as frame_system::Config>::RuntimeCall;
    type AdditionalSigned = ();
    type Pre = ();

    // This extension adds no extra data to the signed payload.
    fn additional_signed(&self) -> Result<Self::AdditionalSigned, TransactionValidityError> {
        Ok(())
    }

    // Rejects signed transactions from non-sudo accounts while storage access
    // is disabled; the sudo key (if set) is always allowed through.
    fn validate(
        &self,
        who: &Self::AccountId,
        _call: &Self::Call,
        _info: &DispatchInfoOf<Self::Call>,
        _len: usize,
    ) -> TransactionValidity {
        if Subspace::is_storage_access_enabled() || Some(who) == Sudo::key().as_ref() {
            Ok(ValidTransaction::default())
        } else {
            InvalidTransaction::BadSigner.into()
        }
    }

    // NOTE(review): unlike DisablePallets below, pre_dispatch does not re-run
    // `validate`, so the storage-access check is not enforced at dispatch
    // time — confirm this asymmetry is intentional.
    fn pre_dispatch(
        self,
        _who: &Self::AccountId,
        _call: &Self::Call,
        _info: &DispatchInfoOf<Self::Call>,
        _len: usize,
    ) -> Result<Self::Pre, TransactionValidityError> {
        Ok(())
    }
}
/// Disable specific pallets.
#[derive(Debug, Encode, Decode, Clone, Eq, PartialEq, Default, TypeInfo)]
pub struct DisablePallets;

impl SignedExtension for DisablePallets {
    const IDENTIFIER: &'static str = "DisablePallets";
    type AccountId = <Runtime as frame_system::Config>::AccountId;
    type Call = <Runtime as frame_system::Config>::RuntimeCall;
    type AdditionalSigned = ();
    type Pre = ();

    // This extension adds no extra data to the signed payload.
    fn additional_signed(&self) -> Result<Self::AdditionalSigned, TransactionValidityError> {
        Ok(())
    }

    // Rejects balance-transfer calls while transfers are disabled via
    // RuntimeConfigs; all other calls pass through.
    fn validate(
        &self,
        _who: &Self::AccountId,
        call: &Self::Call,
        _info: &DispatchInfoOf<Self::Call>,
        _len: usize,
    ) -> TransactionValidity {
        // Disable normal balance transfers.
        if matches!(
            call,
            RuntimeCall::Balances(
                pallet_balances::Call::transfer { .. }
                    | pallet_balances::Call::transfer_keep_alive { .. }
                    | pallet_balances::Call::transfer_all { .. }
            )
        ) && !RuntimeConfigs::enable_transfer()
        {
            InvalidTransaction::Call.into()
        } else {
            Ok(ValidTransaction::default())
        }
    }

    // Re-runs the validation so the transfer restriction also holds at
    // dispatch time, not just in the transaction pool.
    fn pre_dispatch(
        self,
        who: &Self::AccountId,
        call: &Self::Call,
        info: &DispatchInfoOf<Self::Call>,
        len: usize,
    ) -> Result<Self::Pre, TransactionValidityError> {
        self.validate(who, call, info, len)?;
        Ok(())
    }

    // Rejects unsigned Domains calls while the domains feature is disabled.
    fn validate_unsigned(
        call: &Self::Call,
        _info: &DispatchInfoOf<Self::Call>,
        _len: usize,
    ) -> TransactionValidity {
        if matches!(call, RuntimeCall::Domains(_)) && !RuntimeConfigs::enable_domains() {
            InvalidTransaction::Call.into()
        } else {
            Ok(ValidTransaction::default())
        }
    }
}
|
/// Infinite Fibonacci iterator; yields 1, 2, 3, 5, 8, ...
struct Fibonacci {
    a: u64,
    b: u64,
}

impl Fibonacci {
    /// Starts the sequence from the seed pair (0, 1).
    pub fn new() -> Fibonacci {
        Fibonacci { a: 0, b: 1 }
    }
}

impl Iterator for Fibonacci {
    type Item = u64;

    /// Advances the window (a, b) -> (b, a + b) and yields the new term.
    fn next(&mut self) -> Option<u64> {
        let term = self.a + self.b;
        self.a = self.b;
        self.b = term;
        Some(term)
    }
}

/// Sums the even Fibonacci terms that do not exceed `max`.
pub fn solve(max: u64) -> u64 {
    let mut total = 0;
    for term in Fibonacci::new() {
        if term > max {
            break;
        }
        if term % 2 == 0 {
            total += term;
        }
    }
    total
}
#[cfg(test)]
mod tests {
    // Even Fibonacci terms not exceeding 89 are 2, 8 and 34, summing to 44.
    // The original test was named `sum_to_89` but asserted `solve(34)`;
    // both bounds are now covered under accurate names.
    #[test]
    fn sum_to_89() {
        assert_eq!(44, super::solve(89))
    }

    #[test]
    fn sum_to_34() {
        assert_eq!(44, super::solve(34))
    }
}
|
use diesel;
use diesel::mysql::MysqlConnection;
use diesel::prelude::*;

use schema::heroes;
use schema::users;
/// A user row; maps to the `users` table for all queries below.
// Fixes: the `#[table_name]` attribute must follow the derive (diesel 1.x
// rejects it before `#[derive]`), and it now names `users`, which is the
// table every method here actually queries.
#[derive(AsChangeset, Serialize, Deserialize, Queryable, Insertable)]
#[table_name = "users"]
pub struct User {
    pub id: i32,
    pub name: String,
    pub identity: String,
    pub hometown: String,
    pub age: i32,
}

impl User {
    /// Inserts `user` and returns the freshly created row (highest id).
    ///
    /// # Panics
    ///
    /// Panics if the insert or the follow-up select fails.
    pub fn create(user: User, connection: &MysqlConnection) -> User {
        // Bug fix: the insert previously targeted `heroes::table` while every
        // read/update/delete used `users::table`.
        diesel::insert_into(users::table)
            .values(&user)
            .execute(connection)
            .expect("Error creating new user");
        users::table.order(users::id.desc()).first(connection).unwrap()
    }

    /// Returns all users ordered by id.
    pub fn read_all(connection: &MysqlConnection) -> Vec<User> {
        users::table.order(users::id).load::<User>(connection).unwrap()
    }

    /// Returns the user with the given id; panics if it does not exist.
    pub fn read(id: i32, connection: &MysqlConnection) -> User {
        users::table.find(id).first::<User>(connection).unwrap()
    }

    /// Updates the row with the given id; returns whether the update succeeded.
    pub fn update(id: i32, user: User, connection: &MysqlConnection) -> bool {
        diesel::update(users::table.find(id)).set(&user).execute(connection).is_ok()
    }

    /// Deletes the row with the given id; returns whether the delete succeeded.
    pub fn delete(id: i32, connection: &MysqlConnection) -> bool {
        diesel::delete(users::table.find(id)).execute(connection).is_ok()
    }
}
|
//! A multi-producer, multi-consumer channel implementation.
mod mutex_linked_list;
mod mpmc_bounded_queue;
mod channel;
pub use self::mutex_linked_list::MutexLinkedList;
pub use self::mpmc_bounded_queue::LockFreeQueue;
pub use self::channel::Failure;
use std::sync::{Arc};
use std::cell::UnsafeCell;
use self::channel::{Canal};
/// The sending-half of the mpmc channel.
pub struct Sender<T: Send> {
    // Shared channel state; mutated through UnsafeCell from multiple threads.
    inner: Arc<UnsafeCell<Canal<T>>>,
}

// NOTE(review): Send is asserted here while the Canal is accessed through an
// UnsafeCell from many threads; soundness rests entirely on Canal's own
// internal synchronization — confirm against `channel::Canal`.
unsafe impl<T: Send> Send for Sender<T> {}

impl<T: Send> Sender<T> {
    /// Sends data to the channel.
    ///
    /// This method will never block, but may return an error with the value
    /// returned in the Err(..).
    pub fn send(&self, value: T) -> Result<(), T> {
        unsafe {
            (*self.inner.get()).send(value)
        }
    }
}

impl<T: Send> Clone for Sender<T> {
    // Registers an additional sender with the channel before sharing the Arc.
    fn clone(&self) -> Sender<T> {
        unsafe {
            (*self.inner.get()).clone_chan();
        }
        Sender { inner: self.inner.clone() }
    }
}

impl<T: Send> Drop for Sender<T> {
    // Deregisters this sender; presumably the channel reports disconnection
    // to receivers once the last sender is dropped — confirm in `Canal`.
    fn drop(&mut self) {
        unsafe {
            (*self.inner.get()).drop_chan();
        }
    }
}
/// The receiving-half of the mpmc channel.
pub struct Receiver<T: Send> {
    // Shared channel state; mutated through UnsafeCell from multiple threads.
    inner: Arc<UnsafeCell<Canal<T>>>,
}

// NOTE(review): as with Sender, soundness depends on Canal synchronizing all
// access behind this UnsafeCell — confirm against `channel::Canal`.
unsafe impl<T: Send> Send for Receiver<T> {}

impl<T: Send> Receiver<T> {
    /// Receive data from the channel.
    ///
    /// This method will block until either new data is sent or all senders have
    /// disconnected.
    pub fn recv(&self) -> Result<T, Failure> {
        unsafe {
            (*self.inner.get()).recv()
        }
    }
}

impl<T: Send> Clone for Receiver<T> {
    // Registers an additional receiver (port) before sharing the Arc.
    fn clone(&self) -> Receiver<T> {
        unsafe {
            (*self.inner.get()).clone_port();
        }
        Receiver { inner: self.inner.clone() }
    }
}

impl<T: Send> Drop for Receiver<T> {
    // Deregisters this receiver from the channel.
    fn drop(&mut self) {
        unsafe {
            (*self.inner.get()).drop_port();
        }
    }
}
/// Create a channel pair using a lock-free queue with specified capacity.
///
/// Note: This is not ready for use in production, some bugs are still
/// being actively worked out.
pub fn mpmc_channel<T: Send>(cap: usize) -> (Sender<T>, Receiver<T>) {
    // Both halves share one Canal; which one holds the original Arc is
    // immaterial.
    let shared = Arc::new(UnsafeCell::new(Canal::new(cap)));
    let receiver = Receiver { inner: Arc::clone(&shared) };
    let sender = Sender { inner: shared };
    (sender, receiver)
}
#[cfg(test)]
mod tests {
    use std::thread;
    use mpmc::{mpmc_channel};

    #[test]
    fn test_producer_consumer() {
        let (sn, rc) = mpmc_channel(25);

        // 20 producer threads each send one distinct byte in 0..20.
        let mut send_vec = Vec::new();
        for i in 0..20 {
            let s = sn.clone();
            send_vec.push(thread::spawn(move || {
                assert!(s.send(i as u8).is_ok());
            }));
        }

        // 20 consumer threads each receive one value and check it is one of
        // the sent bytes.
        let mut recv_vec = Vec::new();
        for _i in 0..20 {
            let r = rc.clone();
            recv_vec.push(thread::spawn(move || {
                let popped = r.recv().unwrap();
                // Replaces the manual found-flag scan: sent values are
                // exactly the u8 range 0..20, so membership is `popped < 20`.
                assert!(popped < 20);
            }));
        }

        for x in send_vec.into_iter().chain(recv_vec) {
            x.join().unwrap();
        }
    }
}
|
pub mod query;
pub mod generator;
|
use clap::{App, Arg};
use std::fs::File;
use std::process;
use imprint_of_light::{
config::Config,
render::{render as r, Entity, Scene},
};
/// Entry point: delegates to `args_check`, which parses the CLI and renders
/// every enabled config.
fn main() {
    args_check();
}
/// Parses CLI arguments, loads the JSON config file, and renders each enabled
/// config entry to its output image.
///
/// Exits the process with status 1 if the config file cannot be opened;
/// panics if the file is not valid JSON or an image fails to save.
fn args_check() {
    let matches = App::new("imprint_of_light")
        .version("0.1.0")
        .author("Luke Euler <luke16times@gmail.com>")
        .about("draw the light with shapes in 2D")
        .arg(
            Arg::with_name("config")
                .short('c')
                .long("config")
                .value_name("FILE")
                .help("the config file for rendering images")
                .default_value("config.json"),
        )
        .get_matches();

    let config_file_name = matches.value_of("config").unwrap();
    let file = match File::open(config_file_name) {
        Ok(f) => f,
        Err(e) => {
            // io::Error implements Display; no intermediate to_string needed.
            println!("{}: {}", config_file_name, e);
            process::exit(1)
        }
    };

    let configs: Vec<Config> = serde_json::from_reader(file).unwrap();
    for item in configs {
        // Skip disabled configs and configs with nothing to render.
        if !item.enable || item.scenes.is_empty() {
            continue;
        }
        println!("try to render image: {}", item.out);

        // Build the entity list with an iterator instead of push-in-a-loop.
        let entities: Vec<Entity> = item
            .scenes
            .into_iter()
            .map(|entity_json| entity_json.get_entity())
            .collect();
        let scene = Scene { entities };

        let img = r(
            &scene,
            (item.width, item.height),
            item.stratification,
            item.max_depth,
        );
        img.save(item.out.clone()).unwrap();
    }
}
|
// NOTE(review): svd2rust-style generated register block. The `_reservedN`
// byte arrays pad the gaps between register offsets (compare the 0x..
// offsets in the per-field docs); do not reorder fields — layout is `repr(C)`.
#[doc = r"Register block"]
#[repr(C)]
pub struct RegisterBlock {
    #[doc = "0x00 - Temperature sensor configuration register 1"]
    pub cfgr1: CFGR1,
    _reserved1: [u8; 0x04],
    #[doc = "0x08 - Temperature sensor T0 value register 1"]
    pub t0valr1: T0VALR1,
    _reserved2: [u8; 0x04],
    #[doc = "0x10 - Temperature sensor ramp value register"]
    pub rampvalr: RAMPVALR,
    #[doc = "0x14 - Temperature sensor interrupt threshold register 1"]
    pub itr1: ITR1,
    _reserved4: [u8; 0x04],
    #[doc = "0x1c - Temperature sensor data register"]
    pub dr: DR,
    #[doc = "0x20 - Temperature sensor status register"]
    pub sr: SR,
    #[doc = "0x24 - Temperature sensor interrupt enable register"]
    pub itenr: ITENR,
    #[doc = "0x28 - Temperature sensor clear interrupt flag register"]
    pub icifr: ICIFR,
    #[doc = "0x2c - Temperature sensor option register"]
    pub or: OR,
}
#[doc = "CFGR1 (rw) register accessor: Temperature sensor configuration register 1\n\nYou can [`read`](crate::generic::Reg::read) this register and get [`cfgr1::R`]. You can [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero) this register using [`cfgr1::W`]. You can also [`modify`](crate::generic::Reg::modify) this register. See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [`cfgr1`]
module"]
pub type CFGR1 = crate::Reg<cfgr1::CFGR1_SPEC>;
#[doc = "Temperature sensor configuration register 1"]
pub mod cfgr1;
#[doc = "T0VALR1 (r) register accessor: Temperature sensor T0 value register 1\n\nYou can [`read`](crate::generic::Reg::read) this register and get [`t0valr1::R`]. See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [`t0valr1`]
module"]
pub type T0VALR1 = crate::Reg<t0valr1::T0VALR1_SPEC>;
#[doc = "Temperature sensor T0 value register 1"]
pub mod t0valr1;
#[doc = "RAMPVALR (r) register accessor: Temperature sensor ramp value register\n\nYou can [`read`](crate::generic::Reg::read) this register and get [`rampvalr::R`]. See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [`rampvalr`]
module"]
pub type RAMPVALR = crate::Reg<rampvalr::RAMPVALR_SPEC>;
#[doc = "Temperature sensor ramp value register"]
pub mod rampvalr;
#[doc = "ITR1 (rw) register accessor: Temperature sensor interrupt threshold register 1\n\nYou can [`read`](crate::generic::Reg::read) this register and get [`itr1::R`]. You can [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero) this register using [`itr1::W`]. You can also [`modify`](crate::generic::Reg::modify) this register. See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [`itr1`]
module"]
pub type ITR1 = crate::Reg<itr1::ITR1_SPEC>;
#[doc = "Temperature sensor interrupt threshold register 1"]
pub mod itr1;
#[doc = "DR (rw) register accessor: Temperature sensor data register\n\nYou can [`read`](crate::generic::Reg::read) this register and get [`dr::R`]. You can [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero) this register using [`dr::W`]. You can also [`modify`](crate::generic::Reg::modify) this register. See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [`dr`]
module"]
pub type DR = crate::Reg<dr::DR_SPEC>;
#[doc = "Temperature sensor data register"]
pub mod dr;
#[doc = "SR (r) register accessor: Temperature sensor status register\n\nYou can [`read`](crate::generic::Reg::read) this register and get [`sr::R`]. See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [`sr`]
module"]
pub type SR = crate::Reg<sr::SR_SPEC>;
#[doc = "Temperature sensor status register"]
pub mod sr;
#[doc = "ITENR (rw) register accessor: Temperature sensor interrupt enable register\n\nYou can [`read`](crate::generic::Reg::read) this register and get [`itenr::R`]. You can [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero) this register using [`itenr::W`]. You can also [`modify`](crate::generic::Reg::modify) this register. See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [`itenr`]
module"]
pub type ITENR = crate::Reg<itenr::ITENR_SPEC>;
#[doc = "Temperature sensor interrupt enable register"]
pub mod itenr;
#[doc = "ICIFR (rw) register accessor: Temperature sensor clear interrupt flag register\n\nYou can [`read`](crate::generic::Reg::read) this register and get [`icifr::R`]. You can [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero) this register using [`icifr::W`]. You can also [`modify`](crate::generic::Reg::modify) this register. See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [`icifr`]
module"]
pub type ICIFR = crate::Reg<icifr::ICIFR_SPEC>;
#[doc = "Temperature sensor clear interrupt flag register"]
pub mod icifr;
#[doc = "OR (rw) register accessor: Temperature sensor option register\n\nYou can [`read`](crate::generic::Reg::read) this register and get [`or::R`]. You can [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero) this register using [`or::W`]. You can also [`modify`](crate::generic::Reg::modify) this register. See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [`or`]
module"]
pub type OR = crate::Reg<or::OR_SPEC>;
#[doc = "Temperature sensor option register"]
pub mod or;
|
/// Splits `line` at the first tab character.
///
/// Returns `Some((before, after))` where `before` precedes the first `'\t'`
/// and `after` is everything following it (possibly empty), or `None` if
/// `line` contains no tab.
pub fn split_first<'a>(line: &'a str) -> Option<(&'a str, &'a str)> {
    // str::split_once performs exactly this find-and-split in one call.
    line.split_once('\t')
}
// No tab at all -> None.
#[test]
fn split_empty() {
    assert_eq!(split_first(""), None)
}

// Single tab splits into the two surrounding pieces.
#[test]
fn split_tab() {
    assert_eq!(split_first("foo\tbar"), Some(("foo", "bar")))
}

// Trailing tab yields an empty second piece.
#[test]
fn split_end() {
    assert_eq!(split_first("foo\t"), Some(("foo", "")))
}

// Only the first tab splits; later tabs remain in the tail.
#[test]
fn split_2() {
    assert_eq!(split_first("foo\tbar\tblatz"), Some(("foo", "bar\tblatz")))
}
|
/*
* Copyright (c) Facebook, Inc. and its affiliates.
*
* This source code is licensed under both the MIT license found in the
* LICENSE-MIT file in the root directory of this source tree and the Apache
* License, Version 2.0 found in the LICENSE-APACHE file in the root directory
* of this source tree.
*/
use crate::failure::Fail;
use crate::Error;
use std::error::Error as StdError;
use std::fmt::{self, Debug, Display};
/// Convert error implementing [failure::Fail] to [anyhow::Error]
pub fn convert(fail: impl Fail) -> Error {
    convert_ref(&fail)
}

// Recursively walks the failure's `cause` chain: the deepest cause becomes
// the root anyhow error, and each outer failure is layered back on via
// `context`, preserving the chain order.
fn convert_ref(fail: &(impl Fail + ?Sized)) -> Error {
    match fail.cause() {
        Some(cause) => convert_ref(cause).context(fail.to_string()),
        // Leaf failure: capture its Display and Debug renderings as owned
        // strings so the (non-'static) failure itself need not be kept.
        None => Error::new(ErrorMessage {
            display: fail.to_string(),
            debug: format!("{:?}", fail),
        }),
    }
}
/// Snapshot of a failure's Display and Debug renderings, stored as owned
/// strings so the original failure value does not have to be retained.
struct ErrorMessage {
    display: String,
    debug: String,
}

impl StdError for ErrorMessage {}

impl Display for ErrorMessage {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "{}", self.display)
    }
}

impl Debug for ErrorMessage {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "{}", self.debug)
    }
}
|
mod lights;
mod sum;
mod area;
/// Exercises each demo module: lights, checked summation, and area printing.
fn main() {
    println!("Hello, world!");
    // test lights
    lights::test_lights();
    // test sum
    // sum_list returns None on overflow: 1 + u32::MAX wraps, so the second
    // call is expected to report failure.
    let acceptable_list = [1,2,3,4];
    let exceptional_list = [1, u32::MAX];
    assert_eq!(sum::sum_list(&acceptable_list), Some(10));
    assert_eq!(sum::sum_list(&exceptional_list), None);
    //
    // Struct-update syntax fills any remaining fields from Default.
    let circle = area::Circle {
        x: 0.0f64,
        y: 0.0f64,
        radius: 3.2f64,
        ..Default::default()
    };
    let square = area::Square {
        x: 0.0f64,
        y: 0.0f64,
        side: 2.0f64,
        ..Default::default()
    };
    area::print_area(circle);
    area::print_area(square);
}
|
// Copyright 2014-2018 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#![deny(clippy::all)]
// NOTE(review): this reads like a Clippy test fixture — the `self.len() == 0`
// comparison on an ExactSizeIterator is presumably here to exercise a lint
// (e.g. `len_zero`); confirm before "fixing" it to `is_empty()`.
#[allow(dead_code)]
struct Foo;

impl Iterator for Foo {
    type Item = ();

    // Computes the length comparison for its lint side only, then bails.
    fn next(&mut self) -> Option<()> {
        let _ = self.len() == 0;
        unimplemented!()
    }
}

// Default `len` comes from `size_hint`.
impl ExactSizeIterator for Foo {}

fn main() {}
|
use serenity::prelude::Mutex;
use std::collections::HashMap;
use std::net::UdpSocket;
use std::str;
use std::sync::Arc;
use player::Player;
/// UDP listener that logs incoming datagrams.
pub struct Server {
    // Bound UDP socket (see `new`: 0.0.0.0:3615).
    socket: UdpSocket,
    // Shared player registry; not read by `run` in the visible code —
    // presumably used elsewhere or by future handlers.
    players: Arc<Mutex<HashMap<String, Player>>>
}
impl Server {
    /// Binds a UDP socket on 0.0.0.0:3615 and constructs the server.
    ///
    /// # Panics
    ///
    /// Panics if the socket cannot be bound (e.g. the port is already in use).
    pub fn new(players: Arc<Mutex<HashMap<String, Player>>>) -> Server {
        let socket = match UdpSocket::bind("0.0.0.0:3615") {
            Ok(s) => s,
            Err(e) => panic!("Unable to bind socket: {}", e)
        };
        println!("Socket started on 3615!");
        // Field-init shorthand replaces `socket: socket, players: players`.
        Server { socket, players }
    }

    /// Receive loop: blocks on the socket and logs each datagram as UTF-8.
    /// Runs forever; a failed receive is logged and the loop continues.
    pub fn run(&self) {
        let mut buf = [0; 2048];
        loop {
            match self.socket.recv_from(&mut buf) {
                Ok((amt, _src)) => {
                    // Bug fix: only the first `amt` bytes belong to this
                    // datagram. Decoding the full 2048-byte buffer would
                    // include stale bytes from earlier packets.
                    let data = str::from_utf8(&buf[..amt]).unwrap_or("e");
                    println!("Data recv: {}", data);
                },
                Err(e) => {
                    println!("Unable to receive datagram: {}", e);
                }
            }
        }
    }
}
|
use std::collections::HashMap;
use ash::{version::DeviceV1_0, vk, Device};
use anyhow::Result;
use colorous::Gradient;
use rustc_hash::FxHashMap;
use crate::vulkan::GfaestusVk;
use super::Texture;
use super::Texture1D;
/// All gradients packed into a single 2-D texture, with a per-gradient byte
/// offset into that texture's pixel buffer.
pub struct Gradients_ {
    // gradient_offsets: FxHashMap<egui::TextureId, usize>,
    gradient_offsets: FxHashMap<GradientName, usize>,
    pub texture: Texture,
}
impl Gradients_ {
    /// Builds the gradient atlas: one gradient per row, `width` pixels wide,
    /// RGBA8, with any rows beyond the gradient count zero-padded.
    ///
    /// Panics (via `assert!`) if `width` is not a power of two, or if the
    /// fixed height of 64 cannot hold all gradients.
    pub fn initialize(
        app: &GfaestusVk,
        command_pool: vk::CommandPool,
        transition_queue: vk::Queue,
        width: usize,
    ) -> Result<Self> {
        assert!(
            width.is_power_of_two(),
            "GradientTexture width has to be a power of two"
        );
        let gradient_count = Self::GRADIENT_NAMES.len();
        // Fixed texture height: one row per gradient, remaining rows padding.
        let height = 64usize;
        let size = width * height;
        assert!(height.is_power_of_two() && height >= gradient_count);
        // let mut gradients: HashMap<egui::TextureId, GradientTexture> =
        let mut gradient_offsets: FxHashMap<GradientName, usize> =
            FxHashMap::default();
        let format = vk::Format::R8G8B8A8_UNORM;
        // TODO fix the usage flags
        let texture = Texture::allocate(
            app,
            command_pool,
            transition_queue,
            width,
            height,
            format,
            vk::ImageUsageFlags::TRANSFER_SRC
                | vk::ImageUsageFlags::TRANSFER_DST
                | vk::ImageUsageFlags::STORAGE
                | vk::ImageUsageFlags::SAMPLED,
        )?;
        // Staging buffer size in bytes (4 bytes per RGBA8 pixel).
        let buf_size = size * std::mem::size_of::<[u8; 4]>();
        let mut pixels: Vec<u8> = Vec::with_capacity(buf_size);
        // `gradient_id` is currently unused beyond driving the enumeration.
        for (gradient_id, name) in Self::GRADIENT_NAMES.iter().enumerate() {
            let gradient = name.gradient();
            for i in 0..width {
                // Sample the gradient at i/width and store as opaque RGBA8.
                let (r, g, b) = gradient.eval_rational(i, width).as_tuple();
                pixels.push(r);
                pixels.push(g);
                pixels.push(b);
                pixels.push(255);
            }
            // NOTE(review): this records the buffer length *after* pushing the
            // row, i.e. the end of this gradient's data (equivalently, the
            // start of the next row). If consumers expect the row's *start*
            // offset, this is off by one row — confirm against the lookup
            // code before changing.
            let offset = pixels.len();
            gradient_offsets.insert(*name, offset);
        }
        // Zero-fill the remaining rows up to the full texture size.
        for _ in 0..(buf_size - pixels.len()) {
            pixels.push(0);
        }
        texture.copy_from_slice(
            app,
            command_pool,
            transition_queue,
            width,
            height,
            &pixels,
        )?;
        Ok(Self {
            gradient_offsets,
            texture,
        })
    }
    /// Every gradient baked into the atlas, in row order.
    pub const GRADIENT_NAMES: [GradientName; 38] = {
        use GradientName::*;
        [
            Blues,
            BlueGreen,
            BluePurple,
            BrownGreen,
            Cividis,
            Cool,
            CubeHelix,
            Greens,
            GreenBlue,
            Greys,
            Inferno,
            Magma,
            Oranges,
            OrangeRed,
            PinkGreen,
            Plasma,
            Purples,
            PurpleBlue,
            PurpleBlueGreen,
            PurpleGreen,
            PurpleOrange,
            PurpleRed,
            Rainbow,
            Reds,
            RedBlue,
            RedGray,
            RedPurple,
            RedYellowBlue,
            RedYellowGreen,
            Sinebow,
            Spectral,
            Turbo,
            Viridis,
            Warm,
            YellowGreen,
            YellowGreenBlue,
            YellowOrangeBrown,
            YellowOrangeRed,
        ]
    };
}
/// One 1-D texture per gradient, keyed by the `egui::TextureId` derived from
/// the gradient's name.
pub struct Gradients {
    gradients: HashMap<egui::TextureId, GradientTexture>,
}
impl Gradients {
    /// Looks up a gradient texture by name (via the name's derived texture id).
    pub fn gradient(&self, name: GradientName) -> Option<&GradientTexture> {
        let key = name.texture_id();
        self.gradients.get(&key)
    }
    /// Looks up a gradient texture directly by its `egui::TextureId`.
    pub fn gradient_from_id(
        &self,
        texture_id: egui::TextureId,
    ) -> Option<&GradientTexture> {
        self.gradients.get(&texture_id)
    }
    /// Every gradient this struct creates in `initialize`.
    pub const GRADIENT_NAMES: [GradientName; 38] = {
        use GradientName::*;
        [
            Blues,
            BlueGreen,
            BluePurple,
            BrownGreen,
            Cividis,
            Cool,
            CubeHelix,
            Greens,
            GreenBlue,
            Greys,
            Inferno,
            Magma,
            Oranges,
            OrangeRed,
            PinkGreen,
            Plasma,
            Purples,
            PurpleBlue,
            PurpleBlueGreen,
            PurpleGreen,
            PurpleOrange,
            PurpleRed,
            Rainbow,
            Reds,
            RedBlue,
            RedGray,
            RedPurple,
            RedYellowBlue,
            RedYellowGreen,
            Sinebow,
            Spectral,
            Turbo,
            Viridis,
            Warm,
            YellowGreen,
            YellowGreenBlue,
            YellowOrangeBrown,
            YellowOrangeRed,
        ]
    };
    /// Creates one `GradientTexture` per name in `GRADIENT_NAMES`.
    pub fn initialize(
        app: &GfaestusVk,
        command_pool: vk::CommandPool,
        transition_queue: vk::Queue,
        width: usize,
    ) -> Result<Self> {
        let mut gradients: HashMap<egui::TextureId, GradientTexture> =
            HashMap::new();
        // NOTE(review): `std::array::IntoIter::new` is deprecated since Rust
        // 1.59. It is kept here because in pre-2021 editions a method call
        // `.into_iter()` on an array yields *references*, which would change
        // the element type — confirm the crate's edition before modernizing.
        for name in std::array::IntoIter::new(Self::GRADIENT_NAMES) {
            let gradient = name.gradient();
            let texture = GradientTexture::new(
                app,
                command_pool,
                transition_queue,
                gradient,
                width,
            )?;
            let key = name.texture_id();
            gradients.insert(key, texture);
        }
        Ok(Self { gradients })
    }
}
/// Identifies one of the 38 supported `colorous` gradients.
///
/// `Hash` is required because `texture_id()` derives an id by hashing the
/// variant; `Ord` allows use in sorted collections.
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub enum GradientName {
    Blues,
    BlueGreen,
    BluePurple,
    BrownGreen,
    Cividis,
    Cool,
    CubeHelix,
    Greens,
    GreenBlue,
    Greys,
    Inferno,
    Magma,
    Oranges,
    OrangeRed,
    PinkGreen,
    Plasma,
    Purples,
    PurpleBlue,
    PurpleBlueGreen,
    PurpleGreen,
    PurpleOrange,
    PurpleRed,
    Rainbow,
    Reds,
    RedBlue,
    RedGray,
    RedPurple,
    RedYellowBlue,
    RedYellowGreen,
    Sinebow,
    Spectral,
    Turbo,
    Viridis,
    Warm,
    YellowGreen,
    YellowGreenBlue,
    YellowOrangeBrown,
    YellowOrangeRed,
}
impl std::string::ToString for GradientName {
fn to_string(&self) -> String {
match self {
GradientName::Blues => "Blues".to_string(),
GradientName::BlueGreen => "BlueGreen".to_string(),
GradientName::BluePurple => "BluePurple".to_string(),
GradientName::BrownGreen => "BrownGreen".to_string(),
GradientName::Cividis => "Cividis".to_string(),
GradientName::Cool => "Cool".to_string(),
GradientName::CubeHelix => "CubeHelix".to_string(),
GradientName::Greens => "Greens".to_string(),
GradientName::GreenBlue => "GreenBlue".to_string(),
GradientName::Greys => "Greys".to_string(),
GradientName::Inferno => "Inferno".to_string(),
GradientName::Magma => "Magma".to_string(),
GradientName::Oranges => "Oranges".to_string(),
GradientName::OrangeRed => "OrangeRed".to_string(),
GradientName::PinkGreen => "PinkGreen".to_string(),
GradientName::Plasma => "Plasma".to_string(),
GradientName::Purples => "Purples".to_string(),
GradientName::PurpleBlue => "PurpleBlue".to_string(),
GradientName::PurpleBlueGreen => "PurpleBlueGreen".to_string(),
GradientName::PurpleGreen => "PurpleGreen".to_string(),
GradientName::PurpleOrange => "PurpleOrange".to_string(),
GradientName::PurpleRed => "PurpleRed".to_string(),
GradientName::Rainbow => "Rainbow".to_string(),
GradientName::Reds => "Reds".to_string(),
GradientName::RedBlue => "RedBlue".to_string(),
GradientName::RedGray => "RedGray".to_string(),
GradientName::RedPurple => "RedPurple".to_string(),
GradientName::RedYellowBlue => "RedYellowBlue".to_string(),
GradientName::RedYellowGreen => "RedYellowGreen".to_string(),
GradientName::Sinebow => "Sinebow".to_string(),
GradientName::Spectral => "Spectral".to_string(),
GradientName::Turbo => "Turbo".to_string(),
GradientName::Viridis => "Viridis".to_string(),
GradientName::Warm => "Warm".to_string(),
GradientName::YellowGreen => "YellowGreen".to_string(),
GradientName::YellowGreenBlue => "YellowGreenBlue".to_string(),
GradientName::YellowOrangeBrown => "YellowOrangeBrown".to_string(),
GradientName::YellowOrangeRed => "YellowOrangeRed".to_string(),
}
}
}
impl GradientName {
    /// Maps the name to its `colorous` gradient constant.
    pub fn gradient(&self) -> Gradient {
        use colorous::*;
        match self {
            GradientName::Blues => BLUES,
            GradientName::BlueGreen => BLUE_GREEN,
            GradientName::BluePurple => BLUE_PURPLE,
            GradientName::BrownGreen => BROWN_GREEN,
            GradientName::Cividis => CIVIDIS,
            GradientName::Cool => COOL,
            GradientName::CubeHelix => CUBEHELIX,
            GradientName::Greens => GREENS,
            GradientName::GreenBlue => GREEN_BLUE,
            GradientName::Greys => GREYS,
            GradientName::Inferno => INFERNO,
            GradientName::Magma => MAGMA,
            GradientName::Oranges => ORANGES,
            GradientName::OrangeRed => ORANGE_RED,
            GradientName::PinkGreen => PINK_GREEN,
            GradientName::Plasma => PLASMA,
            GradientName::Purples => PURPLES,
            GradientName::PurpleBlue => PURPLE_BLUE,
            GradientName::PurpleBlueGreen => PURPLE_BLUE_GREEN,
            GradientName::PurpleGreen => PURPLE_GREEN,
            GradientName::PurpleOrange => PURPLE_ORANGE,
            GradientName::PurpleRed => PURPLE_RED,
            GradientName::Rainbow => RAINBOW,
            GradientName::Reds => REDS,
            GradientName::RedBlue => RED_BLUE,
            GradientName::RedGray => RED_GREY,
            GradientName::RedPurple => RED_PURPLE,
            GradientName::RedYellowBlue => RED_YELLOW_BLUE,
            GradientName::RedYellowGreen => RED_YELLOW_GREEN,
            GradientName::Sinebow => SINEBOW,
            GradientName::Spectral => SPECTRAL,
            GradientName::Turbo => TURBO,
            GradientName::Viridis => VIRIDIS,
            GradientName::Warm => WARM,
            GradientName::YellowGreen => YELLOW_GREEN,
            GradientName::YellowGreenBlue => YELLOW_GREEN_BLUE,
            GradientName::YellowOrangeBrown => YELLOW_ORANGE_BROWN,
            GradientName::YellowOrangeRed => YELLOW_ORANGE_RED,
        }
    }
    /// Derives an `egui::TextureId` by hashing the variant.
    ///
    /// NOTE(review): `DefaultHasher` output is deterministic within a build
    /// but not guaranteed stable across Rust releases — these ids must not be
    /// persisted or sent across processes.
    pub fn texture_id(&self) -> egui::TextureId {
        use std::collections::hash_map::DefaultHasher;
        use std::hash::{Hash, Hasher};
        let mut hasher = DefaultHasher::default();
        self.hash(&mut hasher);
        let hash = hasher.finish();
        egui::TextureId::User(hash)
    }
}
/// A single gradient sampled into a 1-D texture.
pub struct GradientTexture {
    pub texture: Texture1D,
}
impl GradientTexture {
    /// Samples `gradient` at `width` evenly spaced points and uploads the
    /// normalized RGB values into a 1-D texture.
    ///
    /// Panics if `width` is not a power of two.
    pub fn new(
        app: &GfaestusVk,
        command_pool: vk::CommandPool,
        transition_queue: vk::Queue,
        gradient: Gradient,
        width: usize,
    ) -> Result<Self> {
        assert!(
            width.is_power_of_two(),
            "GradientTexture width has to be a power of two"
        );
        // Sample the gradient, mapping each 0..=255 channel to 0.0..=1.0.
        let colors: Vec<rgb::RGB<f32>> = (0..width)
            .map(|step| {
                let (r, g, b) = gradient.eval_rational(step, width).as_tuple();
                rgb::RGB::new(
                    (r as f32) / 255.0,
                    (g as f32) / 255.0,
                    (b as f32) / 255.0,
                )
            })
            .collect();
        let texture = Texture1D::create_from_colors(
            app,
            command_pool,
            transition_queue,
            &colors,
        )?;
        Ok(Self { texture })
    }
    /// Builds a linear-filtering, edge-clamped sampler suitable for gradient
    /// lookups.
    pub fn create_sampler(device: &Device) -> Result<vk::Sampler> {
        let sampler_info = vk::SamplerCreateInfo::builder()
            .mag_filter(vk::Filter::LINEAR)
            .min_filter(vk::Filter::LINEAR)
            .address_mode_u(vk::SamplerAddressMode::CLAMP_TO_EDGE)
            .address_mode_v(vk::SamplerAddressMode::CLAMP_TO_EDGE)
            .address_mode_w(vk::SamplerAddressMode::CLAMP_TO_EDGE)
            .anisotropy_enable(false)
            .border_color(vk::BorderColor::INT_OPAQUE_BLACK)
            .unnormalized_coordinates(false)
            .mipmap_mode(vk::SamplerMipmapMode::LINEAR)
            .mip_lod_bias(0.0)
            .min_lod(0.0)
            .max_lod(1.0)
            .build();
        // Raw Vulkan call; any driver error propagates to the caller.
        let sampler = unsafe { device.create_sampler(&sampler_info, None) }?;
        Ok(sampler)
    }
}
|
use hyper;
use hyper::net::NetworkListener;
use hyper::server::Request;
use hyper::server::Response;
use hyper::uri::RequestUri;
use hyper::header::AccessControlAllowOrigin;
use rand::{self, Rng};
use rustc_serialize::json;
use std::collections::BTreeMap;
use std::io::Read;
use std::sync::{mpsc, Mutex};
use url;
use protocol::authentication::AuthenticationType;
use authentication::Credentials;
use ::spotilocal::ssl_context;
/// Handles the local HTTPS callback from Spotify's Facebook SSO login page.
struct ServerHandler {
    // Hands the received access token back to `facebook_login`.
    token_tx: Mutex<mpsc::Sender<String>>,
    // Random token the browser must echo back (CSRF protection).
    csrf: String,
}
impl ServerHandler {
    /// Validates the CSRF token and, on success, forwards the access token to
    /// the waiting `facebook_login` call.
    ///
    /// Returns `Ok` on success, `Forbidden` on CSRF mismatch, and
    /// `BadRequest` when either expected parameter is absent.
    fn handle_login(&self, params: &BTreeMap<String, String>) -> hyper::status::StatusCode {
        // Request parameters are attacker-controlled; previously a missing
        // `access_token` or `csrf` panicked the handler via unwrap().
        let (token, csrf) = match (params.get("access_token"), params.get("csrf")) {
            (Some(token), Some(csrf)) => (token, csrf),
            _ => return hyper::status::StatusCode::BadRequest,
        };
        if *csrf == self.csrf {
            // unwrap here is acceptable: a poisoned mutex or closed channel is
            // a program bug, not a request error.
            self.token_tx.lock().unwrap().send(token.to_owned()).unwrap();
            hyper::status::StatusCode::Ok
        } else {
            hyper::status::StatusCode::Forbidden
        }
    }
}
impl hyper::server::Handler for ServerHandler {
    // Accepts GET /login/facebook_login_sso.json and forwards the query
    // parameters to `handle_login`; every other request is a 404.
    fn handle<'a, 'k>(&'a self, request: Request<'a, 'k>, mut response: Response<'a, hyper::net::Fresh>) {
        // The login page is served from login.spotify.com, so CORS must allow
        // that origin to reach this local server.
        response.headers_mut().set(AccessControlAllowOrigin::Value("https://login.spotify.com".to_owned()));
        *response.status_mut() = if let RequestUri::AbsolutePath(path) = request.uri {
            let (path, query, _) = url::parse_path(&path).unwrap();
            // Flatten the query string into a map; a duplicated key keeps the
            // last value inserted.
            let params = query.map_or(vec![], |q| url::form_urlencoded::parse(q.as_bytes()))
                .into_iter().collect::<BTreeMap<_,_>>();
            debug!("{:?} {:?} {:?}", request.method, path, params);
            if request.method == hyper::method::Method::Get && path == vec!["login", "facebook_login_sso.json"] {
                self.handle_login(&params)
            } else {
                hyper::status::StatusCode::NotFound
            }
        } else {
            hyper::status::StatusCode::NotFound
        }
    }
}
/// Fetches the Facebook user id for the given access token via the Graph API.
///
/// Returns `Err(())` on any network, decode, or missing-field failure.
/// (Previously every step unwrapped, panicking on transient network errors
/// despite the function's `Result` signature.)
fn facebook_get_me_id(token: &str) -> Result<String, ()> {
    let url = format!("https://graph.facebook.com/me?fields=id&access_token={}", token);
    let client = hyper::Client::new();
    let mut response = client.get(&url).send().map_err(|_| ())?;
    let mut body = String::new();
    response.read_to_string(&mut body).map_err(|_| ())?;
    let mut result: BTreeMap<String, String> = json::decode(&body).map_err(|_| ())?;
    result.remove("id").ok_or(())
}
/// Runs the Facebook SSO flow: starts a local HTTPS callback server, prompts
/// the user to visit the Spotify login page, waits for the token, and builds
/// the resulting `Credentials`.
///
/// Returns `Err(())` on any setup or network failure. (Previously every
/// fallible step unwrapped, panicking instead of reporting the error.)
pub fn facebook_login() -> Result<Credentials, ()> {
    let (tx, rx) = mpsc::channel();
    // Random CSRF token the login page must echo back to us.
    let csrf = rand::thread_rng().gen_ascii_chars().take(32).collect::<String>();
    let handler = ServerHandler {
        token_tx: Mutex::new(tx),
        csrf: csrf.clone()
    };
    let ssl = ssl_context().map_err(|_| ())?;
    // Port 0: let the OS choose a free port, then read it back.
    let mut listener = hyper::net::HttpsListener::new("127.0.0.1:0", ssl).map_err(|_| ())?;
    let port = listener.local_addr().map_err(|_| ())?.port();
    let mut server = hyper::Server::new(listener).handle(handler).map_err(|_| ())?;
    println!("Logging in using Facebook, please visit https://login.spotify.com/login-facebook-sso/?csrf={}&port={} in your browser.",
             csrf, port);
    // Blocks until the handler receives a valid login callback.
    let token = rx.recv().map_err(|_| ())?;
    let user_id = facebook_get_me_id(&token)?;
    let cred = Credentials {
        username: user_id,
        auth_type: AuthenticationType::AUTHENTICATION_FACEBOOK_TOKEN,
        auth_data: token.as_bytes().to_owned(),
    };
    server.close().map_err(|_| ())?;
    Ok(cred)
}
|
//! Utilities to make my life easier
/// A way to put C-literals in Rust code.
/// Does not accept byte strings.
///
/// # Usage
/// foo(c_str!("my string"));
#[macro_export]
macro_rules! c_str {
    // Appends a NUL terminator and casts to a raw pointer.
    // NOTE(review): the cast targets `*const i8`, which matches `c_char` on
    // x86/x86_64 but not on platforms where `c_char` is `u8` (e.g. aarch64) —
    // confirm target assumptions before reusing on other architectures.
    ($s:expr) => {
        concat!($s, "\0").as_ptr() as *const i8
    }
}
/// Registers a lua function in a LuaL_reg to call the given global identifier.
///
/// Do NOT use this macro directly, use the register_for_lua! instead.
#[macro_export]
macro_rules! register_lua {
    // Entry arm: for every `rust_name; lua_name` pair, generate an
    // `extern "C"` trampoline that locks the global callback singleton and
    // forwards to the matching callback method, then build the `luaL_Reg`
    // array (terminated by the NULL sentinel luaL_openlib requires).
    ($global_name:ident, $([ $( $inner:ident; $inner_lua_name:ident ),+ ])+) => {{
        use ::awesome_wayland::callbacks::Awesome;
        use ::libc::c_int;
        $($(unsafe extern "C" fn $inner(lua: *mut lua_State) -> c_int {
            // Read-lock the user's callback object for the duration of the call.
            let callback = $global_name.read()
                .expect("Could not lock user defined callback object");
            callback.callbacks.$inner(&**LUA)
        })*),*
        [
            $($(register_lua!($inner, $inner_lua_name)),*),*,
            ::lua_sys::luaL_Reg {
                name: ::std::ptr::null(),
                func: None
            },
        ]
    }};
    // Helper arm: one `luaL_Reg` entry pairing the Lua-visible name with the
    // generated trampoline.
    ($name:ident, $lua_name:ident) => {
        ::lua_sys::luaL_Reg {
            name: c_str!(stringify!($lua_name)),
            func: Some($name)
        }
    }
}
/// Registers a struct that implements [Awesome](callbacks/trait.Awesome.html)
///
/// Note that errors for registering the method is up to the caller
///
/// Use this in your main method, after using [register_for_lua](register_for_lua)
#[macro_export]
macro_rules! register_awesome {
    // Builds the `awesome.*` function table from the user's callback object
    // and registers it with Lua under the "awesome" library name.
    ($callback_impl:ident, $global_name:ident) => {{
        let awesome_lib = register_lua!($global_name, [
            awesome_quit; quit,
            awesome_exec; exec,
            awesome_spawn; spawn,
            awesome_restart; restart,
            awesome_connect_signal; connect_signal,
            awesome_disconnect_signal; disconnect_signal,
            awesome_emit_signal; emit_signal,
            awesome_systray; systray,
            awesome_load_image; load_image,
            awesome_set_preferred_icon_size; set_preferred_icon_size,
            awesome_register_xproperty; register_xproperty,
            awesome_set_xproperty; set_xproperty,
            awesome_get_xproperty; get_xproperty,
            awesome___index; __index,
            awesome___newindex; __newindex,
            awesome_xkb_set_layout_group; xkb_set_layout_group,
            awesome_xkb_get_layout_group; xkb_get_layout_group,
            awesome_xkb_get_group_names; xkb_get_group_names,
            awesome_xrdb_get_value; xrdb_get_value,
            awesome_kill; kill,
            awesome_sync; sync
        ]);
        let lua = LUA.0;
        unsafe {
            // The same table serves as both the method and meta table here.
            luaA::openlib(lua, c_str!("awesome"), &awesome_lib, &awesome_lib);
        }
    }}
}
/// Registers a struct that implements [Button](callbacks/trait.Button.html)
///
/// Note that errors for registering the method is up to the caller
///
/// Use this in your main method, after using [register_for_lua](register_for_lua)
#[macro_export]
macro_rules! register_button {
    // Builds the button class: method table, metatable, class setup, and the
    // "button"/"modifiers" properties.
    ($callback_impl:ident, $global_name:ident) => {{
        use ::awesome_wayland::callbacks::Button;
        use ::awesome_wayland::callbacks::button::{button_new,
                                                   button_get_button,
                                                   button_get_modifiers};
        use std::ptr::null_mut;
        let button_methods = register_lua!($global_name, [
            button_add_signal; add_signal,
            button_connect_signal; connect_signal,
            button_disconnect_signal; disconnect_signal,
            button_emit_signal; emit_signal,
            button_instances; instances,
            button_set_index_miss_handler; set_index_miss_handler,
            button_set_newindex_miss_handler; set_newindex_miss_handler,
            button___call; __call
        ]);
        let button_meta = register_lua!($global_name, [
            button___tostring_meta; __tostring,
            button_connect_signal_meta; connect_signal,
            // NOTE(review): unlike its siblings this entry exposes the Lua
            // name `button_disconnect_signal` rather than `disconnect_signal`
            // — confirm whether the prefix is intentional before changing.
            button_disconnect_signal_meta; button_disconnect_signal,
            button_emit_signal_meta; emit_signal,
            button___index_meta; __index,
            button___newindex_meta; __newindex
        ]);
        let lua = LUA.0;
        unsafe {
            let mut button_class = luaA::BUTTON_CLASS.try_write().unwrap();
            luaA::class_setup(lua, &mut *button_class, c_str!("button"), null_mut() as _,
                              button_new, None, None,
                              Some(luaA::class_index_miss_property),
                              Some(luaA::class_newindex_miss_property),
                              &button_methods, &button_meta);
            // NOTE(review): the setter is passed for both the "set" and the
            // third slot while the getter sits in the middle — mirror of the C
            // API's (setter, getter, setter-on-newindex) order; confirm.
            luaA::class_add_property(&mut *button_class, "button",
                                     Some(luaA::button_set_button),
                                     Some(button_get_button),
                                     Some(luaA::button_set_button));
            luaA::class_add_property(&mut *button_class, "modifiers",
                                     Some(luaA::button_set_modifiers),
                                     Some(button_get_modifiers),
                                     Some(luaA::button_set_modifiers));
        }
    }}
}
/// Registers a struct that implements [Client](callbacks/trait.Client.html)
///
/// Note that errors for registering the method is up to the caller
///
/// Use this in your main method, after using [register_for_lua](register_for_lua)
#[macro_export]
macro_rules! register_client {
    // Registers every client callback (methods, class methods, meta methods,
    // and properties) with Lua under the "client" table.
    ($callback_impl:ident, $global_name:ident) => {{
        use ::awesome_wayland::callbacks::Client;
        let lua_reg = register_lua!($global_name, [
            // Methods
            client_get; get,
            client___index; __index,
            client___newindex; __newindex,
            // Class methods
            client_add_signal; add_signal,
            client_connect_signal; connect_signal,
            client_disconnect_signal; disconnect_signal,
            client_emit_signal; emit_signal,
            client_instances; instances,
            client_set_index_miss_handler; set_index_miss_handler,
            client_set_newindex_miss_handler; set_newindex_miss_handler,
            // Object methods meta
            client___tostring_meta; __tostring_meta,
            client_connect_signal_meta; connect_signal_meta,
            // Fixed copy-paste from register_button!: the Lua-visible name was
            // `button_disconnect_signal_meta`, breaking the `<name>_meta`
            // convention every other entry in this table follows.
            client_disconnect_signal_meta; disconnect_signal_meta,
            // Class methods meta
            client___index_meta; __index_meta,
            client___newindex_meta; __newindex_meta,
            client___call; __call,
            // Meta
            client_keys; keys,
            client_isvisible; isvisible,
            client_geometry; geometry,
            client_apply_size_hints; apply_size_hints,
            client_tags; tags,
            client_kill; kill,
            client_swap; swap,
            client_raise; raise,
            client_lower; lower,
            // NOTE(review): "unmanange" looks like a typo for "unmanage", but
            // it must match the callback trait's method name — confirm before
            // renaming.
            client_unmanange; unmanange,
            client_titlebar_top; titlebar_top,
            client_titlebar_right; titlebar_right,
            client_titlebar_bottom; titlebar_bottom,
            client_titlebar_left; titlebar_left,
            client_get_icon; get_icon,
            // Properties
            client_name; name,
            client_transient_for; transient_for,
            client_skip_taskbar; skip_taskbar,
            client_content; content,
            client_type_; type_,
            client_class; class,
            client_instance; instance,
            client_role; role,
            client_pid; pid,
            client_leader_window; leader_window,
            client_machine; machine,
            client_icon_name; icon_name,
            client_screen; screen,
            client_hidden; hidden,
            client_minimized; minimized,
            client_fullscreen; fullscreen,
            client_modal; modal,
            client_group_window; group_window,
            client_maximized; maximized,
            client_maximized_horizontal; maximized_horizontal,
            client_maximized_vertical; maximized_vertical,
            client_icon; icon,
            client_icon_sizes; icon_sizes,
            client_ontop; ontop,
            client_above; above,
            client_below; below,
            client_sticky; sticky,
            client_size_hints_honor; size_hints_honor,
            client_urgent; urgent,
            client_size_hints; size_hints,
            client_focusable; focusable,
            client_shape_bounding; shape_bounding,
            client_shape_clip; shape_clip,
            client_shape_input; shape_input,
            client_startup_id; startup_id,
            client_client_shape_bounding; client_shape_bounding,
            client_client_shape_clip; client_shape_clip,
            client_first_tag; first_tag
        ]);
        LUA.register_methods("client\0", &lua_reg)
    }}
}
/// Registers a struct that implements [Drawin](callbacks/trait.Drawin.html)
///
/// Note that errors for registering the method is up to the caller
///
/// Use this in your main method, after using [register_for_lua](register_for_lua)
#[macro_export]
macro_rules! register_drawin {
    // Registers the drawin callbacks with Lua under the "drawin" table.
    ($callback_impl:ident, $global_name:ident) => {{
        use ::awesome_wayland::callbacks::Drawin;
        let lua_reg = register_lua!($global_name, [
            drawin_add_signal; add_signal,
            drawin_connect_signal; connect_signal,
            drawin_disconnect_signal; disconnect_signal,
            drawin_emit_signal; emit_signal,
            drawin_instances; instances,
            drawin_set_index_miss_handler; set_index_miss_handler,
            drawin_set_newindex_miss_handler; set_newindex_miss_handler,
            drawin___call; __call,
            drawin___tostring_meta; __tostring_meta,
            drawin_connect_signal_meta; connect_signal_meta,
            drawin_disconnect_signal_meta; disconnect_signal_meta,
            drawin___index_meta; __index_meta,
            drawin___newindex_meta; __newindex_meta
        ]);
        LUA.register_methods("drawin\0", &lua_reg)
    }}
}
/// Registers a struct that implements [Drawable](callbacks/trait.Drawable.html)
///
/// Note that errors for registering the method is up to the caller
///
/// Use this in your main method, after using [register_for_lua](register_for_lua)
#[macro_export]
macro_rules! register_drawable {
    // Builds the drawable class: method table, metatable, class setup, and
    // the read-only "surface" property.
    ($callback_impl:ident, $global_name:ident) => {{
        use ::awesome_wayland::callbacks::{drawable, Drawable};
        use ::awesome_wayland::luaA::DRAWABLE_CLASS;
        let drawable_methods = register_lua!($global_name, [
            drawable_add_signal; add_signal,
            drawable_connect_signal; connect_signal,
            drawable_disconnect_signal; disconnect_signal,
            drawable_emit_signal; emit_signal,
            drawable_instances; instances,
            drawable_set_index_miss_handler; set_index_miss_handler,
            drawable_set_newindex_miss_handler; set_newindex_miss_handler
        ]);
        let drawable_meta = register_lua!($global_name, [
            // NOTE(review): this Lua name is `__to_string` while every other
            // registration in this file uses `__tostring` — confirm which one
            // Lua actually dispatches on.
            drawable___tostring_meta; __to_string,
            drawable_connect_signal_meta; connect_signal,
            drawable_disconnect_signal_meta; disconnect_signal,
            drawable___index_meta; __index,
            drawable___newindex_meta; __newindex,
            drawable_refresh; refresh,
            drawable_geometry; geometry
        ]);
        let lua = LUA.0;
        unsafe {
            let mut drawable_class = DRAWABLE_CLASS.try_write().unwrap();
            luaA::class_setup(lua,
                              &mut *drawable_class,
                              c_str!("drawable"),
                              ::std::ptr::null_mut(),
                              drawable::new,
                              Some(drawable::wipe),
                              None,
                              Some(luaA::class_index_miss_property),
                              Some(luaA::class_newindex_miss_property),
                              &drawable_methods,
                              &drawable_meta);
            // "surface" exposes only a getter — no setters are registered.
            luaA::class_add_property(&mut *drawable_class,
                                     "surface",
                                     None,
                                     Some(luaA::drawable_get_surface),
                                     None);
        }
    }}
}
/// Registers a struct that implements [Keygrabber](callbacks/trait.Keygrabber.html)
///
/// Note that errors for registering the method is up to the caller
///
/// Use this in your main method, after using [register_for_lua](register_for_lua)
#[macro_export]
macro_rules! register_keygrabber {
    // Registers the keygrabber callbacks with Lua under "keygrabber".
    ($callback_impl:ident, $global_name:ident) => {{
        use ::awesome_wayland::callbacks::Keygrabber;
        let lua_reg = register_lua!($global_name, [
            keygrabber_run; run,
            keygrabber_stop; stop,
            keygrabber_isrunning; isrunning,
            keygrabber___index; __index,
            keygrabber___newindex; __newindex
        ]);
        LUA.register_methods("keygrabber\0", &lua_reg)
    }}
}
/// Registers a struct that implements [Mousegrabber](callbacks/trait.Mousegrabber.html)
///
/// Note that errors for registering the method is up to the caller
///
/// Use this in your main method, after using [register_for_lua](register_for_lua)
#[macro_export]
macro_rules! register_mousegrabber {
    // Registers the mousegrabber callbacks with Lua under "mousegrabber".
    ($callback_impl:ident, $global_name:ident) => {{
        use ::awesome_wayland::callbacks::Mousegrabber;
        let lua_reg = register_lua!($global_name, [
            mousegrabber_run; run,
            mousegrabber_stop; stop,
            mousegrabber_isrunning; isrunning,
            mousegrabber___index; __index,
            mousegrabber___newindex; __newindex
        ]);
        LUA.register_methods("mousegrabber\0", &lua_reg)
    }}
}
/// Registers a struct that implements [Mouse](callbacks/trait.Mouse.html)
///
/// Note that errors for registering the method is up to the caller
///
/// Use this in your main method, after using [register_for_lua](register_for_lua)
#[macro_export]
macro_rules! register_mouse {
    // Registers the mouse callbacks with Lua under "mouse".
    ($callback_impl:ident, $global_name:ident) => {{
        use ::awesome_wayland::callbacks::Mouse;
        let lua_reg = register_lua!($global_name, [
            mouse___index; __index,
            mouse___newindex; __newindex,
            mouse_coords; coords,
            mouse_object_under_pointer; object_under_pointer,
            mouse_set_index_miss_handler; set_index_miss_handler,
            mouse_set_newindex_miss_handler; set_newindex_miss_handler
        ]);
        LUA.register_methods("mouse\0", &lua_reg)
    }}
}
/// Registers a struct that implements [Root](callbacks/trait.Root.html)
///
/// Note that errors for registering the method is up to the caller
///
/// Use this in your main method, after using [register_for_lua](register_for_lua)
#[macro_export]
macro_rules! register_root {
    // Registers the root-window callbacks with Lua under "root".
    ($callback_impl:ident, $global_name:ident) => {{
        use ::awesome_wayland::callbacks::Root;
        let lua_reg = register_lua!($global_name, [
            root_buttons; buttons,
            root_keys; keys,
            root_cursor; cursor,
            root_fake_input; fake_input,
            root_drawins; drawins,
            root_wallpaper; wallpaper,
            root_size; size,
            root_size_mm; size_mm,
            root_tags; tags,
            root___index; __index,
            root___newindex; __newindex
        ]);
        LUA.register_methods("root\0", &lua_reg)
    }}
}
/// Registers a struct that implements [Screen](callbacks/trait.Screen.html)
///
/// Note that errors for registering the method is up to the caller
///
/// Use this in your main method, after using [register_for_lua](register_for_lua)
#[macro_export]
macro_rules! register_screen {
    // Registers the screen callbacks (class methods, meta methods, and
    // properties) with Lua under "screen".
    ($callback_impl:ident, $global_name:ident) => {{
        use ::awesome_wayland::callbacks::Screen;
        let lua_reg = register_lua!($global_name, [
            screen_add_signal; add_signal,
            screen_connect_signal; connect_signal,
            screen_disconnect_signal; disconnect_signal,
            screen_emit_signal; emit_signal,
            screen_instances; instances,
            screen_set_index_miss_handler; set_index_miss_handler,
            screen_set_newindex_miss_handler; set_newindex_miss_handler,
            screen_count; count,
            screen___index; __index,
            screen___newindex; __newindex,
            screen___call; __call,
            screen_fake_add; fake_add,
            screen___tostring_meta; __tostring_meta,
            screen_connect_signal_meta; connect_signal_meta,
            screen_disconnect_signal_meta; disconnect_signal_meta,
            screen___index_meta; __index_meta,
            screen___newindex_meta; __newindex_meta,
            screen_fake_remove; fake_remove,
            screen_fake_resize; fake_resize,
            screen_swap; swap,
            // properties
            screen_geometry; geometry,
            screen_index; index,
            screen_outputs; outputs,
            screen_workarea; workarea
        ]);
        LUA.register_methods("screen\0", &lua_reg)
    }}
}
/// Registers a struct that implements [Tag](callbacks/trait.Tag.html)
///
/// Note that errors for registering the method is up to the caller
///
/// Use this in your main method, after using [register_for_lua](register_for_lua)
#[macro_export]
macro_rules! register_tag {
    // Registers the tag callbacks with Lua under "tag".
    ($callback_impl:ident, $global_name:ident) => {{
        use ::awesome_wayland::callbacks::Tag;
        let lua_reg = register_lua!($global_name, [
            tag_add_signal; add_signal,
            tag_connect_signal; connect_signal,
            tag_disconnect_signal; disconnect_signal,
            tag_emit_signal; emit_signal,
            tag_instances; instances,
            tag_set_index_miss_handler; set_index_miss_handler,
            tag_set_newindex_miss_handler; set_newindex_miss_handler,
            tag___call; __call,
            tag___tostring_meta; __tostring_meta,
            tag_connect_signal_meta; connect_signal_meta,
            tag_disconnect_signal_meta; disconnect_signal_meta,
            tag___index_meta; __index_meta,
            tag___newindex_meta; __newindex_meta,
            tag_clients_meta; clients_meta,
            tag_name; name,
            tag_selected; selected,
            tag_activated; activated
        ]);
        LUA.register_methods("tag\0", &lua_reg)
    }}
}
/// Registers all of the callbacks for the passed-in global.
/// This is a helpful convenience macro so you don't have to write
/// out all those registers.
///
/// Note that this does absolutely no error handling whatsoever.
/// If you want to handle the possibility of the registrations failing
/// (which is unlikely, they should work) then use the individual register_*!
#[macro_export]
macro_rules! register_all {
    ($callback_impl:ident, $global_name:ident) => {{
        // The awesome/button/drawable registrations expand to `()` (they end
        // in an `unsafe` setup block), so there is nothing to unwrap; the
        // rest return the Result of `LUA.register_methods`.
        register_awesome!($callback_impl, $global_name);
        register_button!($callback_impl, $global_name);
        register_client!($callback_impl, $global_name).unwrap();
        register_drawin!($callback_impl, $global_name).unwrap();
        register_drawable!($callback_impl, $global_name);
        register_keygrabber!($callback_impl, $global_name).unwrap();
        register_mousegrabber!($callback_impl, $global_name).unwrap();
        register_mouse!($callback_impl, $global_name).unwrap();
        register_root!($callback_impl, $global_name).unwrap();
        register_screen!($callback_impl, $global_name).unwrap();
        register_tag!($callback_impl, $global_name).unwrap();
    }}
}
/// Registers a new instance of the passed-in user object as a global
/// singleton that will be used for all of the Lua callbacks.
///
/// This also registers a global named `LUA` that is an instance of
/// [Lua](../awesome_wayland/struct.Lua.html). This is used by both the user
/// and internally by the library, which is why it needs a pre-defined name.
#[macro_export]
macro_rules! register_for_lua {
    ($callback_impl:ident, $global_name:ident) => {
        use ::std::sync::{RwLock, Arc};
        lazy_static! {
            // Global callback singleton read by the register_lua! trampolines.
            #[allow(non_upper_case_globals)]
            pub static ref $global_name: RwLock<Awesome<$callback_impl>> =
                RwLock::new(Awesome::new());
            // Global Lua handle shared by the user and the library; the
            // library depends on this exact name.
            pub static ref LUA: Arc<Lua> = Arc::new(Lua::new());
        }
    }
}
/// Defines properties for the method
/// Eventually, this will automatically set the correct values in lua
/// so that they can be used as accessors, e.g []
/// For now, it just defines them
macro_rules! properties {
    // For each identifier, declare a required trait method of the form
    // `fn name(&self, lua: &Lua) -> c_int` (no default body).
    ($([ $( $inner:ident ),+ ])+) => {
        $($(fn $inner(&self, lua: &Lua) -> c_int;)*),*
    };
}
use lua_sys::*;
use libc;
#[allow(non_snake_case)]
/// Rust port of Lua's `luaL_opt`: applies `f` to stack slot `n`, falling back
/// to the default `d` when that slot is absent or nil.
pub unsafe fn luaL_opt(lua: *mut lua_State, f: fn(*mut lua_State, libc::c_int) -> lua_Integer,
                       n: libc::c_int, d: lua_Integer)
                       -> lua_Integer {
    match lua_type(lua, n) {
        // Nothing at that index, or an explicit nil: use the default.
        t if t == LUA_TNONE || t == LUA_TNIL as i32 => d,
        _ => f(lua, n),
    }
}
/// Defines the lua object functions. This is functionally equiv to
/// the C macro LUA_OBJECT_FUNCS from the awesome lib.
#[macro_export]
macro_rules! LUA_OBJECT_FUNCS {
($lua_class:path, $type:ty, $new_name:ident) => {
use std::ptr;
pub unsafe extern fn $new_name(lua: *mut lua_State) -> *mut Object {
let type_size =::std::mem::size_of::<$type>();
let p = lua_newuserdata(lua, type_size) as *mut $type;
ptr::write_bytes::<$type>(p, 0, 1);
let class = $lua_class.try_read().unwrap();
let old_instances = class.instances.get();
class.instances.set(old_instances + 1);
luaA::settype(lua, &*class);
lua_newtable(lua);
lua_newtable(lua);
lua_setmetatable(lua, -2);
lua_newtable(lua);
lua_setfield(lua, -2, c_str!("data"));
luaA::setuservalue(lua, -2);
lua_pushvalue(lua, -1);
luaA::class_emit_signal(lua, &*class,
c_str!("new"), 1);
return p as _;
}
}
}
#[macro_export]
macro_rules! LUA_CLASS_FUNCS {
    // Rust port of the C macro LUA_CLASS_FUNCS: generates the standard class
    // functions (signal management, instance count, and index/newindex
    // miss-handler registration) for `$lua_class`.
    ($lua_class:path, $add_sig:ident, $con_sig: ident, $discon_sig:ident,
     $emit_sig:ident, $class_inst: ident, $index_miss:ident,
     $newindex_miss:ident) => {
        #[allow(unused_imports)]
        use ::lua_sys::*;
        use ::std::ptr::null_mut;
        use ::libc;
        // add_signal only warns; nothing is registered.
        unsafe extern fn $add_sig(_lua: *mut lua_State) -> libc::c_int {
            eprintln!("signal usage with add_signal()");
            0
        }
        // Connects the function at stack slot 2 to the signal named at slot 1.
        unsafe extern fn $con_sig(lua: *mut lua_State) -> libc::c_int {
            let check_string = luaL_checklstring(lua, 1, null_mut());
            let mut class = $lua_class.try_write().unwrap();
            ::luaA::class_connect_signal_from_stack(lua,
                                                    &mut *class,
                                                    check_string,
                                                    2);
            0
        }
        // Disconnects the function at stack slot 2 from the named signal.
        unsafe extern fn $discon_sig(lua: *mut lua_State) -> libc::c_int {
            let check_string = luaL_checklstring(lua, 1, null_mut());
            let mut class = $lua_class.try_write().unwrap();
            ::luaA::class_disconnect_signal_from_stack(lua,
                                                       &mut *class,
                                                       check_string,
                                                       2);
            0
        }
        // Emits the named signal with all remaining stack values as arguments.
        unsafe extern fn $emit_sig(lua: *mut lua_State) -> libc::c_int {
            let check_string = luaL_checklstring(lua, 1, null_mut());
            let mut class = $lua_class.try_write().unwrap();
            ::luaA::class_emit_signal(lua, &mut *class,
                                      check_string, lua_gettop(lua) - 1);
            0
        }
        // Pushes the current instance count of the class.
        unsafe extern fn $class_inst(lua: *mut lua_State) -> libc::c_int {
            let class = $lua_class.try_write().unwrap();
            lua_pushinteger(lua, class.instances.get() as lua_Integer);
            1
        }
        unsafe extern fn $index_miss(lua: *mut lua_State) -> libc::c_int {
            let mut class = $lua_class.try_write().unwrap();
            // Fixed: this previously registered into `newindex_miss_handler`,
            // clobbering the newindex handler and never setting the index
            // handler.
            ::luaA::registerfct(lua, 1, &mut class.index_miss_handler)
        }
        unsafe extern fn $newindex_miss(lua: *mut lua_State) -> libc::c_int {
            let mut class = $lua_class.try_write().unwrap();
            ::luaA::registerfct(lua, 1, &mut class.newindex_miss_handler)
        }
    }
}
/// See LUA_OBJECT_EXPORT_OPTIONAL_PROPERTY in the C lib for more details
#[macro_export]
macro_rules! LUA_OBJECT_EXPORT_OPTIONAL_PROPERTY {
    // Generates a property getter that pushes nothing (returns 0 values) when
    // the field equals `$empty_value`, and otherwise pushes a clone of the
    // field via `$pusher`.
    ($f_name:ident, $type:ty, $field:ident, $pusher:ident, $empty_value:expr) => {
        pub unsafe fn $f_name(lua: *mut lua_State,
                              ty: *mut ::object::class::Object)
                              -> ::libc::c_int {
            // Reinterpret the generic Object pointer as the concrete type.
            let object = &mut *(ty as *mut $type);
            if object.$field == $empty_value {
                return 0
            }
            $pusher(lua, (*(ty as *mut $type)).$field.clone() as _);
            1
        }
    }
}
/// See LUA_OBJECT_EXPORT_PROPRERTY in the C lib for more details
///
/// Expands to a Lua getter `$f_name` that clones `$field` out of the object
/// and pushes it with `$pusher`, always returning exactly one value.
#[macro_export]
macro_rules! LUA_OBJECT_EXPORT_PROPERTY {
    ($f_name:ident, $type:ty, $field:ident, $pusher:ident) => {
        pub unsafe fn $f_name(lua: *mut lua_State,
                              ty: *mut ::object::class::Object)
                              -> ::libc::c_int {
            // Caller must guarantee `ty` really points at a `$type`.
            $pusher(lua, (*(ty as *mut $type)).$field.clone() as _);
            1
        }
    }
}
/// See OBJECT_EXPORT_PROPRERTY in the C lib for more details
///
/// Expands to a plain (non-Lua) getter `$f_name` that clones `$field` out of
/// the object and `as`-casts it to `$return_t`.
#[macro_export]
macro_rules! OBJECT_EXPORT_PROPERTY {
    ($f_name:ident, $type:ty, $field:ident, $return_t:ident) => {
        pub unsafe fn $f_name(ty: *mut ::object::class::Object) -> $return_t {
            // Caller must guarantee `ty` really points at a `$type`.
            (*(ty as *mut $type)).$field.clone() as _
        }
    }
}
/// A convenience wrapper around LUA_OBJECT_EXPORT_PROPERTY so that
/// you can define many at once without making your eyes bleed.
///
/// Each bracketed group holds `fn_name; field; lua_push_fn` triples.
// NOTE(review): the matcher uses `*` repetition while the transcriber uses
// `+`; rustc tolerates the mismatch today, but the operators should agree.
#[macro_export]
macro_rules! LUA_OBJECT_EXPORT_PROPERTIES {
    ($type_name:ty,
     $([ $( $inner_f_name:ident; $field:ident; $lua_func:ident),* ])*) => {
        $($(LUA_OBJECT_EXPORT_PROPERTY!($inner_f_name,
                                        $type_name,
                                        $field,
                                        $lua_func);)+)+
    };
}
/// A convenience wrapper around LUA_OBJECT_EXPORT_OPTIONAL_PROPERTY so that
/// you can define many at once without making your eyes bleed.
///
/// Each bracketed group holds `fn_name; field; lua_push_fn; empty_value`
/// quadruples.
// NOTE(review): same `*`-matcher / `+`-transcriber mismatch as
// LUA_OBJECT_EXPORT_PROPERTIES above.
#[macro_export]
macro_rules! LUA_OBJECT_EXPORT_OPTIONAL_PROPERTIES {
    ($type_name:ty,
     $([ $( $inner_f_name:ident; $field:ident; $lua_func:ident; $empty_value:expr),* ])*) => {
        $($(LUA_OBJECT_EXPORT_OPTIONAL_PROPERTY!($inner_f_name,
                                                 $type_name,
                                                 $field,
                                                 $lua_func,
                                                 $empty_value);)+)+
    };
}
|
use std::cell::RefCell;
use std::collections::HashMap;
use std::ops::Deref;
use std::rc::Rc;
use hydroflow::serde::{Deserialize, Serialize};
use hydroflow::util::cli::{ConnectedDemux, ConnectedDirect, ConnectedSink, ConnectedSource};
use hydroflow::util::{deserialize_from_bytes, serialize_to_bytes};
use hydroflow::{hydroflow_syntax, tokio};
/// A client request: adjust one tweet's like counter by `likes`.
#[derive(Serialize, Deserialize, Clone, Debug)]
struct IncrementRequest {
    tweet_id: u64,
    likes: i32, // may be negative (an "unlike")
}
/// Wire message between replicas: either a batch of gossiped counter state
/// or a single increment.
#[derive(Serialize, Deserialize, Clone, Debug)]
enum GossipOrIncrement {
    // (counter id, (sender replica index, positive count, negative count))
    Gossip(Vec<(u64, (usize, u32, u32))>),
    Increment(u64, i32),
}
/// (counter id, modified-by-local-increment flag, shared per-replica
/// (increments, decrements) vectors — a PN-counter.)
type NextStateType = (u64, bool, Rc<RefCell<(Vec<u32>, Vec<u32>)>>);
/// One replica of a gossip-replicated PN-counter service: applies local
/// increments, exchanges state deltas with peer replicas, and streams
/// aggregated (pos - neg) counts to subscribers.
#[hydroflow::main]
async fn main() {
    let mut ports = hydroflow::util::cli::init().await;
    // Both CLI arguments arrive as single-element JSON arrays, e.g. "[0]".
    let my_id: Vec<usize> = serde_json::from_str(&std::env::args().nth(1).unwrap()).unwrap();
    let my_id = my_id[0];
    let num_replicas: Vec<usize> = serde_json::from_str(&std::env::args().nth(2).unwrap()).unwrap();
    let num_replicas = num_replicas[0];
    // External ports: client increments in, query results out, and
    // bidirectional gossip with peer replicas.
    let increment_requests = ports
        .port("increment_requests")
        .connect::<ConnectedDirect>()
        .await
        .into_source();
    let query_responses = ports
        .port("query_responses")
        .connect::<ConnectedDirect>()
        .await
        .into_sink();
    let to_peer = ports
        .port("to_peer")
        .connect::<ConnectedDemux<ConnectedDirect>>()
        .await
        .into_sink();
    let from_peer = ports
        .port("from_peer")
        .connect::<ConnectedDirect>()
        .await
        .into_source();
    // Background task: print resident memory once per second (Linux only).
    let f1 = async move {
        #[cfg(target_os = "linux")]
        loop {
            let x = procinfo::pid::stat_self().unwrap();
            // rss is in pages; assumes 4 KiB pages — TODO confirm.
            let bytes = x.rss * 1024 * 4;
            println!("memory,{}", bytes);
            tokio::time::sleep(std::time::Duration::from_secs(1)).await;
        }
    };
    let df = hydroflow_syntax! {
        // Fold gossip + local increments into per-counter (pos, neg) replica
        // vectors. `modified_tweets` records counters touched this tick; its
        // bool is true only for counters changed by a *local* increment.
        next_state = union()
            -> fold::<'static>((HashMap::<u64, Rc<RefCell<(Vec<u32>, Vec<u32>)>>>::new(), HashMap::new(), 0), |(cur_state, modified_tweets, last_tick): &mut (HashMap<_, _>, HashMap<_, _>, _), goi| {
                // New tick: forget last tick's modification set.
                if context.current_tick() != *last_tick {
                    modified_tweets.clear();
                }
                match goi {
                    GossipOrIncrement::Gossip(gossip) => {
                        for (counter_id, (gossip_i, gossip_pos, gossip_neg)) in gossip.iter() {
                            let gossip_i = *gossip_i;
                            let cur_value = cur_state.entry(*counter_id).or_insert(Rc::new(RefCell::new((
                                vec![0; num_replicas], vec![0; num_replicas]
                            ))));
                            let mut cur_value = cur_value.as_ref().borrow_mut();
                            // Grow-only merge: take the max of each replica slot.
                            if *gossip_pos > cur_value.0[gossip_i] {
                                cur_value.0[gossip_i] = *gossip_pos;
                                modified_tweets.entry(*counter_id).or_insert(false);
                            }
                            if *gossip_neg > cur_value.1[gossip_i] {
                                cur_value.1[gossip_i] = *gossip_neg;
                                modified_tweets.entry(*counter_id).or_insert(false);
                            }
                        }
                    }
                    GossipOrIncrement::Increment(counter_id, delta) => {
                        let cur_value = cur_state.entry(counter_id).or_insert(Rc::new(RefCell::new((
                            vec![0; num_replicas], vec![0; num_replicas]
                        ))));
                        let mut cur_value = cur_value.as_ref().borrow_mut();
                        // PN-counter: positive deltas bump slot 0, negative bump slot 1.
                        if delta > 0 {
                            cur_value.0[my_id] += delta as u32;
                        } else {
                            cur_value.1[my_id] += (-delta) as u32;
                        }
                        *modified_tweets.entry(counter_id).or_insert(false) |= true;
                    }
                }
                *last_tick = context.current_tick();
            })
            // Emit only folds produced in the current tick with nonempty changes.
            -> filter(|(_, _, tick)| *tick == context.current_tick())
            -> filter(|(_, modified_tweets, _)| !modified_tweets.is_empty())
            -> map(|(state, modified_tweets, _)| modified_tweets.iter().map(|(t, is_local)| (*t, *is_local, state.get(t).unwrap().clone())).collect::<Vec<_>>())
            -> tee();
        source_stream(from_peer)
            -> map(|x| deserialize_from_bytes::<GossipOrIncrement>(&x.unwrap()).unwrap())
            -> next_state;
        source_stream(increment_requests)
            -> map(|x| deserialize_from_bytes::<IncrementRequest>(&x.unwrap()).unwrap())
            -> map(|t| GossipOrIncrement::Increment(t.tweet_id, t.likes))
            -> next_state;
        // Gossip fan-out: pair every peer with this tick's changes, but only
        // forward counters we modified locally (t.1).
        all_peers = source_iter(0..num_replicas)
            -> filter(|x| *x != my_id);
        all_peers -> [0] broadcaster;
        next_state -> [1] broadcaster;
        broadcaster = cross_join::<'static, 'tick>()
            -> map(|(peer, state): (_, Vec<NextStateType>)| {
                (peer as u32, state.iter().filter(|t| t.1).map(|(k, _, v)| (*k, (my_id, v.as_ref().borrow().0[my_id], v.as_ref().borrow().1[my_id]))).collect())
            })
            -> filter(|(_, gossip): &(_, Vec<_>)| !gossip.is_empty())
            -> map(|(peer, gossip): (_, _)| {
                (peer, serialize_to_bytes(GossipOrIncrement::Gossip(gossip)))
            })
            -> dest_sink(to_peer);
        // Query responses: current value of every changed counter,
        // computed as sum(pos) - sum(neg).
        next_state
            -> flat_map(|a: Vec<NextStateType>| {
                a.into_iter().map(|(k, _, rc_array)| {
                    let rc_borrowed = rc_array.as_ref().borrow();
                    let (pos, neg) = rc_borrowed.deref();
                    (k, pos.iter().sum::<u32>() as i32 - neg.iter().sum::<u32>() as i32)
                }).collect::<Vec<_>>()
            })
            -> map(serialize_to_bytes::<(u64, i32)>)
            -> dest_sink(query_responses);
    };
    // initial memory
    #[cfg(target_os = "linux")]
    {
        let x = procinfo::pid::stat_self().unwrap();
        let bytes = x.rss * 1024 * 4;
        println!("memory,{}", bytes);
    }
    let f1_handle = tokio::spawn(f1);
    hydroflow::util::cli::launch_flow(df).await;
    f1_handle.abort();
}
|
#![allow(non_snake_case, non_upper_case_globals)]
extern crate dimensioned as dim;
use self::dim::si;
use self::dim::Sqrt;
/// One body in the 2-D n-body benchmark; all state is carried in
/// `dimensioned` SI-typed wrappers so unit errors fail at compile time.
#[derive(Copy, Clone)]
struct MetricNBody {
    position: Position2D,
    accel: Accel2D,
    velocity: Velocity2D,
    mass: si::Kilogram<f64>,
}
use std::marker::PhantomData;
/// 2-D acceleration vector (x, y) in m/s².
#[derive(Copy, Clone, Debug)]
struct Accel2D(si::MeterPerSecond2<f64>, si::MeterPerSecond2<f64>);
// Const zero; built from the raw representation because `si` constructors
// are not const fns.
const MPSS_ZERO: si::MeterPerSecond2<f64> = si::MeterPerSecond2 { value_unsafe: 0.0, _marker: PhantomData };
/// 2-D velocity vector (x, y) in m/s.
#[derive(Copy, Clone, Debug)]
struct Velocity2D(si::MeterPerSecond<f64>, si::MeterPerSecond<f64>);
const MPS_ZERO: si::MeterPerSecond<f64> = si::MeterPerSecond { value_unsafe: 0.0, _marker: PhantomData };
/// 2-D position (x, y) in meters.
#[derive(Copy, Clone, Debug)]
struct Position2D(si::Meter<f64>, si::Meter<f64>);
impl Position2D {
    /// Returns the separation from `self` to `other` as
    /// `(dx, dy, euclidean_distance)`, all in meters.
    fn dist(&self, other: &Position2D) -> (si::Meter<f64>, si::Meter<f64>, si::Meter<f64>) {
        let dx = other.0 - self.0;
        let dy = other.1 - self.1;
        let euclid = (dx * dx + dy * dy).sqrt();
        (dx, dy, euclid)
    }
}
/// Fixed starting configuration for the benchmark: four 2000 kg bodies at
/// rest with zero acceleration, scattered around the plane.
const MetricNBodies: [MetricNBody; 4] = [MetricNBody {
    position: Position2D(si::Meter { value_unsafe: 1500.0, _marker: PhantomData }, si::Meter { value_unsafe: 2500.0, _marker: PhantomData }),
    accel: Accel2D(MPSS_ZERO, MPSS_ZERO),
    velocity: Velocity2D(MPS_ZERO, MPS_ZERO),
    mass: si::Kilogram { value_unsafe: 2000.0, _marker: PhantomData }
},
MetricNBody {
    position: Position2D(si::Meter { value_unsafe: 3500.0, _marker: PhantomData }, si::Meter { value_unsafe: 500.0, _marker: PhantomData }),
    accel: Accel2D(MPSS_ZERO, MPSS_ZERO),
    velocity: Velocity2D(MPS_ZERO, MPS_ZERO),
    mass: si::Kilogram { value_unsafe: 2000.0, _marker: PhantomData }
},
MetricNBody {
    position: Position2D(si::Meter { value_unsafe: 200.0, _marker: PhantomData }, si::Meter { value_unsafe: 4500.0, _marker: PhantomData }),
    accel: Accel2D(MPSS_ZERO, MPSS_ZERO),
    velocity: Velocity2D(MPS_ZERO, MPS_ZERO),
    mass: si::Kilogram { value_unsafe: 2000.0, _marker: PhantomData }
},
MetricNBody {
    position: Position2D(si::Meter { value_unsafe: -1500.0, _marker: PhantomData }, si::Meter { value_unsafe: 750.0, _marker: PhantomData }),
    accel: Accel2D(MPSS_ZERO, MPSS_ZERO),
    velocity: Velocity2D(MPS_ZERO, MPS_ZERO),
    mass: si::Kilogram { value_unsafe: 2000.0, _marker: PhantomData }
}];
/// Runs 10_000 fixed 0.1 s time steps of naive O(n²) Newtonian gravity over
/// `MetricNBodies`, with every quantity carried in SI-typed wrappers.
///
/// `#[inline(never)]` keeps this a distinct symbol for benchmarking.
#[inline(never)]
pub fn dimensioned_nbody() {
    // Gravitational constant, N·m²/kg².
    let G = 6.674e-11 * si::N * si::M * si::M / (si::KG * si::KG);
    let mut bodies = MetricNBodies.to_vec();
    for _ in 0..10000 {
        //calculate accelerations
        for a in 0..bodies.len() {
            bodies[a].accel = Accel2D(MPSS_ZERO, MPSS_ZERO);
            for b in 0..bodies.len() {
                if a == b {
                    continue;
                }
                let La = bodies[a].position;
                let Lb = bodies[b].position;
                let Ma = bodies[a].mass;
                let Mb = bodies[b].mass;
                let (Dx, Dy, dist) = La.dist(&Lb);
                // F = G·m1·m2 / r²
                let force = G * (Ma * Mb) / (dist * dist);
                // Fixed: decompose the force along the unit separation vector
                // (Dx/dist, Dy/dist). The previous Dx/Dy and Dy/Dx ratios were
                // not a valid decomposition and divided by zero whenever two
                // bodies shared an x or y coordinate.
                let Fx = force * (Dx / dist);
                let Fy = force * (Dy / dist);
                let Ax = Fx / Ma;
                let Ay = Fy / Ma;
                bodies[a].accel = Accel2D(bodies[a].accel.0 + Ax, bodies[a].accel.1 + Ay);
            }
        }
        for a in 0..bodies.len() {
            //integrate acceleration into velocity (dt = 0.1 s)
            let Velocity2D(Vx, Vy) = bodies[a].velocity;
            let Accel2D(Ax, Ay) = bodies[a].accel;
            let Vx1 = Ax * (0.1 * si::S);
            let Vy1 = Ay * (0.1 * si::S);
            bodies[a].velocity = Velocity2D(Vx + Vx1, Vy + Vy1);
            //integrate velocity into position (semi-implicit Euler: uses the
            //already-updated velocity)
            let Velocity2D(Vx, Vy) = bodies[a].velocity;
            let Position2D(x, y) = bodies[a].position;
            bodies[a].position = Position2D(x + Vx * (0.1 * si::S), y + Vy * (0.1 * si::S));
        }
    }
}
|
extern crate rustc_serialize;
extern crate graphael;
use std::io::{self, BufRead, Write};
use graphael::Graph;
// Shorthand HashMap
// dict!({"yes": "1", "no": "0"}) => vec!($(($key, $value)),*).move_iter().collect();
// macro_rules! dict (
// ({$($key:expr : $value:expr),*}) => (vec!($(($key, $value)),*).move_iter().collect())
// );
/// Interactive query REPL over a graphael graph database.
///
/// Reads a graph name from stdin (falling back to "langs" on read error),
/// loads `./data/<name>.graph`, then loops: state 0 shows the menu and picks
/// a query type; states 1-5 read that query's argument line and print the
/// result. Malformed query input now prints an error instead of panicking.
fn main() {
    println!("Graphael 0.1");
    print!("Enter a graph name> ");
    io::stdout().flush().unwrap();
    // Get the graph file name from stdin or use 'langs'
    let mut input = String::new();
    let graph_file = match io::stdin().read_line(&mut input) {
        Ok(_) => {
            println!("Using '{}'", &input.trim());
            input.trim().to_owned()
        },
        Err(e) => {
            println!("Error: {}.\nUsing 'langs'", e);
            "langs".to_string()
        }
    };
    // Read an already filled database
    let graph = Graph::read_from_file(format!("./data/{}.graph", graph_file));
    // Single menu printer so the initial prompt and the post-query prompt
    // can no longer drift apart (item 5's wording previously differed).
    let print_menu = || {
        println!("1. Nodes with attribute.");
        println!("2. Nodes with key-value.");
        println!("3. Edges with label.");
        println!("4. Edges from node with label.");
        println!("5. Look up node by id.");
        println!("");
        print!(">>> ");
    };
    // Current state variable to keep track of which type of query we are
    // doing: 0 = at the menu, 1..=5 = awaiting that query's argument.
    let mut current_state = 0;
    print_menu();
    io::stdout().flush().unwrap();
    // Read from stdin
    let locked_in = io::stdin();
    for line in locked_in.lock().lines() {
        match line {
            Ok(s) => {
                match current_state {
                    // Initial state: select a query type
                    0 => match s.trim().parse::<i32>() {
                        Ok(1) => current_state = 1,
                        Ok(2) => current_state = 2,
                        Ok(3) => current_state = 3,
                        Ok(4) => current_state = 4,
                        Ok(5) => current_state = 5,
                        _ => current_state = 0
                    },
                    1 => { // Nodes with attribute
                        println!("{:?}", graph.nodes_with_attr(&s.trim().to_string()));
                        current_state = 0
                    },
                    2 => { // Node by key-value pair
                        let kv : Vec<&str> = s.trim().split('=').map(|x| x.trim()).collect();
                        // Guard against a missing '=' instead of panicking on kv[1].
                        if kv.len() >= 2 {
                            println!("{:?}", graph.nodes_with_prop(&kv[0].to_string(), &kv[1].to_string()));
                        } else {
                            println!("Expected input of the form 'key = value'");
                        }
                        current_state = 0
                    },
                    3 => { // Edges with label
                        println!("{:?}", graph.edges_with_label(&s.trim().to_string()));
                        current_state = 0
                    },
                    4 => { // Edges from node (NodeIndex) with label
                        let node_label : Vec<&str> = s.trim().split("HAS").map(|x| x.trim()).collect();
                        // Guard against malformed input / non-numeric ids.
                        if node_label.len() >= 2 {
                            match node_label[0].parse::<usize>() {
                                Ok(id) => println!("{:?}", graph.edges_with_label_from(id, node_label[1])),
                                Err(e) => println!("Invalid node id: {}", e)
                            }
                        } else {
                            println!("Expected input of the form '<node> HAS <label>'");
                        }
                        current_state = 0
                    },
                    5 => { // Look up node by id (NodeIndex)
                        match s.trim().parse::<usize>() {
                            Ok(nodeid) => println!("{:?}", graph.get_node(nodeid)),
                            Err(e) => println!("Invalid node id: {}", e)
                        }
                        current_state = 0
                    },
                    _ => { print!(">>> ") }
                }
            },
            Err(e) => panic!("Error: {}", e)
        };
        // Check the state and print accordingly
        match current_state {
            1 => print!("Enter attribute> "),
            2 => print!("Enter key-value> "),
            3 => print!("Enter label> "),
            4 => print!("Enter node and label> "),
            5 => print!("Enter node id> "),
            _ => {
                println!("");
                print_menu()
            }
        };
        io::stdout().flush().unwrap();
    }
}
|
mod keyed_set;
mod window;
mod physics;
mod simulation;
mod math;
use std::{
time,
io,
fs,
path,
collections::HashMap,
};
use rand::{random, seq::SliceRandom};
use raylib::prelude::*;
use crate::{
window::prelude::*,
simulation::prelude::*,
};
// Vector with each component drawn uniformly from [0, 1).
fn random_vector2() -> Vector2 { Vector2::new(random(), random()) }
// Fully opaque color with random RGB channels.
fn random_color() -> Color { Color::new(random(), random(), random(), 255) }
/// Inserts a blob with randomized traits at a random position inside the
/// simulation, gives it a random name from `names`, and returns its key.
///
/// The numeric multipliers below are tuning constants scaling each random
/// trait's range; the exact parameter meanings are defined by
/// `Simulation::insert_blob` — see simulation module.
fn add_random_blob(sim: &mut Simulation, names: &mut Vec<String>) -> keyed_set::Key<Blob> {
    let key = sim.insert_blob(
        random_vector2() * sim.size(),
        20. * random::<f32>(),
        random_color(),
        120. * random::<f32>(),
        5. * random::<f32>(),
        180f32 * random::<f32>(),
        170f32 * random::<f32>(),
        random_color(),
        random(),
        random(),
        25. * random::<f32>(),
        random::<f32>(),
        2. * random::<f32>(),
        0.5 * random::<f32>(),
        random::<f32>(),
    );
    // Name the new blob after a random entry from the names list.
    let name = names.choose(&mut rand::thread_rng()).unwrap().to_string();
    sim.get_blob_mut(key).unwrap().name = Some(name);
    key
}
/// Drops a food item at a random position inside the simulation bounds and
/// returns its key.
fn add_random_food(sim: &mut Simulation) -> keyed_set::Key<Food> {
    sim.insert_food(random_vector2() * sim.size())
}
/// Reads a whitespace-separated list of names from the file at `path`.
///
/// # Errors
/// Propagates any I/O error raised while reading the file.
fn read_names<P: AsRef<path::Path> + ?Sized>(path: &P) -> io::Result<Vec<String>> {
    let text = fs::read_to_string(path)?;
    let names = text.split_whitespace().map(str::to_string).collect();
    Ok(names)
}
/// A drag-selection in progress: where the mouse went down, and each
/// selected blob's position at that moment (used to translate the whole
/// group as the mouse moves).
struct Selection {
    start_mouse_pos: Vector2,
    blobs: HashMap<keyed_set::Key<Blob>, Vector2>,
}
fn main() {
// options
let food_add_delay = time::Duration::from_secs_f32(0.2);
let blob_add_delay = time::Duration::from_secs_f32(0.5);
let start_blobs = 10;
let start_foods = 100;
let window_config = WindowConfig {
width: 1300,
height: 680,
title: "Blobs",
};
// allocate resources
let mut window = Window::new(&window_config);
let mut sim = Simulation::new(Vector2::new(window.width() as f32, window.height() as f32));
let mut food_add_time = time::Instant::now();
let mut blob_add_time = time::Instant::now();
let mut names = read_names("names.txt").unwrap();
// initialize simulation
for _ in 0..start_blobs {
let blob_key = add_random_blob(&mut sim, &mut names);
}
// initialize simulation
for _ in 0..start_foods {
add_random_food(&mut sim);
}
let mut last_frame_time = time::Instant::now();
let mut selection: Option<Selection> = None;
window.draw_loop(|mut draw| {
// record time and calculate delta
let frame_time = time::Instant::now();
let delta_time = (frame_time - last_frame_time).as_secs_f32();
last_frame_time = frame_time;
// draw and simulate
draw.clear_background(Color::WHITE);
sim.draw(&mut draw);
sim.step(delta_time);
// add blob
if frame_time > blob_add_time {
blob_add_time = frame_time + blob_add_delay;
let blob_key = add_random_blob(&mut sim, &mut names);
}
// add food
if frame_time > food_add_time {
food_add_time = frame_time + food_add_delay;
add_random_food(&mut sim);
}
if draw.is_key_down(KeyboardKey::KEY_SPACE) {
add_random_blob(&mut sim, &mut names);
}
if draw.is_mouse_button_down(MouseButton::MOUSE_LEFT_BUTTON) {
if let Some(selection) = &mut selection {
for (&blob_key, start_pos) in &selection.blobs {
sim.set_blob_pos(blob_key, *start_pos + draw.get_mouse_position() - selection.start_mouse_pos);
}
} else {
let (blobs, _) = sim.select(draw.get_mouse_position());
selection = Some(Selection {
start_mouse_pos: draw.get_mouse_position(),
blobs: blobs.iter().map(|&blob_key| (blob_key, sim.get_blob(blob_key).unwrap().pos())).collect(),
});
}
} else {
selection = None;
}
if let Some(selection) = &selection {
let mut y = 10;
for (&blob_key, _) in &selection.blobs {
if let Some(blob) = sim.get_blob(blob_key) {
let font_size = 20;
draw.draw_text(
&format!("Speed: {} Pov: {} Depth: {}", blob.speed, blob.pov, blob.sight_depth()),
10, y, font_size, Color::BLACK
);
y += font_size;
}
}
}
});
} |
use super::*;
use crate::errors::*;
use crate::message::packer::*;
/// An OPTResource is an OPT pseudo Resource record.
///
/// The pseudo resource record is part of the extension mechanisms for DNS
/// as defined in RFC 6891.
#[derive(Default, Debug, Clone, PartialEq)]
pub struct OptResource {
    // The record's variable-length options, in wire order.
    pub options: Vec<DnsOption>,
}
/// An Option represents a DNS message option within OPTResource.
///
/// The message option is part of the extension mechanisms for DNS as
/// defined in RFC 6891.
#[derive(Default, Debug, Clone, PartialEq)]
pub struct DnsOption {
    pub code: u16, // option code
    pub data: Vec<u8>,
}
impl fmt::Display for DnsOption {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(
f,
"dnsmessage.Option{{Code: {}, Data: {:?}}}",
self.code, self.data
)
}
}
impl fmt::Display for OptResource {
    /// Formats all options comma-joined, in the Go `dnsmessage` debug style.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        let joined = self
            .options
            .iter()
            .map(|o| o.to_string())
            .collect::<Vec<String>>()
            .join(",");
        write!(f, "dnsmessage.OPTResource{{options: {}}}", joined)
    }
}
impl ResourceBody for OptResource {
    /// This body always encodes an OPT record.
    fn real_type(&self) -> DnsType {
        DnsType::Opt
    }
    /// Appends each option to `msg` as: 16-bit code, 16-bit data length,
    /// then the raw data bytes. OPT options are never name-compressed, so
    /// the compression arguments are unused.
    fn pack(
        &self,
        mut msg: Vec<u8>,
        _compression: &mut Option<HashMap<String, usize>>,
        _compression_off: usize,
    ) -> Result<Vec<u8>, Error> {
        for opt in &self.options {
            msg = pack_uint16(msg, opt.code);
            msg = pack_uint16(msg, opt.data.len() as u16);
            msg = pack_bytes(msg, &opt.data);
        }
        Ok(msg)
    }
    /// Parses options from `msg` starting at `off`, consuming exactly
    /// `length` bytes; returns the offset just past the parsed body.
    fn unpack(&mut self, msg: &[u8], mut off: usize, length: usize) -> Result<usize, Error> {
        let mut opts = vec![];
        let old_off = off;
        while off < old_off + length {
            let (code, new_off) = unpack_uint16(msg, off)?;
            off = new_off;
            let (l, new_off) = unpack_uint16(msg, off)?;
            off = new_off;
            let mut opt = DnsOption {
                code,
                data: vec![0; l as usize],
            };
            // Reject a declared data length that runs past the buffer.
            if off + l as usize > msg.len() {
                return Err(ERR_CALC_LEN.to_owned());
            }
            opt.data.copy_from_slice(&msg[off..off + l as usize]);
            off += l as usize;
            opts.push(opt);
        }
        self.options = opts;
        Ok(off)
    }
}
|
use std::collections::HashMap;
/// Parses an adjacency list where each line looks like `"0 <-> 1, 2"`:
/// a source id, a separator token, then comma-separated target ids.
///
/// # Panics
/// Panics if a line is malformed or an id fails to parse as `usize`.
pub fn parse_input(input: &str) -> HashMap<usize, Vec<usize>> {
    input
        .lines()
        .map(|line| {
            // At most 3 pieces: source, "<->" separator, target list.
            let mut pieces = line.splitn(3, ' ');
            let source = pieces.next().unwrap().parse::<usize>().unwrap();
            let targets = pieces
                .nth(1) // skip the separator token
                .unwrap()
                .split(", ")
                .map(|id| id.parse::<usize>().unwrap())
                .collect::<Vec<usize>>();
            (source, targets)
        })
        .collect()
}
|
use tokio::net::TcpStream;
use tokio::prelude::*;
use std::fs::File;
use std::io::prelude::*;
/// Streams the contents of `input.txt` to a TCP server at 127.0.0.1:6142,
/// reading the file in chunks sized to the socket's send buffer and
/// retrying partial writes until each chunk is fully sent.
#[tokio::main]
async fn main() {
    let mut stream = TcpStream::connect("127.0.0.1:6142").await.unwrap();
    println!("created stream");
    let filename = "input.txt";
    let mut f = File::open(&filename).expect("no file found");
    let mut total_buffer_written = 0;
    loop {
        // Allocate a chunk buffer matching the socket send-buffer size.
        // NOTE(review): allocated fresh every iteration; could be hoisted.
        let mut buffer = vec![0; stream.send_buffer_size().unwrap()];
        let mut n = f.read(&mut buffer).expect("buffer overflow") as u32;
        if n == 0 {
            // End of file.
            break;
        }
        // Drain this chunk, tolerating short writes.
        // NOTE(review): if write() ever returns Ok(0) (e.g. peer closed),
        // this inner loop spins forever — consider write_all / error check.
        let mut bytes_written = 0;
        while n > 0 {
            let result = stream
                .write(&buffer[bytes_written..(n as usize + bytes_written)])
                .await;
            let bytes_sent = result.unwrap();
            n -= bytes_sent as u32;
            bytes_written += bytes_sent;
            total_buffer_written += bytes_sent;
            println!("wrote to stream; success={:?} left to send, {:?} stream.send_buffer_size().unwrap() = {:?}",n, bytes_written, stream.send_buffer_size().unwrap());
        }
    }
}
|
use std::collections::{HashMap, HashSet};
use std::ops::Deref;
use hydroflow_lang::diagnostic::{Diagnostic, Level};
use hydroflow_lang::graph::{
eliminate_extra_unions_tees, partition_graph, FlatGraphBuilder, HydroflowGraph,
};
use hydroflow_lang::parse::{
HfStatement, IndexInt, Indexing, Pipeline, PipelineLink, PipelineStatement, PortIndex,
};
use proc_macro2::{Span, TokenStream};
use rust_sitter::errors::{ParseError, ParseErrorReason};
use syn::{parse_quote, parse_quote_spanned, Token};
mod grammar;
mod join_plan;
mod util;
use grammar::datalog::{
Aggregation, Atom, Declaration, Ident, IntExpr, Program, Rule, RuleType, TargetExpr,
};
use join_plan::{IntermediateJoinNode, JoinPlan};
use util::{repeat_tuple, Counter};
/// Relation names with compiler built-in semantics; user programs may not
/// declare or define rules for these.
static MAGIC_RELATIONS: [&str; 1] = ["less_than"];
/// Parses an embedded Hydroflow pipeline snippet, re-spanning it via
/// `get_span` so parse errors point into the original datalog source.
pub fn parse_pipeline(
    code_str: &rust_sitter::Spanned<String>,
    get_span: &impl Fn((usize, usize)) -> Span,
) -> Result<Pipeline, Vec<Diagnostic>> {
    let lit = syn::LitStr::new(&code_str.value, get_span(code_str.span));
    match lit.parse() {
        Ok(pipeline) => Ok(pipeline),
        Err(err) => Err(vec![Diagnostic {
            span: err.span(),
            level: Level::Error,
            message: format!("Failed to parse input pipeline: {}", err),
        }]),
    }
}
/// Parses an embedded Rust expression for a `.static` declaration,
/// re-spanning it via `get_span` for accurate diagnostics.
pub fn parse_static(
    code_str: &rust_sitter::Spanned<String>,
    get_span: &impl Fn((usize, usize)) -> Span,
) -> Result<syn::Expr, Vec<Diagnostic>> {
    let lit = syn::LitStr::new(&code_str.value, get_span(code_str.span));
    match lit.parse() {
        Ok(expr) => Ok(expr),
        Err(err) => Err(vec![Diagnostic {
            span: err.span(),
            level: Level::Error,
            message: format!("Failed to parse static expression: {}", err),
        }]),
    }
}
/// Compiles a datalog program (given as a raw string literal) into a flat
/// `HydroflowGraph`: declares union/tee plumbing per relation, wires inputs,
/// outputs, asyncs and statics, then lowers each rule via `generate_rule`.
///
/// # Errors
/// Returns diagnostics if the literal is not a raw string, if the datalog
/// grammar fails to parse, or if rule lowering / graph building reports
/// errors.
pub fn gen_hydroflow_graph(
    literal: proc_macro2::Literal,
) -> Result<HydroflowGraph, Vec<Diagnostic>> {
    // Offset from the start of the literal token to the start of the string
    // contents, so grammar byte offsets can be mapped to proc-macro spans.
    let offset = {
        // This includes the quotes, i.e. 'r#"my test"#' or '"hello\nworld"'.
        let source_str = literal.to_string();
        let mut source_chars = source_str.chars();
        if Some('r') != source_chars.next() {
            return Err(vec![Diagnostic {
                span: literal.span(),
                level: Level::Error,
                message:
                    r##"Input must be a raw string `r#"..."#` for correct diagnostic messages."##
                        .to_owned(),
            }]);
        }
        // Skip the `r`, the `#`s, and the opening quote.
        let hashes = source_chars.take_while(|&c| '#' == c).count();
        2 + hashes
    };
    let get_span = |(start, end): (usize, usize)| {
        let subspan = literal.subspan(start + offset..end + offset);
        subspan.unwrap_or(Span::call_site())
    };
    let str_node: syn::LitStr = parse_quote!(#literal);
    let actual_str = str_node.value();
    let program: Program =
        grammar::datalog::parse(&actual_str).map_err(|e| handle_errors(e, &get_span))?;
    // First pass: bucket the declarations by kind. Magic relations may not
    // be declared or targeted by user rules.
    let mut inputs = Vec::new();
    let mut outputs = Vec::new();
    let mut persists = HashSet::new();
    let mut asyncs = Vec::new();
    let mut rules = Vec::new();
    let mut statics = Vec::new();
    for stmt in &program.rules {
        match stmt {
            Declaration::Input(_, ident, hf_code) => {
                assert!(!MAGIC_RELATIONS.contains(&ident.name.as_str()));
                inputs.push((ident, hf_code))
            }
            Declaration::Output(_, ident, hf_code) => {
                assert!(!MAGIC_RELATIONS.contains(&ident.name.as_str()));
                outputs.push((ident, hf_code))
            }
            Declaration::Persist(_, ident) => {
                persists.insert(ident.name.clone());
            }
            Declaration::Async(_, ident, send_hf, recv_hf) => {
                assert!(!MAGIC_RELATIONS.contains(&ident.name.as_str()));
                asyncs.push((ident, send_hf, recv_hf))
            }
            Declaration::Rule(rule) => {
                assert!(!MAGIC_RELATIONS.contains(&rule.target.name.name.as_str()));
                rules.push(rule)
            }
            Declaration::Static(_, ident, hf_code) => {
                assert!(!MAGIC_RELATIONS.contains(&ident.name.as_str()));
                statics.push((ident, hf_code));
            }
        }
    }
    let mut flat_graph_builder = FlatGraphBuilder::new();
    let mut tee_counter = HashMap::new();
    let mut union_counter = HashMap::new();
    // Second pass: for each distinct relation, declare its write side
    // (`<name>_insert`, a deduplicating union) and read side (`<name>`, a
    // tee). Persisted relations get a difference + defer_tick loop so the
    // read side only emits values new in the current tick.
    let mut created_rules = HashSet::new();
    for decl in &program.rules {
        let target_ident = match decl {
            Declaration::Input(_, ident, _) => ident.clone(),
            Declaration::Output(_, ident, _) => ident.clone(),
            Declaration::Persist(_, ident) => ident.clone(),
            Declaration::Async(_, ident, _, _) => ident.clone(),
            Declaration::Rule(rule) => rule.target.name.clone(),
            Declaration::Static(_, ident, _) => ident.clone(),
        };
        if !created_rules.contains(&target_ident.value) {
            created_rules.insert(target_ident.value.clone());
            let insert_name = syn::Ident::new(
                &format!("{}_insert", target_ident.name),
                get_span(target_ident.span),
            );
            let read_name = syn::Ident::new(&target_ident.name, get_span(target_ident.span));
            if persists.contains(&target_ident.value.name) {
                // read outputs the *new* values for this tick
                flat_graph_builder
                    .add_statement(parse_quote_spanned!{get_span(target_ident.span)=> #insert_name = union() -> unique::<'tick>(); });
                flat_graph_builder
                    .add_statement(parse_quote_spanned!{get_span(target_ident.span)=> #read_name = difference::<'tick, 'static>() -> tee(); });
                flat_graph_builder
                    .add_statement(parse_quote_spanned!{get_span(target_ident.span)=> #insert_name -> [pos] #read_name; });
                flat_graph_builder
                    .add_statement(parse_quote_spanned!{get_span(target_ident.span)=> #read_name -> defer_tick() -> [neg] #read_name; });
            } else {
                flat_graph_builder
                    .add_statement(parse_quote_spanned!{get_span(target_ident.span)=> #insert_name = union() -> unique::<'tick>(); });
                flat_graph_builder
                    .add_statement(parse_quote_spanned!{get_span(target_ident.span)=> #read_name = #insert_name -> tee(); });
            }
        }
    }
    // Wire each .input's user pipeline into its relation's union, using the
    // next free union port index.
    for (target, hf_code) in inputs {
        let my_union_index = union_counter
            .entry(target.name.clone())
            .or_insert_with(|| 0..)
            .next()
            .expect("Out of union indices");
        let my_union_index_lit =
            syn::LitInt::new(&format!("{}", my_union_index), get_span(target.span));
        let name = syn::Ident::new(&format!("{}_insert", target.name), get_span(target.span));
        let input_pipeline: Pipeline = parse_pipeline(&hf_code.code, &get_span)?;
        flat_graph_builder.add_statement(parse_quote_spanned! {get_span(target.span)=>
            #input_pipeline -> [#my_union_index_lit] #name;
        });
    }
    // Wire each .output from its relation's tee to the user pipeline;
    // persisted relations are re-expanded with persist() first.
    for (target, hf_code) in outputs {
        let my_tee_index = tee_counter
            .entry(target.name.clone())
            .or_insert_with(|| 0..)
            .next()
            .expect("Out of tee indices");
        let my_tee_index_lit =
            syn::LitInt::new(&format!("{}", my_tee_index), get_span(target.span));
        let target_ident = syn::Ident::new(&target.name, get_span(target.span));
        let output_pipeline: Pipeline = parse_pipeline(&hf_code.code, &get_span)?;
        let output_pipeline = if persists.contains(&target.name) {
            parse_quote_spanned! {get_span(target.span)=> persist() -> #output_pipeline}
        } else {
            output_pipeline
        };
        flat_graph_builder.add_statement(parse_quote_spanned! {get_span(target.span)=>
            #target_ident [#my_tee_index_lit] -> #output_pipeline;
        });
    }
    // Wire each .async: a `<name>_async_send` union feeding the send
    // pipeline, and the recv pipeline feeding the relation's union.
    for (target, send_hf, recv_hf) in asyncs {
        let async_send_pipeline = format!("{}_async_send", target.name);
        let async_send_pipeline = syn::Ident::new(&async_send_pipeline, get_span(target.span));
        let recv_union_index = union_counter
            .entry(target.name.clone())
            .or_insert_with(|| 0..)
            .next()
            .expect("Out of union indices");
        let recv_union_index_lit =
            syn::LitInt::new(&format!("{}", recv_union_index), get_span(target.span));
        let target_ident =
            syn::Ident::new(&format!("{}_insert", target.name), get_span(target.span));
        let send_pipeline: Pipeline = parse_pipeline(&send_hf.code, &get_span)?;
        let recv_pipeline: Pipeline = parse_pipeline(&recv_hf.code, &get_span)?;
        flat_graph_builder.add_statement(parse_quote_spanned! {get_span(target.span)=>
            #async_send_pipeline = union() -> unique::<'tick>() -> #send_pipeline;
        });
        flat_graph_builder.add_statement(parse_quote_spanned! {get_span(target.span)=>
            #recv_pipeline -> [#recv_union_index_lit] #target_ident;
        });
    }
    // Wire each .static as a persisted source_iter of its expression.
    for (target, hf_code) in statics {
        let my_union_index = union_counter
            .entry(target.name.clone())
            .or_insert_with(|| 0..)
            .next()
            .expect("Out of union indices");
        let my_union_index_lit =
            syn::LitInt::new(&format!("{}", my_union_index), get_span(target.span));
        let name = syn::Ident::new(&format!("{}_insert", target.name), get_span(target.span));
        let static_expression: syn::Expr = parse_static(&hf_code.code, &get_span)?;
        flat_graph_builder.add_statement(parse_quote_spanned! {get_span(target.span)=>
            source_iter(#static_expression) -> persist() -> [#my_union_index_lit] #name;
        });
    }
    // Lower every rule body into join/antijoin/predicate plans.
    let mut next_join_idx = 0..;
    let mut diagnostics = Vec::new();
    for rule in rules {
        let plan = compute_join_plan(&rule.sources, &persists);
        generate_rule(
            plan,
            rule,
            &mut flat_graph_builder,
            &mut tee_counter,
            &mut union_counter,
            &mut next_join_idx,
            &persists,
            &mut diagnostics,
            &get_span,
        );
    }
    if !diagnostics.is_empty() {
        Err(diagnostics)
    } else {
        let (mut flat_graph, _uses, mut diagnostics) = flat_graph_builder.build();
        // Warnings are tolerated here; only errors abort compilation.
        diagnostics.retain(Diagnostic::is_error);
        if !diagnostics.is_empty() {
            Err(diagnostics)
        } else {
            eliminate_extra_unions_tees(&mut flat_graph);
            Ok(flat_graph)
        }
    }
}
fn handle_errors(
errors: Vec<ParseError>,
get_span: &impl Fn((usize, usize)) -> Span,
) -> Vec<Diagnostic> {
let mut diagnostics = vec![];
for error in errors {
let reason = error.reason;
let my_span = get_span((error.start, error.end));
match reason {
ParseErrorReason::UnexpectedToken(msg) => {
diagnostics.push(Diagnostic::spanned(
my_span,
Level::Error,
format!("Unexpected Token: '{msg}'", msg = msg),
));
}
ParseErrorReason::MissingToken(msg) => {
diagnostics.push(Diagnostic::spanned(
my_span,
Level::Error,
format!("Missing Token: '{msg}'", msg = msg),
));
}
ParseErrorReason::FailedNode(parse_errors) => {
if parse_errors.is_empty() {
diagnostics.push(Diagnostic::spanned(
my_span,
Level::Error,
"Failed to parse",
));
} else {
diagnostics.extend(handle_errors(parse_errors, get_span));
}
}
}
}
diagnostics
}
/// Partitions a flat graph into subgraphs and emits the final Rust token
/// stream for the Hydroflow program rooted at `root`.
///
/// # Panics
/// Panics if the graph contains a cycle that prevents partitioning, or if
/// code generation produces any operator diagnostics.
pub fn hydroflow_graph_to_program(flat_graph: HydroflowGraph, root: TokenStream) -> TokenStream {
    let mut diagnostics = Vec::new();
    let partitioned =
        partition_graph(flat_graph).expect("Failed to partition (cycle detected).");
    let tokens = partitioned.as_code(&root, true, quote::quote!(), &mut diagnostics);
    assert_eq!(
        0,
        diagnostics.len(),
        "Operator diagnostic occured during codegen"
    );
    tokens
}
#[allow(clippy::too_many_arguments)]
fn generate_rule(
plan: JoinPlan<'_>,
rule: &rust_sitter::Spanned<Rule>,
flat_graph_builder: &mut FlatGraphBuilder,
tee_counter: &mut HashMap<String, Counter>,
union_counter: &mut HashMap<String, Counter>,
next_join_idx: &mut Counter,
persists: &HashSet<String>,
diagnostics: &mut Vec<Diagnostic>,
get_span: &impl Fn((usize, usize)) -> Span,
) {
let target = &rule.target.name;
let target_ident = syn::Ident::new(&format!("{}_insert", target.name), get_span(target.span));
let out_expanded = join_plan::expand_join_plan(
&plan,
flat_graph_builder,
tee_counter,
next_join_idx,
rule.span,
diagnostics,
get_span,
);
let after_join = apply_aggregations(
rule,
&out_expanded,
persists.contains(&target.name),
diagnostics,
get_span,
);
let my_union_index = union_counter
.entry(target.name.clone())
.or_insert_with(|| 0..)
.next()
.expect("Out of union indices");
let my_union_index_lit = syn::LitInt::new(&format!("{}", my_union_index), Span::call_site());
let after_join_and_send: Pipeline = match rule.rule_type.value {
RuleType::Sync(_) => {
if rule.target.at_node.is_some() {
panic!("Rule must be async to send data to other nodes")
}
parse_quote_spanned!(get_span(rule.rule_type.span)=> #after_join -> [#my_union_index_lit] #target_ident)
}
RuleType::NextTick(_) => {
if rule.target.at_node.is_some() {
panic!("Rule must be async to send data to other nodes")
}
parse_quote_spanned!(get_span(rule.rule_type.span)=> #after_join -> defer_tick() -> [#my_union_index_lit] #target_ident)
}
RuleType::Async(_) => {
if rule.target.at_node.is_none() {
panic!("Async rules are only for sending data to other nodes")
}
let exprs_get_data = rule
.target
.fields
.iter()
.enumerate()
.map(|(i, f)| -> syn::Expr {
let syn_index = syn::Index::from(i);
parse_quote_spanned!(get_span(f.span)=> v.#syn_index)
});
let syn_target_index = syn::Index::from(rule.target.fields.len());
let v_type = repeat_tuple::<syn::Type, syn::Type>(
|| parse_quote!(_),
rule.target.fields.len() + 1,
);
let send_pipeline_ident = syn::Ident::new(
&format!("{}_async_send", &rule.target.name.name),
get_span(rule.target.name.span),
);
parse_quote_spanned!(get_span(rule.rule_type.span)=> #after_join -> map(|v: #v_type| (v.#syn_target_index, (#(#exprs_get_data, )*))) -> #send_pipeline_ident)
}
};
let out_name = out_expanded.name;
// If the output comes with a tee index, we must read with that. This only happens when we are
// directly outputting a transformation of a single relation on the RHS.
let out_indexing = out_expanded.tee_idx.map(|i| Indexing {
bracket_token: syn::token::Bracket::default(),
index: PortIndex::Int(IndexInt {
value: i,
span: Span::call_site(),
}),
});
flat_graph_builder.add_statement(HfStatement::Pipeline(PipelineStatement {
pipeline: Pipeline::Link(PipelineLink {
lhs: Box::new(parse_quote!(#out_name #out_indexing)), // out_name[idx]
arrow: parse_quote!(->),
rhs: Box::new(after_join_and_send),
}),
semi_token: Token),
}));
}
/// Derives a (currently naive) join plan for the body of a rule.
///
/// Positive, non-magic relations are combined left-to-right into a binary join
/// tree; negated relations are layered on as anti-joins; inline predicates are
/// wrapped around the result; and magic relations (only `less_than` today) are
/// applied last.
fn compute_join_plan<'a>(sources: &'a [Atom], persisted_rules: &HashSet<String>) -> JoinPlan<'a> {
    // TODO(shadaj): smarter plans
    let positive_sources = sources.iter().filter_map(|atom| {
        if let Atom::Relation(negated, rel) = atom {
            if negated.is_none() && !MAGIC_RELATIONS.contains(&rel.name.name.as_str()) {
                return Some(JoinPlan::Source(rel, persisted_rules.contains(&rel.name.name)));
            }
        }
        None
    });
    // A rule with no positive, non-magic sources is rejected here.
    let mut plan = positive_sources
        .reduce(|lhs, rhs| JoinPlan::Join(Box::new(lhs), Box::new(rhs)))
        .unwrap();

    // Each negated relation becomes an anti-join against the plan so far.
    for atom in sources {
        if let Atom::Relation(negated, rel) = atom {
            if negated.is_some() {
                let source = JoinPlan::Source(rel, persisted_rules.contains(&rel.name.name));
                plan = JoinPlan::AntiJoin(Box::new(plan), Box::new(source));
            }
        }
    }

    // Inline predicates (e.g. `( x > y )`) filter the joined rows.
    let predicates = sources
        .iter()
        .filter_map(|atom| match atom {
            Atom::Predicate(pred) => Some(pred),
            _ => None,
        })
        .collect::<Vec<_>>();
    if !predicates.is_empty() {
        plan = JoinPlan::Predicate(predicates, Box::new(plan));
    }

    // Finally, apply magic relations on top of the assembled plan.
    for atom in sources {
        if let Atom::Relation(negated, rel) = atom {
            if MAGIC_RELATIONS.contains(&rel.name.name.as_str()) {
                match rel.name.name.as_str() {
                    "less_than" => {
                        assert!(negated.is_none());
                        plan = JoinPlan::MagicNatLt(
                            Box::new(plan),
                            rel.fields[0].value.clone(),
                            rel.fields[1].value.clone(),
                        );
                    }
                    o => panic!("Unknown magic relation {}", o),
                }
            }
        }
    }

    plan
}
/// Lowers a Datalog integer expression into the equivalent Rust expression.
///
/// Identifiers are resolved through `lookup_ident`, which maps a variable to
/// the expression that reads it (e.g. a tuple-field access); `get_span`
/// converts source byte offsets into proc-macro spans so errors point at the
/// original Datalog text.
pub(crate) fn gen_value_expr(
    expr: &IntExpr,
    lookup_ident: &mut impl FnMut(&rust_sitter::Spanned<Ident>) -> syn::Expr,
    get_span: &dyn Fn((usize, usize)) -> Span,
) -> syn::Expr {
    match expr {
        IntExpr::Ident(ident) => lookup_ident(ident),
        IntExpr::Integer(i) => {
            // Carry the original source span onto the emitted literal.
            let lit = syn::Lit::Int(syn::LitInt::new(&i.to_string(), get_span(i.span)));
            syn::Expr::Lit(syn::ExprLit {
                attrs: Vec::new(),
                lit,
            })
        }
        IntExpr::Parenthesized(_, e, _) => {
            let inner = gen_value_expr(e, lookup_ident, get_span);
            parse_quote!((#inner))
        }
        IntExpr::Add(l, _, r) => {
            let lhs = gen_value_expr(l, lookup_ident, get_span);
            let rhs = gen_value_expr(r, lookup_ident, get_span);
            parse_quote!(#lhs + #rhs)
        }
        IntExpr::Sub(l, _, r) => {
            let lhs = gen_value_expr(l, lookup_ident, get_span);
            let rhs = gen_value_expr(r, lookup_ident, get_span);
            parse_quote!(#lhs - #rhs)
        }
        IntExpr::Mul(l, _, r) => {
            let lhs = gen_value_expr(l, lookup_ident, get_span);
            let rhs = gen_value_expr(r, lookup_ident, get_span);
            parse_quote!(#lhs * #rhs)
        }
        IntExpr::Mod(l, _, r) => {
            let lhs = gen_value_expr(l, lookup_ident, get_span);
            let rhs = gen_value_expr(r, lookup_ident, get_span);
            parse_quote!(#lhs % #rhs)
        }
    }
}
/// Lowers a single field of a rule head into the Rust expression that feeds
/// the grouping stage.
///
/// Aggregations contribute their *input* here: `count` groups on unit,
/// `count_unique` on the tuple of its key variables, and the value-carrying
/// aggregations (`min`/`max`/`sum`/`choose`) on their argument. `index()`
/// fields are handled separately by the caller and must never reach this
/// function.
fn gen_target_expr(
    expr: &TargetExpr,
    lookup_ident: &mut impl FnMut(&rust_sitter::Spanned<Ident>) -> syn::Expr,
    get_span: &dyn Fn((usize, usize)) -> Span,
) -> syn::Expr {
    match expr {
        TargetExpr::Expr(value) => gen_value_expr(value, lookup_ident, get_span),
        TargetExpr::Aggregation(Aggregation::Count(_)) => parse_quote!(()),
        TargetExpr::Aggregation(Aggregation::CountUnique(_, _, keys, _)) => {
            let key_exprs = keys
                .iter()
                .map(|key| gen_value_expr(&IntExpr::Ident(key.clone()), lookup_ident, get_span))
                .collect::<Vec<_>>();
            parse_quote!((#(#key_exprs),*))
        }
        TargetExpr::Aggregation(
            Aggregation::Min(_, _, arg, _)
            | Aggregation::Max(_, _, arg, _)
            | Aggregation::Sum(_, _, arg, _)
            | Aggregation::Choose(_, _, arg, _),
        ) => gen_value_expr(&IntExpr::Ident(arg.clone()), lookup_ident, get_span),
        TargetExpr::Index(_, _, _) => unreachable!(),
    }
}
/// Builds the tail of a rule's pipeline: projects joined rows into the shape of
/// the rule head, groups on the non-aggregated fields, and applies any
/// aggregations (`min`/`max`/`sum`/`count`/`count_unique`/`choose`) plus the
/// special `index()` pseudo-field.
///
/// `consumer_is_persist` indicates the target relation is itself persisted, in
/// which case the trailing `persist()` that would otherwise follow a persisted
/// source is omitted.
fn apply_aggregations(
    rule: &Rule,
    out_expanded: &IntermediateJoinNode,
    consumer_is_persist: bool,
    diagnostics: &mut Vec<Diagnostic>,
    get_span: &impl Fn((usize, usize)) -> Span,
) -> Pipeline {
    let mut aggregations = vec![];
    let mut fold_keyed_exprs = vec![];
    let mut agg_exprs = vec![];
    // First pass: count how often each variable appears across the head (and
    // the `@node` annotation) so the second pass can `.clone()` every use but
    // the last.
    let mut field_use_count = HashMap::new();
    for field in rule
        .target
        .fields
        .iter()
        .chain(rule.target.at_node.iter().map(|n| &n.node))
    {
        for ident in field.idents() {
            field_use_count
                .entry(ident.name.clone())
                .and_modify(|e| *e += 1)
                .or_insert(1);
        }
    }
    // Second pass: lower each head field into an expression reading from the
    // joined `row`, splitting group-key expressions from aggregation inputs.
    let mut field_use_cur = HashMap::new();
    let mut has_index = false;
    for field in rule
        .target
        .fields
        .iter()
        .chain(rule.target.at_node.iter().map(|n| &n.node))
    {
        if matches!(field.deref(), TargetExpr::Index(_, _, _)) {
            has_index = true;
        } else {
            let expr: syn::Expr = gen_target_expr(
                field,
                &mut |ident| {
                    if let Some(col) = out_expanded.variable_mapping.get(&ident.name) {
                        let cur_count = field_use_cur
                            .entry(ident.name.clone())
                            .and_modify(|e| *e += 1)
                            .or_insert(1);
                        let source_col_idx = syn::Index::from(*col);
                        let base = parse_quote_spanned!(get_span(ident.span)=> row.#source_col_idx);
                        // Clone on all uses except the final one, which moves.
                        if *cur_count < field_use_count[&ident.name]
                            && field_use_count[&ident.name] > 1
                        {
                            parse_quote!(#base.clone())
                        } else {
                            base
                        }
                    } else {
                        diagnostics.push(Diagnostic::spanned(
                            get_span(ident.span),
                            Level::Error,
                            format!("Could not find column {} in RHS of rule", &ident.name),
                        ));
                        // Emit a unit placeholder so codegen can proceed after
                        // recording the diagnostic.
                        parse_quote!(())
                    }
                },
                get_span,
            );
            match &field.value {
                TargetExpr::Expr(_) => {
                    fold_keyed_exprs.push(expr);
                }
                TargetExpr::Aggregation(a) => {
                    aggregations.push(a.clone());
                    agg_exprs.push(expr);
                }
                TargetExpr::Index(_, _, _) => unreachable!(),
            }
        }
    }
    let flattened_tuple_type = &out_expanded.tuple_type;
    // Build the projection that reassembles the output tuple after grouping:
    // `g` is the group key, `a` the tuple of aggregation accumulators, and `i`
    // the enumerate counter backing `index()`.
    let mut after_group_lookups: Vec<syn::Expr> = vec![];
    let mut group_key_idx = 0;
    let mut agg_idx = 0;
    for field in rule
        .target
        .fields
        .iter()
        .chain(rule.target.at_node.iter().map(|n| &n.node))
    {
        match field.value {
            TargetExpr::Expr(_) => {
                let idx = syn::Index::from(group_key_idx);
                after_group_lookups.push(parse_quote_spanned!(get_span(field.span)=> g.#idx));
                group_key_idx += 1;
            }
            TargetExpr::Aggregation(Aggregation::CountUnique(..)) => {
                // count_unique accumulates (set, count); only the count (.1)
                // is emitted.
                let idx = syn::Index::from(agg_idx);
                after_group_lookups
                    .push(parse_quote_spanned!(get_span(field.span)=> a.#idx.unwrap().1));
                agg_idx += 1;
            }
            TargetExpr::Aggregation(_) => {
                let idx = syn::Index::from(agg_idx);
                after_group_lookups
                    .push(parse_quote_spanned!(get_span(field.span)=> a.#idx.unwrap()));
                agg_idx += 1;
            }
            TargetExpr::Index(_, _, _) => {
                after_group_lookups.push(parse_quote_spanned!(get_span(field.span)=> i));
            }
        }
    }
    let fold_keyed_input_type =
        repeat_tuple::<syn::Type, syn::Type>(|| parse_quote!(_), fold_keyed_exprs.len());
    let after_group_pipeline: Pipeline = if has_index {
        if out_expanded.persisted && agg_exprs.is_empty() {
            // if there is an aggregation, we will use a group which replays so we should use `'tick` instead
            parse_quote!(enumerate::<'static>() -> map(|(i, (g, a)): (_, (#fold_keyed_input_type, _))| (#(#after_group_lookups, )*)))
        } else {
            parse_quote!(enumerate::<'tick>() -> map(|(i, (g, a)): (_, (#fold_keyed_input_type, _))| (#(#after_group_lookups, )*)))
        }
    } else {
        parse_quote!(map(|(g, a): (#fold_keyed_input_type, _)| (#(#after_group_lookups, )*)))
    };
    if agg_exprs.is_empty() {
        // No aggregations: project straight into the key shape with a unit
        // "value", then re-project after the (trivial) grouping stage.
        if out_expanded.persisted && !consumer_is_persist {
            parse_quote!(map(|row: #flattened_tuple_type| ((#(#fold_keyed_exprs, )*), ())) -> #after_group_pipeline -> persist())
        } else {
            parse_quote!(map(|row: #flattened_tuple_type| ((#(#fold_keyed_exprs, )*), ())) -> #after_group_pipeline)
        }
    } else {
        // Aggregations fold into a tuple of Option accumulators, one per
        // aggregation; `None` means "no input seen yet".
        let agg_initial =
            repeat_tuple::<syn::Expr, syn::Expr>(|| parse_quote!(None), agg_exprs.len());
        let agg_input_type =
            repeat_tuple::<syn::Type, syn::Type>(|| parse_quote!(_), agg_exprs.len());
        let agg_type: syn::Type =
            repeat_tuple::<syn::Type, syn::Type>(|| parse_quote!(Option<_>), agg_exprs.len());
        let fold_keyed_stmts: Vec<syn::Stmt> = aggregations
            .iter()
            .enumerate()
            .map(|(i, agg)| {
                let idx = syn::Index::from(i);
                let old_at_index: syn::Expr = parse_quote!(old.#idx);
                let val_at_index: syn::Expr = parse_quote!(val.#idx);
                // Combining step: merge the incoming value into an accumulator
                // that already holds a previous value (`prev`).
                let agg_expr: syn::Expr = match &agg {
                    Aggregation::Min(..) => {
                        parse_quote!(std::cmp::min(prev, #val_at_index))
                    }
                    Aggregation::Max(..) => {
                        parse_quote!(std::cmp::max(prev, #val_at_index))
                    }
                    Aggregation::Sum(..) => {
                        parse_quote!(prev + #val_at_index)
                    }
                    Aggregation::Count(..) => {
                        parse_quote!(prev + 1)
                    }
                    Aggregation::CountUnique(..) => {
                        parse_quote!({
                            let prev: (hydroflow::rustc_hash::FxHashSet<_>, _) = prev;
                            let mut set: hydroflow::rustc_hash::FxHashSet<_> = prev.0;
                            if set.insert(#val_at_index) {
                                (set, prev.1 + 1)
                            } else {
                                (set, prev.1)
                            }
                        })
                    }
                    Aggregation::Choose(..) => {
                        parse_quote!(prev) // choose = select any 1 element from the relation. By default we select the 1st.
                    }
                };
                // Seeding step: initialize the accumulator from the first value.
                let agg_initial: syn::Expr = match &agg {
                    Aggregation::Min(..)
                    | Aggregation::Max(..)
                    | Aggregation::Sum(..)
                    | Aggregation::Choose(..) => {
                        parse_quote!(#val_at_index)
                    }
                    Aggregation::Count(..) => {
                        parse_quote!(1)
                    }
                    Aggregation::CountUnique(..) => {
                        parse_quote!({
                            let mut set = hydroflow::rustc_hash::FxHashSet::<_>::default();
                            set.insert(#val_at_index);
                            (set, 1)
                        })
                    }
                };
                parse_quote! {
                    #old_at_index = if let Some(prev) = #old_at_index.take() {
                        Some(#agg_expr)
                    } else {
                        Some(#agg_initial)
                    };
                }
            })
            .collect();
        let pre_fold_keyed_map: syn::Expr = parse_quote!(|row: #flattened_tuple_type| ((#(#fold_keyed_exprs, )*), (#(#agg_exprs, )*)));
        let fold_keyed_fn: syn::Expr = parse_quote!(|old: &mut #agg_type, val: #agg_input_type| {
            #(#fold_keyed_stmts)*
        });
        // Persisted outputs fold over all time ('static); otherwise per 'tick.
        if out_expanded.persisted {
            parse_quote! {
                map(#pre_fold_keyed_map) -> fold_keyed::<'static, #fold_keyed_input_type, #agg_type>(|| #agg_initial, #fold_keyed_fn) -> #after_group_pipeline
            }
        } else {
            parse_quote! {
                map(#pre_fold_keyed_map) -> fold_keyed::<'tick, #fold_keyed_input_type, #agg_type>(|| #agg_initial, #fold_keyed_fn) -> #after_group_pipeline
            }
        }
    }
}
#[cfg(test)]
mod tests {
    use syn::parse_quote;

    use super::{gen_hydroflow_graph, hydroflow_graph_to_program};

    // Compiles a Datalog program and snapshot-tests two artifacts with insta:
    // the generated Hydroflow surface syntax ("surface_graph" suffix) and the
    // prettified, fully expanded Rust program ("datalog_program" suffix).
    macro_rules! test_snapshots {
        ($program:literal) => {
            let flat_graph = gen_hydroflow_graph(parse_quote!($program)).unwrap();
            let flat_graph_ref = &flat_graph;
            insta::with_settings!({snapshot_suffix => "surface_graph"}, {
                insta::assert_display_snapshot!(flat_graph_ref.surface_syntax_string());
            });
            let tokens = hydroflow_graph_to_program(flat_graph, quote::quote! { hydroflow });
            // Wrap the generated statement in a `fn main` so prettyplease can
            // format it as a complete file.
            let out: syn::Stmt = syn::parse_quote!(#tokens);
            let wrapped: syn::File = parse_quote! {
                fn main() {
                    #out
                }
            };
            insta::with_settings!({snapshot_suffix => "datalog_program"}, {
                insta::assert_display_snapshot!(
                    prettyplease::unparse(&wrapped)
                );
            });
        };
    }

    #[test]
    fn minimal_program() {
        test_snapshots!(
            r#"
.input input `source_stream(input)`
.output out `for_each(|v| out.send(v).unwrap())`
out(y, x) :- input(x, y).
"#
        );
    }

    #[test]
    fn join_with_self() {
        test_snapshots!(
            r#"
.input input `source_stream(input)`
.output out `for_each(|v| out.send(v).unwrap())`
out(x, y) :- input(x, y), input(y, x).
"#
        );
    }

    #[test]
    fn wildcard_fields() {
        test_snapshots!(
            r#"
.input input `source_stream(input)`
.output out `for_each(|v| out.send(v).unwrap())`
out(x) :- input(x, _), input(_, x).
"#
        );
    }

    #[test]
    fn join_with_other() {
        test_snapshots!(
            r#"
.input in1 `source_stream(in1)`
.input in2 `source_stream(in2)`
.output out `for_each(|v| out.send(v).unwrap())`
out(x, y) :- in1(x, y), in2(y, x).
"#
        );
    }

    // Two rules feeding the same output relation (union at the target).
    #[test]
    fn multiple_contributors() {
        test_snapshots!(
            r#"
.input in1 `source_stream(in1)`
.input in2 `source_stream(in2)`
.output out `for_each(|v| out.send(v).unwrap())`
out(x, y) :- in1(x, y).
out(x, y) :- in2(y, x).
"#
        );
    }

    // Recursive rule: `reachable` is defined in terms of itself.
    #[test]
    fn transitive_closure() {
        test_snapshots!(
            r#"
.input edges `source_stream(edges)`
.input seed_reachable `source_stream(seed_reachable)`
.output reachable `for_each(|v| reachable.send(v).unwrap())`
reachable(x) :- seed_reachable(x).
reachable(y) :- reachable(x), edges(x, y).
"#
        );
    }

    #[test]
    fn single_column_program() {
        test_snapshots!(
            r#"
.input in1 `source_stream(in1)`
.input in2 `source_stream(in2)`
.output out `for_each(|v| out.send(v).unwrap())`
out(x) :- in1(x), in2(x).
"#
        );
    }

    #[test]
    fn triple_relation_join() {
        test_snapshots!(
            r#"
.input in1 `source_stream(in1)`
.input in2 `source_stream(in2)`
.input in3 `source_stream(in3)`
.output out `for_each(|v| out.send(v).unwrap())`
out(d, c, b, a) :- in1(a, b), in2(b, c), in3(c, d).
"#
        );
    }

    // Repeated variables within a single atom act as equality constraints.
    #[test]
    fn local_constraints() {
        test_snapshots!(
            r#"
.input input `source_stream(input)`
.output out `for_each(|v| out.send(v).unwrap())`
out(x, x) :- input(x, x).
"#
        );
        test_snapshots!(
            r#"
.input input `source_stream(input)`
.output out `for_each(|v| out.send(v).unwrap())`
out(x, x, y, y) :- input(x, x, y, y).
"#
        );
    }

    #[test]
    fn test_simple_filter() {
        test_snapshots!(
            r#"
.input input `source_stream(input)`
.output out `for_each(|v| out.send(v).unwrap())`
out(x, y) :- input(x, y), ( x > y ), ( y == x ).
"#
        );
    }

    #[test]
    fn test_anti_join() {
        test_snapshots!(
            r#"
.input ints_1 `source_stream(ints_1)`
.input ints_2 `source_stream(ints_2)`
.input ints_3 `source_stream(ints_3)`
.output result `for_each(|v| result.send(v).unwrap())`
result(x, z) :- ints_1(x, y), ints_2(y, z), !ints_3(y)
"#
        );
    }

    #[test]
    fn test_max() {
        test_snapshots!(
            r#"
.input ints `source_stream(ints)`
.output result `for_each(|v| result.send(v).unwrap())`
result(max(a), b) :- ints(a, b)
"#
        );
    }

    // Aggregating every column: grouping key is empty.
    #[test]
    fn test_max_all() {
        test_snapshots!(
            r#"
.input ints `source_stream(ints)`
.output result `for_each(|v| result.send(v).unwrap())`
result(max(a), max(b)) :- ints(a, b)
"#
        );
    }

    // `:~` rules route tuples to the node named by the `@` annotation.
    #[test]
    fn test_send_to_node() {
        test_snapshots!(
            r#"
.input ints `source_stream(ints)`
.output result `for_each(|v| result.send(v).unwrap())`
.async result `for_each(|(node, data)| async_send_result(node, data))` `source_stream(async_receive_result)`
result@b(a) :~ ints(a, b)
"#
        );
    }

    // Exercises `#` comments and the `:+` (next-tick) rule type.
    #[test]
    fn test_aggregations_and_comments() {
        test_snapshots!(
            r#"
# david doesn't think this line of code will execute
.input ints `source_stream(ints)`
.output result `for_each(|v| result.send(v).unwrap())`
.output result2 `for_each(|v| result2.send(v).unwrap())`
result(count(a), b) :- ints(a, b)
result(sum(a), b) :+ ints(a, b)
result2(choose(a), b) :- ints(a, b)
"#
        );
    }

    // A computed expression (`a % 2`) as a grouping key.
    #[test]
    fn test_aggregations_fold_keyed_expr() {
        test_snapshots!(
            r#"
.input ints `source_stream(ints)`
.output result `for_each(|v| result.send(v).unwrap())`
result(a % 2, sum(b)) :- ints(a, b)
"#
        );
    }

    // A variable used twice in the head must be cloned, not moved twice.
    #[test]
    fn test_non_copy_but_clone() {
        test_snapshots!(
            r#"
.input strings `source_stream(strings)`
.output result `for_each(|v| result.send(v).unwrap())`
result(a, a) :- strings(a)
"#
        );
    }

    #[test]
    fn test_expr_lhs() {
        test_snapshots!(
            r#"
.input ints `source_stream(ints)`
.output result `for_each(|v| result.send(v).unwrap())`
result(123) :- ints(a)
result(a + 123) :- ints(a)
result(a + a) :- ints(a)
result(123 - a) :- ints(a)
result(123 % (a + 5)) :- ints(a)
result(a * 5) :- ints(a)
"#
        );
    }

    #[test]
    fn test_expr_predicate() {
        test_snapshots!(
            r#"
.input ints `source_stream(ints)`
.output result `for_each(|v| result.send(v).unwrap())`
result(1) :- ints(a), (a == 0)
result(2) :- ints(a), (a != 0)
result(3) :- ints(a), (a - 1 == 0)
result(4) :- ints(a), (a - 1 == 1 - 1)
"#
        );
    }

    // `.persist` on inputs, intermediates, and in combination with anti-joins.
    #[test]
    fn test_persist() {
        test_snapshots!(
            r#"
.input ints1 `source_stream(ints1)`
.persist ints1
.input ints2 `source_stream(ints2)`
.persist ints2
.input ints3 `source_stream(ints3)`
.output result `for_each(|v| result.send(v).unwrap())`
.output result2 `for_each(|v| result2.send(v).unwrap())`
.output result3 `for_each(|v| result3.send(v).unwrap())`
.output result4 `for_each(|v| result4.send(v).unwrap())`
result(a, b, c) :- ints1(a), ints2(b), ints3(c)
result2(a) :- ints1(a), !ints2(a)
intermediate(a) :- ints1(a)
result3(a) :- intermediate(a)
.persist intermediate_persist
intermediate_persist(a) :- ints1(a)
result4(a) :- intermediate_persist(a)
"#
        );
    }

    // Aggregating over a persisted relation must not double-count replays.
    #[test]
    fn test_persist_uniqueness() {
        test_snapshots!(
            r#"
.persist ints1
.input ints2 `source_stream(ints2)`
ints1(a) :- ints2(a)
.output result `for_each(|v| result.send(v).unwrap())`
result(count(a)) :- ints1(a)
"#
        );
    }

    // `count(*)` counts rows; `count(a)` counts bindings of `a`.
    #[test]
    fn test_wildcard_join_count() {
        test_snapshots!(
            r#"
.input ints1 `source_stream(ints1)`
.input ints2 `source_stream(ints2)`
.output result `for_each(|v| result.send(v).unwrap())`
.output result2 `for_each(|v| result2.send(v).unwrap())`
result(count(*)) :- ints1(a, _), ints2(a)
result2(count(a)) :- ints1(a, _), ints2(a)
"#
        );
    }

    // `index()` with and without aggregations, over ticked and persisted inputs.
    #[test]
    fn test_index() {
        test_snapshots!(
            r#"
.input ints `source_stream(ints)`
.output result `for_each(|v| result.send(v).unwrap())`
.output result2 `for_each(|v| result2.send(v).unwrap())`
.output result3 `for_each(|v| result3.send(v).unwrap())`
.output result4 `for_each(|v| result4.send(v).unwrap())`
.persist result5
.output result5 `for_each(|v| result5.send(v).unwrap())`
result(a, b, index()) :- ints(a, b)
result2(a, count(b), index()) :- ints(a, b)
.persist ints_persisted
ints_persisted(a, b) :- ints(a, b)
result3(a, b, index()) :- ints_persisted(a, b)
result4(a, count(b), index()) :- ints_persisted(a, b)
result5(a, b, index()) :- ints_persisted(a, b)
"#
        );
    }
}
|
use std::collections::{hash_map, VecDeque, BTreeMap};
use std::{io, cmp, fmt, mem, str};
use std::net::SocketAddrV6;
use std::sync::{Arc, Mutex};
use std::path::PathBuf;
use bytes::{Buf, BufMut, Bytes, ByteOrder, BigEndian, IntoBuf};
use rand::{distributions, OsRng, Rng};
use rand::distributions::Sample;
use slab::Slab;
use openssl::{self, ex_data};
use openssl::ssl::{self, SslContext, SslMethod, SslOptions, SslVersion, SslMode, Ssl, SslStream, HandshakeError, MidHandshakeSslStream,
SslStreamBuilder, SslAlert, SslRef, SslSession};
use openssl::pkey::{PKeyRef, Private};
use openssl::x509::{X509Ref, X509StoreContextRef};
use openssl::x509::verify::X509CheckFlags;
use openssl::hash::MessageDigest;
use openssl::symm::{Cipher, encrypt_aead, decrypt_aead};
use blake2::Blake2b;
use digest::{Input, VariableOutput};
use constant_time_eq::constant_time_eq;
use slog::{self, Logger};
use arrayvec::ArrayVec;
use fnv::{FnvHashMap, FnvHashSet};
use memory_stream::MemoryStream;
use transport_parameters::TransportParameters;
use coding::{self, BufExt, BufMutExt};
use {hkdf, frame, Frame, TransportError, StreamId, Side, Directionality, VERSION, MAX_CID_SIZE, RESET_TOKEN_SIZE};
use range_set::RangeSet;
use stream::{self, Stream};
/// Opaque, copyable handle identifying a connection owned by an `Endpoint`.
///
/// Wraps the connection's slot index; convert with `usize::from` to index
/// endpoint-side tables.
#[derive(Debug, Copy, Clone, Eq, PartialEq, Hash, Ord, PartialOrd)]
pub struct ConnectionHandle(usize);

impl From<ConnectionHandle> for usize {
    fn from(handle: ConnectionHandle) -> usize {
        handle.0
    }
}
/// Parameters governing the core QUIC state machine.
pub struct Config {
    /// Maximum number of peer-initiated bidirectional streams that may exist at one time.
    pub max_remote_bi_streams: u16,
    /// Maximum number of peer-initiated unidirectional streams that may exist at one time.
    pub max_remote_uni_streams: u16,
    /// Maximum duration of inactivity to accept before timing out the connection (s).
    ///
    /// Maximum value is 600 seconds. The actual value used is the minimum of this and the peer's own idle timeout.
    pub idle_timeout: u16,
    /// Maximum number of bytes the peer may transmit on any one stream before becoming blocked.
    ///
    /// This should be set to at least the expected connection latency multiplied by the maximum desired
    /// throughput. Setting this smaller than `receive_window` helps ensure that a single stream doesn't monopolize
    /// receive buffers, which may otherwise occur if the application chooses not to read from a large stream for a time
    /// while still requiring data on other streams.
    pub stream_receive_window: u32,
    /// Maximum number of bytes the peer may transmit across all streams of a connection before becoming blocked.
    ///
    /// This should be set to at least the expected connection latency multiplied by the maximum desired
    /// throughput. Larger values can be useful to allow maximum throughput within a stream while another is blocked.
    pub receive_window: u32,
    /// Maximum number of incoming connections to buffer.
    ///
    /// Calling `Endpoint::accept` removes a connection from the buffer, so this does not need to be large.
    pub accept_buffer: u32,
    /// Maximum number of tail loss probes before an RTO fires.
    pub max_tlps: u32,
    /// Maximum reordering in packet number space before FACK style loss detection considers a packet lost.
    pub reordering_threshold: u32,
    /// Maximum reordering in time space before time based loss detection considers a packet lost. 0.16 format
    pub time_reordering_fraction: u16,
    /// Whether time based loss detection is in use. If false, uses FACK style loss detection.
    pub using_time_loss_detection: bool,
    /// Minimum time in the future a tail loss probe alarm may be set for (μs).
    pub min_tlp_timeout: u64,
    /// Minimum time in the future an RTO alarm may be set for (μs).
    pub min_rto_timeout: u64,
    /// The length of the peer’s delayed ack timer (μs).
    pub delayed_ack_timeout: u64,
    /// The default RTT used before an RTT sample is taken (μs)
    pub default_initial_rtt: u64,
    /// The default max packet size used for calculating default and minimum congestion windows.
    pub default_mss: u64,
    /// Default limit on the amount of outstanding data in bytes.
    pub initial_window: u64,
    /// Default minimum congestion window.
    pub minimum_window: u64,
    /// Reduction in congestion window when a new loss event is detected. 0.16 format
    pub loss_reduction_factor: u16,
    /// List of supported application protocols.
    ///
    /// If empty, application-layer protocol negotiation will not be performed.
    pub protocols: Vec<Box<[u8]>>,
    /// Path to write NSS SSLKEYLOGFILE-compatible key log.
    ///
    /// Enabling this compromises security by committing secret information to disk. Useful for debugging communications
    /// when using tools like Wireshark.
    pub keylog: Option<PathBuf>,
    /// Whether to force clients to prove they can receive responses before allocating resources for them.
    ///
    /// This adds a round trip to the handshake, increasing connection establishment latency, in exchange for improved
    /// resistance to denial of service attacks.
    ///
    /// Only meaningful for endpoints that accept incoming connections.
    pub use_stateless_retry: bool,
    /// Whether incoming connections are required to provide certificates.
    ///
    /// If this is not set but a `client_cert_verifier` is supplied, a certificate will still be requested, but the
    /// handshake will proceed even if one is not supplied.
    pub require_client_certs: bool,
    /// Function to perform application-level verification of client certificates from incoming connections.
    ///
    /// Called with a boolean indicating whether the certificate chain is valid at the TLS level, and a
    /// `X509StoreContextRef` containing said chain. Returns whether the certificate should be considered valid.
    ///
    /// If `None`, all valid certificates will be accepted.
    // NOTE(review): bare trait object (`Box<Fn...>`); spell as `Box<dyn Fn...>`
    // once the crate's minimum Rust version is >= 1.27.
    pub client_cert_verifier: Option<Box<Fn(bool, &mut X509StoreContextRef) -> bool + Send + Sync + 'static>>,
}
/// Borrowed TLS identity material (key + certificate) used to authenticate an
/// endpoint to its peers.
pub struct CertConfig<'a> {
    /// A TLS private key.
    pub private_key: &'a PKeyRef<Private>,
    /// A TLS certificate corresponding to `private_key`.
    pub cert: &'a X509Ref,
}
impl Default for Config {
fn default() -> Self {
const EXPECTED_RTT: u32 = 100; // ms
const MAX_STREAM_BANDWIDTH: u32 = 12500 * 1000; // bytes/s
// Window size needed to avoid pipeline stalls
const STREAM_RWND: u32 = MAX_STREAM_BANDWIDTH / 1000 * EXPECTED_RTT;
Self {
max_remote_bi_streams: 0,
max_remote_uni_streams: 0,
idle_timeout: 10,
stream_receive_window: STREAM_RWND,
receive_window: 8 * STREAM_RWND,
accept_buffer: 1024,
max_tlps: 2,
reordering_threshold: 3,
time_reordering_fraction: 0x2000, // 1/8
using_time_loss_detection: false,
min_tlp_timeout: 10 * 1000,
min_rto_timeout: 200 * 1000,
delayed_ack_timeout: 25 * 1000,
default_initial_rtt: EXPECTED_RTT as u64 * 1000,
default_mss: 1460,
initial_window: 10 * 1460,
minimum_window: 2 * 1460,
loss_reduction_factor: 0x8000, // 1/2
protocols: Vec::new(),
keylog: None,
use_stateless_retry: false,
require_client_certs: false,
client_cert_verifier: None,
}
}
}
/// Parameters specific to outgoing (client-initiated) connections.
pub struct ClientConfig<'a> {
    /// The name of the server the client intends to connect to.
    ///
    /// Used for both certificate validation, and for disambiguating between multiple domains hosted by the same IP
    /// address (using SNI).
    pub server_name: Option<&'a str>,
    /// A ticket to resume a previous session faster than performing a full handshake.
    ///
    /// Required for transmitting 0-RTT data.
    // Encoding: u16 length, DER-encoded OpenSSL session ticket, transport params
    pub session_ticket: Option<&'a [u8]>,
    /// Whether to accept inauthentic or unverifiable peer certificates.
    ///
    /// Turning this off exposes clients to man-in-the-middle attacks in the same manner as an unencrypted TCP
    /// connection, but allows them to connect to servers that are using self-signed certificates.
    pub accept_insecure_certs: bool,
}
impl<'a> Default for ClientConfig<'a> {
fn default() -> Self { Self {
server_name: None,
session_ticket: None,
accept_insecure_certs: false,
}}
}
/// The main entry point to the library
///
/// This object performs no I/O whatsoever. Instead, it generates a stream of I/O operations for a backend to perform
/// via `poll_io`, and consumes incoming packets and timer expirations via `handle` and `timeout`.
pub struct Endpoint {
    log: Logger,
    // Source of randomness (connection IDs, packet numbers, ...).
    rng: OsRng,
    // Range from which a connection's first packet number is drawn.
    initial_packet_number: distributions::Range<u64>,
    // Shared TLS 1.3 context applied to every connection; built in `new`.
    tls: SslContext,
    // Connection lookup tables. `connection_ids_initial` presumably maps the
    // client-chosen ID from the Initial packet, `connection_ids` the locally
    // issued IDs, and `connection_remotes` the peer address — TODO confirm
    // against the handshake code.
    connection_ids_initial: FnvHashMap<ConnectionId, ConnectionHandle>,
    connection_ids: FnvHashMap<ConnectionId, ConnectionHandle>,
    connection_remotes: FnvHashMap<SocketAddrV6, ConnectionHandle>,
    connections: Slab<Connection>,
    config: Arc<Config>,
    // `Some` iff this endpoint accepts incoming connections (see `listen`).
    listen_keys: Option<ListenKeys>,
    // Application-facing events pending delivery through `poll`.
    events: VecDeque<(ConnectionHandle, Event)>,
    // I/O operations pending delivery through `poll_io`.
    io: VecDeque<Io>,
    // Connections that may have pending work to flush (drained by `poll_io`).
    dirty_conns: FnvHashSet<ConnectionHandle>,
    // Connections with streams that may be readable (drained by `poll`).
    readable_conns: FnvHashSet<ConnectionHandle>,
    // Accepted connections waiting for the application to claim them.
    incoming: VecDeque<ConnectionHandle>,
    incoming_handshakes: usize,
    // Session tickets captured by the OpenSSL new-session callback; `Err(())`
    // marks a ticket with an unexpected max_early_data value.
    session_ticket_buffer: Arc<Mutex<Vec<Result<SslSession, ()>>>>,
}
// Minimum size (bytes) for a client Initial packet — presumably the QUIC
// anti-amplification padding requirement; TODO confirm against the draft.
const MIN_INITIAL_SIZE: usize = 1200;
// Smallest MTU this implementation will operate over (bytes).
const MIN_MTU: u16 = 1232;
// Length (bytes) of connection IDs generated by this endpoint.
const LOCAL_ID_LEN: usize = 8;
/// Ensures we can always fit all our ACKs in a single minimum-MTU packet with room to spare
const MAX_ACK_BLOCKS: usize = 64;
/// Value used in ACKs we transmit
const ACK_DELAY_EXPONENT: u8 = 3;
/// Magic value used to indicate 0-RTT support in NewSessionTicket
const TLS_MAX_EARLY_DATA: u32 = 0xffffffff;
/// Derives the stateless reset token for a connection ID by MACing it with the
/// endpoint's reset key (keyed Blake2b truncated to `RESET_TOKEN_SIZE`).
fn reset_token_for(key: &[u8], id: &ConnectionId) -> [u8; RESET_TOKEN_SIZE] {
    let mut mac = Blake2b::new_keyed(key, RESET_TOKEN_SIZE);
    // TODO: Server ID??
    mac.process(id);
    let mut token = [0; RESET_TOKEN_SIZE];
    mac.variable_result(&mut token).unwrap();
    token
}
/// Information that should be preserved between restarts for server endpoints.
///
/// Keeping this around allows better behavior by clients that communicated with a previous instance of the same
/// endpoint.
#[derive(Copy, Clone)]
pub struct ListenKeys {
    /// Cryptographic key used to ensure integrity of data included in handshake cookies.
    ///
    /// Initialize with random bytes.
    pub cookie: [u8; 64],
    /// Cryptographic key used to send authenticated connection resets to clients who were communicating with a previous
    /// instance of this endpoint.
    ///
    /// Initialize with random bytes.
    pub reset: [u8; 64],
}
impl ListenKeys {
/// Generate new keys.
///
/// Be careful to use a cryptography-grade RNG.
pub fn new<R: Rng>(rng: &mut R) -> Self {
let mut cookie = [0; 64];
let mut reset = [0; 64];
rng.fill_bytes(&mut cookie);
rng.fill_bytes(&mut reset);
Self { cookie, reset }
}
}
/// Errors that can arise while constructing an `Endpoint`.
#[derive(Debug, Fail)]
pub enum EndpointError {
    /// The OpenSSL TLS context could not be configured.
    #[fail(display = "failed to configure TLS: {}", _0)]
    Tls(ssl::Error),
    /// The SSLKEYLOGFILE-style key log named in `Config::keylog` could not be
    /// created.
    // Fixed display string: was "failed open keylog file", missing "to".
    #[fail(display = "failed to open keylog file: {}", _0)]
    Keylog(io::Error),
    /// An ALPN protocol ID exceeded the 255-byte limit of the wire format's
    /// one-byte length prefix.
    #[fail(display = "protocol ID longer than 255 bytes")]
    ProtocolTooLong(Box<[u8]>),
}
impl From<ssl::Error> for EndpointError { fn from(x: ssl::Error) -> Self { EndpointError::Tls(x) } }
impl From<openssl::error::ErrorStack> for EndpointError { fn from(x: openssl::error::ErrorStack) -> Self { EndpointError::Tls(x.into()) } }
impl Endpoint {
    /// Builds an endpoint: configures a shared TLS 1.3 context (transport
    /// parameter extension, stateless-retry cookies, ALPN, key logging,
    /// session tickets, client-certificate verification) from `config`.
    ///
    /// `cert` supplies the server identity; `listen` enables accepting
    /// incoming connections. An endpoint without `listen` keys can only
    /// originate connections.
    pub fn new(log: Logger, config: Config, cert: Option<CertConfig>, listen: Option<ListenKeys>) -> Result<Self, EndpointError> {
        let rng = OsRng::new().unwrap();
        let config = Arc::new(config);
        // Pin the context to exactly TLS 1.3 and disable every legacy
        // protocol version and compression.
        let mut tls = SslContext::builder(SslMethod::tls())?;
        tls.set_min_proto_version(Some(SslVersion::TLS1_3))?;
        tls.set_max_proto_version(Some(SslVersion::TLS1_3))?;
        tls.set_options(
            SslOptions::NO_COMPRESSION | SslOptions::NO_SSLV2 | SslOptions::NO_SSLV3 | SslOptions::NO_TLSV1 |
            SslOptions::NO_TLSV1_1 | SslOptions::NO_TLSV1_2 | SslOptions::DONT_INSERT_EMPTY_FRAGMENTS
        );
        tls.clear_options(SslOptions::ENABLE_MIDDLEBOX_COMPAT);
        tls.set_mode(
            SslMode::ACCEPT_MOVING_WRITE_BUFFER | SslMode::ENABLE_PARTIAL_WRITE | SslMode::RELEASE_BUFFERS
        );
        tls.set_default_verify_paths()?;
        // Early data is only enabled when stateless retry is off — presumably
        // because 0-RTT cannot be accepted before a retry round trip; TODO
        // confirm the intended interaction.
        if !config.use_stateless_retry {
            tls.set_max_early_data(TLS_MAX_EARLY_DATA)?;
        }
        // Stateless-retry cookie generation/verification, keyed by the
        // persistent cookie key.
        if let Some(ref listen) = listen {
            let cookie_factory = Arc::new(CookieFactory::new(listen.cookie));
            {
                let cookie_factory = cookie_factory.clone();
                tls.set_stateless_cookie_generate_cb(move |tls, buf| {
                    let conn = tls.ex_data(*CONNECTION_INFO_INDEX).unwrap();
                    Ok(cookie_factory.generate(conn, buf))
                });
            }
            tls.set_stateless_cookie_verify_cb(move |tls, cookie| {
                let conn = tls.ex_data(*CONNECTION_INFO_INDEX).unwrap();
                cookie_factory.verify(conn, cookie)
            });
        }
        let reset_key = listen.as_ref().map(|x| x.reset);
        // Custom TLS extension 26 carries the QUIC transport parameters:
        // the first closure serializes ours, the second parses the peer's and
        // stashes the result (or a parse error) in the connection's ex_data.
        tls.add_custom_ext(
            26, ssl::ExtensionContext::TLS1_3_ONLY | ssl::ExtensionContext::CLIENT_HELLO | ssl::ExtensionContext::TLS1_3_ENCRYPTED_EXTENSIONS,
            { let config = config.clone();
              move |tls, ctx, _| {
                  let conn = tls.ex_data(*CONNECTION_INFO_INDEX).unwrap();
                  let mut buf = Vec::new();
                  let mut params = TransportParameters {
                      initial_max_streams_bidi: config.max_remote_bi_streams,
                      initial_max_streams_uni: config.max_remote_uni_streams,
                      initial_max_data: config.receive_window,
                      initial_max_stream_data: config.stream_receive_window,
                      ack_delay_exponent: ACK_DELAY_EXPONENT,
                      ..TransportParameters::default()
                  };
                  // Only servers (writing into EncryptedExtensions) include a
                  // stateless reset token.
                  let am_server = ctx == ssl::ExtensionContext::TLS1_3_ENCRYPTED_EXTENSIONS;
                  let side;
                  if am_server {
                      params.stateless_reset_token = Some(reset_token_for(reset_key.as_ref().unwrap(), &conn.id));
                      side = Side::Server;
                  } else {
                      side = Side::Client;
                  }
                  params.write(side, &mut buf);
                  Ok(Some(buf))
              }
            },
            |tls, ctx, data, _| {
                let side = if ctx == ssl::ExtensionContext::CLIENT_HELLO { Side::Server } else { Side::Client };
                match TransportParameters::read(side, &mut data.into_buf()) {
                    Ok(params) => {
                        tls.set_ex_data(*TRANSPORT_PARAMS_INDEX, Ok(params));
                        Ok(())
                    }
                    Err(e) => {
                        use transport_parameters::Error::*;
                        tls.set_ex_data(*TRANSPORT_PARAMS_INDEX, Err(e));
                        Err(match e {
                            VersionNegotiation => SslAlert::ILLEGAL_PARAMETER,
                            IllegalValue => SslAlert::ILLEGAL_PARAMETER,
                            Malformed => SslAlert::DECODE_ERROR,
                        })
                    }
                }
            }
        )?;
        // Install the server identity, if provided.
        if let Some(ref cert) = cert {
            tls.set_private_key(cert.private_key)?;
            tls.set_certificate(cert.cert)?;
            tls.check_private_key()?;
        }
        // ALPN: serialize the protocol list in wire format (length-prefixed)
        // and select the first mutually supported protocol on the server side.
        if !config.protocols.is_empty() {
            let mut buf = Vec::new();
            for protocol in &config.protocols {
                if protocol.len() > 255 { return Err(EndpointError::ProtocolTooLong(protocol.clone())); }
                buf.push(protocol.len() as u8);
                buf.extend_from_slice(protocol);
            }
            tls.set_alpn_protos(&buf)?;
            tls.set_alpn_select_callback(move |_ssl, protos| {
                if let Some(x) = ssl::select_next_proto(&buf, protos) {
                    Ok(x)
                } else {
                    Err(ssl::AlpnError::ALERT_FATAL)
                }
            });
        }
        // Optional SSLKEYLOGFILE-compatible key log for debugging.
        if let Some(ref path) = config.keylog {
            let file = ::std::fs::File::create(path).map_err(EndpointError::Keylog)?;
            let file = Mutex::new(file);
            tls.set_keylog_callback(move |_, line| {
                use std::io::Write;
                let mut file = file.lock().unwrap();
                let _ = file.write_all(line.as_bytes());
                let _ = file.write_all(b"\n");
            });
        }
        // Capture client-side session tickets for later resumption/0-RTT.
        let session_ticket_buffer = Arc::new(Mutex::new(Vec::new()));
        {
            let session_ticket_buffer = session_ticket_buffer.clone();
            tls.set_session_cache_mode(ssl::SslSessionCacheMode::BOTH);
            tls.set_new_session_callback(move |tls, session| {
                if tls.is_server() { return; }
                let mut buffer = session_ticket_buffer.lock().unwrap();
                // NOTE(review): when max_early_data is neither 0 nor
                // TLS_MAX_EARLY_DATA this pushes Err(()) and then *still*
                // pushes Ok(session) below — confirm whether the Ok push
                // should be skipped in that case.
                match session.max_early_data() {
                    0 | TLS_MAX_EARLY_DATA => {}
                    _ => {
                        buffer.push(Err(()));
                    }
                }
                buffer.push(Ok(session));
            });
        }
        // Client-certificate policy: require certs and/or route them through
        // the application-supplied verifier.
        let verify_flag = if config.require_client_certs { ssl::SslVerifyMode::PEER | ssl::SslVerifyMode::FAIL_IF_NO_PEER_CERT } else { ssl::SslVerifyMode::empty() };
        if config.client_cert_verifier.is_some() {
            let config = config.clone();
            tls.set_verify_callback(ssl::SslVerifyMode::PEER | verify_flag, move |x, y| (config.client_cert_verifier.as_ref().unwrap())(x, y));
        } else {
            tls.set_verify(verify_flag);
        }
        let tls = tls.build();
        Ok(Self {
            log, rng, config, tls,
            listen_keys: listen,
            initial_packet_number: distributions::Range::new(0, 2u64.pow(32) - 1024),
            connection_ids_initial: FnvHashMap::default(),
            connection_ids: FnvHashMap::default(),
            connection_remotes: FnvHashMap::default(),
            connections: Slab::new(),
            events: VecDeque::new(),
            io: VecDeque::new(),
            dirty_conns: FnvHashSet::default(),
            readable_conns: FnvHashSet::default(),
            incoming: VecDeque::new(),
            incoming_handshakes: 0,
            session_ticket_buffer,
        })
    }
/// Whether this endpoint holds server keys and therefore accepts incoming connections.
fn listen(&self) -> bool {
    self.listen_keys.is_some()
}
/// Get an application-facing event
pub fn poll(&mut self) -> Option<(ConnectionHandle, Event)> {
    // Already-queued events take priority over newly readable streams.
    if let Some(event) = self.events.pop_front() {
        return Some(event);
    }
    loop {
        let &handle = self.readable_conns.iter().next()?;
        // Copy the stream id out first so the borrow of `connections` ends
        // before we mutate the connection below.
        let next_stream = self.connections[handle.0].readable_streams.iter().next().cloned();
        match next_stream {
            Some(id) => {
                self.connections[handle.0].readable_streams.remove(&id);
                let rs = self.connections[handle.0].streams.get_mut(&id).unwrap()
                    .recv_mut().unwrap();
                // `fresh` reports (and clears) whether this is the first readability
                // notification for the stream.
                let fresh = mem::replace(&mut rs.fresh, false);
                return Some((handle, Event::StreamReadable { stream: id, fresh }));
            }
            None => {
                // No readable streams left on this connection; drop it and retry.
                self.readable_conns.remove(&handle);
            }
        }
    }
}
/// Get a pending IO operation
pub fn poll_io(&mut self, now: u64) -> Option<Io> {
    loop {
        // Drain IO that earlier processing already queued before generating more.
        if let Some(op) = self.io.pop_front() {
            return Some(op);
        }
        let &handle = self.dirty_conns.iter().next()?;
        // TODO: Only determine a single operation; only remove from dirty set if that fails
        self.flush_pending(now, handle);
        self.dirty_conns.remove(&handle);
    }
}
/// Process an incoming UDP datagram
pub fn handle(&mut self, now: u64, remote: SocketAddrV6, mut data: Bytes) {
    // Packets may be coalesced into one datagram; size checks (e.g. for
    // Initials) are made against the whole datagram, not one packet.
    let datagram_len = data.len();
    while !data.is_empty() {
        match Packet::decode(&data, LOCAL_ID_LEN) {
            Ok((packet, rest)) => {
                self.handle_packet(now, remote, packet, datagram_len);
                data = rest;
            }
            Err(HeaderError::UnsupportedVersion { source, destination }) => {
                if !self.listen() {
                    debug!(self.log, "dropping packet with unsupported version");
                    return;
                }
                trace!(self.log, "sending version negotiation");
                // Negotiate versions
                let mut buf = Vec::<u8>::new();
                // Echo the peer's CIDs back, swapped, as the spec requires.
                Header::VersionNegotiate { ty: self.rng.gen(), source_id: destination, destination_id: source }.encode(&mut buf);
                buf.write::<u32>(0x0a1a2a3a); // reserved version
                buf.write(VERSION); // supported version
                self.io.push_back(Io::Transmit { destination: remote, packet: buf.into() });
                return;
            }
            Err(e) => {
                trace!(self.log, "unable to process packet"; "reason" => %e);
                return;
            }
        }
    }
}
/// Route a single decoded packet: deliver it to an existing connection when its
/// destination CID (or, for stateless resets, its source address) is recognized;
/// otherwise, on a listening endpoint, either begin a new connection or answer
/// with a stateless reset.
///
/// `datagram_len` is the size of the whole UDP datagram the packet arrived in,
/// used to enforce the minimum size of Initial packets.
fn handle_packet(&mut self, now: u64, remote: SocketAddrV6, packet: Packet, datagram_len: usize) {
    //
    // Handle packet on existing connection, if any
    //
    let dest_id = packet.header.destination_id().clone();
    // Current connection IDs are checked before the IDs recorded at handshake time.
    if let Some(&conn) = self.connection_ids.get(&dest_id) {
        self.handle_connected(now, conn, remote, packet);
        return;
    }
    if let Some(&conn) = self.connection_ids_initial.get(&dest_id) {
        self.handle_connected(now, conn, remote, packet);
        return;
    }
    // Unknown CID from a known remote address: check whether this is a stateless
    // reset, identified by the peer's reset token in the last 16 bytes.
    if let Some(&conn) = self.connection_remotes.get(&remote) {
        if let Some(token) = self.connections[conn.0].params.stateless_reset_token {
            if packet.payload.len() >= 16 && &packet.payload[packet.payload.len() - 16..] == token {
                if !self.connections[conn.0].state.as_ref().unwrap().is_drained() {
                    debug!(self.log, "got stateless reset"; "connection" => %self.connections[conn.0].local_id);
                    // Stop all timers, surface the loss, and mark the connection drained.
                    self.io.push_back(Io::TimerStop { connection: conn, timer: Timer::LossDetection });
                    self.io.push_back(Io::TimerStop { connection: conn, timer: Timer::Close });
                    self.io.push_back(Io::TimerStop { connection: conn, timer: Timer::Idle });
                    self.events.push_back((conn, Event::ConnectionLost { reason: ConnectionError::Reset }));
                    self.connections[conn.0].state = Some(State::Drained);
                }
                return;
            }
        }
    }
    //
    // Potentially create a new connection
    //
    if !self.listen() {
        debug!(self.log, "dropping packet from unrecognized connection"; "header" => ?packet.header);
        return;
    }
    // Preserved before `packet.header` is consumed below; needed for the
    // stateless-reset header at the bottom.
    let key_phase = packet.header.key_phase();
    if let Header::Long { ty, destination_id, source_id, number } = packet.header {
        match ty {
            packet::INITIAL => {
                // Initials in undersized datagrams are ignored (amplification defense).
                if datagram_len >= MIN_INITIAL_SIZE {
                    self.handle_initial(now, remote, destination_id, source_id, number, &packet.header_data, &packet.payload);
                } else {
                    debug!(self.log, "ignoring short initial on {connection}", connection=destination_id.clone());
                }
                return;
            }
            packet::ZERO_RTT => {
                // MAY buffer a limited amount
                trace!(self.log, "dropping 0-RTT packet for unknown connection {connection}", connection=destination_id.clone());
                return;
            }
            _ => {
                debug!(self.log, "ignoring packet for unknown connection {connection} with unexpected type {type:02x}",
                       connection=destination_id.clone(), type=ty);
                return;
            }
        }
    }
    //
    // If we got this far, we're a server receiving a seemingly valid packet for an unknown connection. Send a stateless reset.
    //
    if !dest_id.is_empty() {
        debug!(self.log, "sending stateless reset");
        let mut buf = Vec::<u8>::new();
        // Bound padding size to at most 8 bytes larger than input to mitigate amplification attacks
        let padding = self.rng.gen_range(0, cmp::max(RESET_TOKEN_SIZE + 8, packet.payload.len()) - RESET_TOKEN_SIZE);
        buf.reserve_exact(1 + MAX_CID_SIZE + 1 + padding + RESET_TOKEN_SIZE);
        // A random short header makes the reset indistinguishable from a normal packet.
        Header::Short {
            id: ConnectionId::random(&mut self.rng, MAX_CID_SIZE as u8), number: PacketNumber::U8(self.rng.gen()), key_phase
        }.encode(&mut buf);
        {
            // Random padding bytes between the header and the trailing reset token.
            let start = buf.len();
            buf.resize(start + padding, 0);
            self.rng.fill_bytes(&mut buf[start..start+padding]);
        }
        buf.extend(&reset_token_for(&self.listen_keys.as_ref().unwrap().reset, &dest_id));
        self.io.push_back(Io::Transmit { destination: remote, packet: buf.into() });
    } else {
        trace!(self.log, "dropping unrecognized short packet without ID");
    }
}
/// Initiate a connection
///
/// Creates client-side connection state, configures TLS (certificate
/// verification, SNI, and — when `config.session_ticket` is provided — session
/// resumption and possibly 0-RTT), then queues the resulting ClientHello.
///
/// # Errors
/// Returns `ConnectError::MalformedSession` when a supplied session ticket
/// cannot be parsed; TLS setup errors are propagated via `?`.
pub fn connect(&mut self, remote: SocketAddrV6, config: ClientConfig) -> Result<ConnectionHandle, ConnectError> {
    let local_id = ConnectionId::random(&mut self.rng, LOCAL_ID_LEN as u8);
    // Until the server picks its own CID we address it by a random one, used
    // both as the initial ID and the remote ID.
    let remote_id = ConnectionId::random(&mut self.rng, MAX_CID_SIZE as u8);
    trace!(self.log, "initial dcid"; "value" => %remote_id);
    let conn = self.add_connection(remote_id.clone(), local_id.clone(), remote_id, remote, Side::Client);
    let mut tls = Ssl::new(&self.tls)?;
    if !config.accept_insecure_certs {
        // Pass OpenSSL's preverify result through unchanged.
        tls.set_verify_callback(ssl::SslVerifyMode::PEER, |x, _| x);
        let param = tls.param_mut();
        if let Some(name) = config.server_name {
            param.set_hostflags(X509CheckFlags::NO_PARTIAL_WILDCARDS);
            // `name` may be an IP literal or a hostname; configure the matching check.
            match name.parse() {
                Ok(ip) => { param.set_ip(ip).expect("failed to inform TLS of remote ip"); }
                Err(_) => { param.set_host(name).expect("failed to inform TLS of remote hostname"); }
            }
        }
    } else {
        tls.set_verify(ssl::SslVerifyMode::NONE);
    }
    tls.set_ex_data(*CONNECTION_INFO_INDEX, ConnectionInfo { id: local_id.clone(), remote });
    if let Some(name) = config.server_name { tls.set_hostname(name)?; }
    let result = if let Some(session) = config.session_ticket {
        // Ticket layout (see the writer in `drive_tls`): u16 length, DER-encoded
        // SSL session, then the transport parameters in effect when it was issued.
        if session.len() < 2 { return Err(ConnectError::MalformedSession); }
        let mut buf = io::Cursor::new(session);
        let len = buf.get::<u16>().map_err(|_| ConnectError::MalformedSession)? as usize;
        if buf.remaining() < len { return Err(ConnectError::MalformedSession); }
        let session = SslSession::from_der(&buf.bytes()[0..len]).map_err(|_| ConnectError::MalformedSession)?;
        buf.advance(len);
        let params = TransportParameters::read(Side::Client, &mut buf).map_err(|_| ConnectError::MalformedSession)?;
        self.connections[conn.0].set_params(params);
        unsafe { tls.set_session(&session) }?;
        let mut tls = SslStreamBuilder::new(tls, MemoryStream::new());
        tls.set_connect_state();
        if session.max_early_data() == TLS_MAX_EARLY_DATA {
            trace!(self.log, "{connection} enabling 0rtt", connection=local_id.clone());
            tls.write_early_data(&[])?; // Prompt OpenSSL to generate early keying material, read below
            self.connections[conn.0].zero_rtt_crypto = Some(ZeroRttCrypto::new(tls.ssl()));
        }
        tls.handshake()
    } else {
        tls.connect(MemoryStream::new())
    };
    // The handshake cannot complete on the first flight, so WouldBlock is the
    // only expected outcome; anything else is a bug.
    let mut tls = match result {
        Ok(_) => unreachable!(),
        Err(HandshakeError::WouldBlock(tls)) => tls,
        Err(e) => panic!("unexpected TLS error: {}", e),
    };
    // Queue the ClientHello produced above for transmission.
    self.transmit_handshake(conn, &tls.get_mut().take_outgoing());
    self.connections[conn.0].state = Some(State::Handshake(state::Handshake {
        tls, clienthello_packet: None, remote_id_set: false
    }));
    self.dirty_conns.insert(conn);
    Ok(conn)
}
/// Draw a random initial packet number from the endpoint's configured range.
fn gen_initial_packet_num(&mut self) -> u32 {
    let sample = self.initial_packet_number.sample(&mut self.rng);
    sample as u32
}
/// Create connection state and register it in the CID and remote-address maps.
/// `local_id` must be non-empty.
fn add_connection(&mut self, initial_id: ConnectionId, local_id: ConnectionId, remote_id: ConnectionId, remote: SocketAddrV6, side: Side) -> ConnectionHandle {
    debug_assert!(!local_id.is_empty());
    let packet_num = self.gen_initial_packet_num();
    let conn = Connection::new(initial_id, local_id.clone(), remote_id, remote, packet_num.into(), side, &self.config);
    let handle = ConnectionHandle(self.connections.insert(conn));
    self.connection_ids.insert(local_id, handle);
    self.connection_remotes.insert(remote, handle);
    handle
}
/// Handle a client Initial packet on a listening endpoint: authenticate it,
/// optionally perform a stateless retry, probe for 0-RTT, and — if the
/// ClientHello is acceptable — create server-side connection state and queue
/// the handshake response. All rejection paths transmit a `handshake_close`
/// packet rather than returning an error.
fn handle_initial(&mut self, now: u64, remote: SocketAddrV6, dest_id: ConnectionId, source_id: ConnectionId,
                  packet_number: u32, header: &[u8], payload: &[u8])
{
    // Handshake keys are derived from the client's chosen destination CID.
    let crypto = CryptoContext::handshake(&dest_id, Side::Server);
    let payload = if let Some(x) = crypto.decrypt(packet_number as u64, header, payload) { x.into() } else {
        debug!(self.log, "failed to authenticate initial packet");
        return;
    };
    let local_id = ConnectionId::random(&mut self.rng, LOCAL_ID_LEN as u8);
    // Refuse new connections while the accept queue (accepted but unclaimed
    // connections plus handshakes in flight) is full.
    if self.incoming.len() + self.incoming_handshakes == self.config.accept_buffer as usize {
        debug!(self.log, "rejecting connection due to full accept buffer");
        let n = self.gen_initial_packet_num();
        self.io.push_back(Io::Transmit {
            destination: remote,
            packet: handshake_close(&crypto, &source_id, &local_id, n, TransportError::SERVER_BUSY, None),
        });
        return;
    }
    let mut stream = MemoryStream::new();
    if !parse_initial(&self.log, &mut stream, payload) { return; } // TODO: Send close?
    trace!(self.log, "got initial");
    let mut tls = Ssl::new(&self.tls).unwrap(); // TODO: is this reliable?
    tls.set_ex_data(*CONNECTION_INFO_INDEX, ConnectionInfo { id: local_id.clone(), remote });
    let mut tls = SslStreamBuilder::new(tls, stream);
    tls.set_accept_state();
    let zero_rtt_crypto;
    if self.config.use_stateless_retry {
        // The stateless-retry path never derives 0-RTT keys.
        zero_rtt_crypto = None;
        match tls.stateless() {
            Ok(true) => {} // Continue on to the below accept call
            Ok(false) => {
                // OpenSSL produced a HelloRetryRequest; wrap it in a RETRY
                // packet that acks and echoes the client's packet number.
                let data = tls.get_mut().take_outgoing();
                trace!(self.log, "sending HelloRetryRequest"; "connection" => %local_id, "len" => data.len());
                let mut buf = Vec::<u8>::new();
                Header::Long {
                    ty: packet::RETRY,
                    number: packet_number,
                    destination_id: source_id, source_id: local_id,
                }.encode(&mut buf);
                let header_len = buf.len();
                let mut ack = RangeSet::new();
                ack.insert_one(packet_number as u64);
                frame::Ack::encode(0, &ack, &mut buf);
                // The HelloRetryRequest rides on stream 0 from offset 0.
                frame::Stream {
                    id: StreamId(0),
                    offset: 0,
                    fin: false,
                    data: data,
                }.encode(false, &mut buf);
                set_payload_length(&mut buf, header_len);
                // Encrypt the payload and splice it back in after the header.
                let payload = crypto.encrypt(packet_number as u64, &buf[0..header_len], &buf[header_len..]);
                debug_assert_eq!(payload.len(), buf.len() - header_len + AEAD_TAG_SIZE);
                buf.truncate(header_len);
                buf.extend_from_slice(&payload);
                self.io.push_back(Io::Transmit { destination: remote, packet: buf.into() });
                return;
            }
            Err(e) => {
                debug!(self.log, "stateless handshake failed"; "connection" => %local_id, "reason" => %e);
                let n = self.gen_initial_packet_num();
                self.io.push_back(Io::Transmit {
                    destination: remote,
                    packet: handshake_close(&crypto, &source_id, &local_id, n, TransportError::TLS_HANDSHAKE_FAILED,
                                            Some(&tls.get_mut().take_outgoing())),
                });
                return;
            }
        }
    } else {
        // Probe for TLS early data to decide whether 0-RTT keys are needed.
        match tls.read_early_data(&mut [0; 1]) {
            Ok(0) => { zero_rtt_crypto = None; }
            Ok(_) => {
                // Actual early application data inside the Initial is rejected.
                debug!(self.log, "got TLS early data"; "connection" => local_id.clone());
                let n = self.gen_initial_packet_num();
                self.io.push_back(Io::Transmit {
                    destination: remote,
                    packet: handshake_close(&crypto, &source_id, &local_id, n, TransportError::PROTOCOL_VIOLATION, None),
                });
                return;
            }
            Err(ref e) if e.code() == ssl::ErrorCode::WANT_READ => {
                // Client enabled 0-RTT; derive keys so its 0-RTT packets can be read.
                trace!(self.log, "{connection} enabled 0rtt", connection=local_id.clone());
                zero_rtt_crypto = Some(ZeroRttCrypto::new(tls.ssl()));
            }
            Err(e) => {
                debug!(self.log, "failure in SSL_read_early_data"; "connection" => local_id.clone(), "reason" => %e);
                let n = self.gen_initial_packet_num();
                self.io.push_back(Io::Transmit {
                    destination: remote,
                    packet: handshake_close(&crypto, &source_id, &local_id, n, TransportError::TLS_HANDSHAKE_FAILED, None),
                });
                return;
            }
        }
    }
    match tls.handshake() {
        // A server handshake can't complete before the client's second flight.
        Ok(_) => unreachable!(),
        Err(HandshakeError::WouldBlock(mut tls)) => {
            trace!(self.log, "performing handshake"; "connection" => local_id.clone());
            if let Some(params) = tls.ssl().ex_data(*TRANSPORT_PARAMS_INDEX).cloned() {
                let params = params.expect("transport parameter errors should have aborted the handshake");
                let conn = self.add_connection(dest_id.clone(), local_id, source_id, remote, Side::Server);
                // Also index by the client's original destination CID so
                // retransmitted Initials reach this connection.
                self.connection_ids_initial.insert(dest_id, conn);
                self.connections[conn.0].zero_rtt_crypto = zero_rtt_crypto;
                self.connections[conn.0].on_packet_authenticated(now, packet_number as u64);
                self.transmit_handshake(conn, &tls.get_mut().take_outgoing());
                self.connections[conn.0].state = Some(State::Handshake(state::Handshake {
                    tls, clienthello_packet: None, remote_id_set: true,
                }));
                self.connections[conn.0].set_params(params);
                self.dirty_conns.insert(conn);
                self.incoming_handshakes += 1;
            } else {
                debug!(self.log, "ClientHello missing transport params extension");
                let n = self.gen_initial_packet_num();
                self.io.push_back(Io::Transmit {
                    destination: remote,
                    packet: handshake_close(&crypto, &source_id, &local_id, n, TransportError::TRANSPORT_PARAMETER_ERROR, None),
                });
            }
        }
        Err(HandshakeError::Failure(mut tls)) => {
            // Distinguish bad transport parameters from generic TLS failure when
            // picking the error code sent to the peer.
            let code = if let Some(params_err) = tls.ssl().ex_data(*TRANSPORT_PARAMS_INDEX).and_then(|x| x.err()) {
                debug!(self.log, "received invalid transport parameters"; "connection" => %local_id, "reason" => %params_err);
                TransportError::TRANSPORT_PARAMETER_ERROR
            } else {
                debug!(self.log, "accept failed"; "reason" => %tls.error());
                TransportError::TLS_HANDSHAKE_FAILED
            };
            let n = self.gen_initial_packet_num();
            self.io.push_back(Io::Transmit {
                destination: remote,
                packet: handshake_close(&crypto, &source_id, &local_id, n, code, Some(&tls.get_mut().take_outgoing())),
            });
        }
        Err(HandshakeError::SetupFailure(e)) => {
            error!(self.log, "accept setup failed"; "reason" => %e);
            let n = self.gen_initial_packet_num();
            self.io.push_back(Io::Transmit {
                destination: remote,
                packet: handshake_close(&crypto, &source_id, &local_id, n, TransportError::INTERNAL_ERROR, None),
            });
        }
    }
}
/// Apply the frames of an authenticated packet to connection state.
///
/// Stream-0 data is fed into `tls` (the TLS transport) instead of the
/// application-visible stream set. `number` is the packet number of the packet
/// carrying these frames (recorded for PATH_CHALLENGE responses).
///
/// Returns `Ok(true)` when the peer closed the connection, `Ok(false)`
/// otherwise, and `Err` on a protocol violation — in which case the matching
/// `ConnectionLost` event has already been queued.
fn process_payload(&mut self, now: u64, conn: ConnectionHandle, number: u64, payload: Bytes, tls: &mut MemoryStream)
                   -> Result<bool, state::CloseReason>
{
    let cid = self.connections[conn.0].local_id.clone();
    for frame in frame::Iter::new(payload) {
        match frame {
            Frame::Padding => {}
            _ => {
                trace!(self.log, "got frame"; "connection" => cid.clone(), "type" => %frame.ty());
            }
        }
        // Any frame other than ACK makes this packet worth acknowledging.
        match frame {
            Frame::Ack(_) => {}
            _ => { self.connections[conn.0].permit_ack_only = true; }
        }
        match frame {
            Frame::Stream(frame) => {
                trace!(self.log, "got stream"; "id" => frame.id.0, "offset" => frame.offset, "len" => frame.data.len(), "fin" => frame.fin);
                // Snapshot connection-level flow-control state before borrowing the stream.
                let data_recvd = self.connections[conn.0].data_recvd;
                let max_data = self.connections[conn.0].local_max_data;
                let new_bytes = match self.connections[conn.0].get_recv_stream(frame.id) {
                    Err(e) => {
                        debug!(self.log, "received illegal stream frame"; "stream" => frame.id.0);
                        self.events.push_back((conn, Event::ConnectionLost { reason: e.into() }));
                        return Err(e.into());
                    }
                    Ok(None) => {
                        trace!(self.log, "dropping frame for closed stream");
                        continue;
                    }
                    Ok(Some(stream)) => {
                        let end = frame.offset + frame.data.len() as u64;
                        let rs = stream.recv_mut().unwrap();
                        // Data beyond, or a FIN disagreeing with, a known final offset is fatal.
                        if let Some(final_offset) = rs.final_offset() {
                            if end > final_offset || (frame.fin && end != final_offset) {
                                debug!(self.log, "final offset error"; "frame end" => end, "final offset" => final_offset);
                                self.events.push_back((conn, Event::ConnectionLost { reason: TransportError::FINAL_OFFSET_ERROR.into() }));
                                return Err(TransportError::FINAL_OFFSET_ERROR.into());
                            }
                        }
                        let prev_end = rs.limit();
                        // Only bytes past the previous high-water mark count against flow control.
                        let new_bytes = end.saturating_sub(prev_end);
                        if end > rs.max_data || data_recvd + new_bytes > max_data {
                            debug!(self.log, "flow control error";
                                   "stream" => frame.id.0, "recvd" => data_recvd, "new bytes" => new_bytes,
                                   "max data" => max_data, "end" => end, "stream max data" => rs.max_data);
                            self.events.push_back((conn, Event::ConnectionLost { reason: TransportError::FLOW_CONTROL_ERROR.into() }));
                            return Err(TransportError::FLOW_CONTROL_ERROR.into());
                        }
                        // FIN fixes the stream's final size.
                        if frame.fin {
                            match rs.state {
                                stream::RecvState::Recv { ref mut size } => { *size = Some(end); }
                                _ => {}
                            }
                        }
                        rs.recvd.insert(frame.offset..end);
                        if frame.id == StreamId(0) {
                            // Stream 0 carries TLS handshake data and must never be finished.
                            if frame.fin {
                                debug!(self.log, "got fin on stream 0"; "connection" => cid);
                                self.events.push_back((conn, Event::ConnectionLost { reason: TransportError::PROTOCOL_VIOLATION.into() }));
                                return Err(TransportError::PROTOCOL_VIOLATION.into());
                            }
                            tls.insert(frame.offset, &frame.data);
                        } else {
                            rs.buffer(frame.data, frame.offset);
                        }
                        // Once a single contiguous range covers [0, size) the stream is complete.
                        if let stream::RecvState::Recv { size: Some(size) } = rs.state {
                            if rs.recvd.len() == 1 && rs.recvd.iter().next().unwrap() == (0..size) {
                                rs.state = stream::RecvState::DataRecvd { size };
                            }
                        }
                        new_bytes
                    }
                };
                // Stream 0 is consumed by TLS, not surfaced to the application.
                if frame.id != StreamId(0) {
                    self.connections[conn.0].readable_streams.insert(frame.id);
                    self.readable_conns.insert(conn);
                }
                self.connections[conn.0].data_recvd += new_bytes;
            }
            Frame::Ack(ack) => {
                self.on_ack_received(now, conn, ack);
                // Streams fully acknowledged by this ACK are reported finished.
                for stream in self.connections[conn.0].finished_streams.drain(..) {
                    self.events.push_back((conn, Event::StreamFinished { stream }));
                }
            }
            Frame::Padding | Frame::Ping => {}
            Frame::ConnectionClose(reason) => {
                self.events.push_back((conn, Event::ConnectionLost { reason: ConnectionError::ConnectionClosed { reason } }));
                return Ok(true);
            }
            Frame::ApplicationClose(reason) => {
                self.events.push_back((conn, Event::ConnectionLost { reason: ConnectionError::ApplicationClosed { reason } }));
                return Ok(true);
            }
            Frame::Invalid(ty) => {
                debug!(self.log, "received malformed frame"; "type" => %ty);
                self.events.push_back((conn, Event::ConnectionLost { reason: TransportError::frame(ty).into() }));
                return Err(TransportError::frame(ty).into());
            }
            Frame::PathChallenge(x) => {
                // Queue a PATH_RESPONSE, keyed by this packet's number.
                self.connections[conn.0].pending.path_challenge(number, x);
            }
            Frame::PathResponse(_) => {
                debug!(self.log, "unsolicited PATH_RESPONSE");
                self.events.push_back((conn, Event::ConnectionLost { reason: TransportError::UNSOLICITED_PATH_RESPONSE.into() }));
                return Err(TransportError::UNSOLICITED_PATH_RESPONSE.into());
            }
            Frame::MaxData(bytes) => {
                let was_blocked = self.connections[conn.0].blocked();
                // Flow-control limits only ever increase; stale frames are ignored.
                self.connections[conn.0].max_data = cmp::max(bytes, self.connections[conn.0].max_data);
                if was_blocked && !self.connections[conn.0].blocked() {
                    for stream in self.connections[conn.0].blocked_streams.drain() {
                        self.events.push_back((conn, Event::StreamWritable { stream }));
                    }
                }
            }
            Frame::MaxStreamData { id, offset } => {
                // MAX_STREAM_DATA on a peer-initiated unidirectional stream is invalid:
                // we never send on such a stream.
                if id.initiator() != self.connections[conn.0].side && id.directionality() == Directionality::Uni {
                    debug!(self.log, "got MAX_STREAM_DATA on recv-only stream");
                    self.events.push_back((conn, Event::ConnectionLost { reason: TransportError::PROTOCOL_VIOLATION.into() }));
                    return Err(TransportError::PROTOCOL_VIOLATION.into());
                }
                if let Some(stream) = self.connections[conn.0].streams.get_mut(&id) {
                    let ss = stream.send_mut().unwrap();
                    if offset > ss.max_data {
                        trace!(self.log, "stream limit increased"; "stream" => id.0,
                               "old" => ss.max_data, "new" => offset, "current offset" => ss.offset);
                        // Only newly writable if it was stalled exactly at the old limit.
                        if ss.offset == ss.max_data {
                            self.events.push_back((conn, Event::StreamWritable { stream: id }));
                        }
                        ss.max_data = offset;
                    }
                } else {
                    debug!(self.log, "got MAX_STREAM_DATA on unopened stream");
                    self.events.push_back((conn, Event::ConnectionLost { reason: TransportError::PROTOCOL_VIOLATION.into() }));
                    return Err(TransportError::PROTOCOL_VIOLATION.into());
                }
            }
            Frame::MaxStreamId(id) => {
                // Raise the per-directionality stream-count limit if the frame increases it.
                let limit = match id.directionality() {
                    Directionality::Uni => &mut self.connections[conn.0].max_uni_streams,
                    Directionality::Bi => &mut self.connections[conn.0].max_bi_streams,
                };
                if id.index() > *limit {
                    *limit = id.index();
                    self.events.push_back((conn, Event::StreamAvailable { directionality: id.directionality() }));
                }
            }
            Frame::RstStream(frame::RstStream { id, error_code, final_offset }) => {
                if id == StreamId(0) {
                    debug!(self.log, "got RST_STREAM on stream 0");
                    self.events.push_back((conn, Event::ConnectionLost { reason: TransportError::PROTOCOL_VIOLATION.into() }));
                    return Err(TransportError::PROTOCOL_VIOLATION.into());
                }
                let offset = match self.connections[conn.0].get_recv_stream(id) {
                    Err(e) => {
                        debug!(self.log, "received illegal RST_STREAM");
                        self.events.push_back((conn, Event::ConnectionLost { reason: e.into() }));
                        return Err(e.into());
                    }
                    Ok(None) => {
                        trace!(self.log, "received RST_STREAM on closed stream");
                        continue;
                    }
                    Ok(Some(stream)) => {
                        let rs = stream.recv_mut().unwrap();
                        // A reset must agree with any final offset already known.
                        if let Some(offset) = rs.final_offset() {
                            if offset != final_offset {
                                self.events.push_back((conn, Event::ConnectionLost { reason: TransportError::FINAL_OFFSET_ERROR.into() }));
                                return Err(TransportError::FINAL_OFFSET_ERROR.into());
                            }
                        }
                        if !rs.is_closed() {
                            rs.state = stream::RecvState::ResetRecvd { size: final_offset, error_code };
                        }
                        rs.limit()
                    }
                };
                // Count the bytes between our high-water mark and the final offset
                // as received for connection-level flow control.
                self.connections[conn.0].data_recvd += final_offset.saturating_sub(offset);
                self.connections[conn.0].readable_streams.insert(id);
                self.readable_conns.insert(conn);
            }
            Frame::Blocked { offset } => {
                debug!(self.log, "peer claims to be blocked at connection level"; "offset" => offset);
            }
            Frame::StreamBlocked { id, offset } => {
                debug!(self.log, "peer claims to be blocked at stream level"; "stream" => id, "offset" => offset);
            }
            Frame::StreamIdBlocked { id } => {
                debug!(self.log, "peer claims to be blocked at stream ID level"; "stream" => id);
            }
            Frame::StopSending { id, error_code } => {
                // Reject STOP_SENDING for streams we don't know or have never sent on
                // (no send half, or send offset still zero).
                if self.connections[conn.0].streams.get(&id).map_or(true, |x| x.send().map_or(true, |ss| ss.offset == 0)) {
                    debug!(self.log, "got STOP_SENDING on invalid stream");
                    self.events.push_back((conn, Event::ConnectionLost { reason: TransportError::PROTOCOL_VIOLATION.into() }));
                    return Err(TransportError::PROTOCOL_VIOLATION.into());
                }
                self.reset(conn, id, 0);
                // Record the peer's error code alongside our reset.
                self.connections[conn.0].streams.get_mut(&id).unwrap().send_mut().unwrap().state =
                    stream::SendState::ResetSent { stop_reason: Some(error_code) };
            }
            Frame::NewConnectionId { .. } => {
                if self.connections[conn.0].remote_id.is_empty() {
                    debug!(self.log, "got NEW_CONNECTION_ID for connection {connection} with empty remote ID",
                           connection=self.connections[conn.0].local_id.clone());
                    self.events.push_back((conn, Event::ConnectionLost { reason: TransportError::PROTOCOL_VIOLATION.into() }));
                    return Err(TransportError::PROTOCOL_VIOLATION.into());
                }
                trace!(self.log, "ignoring NEW_CONNECTION_ID (unimplemented)");
            }
        }
    }
    Ok(false)
}
/// Feed newly received stream-0 data into the TLS state machine and surface any
/// resulting session tickets or TLS-level failures.
///
/// On error, the matching `ConnectionLost` event has already been queued.
fn drive_tls(&mut self, conn: ConnectionHandle, tls: &mut SslStream<MemoryStream>) -> Result<(), TransportError> {
    if tls.get_ref().read_blocked() { return Ok(()); }
    let prev_offset = tls.get_ref().read_offset();
    // We never expect application data on stream 0; this 1-byte read exists
    // only to drive TLS forward. Its outcome is examined after ticket handling.
    let status = tls.ssl_read(&mut [0; 1]);
    let progress = tls.get_ref().read_offset() - prev_offset;
    trace!(self.log, "stream 0 read {bytes} bytes", bytes=progress);
    // Credit the consumed bytes back to stream 0's flow-control window and
    // queue a MAX_STREAM_DATA update for it.
    self.connections[conn.0].streams.get_mut(&StreamId(0)).unwrap()
        .recv_mut().unwrap().max_data += progress;
    self.connections[conn.0].pending.max_stream_data.insert(StreamId(0));
    // Process any new session tickets that might have been delivered
    {
        let mut buffer = self.session_ticket_buffer.lock().unwrap();
        for session in buffer.drain(..) {
            if let Ok(session) = session {
                trace!(self.log, "{connection} got session ticket", connection=self.connections[conn.0].local_id.clone());
                let params = &self.connections[conn.0].params;
                let session = session.to_der().expect("failed to serialize session ticket");
                // Ticket wire format (read back in `connect`): u16 length,
                // DER-encoded session, then the current transport parameters.
                let mut buf = Vec::new();
                buf.put_u16_be(session.len() as u16);
                buf.extend_from_slice(&session);
                params.write(Side::Server, &mut buf);
                self.events.push_back((conn, Event::NewSessionTicket { ticket: buf.into() }));
            } else {
                debug!(self.log, "{connection} got malformed session ticket",
                       connection=self.connections[conn.0].local_id.clone());
                self.events.push_back((conn, Event::ConnectionLost { reason: TransportError::PROTOCOL_VIOLATION.into() }));
                return Err(TransportError::PROTOCOL_VIOLATION.into());
            }
        }
    }
    match status {
        // WANT_READ: TLS merely needs more handshake data from the peer.
        Err(ref e) if e.code() == ssl::ErrorCode::WANT_READ => Ok(()),
        Ok(_) => {
            // A successful read means the peer sent application data on stream 0.
            debug!(self.log, "got TLS application data");
            self.events.push_back((conn, Event::ConnectionLost { reason: TransportError::PROTOCOL_VIOLATION.into() }));
            Err(TransportError::PROTOCOL_VIOLATION.into())
        }
        Err(ref e) if e.code() == ssl::ErrorCode::SSL => {
            debug!(self.log, "TLS error"; "error" => %e);
            self.events.push_back((conn, Event::ConnectionLost { reason: TransportError::TLS_FATAL_ALERT_RECEIVED.into() }));
            Err(TransportError::TLS_FATAL_ALERT_RECEIVED.into())
        }
        Err(ref e) if e.code() == ssl::ErrorCode::ZERO_RETURN => {
            // TLS-level close on stream 0 is not how QUIC connections end.
            debug!(self.log, "TLS session terminated unexpectedly");
            self.events.push_back((conn, Event::ConnectionLost { reason: TransportError::PROTOCOL_VIOLATION.into() }));
            Err(TransportError::PROTOCOL_VIOLATION.into())
        }
        Err(e) => {
            error!(self.log, "unexpected TLS error"; "error" => %e);
            self.events.push_back((conn, Event::ConnectionLost { reason: TransportError::INTERNAL_ERROR.into() }));
            Err(TransportError::INTERNAL_ERROR.into())
        }
    }
}
fn handle_connected_inner(&mut self, now: u64, conn: ConnectionHandle, remote: SocketAddrV6, packet: Packet, state: State) -> State { match state {
State::Handshake(mut state) => {
match packet.header {
Header::Long { ty: packet::RETRY, number, destination_id: conn_id, source_id: remote_id, .. } => {
// FIXME: the below guards fail to handle repeated retries resulting from retransmitted initials
if state.clienthello_packet.is_none() {
// Received Retry as a server
debug!(self.log, "received retry from client"; "connection" => %conn_id);
self.events.push_back((conn, Event::ConnectionLost { reason: TransportError::PROTOCOL_VIOLATION.into() }));
State::handshake_failed(TransportError::PROTOCOL_VIOLATION, None)
} else if state.clienthello_packet.unwrap() > number {
// Retry corresponds to an outdated Initial; must be a duplicate, so ignore it
State::Handshake(state)
} else if state.tls.get_ref().read_offset() != 0 {
// This condition works because Handshake packets are the only ones that we allow to make lasting changes to the read_offset
debug!(self.log, "received retry after a handshake packet");
self.events.push_back((conn, Event::ConnectionLost { reason: TransportError::PROTOCOL_VIOLATION.into() }));
State::handshake_failed(TransportError::PROTOCOL_VIOLATION,None)
} else if let Some(payload) = self.connections[conn.0].decrypt(true, number as u64, &packet.header_data, &packet.payload) {
let mut new_stream = MemoryStream::new();
if !parse_initial(&self.log, &mut new_stream, payload.into()) {
debug!(self.log, "invalid retry payload");
self.events.push_back((conn, Event::ConnectionLost { reason: TransportError::PROTOCOL_VIOLATION.into() }));
return State::handshake_failed(TransportError::PROTOCOL_VIOLATION, None);
}
*state.tls.get_mut() = new_stream;
match state.tls.handshake() {
Err(HandshakeError::WouldBlock(mut tls)) => {
self.on_packet_authenticated(now, conn, number as u64);
trace!(self.log, "resending ClientHello"; "remote_id" => %remote_id);
let local_id = self.connections[conn.0].local_id.clone();
// Discard transport state
self.connections[conn.0] = Connection::new(
remote_id.clone(), local_id, remote_id, remote,
self.initial_packet_number.sample(&mut self.rng).into(), Side::Client, &self.config
);
// Send updated ClientHello
self.transmit_handshake(conn, &tls.get_mut().take_outgoing());
// Prepare to receive Handshake packets that start stream 0 from offset 0
tls.get_mut().reset_read();
State::Handshake(state::Handshake { tls, clienthello_packet: state.clienthello_packet, remote_id_set: state.remote_id_set })
},
Ok(_) => {
debug!(self.log, "unexpectedly completed handshake in RETRY packet");
self.events.push_back((conn, Event::ConnectionLost { reason: TransportError::PROTOCOL_VIOLATION.into() }));
State::handshake_failed(TransportError::PROTOCOL_VIOLATION, None)
}
Err(HandshakeError::Failure(mut tls)) => {
debug!(self.log, "handshake failed"; "reason" => %tls.error());
self.events.push_back((conn, Event::ConnectionLost { reason: TransportError::TLS_HANDSHAKE_FAILED.into() }));
State::handshake_failed(TransportError::TLS_HANDSHAKE_FAILED, Some(tls.get_mut().take_outgoing().to_owned().into()))
}
Err(HandshakeError::SetupFailure(e)) => {
error!(self.log, "handshake setup failed"; "reason" => %e);
self.events.push_back((conn, Event::ConnectionLost { reason: TransportError::INTERNAL_ERROR.into() }));
State::handshake_failed(TransportError::INTERNAL_ERROR, None)
}
}
} else {
debug!(self.log, "failed to authenticate retry packet");
State::Handshake(state)
}
}
Header::Long { ty: packet::HANDSHAKE, destination_id: id, source_id: remote_id, number, .. } => {
if !state.remote_id_set {
trace!(self.log, "got remote connection id"; "connection" => %id, "remote_id" => %remote_id);
self.connections[conn.0].remote_id = remote_id;
state.remote_id_set = true;
}
let payload = if let Some(x) = self.connections[conn.0].decrypt(true, number as u64, &packet.header_data, &packet.payload) { x } else {
debug!(self.log, "failed to authenticate handshake packet");
return State::Handshake(state);
};
self.on_packet_authenticated(now, conn, number as u64);
// Complete handshake (and ultimately send Finished)
for frame in frame::Iter::new(payload.into()) {
match frame {
Frame::Ack(_) => {}
_ => { self.connections[conn.0].permit_ack_only = true; }
}
match frame {
Frame::Padding => {}
Frame::Stream(frame::Stream { id: StreamId(0), offset, data, .. }) => {
state.tls.get_mut().insert(offset, &data);
}
Frame::Stream(frame::Stream { .. }) => {
debug!(self.log, "non-stream-0 stream frame in handshake");
self.events.push_back((conn, Event::ConnectionLost { reason: TransportError::PROTOCOL_VIOLATION.into() }));
return State::handshake_failed(TransportError::PROTOCOL_VIOLATION, None);
}
Frame::Ack(ack) => {
self.on_ack_received(now, conn, ack);
}
Frame::ConnectionClose(reason) => {
self.events.push_back((conn, Event::ConnectionLost { reason: ConnectionError::ConnectionClosed { reason } }));
return State::Draining(state.into());
}
Frame::ApplicationClose(reason) => {
self.events.push_back((conn, Event::ConnectionLost { reason: ConnectionError::ApplicationClosed { reason } }));
return State::Draining(state.into());
}
Frame::PathChallenge(value) => {
self.connections[conn.0].handshake_pending.path_challenge(number as u64, value);
}
_ => {
debug!(self.log, "unexpected frame type in handshake"; "connection" => %id, "type" => %frame.ty());
self.events.push_back((conn, Event::ConnectionLost { reason: TransportError::PROTOCOL_VIOLATION.into() }));
return State::handshake_failed(TransportError::PROTOCOL_VIOLATION, None);
}
}
}
if state.tls.get_ref().read_blocked() {
return State::Handshake(state);
}
let prev_offset = state.tls.get_ref().read_offset();
match state.tls.handshake() {
Ok(mut tls) => {
if self.connections[conn.0].side == Side::Client {
if let Some(params) = tls.ssl().ex_data(*TRANSPORT_PARAMS_INDEX).cloned() {
self.connections[conn.0].set_params(params.expect("transport param errors should fail the handshake"));
} else {
debug!(self.log, "server didn't send transport params");
self.events.push_back((conn, Event::ConnectionLost { reason: TransportError::TRANSPORT_PARAMETER_ERROR.into() }));
return State::handshake_failed(TransportError::TLS_HANDSHAKE_FAILED,
Some(tls.get_mut().take_outgoing().to_owned().into()));
}
}
trace!(self.log, "{connection} established", connection=id.clone());
self.connections[conn.0].handshake_cleanup(&self.config);
if self.connections[conn.0].side == Side::Client {
self.transmit_handshake(conn, &tls.get_mut().take_outgoing());
} else {
self.connections[conn.0].transmit(StreamId(0), tls.get_mut().take_outgoing()[..].into());
}
match self.connections[conn.0].side {
Side::Client => {
self.events.push_back((conn, Event::Connected {
protocol: tls.ssl().selected_alpn_protocol().map(|x| x.into()),
}));
}
Side::Server => {
self.incoming_handshakes -= 1;
self.incoming.push_back(conn);
}
}
self.connections[conn.0].crypto = Some(CryptoContext::established(tls.ssl(), self.connections[conn.0].side));
self.connections[conn.0].streams.get_mut(&StreamId(0)).unwrap()
.recv_mut().unwrap().max_data += tls.get_ref().read_offset() - prev_offset;
self.connections[conn.0].pending.max_stream_data.insert(StreamId(0));
State::Established(state::Established { tls })
}
Err(HandshakeError::WouldBlock(mut tls)) => {
trace!(self.log, "handshake ongoing"; "connection" => %id);
self.connections[conn.0].handshake_cleanup(&self.config);
self.connections[conn.0].streams.get_mut(&StreamId(0)).unwrap()
.recv_mut().unwrap().max_data += tls.get_ref().read_offset() - prev_offset;
{
let response = tls.get_mut().take_outgoing();
if !response.is_empty() {
self.transmit_handshake(conn, &response);
}
}
State::Handshake(state::Handshake { tls, clienthello_packet: state.clienthello_packet, remote_id_set: state.remote_id_set })
}
Err(HandshakeError::Failure(mut tls)) => {
let code = if let Some(params_err) = tls.ssl().ex_data(*TRANSPORT_PARAMS_INDEX).and_then(|x| x.err()) {
debug!(self.log, "received invalid transport parameters"; "connection" => %id, "reason" => %params_err);
TransportError::TRANSPORT_PARAMETER_ERROR
} else {
debug!(self.log, "handshake failed"; "reason" => %tls.error());
TransportError::TLS_HANDSHAKE_FAILED
};
self.events.push_back((conn, Event::ConnectionLost { reason: code.into() }));
State::handshake_failed(code, Some(tls.get_mut().take_outgoing().to_owned().into()))
}
Err(HandshakeError::SetupFailure(e)) => {
error!(self.log, "handshake failed"; "connection" => %id, "reason" => %e);
self.events.push_back((conn, Event::ConnectionLost { reason: TransportError::INTERNAL_ERROR.into() }));
State::handshake_failed(TransportError::INTERNAL_ERROR, None)
}
}
}
Header::Long { ty: packet::INITIAL, .. } if self.connections[conn.0].side == Side::Server => {
trace!(self.log, "dropping duplicate Initial");
State::Handshake(state)
}
Header::Long { ty: packet::ZERO_RTT, number, destination_id: ref id, .. } if self.connections[conn.0].side == Side::Server => {
let payload = if let Some(ref crypto) = self.connections[conn.0].zero_rtt_crypto {
if let Some(x) = crypto.decrypt(number as u64, &packet.header_data, &packet.payload) { x }
else {
debug!(self.log, "{connection} failed to authenticate 0-RTT packet", connection=id.clone());
return State::Handshake(state);
}
} else {
debug!(self.log, "{connection} ignoring unsupported 0-RTT packet", connection=id.clone());
return State::Handshake(state);
};
self.on_packet_authenticated(now, conn, number as u64);
match self.process_payload(now, conn, number as u64, payload.into(), state.tls.get_mut()) {
Err(e) => State::HandshakeFailed(state::HandshakeFailed { reason: e, app_closed: false, alert: None }),
Ok(true) => State::Draining(state.into()),
Ok(false) => State::Handshake(state),
}
}
Header::Long { ty, .. } => {
debug!(self.log, "unexpected packet type"; "type" => format!("{:02X}", ty));
self.events.push_back((conn, Event::ConnectionLost { reason: TransportError::PROTOCOL_VIOLATION.into() }));
State::handshake_failed(TransportError::PROTOCOL_VIOLATION, None)
}
Header::VersionNegotiate { destination_id: id, .. } => {
let mut payload = io::Cursor::new(&packet.payload[..]);
if packet.payload.len() % 4 != 0 {
debug!(self.log, "malformed version negotiation"; "connection" => %id);
self.events.push_back((conn, Event::ConnectionLost { reason: TransportError::PROTOCOL_VIOLATION.into() }));
return State::handshake_failed(TransportError::PROTOCOL_VIOLATION, None);
}
while payload.has_remaining() {
let version = payload.get::<u32>().unwrap();
if version == VERSION {
// Our version is supported, so this packet is spurious
return State::Handshake(state);
}
}
debug!(self.log, "remote doesn't support our version");
self.events.push_back((conn, Event::ConnectionLost { reason: ConnectionError::VersionMismatch }));
State::Draining(state.into())
}
// TODO: SHOULD buffer these to improve reordering tolerance.
Header::Short { .. } => {
trace!(self.log, "dropping short packet during handshake");
State::Handshake(state)
}
}
}
State::Established(mut state) => {
let id = self.connections[conn.0].local_id.clone();
if let Header::Long { .. } = packet.header {
trace!(self.log, "discarding unprotected packet"; "connection" => %id);
return State::Established(state);
}
let (payload, number) = match self.connections[conn.0].decrypt_packet(false, packet) {
Ok(x) => x,
Err(None) => {
trace!(self.log, "failed to authenticate packet"; "connection" => %id);
return State::Established(state);
}
Err(Some(e)) => {
warn!(self.log, "got illegal packet"; "connection" => %id);
self.events.push_back((conn, Event::ConnectionLost { reason: e.into() }));
return State::closed(e);
}
};
self.on_packet_authenticated(now, conn, number);
if self.connections[conn.0].awaiting_handshake {
assert_eq!(self.connections[conn.0].side, Side::Client,
"only the client confirms handshake completion based on a protected packet");
// Forget about unacknowledged handshake packets
self.connections[conn.0].handshake_cleanup(&self.config);
}
match self.process_payload(now, conn, number, payload.into(), state.tls.get_mut())
.and_then(|x| { self.drive_tls(conn, &mut state.tls)?; Ok(x) })
{
Err(e) => State::closed(e),
Ok(true) => {
// Inform OpenSSL that the connection is being closed gracefully. This ensures that a resumable
// session is not erased from the anti-replay cache as it otherwise might be.
state.tls.shutdown().unwrap();
State::Draining(state.into())
}
Ok(false) => State::Established(state),
}
}
State::HandshakeFailed(state) => {
if let Ok((payload, _)) = self.connections[conn.0].decrypt_packet(true, packet) {
for frame in frame::Iter::new(payload.into()) {
match frame {
Frame::ConnectionClose(_) | Frame::ApplicationClose(_) => {
trace!(self.log, "draining");
return State::Draining(state.into());
}
_ => {}
}
}
}
State::HandshakeFailed(state)
}
State::Closed(state) => {
if let Ok((payload, _)) = self.connections[conn.0].decrypt_packet(false, packet) {
for frame in frame::Iter::new(payload.into()) {
match frame {
Frame::ConnectionClose(_) | Frame::ApplicationClose(_) => {
trace!(self.log, "draining");
return State::Draining(state.into());
}
_ => {}
}
}
}
State::Closed(state)
}
State::Draining(x) => State::Draining(x),
State::Drained => State::Drained,
}}
/// Feed an incoming `packet` from `remote` into `conn`'s state machine.
///
/// The connection's `State` is temporarily taken out of its slot, transformed by
/// `handle_connected_inner`, and stored back; close-related side effects (drain timer,
/// close packet transmission) run on the transition *into* a closed state.
fn handle_connected(&mut self, now: u64, conn: ConnectionHandle, remote: SocketAddrV6, packet: Packet) {
    trace!(self.log, "connection got packet"; "connection" => %self.connections[conn.0].local_id, "len" => packet.payload.len());
    // Remember whether we were already closed so the close machinery below only fires once.
    let was_closed = self.connections[conn.0].state.as_ref().unwrap().is_closed();
    // State transitions
    let state = self.connections[conn.0].state.take().unwrap();
    let state = self.handle_connected_inner(now, conn, remote, packet, state);
    if !was_closed && state.is_closed() {
        self.close_common(now, conn);
    }
    // Transmit CONNECTION_CLOSE if necessary
    match state {
        State::HandshakeFailed(ref state) => {
            if !was_closed && self.connections[conn.0].side == Side::Server {
                // The handshake just failed, so it no longer counts as in progress.
                self.incoming_handshakes -= 1;
            }
            let n = self.connections[conn.0].get_tx_number();
            // Failed handshakes are closed with handshake keys, optionally carrying a TLS alert.
            self.io.push_back(Io::Transmit {
                destination: remote,
                packet: handshake_close(&self.connections[conn.0].handshake_crypto,
                                        &self.connections[conn.0].remote_id,
                                        &self.connections[conn.0].local_id,
                                        n as u32, state.reason.clone(), state.alert.as_ref().map(|x| &x[..])),
            });
            self.reset_idle_timeout(now, conn);
        }
        State::Closed(ref state) => {
            self.io.push_back(Io::Transmit {
                destination: remote,
                packet: self.connections[conn.0].make_close(&state.reason),
            });
            self.reset_idle_timeout(now, conn);
        }
        _ => {}
    }
    // Put the (possibly new) state back and schedule any pending work.
    self.connections[conn.0].state = Some(state);
    self.dirty_conns.insert(conn);
}
/// Re-arm the idle timer: defer the deadline now that there has been activity.
fn reset_idle_timeout(&mut self, now: u64, conn: ConnectionHandle) {
    // Honor whichever idle timeout is stricter: our configured one or the peer's.
    let timeout = cmp::min(self.config.idle_timeout, self.connections[conn.0].params.idle_timeout);
    let deadline = now + timeout as u64 * 1000000;
    self.connections[conn.0].set_idle = Some(Some(deadline));
}
/// Move every sendable packet for `conn` into the outgoing IO queue, then flush
/// any deferred timer updates the connection accumulated.
fn flush_pending(&mut self, now: u64, conn: ConnectionHandle) {
    let mut transmitted = false;
    while let Some(packet) = self.connections[conn.0].next_packet(&self.log, &self.config, now) {
        let destination = self.connections[conn.0].remote;
        self.io.push_back(Io::Transmit { destination, packet: packet.into() });
        transmitted = true;
    }
    if transmitted {
        self.reset_idle_timeout(now, conn);
    }
    // Timer changes are staged on the connection; translate them into IO commands here.
    let c = &mut self.connections[conn.0];
    if let Some(update) = c.set_idle.take() {
        match update {
            Some(time) => self.io.push_back(Io::TimerStart { connection: conn, timer: Timer::Idle, time }),
            None => self.io.push_back(Io::TimerStop { connection: conn, timer: Timer::Idle }),
        }
    }
    if let Some(update) = c.set_loss_detection.take() {
        match update {
            Some(time) => self.io.push_back(Io::TimerStart { connection: conn, timer: Timer::LossDetection, time }),
            None => self.io.push_back(Io::TimerStop { connection: conn, timer: Timer::LossDetection }),
        }
    }
}
/// Drop all endpoint-level references to `conn` and release its slot.
fn forget(&mut self, conn: ConnectionHandle) {
    // Unregister from the scheduling sets first, then the lookup maps,
    // and finally free the connection storage itself.
    self.dirty_conns.remove(&conn);
    self.readable_conns.remove(&conn);
    if self.connections[conn.0].side == Side::Server {
        // Servers also index connections by the client's initial DCID.
        self.connection_ids_initial.remove(&self.connections[conn.0].initial_id);
    }
    self.connection_ids.remove(&self.connections[conn.0].local_id);
    self.connection_remotes.remove(&self.connections[conn.0].remote);
    self.connections.remove(conn.0);
}
/// Handle a timer expiring
///
/// Drives the three per-connection timers: `Close` (the drain period finished),
/// `Idle` (the peer went silent), and `LossDetection` (handshake retransmit,
/// early retransmit / time loss detection, TLP, or RTO — checked in that order).
pub fn timeout(&mut self, now: u64, conn: ConnectionHandle, timer: Timer) {
    match timer {
        Timer::Close => {
            self.io.push_back(Io::TimerStop { connection: conn, timer: Timer::Idle });
            self.events.push_back((conn, Event::ConnectionDrained));
            if self.connections[conn.0].state.as_ref().unwrap().is_app_closed() {
                // The application already closed it, so nothing will query this connection again.
                self.forget(conn);
            } else {
                self.connections[conn.0].state = Some(State::Drained);
            }
        }
        Timer::Idle => {
            self.close_common(now, conn);
            // Transition into Draining from whatever state the connection was in.
            let state = State::Draining(match self.connections[conn.0].state.take().unwrap() {
                State::Handshake(x) => x.into(),
                State::HandshakeFailed(x) => x.into(),
                State::Established(x) => x.into(),
                State::Closed(x) => x.into(),
                State::Draining(x) => x.into(),
                State::Drained => unreachable!(),
            });
            self.connections[conn.0].state = Some(state);
            self.events.push_back((conn, Event::ConnectionLost {
                reason: ConnectionError::TimedOut,
            }));
            self.dirty_conns.insert(conn); // Ensure the loss detection timer cancellation goes through
        }
        Timer::LossDetection => {
            if self.connections[conn.0].awaiting_handshake {
                trace!(self.log, "retransmitting handshake packets"; "connection" => %self.connections[conn.0].local_id);
                // Collect first to avoid mutating sent_packets while iterating it.
                let packets = self.connections[conn.0].sent_packets.iter()
                    .filter_map(|(&packet, info)| if info.handshake { Some(packet) } else { None })
                    .collect::<Vec<_>>();
                for number in packets {
                    // `info` is only read and consumed (field move), so no `mut` is needed.
                    let info = self.connections[conn.0].sent_packets.remove(&number).unwrap();
                    self.connections[conn.0].handshake_pending += info.retransmits;
                    self.connections[conn.0].bytes_in_flight -= info.bytes as u64;
                }
                self.connections[conn.0].handshake_count += 1;
            } else if self.connections[conn.0].loss_time != 0 {
                // Early retransmit or Time Loss Detection
                let largest = self.connections[conn.0].largest_acked_packet;
                self.connections[conn.0].detect_lost_packets(&self.config, now, largest);
            } else if self.connections[conn.0].tlp_count < self.config.max_tlps {
                trace!(self.log, "sending TLP {number} in {pn}",
                       number=self.connections[conn.0].tlp_count,
                       pn=self.connections[conn.0].largest_sent_packet + 1;
                       "outstanding" => ?self.connections[conn.0].sent_packets.keys().collect::<Vec<_>>(),
                       "in flight" => self.connections[conn.0].bytes_in_flight);
                // Tail Loss Probe.
                self.io.push_back(Io::Transmit {
                    destination: self.connections[conn.0].remote,
                    packet: self.connections[conn.0].force_transmit(&self.config, now),
                });
                self.reset_idle_timeout(now, conn);
                self.connections[conn.0].tlp_count += 1;
            } else {
                trace!(self.log, "RTO fired, retransmitting"; "pn" => self.connections[conn.0].largest_sent_packet + 1,
                       "outstanding" => ?self.connections[conn.0].sent_packets.keys().collect::<Vec<_>>(),
                       "in flight" => self.connections[conn.0].bytes_in_flight);
                // RTO
                if self.connections[conn.0].rto_count == 0 {
                    // Remember where the RTO sequence started so recovery can tell probe
                    // packets apart from pre-RTO ones.
                    self.connections[conn.0].largest_sent_before_rto = self.connections[conn.0].largest_sent_packet;
                }
                // Send two probe packets.
                for _ in 0..2 {
                    self.io.push_back(Io::Transmit {
                        destination: self.connections[conn.0].remote,
                        packet: self.connections[conn.0].force_transmit(&self.config, now),
                    });
                }
                self.reset_idle_timeout(now, conn);
                self.connections[conn.0].rto_count += 1;
            }
            self.connections[conn.0].set_loss_detection_alarm(&self.config);
            self.dirty_conns.insert(conn);
        }
    }
}
/// Queue handshake `messages` for transmission on stream 0.
fn transmit_handshake(&mut self, conn: ConnectionHandle, messages: &[u8]) {
    let len = messages.len() as u64;
    // Reserve this data's range of send offsets on stream 0.
    let offset = {
        let send = self.connections[conn.0].streams.get_mut(&StreamId(0)).unwrap().send_mut().unwrap();
        let start = send.offset;
        send.offset += len;
        send.bytes_in_flight += len;
        start
    };
    let frame = frame::Stream { id: StreamId(0), fin: false, offset, data: messages.into() };
    self.connections[conn.0].handshake_pending.stream.push_back(frame);
    self.connections[conn.0].awaiting_handshake = true;
}
/// Process an incoming ACK frame and wake any streams it unblocked.
fn on_ack_received(&mut self, now: u64, conn: ConnectionHandle, ack: frame::Ack) {
    trace!(self.log, "got ack"; "ranges" => ?ack.iter().collect::<Vec<_>>());
    let previously_blocked = self.connections[conn.0].blocked();
    self.connections[conn.0].on_ack_received(&self.config, now, ack);
    // If the ACK freed connection-level send capacity, notify every stream that
    // had been blocked on it.
    if previously_blocked && !self.connections[conn.0].blocked() {
        let unblocked: Vec<_> = self.connections[conn.0].blocked_streams.drain().collect();
        for stream in unblocked {
            self.events.push_back((conn, Event::StreamWritable { stream }));
        }
    }
}
/// Transmit data on a stream
///
/// Returns the number of bytes written on success.
///
/// # Panics
/// - when applied to a stream that does not have an active outgoing channel
pub fn write(&mut self, conn: ConnectionHandle, stream: StreamId, data: &[u8]) -> Result<usize, WriteError> {
    let result = self.connections[conn.0].write(stream, data);
    match result {
        Ok(n) => {
            // Data was buffered; schedule the connection so it gets sent.
            self.dirty_conns.insert(conn);
            trace!(self.log, "write"; "connection" => %self.connections[conn.0].local_id, "stream" => stream.0, "len" => n)
        }
        Err(WriteError::Blocked) if self.connections[conn.0].congestion_blocked() => {
            trace!(self.log, "write blocked by congestion"; "connection" => %self.connections[conn.0].local_id);
        }
        Err(WriteError::Blocked) => {
            trace!(self.log, "write blocked by flow control"; "connection" => %self.connections[conn.0].local_id, "stream" => stream.0);
        }
        _ => {}
    }
    result
}
/// Indicate that no more data will be sent on a stream
///
/// All previously transmitted data will still be delivered. Incoming data on bidirectional streams is unaffected.
///
/// # Panics
/// - when applied to a stream that does not have an active outgoing channel
pub fn finish(&mut self, conn: ConnectionHandle, stream: StreamId) {
    self.connections[conn.0].finish(stream);
    // Finishing generates transmission work (e.g. the stream's final frame), so schedule it.
    self.dirty_conns.insert(conn);
}
/// Read data from a stream
///
/// Treats a stream like a simple pipe, similar to a TCP connection. Subject to head-of-line blocking within the
/// stream. Consider `read_unordered` for higher throughput.
///
/// # Panics
/// - when applied to a stream that does not have an active incoming channel
pub fn read(&mut self, conn: ConnectionHandle, stream: StreamId, buf: &mut [u8]) -> Result<usize, ReadError> {
    // Reading frees receive window, so flow control frames may need to be sent.
    self.dirty_conns.insert(conn);
    let result = self.connections[conn.0].read(stream, buf);
    match result {
        Err(ReadError::Finished) | Err(ReadError::Reset { .. }) => {
            // Terminal condition: the stream's state can potentially be released.
            self.connections[conn.0].maybe_cleanup(stream);
        }
        _ => {}
    }
    result
}
/// Read data from a stream out of order
///
/// Unlike `read`, this interface is not subject to head-of-line blocking within the stream, and hence can achieve
/// higher throughput over lossy links.
///
/// Some segments may be received multiple times.
///
/// On success, returns `Ok((data, offset))` where `offset` is the position `data` begins in the stream.
///
/// # Panics
/// - when applied to a stream that does not have an active incoming channel
pub fn read_unordered(&mut self, conn: ConnectionHandle, stream: StreamId) -> Result<(Bytes, u64), ReadError> {
    // Reading frees receive window, so flow control frames may need to be sent.
    self.dirty_conns.insert(conn);
    let result = self.connections[conn.0].read_unordered(stream);
    match result {
        Err(ReadError::Finished) | Err(ReadError::Reset { .. }) => {
            // Terminal condition: the stream's state can potentially be released.
            self.connections[conn.0].maybe_cleanup(stream);
        }
        _ => {}
    }
    result
}
/// Abandon transmitting data on a stream
///
/// # Panics
/// - when applied to a receive stream or an unopened send stream
pub fn reset(&mut self, conn: ConnectionHandle, stream: StreamId, error_code: u16) {
    assert!(stream.directionality() == Directionality::Bi || stream.initiator() == self.connections[conn.0].side,
            "only streams supporting outgoing data may be reset");
    {
        // Resetting a closed stream is a no-op.
        let ss = match self.connections[conn.0].streams.get_mut(&stream) {
            Some(x) => x.send_mut().unwrap(),
            None => { return; }
        };
        match ss.state {
            // Already delivered or already reset: nothing to do.
            stream::SendState::DataRecvd
            | stream::SendState::ResetSent { .. }
            | stream::SendState::ResetRecvd { .. } => { return; }
            _ => {}
        }
        ss.state = stream::SendState::ResetSent { stop_reason: None };
    }
    self.connections[conn.0].pending.rst_stream.push((stream, error_code));
    self.dirty_conns.insert(conn);
}
/// Instruct the peer to abandon transmitting data on a stream
///
/// # Panics
/// - when applied to a stream that has not begun receiving data
pub fn stop_sending(&mut self, conn: ConnectionHandle, stream: StreamId, error_code: u16) {
    self.connections[conn.0].stop_sending(stream, error_code);
    // Schedule the connection so the request actually goes out.
    self.dirty_conns.insert(conn);
}
/// Create a new stream
///
/// Returns `None` if the maximum number of streams currently permitted by the remote endpoint are already open.
pub fn open(&mut self, conn: ConnectionHandle, direction: Directionality) -> Option<StreamId> {
    let config = &self.config;
    self.connections[conn.0].open(config, direction)
}
/// Ping the remote endpoint
///
/// Useful for preventing an otherwise idle connection from timing out.
pub fn ping(&mut self, conn: ConnectionHandle) {
    // Queue a PING frame and schedule the connection for transmission.
    self.connections[conn.0].pending.ping = true;
    self.dirty_conns.insert(conn);
}
/// Shared close bookkeeping: stop loss detection and arm the drain timer.
fn close_common(&mut self, now: u64, conn: ConnectionHandle) {
    trace!(self.log, "connection closed");
    // Loss detection is moot once we're closing.
    self.connections[conn.0].set_loss_detection = Some(None);
    // Give the peer 3 RTOs to observe the close before the state is dropped.
    let time = now + 3 * self.connections[conn.0].rto(&self.config);
    self.io.push_back(Io::TimerStart { connection: conn, timer: Timer::Close, time });
}
/// Close a connection immediately
///
/// This does not ensure delivery of outstanding data. It is the application's responsibility to call this only when
/// all important communications have been completed.
pub fn close(&mut self, now: u64, conn: ConnectionHandle, error_code: u16, reason: Bytes) {
    // Closing an already-drained connection just releases its resources.
    if let State::Drained = *self.connections[conn.0].state.as_ref().unwrap() {
        self.forget(conn);
        return;
    }
    if let State::Established(ref mut state) = *self.connections[conn.0].state.as_mut().unwrap() {
        // Inform OpenSSL that the connection is being closed gracefully. This ensures that a resumable session is
        // not erased from the anti-replay cache as it otherwise might be.
        state.tls.shutdown().unwrap();
    }
    let was_closed = self.connections[conn.0].state.as_ref().unwrap().is_closed();
    let reason = state::CloseReason::Application(frame::ApplicationClose { error_code, reason });
    // Only the first close arms the drain timer and puts a close packet on the wire.
    if !was_closed {
        self.close_common(now, conn);
        self.io.push_back(Io::Transmit {
            destination: self.connections[conn.0].remote,
            packet: self.connections[conn.0].make_close(&reason),
        });
        self.reset_idle_timeout(now, conn);
        self.dirty_conns.insert(conn);
    }
    // Transition to the closed counterpart of the current state, recording that the
    // close was application-initiated.
    self.connections[conn.0].state = Some(match self.connections[conn.0].state.take().unwrap() {
        State::Handshake(_) => State::HandshakeFailed(state::HandshakeFailed { reason, alert: None, app_closed: true }),
        State::HandshakeFailed(x) => State::HandshakeFailed(state::HandshakeFailed { app_closed: true, ..x }),
        State::Established(_) => State::Closed(state::Closed { reason, app_closed: true }),
        State::Closed(x) => State::Closed(state::Closed { app_closed: true, ..x}),
        State::Draining(x) => State::Draining(state::Draining { app_closed: true, ..x}),
        State::Drained => unreachable!(),
    });
}
/// Record that an incoming packet for `conn` decrypted successfully.
fn on_packet_authenticated(&mut self, now: u64, conn: ConnectionHandle, packet: u64) {
    trace!(self.log, "packet authenticated"; "connection" => %self.connections[conn.0].local_id, "pn" => packet);
    // Authenticated traffic proves the peer is alive, so push out the idle deadline.
    self.reset_idle_timeout(now, conn);
    self.connections[conn.0].on_packet_authenticated(now, packet);
}
/// Look up whether we're the client or server of `conn`.
pub fn get_side(&self, conn: ConnectionHandle) -> Side {
    self.connections[conn.0].side
}
/// The `ConnectionId` used for `conn` locally.
pub fn get_local_id(&self, conn: ConnectionHandle) -> &ConnectionId {
    &self.connections[conn.0].local_id
}
/// The `ConnectionId` used for `conn` by the peer.
pub fn get_remote_id(&self, conn: ConnectionHandle) -> &ConnectionId {
    &self.connections[conn.0].remote_id
}
/// The UDP address at which `conn`'s peer is reached.
pub fn get_remote_address(&self, conn: ConnectionHandle) -> &SocketAddrV6 {
    &self.connections[conn.0].remote
}
/// The ALPN protocol negotiated for `conn`, if the handshake has completed and one was agreed.
pub fn get_protocol(&self, conn: ConnectionHandle) -> Option<&[u8]> {
    match *self.connections[conn.0].state.as_ref().unwrap() {
        // ALPN is only available from the established TLS session.
        State::Established(ref state) => state.tls.ssl().selected_alpn_protocol(),
        _ => None,
    }
}
/// The number of bytes of packets containing retransmittable frames that have not been acknowledged or declared lost
pub fn get_bytes_in_flight(&self, conn: ConnectionHandle) -> u64 {
    self.connections[conn.0].bytes_in_flight
}
/// Number of bytes worth of non-ack-only packets that may be sent.
pub fn get_congestion_state(&self, conn: ConnectionHandle) -> u64 {
    let connection = &self.connections[conn.0];
    // Saturates at zero when more is in flight than the window allows.
    connection.congestion_window.saturating_sub(connection.bytes_in_flight)
}
/// The name a client supplied via SNI.
///
/// None if no name was supplied or if this connection was locally-initiated.
pub fn get_servername(&self, conn: ConnectionHandle) -> Option<&str> {
    // SNI is readable from the TLS state both during and after the handshake.
    let state = self.connections[conn.0].state.as_ref().unwrap();
    match *state {
        State::Handshake(ref x) => x.tls.ssl().servername(ssl::NameType::HOST_NAME),
        State::Established(ref x) => x.tls.ssl().servername(ssl::NameType::HOST_NAME),
        _ => None,
    }
}
/// Whether a previous session was successfully resumed by `conn`.
pub fn get_session_resumed(&self, conn: ConnectionHandle) -> bool {
    match self.connections[conn.0].state.as_ref().unwrap() {
        State::Established(ref state) => state.tls.ssl().session_reused(),
        _ => false,
    }
}
/// Take the next connection whose incoming handshake has completed, if any.
pub fn accept(&mut self) -> Option<ConnectionHandle> {
    self.incoming.pop_front()
}
}
/// Protocol-level identifier for a connection.
///
/// Mainly useful for identifying this connection's packets on the wire with tools like Wireshark.
// Stored inline as up to MAX_CID_SIZE bytes; the ArrayVec's length is the ID's length.
#[derive(Debug, Clone, Eq, PartialEq, Ord, PartialOrd, Hash)]
pub struct ConnectionId(ArrayVec<[u8; MAX_CID_SIZE]>);
// Lets a ConnectionId be used wherever a byte slice is expected.
impl ::std::ops::Deref for ConnectionId {
    type Target = [u8];
    fn deref(&self) -> &[u8] { &self.0 }
}
// Mutable counterpart of the Deref impl above it in the original layout: in-place byte access.
impl ::std::ops::DerefMut for ConnectionId {
    fn deref_mut(&mut self) -> &mut [u8] { &mut self.0 }
}
impl ConnectionId {
    /// Construct from a fixed buffer, keeping only the first `len` bytes.
    pub(crate) fn new(data: [u8; MAX_CID_SIZE], len: usize) -> Self {
        let mut x = ConnectionId(data.into());
        x.0.truncate(len);
        x
    }
    /// Generate a random ID of `len` bytes; `len` must not exceed MAX_CID_SIZE.
    fn random<R: Rng>(rng: &mut R, len: u8) -> Self {
        debug_assert!(len as usize <= MAX_CID_SIZE);
        // Fill the whole buffer, then trim to the requested length.
        let mut v = ArrayVec::from([0; MAX_CID_SIZE]);
        rng.fill_bytes(&mut v[0..len as usize]);
        v.truncate(len as usize);
        ConnectionId(v)
    }
}
impl fmt::Display for ConnectionId {
    /// Renders the ID as contiguous lowercase hex, one pair of digits per byte.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        self.0.iter().try_for_each(|byte| write!(f, "{:02x}", byte))
    }
}
// Lets a ConnectionId appear directly as a structured slog field, formatted via Display.
impl slog::Value for ConnectionId {
    fn serialize(&self, _: &slog::Record, key: slog::Key, serializer: &mut slog::Serializer) -> slog::Result {
        serializer.emit_arguments(key, &format_args!("{}", self))
    }
}
// Which set of keys protects a packet.
#[derive(Copy, Clone, Eq, PartialEq)]
enum Crypto {
    // 0-RTT (early data) keys
    ZeroRtt,
    // Handshake keys
    Handshake,
    // Keys established by the completed handshake
    OneRtt,
}
/// Per-connection state, stored in the endpoint's connection table.
struct Connection {
    /// DCID of Initial packet
    initial_id: ConnectionId,
    /// CID by which the peer addresses us
    local_id: ConnectionId,
    /// CID by which we address the peer
    remote_id: ConnectionId,
    /// Peer's UDP address
    remote: SocketAddrV6,
    /// State machine; `None` only transiently while a handler has taken ownership of it
    state: Option<State>,
    /// Whether we initiated the connection (Client) or accepted it (Server)
    side: Side,
    /// Maximum packet size; starts at MIN_MTU
    mtu: u16,
    // NOTE(review): rx_packet/rx_packet_time appear to track the most significant received
    // packet number and its arrival time — confirm against their updater.
    rx_packet: u64,
    rx_packet_time: u64,
    /// 1-RTT keys; set once the handshake completes
    crypto: Option<CryptoContext>,
    /// Superseded keys plus a packet number — presumably retained across a key update to
    /// decrypt straggler packets; TODO confirm
    prev_crypto: Option<(u64, CryptoContext)>,
    /// Keys for 0-RTT packets, when supported
    zero_rtt_crypto: Option<ZeroRttCrypto>,
    /// Current key phase — NOTE(review): inferred from name, confirm
    key_phase: bool,
    /// Transport parameters supplied by the peer
    params: TransportParameters,
    /// Streams with data buffered for reading by the application
    readable_streams: FnvHashSet<StreamId>,
    /// Streams on which writing was blocked on *connection-level* flow or congestion control
    blocked_streams: FnvHashSet<StreamId>,
    /// Limit on outgoing data, dictated by peer
    max_data: u64,
    /// Total outgoing data sent — NOTE(review): inferred from name, confirm the accounting
    data_sent: u64,
    /// Sum of end offsets of all streams. Includes gaps, so it's an upper bound.
    data_recvd: u64,
    /// Limit on incoming data
    local_max_data: u64,
    //
    // Loss Detection
    //
    /// The number of times the handshake packets have been retransmitted without receiving an ack.
    handshake_count: u32,
    /// The number of times a tail loss probe has been sent without receiving an ack.
    tlp_count: u32,
    /// The number of times an rto has been sent without receiving an ack.
    rto_count: u32,
    /// The largest packet number gap between the largest acked retransmittable packet and an unacknowledged
    /// retransmittable packet before it is declared lost.
    reordering_threshold: u32,
    /// The time at which the next packet will be considered lost based on early transmit or exceeding the reordering
    /// window in time.
    loss_time: u64,
    /// The most recent RTT measurement made when receiving an ack for a previously unacked packet. μs
    latest_rtt: u64,
    /// The smoothed RTT of the connection, computed as described in RFC6298. μs
    smoothed_rtt: u64,
    /// The RTT variance, computed as described in RFC6298
    rttvar: u64,
    /// The minimum RTT seen in the connection, ignoring ack delay.
    min_rtt: u64,
    /// The maximum ack delay in an incoming ACK frame for this connection.
    ///
    /// Excludes ack delays for ack only packets and those that create an RTT sample less than min_rtt.
    max_ack_delay: u64,
    /// The last packet number sent prior to the first retransmission timeout.
    largest_sent_before_rto: u64,
    /// The time the most recently sent retransmittable packet was sent.
    time_of_last_sent_retransmittable_packet: u64,
    /// The time the most recently sent handshake packet was sent.
    time_of_last_sent_handshake_packet: u64,
    /// The packet number of the most recently sent packet.
    largest_sent_packet: u64,
    /// The largest packet number the remote peer acknowledged in an ACK frame.
    largest_acked_packet: u64,
    /// Transmitted but not acked
    sent_packets: BTreeMap<u64, SentPacket>,
    //
    // Congestion Control
    //
    /// The sum of the size in bytes of all sent packets that contain at least one retransmittable frame, and have not
    /// been acked or declared lost.
    ///
    /// The size does not include IP or UDP overhead. Packets only containing ACK frames do not count towards
    /// bytes_in_flight to ensure congestion control does not impede congestion feedback.
    bytes_in_flight: u64,
    /// Maximum number of bytes in flight that may be sent.
    congestion_window: u64,
    /// The largest packet number sent when QUIC detects a loss. When a larger packet is acknowledged, QUIC exits recovery.
    end_of_recovery: u64,
    /// Slow start threshold in bytes. When the congestion window is below ssthresh, the mode is slow start and the
    /// window grows by the number of bytes acknowledged.
    ssthresh: u64,
    //
    // Handshake retransmit state
    //
    /// Whether we've sent handshake packets that have not been either explicitly acknowledged or rendered moot by
    /// handshake completion, i.e. whether we're waiting for proof that the peer has advanced their handshake state
    /// machine.
    awaiting_handshake: bool,
    /// Handshake frames awaiting (re)transmission
    handshake_pending: Retransmits,
    /// Keys protecting handshake packets
    handshake_crypto: CryptoContext,
    //
    // Transmit queue
    //
    /// Frames queued for transmission
    pending: Retransmits,
    /// Packet-number ranges pending acknowledgment — NOTE(review): inferred from name, confirm
    pending_acks: RangeSet,
    /// Set iff we have received a non-ack frame since the last ack-only packet we sent
    permit_ack_only: bool,
    // Timer updates: None if no change, Some(None) to stop, Some(Some(_)) to reset
    set_idle: Option<Option<u64>>,
    set_loss_detection: Option<Option<u64>>,
    //
    // Stream states
    //
    /// All live streams, keyed by ID
    streams: FnvHashMap<StreamId, Stream>,
    /// Index of the next locally-initiated unidirectional stream
    next_uni_stream: u64,
    /// Index of the next locally-initiated bidirectional stream
    next_bi_stream: u64,
    // Locally initiated
    max_uni_streams: u64,
    max_bi_streams: u64,
    // Remotely initiated
    max_remote_uni_streams: u64,
    max_remote_bi_streams: u64,
    /// Streams whose use has finished — NOTE(review): exact semantics not visible here, confirm
    finished_streams: Vec<StreamId>,
}
/// Represents one or more packets subject to retransmission
#[derive(Debug, Clone)]
struct SentPacket {
    /// When the packet was sent, in the same timebase as `now` elsewhere
    time: u64,
    /// 0 iff ack-only
    bytes: u16,
    /// Whether this was a handshake packet; these are retransmitted as a group on a loss alarm
    handshake: bool,
    /// ACK ranges this packet carried — presumably so they can be reissued on loss; TODO confirm
    acks: RangeSet,
    /// Frames to re-queue if the packet is declared lost
    retransmits: Retransmits
}
impl SentPacket {
    /// True when the packet carried no retransmittable data (`bytes` is 0 iff ack-only).
    fn ack_only(&self) -> bool { self.bytes == 0 }
}
/// Frames pending (re)transmission.
///
/// Boolean fields flag parameterless frame types to send; the collections carry
/// the payloads of parameterized frames.
#[derive(Debug, Clone)]
struct Retransmits {
    max_data: bool,
    max_uni_stream_id: bool,
    max_bi_stream_id: bool,
    ping: bool,
    new_connection_id: Option<ConnectionId>,
    /// Stream data frames to send
    stream: VecDeque<frame::Stream>,
    /// packet number, token
    path_response: Option<(u64, u64)>,
    /// (stream, error code) pairs queued by `reset`
    rst_stream: Vec<(StreamId, u16)>,
    /// (stream, error code) pairs queued by `stop_sending`
    stop_sending: Vec<(StreamId, u16)>,
    /// Streams whose flow control window updates need sending
    max_stream_data: FnvHashSet<StreamId>,
}
impl Retransmits {
    /// Whether nothing at all is queued for (re)transmission.
    fn is_empty(&self) -> bool {
        let flags = self.max_data || self.max_uni_stream_id || self.max_bi_stream_id || self.ping;
        let queues = self.new_connection_id.is_some()
            || !self.stream.is_empty()
            || self.path_response.is_some()
            || !self.rst_stream.is_empty()
            || !self.stop_sending.is_empty()
            || !self.max_stream_data.is_empty();
        !(flags || queues)
    }
    /// Queue a PATH_RESPONSE, keeping only the response for the newest challenge packet.
    fn path_challenge(&mut self, packet: u64, token: u64) {
        let replace = match self.path_response {
            None => true,
            Some((existing, _)) => packet > existing,
        };
        if replace {
            self.path_response = Some((packet, token));
        }
    }
}
impl Default for Retransmits {
    /// The empty set: no flags raised, no frames queued.
    fn default() -> Self { Self {
        max_data: false,
        max_uni_stream_id: false,
        max_bi_stream_id: false,
        ping: false,
        new_connection_id: None,
        stream: VecDeque::new(),
        path_response: None,
        rst_stream: Vec::new(),
        stop_sending: Vec::new(),
        max_stream_data: FnvHashSet::default(),
    }}
}
impl ::std::ops::AddAssign for Retransmits {
    /// Merge `rhs` into `self`, e.g. when re-queueing the frames of a lost packet.
    fn add_assign(&mut self, rhs: Self) {
        self.max_data = self.max_data || rhs.max_data;
        self.max_uni_stream_id = self.max_uni_stream_id || rhs.max_uni_stream_id;
        self.max_bi_stream_id = self.max_bi_stream_id || rhs.max_bi_stream_id;
        self.ping = self.ping || rhs.ping;
        if let Some(id) = rhs.new_connection_id {
            self.new_connection_id = Some(id);
        }
        self.stream.extend(rhs.stream.into_iter());
        if let Some((packet, token)) = rhs.path_response {
            // path_challenge keeps whichever response corresponds to the newer packet.
            self.path_challenge(packet, token);
        }
        self.rst_stream.extend_from_slice(&rhs.rst_stream);
        self.stop_sending.extend_from_slice(&rhs.stop_sending);
        self.max_stream_data.extend(&rhs.max_stream_data);
    }
}
impl ::std::iter::FromIterator<Retransmits> for Retransmits {
    /// Union of all the sets in the iterator, folded together with `+=`.
    fn from_iter<T>(iter: T) -> Self
        where T: IntoIterator<Item = Retransmits>
    {
        iter.into_iter().fold(Retransmits::default(), |mut acc, packet| {
            acc += packet;
            acc
        })
    }
}
impl Connection {
/// Build a fresh connection in its initial state.
///
/// Pre-registers receive state for every remotely-initiated stream the config permits,
/// plus bidirectional stream 0 (which carries the handshake).
fn new(initial_id: ConnectionId, local_id: ConnectionId, remote_id: ConnectionId, remote: SocketAddrV6,
       initial_packet_number: u64, side: Side, config: &Config) -> Self
{
    // Handshake keys are derived from the client's initial DCID.
    let handshake_crypto = CryptoContext::handshake(&initial_id, side);
    let mut streams = FnvHashMap::default();
    for i in 0..config.max_remote_uni_streams {
        streams.insert(StreamId::new(!side, Directionality::Uni, i as u64), stream::Recv::new(config.stream_receive_window as u64).into());
    }
    // Stream 0 is always open.
    streams.insert(StreamId(0), Stream::new_bi(config.stream_receive_window as u64));
    // On the server, client-initiated stream 0 (inserted above) counts against the remote
    // bidirectional limit, so the loop below skips index 0 and the cap is extended by one.
    let max_remote_bi_streams = config.max_remote_bi_streams as u64 + match side { Side::Server => 1, _ => 0 };
    for i in match side { Side::Server => 1, _ => 0 }..max_remote_bi_streams {
        streams.insert(StreamId::new(!side, Directionality::Bi, i as u64), Stream::new_bi(config.stream_receive_window as u64));
    }
    Self {
        initial_id, local_id, remote_id, remote, side,
        state: None,
        mtu: MIN_MTU,
        rx_packet: 0,
        rx_packet_time: 0,
        crypto: None,
        prev_crypto: None,
        zero_rtt_crypto: None,
        key_phase: false,
        params: TransportParameters::default(),
        readable_streams: FnvHashSet::default(),
        blocked_streams: FnvHashSet::default(),
        max_data: 0,
        data_sent: 0,
        data_recvd: 0,
        local_max_data: config.receive_window as u64,
        handshake_count: 0,
        tlp_count: 0,
        rto_count: 0,
        // Time-based loss detection disables the packet-reordering threshold entirely.
        reordering_threshold: if config.using_time_loss_detection { u32::max_value() } else { config.reordering_threshold },
        loss_time: 0,
        latest_rtt: 0,
        smoothed_rtt: 0,
        rttvar: 0,
        min_rtt: u64::max_value(),
        max_ack_delay: 0,
        largest_sent_before_rto: 0,
        time_of_last_sent_retransmittable_packet: 0,
        time_of_last_sent_handshake_packet: 0,
        // Wraps so that the first increment yields initial_packet_number.
        largest_sent_packet: initial_packet_number.overflowing_sub(1).0,
        largest_acked_packet: 0,
        sent_packets: BTreeMap::new(),
        bytes_in_flight: 0,
        congestion_window: config.initial_window,
        end_of_recovery: 0,
        ssthresh: u64::max_value(),
        awaiting_handshake: false,
        handshake_pending: Retransmits::default(),
        handshake_crypto,
        pending: Retransmits::default(),
        pending_acks: RangeSet::new(),
        permit_ack_only: false,
        set_idle: None,
        set_loss_detection: None,
        streams,
        // The client owns bidirectional stream 0, so its next locally-initiated bi stream is 1.
        next_uni_stream: 0,
        next_bi_stream: match side { Side::Client => 1, Side::Server => 0 },
        max_uni_streams: 0,
        max_bi_streams: 0,
        max_remote_uni_streams: config.max_remote_uni_streams as u64,
        max_remote_bi_streams,
        finished_streams: Vec::new(),
    }
}
/// Allocate the next outgoing packet number.
///
/// Packet numbers may not exceed 2^62 - 1 (the varint encoding limit).
fn get_tx_number(&mut self) -> u64 {
    self.largest_sent_packet = self.largest_sent_packet.wrapping_add(1);
    // TODO: Handle packet number overflow gracefully
    assert!(self.largest_sent_packet <= 2u64.pow(62)-1);
    self.largest_sent_packet
}
/// Record bookkeeping for a packet we just transmitted.
///
/// A packet with `bytes == 0` is treated as ack-only: it is tracked but does
/// not count toward bytes in flight and does not (re)arm the loss alarm.
fn on_packet_sent(&mut self, config: &Config, now: u64, packet_number: u64, packet: SentPacket) {
    self.largest_sent_packet = packet_number;
    let bytes = packet.bytes;
    let handshake = packet.handshake;
    if handshake {
        self.awaiting_handshake = true;
    }
    self.sent_packets.insert(packet_number, packet);
    if bytes != 0 {
        self.time_of_last_sent_retransmittable_packet = now;
        if handshake {
            self.time_of_last_sent_handshake_packet = now;
        }
        self.bytes_in_flight += bytes as u64;
        self.set_loss_detection_alarm(config);
    }
}
/// Updates set_loss_detection
///
/// Process an incoming ACK frame: sample RTT from the largest newly-acked
/// packet (if still tracked), mark every packet covered by the ack ranges as
/// delivered, then re-run loss detection and re-arm the alarm.
fn on_ack_received(&mut self, config: &Config, now: u64, ack: frame::Ack) {
    self.largest_acked_packet = cmp::max(self.largest_acked_packet, ack.largest); // TODO: Validate
    if let Some(info) = self.sent_packets.get(&ack.largest).cloned() {
        self.latest_rtt = now - info.time;
        // ack.delay arrives scaled by the peer's ack_delay_exponent.
        let delay = ack.delay << self.params.ack_delay_exponent;
        self.update_rtt(delay, info.ack_only());
    }
    for range in &ack {
        // Avoid DoS from unreasonably huge ack ranges
        let packets = self.sent_packets.range(range).map(|(&n, _)| n).collect::<Vec<_>>();
        for packet in packets {
            self.on_packet_acked(config, packet);
        }
    }
    self.detect_lost_packets(config, now, ack.largest);
    self.set_loss_detection_alarm(config);
}
/// Fold a new RTT sample (already stored in `self.latest_rtt`) into the
/// smoothed estimators.
///
/// `ack_delay` is the peer-reported delay between receiving the acked packet
/// and sending the ack; it is subtracted from the sample only when doing so
/// would not push the sample below the observed minimum RTT.
fn update_rtt(&mut self, ack_delay: u64, ack_only: bool) {
    self.min_rtt = cmp::min(self.min_rtt, self.latest_rtt);
    if self.latest_rtt - self.min_rtt > ack_delay {
        self.latest_rtt -= ack_delay;
        if !ack_only {
            self.max_ack_delay = cmp::max(self.max_ack_delay, ack_delay);
        }
    }
    if self.smoothed_rtt == 0 {
        // First sample seeds the estimators directly.
        self.smoothed_rtt = self.latest_rtt;
        self.rttvar = self.latest_rtt / 2;
    } else {
        // Exponentially weighted moving averages (weights 3/4 and 7/8).
        let rttvar_sample = (self.smoothed_rtt as i64 - self.latest_rtt as i64).abs() as u64;
        self.rttvar = (3 * self.rttvar + rttvar_sample) / 4;
        self.smoothed_rtt = (7 * self.smoothed_rtt + self.latest_rtt) / 8;
    }
}
// Not timing-aware, so it's safe to call this for inferred acks, such as arise from high-latency handshakes
/// Process confirmed delivery of `packet`: release congestion-control state,
/// reset loss-detection counters, and finalize delivery of the frames the
/// packet carried. A no-op if the packet is no longer tracked.
fn on_packet_acked(&mut self, config: &Config, packet: u64) {
    let info = if let Some(x) = self.sent_packets.remove(&packet) { x } else { return; };
    if info.bytes != 0 {
        // Congestion control
        self.bytes_in_flight -= info.bytes as u64;
        // Do not increase congestion window in recovery period.
        if !self.in_recovery(packet) {
            if self.congestion_window < self.ssthresh {
                // Slow start.
                self.congestion_window += info.bytes as u64;
            } else {
                // Congestion avoidance.
                self.congestion_window += config.default_mss * info.bytes as u64 / self.congestion_window;
            }
        }
    }
    // Loss recovery
    // If a packet sent prior to RTO was acked, then the RTO was spurious. Otherwise, inform congestion control.
    if self.rto_count > 0 && packet > self.largest_sent_before_rto {
        // Retransmission timeout verified
        self.congestion_window = config.minimum_window;
    }
    self.handshake_count = 0;
    self.tlp_count = 0;
    self.rto_count = 0;
    // Update state for confirmed delivery of frames
    for (id, _) in info.retransmits.rst_stream {
        // An acked RST_STREAM moves the send half from ResetSent to ResetRecvd.
        if let stream::SendState::ResetSent { stop_reason } = self.streams.get_mut(&id).unwrap().send_mut().unwrap().state {
            self.streams.get_mut(&id).unwrap().send_mut().unwrap().state = stream::SendState::ResetRecvd { stop_reason };
            if stop_reason.is_none() {
                self.maybe_cleanup(id);
            }
        }
    }
    for frame in info.retransmits.stream {
        let recvd = {
            let ss = if let Some(x) = self.streams.get_mut(&frame.id) { x.send_mut().unwrap() } else { continue; };
            ss.bytes_in_flight -= frame.data.len() as u64;
            // Everything sent (including FIN) is delivered: the send half is complete.
            if ss.state == stream::SendState::DataSent && ss.bytes_in_flight == 0 {
                ss.state = stream::SendState::DataRecvd;
                true
            } else { false }
        };
        if recvd {
            self.maybe_cleanup(frame.id);
            self.finished_streams.push(frame.id);
        }
    }
    // Acks carried by this packet no longer need retransmission.
    self.pending_acks.subtract(&info.acks);
}
/// Scan `sent_packets` for packets that should now be declared lost, given
/// that `largest_acked` was just acknowledged.
///
/// A packet is lost when it was sent long enough before now (time-based
/// detection / early retransmit) or when sufficiently many later packets were
/// acknowledged (reordering threshold). Lost packets' frames are requeued for
/// retransmission, and the congestion window is reduced at most once per
/// recovery epoch.
fn detect_lost_packets(&mut self, config: &Config, now: u64, largest_acked: u64) {
    self.loss_time = 0;
    let mut lost_packets = Vec::<u64>::new();
    let delay_until_lost;
    let rtt = cmp::max(self.latest_rtt, self.smoothed_rtt);
    if config.using_time_loss_detection {
        // factor * (1 + fraction), with the fraction in 16-bit fixed point.
        // The inner shift must be parenthesized: `+` binds tighter than `>>`
        // in Rust, so the unparenthesized form computed ((rtt + rtt*frac) >> 16).
        delay_until_lost = rtt + ((rtt * config.time_reordering_fraction as u64) >> 16);
    } else if largest_acked == self.largest_sent_packet {
        // Early retransmit alarm.
        delay_until_lost = (5 * rtt) / 4;
    } else {
        delay_until_lost = u64::max_value();
    }
    for (&packet, info) in self.sent_packets.range(0..largest_acked) {
        let time_since_sent = now - info.time;
        let delta = largest_acked - packet;
        // Use of >= for time comparison here is critical so that we successfully detect lost packets in testing
        // when rtt = 0
        if time_since_sent >= delay_until_lost || delta > self.reordering_threshold as u64 {
            lost_packets.push(packet);
        } else if self.loss_time == 0 && delay_until_lost != u64::max_value() {
            // Earliest future instant at which an outstanding packet becomes lost.
            self.loss_time = now + delay_until_lost - time_since_sent;
        }
    }
    if let Some(largest_lost) = lost_packets.last().cloned() {
        let old_bytes_in_flight = self.bytes_in_flight;
        for packet in lost_packets {
            let info = self.sent_packets.remove(&packet).unwrap();
            // Requeue the lost packet's frames in the appropriate retransmission set.
            if info.handshake {
                self.handshake_pending += info.retransmits;
            } else {
                self.pending += info.retransmits;
            }
            self.bytes_in_flight -= info.bytes as u64;
        }
        // Don't apply congestion penalty for lost ack-only packets
        let lost_nonack = old_bytes_in_flight != self.bytes_in_flight;
        // Start a new recovery epoch if the lost packet is larger than the end of the previous recovery epoch.
        if lost_nonack && !self.in_recovery(largest_lost) {
            self.end_of_recovery = self.largest_sent_packet;
            // *= factor
            self.congestion_window = (self.congestion_window * config.loss_reduction_factor as u64) >> 16;
            self.congestion_window = cmp::max(self.congestion_window, config.minimum_window);
            self.ssthresh = self.congestion_window;
        }
    }
}
fn in_recovery(&self, packet: u64) -> bool { packet <= self.end_of_recovery }
/// Recompute when the loss-detection alarm should next fire, in priority
/// order: handshake retransmission, early-retransmit/time-loss, then TLP/RTO.
/// The result is written to `self.set_loss_detection` for the caller to apply;
/// `Some(None)` cancels the alarm.
fn set_loss_detection_alarm(&mut self, config: &Config) {
    if self.bytes_in_flight == 0 {
        // Nothing outstanding: cancel any pending alarm.
        self.set_loss_detection = Some(None);
        return;
    }
    let mut alarm_duration: u64;
    if self.awaiting_handshake {
        // Handshake retransmission alarm.
        if self.smoothed_rtt == 0 {
            alarm_duration = 2 * config.default_initial_rtt;
        } else {
            alarm_duration = 2 * self.smoothed_rtt;
        }
        alarm_duration = cmp::max(alarm_duration + self.max_ack_delay,
                                  config.min_tlp_timeout);
        // Exponential backoff per consecutive handshake timeout.
        alarm_duration = alarm_duration * 2u64.pow(self.handshake_count);
        self.set_loss_detection = Some(Some(self.time_of_last_sent_handshake_packet + alarm_duration));
        return;
    }
    if self.loss_time != 0 {
        // Early retransmit timer or time loss detection.
        alarm_duration = self.loss_time - self.time_of_last_sent_retransmittable_packet;
    } else {
        // TLP or RTO alarm
        alarm_duration = self.rto(config);
        if self.tlp_count < config.max_tlps {
            // Tail Loss Probe
            let tlp_duration = cmp::max((3 * self.smoothed_rtt) / 2 + self.max_ack_delay,
                                        config.min_tlp_timeout);
            alarm_duration = cmp::min(alarm_duration, tlp_duration);
        }
    }
    self.set_loss_detection = Some(Some(self.time_of_last_sent_retransmittable_packet + alarm_duration));
}
/// Retransmit time-out
///
/// Base timeout is `smoothed_rtt + 4*rttvar + max_ack_delay`, floored at the
/// configured minimum and doubled for each consecutive RTO (exponential backoff).
fn rto(&self, config: &Config) -> u64 {
    let computed = self.smoothed_rtt + 4 * self.rttvar + self.max_ack_delay;
    cmp::max(computed, config.min_rto_timeout) * 2u64.pow(self.rto_count)
}
/// Note that `packet` was successfully authenticated: queue it for acking and
/// track the largest authenticated packet number and its arrival time.
fn on_packet_authenticated(&mut self, now: u64, packet: u64) {
    self.pending_acks.insert_one(packet);
    // Bound ack state; drop the lowest-numbered range when we track too many.
    if self.pending_acks.len() > MAX_ACK_BLOCKS {
        self.pending_acks.pop_min();
    }
    if packet > self.rx_packet {
        self.rx_packet = packet;
        self.rx_packet_time = now;
    }
}
/// Consider all previously transmitted handshake packets to be delivered. Called when we receive a new handshake packet.
fn handshake_cleanup(&mut self, config: &Config) {
    if !self.awaiting_handshake {
        return;
    }
    self.awaiting_handshake = false;
    self.handshake_pending = Retransmits::default();
    // Collect first: on_packet_acked mutates sent_packets while we iterate.
    let packets = self.sent_packets.iter()
        .filter(|&(_, info)| info.handshake)
        .map(|(&packet, _)| packet)
        .collect::<Vec<_>>();
    for packet in packets {
        self.on_packet_acked(config, packet);
    }
    self.set_loss_detection_alarm(config);
}
/// Attempt a key update: derive next-generation keys and try to decrypt
/// `payload` with them. On success, install the new keys, retain the old ones
/// with `packet` as the boundary (for packets still in flight under the old
/// keys), and flip the key phase. Returns `None` if the payload doesn't
/// authenticate under the updated keys.
fn update_keys(&mut self, packet: u64, header: &[u8], payload: &[u8]) -> Option<Vec<u8>> {
    let new = self.crypto.as_mut().unwrap().update(self.side);
    let data = new.decrypt(packet, header, payload)?;
    let old = mem::replace(self.crypto.as_mut().unwrap(), new);
    self.prev_crypto = Some((packet, old));
    self.key_phase = !self.key_phase;
    Some(data)
}
/// Decrypt a packet payload with the appropriate keys: handshake keys for
/// handshake packets, pre-update keys for packets numbered before the key
/// update boundary, and the current 1-RTT keys otherwise.
fn decrypt(&self, handshake: bool, packet: u64, header: &[u8], payload: &[u8]) -> Option<Vec<u8>> {
    if handshake {
        return self.handshake_crypto.decrypt(packet, header, payload);
    }
    if let Some((boundary, ref prev)) = self.prev_crypto {
        if packet < boundary {
            return prev.decrypt(packet, header, payload);
        }
    }
    self.crypto.as_ref().unwrap().decrypt(packet, header, payload)
}
/// Queue `data` for transmission on `stream`, advancing the send offset and
/// stream bytes-in-flight. The caller is responsible for staying within flow
/// control and congestion budgets.
fn transmit(&mut self, stream: StreamId, data: Bytes) {
    let ss = self.streams.get_mut(&stream).unwrap().send_mut().unwrap();
    assert_eq!(ss.state, stream::SendState::Ready);
    let offset = ss.offset;
    ss.offset += data.len() as u64;
    ss.bytes_in_flight += data.len() as u64;
    // Stream 0 is exempt from connection-level flow control accounting.
    if stream != StreamId(0) {
        self.data_sent += data.len() as u64;
    }
    self.pending.stream.push_back(frame::Stream {
        offset, fin: false, data,
        id: stream,
    });
}
/// Assemble, encrypt, and record the next outgoing packet, if anything is
/// pending.
///
/// Chooses between long-header handshake packets, 0-RTT, and 1-RTT
/// short-header packets, then greedily packs pending frames until the MTU
/// budget is reached. Returns `None` when nothing may be sent (connection
/// closed, congestion-blocked, or nothing pending).
fn next_packet(&mut self, log: &Logger, config: &Config, now: u64) -> Option<Vec<u8>> {
    let established = match *self.state.as_ref().unwrap() {
        State::Handshake(_) => false,
        State::Established(_) => true,
        ref e => { assert!(e.is_closed()); return None; }
    };
    let mut buf = Vec::new();
    let mut sent = Retransmits::default();
    let acks;
    let number;
    let ack_only;
    let is_initial;
    let header_len;
    let crypto;
    {
        let pending;
        if (!established || self.awaiting_handshake) && (!self.handshake_pending.is_empty() || (!self.pending_acks.is_empty() && self.permit_ack_only)) {
            // (re)transmit handshake data in long-header packets
            buf.reserve_exact(self.mtu as usize);
            number = self.get_tx_number();
            trace!(log, "sending handshake packet"; "pn" => number);
            // A client flight whose stream data starts at offset 0 is the first
            // flight, which goes in an Initial packet.
            let ty = if self.side == Side::Client && self.handshake_pending.stream.front().map_or(false, |x| x.offset == 0) {
                match *self.state.as_mut().unwrap() {
                    State::Handshake(ref mut state) => {
                        if state.clienthello_packet.is_none() {
                            state.clienthello_packet = Some(number as u32);
                        }
                    }
                    _ => {}
                }
                is_initial = true;
                packet::INITIAL
            } else {
                is_initial = false;
                packet::HANDSHAKE
            };
            Header::Long {
                ty, number: number as u32, source_id: self.local_id.clone(), destination_id: self.remote_id.clone()
            }.encode(&mut buf);
            pending = &mut self.handshake_pending;
            crypto = Crypto::Handshake;
        } else if established || (self.zero_rtt_crypto.is_some() && self.side == Side::Client) {
            // Send 0RTT or 1RTT data
            is_initial = false;
            if self.congestion_blocked() || self.pending.is_empty() && (!self.permit_ack_only || self.pending_acks.is_empty())
            {
                return None;
            }
            number = self.get_tx_number();
            buf.reserve_exact(self.mtu as usize);
            trace!(log, "sending protected packet"; "pn" => number);
            if !established {
                crypto = Crypto::ZeroRtt;
                Header::Long {
                    ty: packet::ZERO_RTT, number: number as u32,
                    source_id: self.local_id.clone(), destination_id: self.initial_id.clone()
                }.encode(&mut buf);
            } else {
                crypto = Crypto::OneRtt;
                Header::Short {
                    id: self.remote_id.clone(),
                    number: PacketNumber::new(number, self.largest_acked_packet),
                    key_phase: self.key_phase
                }.encode(&mut buf);
            }
            pending = &mut self.pending;
        } else {
            return None;
        }
        ack_only = pending.is_empty();
        header_len = buf.len() as u16;
        // Leave room for the AEAD tag appended by `encrypt`.
        let max_size = self.mtu as usize - AEAD_TAG_SIZE;
        // PING
        if pending.ping {
            trace!(log, "ping");
            pending.ping = false;
            sent.ping = true;
            buf.write(frame::Type::PING);
        }
        // ACK
        // We will never ack protected packets in handshake packets because handshake_cleanup ensures we never send
        // handshake packets after receiving protected packets.
        // 0-RTT packets must never carry acks (which would have to be of handshake packets)
        if !self.pending_acks.is_empty() && crypto != Crypto::ZeroRtt {
            let delay = (now - self.rx_packet_time) >> ACK_DELAY_EXPONENT;
            trace!(log, "ACK"; "ranges" => ?self.pending_acks.iter().collect::<Vec<_>>(), "delay" => delay);
            frame::Ack::encode(delay, &self.pending_acks, &mut buf);
            acks = self.pending_acks.clone();
        } else {
            acks = RangeSet::new();
        }
        // PATH_RESPONSE
        if buf.len() + 9 < max_size {
            // No need to retransmit these, so we don't save the value after encoding it.
            if let Some((_, x)) = pending.path_response.take() {
                trace!(log, "PATH_RESPONSE"; "value" => format!("{:08x}", x));
                buf.write(frame::Type::PATH_RESPONSE);
                buf.write(x);
            }
        }
        // RST_STREAM
        while buf.len() + 19 < max_size {
            let (id, error_code) = if let Some(x) = pending.rst_stream.pop() { x } else { break; };
            // The stream may have been cleaned up since the reset was queued.
            let stream = if let Some(x) = self.streams.get(&id) { x } else { continue; };
            trace!(log, "RST_STREAM"; "stream" => id.0);
            sent.rst_stream.push((id, error_code));
            frame::RstStream {
                id, error_code,
                final_offset: stream.send().unwrap().offset,
            }.encode(&mut buf);
        }
        // STOP_SENDING
        while buf.len() + 11 < max_size {
            let (id, error_code) = if let Some(x) = pending.stop_sending.pop() { x } else { break; };
            let stream = if let Some(x) = self.streams.get(&id) { x.recv().unwrap() } else { continue; };
            // Pointless if all data has already arrived.
            if stream.is_finished() { continue; }
            trace!(log, "STOP_SENDING"; "stream" => id.0);
            sent.stop_sending.push((id, error_code));
            buf.write(frame::Type::STOP_SENDING);
            buf.write(id);
            buf.write(error_code);
        }
        // MAX_DATA
        if pending.max_data && buf.len() + 9 < max_size {
            trace!(log, "MAX_DATA"; "value" => self.local_max_data);
            pending.max_data = false;
            sent.max_data = true;
            buf.write(frame::Type::MAX_DATA);
            buf.write_var(self.local_max_data);
        }
        // MAX_STREAM_DATA
        while buf.len() + 17 < max_size {
            let id = if let Some(x) = pending.max_stream_data.iter().next() { *x } else { break; };
            pending.max_stream_data.remove(&id);
            let rs = if let Some(x) = self.streams.get(&id) { x.recv().unwrap() } else { continue; };
            if rs.is_finished() { continue; }
            sent.max_stream_data.insert(id);
            trace!(log, "MAX_STREAM_DATA"; "stream" => id.0, "value" => rs.max_data);
            buf.write(frame::Type::MAX_STREAM_DATA);
            buf.write(id);
            buf.write_var(rs.max_data);
        }
        // MAX_STREAM_ID uni
        if pending.max_uni_stream_id && buf.len() + 9 < max_size {
            pending.max_uni_stream_id = false;
            sent.max_uni_stream_id = true;
            trace!(log, "MAX_STREAM_ID (unidirectional)");
            buf.write(frame::Type::MAX_STREAM_ID);
            buf.write(StreamId::new(!self.side, Directionality::Uni, self.max_remote_uni_streams - 1));
        }
        // MAX_STREAM_ID bi
        if pending.max_bi_stream_id && buf.len() + 9 < max_size {
            pending.max_bi_stream_id = false;
            sent.max_bi_stream_id = true;
            trace!(log, "MAX_STREAM_ID (bidirectional)");
            buf.write(frame::Type::MAX_STREAM_ID);
            buf.write(StreamId::new(!self.side, Directionality::Bi, self.max_remote_bi_streams - 1));
        }
        // STREAM
        while buf.len() + 25 < max_size {
            let mut stream = if let Some(x) = pending.stream.pop_front() { x } else { break; };
            // Drop queued data for streams that have since been reset (stream 0 excepted).
            if stream.id != StreamId(0) && self.streams.get(&stream.id).map_or(true, |s| s.send().unwrap().state.was_reset()) {
                continue;
            }
            let len = cmp::min(stream.data.len(), max_size as usize - buf.len() - 25);
            let data = stream.data.split_to(len);
            let fin = stream.fin && stream.data.is_empty();
            trace!(log, "STREAM"; "id" => stream.id.0, "off" => stream.offset, "len" => len, "fin" => fin);
            let frame = frame::Stream {
                id: stream.id,
                offset: stream.offset,
                fin: fin,
                data: data,
            };
            frame.encode(true, &mut buf);
            sent.stream.push_back(frame);
            // Requeue whatever didn't fit in this packet, at the advanced offset.
            if !stream.data.is_empty() {
                let stream = frame::Stream { offset: stream.offset + len as u64, ..stream };
                pending.stream.push_front(stream);
            }
        }
    }
    // Initial packets are padded up to the minimum size (presumably per the
    // QUIC spec's minimum Initial size — confirm against `MIN_INITIAL_SIZE`'s origin).
    if is_initial && buf.len() < MIN_INITIAL_SIZE - AEAD_TAG_SIZE {
        buf.resize(MIN_INITIAL_SIZE - AEAD_TAG_SIZE, frame::Type::PADDING.into());
    }
    if crypto != Crypto::OneRtt {
        // Long headers carry an explicit payload length; fill the placeholder.
        set_payload_length(&mut buf, header_len as usize);
    }
    self.encrypt(crypto, number, &mut buf, header_len);
    // If we sent any acks, don't immediately resend them. Setting this even if ack_only is false needlessly
    // prevents us from ACKing the next packet if it's ACK-only, but saves the need for subtler logic to avoid
    // double-transmitting acks all the time.
    self.permit_ack_only &= acks.is_empty();
    self.on_packet_sent(config, now, number, SentPacket {
        acks,
        time: now, bytes: if ack_only { 0 } else { buf.len() as u16 },
        handshake: crypto == Crypto::Handshake,
        retransmits: sent,
    });
    Some(buf)
}
/// Encrypt `buf` in place: the first `header_len` bytes remain plaintext
/// header (passed to the crypto layer alongside the payload), and the
/// remainder is replaced by ciphertext plus the AEAD tag.
fn encrypt(&self, crypto: Crypto, number: u64, buf: &mut Vec<u8>, header_len: u16) {
    let payload = match crypto {
        Crypto::ZeroRtt => self.zero_rtt_crypto.as_ref().unwrap().encrypt(number, &buf[0..header_len as usize], &buf[header_len as usize..]),
        Crypto::Handshake => self.handshake_crypto.encrypt(number, &buf[0..header_len as usize], &buf[header_len as usize..]),
        Crypto::OneRtt => self.crypto.as_ref().unwrap().encrypt(number, &buf[0..header_len as usize], &buf[header_len as usize..]),
    };
    debug_assert_eq!(payload.len(), buf.len() - header_len as usize + AEAD_TAG_SIZE);
    buf.truncate(header_len as usize);
    buf.extend_from_slice(&payload);
}
// TLP/RTO transmit
/// Build a minimal 1-RTT probe packet carrying a single PING frame, used when
/// the tail-loss-probe or retransmission-timeout alarm fires.
fn force_transmit(&mut self, config: &Config, now: u64) -> Box<[u8]> {
    let number = self.get_tx_number();
    let mut buf = Vec::new();
    Header::Short {
        id: self.remote_id.clone(),
        number: PacketNumber::new(number, self.largest_acked_packet),
        key_phase: self.key_phase
    }.encode(&mut buf);
    let header_len = buf.len() as u16;
    buf.push(frame::Type::PING.into());
    self.encrypt(Crypto::OneRtt, number, &mut buf, header_len);
    self.on_packet_sent(config, now, number, SentPacket {
        time: now, bytes: buf.len() as u16, handshake: false, acks: RangeSet::new(), retransmits: Retransmits::default()
    });
    buf.into()
}
/// Build an encrypted 1-RTT packet carrying the close frame for `reason`
/// (application or connection close), sized to fit within the MTU.
fn make_close(&mut self, reason: &state::CloseReason) -> Box<[u8]> {
    let number = self.get_tx_number();
    let mut buf = Vec::new();
    Header::Short {
        id: self.remote_id.clone(),
        number: PacketNumber::new(number, self.largest_acked_packet),
        key_phase: self.key_phase
    }.encode(&mut buf);
    let header_len = buf.len() as u16;
    // Budget for the frame: MTU minus header and AEAD tag.
    let max_len = self.mtu - header_len - AEAD_TAG_SIZE as u16;
    match *reason {
        state::CloseReason::Application(ref x) => x.encode(&mut buf, max_len),
        state::CloseReason::Connection(ref x) => x.encode(&mut buf, max_len),
    }
    self.encrypt(Crypto::OneRtt, number, &mut buf, header_len);
    buf.into()
}
/// Install the peer's transport parameters, applying the advertised initial
/// flow-control and stream-count limits to connection state and to the
/// remotely-initiated bi streams pre-created in `new`.
fn set_params(&mut self, params: TransportParameters) {
    self.max_bi_streams = params.initial_max_streams_bidi as u64;
    if self.side == Side::Client { self.max_bi_streams += 1; } // Account for TLS stream
    self.max_uni_streams = params.initial_max_streams_uni as u64;
    self.max_data = params.initial_max_data as u64;
    // Back-fill send credit for pre-created remote bi streams.
    for i in 0..self.max_remote_bi_streams {
        let id = StreamId::new(!self.side, Directionality::Bi, i as u64);
        self.streams.get_mut(&id).unwrap().send_mut().unwrap().max_data = params.initial_max_stream_data as u64;
    }
    self.params = params;
}
/// Open a new locally-initiated stream in `direction`, if the peer's
/// advertised stream-count limit permits. Returns `None` when the limit is
/// exhausted.
fn open(&mut self, config: &Config, direction: Directionality) -> Option<StreamId> {
    let (id, mut stream) = match direction {
        Directionality::Uni if self.next_uni_stream < self.max_uni_streams => {
            self.next_uni_stream += 1;
            (StreamId::new(self.side, direction, self.next_uni_stream - 1), stream::Send::new().into())
        }
        Directionality::Bi if self.next_bi_stream < self.max_bi_streams => {
            self.next_bi_stream += 1;
            (StreamId::new(self.side, direction, self.next_bi_stream - 1), Stream::new_bi(config.stream_receive_window as u64))
        }
        _ => { return None; } // TODO: Queue STREAM_ID_BLOCKED
    };
    // Apply the peer's per-stream flow control limit to the send half.
    stream.send_mut().unwrap().max_data = self.params.initial_max_stream_data as u64;
    let old = self.streams.insert(id, stream);
    assert!(old.is_none());
    Some(id)
}
/// Discard state for a stream if it's fully closed.
///
/// Called when one side of a stream transitions to a closed state
fn maybe_cleanup(&mut self, id: StreamId) {
    match self.streams.entry(id) {
        hash_map::Entry::Vacant(_) => unreachable!(),
        hash_map::Entry::Occupied(e) => {
            if e.get().is_closed() {
                e.remove_entry();
                // Closing a remotely-initiated stream frees a slot; advertise
                // the raised limit to the peer via a MAX_STREAM_ID frame.
                if id.initiator() != self.side {
                    match id.directionality() {
                        Directionality::Uni => {
                            self.max_remote_uni_streams += 1;
                            self.pending.max_uni_stream_id = true;
                        }
                        Directionality::Bi => {
                            self.max_remote_bi_streams += 1;
                            self.pending.max_bi_stream_id = true;
                        }
                    }
                }
            }
        }
    }
}
/// Mark the send half of `id` finished: transition it to DataSent and ensure a
/// FIN will be transmitted, piggybacking it on already-queued data when possible.
fn finish(&mut self, id: StreamId) {
    let ss = self.streams.get_mut(&id).expect("unknown stream").send_mut().expect("recv-only stream");
    assert_eq!(ss.state, stream::SendState::Ready);
    ss.state = stream::SendState::DataSent;
    // If a queued frame for this stream ends exactly at the final offset,
    // set FIN on it instead of queueing an extra empty frame.
    for frame in &mut self.pending.stream {
        if frame.id == id && frame.offset + frame.data.len() as u64 == ss.offset {
            frame.fin = true;
            return;
        }
    }
    self.pending.stream.push_back(frame::Stream { id, data: Bytes::new(), offset: ss.offset, fin: true });
}
/// Read the next buffered segment of `id` without reassembling into stream
/// order, returning the data together with its offset within the stream.
///
/// Consuming data issues connection-level flow control credit, and stream-level
/// credit while the peer may still send more.
fn read_unordered(&mut self, id: StreamId) -> Result<(Bytes, u64), ReadError> {
    assert_ne!(id, StreamId(0), "cannot read an internal stream");
    let rs = self.streams.get_mut(&id).unwrap().recv_mut().unwrap();
    // Once unordered reads begin, ordered reads are disallowed (see `read`).
    rs.unordered = true;
    // TODO: Drain rs.assembler to handle ordered-then-unordered reads reliably
    // Return data we already have buffered, regardless of state
    if let Some(x) = rs.buffered.pop_front() {
        // TODO: Reduce granularity of flow control credit, while still avoiding stalls, to reduce overhead
        self.local_max_data += x.0.len() as u64; // BUG: Don't issue credit for already-received data!
        self.pending.max_data = true;
        // Only bother issuing stream credit if the peer wants to send more
        if let stream::RecvState::Recv { size: None } = rs.state {
            rs.max_data += x.0.len() as u64;
            self.pending.max_stream_data.insert(id);
        }
        Ok(x)
    } else {
        // No data buffered: report why based on the receive state.
        match rs.state {
            stream::RecvState::ResetRecvd { error_code, .. } => {
                rs.state = stream::RecvState::Closed;
                Err(ReadError::Reset { error_code })
            }
            stream::RecvState::Closed => unreachable!(),
            stream::RecvState::Recv { .. } => Err(ReadError::Blocked),
            stream::RecvState::DataRecvd { .. } => {
                rs.state = stream::RecvState::Closed;
                Err(ReadError::Finished)
            }
        }
    }
}
/// Read in-order data from `id` into `buf`, returning the number of bytes read.
///
/// Buffered segments are first fed to the reassembler; consuming data issues
/// flow control credit as in `read_unordered`. Panics if unordered reads were
/// previously performed on this stream.
fn read(&mut self, id: StreamId, buf: &mut [u8]) -> Result<usize, ReadError> {
    assert_ne!(id, StreamId(0), "cannot read an internal stream");
    let rs = self.streams.get_mut(&id).unwrap().recv_mut().unwrap();
    assert!(!rs.unordered, "cannot perform ordered reads following unordered reads on a stream");
    for (data, offset) in rs.buffered.drain(..) {
        rs.assembler.insert(offset, &data);
    }
    if !rs.assembler.blocked() {
        let n = rs.assembler.read(buf);
        // TODO: Reduce granularity of flow control credit, while still avoiding stalls, to reduce overhead
        self.local_max_data += n as u64;
        self.pending.max_data = true;
        // Only bother issuing stream credit if the peer wants to send more
        if let stream::RecvState::Recv { size: None } = rs.state {
            rs.max_data += n as u64;
            self.pending.max_stream_data.insert(id);
        }
        Ok(n)
    } else {
        // Nothing contiguous to deliver: report why based on the receive state.
        match rs.state {
            stream::RecvState::ResetRecvd { error_code, .. } => {
                rs.state = stream::RecvState::Closed;
                Err(ReadError::Reset { error_code })
            }
            stream::RecvState::Closed => unreachable!(),
            stream::RecvState::Recv { .. } => Err(ReadError::Blocked),
            stream::RecvState::DataRecvd { .. } => {
                rs.state = stream::RecvState::Closed;
                Err(ReadError::Finished)
            }
        }
    }
}
/// Ask the peer to stop sending on `id` with the given application error code,
/// queueing a STOP_SENDING frame unless all data has already arrived.
fn stop_sending(&mut self, id: StreamId, error_code: u16) {
    assert!(id.directionality() == Directionality::Bi || id.initiator() != self.side,
            "only streams supporting incoming data may be reset");
    let stream = self.streams.get(&id).expect("stream must have begun sending to be stopped")
        .recv().unwrap();
    // Only bother if there's data we haven't received yet
    if !stream.is_finished() {
        self.pending.stop_sending.push((id, error_code));
    }
}
/// Whether the congestion window lacks room for another full-MTU packet.
fn congestion_blocked(&self) -> bool {
    self.congestion_window.saturating_sub(self.bytes_in_flight) < self.mtu as u64
}
/// Whether sending application data is currently impossible, due to either
/// connection-level flow control or congestion control.
fn blocked(&self) -> bool {
    self.data_sent >= self.max_data || self.congestion_blocked()
}
/// Decrypt an incoming packet's payload and recover its full packet number,
/// transparently handling a peer-initiated key update signalled by a flipped
/// key-phase bit.
///
/// `Err(None)` means the packet should be discarded silently (failed
/// authentication or an unusable header); `Err(Some(..))` is a protocol
/// violation (key update signalled on an old packet number).
fn decrypt_packet(&mut self, handshake: bool, packet: Packet) -> Result<(Vec<u8>, u64), Option<TransportError>> {
    let (key_phase, number) = match packet.header {
        Header::Short { key_phase, number, .. } if !handshake => (key_phase, number),
        Header::Long { number, .. } if handshake => (false, PacketNumber::U32(number)),
        _ => { return Err(None); }
    };
    // Recover the full packet number from its truncated encoding.
    let number = number.expand(self.rx_packet);
    if key_phase != self.key_phase {
        if number <= self.rx_packet {
            // Illegal key update
            return Err(Some(TransportError::PROTOCOL_VIOLATION));
        }
        if let Some(payload) = self.update_keys(number, &packet.header_data, &packet.payload) {
            Ok((payload, number))
        } else {
            // Invalid key update
            Err(None)
        }
    } else if let Some(payload) = self.decrypt(handshake, number, &packet.header_data, &packet.payload) {
        Ok((payload, number))
    } else {
        // Unable to authenticate
        Err(None)
    }
}
/// Look up the stream state for receiving on `id`, validating that the peer is
/// allowed to send on it.
///
/// Returns `Ok(None)` for a legal-but-untracked stream, or a transport error
/// when the id violates directionality rules or exceeds the advertised limits.
fn get_recv_stream(&mut self, id: StreamId) -> Result<Option<&mut Stream>, TransportError> {
    if self.side == id.initiator() {
        // Locally initiated: we can only receive on bi streams we actually opened.
        match id.directionality() {
            Directionality::Uni => { return Err(TransportError::STREAM_STATE_ERROR); }
            Directionality::Bi if id.index() >= self.next_bi_stream => { return Err(TransportError::STREAM_STATE_ERROR); }
            Directionality::Bi => {}
        };
    } else {
        // Remotely initiated: enforce the limits we advertised.
        let limit = match id.directionality() {
            Directionality::Bi => self.max_remote_bi_streams,
            Directionality::Uni => self.max_remote_uni_streams,
        };
        if id.index() >= limit {
            return Err(TransportError::STREAM_ID_ERROR);
        }
    }
    Ok(self.streams.get_mut(&id))
}
/// Write application data to `stream`, respecting connection and stream flow
/// control and the congestion window.
///
/// Returns the number of bytes accepted (possibly fewer than `data.len()`),
/// `WriteError::Blocked` when no budget is currently available, or
/// `WriteError::Stopped` if the peer asked us to stop sending.
fn write(&mut self, stream: StreamId, data: &[u8]) -> Result<usize, WriteError> {
    if self.state.as_ref().unwrap().is_closed() { return Err(WriteError::Blocked); }
    assert!(stream.directionality() == Directionality::Bi || stream.initiator() == self.side);
    if self.blocked() {
        // Remember the stream so it can be notified when budget frees up.
        self.blocked_streams.insert(stream);
        return Err(WriteError::Blocked);
    }
    let (stop_reason, stream_budget) = {
        let ss = self.streams.get_mut(&stream).expect("stream already closed").send_mut().unwrap();
        // `take` consumes the stop reason so it's reported to the caller only once.
        (match ss.state {
            stream::SendState::ResetSent { ref mut stop_reason }
            | stream::SendState::ResetRecvd { ref mut stop_reason } => stop_reason.take(),
            _ => None,
        },
         ss.max_data - ss.offset)
    };
    if let Some(error_code) = stop_reason {
        self.maybe_cleanup(stream);
        return Err(WriteError::Stopped { error_code });
    }
    if stream_budget == 0 {
        return Err(WriteError::Blocked);
    }
    let conn_budget = self.max_data - self.data_sent;
    // Accept whatever fits within both the connection and stream budgets.
    let n = conn_budget.min(stream_budget).min(data.len() as u64) as usize;
    self.transmit(stream, (&data[0..n]).into());
    Ok(n)
}
}
/// Errors returned by `Connection::read` and `Connection::read_unordered`.
#[derive(Debug, Fail, Clone, Eq, PartialEq, Ord, PartialOrd, Hash)]
pub enum ReadError {
    /// No more data is currently available on this stream.
    #[fail(display = "blocked")]
    Blocked,
    /// The peer abandoned transmitting data on this stream.
    #[fail(display = "reset by peer: error {}", error_code)]
    Reset { error_code: u16 },
    /// The data on this stream has been fully delivered and no more will be transmitted.
    #[fail(display = "finished")]
    Finished,
}
/// Errors returned by `Connection::write`.
#[derive(Debug, Fail, Clone, Eq, PartialEq, Ord, PartialOrd, Hash)]
pub enum WriteError {
    /// The peer is not able to accept additional data, or the connection is congested.
    #[fail(display = "unable to accept further writes")]
    Blocked,
    /// The peer is no longer accepting data on this stream.
    #[fail(display = "stopped by peer: error {}", error_code)]
    Stopped { error_code: u16 },
}
/// Errors preventing a connection from being initiated.
#[derive(Debug, Fail)]
pub enum ConnectError {
    /// The session ticket could not be parsed.
    #[fail(display = "session ticket was malformed")]
    MalformedSession,
    /// The TLS layer reported an error.
    #[fail(display = "TLS error: {}", _0)]
    Tls(ssl::Error),
}
// Allow `?` on OpenSSL results: both error types surface as `ConnectError::Tls`.
impl From<ssl::Error> for ConnectError { fn from(x: ssl::Error) -> Self { ConnectError::Tls(x) } }
impl From<openssl::error::ErrorStack> for ConnectError { fn from(x: openssl::error::ErrorStack) -> Self { ConnectError::Tls(x.into()) } }
/// A parsed or encodable packet header.
#[derive(Debug, Clone)]
enum Header {
    /// Long-form header carrying explicit version and both connection ids.
    Long {
        ty: u8,
        source_id: ConnectionId,
        destination_id: ConnectionId,
        number: u32,
    },
    /// Short-form header for protected packets: destination id, truncated
    /// packet number, and the key-phase bit.
    Short {
        id: ConnectionId,
        number: PacketNumber,
        key_phase: bool,
    },
    /// Version negotiation packet (encoded with a version field of zero).
    VersionNegotiate {
        ty: u8,
        source_id: ConnectionId,
        destination_id: ConnectionId,
    }
}
impl Header {
fn destination_id(&self) -> &ConnectionId {
use self::Header::*;
match *self {
Long { ref destination_id, .. } => destination_id,
Short { ref id, .. } => id,
VersionNegotiate { ref destination_id, .. } => destination_id,
}
}
fn key_phase(&self) -> bool {
match *self {
Header::Short { key_phase, .. } => key_phase,
_ => false,
}
}
}
// An encoded packet number
/// Truncated packet-number encodings of 8, 16, or 32 bits; the full 64-bit
/// value is recovered with `expand`.
#[derive(Debug, Copy, Clone)]
enum PacketNumber {
    U8(u8),
    U16(u16),
    U32(u32),
}
impl PacketNumber {
    /// Choose the smallest encoding that can unambiguously represent `n` given
    /// that packets up to `largest_acked` have been acknowledged (the receiver
    /// reconstructs the high bits from its own largest-seen number).
    fn new(n: u64, largest_acked: u64) -> Self {
        // With nothing acked there is no reference point; use the widest form.
        if largest_acked == 0 { return PacketNumber::U32(n as u32); }
        let range = (n - largest_acked) / 2;
        if range < 1 << 8 {
            PacketNumber::U8(n as u8)
        } else if range < 1 << 16 {
            PacketNumber::U16(n as u16)
        } else if range < 1 << 32 {
            PacketNumber::U32(n as u32)
        } else {
            panic!("packet number too large to encode")
        }
    }
    /// Type code identifying the encoding width (combined into the short-header
    /// type byte by `Header::encode`).
    fn ty(&self) -> u8 {
        use self::PacketNumber::*;
        match *self {
            U8(_) => 0x00,
            U16(_) => 0x01,
            U32(_) => 0x02,
        }
    }
    /// Write the truncated packet number to `w`.
    fn encode<W: BufMut>(&self, w: &mut W) {
        use self::PacketNumber::*;
        match *self {
            U8(x) => w.write(x),
            U16(x) => w.write(x),
            U32(x) => w.write(x),
        }
    }
    /// Recover the full 64-bit packet number: pick the value congruent to the
    /// truncated bits (mod the encoding's range `d`) that lies closest to the
    /// expected next packet, `prev + 1`.
    fn expand(&self, prev: u64) -> u64 {
        use self::PacketNumber::*;
        let t = prev + 1;
        // Compute missing bits that minimize the difference from expected
        let d = match *self {
            U8(_) => 1 << 8,
            U16(_) => 1 << 16,
            U32(_) => 1 << 32,
        };
        let x = match *self {
            U8(x) => x as u64,
            U16(x) => x as u64,
            U32(x) => x as u64,
        };
        if t > d/2 {
            // Round (t - x) to the nearest multiple of d, then add x back.
            x + d * ((t + d/2 - x) / d)
        } else {
            // Expected value is too small for wraparound to matter.
            x % d
        }
    }
}
const KEY_PHASE_BIT: u8 = 0x40;
impl Header {
    /// Serialize the header to its wire format.
    ///
    /// Connection-id lengths are packed one nibble each, encoded as `len - 3`
    /// for non-zero lengths (mirroring the `+ 3` applied in `Packet::decode`).
    fn encode<W: BufMut>(&self, w: &mut W) {
        use self::Header::*;
        match *self {
            Long { ty, ref source_id, ref destination_id, number } => {
                // High bit distinguishes long from short headers.
                w.write(0b10000000 | ty);
                w.write(VERSION);
                let mut dcil = destination_id.len() as u8;
                if dcil > 0 { dcil -= 3; }
                let mut scil = source_id.len() as u8;
                if scil > 0 { scil -= 3; }
                w.write(dcil << 4 | scil);
                w.put_slice(destination_id);
                w.put_slice(source_id);
                w.write::<u16>(0); // Placeholder for payload length; see `set_payload_length`
                w.write(number);
            }
            Short { ref id, number, key_phase } => {
                // Type byte combines the packet-number width with fixed bits and key phase.
                let ty = number.ty() | 0x30
                    | if key_phase { KEY_PHASE_BIT } else { 0 };
                w.write(ty);
                w.put_slice(id);
                number.encode(w);
            }
            VersionNegotiate { ty, ref source_id, ref destination_id } => {
                w.write(0x80 | ty);
                // Version 0 identifies a version negotiation packet.
                w.write::<u32>(0);
                let mut dcil = destination_id.len() as u8;
                if dcil > 0 { dcil -= 3; }
                let mut scil = source_id.len() as u8;
                if scil > 0 { scil -= 3; }
                w.write(dcil << 4 | scil);
                w.put_slice(destination_id);
                w.put_slice(source_id);
            }
        }
    }
}
/// A decoded packet: the parsed header plus raw byte views of its parts.
struct Packet {
    header: Header,
    // Raw header bytes, passed to the crypto layer alongside the payload.
    header_data: Bytes,
    // The (still encrypted) packet payload.
    payload: Bytes,
}
/// Errors encountered while parsing a packet header.
#[derive(Debug, Fail, Clone, Eq, PartialEq, Ord, PartialOrd, Hash)]
enum HeaderError {
    /// The packet's version is not one we speak; carries the connection ids
    /// needed to answer with a version negotiation packet.
    #[fail(display = "unsupported version")]
    UnsupportedVersion { source: ConnectionId, destination: ConnectionId },
    /// The header was structurally invalid.
    #[fail(display = "invalid header: {}", _0)]
    InvalidHeader(&'static str),
}
impl From<coding::UnexpectedEnd> for HeaderError {
fn from(_: coding::UnexpectedEnd) -> Self { HeaderError::InvalidHeader("unexpected end of packet") }
}
impl Packet {
    /// Parses one packet from the front of `packet`, returning it together
    /// with any remaining bytes (coalesced packets after a long header).
    ///
    /// `dest_id_len` is the length of connection IDs we issue; short headers
    /// carry no CID length, so it must be known out of band.
    fn decode(packet: &Bytes, dest_id_len: usize) -> Result<(Self, Bytes), HeaderError> {
        let mut buf = io::Cursor::new(&packet[..]);
        let ty = buf.get::<u8>()?;
        // The top bit of the first byte distinguishes long from short headers.
        let long = ty & 0x80 != 0;
        let ty = ty & !0x80;
        let mut cid_stage = [0; MAX_CID_SIZE];
        if long {
            let version = buf.get::<u32>()?;
            let ci_lengths = buf.get::<u8>()?;
            // Non-zero CID length nibbles are encoded minus 3.
            let mut dcil = ci_lengths >> 4;
            if dcil > 0 { dcil += 3 };
            let mut scil = ci_lengths & 0xF;
            if scil > 0 { scil += 3 };
            if buf.remaining() < (dcil + scil) as usize { return Err(HeaderError::InvalidHeader("connection IDs longer than packet")); }
            buf.copy_to_slice(&mut cid_stage[0..dcil as usize]);
            let destination_id = ConnectionId::new(cid_stage, dcil as usize);
            buf.copy_to_slice(&mut cid_stage[0..scil as usize]);
            let source_id = ConnectionId::new(cid_stage, scil as usize);
            Ok(match version {
                // Version 0 marks a Version Negotiation packet.
                0 => {
                    let header_data = packet.slice(0, buf.position() as usize);
                    let payload = packet.slice(buf.position() as usize, packet.len());
                    (Packet {
                        header: Header::VersionNegotiate { ty, source_id, destination_id },
                        header_data, payload,
                    }, Bytes::new())
                }
                VERSION => {
                    let len = buf.get_var()?;
                    let number = buf.get()?;
                    let header_data = packet.slice(0, buf.position() as usize);
                    if buf.position() + len > packet.len() as u64 { return Err(HeaderError::InvalidHeader("payload longer than packet")); }
                    let payload = if len == 0 { Bytes::new() } else { packet.slice(buf.position() as usize, (buf.position() + len) as usize) };
                    // Bytes beyond the declared length belong to a coalesced
                    // packet and are handed back to the caller.
                    (Packet {
                        header: Header::Long { ty, source_id, destination_id, number },
                        header_data, payload,
                    }, packet.slice((buf.position() + len) as usize, packet.len()))
                }
                _ => return Err(HeaderError::UnsupportedVersion { source: source_id, destination: destination_id }),
            })
        } else {
            if buf.remaining() < dest_id_len { return Err(HeaderError::InvalidHeader("destination connection ID longer than packet")); }
            buf.copy_to_slice(&mut cid_stage[0..dest_id_len]);
            let id = ConnectionId::new(cid_stage, dest_id_len);
            let key_phase = ty & KEY_PHASE_BIT != 0;
            // The low two bits select the packet-number encoding width.
            let number = match ty & 0b11 {
                0x0 => PacketNumber::U8(buf.get()?),
                0x1 => PacketNumber::U16(buf.get()?),
                0x2 => PacketNumber::U32(buf.get()?),
                _ => { return Err(HeaderError::InvalidHeader("unknown packet type")); }
            };
            let header_data = packet.slice(0, buf.position() as usize);
            let payload = packet.slice(buf.position() as usize, packet.len());
            Ok((Packet {
                header: Header::Short { id, number, key_phase },
                header_data, payload,
            }, Bytes::new()))
        }
    }
}
/// Connection state machine.
enum State {
    /// TLS handshake in progress.
    Handshake(state::Handshake),
    /// Handshake complete; normal operation.
    Established(state::Established),
    /// Handshake aborted; holds the close reason and optional TLS alert.
    HandshakeFailed(state::HandshakeFailed),
    /// Connection closed locally.
    Closed(state::Closed),
    /// Close initiated remotely; lingering before disposal.
    Draining(state::Draining),
    /// Waiting for application to call close so we can dispose of the resources
    Drained,
}
impl State {
    /// Builds a `Closed` state not initiated by the application.
    fn closed<R: Into<state::CloseReason>>(reason: R) -> Self {
        State::Closed(state::Closed {
            reason: reason.into(),
            app_closed: false,
        })
    }
    /// Builds a `HandshakeFailed` state, optionally carrying a TLS alert.
    fn handshake_failed<R: Into<state::CloseReason>>(reason: R, alert: Option<Box<[u8]>>) -> Self {
        State::HandshakeFailed(state::HandshakeFailed {
            reason: reason.into(),
            alert,
            app_closed: false,
        })
    }
    /// Whether the connection has reached any terminal state.
    fn is_closed(&self) -> bool {
        match *self {
            State::HandshakeFailed(_)
            | State::Closed(_)
            | State::Draining(_)
            | State::Drained => true,
            _ => false,
        }
    }
    /// Whether the close was initiated by the application.
    fn is_app_closed(&self) -> bool {
        match *self {
            State::HandshakeFailed(ref x) => x.app_closed,
            State::Closed(ref x) => x.app_closed,
            State::Draining(ref x) => x.app_closed,
            _ => false,
        }
    }
    /// Whether all resources have been released.
    fn is_drained(&self) -> bool {
        match *self {
            State::Drained => true,
            _ => false,
        }
    }
}
/// Payload types for each `State` variant.
mod state {
    use super::*;
    pub struct Handshake {
        // In-progress TLS stream over the in-memory transport.
        pub tls: MidHandshakeSslStream<MemoryStream>,
        /// The number of the packet that first contained the latest version of the TLS ClientHello. Present iff we're
        /// the client.
        pub clienthello_packet: Option<u32>,
        // Whether the peer's connection ID has been learned yet.
        pub remote_id_set: bool,
    }
    pub struct Established {
        pub tls: SslStream<MemoryStream>,
    }
    pub struct HandshakeFailed { // Closed
        pub reason: CloseReason,
        // TLS alert to transmit to the peer, if any.
        pub alert: Option<Box<[u8]>>,
        pub app_closed: bool,
    }
    /// Why a connection was closed: by the transport or by the application.
    #[derive(Clone)]
    pub enum CloseReason {
        Connection(frame::ConnectionClose),
        Application(frame::ApplicationClose),
    }
    impl From<TransportError> for CloseReason { fn from(x: TransportError) -> Self { CloseReason::Connection(x.into()) } }
    impl From<frame::ConnectionClose> for CloseReason { fn from(x: frame::ConnectionClose) -> Self { CloseReason::Connection(x) } }
    impl From<frame::ApplicationClose> for CloseReason { fn from(x: frame::ApplicationClose) -> Self { CloseReason::Application(x) } }
    pub struct Closed {
        pub reason: CloseReason,
        pub app_closed: bool,
    }
    pub struct Draining {
        pub app_closed: bool,
    }
    // Any prior state may transition into Draining; `app_closed` is
    // carried over where the prior state tracked it.
    impl From<Handshake> for Draining {
        fn from(_: Handshake) -> Self { Draining { app_closed: false } }
    }
    impl From<HandshakeFailed> for Draining {
        fn from(x: HandshakeFailed) -> Self { Draining { app_closed: x.app_closed } }
    }
    impl From<Established> for Draining {
        fn from(_: Established) -> Self { Draining { app_closed: false } }
    }
    impl From<Closed> for Draining {
        fn from(x: Closed) -> Self { Draining { app_closed: x.app_closed } }
    }
}
/// Issues and verifies address-bound retry cookies via a keyed MAC.
struct CookieFactory {
    // 512-bit key for the keyed Blake2b MAC.
    mac_key: [u8; 64]
}
/// Length in bytes of the MAC that constitutes a cookie.
const COOKIE_MAC_BYTES: usize = 64;
impl CookieFactory {
    fn new(mac_key: [u8; 64]) -> Self {
        Self { mac_key }
    }
    /// Writes a cookie for `conn` into `out` and returns the number of
    /// bytes written. `out` must hold at least `COOKIE_MAC_BYTES`.
    fn generate(&self, conn: &ConnectionInfo, out: &mut [u8]) -> usize {
        let mac = self.generate_mac(conn);
        out[0..COOKIE_MAC_BYTES].copy_from_slice(&mac);
        COOKIE_MAC_BYTES
    }
    /// Computes the keyed MAC over the client's IP address and port, binding
    /// the cookie to that address.
    fn generate_mac(&self, conn: &ConnectionInfo) -> [u8; COOKIE_MAC_BYTES] {
        let mut mac = Blake2b::new_keyed(&self.mac_key, COOKIE_MAC_BYTES);
        mac.process(&conn.remote.ip().octets());
        {
            let mut buf = [0; 2];
            BigEndian::write_u16(&mut buf, conn.remote.port());
            mac.process(&buf);
        }
        let mut result = [0; COOKIE_MAC_BYTES];
        mac.variable_result(&mut result).unwrap();
        result
    }
    /// Checks `cookie_data` against the MAC expected for `conn`.
    ///
    /// Simplified from `if !eq { return false; } true` to return the
    /// constant-time comparison result directly; behavior is unchanged.
    fn verify(&self, conn: &ConnectionInfo, cookie_data: &[u8]) -> bool {
        constant_time_eq(cookie_data, &self.generate_mac(conn))
    }
}
/// Identity of a remote endpoint, attached to TLS state via ex-data below.
struct ConnectionInfo {
    id: ConnectionId,
    remote: SocketAddrV6,
}
// Process-wide OpenSSL ex-data slots used to smuggle per-connection data
// into TLS callbacks.
lazy_static! {
    static ref CONNECTION_INFO_INDEX: ex_data::Index<Ssl, ConnectionInfo> = Ssl::new_ex_index().unwrap();
    static ref TRANSPORT_PARAMS_INDEX: ex_data::Index<Ssl, Result<TransportParameters, ::transport_parameters::Error>>
        = Ssl::new_ex_index().unwrap();
}
/// Events of interest to the application
#[derive(Debug)]
pub enum Event {
    /// A connection was successfully established.
    Connected {
        /// The negotiated application protocol, if any (e.g. via ALPN —
        /// TODO confirm).
        protocol: Option<Box<[u8]>>,
    },
    /// A connection was lost.
    ConnectionLost {
        reason: ConnectionError
    },
    /// A closed connection was dropped.
    ConnectionDrained,
    /// A stream has data or errors waiting to be read
    StreamReadable {
        /// The affected stream
        stream: StreamId,
        /// Whether this is the first event on the stream
        fresh: bool,
    },
    /// A formerly write-blocked stream might now accept a write
    StreamWritable {
        stream: StreamId,
    },
    /// All data sent on `stream` has been received by the peer
    StreamFinished {
        stream: StreamId,
    },
    /// At least one new stream of a certain directionality may be opened
    StreamAvailable {
        directionality: Directionality,
    },
    /// A session ticket was received, carried as raw bytes.
    NewSessionTicket {
        ticket: Box<[u8]>,
    },
}
/// I/O operations to be immediately executed by the backend.
#[derive(Debug)]
pub enum Io {
    /// Send a packet out on the wire.
    Transmit {
        destination: SocketAddrV6,
        packet: Box<[u8]>,
    },
    /// Start or reset a timer
    TimerStart {
        connection: ConnectionHandle,
        timer: Timer,
        /// Absolute μs
        time: u64,
    },
    /// Cancel a previously started timer.
    TimerStop {
        connection: ConnectionHandle,
        timer: Timer,
    }
}
/// Kinds of timers a connection may ask the backend to run.
#[derive(Debug, Copy, Clone, Ord, PartialOrd, Eq, PartialEq)]
pub enum Timer {
    Close,
    LossDetection,
    Idle,
}
// Lets `Timer` appear directly as a structured-logging value, rendered via
// its Debug representation.
impl slog::Value for Timer {
    fn serialize(&self, _: &slog::Record, key: slog::Key, serializer: &mut slog::Serializer) -> slog::Result {
        serializer.emit_arguments(key, &format_args!("{:?}", self))
    }
}
/// Reasons why a connection might be lost.
#[derive(Debug, Clone, Fail)]
pub enum ConnectionError {
    /// The peer doesn't implement any supported version.
    #[fail(display = "peer doesn't implement any supported version")]
    VersionMismatch,
    /// The peer violated the QUIC specification as understood by this implementation.
    #[fail(display = "{}", error_code)]
    TransportError { error_code: TransportError },
    /// The peer's QUIC stack aborted the connection automatically.
    #[fail(display = "aborted by peer: {}", reason)]
    ConnectionClosed { reason: frame::ConnectionClose },
    /// The peer closed the connection.
    #[fail(display = "closed by peer: {}", reason)]
    ApplicationClosed { reason: frame::ApplicationClose },
    /// The peer is unable to continue processing this connection, usually due to having restarted.
    #[fail(display = "reset by peer")]
    Reset,
    /// The peer has become unreachable.
    #[fail(display = "timed out")]
    TimedOut,
}
// Transport errors are a strict subset of connection errors.
impl From<TransportError> for ConnectionError {
    fn from(x: TransportError) -> Self { ConnectionError::TransportError { error_code: x } }
}
impl From<ConnectionError> for io::Error {
    /// Maps a connection failure onto the closest `io::ErrorKind`,
    /// preserving the human-readable reason text.
    fn from(x: ConnectionError) -> io::Error {
        use self::ConnectionError::*;
        let (kind, message) = match x {
            TimedOut => (io::ErrorKind::TimedOut, "timed out".to_string()),
            Reset => (io::ErrorKind::ConnectionReset, "reset by peer".to_string()),
            ApplicationClosed { reason } => (io::ErrorKind::ConnectionAborted, format!("closed by peer application: {}", reason)),
            ConnectionClosed { reason } => (io::ErrorKind::ConnectionAborted, format!("peer detected an error: {}", reason)),
            TransportError { error_code } => (io::ErrorKind::Other, format!("{}", error_code)),
            VersionMismatch => (io::ErrorKind::Other, "version mismatch".to_string()),
        };
        io::Error::new(kind, message)
    }
}
/// Long-header packet type codes (value of the type byte below the top bit).
mod packet {
    pub const INITIAL: u8 = 0x7F;
    pub const RETRY: u8 = 0x7E;
    pub const ZERO_RTT: u8 = 0x7C;
    pub const HANDSHAKE: u8 = 0x7D;
}
/// Forward data from an Initial or Retry packet to a stream for a TLS context
///
/// Returns `false` if the payload held anything other than padding, ACKs,
/// or non-FIN stream-0 data, or if stream 0 still cannot be read after
/// insertion (i.e. a gap remains before the next readable byte).
fn parse_initial(log: &Logger, stream: &mut MemoryStream, payload: Bytes) -> bool {
    for frame in frame::Iter::new(payload) {
        match frame {
            Frame::Padding => {}
            Frame::Ack(_) => {}
            Frame::Stream(frame::Stream { id: StreamId(0), fin: false, offset, data, .. }) => {
                stream.insert(offset, &data);
            }
            x => { debug!(log, "unexpected frame in initial/retry packet"; "ty" => %x.ty()); return false; } // Invalid packet
        }
    }
    if stream.read_blocked() {
        debug!(log, "initial/retry packet missing stream frame(s)");
        false
    } else { true }
}
/// Builds a complete, encrypted handshake-packet carrying a close frame (and
/// optionally a TLS alert on stream 0), ready to transmit.
fn handshake_close<R>(crypto: &CryptoContext,
                      remote_id: &ConnectionId, local_id: &ConnectionId, packet_number: u32,
                      reason: R, tls_alert: Option<&[u8]>) -> Box<[u8]>
    where R: Into<state::CloseReason>
{
    let mut buf = Vec::<u8>::new();
    Header::Long {
        ty: packet::HANDSHAKE, destination_id: remote_id.clone(), source_id: local_id.clone(), number: packet_number
    }.encode(&mut buf);
    let header_len = buf.len();
    // Leave room for the AEAD tag that encryption appends.
    let max_len = MIN_MTU - header_len as u16 - AEAD_TAG_SIZE as u16;
    match reason.into() {
        state::CloseReason::Application(ref x) => x.encode(&mut buf, max_len),
        state::CloseReason::Connection(ref x) => x.encode(&mut buf, max_len),
    }
    if let Some(data) = tls_alert {
        if !data.is_empty() {
            frame::Stream { id: StreamId(0), fin: false, offset: 0, data }.encode(false, &mut buf);
        }
    }
    set_payload_length(&mut buf, header_len);
    // The header bytes are the AEAD associated data.
    let payload = crypto.encrypt(packet_number as u64, &buf[0..header_len], &buf[header_len..]);
    debug_assert_eq!(payload.len(), buf.len() - header_len + AEAD_TAG_SIZE);
    buf.truncate(header_len);
    buf.extend_from_slice(&payload);
    buf.into()
}
/// Patches the long header's payload-length placeholder (written by
/// `Header::encode`) with the final encrypted payload length.
fn set_payload_length(packet: &mut [u8], header_len: usize) {
    let len = packet.len() - header_len + AEAD_TAG_SIZE;
    assert!(len < 2usize.pow(14)); // Fits in reserved space
    // The placeholder sits 6 bytes before the end of the header (2 bytes of
    // length followed by a 4-byte packet number); 0b01 tags a 2-byte varint.
    BigEndian::write_u16(&mut packet[header_len-6..], len as u16 | 0b01 << 14);
}
/// Fixed salt for extracting the handshake secret from the client's initial
/// destination connection ID; see `CryptoContext::handshake`.
const HANDSHAKE_SALT: [u8; 20] = [0x9c, 0x10, 0x8f, 0x98, 0x52, 0x0a, 0x5c, 0x5c, 0x32, 0x96, 0x8e, 0x95, 0x0e, 0x8a, 0x2c, 0x5f, 0xe0, 0x6d, 0x6c, 0x38];
/// One direction's keying material: the secret it was expanded from plus
/// the derived AEAD key and IV.
#[derive(Clone)]
pub struct CryptoState {
    secret: Box<[u8]>,
    key: Box<[u8]>,
    iv: Box<[u8]>,
}
impl CryptoState {
    /// Expands `secret` into an AEAD key and IV sized for `cipher`.
    fn new(digest: MessageDigest, cipher: Cipher, secret: Box<[u8]>) -> Self {
        let key = hkdf::qexpand(digest, &secret, b"key", cipher.key_len() as u16);
        let iv = hkdf::qexpand(digest, &secret, b"iv", cipher.iv_len().unwrap() as u16);
        Self { secret, key, iv }
    }
    /// Derives the next-generation state from this one's secret, using the
    /// 1-RTT label matching `side`.
    fn update(&self, digest: MessageDigest, cipher: Cipher, side: Side) -> CryptoState {
        let secret = hkdf::qexpand(digest, &self.secret, if side == Side::Client { b"client 1rtt" } else { b"server 1rtt" }, digest.size() as u16);
        Self::new(digest, cipher, secret)
    }
}
/// Keys for protecting 0-RTT data, derived from the early exporter secret.
pub struct ZeroRttCrypto {
    state: CryptoState,
    cipher: Cipher,
}
impl ZeroRttCrypto {
    /// Derives 0-RTT keys from the TLS session's early exporter.
    fn new(tls: &SslRef) -> Self {
        let tls_cipher = tls.current_cipher().unwrap();
        let digest = tls_cipher.handshake_digest().unwrap();
        let cipher = Cipher::from_nid(tls_cipher.cipher_nid().unwrap()).unwrap();
        const LABEL: &str = "EXPORTER-QUIC 0rtt";
        let mut secret = vec![0; digest.size()];
        tls.export_keying_material_early(&mut secret, &LABEL, b"").unwrap();
        Self {
            state: CryptoState::new(digest, cipher, secret.into()),
            cipher
        }
    }
    /// AEAD-seals `payload` with `header` as associated data; the tag is
    /// appended to the returned buffer.
    fn encrypt(&self, packet: u64, header: &[u8], payload: &[u8]) -> Vec<u8> {
        let mut tag = [0; AEAD_TAG_SIZE];
        // Nonce = packet number (big-endian, right-aligned) XOR the IV.
        let mut nonce = [0; 12];
        BigEndian::write_u64(&mut nonce[4..12], packet);
        for i in 0..12 {
            nonce[i] ^= self.state.iv[i];
        }
        let mut buf = encrypt_aead(self.cipher, &self.state.key, Some(&nonce), header, payload, &mut tag).unwrap();
        buf.extend_from_slice(&tag);
        buf
    }
    /// Reverses `encrypt`; returns `None` if the payload is too short or the
    /// tag fails to authenticate.
    fn decrypt(&self, packet: u64, header: &[u8], payload: &[u8]) -> Option<Vec<u8>> {
        let mut nonce = [0; 12];
        BigEndian::write_u64(&mut nonce[4..12], packet);
        for i in 0..12 {
            nonce[i] ^= self.state.iv[i];
        }
        if payload.len() < AEAD_TAG_SIZE { return None; }
        let (payload, tag) = payload.split_at(payload.len() - AEAD_TAG_SIZE);
        decrypt_aead(self.cipher, &self.state.key, Some(&nonce), header, payload, tag).ok()
    }
}
/// Bidirectional packet-protection keys: `local` seals what we send,
/// `remote` opens what the peer sends.
#[derive(Clone)]
pub struct CryptoContext {
    local: CryptoState,
    remote: CryptoState,
    digest: MessageDigest,
    cipher: Cipher,
}
impl CryptoContext {
    /// Derives handshake-packet protection keys for `side` from the client's
    /// initial destination connection ID.
    fn handshake(id: &ConnectionId, side: Side) -> Self {
        let digest = MessageDigest::sha256();
        let cipher = Cipher::aes_128_gcm();
        let hs_secret = hkdf::extract(digest, &HANDSHAKE_SALT, &id.0);
        // Each side seals with its own label and opens with the peer's.
        let (local_label, remote_label) = if side == Side::Client { (b"client hs", b"server hs") } else { (b"server hs", b"client hs") };
        let local = CryptoState::new(digest, cipher, hkdf::qexpand(digest, &hs_secret, &local_label[..], digest.size() as u16));
        let remote = CryptoState::new(digest, cipher, hkdf::qexpand(digest, &hs_secret, &remote_label[..], digest.size() as u16));
        CryptoContext {
            local, remote, digest, cipher,
        }
    }
    /// Builds 1-RTT keys from the completed TLS session's exporter secrets.
    fn established(tls: &SslRef, side: Side) -> Self {
        let tls_cipher = tls.current_cipher().unwrap();
        let digest = tls_cipher.handshake_digest().unwrap();
        let cipher = Cipher::from_nid(tls_cipher.cipher_nid().unwrap()).unwrap();
        const SERVER_LABEL: &str = "EXPORTER-QUIC server 1rtt";
        const CLIENT_LABEL: &str = "EXPORTER-QUIC client 1rtt";
        let (local_label, remote_label) = if side == Side::Client { (CLIENT_LABEL, SERVER_LABEL) } else { (SERVER_LABEL, CLIENT_LABEL) };
        let mut local_secret = vec![0; digest.size()];
        tls.export_keying_material(&mut local_secret, local_label, Some(b"")).unwrap();
        let local = CryptoState::new(digest, cipher, local_secret.into());
        let mut remote_secret = vec![0; digest.size()];
        tls.export_keying_material(&mut remote_secret, remote_label, Some(b"")).unwrap();
        let remote = CryptoState::new(digest, cipher, remote_secret.into());
        CryptoContext {
            local, remote, digest, cipher
        }
    }
    /// Derives the next key generation for a key update.
    fn update(&self, side: Side) -> Self {
        CryptoContext {
            local: self.local.update(self.digest, self.cipher, side),
            // FIX: the next remote keys must be derived from the *remote*
            // secret. Previously this expanded `self.local` with the peer's
            // label, producing keys the peer could never match after a key
            // update.
            remote: self.remote.update(self.digest, self.cipher, !side),
            digest: self.digest, cipher: self.cipher,
        }
    }
    /// AEAD-seals `payload` under the local keys, with `header` as
    /// associated data; the tag is appended to the returned buffer.
    fn encrypt(&self, packet: u64, header: &[u8], payload: &[u8]) -> Vec<u8> {
        // FIXME: Output to caller-owned memory with preexisting header; retain crypter
        let mut tag = [0; AEAD_TAG_SIZE];
        // Nonce = packet number (big-endian, right-aligned) XOR the IV.
        let mut nonce = [0; 12];
        BigEndian::write_u64(&mut nonce[4..12], packet);
        for i in 0..12 {
            nonce[i] ^= self.local.iv[i];
        }
        let mut buf = encrypt_aead(self.cipher, &self.local.key, Some(&nonce), header, payload, &mut tag).unwrap();
        buf.extend_from_slice(&tag);
        buf
    }
    /// Opens a payload sealed by the peer; returns `None` if the payload is
    /// too short or authentication fails.
    fn decrypt(&self, packet: u64, header: &[u8], payload: &[u8]) -> Option<Vec<u8>> {
        let mut nonce = [0; 12];
        BigEndian::write_u64(&mut nonce[4..12], packet);
        for i in 0..12 {
            nonce[i] ^= self.remote.iv[i];
        }
        if payload.len() < AEAD_TAG_SIZE { return None; }
        let (payload, tag) = payload.split_at(payload.len() - AEAD_TAG_SIZE);
        decrypt_aead(self.cipher, &self.remote.key, Some(&nonce), header, payload, tag).ok()
    }
}
/// Length in bytes of the AEAD authentication tag appended to each payload.
const AEAD_TAG_SIZE: usize = 16;
#[cfg(test)]
mod test {
    use super::*;
    use rand;
    // Exhaustively checks 1-byte packet-number expansion stays within half
    // the encoded range of the expected value.
    #[test]
    fn packet_number() {
        for prev in 0..1024 {
            for x in 0..256 {
                let found = PacketNumber::U8(x as u8).expand(prev);
                assert!(found as i64 - (prev+1) as i64 <= 128 || prev < 128 );
            }
        }
        // Order of operations regression test
        assert_eq!(PacketNumber::U32(0xa0bd197c).expand(0xa0bd197a), 0xa0bd197c);
    }
    // Client and server derive complementary handshake keys from the same
    // connection ID, so one can decrypt what the other encrypts.
    #[test]
    fn handshake_crypto_roundtrip() {
        let conn = ConnectionId::random(&mut rand::thread_rng(), MAX_CID_SIZE as u8);
        let client = CryptoContext::handshake(&conn, Side::Client);
        let server = CryptoContext::handshake(&conn, Side::Server);
        let header = b"header";
        let payload = b"payload";
        let encrypted = client.encrypt(0, header, payload);
        let decrypted = server.decrypt(0, header, &encrypted).unwrap();
        assert_eq!(decrypted, payload);
    }
    // Known-answer test for the handshake KDF chain against fixed vectors.
    #[test]
    fn key_derivation() {
        let id = ConnectionId([0x83, 0x94, 0xc8, 0xf0, 0x3e, 0x51, 0x57, 0x08].iter().cloned().collect());
        let digest = MessageDigest::sha256();
        let cipher = Cipher::aes_128_gcm();
        let hs_secret = hkdf::extract(digest, &HANDSHAKE_SALT, &id.0);
        assert_eq!(&hs_secret[..],
                   [0xa5, 0x72, 0xb0, 0x24, 0x5a, 0xf1, 0xed, 0xdf,
                    0x5c, 0x61, 0xc6, 0xe3, 0xf7, 0xf9, 0x30, 0x4c,
                    0xa6, 0x6b, 0xfb, 0x4c, 0xaa, 0xf7, 0x65, 0x67,
                    0xd5, 0xcb, 0x8d, 0xd1, 0xdc, 0x4e, 0x82, 0x0b]);
        let client_secret = hkdf::qexpand(digest, &hs_secret, b"client hs", digest.size() as u16);
        assert_eq!(&client_secret[..],
                   [0x83, 0x55, 0xf2, 0x1a, 0x3d, 0x8f, 0x83, 0xec,
                    0xb3, 0xd0, 0xf9, 0x71, 0x08, 0xd3, 0xf9, 0x5e,
                    0x0f, 0x65, 0xb4, 0xd8, 0xae, 0x88, 0xa0, 0x61,
                    0x1e, 0xe4, 0x9d, 0xb0, 0xb5, 0x23, 0x59, 0x1d]);
        let client_state = CryptoState::new(digest, cipher, client_secret);
        assert_eq!(&client_state.key[..],
                   [0x3a, 0xd0, 0x54, 0x2c, 0x4a, 0x85, 0x84, 0x74,
                    0x00, 0x63, 0x04, 0x9e, 0x3b, 0x3c, 0xaa, 0xb2]);
        assert_eq!(&client_state.iv[..],
                   [0xd1, 0xfd, 0x26, 0x05, 0x42, 0x75, 0x3a, 0xba,
                    0x38, 0x58, 0x9b, 0xad]);
        let server_secret = hkdf::qexpand(digest, &hs_secret, b"server hs", digest.size() as u16);
        assert_eq!(&server_secret[..],
                   [0xf8, 0x0e, 0x57, 0x71, 0x48, 0x4b, 0x21, 0xcd,
                    0xeb, 0xb5, 0xaf, 0xe0, 0xa2, 0x56, 0xa3, 0x17,
                    0x41, 0xef, 0xe2, 0xb5, 0xc6, 0xb6, 0x17, 0xba,
                    0xe1, 0xb2, 0xf1, 0x5a, 0x83, 0x04, 0x83, 0xd6]);
        let server_state = CryptoState::new(digest, cipher, server_secret);
        assert_eq!(&server_state.key[..],
                   [0xbe, 0xe4, 0xc2, 0x4d, 0x2a, 0xf1, 0x33, 0x80,
                    0xa9, 0xfa, 0x24, 0xa5, 0xe2, 0xba, 0x2c, 0xff]);
        assert_eq!(&server_state.iv[..],
                   [0x25, 0xb5, 0x8e, 0x24, 0x6d, 0x9e, 0x7d, 0x5f,
                    0xfe, 0x43, 0x23, 0xfe]);
    }
}
|
use crate::test::externalities::TestExternalities;
use crate::{
node::InternalNode,
rpc::{self, RpcExtension},
types,
};
use jsonrpc_core_client::{transports::local, RpcChannel};
use crate::node::TestRuntimeRequirements;
/// A black box node, either runs a background node,
/// or connects via ws to a running node.
pub enum BlackBoxNode<N> {
    /// Connects to an external node.
    External(String),
    /// Spawns a pristine node.
    Internal(InternalNode<N>),
}
/// A black box test.
pub struct BlackBox<N> {
    // The node under test, external (WebSocket URL) or in-process.
    node: BlackBoxNode<N>,
}
impl<N> BlackBox<N>
where
    N: TestRuntimeRequirements,
{
    /// Execute provided `Fn` in an externalities provided environment.
    pub async fn with_state<T>(&mut self, func: impl FnOnce() -> T) -> T {
        TestExternalities::<N>::new(self.rpc()).execute_with(func)
    }
    /// Wait `number` of blocks.
    // NOTE(review): currently a no-op stub; callers get no actual waiting.
    pub fn wait_blocks(&self, _number: impl Into<types::BlockNumber<N::Runtime>>) {
        // TODO: no-op
    }
}
impl<N> rpc::RpcExtension for BlackBox<N> {
    /// Returns an RPC client for the node under test.
    ///
    /// External nodes are reached by blocking on a WebSocket connection;
    /// internal nodes get a local transport whose driver future is spawned
    /// on the node's compat runtime.
    fn rpc<TClient: From<RpcChannel> + 'static>(&mut self) -> TClient {
        // Return the match result directly instead of binding it to a
        // variable that is immediately returned (clippy::let_and_return).
        match self.node {
            BlackBoxNode::External(ref url) => {
                futures::executor::block_on(rpc::connect_ws(&url)).unwrap()
            }
            BlackBoxNode::Internal(ref mut node) => {
                use futures01::Future;
                let (client, fut) = local::connect::<TClient, _, _>(node.rpc_handler());
                node.compat_runtime().spawn(fut.map_err(|_| ()));
                client
            }
        }
    }
}
impl<N> BlackBox<N>{
/// Create an instance of `BlackBox`.
pub fn new(node: BlackBoxNode<N>) -> Self {
Self { node }
}
}
|
use std::fs;
/// One step of the AoC day-25 handshake: multiply by the subject number and
/// reduce modulo 20201227.
fn transform(value: i64, subject_number: i64) -> i64 {
    (value * subject_number) % 20201227
}
/// Advent of Code 2020 day 25: recover the shared encryption key from the
/// two device public keys by brute-forcing one loop size.
fn main() -> Result<(), Box<dyn std::error::Error>> {
    let filename = "/home/remy/AOC/2020/25/input";
    // Propagate I/O and parse failures via `?` instead of panicking with
    // `unwrap()` — the function already returns a Result.
    let data = fs::read_to_string(filename)?;
    let mut splitted = data.split("\n");
    let pub1 = splitted.next().ok_or("missing first public key")?.parse::<i64>()?;
    let pub2 = splitted.next().ok_or("missing second public key")?.parse::<i64>()?;
    println!("Pub 1: {}", pub1);
    println!("Pub 2: {}", pub2);
    // Brute-force the first device's loop size: transform with subject 7
    // until its public key is reproduced. (`while` replaces the original
    // `loop`/`break`; the condition is still checked before each step.)
    let mut t1 = 7;
    let mut i1 = 1;
    while t1 != pub1 {
        t1 = transform(t1, 7);
        i1 += 1;
    }
    println!("{} {} {}", pub1, t1, i1);
    // The shared key is the other public key transformed `i1` times.
    let mut result = 1;
    for _ in 0..i1 {
        result = transform(result, pub2);
    }
    println!("result: {}", result);
    Ok(())
}
|
use dicom::core::dicom_value;
use dicom::object::{mem::InMemDicomObject, StandardDataDictionary};
use dicom::{
core::{DataElement, PrimitiveValue, VR},
dictionary_std::tags,
};
use dicom_ul::pdu;
use dicom_ul::{
association::client::ClientAssociationOptions,
pdu::{PDataValueType, Pdu},
};
use pdu::PDataValue;
use structopt::StructOpt;
/// DICOM C-ECHO SCU
// Command-line options parsed by structopt.
#[derive(Debug, StructOpt)]
struct App {
    /// socket address to SCP (example: "127.0.0.1:104")
    addr: String,
    /// verbose mode
    #[structopt(short = "v", long = "verbose")]
    verbose: bool,
    /// the C-ECHO message ID
    #[structopt(short = "m", long = "message-id", default_value = "1")]
    message_id: u16,
    /// the calling AE title
    #[structopt(long = "calling-ae-title", default_value = "ECHOSCU")]
    calling_ae_title: String,
    /// the called AE title
    #[structopt(long = "called-ae-title", default_value = "ANY-SCP")]
    called_ae_title: String,
}
/// Establishes an association with the SCP, sends a C-ECHO-RQ, and validates
/// the C-ECHO-RSP (status and echoed message ID).
fn main() -> Result<(), Box<dyn std::error::Error>> {
    let App {
        addr,
        verbose,
        message_id,
        called_ae_title,
        calling_ae_title,
    } = App::from_args();
    // Negotiate an association proposing the Verification SOP Class.
    let mut association = ClientAssociationOptions::new()
        .with_abstract_syntax("1.2.840.10008.1.1")
        .calling_ae_title(calling_ae_title)
        .called_ae_title(called_ae_title)
        .establish(&addr)?;
    let pc = association
        .presentation_contexts()
        .first()
        .ok_or("No presentation context accepted")?
        .clone();
    if verbose {
        println!("Association with {} successful", addr);
    }
    // commands are always in implict VR LE
    let ts = dicom::transfer_syntax::entries::IMPLICIT_VR_LITTLE_ENDIAN.erased();
    let obj = create_echo_command(message_id);
    let mut data = Vec::new();
    obj.write_dataset_with_ts(&mut data, &ts)?;
    // Send the command set as a single, final P-DATA value.
    association.send(&Pdu::PData {
        data: vec![PDataValue {
            presentation_context_id: pc.id,
            value_type: PDataValueType::Command,
            is_last: true,
            data,
        }],
    })?;
    if verbose {
        println!(
            "Echo message sent (msg id {}), awaiting reply...",
            message_id
        );
    }
    let pdu = association.receive()?;
    match pdu {
        Pdu::PData { data } => {
            // NOTE(review): indexes data[0] — panics if the SCP sends an
            // empty P-DATA list; acceptable for an example, but worth noting.
            let data_value = &data[0];
            let v = &data_value.data;
            let obj = InMemDicomObject::read_dataset_with_ts(v.as_slice(), &ts)?;
            if verbose {
                println!("{:?}", obj);
            }
            // check status
            let status_elem = obj.element(tags::STATUS)?;
            if verbose {
                println!("Status: {}", status_elem.to_int::<u16>()?);
            }
            // msg ID response, should be equal to sent msg ID
            let msg_id_elem = obj.element(tags::MESSAGE_ID_BEING_RESPONDED_TO)?;
            assert_eq!(message_id, msg_id_elem.to_int()?);
            if verbose {
                println!("C-ECHO successful.");
            }
        }
        pdu => panic!("Unexpected pdu {:?}", pdu),
    }
    Ok(())
}
/// Builds the C-ECHO-RQ command set (DICOM PS3.7) for the given message ID.
fn create_echo_command(message_id: u16) -> InMemDicomObject<StandardDataDictionary> {
    let mut obj = InMemDicomObject::create_empty();
    // group length: total encoded length of the elements below in implicit
    // VR LE (8-byte header + value length for each element).
    obj.put(DataElement::new(
        tags::COMMAND_GROUP_LENGTH,
        // FIX: CommandGroupLength (0000,0000) has VR UL (unsigned long) per
        // PS3.7; it previously used UI (a UID string VR) for a numeric value.
        VR::UL,
        PrimitiveValue::from(8 + 18 + 8 + 2 + 8 + 2 + 8 + 2),
    ));
    // service: Verification SOP Class (null-padded to even length)
    obj.put(DataElement::new(
        tags::AFFECTED_SOP_CLASS_UID,
        VR::UI,
        dicom_value!(Str, "1.2.840.10008.1.1\0"),
    ));
    // command: 0x0030 = C-ECHO-RQ
    obj.put(DataElement::new(
        tags::COMMAND_FIELD,
        VR::US,
        dicom_value!(U16, [0x0030]),
    ));
    // message ID
    obj.put(DataElement::new(
        tags::MESSAGE_ID,
        VR::US,
        dicom_value!(U16, [message_id]),
    ));
    // data set type: 0x0101 = no data set follows
    obj.put(DataElement::new(
        tags::COMMAND_DATA_SET_TYPE,
        VR::US,
        dicom_value!(U16, [0x0101]),
    ));
    obj
}
|
//! Models and structs used by and for the deployment process.
use std::borrow::Cow;
use std::collections::HashMap;
use std::fmt;
use std::path::{Path, PathBuf};
use std::time::{Duration, SystemTime, SystemTimeError};
use serde::{Deserialize, Serialize};
use crate::profile::dotfile::Dotfile;
use crate::profile::Priority;
/// Contains the status of a deployed item.
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
pub enum ItemStatus {
    /// The item was successfully created.
    Success,
    /// The item deployment failed.
    ///
    /// Carries a human-readable reason.
    Failed(Cow<'static, str>),
    /// The item deployment was skipped.
    ///
    /// Carries a human-readable reason.
    Skipped(Cow<'static, str>),
}
impl ItemStatus {
    /// Marks the item operation as successful.
    pub const fn success() -> Self {
        Self::Success
    }
    /// Marks the item operation as failed.
    pub fn failed<S: Into<Cow<'static, str>>>(reason: S) -> Self {
        Self::Failed(reason.into())
    }
    /// Indicates that the item operation was skipped.
    pub fn skipped<S: Into<Cow<'static, str>>>(reason: S) -> Self {
        Self::Skipped(reason.into())
    }
    /// Checks if the item operation was successful.
    // Uses `matches!` instead of `==` so the predicate can be `const`,
    // consistent with `is_failed` and `is_skipped`. Adding `const` is
    // backward compatible for callers.
    pub const fn is_success(&self) -> bool {
        matches!(self, &Self::Success)
    }
    /// Checks if the item operation has failed.
    pub const fn is_failed(&self) -> bool {
        matches!(self, &Self::Failed(_))
    }
    /// Checks if the item operation was skipped.
    pub const fn is_skipped(&self) -> bool {
        matches!(self, &Self::Skipped(_))
    }
}
impl fmt::Display for ItemStatus {
    /// Renders the status and, where present, its reason.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match self {
            Self::Success => write!(f, "Success"),
            Self::Failed(reason) => write!(f, "Failed: {}", reason),
            Self::Skipped(reason) => write!(f, "Skipped: {}", reason),
        }
    }
}
/// Any error converts into a failed status carrying its message.
impl<E> From<E> for ItemStatus
where
    E: std::error::Error,
{
    fn from(value: E) -> Self {
        Self::failed(value.to_string())
    }
}
/// Defines the type of dotfile.
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
pub enum DeployedDotfileKind {
    /// A normal dotfile.
    Dotfile(Dotfile),
    /// A dotfile that is contained in a directory that is deployed.
    ///
    /// PathBuf is the deploy path of the `parent` dotfile.
    /// The parent should always be of type `Dotfile(_)`.
    Child(PathBuf),
}
impl DeployedDotfileKind {
    /// Returns `true` for a directly deployed dotfile.
    pub const fn is_dotfile(&self) -> bool {
        match self {
            Self::Dotfile(_) => true,
            Self::Child(_) => false,
        }
    }
    /// Returns `true` for a dotfile deployed as part of a parent directory.
    pub const fn is_child(&self) -> bool {
        match self {
            Self::Child(_) => true,
            Self::Dotfile(_) => false,
        }
    }
}
/// Stores the result of a dotfile deployment operation.
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
pub struct DeployedDotfile {
    /// The status of the deployed dotfile.
    pub status: ItemStatus,
    /// The kind of the deployed dotfile.
    pub kind: DeployedDotfileKind,
}
impl DeployedDotfile {
    /// Returns the status of the dotfile operation.
    pub const fn status(&self) -> &ItemStatus {
        &self.status
    }
    /// Returns the kind of the dotfile operation.
    pub const fn kind(&self) -> &DeployedDotfileKind {
        &self.kind
    }
}
// Allows generic code to read the status without knowing the item type.
impl AsRef<ItemStatus> for DeployedDotfile {
    fn as_ref(&self) -> &ItemStatus {
        self.status()
    }
}
/// Stores the result of a symlink deployment operation.
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
pub struct DeployedSymlink {
    /// The status of the deployed symlink.
    pub status: ItemStatus,
    /// The source path of the link.
    pub source: PathBuf,
}
impl DeployedSymlink {
    /// Returns the status of the link operation.
    pub const fn status(&self) -> &ItemStatus {
        &self.status
    }
    /// Returns the source path of the link.
    pub fn source(&self) -> &Path {
        self.source.as_path()
    }
}
// Allows generic code to read the status without knowing the item type.
impl AsRef<ItemStatus> for DeployedSymlink {
    fn as_ref(&self) -> &ItemStatus {
        self.status()
    }
}
/// Describes the status of a profile deployment.
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
pub enum DeploymentStatus {
    /// The profile is deployed successfully.
    Success,
    /// There were errors during the deployment.
    ///
    /// Carries a human-readable reason.
    Failed(Cow<'static, str>),
}
impl DeploymentStatus {
    /// Returns success.
    pub const fn success() -> Self {
        Self::Success
    }
    /// Returns a failure.
    pub fn failed<S: Into<Cow<'static, str>>>(reason: S) -> Self {
        Self::Failed(reason.into())
    }
    /// Checks if the deployment was successful.
    // Uses `matches!` instead of `==` so the predicate can be `const`,
    // consistent with `is_failed`. Adding `const` is backward compatible.
    pub const fn is_success(&self) -> bool {
        matches!(self, &Self::Success)
    }
    /// Checks if the deployment has failed.
    pub const fn is_failed(&self) -> bool {
        matches!(self, &Self::Failed(_))
    }
}
impl fmt::Display for DeploymentStatus {
    // Renders the status and, for failures, the reason.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match self {
            Self::Success => f.write_str("Success"),
            Self::Failed(reason) => write!(f, "Failed: {reason}"),
        }
    }
}
/// Any error converts into a failed status carrying its message.
impl<E> From<E> for DeploymentStatus
where
    E: std::error::Error,
{
    fn from(value: E) -> Self {
        Self::failed(value.to_string())
    }
}
/// Describes the deployment of a profile.
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
pub struct Deployment {
    /// The time the deployment was started.
    time_start: SystemTime,
    /// The time the deployment was finished.
    time_end: SystemTime,
    /// The status of the deployment.
    status: DeploymentStatus,
    /// The dotfiles that were deployed.
    ///
    /// Keyed by the deploy path of each dotfile.
    dotfiles: HashMap<PathBuf, DeployedDotfile>,
    /// The links that were deployed.
    ///
    /// Keyed by the target path of each link.
    symlinks: HashMap<PathBuf, DeployedSymlink>,
}
impl Deployment {
    /// Returns the time the deployment was started.
    pub const fn time_start(&self) -> &SystemTime {
        &self.time_start
    }
    /// Returns the time the deployment was finished.
    pub const fn time_end(&self) -> &SystemTime {
        &self.time_end
    }
    /// Returns the duration the deployment took.
    ///
    /// # Errors
    ///
    /// Fails if the end time is earlier than the start time (e.g. the system
    /// clock moved backwards between the two measurements).
    pub fn duration(&self) -> Result<Duration, SystemTimeError> {
        self.time_end.duration_since(self.time_start)
    }
    /// Returns the status of the deployment.
    pub const fn status(&self) -> &DeploymentStatus {
        &self.status
    }
    /// Returns the dotfiles.
    pub const fn dotfiles(&self) -> &HashMap<PathBuf, DeployedDotfile> {
        &self.dotfiles
    }
    /// Returns the symlinks.
    pub const fn symlinks(&self) -> &HashMap<PathBuf, DeployedSymlink> {
        &self.symlinks
    }
    /// Builds the deployment.
    ///
    /// Returns a fresh [`DeploymentBuilder`].
    pub fn build() -> DeploymentBuilder {
        DeploymentBuilder::default()
    }
}
/// A builder for a [`Deployment`].
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct DeploymentBuilder {
    /// The start time of the deployment.
    ///
    /// This used to keep track of the total execution time of the deployment
    /// process.
    time_start: SystemTime,
    /// All dotfiles which were already processed by the deployment process.
    dotfiles: HashMap<PathBuf, DeployedDotfile>,
    /// All symlinks which were already processed by the deployment process.
    symlinks: HashMap<PathBuf, DeployedSymlink>,
}
impl DeploymentBuilder {
/// Adds a dotfile with the given `status` to the builder.
///
/// `path` is the deploy path; a prior entry at the same path is replaced.
pub fn add_dotfile(
    &mut self,
    path: PathBuf,
    dotfile: Dotfile,
    status: ItemStatus,
) -> &mut Self {
    self.dotfiles.insert(
        path,
        DeployedDotfile {
            kind: DeployedDotfileKind::Dotfile(dotfile),
            status,
        },
    );
    self
}
/// Adds the child of a dotfile directory with the given `status` to the
/// builder.
///
/// `parent` is the deploy path of the containing dotfile directory.
pub fn add_child(&mut self, path: PathBuf, parent: PathBuf, status: ItemStatus) -> &mut Self {
    self.dotfiles.insert(
        path,
        DeployedDotfile {
            kind: DeployedDotfileKind::Child(parent),
            status,
        },
    );
    self
}
/// Adds a symlink with the given `status` to the builder.
///
/// Keyed by `target`; a prior link to the same target is replaced.
pub fn add_link(&mut self, source: PathBuf, target: PathBuf, status: ItemStatus) -> &mut Self {
    self.symlinks
        .insert(target, DeployedSymlink { source, status });
    self
}
/// Checks if the builder already contains a dotfile for the given `path`.
pub fn contains<P: AsRef<Path>>(&self, path: P) -> bool {
    self.dotfiles.contains_key(path.as_ref())
}
/// Gets any dotfile already deployed at `path`.
///
/// This function ignores the status of the dotfile.
pub fn get_dotfile<P: AsRef<Path>>(&self, path: P) -> Option<&Dotfile> {
let mut value = self.dotfiles.get(path.as_ref())?;
loop {
match &value.kind {
DeployedDotfileKind::Dotfile(dotfile) => return Some(dotfile),
DeployedDotfileKind::Child(parent_path) => {
value = self.dotfiles.get(parent_path)?
}
}
}
}
/// Gets any dotfile already deployed at `path`.
///
/// This function only returns a dotfile with [`ItemStatus::Success`].
pub fn get_deployed_dotfile<P: AsRef<Path>>(&self, path: P) -> Option<&Dotfile> {
let mut value = self.dotfiles.get(path.as_ref())?;
loop {
if !value.status.is_success() {
return None;
}
match &value.kind {
DeployedDotfileKind::Dotfile(dotfile) => return Some(dotfile),
DeployedDotfileKind::Child(parent_path) => {
value = self.dotfiles.get(parent_path)?
}
}
}
}
/// Gets the priority of the dotfile already deployed at `path`.
///
/// This function only evaluates a dotfile with [`ItemStatus::Success`].
pub fn get_priority<P: AsRef<Path>>(&self, path: P) -> Option<&Priority> {
self.get_deployed_dotfile(path)
.and_then(|d| d.priority.as_ref())
}
/// Checks if a dotfile was already successfully deployed at `path`.
///
/// This function only evaluates a dotfile with [`ItemStatus::Success`].
pub fn is_deployed<P: AsRef<Path>>(&self, path: P) -> Option<bool> {
self.dotfiles
.get(path.as_ref())
.map(|dotfile| dotfile.status.is_success())
}
/// Consumes self and creates a [`Deployment`] from it.
///
/// This will try to guess the state of the deployment by looking for any
/// failed deployed dotfile.
pub fn finish(self) -> Deployment {
let failed_dotfiles = self
.dotfiles
.values()
.filter(|d| d.status.is_failed())
.count();
let failed_links = self
.symlinks
.values()
.filter(|d| d.status.is_failed())
.count();
let status = if failed_dotfiles > 0 {
DeploymentStatus::failed(format!(
"Deployment of {failed_dotfiles} dotfiles and {failed_links} links failed"
))
} else {
DeploymentStatus::Success
};
Deployment {
time_start: self.time_start,
time_end: SystemTime::now(),
status,
dotfiles: self.dotfiles,
symlinks: self.symlinks,
}
}
/// Consumes self and creates a [`Deployment`] from it.
///
/// This will mark the deployment as success.
pub fn success(self) -> Deployment {
Deployment {
time_start: self.time_start,
time_end: SystemTime::now(),
status: DeploymentStatus::Success,
dotfiles: self.dotfiles,
symlinks: self.symlinks,
}
}
/// Consumes self and creates a [`Deployment`] from it.
///
/// This will mark the deployment as failed with the reason given with
/// `reason`.
pub fn failed<S: Into<Cow<'static, str>>>(self, reason: S) -> Deployment {
Deployment {
time_start: self.time_start,
time_end: SystemTime::now(),
status: DeploymentStatus::Failed(reason.into()),
dotfiles: self.dotfiles,
symlinks: self.symlinks,
}
}
}
impl Default for DeploymentBuilder {
fn default() -> Self {
Self {
time_start: SystemTime::now(),
dotfiles: HashMap::new(),
symlinks: HashMap::new(),
}
}
}
#[cfg(test)]
mod tests {
    use super::*;
    use color_eyre::Result;

    /// Smoke test: a freshly built deployment marked successful reports
    /// success and a non-negative duration.
    #[test]
    fn deployment_builder() -> Result<()> {
        crate::tests::setup_test_env();

        let deployment = Deployment::build().success();

        assert!(deployment.status().is_success());
        assert!(deployment.duration()? >= Duration::from_secs(0));
        Ok(())
    }
}
|
use svm_types::SectionKind;
use crate::{Field, ParseError, ReadExt, WriteExt};
/// On-wire identifier of the `Code` section.
pub const CODE_SECTION: u16 = 0x00_01;
/// On-wire identifier of the `Data` section.
pub const DATA_SECTION: u16 = 0x00_02;
/// On-wire identifier of the `Ctors` section.
pub const CTORS_SECTION: u16 = 0x00_03;
/// On-wire identifier of the `Schema` section.
pub const SCHEMA_SECTION: u16 = 0x00_04;
/// On-wire identifier of the `Api` section.
pub const API_SECTION: u16 = 0x00_05;
/// On-wire identifier of the `Header` section.
pub const HEADER_SECTION: u16 = 0x00_06;
/// On-wire identifier of the `Deploy` section.
pub const DEPLOY_SECTION: u16 = 0x00_07;
/// Encodes `kind` as its big-endian `u16` on-wire identifier, appending the
/// two bytes to `w`.
pub fn encode(kind: SectionKind, w: &mut Vec<u8>) {
    w.write_u16_be(match kind {
        SectionKind::Code => CODE_SECTION,
        SectionKind::Data => DATA_SECTION,
        SectionKind::Ctors => CTORS_SECTION,
        SectionKind::Schema => SCHEMA_SECTION,
        SectionKind::Api => API_SECTION,
        SectionKind::Header => HEADER_SECTION,
        SectionKind::Deploy => DEPLOY_SECTION,
    });
}
/// Decodes a big-endian `u16` section-kind identifier from `cursor`.
///
/// # Errors
///
/// * [`ParseError::NotEnoughBytes`] — fewer than two bytes remain in the
///   cursor.
/// * [`ParseError::InvalidSection`] — the value is not one of the known
///   `*_SECTION` identifiers.
pub fn decode(cursor: &mut std::io::Cursor<&[u8]>) -> Result<SectionKind, ParseError> {
    // Idiomatic error mapping instead of the original `is_err()` check
    // followed by `unwrap()`.
    let value = cursor
        .read_u16_be()
        .map_err(|_| ParseError::NotEnoughBytes(Field::SectionKind))?;
    match value {
        CODE_SECTION => Ok(SectionKind::Code),
        DATA_SECTION => Ok(SectionKind::Data),
        CTORS_SECTION => Ok(SectionKind::Ctors),
        SCHEMA_SECTION => Ok(SectionKind::Schema),
        API_SECTION => Ok(SectionKind::Api),
        HEADER_SECTION => Ok(SectionKind::Header),
        DEPLOY_SECTION => Ok(SectionKind::Deploy),
        _ => Err(ParseError::InvalidSection),
    }
}
|
//! Weechat Infolist module.
use std::ffi::CStr;
use std::os::raw::c_void;
use std::ptr;
use weechat_sys::{t_gui_buffer, t_infolist, t_weechat_plugin};
use crate::{Buffer, LossyCString, Weechat};
use std::borrow::Cow;
/// Weechat Infolist type.
///
/// Wraps a raw `t_infolist` cursor together with the plugin pointer needed
/// to call back into the Weechat C API. The list is freed via
/// `infolist_free` when this value is dropped.
pub struct Infolist {
    // Raw pointer to the underlying infolist, owned by this struct.
    pub(crate) ptr: *mut t_infolist,
    // Plugin pointer used to look up Weechat API function pointers.
    pub(crate) weechat_ptr: *mut t_weechat_plugin,
}
impl Drop for Infolist {
    /// Releases the underlying infolist with Weechat's `infolist_free`.
    fn drop(&mut self) {
        let weechat = Weechat::from_ptr(self.weechat_ptr);
        let free = weechat.get().infolist_free.unwrap();
        // SAFETY: `self.ptr` was obtained from `infolist_get` and is freed
        // exactly once, here, at the end of this struct's lifetime.
        unsafe { free(self.ptr) }
    }
}
impl Weechat {
    /// Get an infolist.
    ///
    /// * `name` - The name of the infolist.
    /// * `arguments` - Optional arguments for the infolist. See the weechat
    ///   C API documentation for valid values.
    ///
    /// Returns an `Infolist` object that behaves like a cursor that can be
    /// moved back and forth to access individual Infolist items, or `None`
    /// if Weechat returned a null pointer.
    pub fn infolist_get(
        &self,
        name: &str,
        arguments: &str,
    ) -> Option<Infolist> {
        // Lossy conversion: interior NULs / invalid UTF-8 are replaced
        // rather than producing an error.
        let name = LossyCString::new(name);
        let arguments = LossyCString::new(arguments);
        let infolist_get = self.get().infolist_get.unwrap();
        // SAFETY: `self.ptr` is a valid plugin pointer, both strings are
        // NUL-terminated, and the (unused) pointer argument is null.
        let ptr = unsafe {
            infolist_get(
                self.ptr,
                name.as_ptr(),
                ptr::null_mut(),
                arguments.as_ptr(),
            )
        };
        if ptr.is_null() {
            None
        } else {
            Some(Infolist {
                ptr,
                weechat_ptr: self.ptr,
            })
        }
    }
}
impl Infolist {
    /// Move the "cursor" to the next item in an infolist.
    ///
    /// Returns `true` if there was a next item.
    pub fn next(&self) -> bool {
        let weechat = Weechat::from_ptr(self.weechat_ptr);
        let infolist_next = weechat.get().infolist_next.unwrap();
        // SAFETY: `self.ptr` is a live infolist owned by this struct.
        let ret = unsafe { infolist_next(self.ptr) };
        ret == 1
    }

    /// Move the "cursor" to the previous item in an infolist.
    ///
    /// Returns `true` if there was a previous item.
    pub fn prev(&self) -> bool {
        let weechat = Weechat::from_ptr(self.weechat_ptr);
        let infolist_prev = weechat.get().infolist_prev.unwrap();
        // SAFETY: `self.ptr` is a live infolist owned by this struct.
        let ret = unsafe { infolist_prev(self.ptr) };
        ret == 1
    }

    /// Get the list of fields for current infolist item.
    ///
    /// Returns a string with the comma separated list of type/name tuple
    /// separated by a colon.
    /// The types are: "i" (integer), "s" (string), "p" (pointer), "b" (buffer),
    /// "t" (time).
    /// Example: "i:my_integer,s:my_string"
    pub fn fields(&self) -> Option<Cow<str>> {
        let weechat = Weechat::from_ptr(self.weechat_ptr);
        let infolist_fields = weechat.get().infolist_fields.unwrap();
        // SAFETY: the returned C string is owned by Weechat; we only borrow
        // it for the lifetime of the Cow produced by `to_string_lossy`.
        unsafe {
            let ret = infolist_fields(self.ptr);
            if ret.is_null() {
                None
            } else {
                Some(CStr::from_ptr(ret).to_string_lossy())
            }
        }
    }

    /// Fetches the raw pointer variable `name` from the current item.
    fn get_pointer(&self, name: &str) -> *mut c_void {
        let weechat = Weechat::from_ptr(self.weechat_ptr);
        let infolist_pointer = weechat.get().infolist_pointer.unwrap();
        let name = LossyCString::new(name);
        // SAFETY: `self.ptr` is live and `name` is NUL-terminated.
        unsafe { infolist_pointer(self.ptr, name.as_ptr()) }
    }

    /// Get the buffer of the current infolist item.
    ///
    /// If the infolist item doesn't have a buffer None is returned.
    pub fn get_buffer(&self) -> Option<Buffer> {
        let ptr = self.get_pointer("buffer");
        if ptr.is_null() {
            None
        } else {
            Some(Buffer::from_ptr(self.weechat_ptr, ptr as *mut t_gui_buffer))
        }
    }

    /// Get the value of a string variable in the current infolist item.
    ///
    /// * `name` - The variable name of the infolist item.
    pub fn get_string(&self, name: &str) -> Option<Cow<str>> {
        let weechat = Weechat::from_ptr(self.weechat_ptr);
        let infolist_string = weechat.get().infolist_string.unwrap();
        let name = LossyCString::new(name);
        // SAFETY: `self.ptr` is live, `name` is NUL-terminated, and the
        // returned C string (if non-null) is only borrowed.
        unsafe {
            let ret = infolist_string(self.ptr, name.as_ptr());
            if ret.is_null() {
                None
            } else {
                Some(CStr::from_ptr(ret).to_string_lossy())
            }
        }
    }
}
|
mod protos;
use futures::*;
use futures::Stream;
use futures::sync::oneshot;
use std::env;
use std::iter;
use std::sync::{Arc};
use std::{io, thread};
use std::io::Read;
use protobuf::RepeatedField;
use grpcio::*;
use protos::multiplay::*;
use protos::multiplay_grpc::{Multiplay, User};
use mongodb::{bson, doc};
use mongodb::oid::ObjectId;
use mongodb::{Client, ThreadedClient};
use mongodb::db::ThreadedDatabase;
/// gRPC handler for the `Multiplay` service.
///
/// Holds a MongoDB client; cloning the service clones the client handle
/// (the legacy mongodb driver shares the connection internally).
#[derive(Clone)]
struct MultiplayService {
    // Connection to the `multiplay-grpc` database.
    client: Client
}
/// gRPC handler for the `User` service (user creation).
#[derive(Clone)]
struct UserService {
    // Connection to the `multiplay-grpc` database.
    client: Client
}
impl Multiplay for MultiplayService {
fn get_users(&mut self,
ctx: RpcContext,
req: GetUsersRequest,
resp: ServerStreamingSink<GetUsersResponse>
) {
println!("{}",req.get_room_id());
let db = self.client.db("multiplay-grpc").clone();
let users = iter::repeat(())
.map(move |()| {
let coll = db.collection("users");
let mut reply = GetUsersResponse::new();
let result_users = coll.find(None, None)
.expect("Failed to get users");
let users_vec: Vec<UserPosition> = result_users
.map(|user| {
let mut user_position = UserPosition::new();
let doc = user.unwrap();
user_position.set_id(doc.get_object_id("_id").unwrap().to_hex());
user_position.set_x(doc.get_f64("x").unwrap());
user_position.set_y(doc.get_f64("y").unwrap());
user_position.set_y(doc.get_f64("z").unwrap());
user_position
})
.collect();
reply.set_users(RepeatedField::from_vec(users_vec));
(reply, WriteFlags::default())
});
let f = resp
.send_all(stream::iter_ok::<_, Error>(users))
.map(|_| {})
.map_err(|e| println!("failed to handle listfeatures request: {:?}", e));
ctx.spawn(f)
}
fn set_position(&mut self,
ctx: RpcContext,
req: RequestStream<SetPositionRequest>,
resp: ClientStreamingSink<SetPositionResponse>
) {
let db = self.client.db("multiplay-grpc").clone();
println!("get!!request");
let f = req.map(move |position| {
let coll = db.collection("users");
println!("Receive: {:?}", position);
let id = position.get_id().to_string();
let filter = doc!{"_id": ObjectId::with_string(&id).unwrap()};
let new_position = doc!{
"$set": {
"x": position.get_x(),
"y": position.get_y(),
"z": position.get_z(),
},
};
let coll_result = coll.find_one_and_update(filter.clone(), new_position, None)
.expect("Faild to get player");
let player = coll_result.expect("result is None");
println!("player : {}",player);
id
})
.collect()
.and_then(|ids| {
let id = ids.first().unwrap();
let mut rep = SetPositionResponse::new();
rep.set_id(id.clone());
rep.set_status("ok".to_string());
resp.success(rep)
})
.map_err(|e| println!("failed to record route: {:?}", e));
ctx.spawn(f)
}
fn connect_position(&mut self,
ctx: RpcContext,
req: RequestStream<ConnectPositionRequest>,
resp: DuplexSink<ConnectPositionResponse>
) {
let db = self.client.db("multiplay-grpc").clone();
let to_send = req
.map(move |position| {
println!("Receive: {:?}", position);
let coll = db.collection("users");
let id = position.get_id().to_string();
let filter = doc!{"_id": ObjectId::with_string(&id).unwrap()};
let new_position = doc!{
"$set": {
"x": position.get_x(),
"y": position.get_y(),
"z": position.get_z(),
},
};
let coll_result = coll.find_one_and_update(filter.clone(), new_position, None)
.expect("Faild to get player");
let player = coll_result.expect("result is None");
println!("player : {}",player);
let result_users = coll.find(None, None)
.expect("Failed to get users");
let users = result_users
.map(move |user| {
let mut user_position = UserPosition::new();
let doc = user.unwrap();
user_position.set_id(doc.get_object_id("_id").unwrap().to_hex());
user_position.set_x(doc.get_f64("x").unwrap());
user_position.set_y(doc.get_f64("y").unwrap());
user_position.set_z(doc.get_f64("z").unwrap());
user_position
})
.collect();
let mut reply = ConnectPositionResponse::new();
reply.set_users(RepeatedField::from_vec(users));
(reply, WriteFlags::default())
});
let f = resp
.send_all(to_send)
.map(|_| {})
.map_err(|e| println!("failed : {:?}", e));
ctx.spawn(f)
}
}
impl User for UserService {
    /// Unary RPC: inserts a new user document at the origin (0,0,0) and
    /// replies with the generated MongoDB ObjectId as a hex string.
    fn create(&mut self, ctx: RpcContext, req: CreateUserRequest, sink: UnarySink<CreateUserResponse>) {
        let coll = self.client.db("multiplay-grpc").collection("users");
        let user_name = req.get_name();
        println!("{}", &user_name);
        // New users always start at the origin.
        let new_user = doc! {
            "name": user_name,
            "x": 0.0,
            "y": 0.0,
            "z": 0.0,
        };
        // NOTE(review): panics on any database error — acceptable for a demo
        // server, but a production handler should fail the RPC instead.
        let result_bson = coll.insert_one(new_user.clone(), None)
            .expect("Failed to insert doc.").inserted_id.expect("Failed to get inserted id");
        let result_id = result_bson.as_object_id().unwrap().to_hex();
        println!("{}", &result_id);
        let mut resp = CreateUserResponse::new();
        resp.set_id(result_id);
        let f = sink
            .success(resp)
            .map_err(move |e| println!("failed to reply {:?}: {:?}", req, e));
        ctx.spawn(f)
    }
}
/// Boots the gRPC server hosting the `User` and `Multiplay` services and
/// blocks until ENTER is pressed on stdin.
fn main() {
    let client = Client::connect("localhost", 27017)
        .expect("Failed to initialize standalone client.");
    let env = Arc::new(Environment::new(1));
    let user_service =
        protos::multiplay_grpc::create_user(UserService { client: client.clone() });
    let multiplay_service =
        protos::multiplay_grpc::create_multiplay(MultiplayService { client: client.clone() });
    // `unwrap_or_else` avoids eagerly allocating the default strings when the
    // env vars are set (clippy `or_fun_call`).
    let host = env::var("RUST_GRPC_HOST").unwrap_or_else(|_| "127.0.0.1".to_string());
    let port = env::var("RUST_GRPC_PORT").unwrap_or_else(|_| "57601".to_string());
    let mut server = ServerBuilder::new(env)
        .register_service(user_service)
        .register_service(multiplay_service)
        .bind(host, port.parse().unwrap())
        .build()
        .unwrap();
    server.start();
    for &(ref host, port) in server.bind_addrs() {
        println!("listening on {}:{}", host, port);
    }
    // Wait for ENTER on a background thread so the main thread can park on
    // the oneshot channel until shutdown is requested.
    let (tx, rx) = oneshot::channel();
    thread::spawn(move || {
        println!("Press ENTER to exit...");
        let _ = io::stdin().read(&mut [0]).unwrap();
        tx.send(())
    });
    let _ = rx.wait();
    let _ = server.shutdown().wait();
}
|
use std::env;

/// Prints the raw `Args` iterator (its `Debug` form) followed by the
/// collected argument vector.
fn main() {
    // The misplaced `#[allow(unused_variables)]` on the `use` item was
    // removed: both bindings here are used, so it suppressed nothing.
    let args = env::args();
    // Renamed from `str`, which shadowed the primitive string type name.
    let arg_vec = env::args().collect::<Vec<String>>();
    println!("{:?}", args);
    println!("{:?}", arg_vec);
}
|
use crate::helper::{
do_accumulator_node, do_get_body_by_hash, do_get_headers, do_get_info_by_hash, do_get_txn_info,
do_state_node,
};
use crate::txn_sync::GetTxnsHandler;
use actix::prelude::*;
use actix::{Actor, Addr, AsyncContext, Context, StreamHandler};
use anyhow::Result;
use chain::ChainActorRef;
use crypto::hash::HashValue;
use logger::prelude::*;
use network::RawRpcRequestMessage;
use starcoin_accumulator::node::AccumulatorStoreType;
use starcoin_accumulator::AccumulatorNode;
use starcoin_canonical_serialization::SCSCodec;
use starcoin_state_tree::StateNode;
use starcoin_storage::Store;
/// Sync message which inbound
use starcoin_sync_api::{BlockBody, GetBlockHeaders, GetBlockHeadersByNumber, SyncRpcRequest};
use std::sync::Arc;
use traits::ChainAsyncService;
use traits::Consensus;
use txpool::TxPoolService;
use types::{
block::{BlockHeader, BlockInfo},
transaction::TransactionInfo,
};
/// Actor that serves inbound sync RPC requests from peers.
pub struct ProcessActor<C>
where
    C: Consensus + Sync + Send + 'static + Clone,
{
    // Shared request processor holding the chain reader, txpool and storage.
    processor: Arc<Processor<C>>,
}
impl<C> ProcessActor<C>
where
    C: Consensus + Sync + Send + 'static + Clone,
{
    /// Creates the actor and subscribes it to the raw RPC request stream.
    ///
    /// Each `RawRpcRequestMessage` arriving on `rpc_rx` is delivered to
    /// `StreamHandler::handle` on this actor.
    pub fn launch(
        chain_reader: ChainActorRef<C>,
        txpool: TxPoolService,
        storage: Arc<dyn Store>,
        rpc_rx: futures::channel::mpsc::UnboundedReceiver<RawRpcRequestMessage>,
    ) -> Result<Addr<ProcessActor<C>>> {
        Ok(ProcessActor::create(
            move |ctx: &mut Context<ProcessActor<C>>| {
                // Register the receiver as an actix stream on this actor.
                ctx.add_stream(rpc_rx);
                ProcessActor {
                    processor: Arc::new(Processor::new(chain_reader, txpool, storage)),
                }
            },
        ))
    }
}
impl<C> Actor for ProcessActor<C>
where
    C: Consensus + Sync + Send + 'static + Clone,
{
    // Plain actix context; no custom lifecycle hooks are needed.
    type Context = Context<Self>;
}
impl<C> StreamHandler<RawRpcRequestMessage> for ProcessActor<C>
where
    C: Consensus + Sync + Send + 'static + Clone,
{
    /// Decodes one inbound sync RPC request and dispatches it asynchronously.
    ///
    /// Requests that fail to decode are silently dropped; each decoded
    /// request is answered through `responder` by the matching `do_*` helper.
    fn handle(&mut self, msg: RawRpcRequestMessage, _ctx: &mut Self::Context) {
        let responder = msg.responder.clone();
        let processor = self.processor.clone();
        if let Ok(req) = SyncRpcRequest::decode(msg.request.as_slice()) {
            // Spawn on the arbiter so slow chain/storage queries don't block
            // the actor's mailbox.
            Arbiter::spawn(async move {
                match req {
                    SyncRpcRequest::GetBlockHeaders(get_block_headers) => {
                        let headers = Processor::handle_get_block_headers_msg(
                            processor.clone(),
                            get_block_headers,
                        )
                        .await;
                        if let Err(e) = do_get_headers(responder, headers).await {
                            error!("do_get_headers request failed : {:?}", e);
                        }
                    }
                    SyncRpcRequest::GetBlockHeadersByNumber(get_block_headers_by_number) => {
                        let headers = Processor::handle_get_block_headers_by_number_msg(
                            processor.clone(),
                            get_block_headers_by_number,
                        )
                        .await;
                        if let Err(e) = do_get_headers(responder, headers).await {
                            error!("do_get_headers request failed : {:?}", e);
                        }
                    }
                    SyncRpcRequest::GetBlockInfos(hashs) => {
                        let infos =
                            Processor::handle_get_block_info_by_hash_msg(processor.clone(), hashs)
                                .await;
                        if let Err(e) = do_get_info_by_hash(responder, infos).await {
                            error!("do_get_info_by_hash request failed : {:?}", e);
                        }
                    }
                    SyncRpcRequest::GetBlockBodies(hashs) => {
                        let bodies =
                            Processor::handle_get_body_by_hash_msg(processor.clone(), hashs).await;
                        if let Err(e) = do_get_body_by_hash(responder, bodies).await {
                            error!("do_get_body_by_hash request failed : {:?}", e);
                        }
                    }
                    SyncRpcRequest::GetStateNodeByNodeHash(state_node_key) => {
                        // The handler is batch-oriented; wrap the single key
                        // in a Vec and unwrap the single result.
                        let mut keys = Vec::new();
                        keys.push(state_node_key);
                        let mut state_nodes =
                            Processor::handle_state_node_msg(processor.clone(), keys).await;
                        if let Some((_, state_node_res)) = state_nodes.pop() {
                            if let Some(state_node) = state_node_res {
                                if let Err(e) = do_state_node(responder, state_node).await {
                                    error!("do state_node request failed : {:?}", e);
                                }
                            } else {
                                debug!("{:?}", "state_node is none.");
                            }
                        } else {
                            debug!("{:?}", "state_nodes is none.");
                        }
                    }
                    SyncRpcRequest::GetAccumulatorNodeByNodeHash(
                        accumulator_node_key,
                        accumulator_type,
                    ) => {
                        // Same single-key-via-batch pattern as the state node
                        // case above.
                        let mut keys = Vec::new();
                        keys.push(accumulator_node_key);
                        let mut accumulator_nodes = Processor::handle_accumulator_node_msg(
                            processor.clone(),
                            keys,
                            accumulator_type,
                        )
                        .await;
                        if let Some((_, accumulator_node_res)) = accumulator_nodes.pop() {
                            if let Some(accumulator_node) = accumulator_node_res {
                                if let Err(e) =
                                    do_accumulator_node(responder, accumulator_node).await
                                {
                                    error!("do accumulator_node request failed : {:?}", e);
                                }
                            } else {
                                debug!("accumulator_node {:?} is none.", accumulator_node_key);
                            }
                        } else {
                            debug!("{:?}", "accumulator_nodes is none.");
                        }
                    }
                    SyncRpcRequest::GetTxns(msg) => {
                        let handler = GetTxnsHandler::new(
                            processor.txpool.clone(),
                            processor.storage.clone(),
                        );
                        let result = handler.handle(responder, msg).await;
                        if let Err(e) = result {
                            warn!("handle get txn fail, error: {:?}", e);
                        }
                    }
                    SyncRpcRequest::GetTxnInfo(txn_info_hash) => {
                        let txn_info =
                            Processor::handle_get_txn_info_msg(processor.clone(), txn_info_hash)
                                .await;
                        if let Err(e) = do_get_txn_info(responder, txn_info).await {
                            error!("do_get_headers request failed : {:?}", e);
                        }
                    }
                }
            });
        }
    }
}
/// Process request for syncing block
///
/// Stateless query engine backing [`ProcessActor`]: reads headers, bodies,
/// infos, transactions and tree nodes from the chain reader and storage.
pub struct Processor<C>
where
    C: Consensus + Sync + Send + 'static + Clone,
{
    // Handle to the chain actor for header/body/info queries.
    chain_reader: ChainActorRef<C>,
    // Transaction pool, used by the GetTxns handler.
    txpool: TxPoolService,
    // Persistent storage for transaction infos and tree nodes.
    storage: Arc<dyn Store>,
}
impl<C> Processor<C>
where
    C: Consensus + Sync + Send + 'static + Clone,
{
    /// Creates a processor over the given chain reader, txpool and storage.
    pub fn new(
        chain_reader: ChainActorRef<C>,
        txpool: TxPoolService,
        storage: Arc<dyn Store>,
    ) -> Self {
        Processor {
            chain_reader,
            txpool,
            storage,
        }
    }

    /// Collects up to `max_size` headers walking backwards from `number` in
    /// decrements of `step`, stopping at genesis (number 0) or at the first
    /// header the chain reader cannot provide.
    pub async fn handle_get_block_headers_by_number_msg(
        processor: Arc<Processor<C>>,
        get_block_headers_by_number: GetBlockHeadersByNumber,
    ) -> Vec<BlockHeader> {
        let mut headers = Vec::new();
        let mut last_number = get_block_headers_by_number.number;
        while headers.len() < get_block_headers_by_number.max_size {
            if let Ok(header) = processor
                .chain_reader
                .clone()
                .master_block_header_by_number(last_number)
                .await
            {
                headers.push(header);
            } else {
                // The chain does not extend this far back; stop early.
                break;
            }
            if last_number == 0 {
                break;
            }
            // Saturating step-down so the walk terminates at genesis.
            last_number = if last_number > get_block_headers_by_number.step as u64 {
                last_number - get_block_headers_by_number.step as u64
            } else {
                0
            }
        }
        headers
    }

    /// Collects up to `max_size` headers starting from the block identified
    /// by `block_id`, walking forwards or backwards by `step` depending on
    /// the `reverse` flag. Returns an empty Vec if the anchor is unknown.
    pub async fn handle_get_block_headers_msg(
        processor: Arc<Processor<C>>,
        get_block_headers: GetBlockHeaders,
    ) -> Vec<BlockHeader> {
        let mut headers = Vec::new();
        if let Ok(Some(header)) = processor
            .chain_reader
            .clone()
            .get_header_by_hash(&get_block_headers.block_id)
            .await
        {
            let mut last_number = header.number();
            while headers.len() < get_block_headers.max_size {
                let block_number = if get_block_headers.reverse {
                    // Saturating decrement so we never underflow past 0.
                    if last_number > get_block_headers.step as u64 {
                        last_number - get_block_headers.step as u64
                    } else {
                        0
                    }
                } else {
                    // NOTE(review): forward walk can overflow u64 in theory;
                    // unchanged because block numbers stay far below u64::MAX.
                    last_number + get_block_headers.step as u64
                };
                if let Ok(header) = processor
                    .chain_reader
                    .clone()
                    .master_block_header_by_number(block_number)
                    .await
                {
                    headers.push(header);
                } else {
                    break;
                }
                if block_number == 0 {
                    break;
                }
                last_number = block_number;
            }
        }
        headers
    }

    /// Looks up the transaction info for `txn_info_hash`; storage errors are
    /// flattened into `None`.
    pub async fn handle_get_txn_info_msg(
        processor: Arc<Processor<C>>,
        txn_info_hash: HashValue,
    ) -> Option<TransactionInfo> {
        if let Ok(txn_info) = processor.storage.get_transaction_info(txn_info_hash) {
            txn_info
        } else {
            None
        }
    }

    /// Builds a [`BlockBody`] for each requested hash; unknown blocks yield a
    /// body with an empty transaction list rather than being skipped.
    pub async fn handle_get_body_by_hash_msg(
        processor: Arc<Processor<C>>,
        hashs: Vec<HashValue>,
    ) -> Vec<BlockBody> {
        let mut bodies = Vec::new();
        for hash in hashs {
            let transactions = match processor.chain_reader.clone().get_block_by_hash(hash).await {
                Ok(block) => block.transactions().to_vec(),
                _ => Vec::new(),
            };
            let body = BlockBody { transactions, hash };
            bodies.push(body);
        }
        bodies
    }

    /// Returns the [`BlockInfo`] for each known hash; unknown or failed
    /// lookups are silently skipped (result may be shorter than the input).
    pub async fn handle_get_block_info_by_hash_msg(
        processor: Arc<Processor<C>>,
        hashs: Vec<HashValue>,
    ) -> Vec<BlockInfo> {
        let mut infos = Vec::new();
        for hash in hashs {
            if let Ok(Some(block_info)) = processor
                .chain_reader
                .clone()
                .get_block_info_by_hash(&hash)
                .await
            {
                infos.push(block_info);
            }
        }
        infos
    }

    /// Fetches state-tree nodes for the given hashes, pairing each hash with
    /// its (possibly absent) node; storage errors are logged and skipped.
    pub async fn handle_state_node_msg(
        processor: Arc<Processor<C>>,
        nodes_hash: Vec<HashValue>,
    ) -> Vec<(HashValue, Option<StateNode>)> {
        let mut state_nodes = Vec::new();
        nodes_hash
            .iter()
            .for_each(|node_key| match processor.storage.get(node_key) {
                Ok(node) => state_nodes.push((*node_key, node)),
                Err(e) => error!("handle state_node {:?} err : {:?}", node_key, e),
            });
        state_nodes
    }

    /// Fetches accumulator nodes of the given store type for the given
    /// hashes; storage errors are logged and skipped.
    pub async fn handle_accumulator_node_msg(
        processor: Arc<Processor<C>>,
        nodes_hash: Vec<HashValue>,
        accumulator_type: AccumulatorStoreType,
    ) -> Vec<(HashValue, Option<AccumulatorNode>)> {
        let mut accumulator_nodes = Vec::new();
        nodes_hash.iter().for_each(|node_key| {
            match processor
                .storage
                .get_node(accumulator_type.clone(), *node_key)
            {
                Ok(node) => accumulator_nodes.push((*node_key, node)),
                Err(e) => error!("handle accumulator_node {:?} err : {:?}", node_key, e),
            }
        });
        accumulator_nodes
    }
}
|
//! A thread-safe metrics library.
//!
//! Many programs need to information about runtime performance: the number of requests
//! served, a distribution of request latency, the number of failures, the number of loop
//! iterations, etc. `tacho::new` creates a shareable, scopable metrics registry and a
//! `Reporter`. The `Scope` supports the creation of `Counter`, `Gauge`, and `Stat`
//! handles that may be used to report values. Each of these receivers maintains a
//! reference back to the central stats registry.
//!
//! ## Performance
//!
//! Labels are stored in a `BTreeMap` because they are used as hash keys and, therefore,
//! need to implement `Hash`.
#![cfg_attr(test, feature(test))]
extern crate hdrsample;
#[macro_use]
extern crate log;
extern crate ordermap;
extern crate parking_lot;
#[cfg(test)]
extern crate test;
use hdrsample::Histogram;
use ordermap::OrderMap;
use parking_lot::Mutex;
use std::collections::BTreeMap;
use std::fmt;
use std::sync::Arc;
use std::sync::atomic::{AtomicUsize, Ordering};
mod report;
pub use report::{Reporter, Report};
/// Label set attached to a metric; a `BTreeMap` so it can derive `Hash`/`Ord`.
type Labels = BTreeMap<&'static str, String>;
/// Registered counters, keyed by fully-qualified metric key.
type CounterMap = OrderMap<Key, Arc<AtomicUsize>>;
/// Registered gauges, keyed by fully-qualified metric key.
type GaugeMap = OrderMap<Key, Arc<AtomicUsize>>;
/// Registered stats (histograms with sums), keyed by fully-qualified key.
type StatMap = OrderMap<Key, Arc<Mutex<HistogramWithSum>>>;
/// Creates a metrics registry.
///
/// The returned `Scope` may be used to instantiate metrics. Labels may be
/// attached to the scope so that all metrics created by this `Scope` are
/// annotated.
///
/// The returned `Reporter` supports consumption of metrics values.
pub fn new() -> (Scope, Reporter) {
    let registry = Arc::new(Mutex::new(Registry::default()));
    let reporter = Reporter::new(registry.clone());
    let scope = Scope {
        prefix: Vec::new(),
        labels: Labels::new(),
        registry,
    };
    (scope, reporter)
}
/// Describes a metric.
///
/// A key is the full identity of a metric in the registry: base name plus
/// scope prefix plus label set. Metrics sharing a name but differing in
/// labels or prefix are distinct registry entries.
#[derive(Clone, Debug, Hash, PartialEq, Eq, PartialOrd, Ord)]
pub struct Key {
    name: &'static str,
    prefix: Vec<&'static str>,
    labels: Labels,
}
impl Key {
    /// Builds a key from a metric name, scope prefix, and label set.
    fn new(name: &'static str, prefix: Vec<&'static str>, labels: Labels) -> Key {
        Key { name, prefix, labels }
    }

    /// The metric's base name.
    pub fn name(&self) -> &'static str {
        self.name
    }

    /// Scope prefixes applied to this metric, outermost first.
    pub fn prefix(&self) -> &[&'static str] {
        &self.prefix
    }

    /// Labels attached to this metric.
    pub fn labels(&self) -> &Labels {
        &self.labels
    }
}
/// Central store of all registered metrics, shared behind `Arc<Mutex<_>>`
/// by every `Scope` clone and by the `Reporter`.
#[derive(Default)]
struct Registry {
    counters: CounterMap,
    gauges: GaugeMap,
    stats: StatMap,
}
/// Supports creation of scoped metrics.
///
/// `Scope`s may be cloned without copying the underlying metrics registry.
///
/// Labels may be attached to the scope so that all metrics created by the `Scope` are
/// labeled.
#[derive(Clone)]
pub struct Scope {
    // Prefix segments applied to every metric created from this scope.
    prefix: Vec<&'static str>,
    // Labels applied to every metric created from this scope.
    labels: Labels,
    // Shared registry; clones of this Scope share the same registry.
    registry: Arc<Mutex<Registry>>,
}
impl Scope {
    /// Accesses scoping labels.
    pub fn labels(&self) -> &Labels {
        &self.labels
    }

    /// Adds a label into scope (potentially overwriting).
    pub fn labeled<D: fmt::Display>(mut self, k: &'static str, v: D) -> Self {
        self.labels.insert(k, format!("{}", v));
        self
    }

    /// Appends a prefix to the current scope.
    pub fn prefixed(mut self, value: &'static str) -> Self {
        self.prefix.push(value);
        self
    }

    /// Creates a Counter with the given name, reusing any counter already
    /// registered under the same key.
    pub fn counter(&self, name: &'static str) -> Counter {
        let key = Key::new(name, self.prefix.clone(), self.labels.clone());
        let mut reg = self.registry.lock();
        // Entry API: one hash lookup for both the hit and miss paths
        // (the original did a `get` followed by an `insert`).
        let c = reg
            .counters
            .entry(key)
            .or_insert_with(|| Arc::new(AtomicUsize::new(0)))
            .clone();
        Counter(c)
    }

    /// Creates a Gauge with the given name, reusing any gauge already
    /// registered under the same key.
    pub fn gauge(&self, name: &'static str) -> Gauge {
        let key = Key::new(name, self.prefix.clone(), self.labels.clone());
        let mut reg = self.registry.lock();
        let g = reg
            .gauges
            .entry(key)
            .or_insert_with(|| Arc::new(AtomicUsize::new(0)))
            .clone();
        Gauge(g)
    }

    /// Creates a Stat with the given name.
    ///
    /// The underlying histogram is automatically resized as values are added.
    pub fn stat(&self, name: &'static str) -> Stat {
        let key = Key::new(name, self.prefix.clone(), self.labels.clone());
        self.mk_stat(key, None)
    }

    /// Creates a Stat with the given name and histogram parameters.
    pub fn stat_with_bounds(&self, name: &'static str, low: u64, high: u64) -> Stat {
        let key = Key::new(name, self.prefix.clone(), self.labels.clone());
        self.mk_stat(key, Some((low, high)))
    }

    /// Registers (or reuses) the histogram for `key`, wrapped in a `Stat`
    /// carrying the requested bounds.
    fn mk_stat(&self, key: Key, bounds: Option<(u64, u64)>) -> Stat {
        let mut reg = self.registry.lock();
        let histo = reg
            .stats
            .entry(key)
            .or_insert_with(|| Arc::new(Mutex::new(HistogramWithSum::new(bounds))))
            .clone();
        Stat { histo, bounds }
    }
}
impl fmt::Debug for Scope {
    /// Debug output deliberately omits `registry`, which has no useful
    /// `Debug` representation.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        f.debug_struct("Scope")
            .field("prefix", &self.prefix)
            .field("labels", &self.labels)
            .finish()
    }
}
/// Counts monotonically.
///
/// Clones share the same underlying atomic value.
#[derive(Clone)]
pub struct Counter(Arc<AtomicUsize>);

impl Counter {
    /// Adds `n` to the counter.
    pub fn incr(&self, n: usize) {
        self.0.fetch_add(n, Ordering::AcqRel);
    }
}
/// Captures an instantaneous value.
///
/// Clones share the same underlying atomic value.
#[derive(Clone)]
pub struct Gauge(Arc<AtomicUsize>);

impl Gauge {
    /// Increases the gauge by `n`.
    pub fn incr(&self, n: usize) {
        self.0.fetch_add(n, Ordering::AcqRel);
    }

    /// Decreases the gauge by `n`.
    pub fn decr(&self, n: usize) {
        self.0.fetch_sub(n, Ordering::AcqRel);
    }

    /// Overwrites the gauge with `n`.
    pub fn set(&self, n: usize) {
        self.0.store(n, Ordering::Release);
    }
}
/// Histograms hold up to 4 significant figures.
const HISTOGRAM_PRECISION: u8 = 4;

/// Tracks a distribution of values with their sum.
///
/// `hdrsample::Histogram` does not track a sum by default; but prometheus expects a `sum`
/// for histograms.
#[derive(Clone)]
pub struct HistogramWithSum {
    // Underlying HDR histogram of recorded values.
    histogram: Histogram<u64>,
    // Saturating running total of all recorded values.
    sum: u64,
}
impl HistogramWithSum {
/// Constructs a new `HistogramWithSum`, possibly with bounds.
fn new(bounds: Option<(u64, u64)>) -> Self {
let h = match bounds {
None => Histogram::<u64>::new(HISTOGRAM_PRECISION),
Some((l, h)) => Histogram::<u64>::new_with_bounds(l, h, HISTOGRAM_PRECISION),
};
let histogram = h.expect("failed to create histogram");
HistogramWithSum { histogram, sum: 0 }
}
/// Record a value to
fn record(&mut self, v: u64) {
if let Err(e) = self.histogram.record(v) {
error!("failed to add value to histogram: {:?}", e);
}
self.sum = self.sum.saturating_add(v);
}
pub fn histogram(&self) -> &Histogram<u64> {
&self.histogram
}
pub fn count(&self) -> u64 {
self.histogram.count()
}
pub fn max(&self) -> u64 {
self.histogram.max()
}
pub fn min(&self) -> u64 {
self.histogram.min()
}
pub fn sum(&self) -> u64 {
self.sum
}
pub fn clear(&mut self) {
self.histogram.reset();
self.sum = 0;
}
}
/// Captures a distribution of values.
///
/// Clones share the underlying histogram; `bounds` remembers the histogram
/// bounds the stat was created with.
#[derive(Clone)]
pub struct Stat {
    histo: Arc<Mutex<HistogramWithSum>>,
    bounds: Option<(u64, u64)>,
}
impl Stat {
    /// Records a single value into the distribution.
    pub fn add(&self, v: u64) {
        self.histo.lock().record(v);
    }

    /// Records every value in `vs` under a single lock acquisition.
    pub fn add_values(&mut self, vs: &[u64]) {
        let mut histo = self.histo.lock();
        for &v in vs {
            histo.record(v);
        }
    }
}
#[cfg(test)]
mod tests {
use super::*;
use test::{Bencher, black_box};
static DEFAULT_METRIC_NAME: &'static str = "a_sufficiently_long_name";
#[bench]
fn bench_scope_clone(b: &mut Bencher) {
let (metrics, _) = super::new();
b.iter(move || black_box(metrics.clone()));
}
#[bench]
fn bench_scope_label(b: &mut Bencher) {
let (metrics, _) = super::new();
b.iter(move || { black_box(metrics.clone().labeled("foo", "bar")) });
}
#[bench]
fn bench_scope_clone_x1000(b: &mut Bencher) {
let scopes = mk_scopes(1000, "bench_scope_clone_x1000");
b.iter(move || for scope in &scopes {
black_box(scope.clone());
});
}
#[bench]
fn bench_scope_label_x1000(b: &mut Bencher) {
let scopes = mk_scopes(1000, "bench_scope_label_x1000");
b.iter(move || for scope in &scopes {
black_box(scope.clone().labeled("foo", "bar"));
});
}
#[bench]
fn bench_counter_create(b: &mut Bencher) {
let (metrics, _) = super::new();
b.iter(move || black_box(metrics.counter(DEFAULT_METRIC_NAME)));
}
#[bench]
fn bench_gauge_create(b: &mut Bencher) {
let (metrics, _) = super::new();
b.iter(move || black_box(metrics.gauge(DEFAULT_METRIC_NAME)));
}
#[bench]
fn bench_stat_create(b: &mut Bencher) {
let (metrics, _) = super::new();
b.iter(move || black_box(metrics.stat(DEFAULT_METRIC_NAME)));
}
#[bench]
fn bench_counter_create_x1000(b: &mut Bencher) {
let scopes = mk_scopes(1000, "bench_counter_create_x1000");
b.iter(move || for scope in &scopes {
black_box(scope.counter(DEFAULT_METRIC_NAME));
});
}
#[bench]
fn bench_gauge_create_x1000(b: &mut Bencher) {
let scopes = mk_scopes(1000, "bench_gauge_create_x1000");
b.iter(move || for scope in &scopes {
black_box(scope.gauge(DEFAULT_METRIC_NAME));
});
}
#[bench]
fn bench_stat_create_x1000(b: &mut Bencher) {
let scopes = mk_scopes(1000, "bench_stat_create_x1000");
b.iter(move || for scope in &scopes {
black_box(scope.stat(DEFAULT_METRIC_NAME));
});
}
#[bench]
fn bench_counter_update(b: &mut Bencher) {
let (metrics, _) = super::new();
let c = metrics.counter(DEFAULT_METRIC_NAME);
b.iter(move || {
c.incr(1);
black_box(&c);
});
}
#[bench]
fn bench_gauge_update(b: &mut Bencher) {
let (metrics, _) = super::new();
let g = metrics.gauge(DEFAULT_METRIC_NAME);
b.iter(move || {
g.set(1);
black_box(&g);
});
}
#[bench]
fn bench_stat_update(b: &mut Bencher) {
let (scope, _) = super::new();
let s = scope.stat(DEFAULT_METRIC_NAME);
b.iter(move || {
s.add(1);
black_box(&s);
});
}
#[bench]
fn bench_counter_update_x1000(b: &mut Bencher) {
    // Increment 1000 counters (one per labeled scope) per iteration.
    let scopes = mk_scopes(1000, "bench_counter_update_x1000");
    let counters: Vec<Counter> = scopes
        .iter()
        .map(|s| s.counter(DEFAULT_METRIC_NAME))
        .collect();
    b.iter(move || {
        counters.iter().for_each(|c| c.incr(1));
        black_box(&counters);
    });
}
#[bench]
fn bench_gauge_update_x1000(b: &mut Bencher) {
    // Set 1000 gauges (one per labeled scope) per iteration.
    let scopes = mk_scopes(1000, "bench_gauge_update_x1000");
    let gauges: Vec<Gauge> = scopes
        .iter()
        .map(|s| s.gauge(DEFAULT_METRIC_NAME))
        .collect();
    b.iter(move || {
        gauges.iter().for_each(|g| g.set(1));
        black_box(&gauges);
    });
}
#[bench]
fn bench_stat_update_x1000(b: &mut Bencher) {
    // Add to 1000 stats (one per labeled scope) per iteration.
    let scopes = mk_scopes(1000, "bench_stat_update_x1000");
    let stats: Vec<Stat> = scopes
        .iter()
        .map(|s| s.stat(DEFAULT_METRIC_NAME))
        .collect();
    b.iter(move || {
        stats.iter().for_each(|s| s.add(1));
        black_box(&stats);
    });
}
#[bench]
fn bench_stat_add_x1000(b: &mut Bencher) {
    // 1000 adds with distinct values into one stat per iteration.
    let (scope, _) = super::new();
    let stat = scope.stat(DEFAULT_METRIC_NAME);
    b.iter(move || {
        (0..1000).for_each(|v| stat.add(v));
        black_box(&stat);
    });
}
/// Build `n` sibling scopes, each labeled with the benchmark name, the total
/// iteration count, and its own iteration index.
fn mk_scopes(n: usize, name: &str) -> Vec<Scope> {
    let (metrics, _) = super::new();
    let base = metrics
        .prefixed("t")
        .labeled("test_name", name)
        .labeled("total_iterations", n);
    (0..n)
        .map(|i| base.clone().labeled("iteration", format!("{}", i)))
        .collect()
}
#[test]
fn test_report_peek() {
let (metrics, reporter) = super::new();
let metrics = metrics.labeled("joy", "painting");
let happy_accidents = metrics.counter("happy_accidents");
let paint_level = metrics.gauge("paint_level");
let mut stroke_len = metrics.stat("stroke_len");
happy_accidents.incr(1);
paint_level.set(2);
stroke_len.add_values(&[1, 2, 3]);
{
let report = reporter.peek();
{
let k = report
.counters()
.keys()
.find(|k| k.name() == "happy_accidents")
.expect("expected counter: happy_accidents");
assert_eq!(k.labels.get("joy"), Some(&"painting".to_string()));
assert_eq!(report.counters().get(&k), Some(&1));
}
{
let k = report
.gauges()
.keys()
.find(|k| k.name() == "paint_level")
.expect("expected gauge: paint_level");
assert_eq!(k.labels.get("joy"), Some(&"painting".to_string()));
assert_eq!(report.gauges().get(&k), Some(&2));
}
assert_eq!(
report.gauges().keys().find(|k| k.name() == "brush_width"),
None
);
{
let k = report
.stats()
.keys()
.find(|k| k.name() == "stroke_len")
.expect("expected stat: stroke_len");
assert_eq!(k.labels.get("joy"), Some(&"painting".to_string()));
assert!(report.stats().contains_key(&k));
}
assert_eq!(report.stats().keys().find(|k| k.name() == "tree_len"), None);
}
drop(paint_level);
let brush_width = metrics.gauge("brush_width");
let mut tree_len = metrics.stat("tree_len");
happy_accidents.incr(2);
brush_width.set(5);
stroke_len.add_values(&[1, 2, 3]);
tree_len.add_values(&[3, 4, 5]);
{
let report = reporter.peek();
{
let k = report
.counters()
.keys()
.find(|k| k.name() == "happy_accidents")
.expect("expected counter: happy_accidents");
assert_eq!(k.labels.get("joy"), Some(&"painting".to_string()));
assert_eq!(report.counters().get(&k), Some(&3));
}
{
let k = report
.gauges()
.keys()
.find(|k| k.name() == "paint_level")
.expect("expected gauge: paint_level");
assert_eq!(k.labels.get("joy"), Some(&"painting".to_string()));
assert_eq!(report.gauges().get(&k), Some(&2));
}
{
let k = report
.gauges()
.keys()
.find(|k| k.name() == "brush_width")
.expect("expected gauge: brush_width");
assert_eq!(k.labels.get("joy"), Some(&"painting".to_string()));
assert_eq!(report.gauges().get(&k), Some(&5));
}
{
let k = report
.stats()
.keys()
.find(|k| k.name() == "stroke_len")
.expect("expected stat: stroke_len");
assert_eq!(k.labels.get("joy"), Some(&"painting".to_string()));
assert!(report.stats().contains_key(&k));
}
{
let k = report
.stats()
.keys()
.find(|k| k.name() == "tree_len")
.expect("expected stat: tree_len");
assert_eq!(k.labels.get("joy"), Some(&"painting".to_string()));
assert!(report.stats().contains_key(&k));
}
}
}
#[test]
fn test_report_take() {
    // `take` semantics exercised below: counter values accumulate across
    // takes (1, then 1, then 3); metrics whose handles were dropped are
    // reported once more on the next take and disappear on the one after.
    let (metrics, mut reporter) = super::new();
    let metrics = metrics.labeled("joy", "painting");
    let happy_accidents = metrics.counter("happy_accidents");
    let paint_level = metrics.gauge("paint_level");
    let mut stroke_len = metrics.stat("stroke_len");
    happy_accidents.incr(1);
    paint_level.set(2);
    stroke_len.add_values(&[1, 2, 3]);
    {
        let report = reporter.take();
        {
            let k = report
                .counters()
                .keys()
                .find(|k| k.name() == "happy_accidents")
                .expect("expected counter: happy_accidents");
            assert_eq!(k.labels.get("joy"), Some(&"painting".to_string()));
            assert_eq!(report.counters().get(&k), Some(&1));
        }
        {
            let k = report
                .gauges()
                .keys()
                .find(|k| k.name() == "paint_level")
                .expect("expected gauge");
            assert_eq!(k.labels.get("joy"), Some(&"painting".to_string()));
            assert_eq!(report.gauges().get(&k), Some(&2));
        }
        assert_eq!(
            report.gauges().keys().find(|k| k.name() == "brush_width"),
            None
        );
        {
            let k = report
                .stats()
                .keys()
                .find(|k| k.name() == "stroke_len")
                .expect("expected stat");
            assert_eq!(k.labels.get("joy"), Some(&"painting".to_string()));
            assert!(report.stats().contains_key(&k));
        }
        assert_eq!(report.stats().keys().find(|k| k.name() == "tree_len"), None);
        // (An exact copy-paste duplicate of the stroke_len check used to live
        // here; it asserted nothing new and has been removed.)
    }
    drop(paint_level);
    drop(stroke_len);
    // Dropped handles are still reported on the take immediately after the
    // drop, with their last values.
    {
        let report = reporter.take();
        {
            let counters = report.counters();
            let k = counters
                .keys()
                .find(|k| k.name() == "happy_accidents")
                .expect("expected counter: happy_accidents");
            assert_eq!(k.labels.get("joy"), Some(&"painting".to_string()));
            assert_eq!(counters.get(&k), Some(&1));
        }
        {
            let k = report
                .gauges()
                .keys()
                .find(|k| k.name() == "paint_level")
                .expect("expected gauge");
            assert_eq!(k.labels.get("joy"), Some(&"painting".to_string()));
            assert_eq!(report.gauges().get(&k), Some(&2));
        }
        {
            let k = report
                .stats()
                .keys()
                .find(|k| k.name() == "stroke_len")
                .expect("expected stat");
            assert_eq!(k.labels.get("joy"), Some(&"painting".to_string()));
            assert!(report.stats().contains_key(&k));
        }
    }
    let brush_width = metrics.gauge("brush_width");
    let mut tree_len = metrics.stat("tree_len");
    happy_accidents.incr(2);
    brush_width.set(5);
    tree_len.add_values(&[3, 4, 5]);
    // On the take after that, the dropped metrics are gone for good.
    {
        let report = reporter.take();
        {
            let k = report
                .counters()
                .keys()
                .find(|k| k.name() == "happy_accidents")
                .expect("expected counter: happy_accidents");
            assert_eq!(k.labels.get("joy"), Some(&"painting".to_string()));
            assert_eq!(report.counters().get(&k), Some(&3));
        }
        assert_eq!(
            report.gauges().keys().find(|k| k.name() == "paint_level"),
            None
        );
        {
            let k = report
                .gauges()
                .keys()
                .find(|k| k.name() == "brush_width")
                .expect("expected gauge");
            assert_eq!(k.labels.get("joy"), Some(&"painting".to_string()));
            assert_eq!(report.gauges().get(&k), Some(&5));
        }
        assert_eq!(
            report.stats().keys().find(|k| k.name() == "stroke_len"),
            None
        );
        {
            let k = report
                .stats()
                .keys()
                .find(|k| k.name() == "tree_len")
                .expect("expected stat");
            assert_eq!(k.labels.get("joy"), Some(&"painting".to_string()));
            assert!(report.stats().contains_key(&k));
        }
    }
}
}
|
use anyhow::Result;
use wasmtime::{Linker, Trap};
use super::namespace_matches_filter;
use crate::state::ProcessState;
// Register WASI APIs to the linker
pub(crate) fn register(
linker: &mut Linker<ProcessState>,
namespace_filter: &[String],
) -> Result<()> {
// Add all WASI functions at first
wasmtime_wasi::sync::snapshots::preview_1::add_wasi_snapshot_preview1_to_linker(
linker,
|ctx| &mut ctx.wasi,
)?;
// Override all functions not matched with a trap implementation.
if !namespace_matches_filter("wasi_snapshot_preview1", "args_get", namespace_filter) {
linker.func_wrap("wasi_snapshot_preview1", "args_get", |_: i32, _: i32| -> Result<i32, Trap> {
Err(Trap::new("Host function `wasi_snapshot_preview1::args_get` unavailable in this environment."))
})?;
}
if !namespace_matches_filter("wasi_snapshot_preview1", "args_sizes_get", namespace_filter) {
linker.func_wrap("wasi_snapshot_preview1", "args_sizes_get", |_: i32, _: i32| -> Result<i32, Trap> {
Err(Trap::new("Host function `wasi_snapshot_preview1::args_sizes_get` unavailable in this environment."))
})?;
}
if !namespace_matches_filter("wasi_snapshot_preview1", "clock_res_get", namespace_filter) {
linker.func_wrap("wasi_snapshot_preview1", "clock_res_get", |_: i32, _: i32| -> Result<i32, Trap> {
Err(Trap::new("Host function `wasi_snapshot_preview1::clock_res_get` unavailable in this environment."))
})?;
}
if !namespace_matches_filter("wasi_snapshot_preview1", "clock_time_get", namespace_filter) {
linker.func_wrap("wasi_snapshot_preview1", "clock_time_get", |_: i32, _:i64, _: i32| -> Result<i32, Trap> {
Err(Trap::new("Host function `wasi_snapshot_preview1::clock_time_get` unavailable in this environment."))
})?;
}
if !namespace_matches_filter("wasi_snapshot_preview1", "environ_get", namespace_filter) {
linker.func_wrap("wasi_snapshot_preview1", "environ_get", |_: i32, _: i32| -> Result<i32, Trap> {
Err(Trap::new("Host function `wasi_snapshot_preview1::environ_get` unavailable in this environment."))
})?;
}
if !namespace_matches_filter("wasi_snapshot_preview1", "fd_advise", namespace_filter) {
linker.func_wrap("wasi_snapshot_preview1", "fd_advise", |_: i32, _:i64, _:i64, _: i32| -> Result<i32, Trap> {
Err(Trap::new("Host function `wasi_snapshot_preview1::fd_advise` unavailable in this environment."))
})?;
}
if !namespace_matches_filter("wasi_snapshot_preview1", "fd_allocate", namespace_filter) {
linker.func_wrap("wasi_snapshot_preview1", "fd_allocate", |_: i32, _:i64, _:i64| -> Result<i32, Trap> {
Err(Trap::new("Host function `wasi_snapshot_preview1::fd_allocate` unavailable in this environment."))
})?;
}
if !namespace_matches_filter("wasi_snapshot_preview1", "fd_close", namespace_filter) {
linker.func_wrap(
"wasi_snapshot_preview1",
"fd_close",
|_: i32| -> Result<i32, Trap> {
Err(Trap::new(
"Host function `wasi_snapshot_preview1::fd_close` unavailable in this environment.",
))
},
)?;
}
if !namespace_matches_filter("wasi_snapshot_preview1", "fd_datasync", namespace_filter) {
linker.func_wrap("wasi_snapshot_preview1", "fd_datasync", |_: i32| -> Result<i32, Trap> {
Err(Trap::new("Host function `wasi_snapshot_preview1::fd_datasync` unavailable in this environment."))
})?;
}
if !namespace_matches_filter("wasi_snapshot_preview1", "fd_fdstat_get", namespace_filter) {
linker.func_wrap("wasi_snapshot_preview1", "fd_fdstat_get", |_: i32, _: i32| -> Result<i32, Trap> {
Err(Trap::new("Host function `wasi_snapshot_preview1::fd_fdstat_get` unavailable in this environment."))
})?;
}
if !namespace_matches_filter(
"wasi_snapshot_preview1",
"fd_fdstat_set_flags",
namespace_filter,
) {
linker.func_wrap("wasi_snapshot_preview1", "fd_fdstat_set_flags", |_: i32, _: i32| -> Result<i32, Trap> {
Err(Trap::new("Host function `wasi_snapshot_preview1::fd_fdstat_set_flags` unavailable in this environment."))
})?;
}
if !namespace_matches_filter(
"wasi_snapshot_preview1",
"fd_fdstat_set_rights",
namespace_filter,
) {
linker.func_wrap("wasi_snapshot_preview1", "fd_fdstat_set_rights", |_: i32, _:i64, _:i64| -> Result<i32, Trap> {
Err(Trap::new("Host function `wasi_snapshot_preview1::fd_fdstat_set_rights` unavailable in this environment."))
})?;
}
if !namespace_matches_filter(
"wasi_snapshot_preview1",
"fd_filestat_get",
namespace_filter,
) {
linker.func_wrap("wasi_snapshot_preview1", "fd_filestat_get", |_: i32, _: i32| -> Result<i32, Trap> {
Err(Trap::new("Host function `wasi_snapshot_preview1::fd_filestat_get` unavailable in this environment."))
})?;
}
if !namespace_matches_filter(
"wasi_snapshot_preview1",
"fd_filestat_set_size",
namespace_filter,
) {
linker.func_wrap("wasi_snapshot_preview1", "fd_filestat_set_size", |_: i32, _: i64| -> Result<i32, Trap> {
Err(Trap::new("Host function `wasi_snapshot_preview1::fd_filestat_set_size` unavailable in this environment."))
})?;
}
if !namespace_matches_filter(
"wasi_snapshot_preview1",
"fd_filestat_set_times",
namespace_filter,
) {
linker.func_wrap("wasi_snapshot_preview1", "fd_filestat_set_times", |_: i32, _:i64, _:i64, _: i32| -> Result<i32, Trap> {
Err(Trap::new("Host function `wasi_snapshot_preview1::fd_filestat_set_times` unavailable in this environment."))
})?;
}
if !namespace_matches_filter("wasi_snapshot_preview1", "fd_pread", namespace_filter) {
linker.func_wrap(
"wasi_snapshot_preview1",
"fd_pread",
|_: i32, _: i32, _: i32, _: i64, _: i32| -> Result<i32, Trap> {
Err(Trap::new(
"Host function `wasi_snapshot_preview1::fd_pread` unavailable in this environment.",
))
},
)?;
}
if !namespace_matches_filter(
"wasi_snapshot_preview1",
"fd_prestat_dir_name",
namespace_filter,
) {
linker.func_wrap("wasi_snapshot_preview1", "fd_prestat_dir_name", |_: i32, _: i32, _: i32| -> Result<i32, Trap> {
Err(Trap::new("Host function `wasi_snapshot_preview1::fd_prestat_dir_name` unavailable in this environment."))
})?;
}
if !namespace_matches_filter("wasi_snapshot_preview1", "fd_prestat_get", namespace_filter) {
linker.func_wrap("wasi_snapshot_preview1", "fd_prestat_get", |_: i32, _: i32| -> Result<i32, Trap> {
Err(Trap::new("Host function `wasi_snapshot_preview1::fd_prestat_get` unavailable in this environment."))
})?;
}
if !namespace_matches_filter("wasi_snapshot_preview1", "fd_pwrite", namespace_filter) {
linker.func_wrap("wasi_snapshot_preview1", "fd_pwrite", |_: i32, _: i32, _: i32, _: i64, _:i32| -> Result<i32, Trap> {
Err(Trap::new("Host function `wasi_snapshot_preview1::fd_pwrite` unavailable in this environment."))
})?;
}
if !namespace_matches_filter("wasi_snapshot_preview1", "fd_read", namespace_filter) {
linker.func_wrap(
"wasi_snapshot_preview1",
"fd_read",
|_: i32, _: i32, _: i32, _: i32| -> Result<i32, Trap> {
Err(Trap::new(
"Host function `wasi_snapshot_preview1::fd_read` unavailable in this environment.",
))
},
)?;
}
if !namespace_matches_filter("wasi_snapshot_preview1", "fd_readdir", namespace_filter) {
linker.func_wrap("wasi_snapshot_preview1", "fd_readdir", |_: i32, _: i32, _: i32, _: i64, _:i32| -> Result<i32, Trap> {
Err(Trap::new("Host function `wasi_snapshot_preview1::fd_readdir` unavailable in this environment."))
})?;
}
if !namespace_matches_filter("wasi_snapshot_preview1", "fd_renumber", namespace_filter) {
linker.func_wrap("wasi_snapshot_preview1", "fd_renumber", |_: i32, _: i32| -> Result<i32, Trap> {
Err(Trap::new("Host function `wasi_snapshot_preview1::fd_renumber` unavailable in this environment."))
})?;
}
if !namespace_matches_filter("wasi_snapshot_preview1", "fd_seek", namespace_filter) {
linker.func_wrap(
"wasi_snapshot_preview1",
"fd_seek",
|_: i32, _: i64, _: i32, _: i32| -> Result<i32, Trap> {
Err(Trap::new(
"Host function `wasi_snapshot_preview1::fd_seek` unavailable in this environment.",
))
},
)?;
}
if !namespace_matches_filter("wasi_snapshot_preview1", "fd_sync", namespace_filter) {
linker.func_wrap(
"wasi_snapshot_preview1",
"fd_sync",
|_: i32| -> Result<i32, Trap> {
Err(Trap::new(
"Host function `wasi_snapshot_preview1::fd_sync` unavailable in this environment.",
))
},
)?;
}
if !namespace_matches_filter("wasi_snapshot_preview1", "fd_tell", namespace_filter) {
linker.func_wrap(
"wasi_snapshot_preview1",
"fd_tell",
|_: i32, _: i32| -> Result<i32, Trap> {
Err(Trap::new(
"Host function `wasi_snapshot_preview1::fd_tell` unavailable in this environment.",
))
},
)?;
}
if !namespace_matches_filter("wasi_snapshot_preview1", "fd_write", namespace_filter) {
linker.func_wrap(
"wasi_snapshot_preview1",
"fd_write",
|_: i32, _: i32, _: i32, _: i32| -> Result<i32, Trap> {
Err(Trap::new(
"Host function `wasi_snapshot_preview1::fd_write` unavailable in this environment.",
))
},
)?;
}
if !namespace_matches_filter(
"wasi_snapshot_preview1",
"fdpath_create_directory_tell",
namespace_filter,
) {
linker.func_wrap("wasi_snapshot_preview1", "fdpath_create_directory_tell", |_: i32, _: i32, _: i32| -> Result<i32, Trap> {
Err(Trap::new("Host function `wasi_snapshot_preview1::path_create_directory` unavailable in this environment."))
})?;
}
if !namespace_matches_filter(
"wasi_snapshot_preview1",
"path_filestat_get",
namespace_filter,
) {
linker.func_wrap("wasi_snapshot_preview1", "path_filestat_get", |_: i32, _: i32, _: i32, _: i32, _: i32| -> Result<i32, Trap> {
Err(Trap::new("Host function `wasi_snapshot_preview1::path_filestat_get` unavailable in this environment."))
})?;
}
if !namespace_matches_filter(
"wasi_snapshot_preview1",
"path_filestat_set_times",
namespace_filter,
) {
linker.func_wrap("wasi_snapshot_preview1", "path_filestat_set_times", |_: i32, _: i32, _: i32, _: i32, _: i64, _: i64, _: i32| -> Result<i32, Trap> {
Err(Trap::new("Host function `wasi_snapshot_preview1::path_filestat_set_times` unavailable in this environment."))
})?;
}
if !namespace_matches_filter("wasi_snapshot_preview1", "path_link", namespace_filter) {
linker.func_wrap("wasi_snapshot_preview1", "path_link", |_: i32, _: i32, _: i32, _: i32, _: i32, _: i32, _: i32| -> Result<i32, Trap> {
Err(Trap::new("Host function `wasi_snapshot_preview1::path_link` unavailable in this environment."))
})?;
}
if !namespace_matches_filter("wasi_snapshot_preview1", "path_open", namespace_filter) {
linker.func_wrap("wasi_snapshot_preview1", "path_open", |_: i32, _: i32, _: i32, _: i32, _: i32, _: i64, _: i64, _: i32, _: i32| -> Result<i32, Trap> {
Err(Trap::new("Host function `wasi_snapshot_preview1::path_open` unavailable in this environment."))
})?;
}
if !namespace_matches_filter("wasi_snapshot_preview1", "path_readlink", namespace_filter) {
linker.func_wrap("wasi_snapshot_preview1", "path_readlink", |_: i32, _: i32, _: i32, _: i32, _: i32, _: i32| -> Result<i32, Trap> {
Err(Trap::new("Host function `wasi_snapshot_preview1::path_readlink` unavailable in this environment."))
})?;
}
if !namespace_matches_filter(
"wasi_snapshot_preview1",
"path_remove_directory",
namespace_filter,
) {
linker.func_wrap("wasi_snapshot_preview1", "path_remove_directory", |_: i32, _: i32, _: i32| -> Result<i32, Trap> {
Err(Trap::new("Host function `wasi_snapshot_preview1::path_remove_directory` unavailable in this environment."))
})?;
}
if !namespace_matches_filter("wasi_snapshot_preview1", "path_rename", namespace_filter) {
linker.func_wrap("wasi_snapshot_preview1", "path_rename", |_: i32, _: i32, _: i32, _: i32, _: i32, _: i32| -> Result<i32, Trap> {
Err(Trap::new("Host function `wasi_snapshot_preview1::path_rename` unavailable in this environment."))
})?;
}
if !namespace_matches_filter("wasi_snapshot_preview1", "path_symlink", namespace_filter) {
linker.func_wrap("wasi_snapshot_preview1", "path_symlink", |_: i32, _: i32, _: i32, _: i32, _: i32| -> Result<i32, Trap> {
Err(Trap::new("Host function `wasi_snapshot_preview1::path_symlink` unavailable in this environment."))
})?;
}
if !namespace_matches_filter(
"wasi_snapshot_preview1",
"path_unlink_file",
namespace_filter,
) {
linker.func_wrap("wasi_snapshot_preview1", "path_unlink_file", |_: i32, _: i32, _: i32| -> Result<i32, Trap> {
Err(Trap::new("Host function `wasi_snapshot_preview1::path_unlink_file` unavailable in this environment."))
})?;
}
if !namespace_matches_filter("wasi_snapshot_preview1", "poll_oneoff", namespace_filter) {
linker.func_wrap("wasi_snapshot_preview1", "poll_oneoff", |_: i32, _: i32, _: i32, _: i32| -> Result<i32, Trap> {
Err(Trap::new("Host function `wasi_snapshot_preview1::poll_oneoff` unavailable in this environment."))
})?;
}
if !namespace_matches_filter("wasi_snapshot_preview1", "proc_exit", namespace_filter) {
linker.func_wrap("wasi_snapshot_preview1", "proc_exit", |_: i32| -> Result<(), Trap> {
Err(Trap::new("Host function `wasi_snapshot_preview1::proc_exit` unavailable in this environment."))
})?;
}
if !namespace_matches_filter("wasi_snapshot_preview1", "proc_raise", namespace_filter) {
linker.func_wrap("wasi_snapshot_preview1", "proc_raise", |_: i32| -> Result<i32, Trap> {
Err(Trap::new("Host function `wasi_snapshot_preview1::proc_raise` unavailable in this environment."))
})?;
}
if !namespace_matches_filter("wasi_snapshot_preview1", "random_get", namespace_filter) {
linker.func_wrap("wasi_snapshot_preview1", "random_get", |_: i32, _: i32| -> Result<i32, Trap> {
Err(Trap::new("Host function `wasi_snapshot_preview1::random_get` unavailable in this environment."))
})?;
}
if !namespace_matches_filter("wasi_snapshot_preview1", "sched_yield", namespace_filter) {
linker.func_wrap("wasi_snapshot_preview1", "sched_yield", || -> Result<i32, Trap> {
Err(Trap::new("Host function `wasi_snapshot_preview1::sched_yield` unavailable in this environment."))
})?;
}
if !namespace_matches_filter("wasi_snapshot_preview1", "sock_recv", namespace_filter) {
linker.func_wrap("wasi_snapshot_preview1", "sock_recv", |_: i32, _: i32, _: i32, _: i32, _: i32, _: i32| -> Result<i32, Trap> {
Err(Trap::new("Host function `wasi_snapshot_preview1::sock_recv` unavailable in this environment."))
})?;
}
if !namespace_matches_filter("wasi_snapshot_preview1", "sock_send", namespace_filter) {
linker.func_wrap("wasi_snapshot_preview1", "sock_send", |_: i32, _: i32, _: i32, _: i32, _: i32| -> Result<i32, Trap> {
Err(Trap::new("Host function `wasi_snapshot_preview1::sock_send` unavailable in this environment."))
})?;
}
if !namespace_matches_filter("wasi_snapshot_preview1", "sock_shutdown", namespace_filter) {
linker.func_wrap("wasi_snapshot_preview1", "sock_shutdown", |_: i32, _: i32| -> Result<i32, Trap> {
Err(Trap::new("Host function `wasi_snapshot_preview1::sock_shutdown` unavailable in this environment."))
})?;
}
Ok(())
}
|
use crate::nes::ram::Ram;
use super::sprite::Sprite;
/// A list of sprites, each paired with its rendering context.
pub type SpritesWithCtx = Vec<SpriteWithCtx>;
/// A sprite bundled with per-sprite context.
#[derive(Debug)]
pub struct SpriteWithCtx {
    // The sprite's tile data (see `super::sprite::Sprite`).
    pub sprite: Sprite,
}
impl SpriteWithCtx {
    // NOTE(review): no methods yet — presumably position/attribute context
    // will be added alongside `sprite`; confirm intended design.
}
#[cfg(test)]
mod sprite_with_ctx_test {
use super::*;
} |
use std::collections::HashMap;
use std::fs::File;
use std::io::{BufRead, BufReader};
use std::str::FromStr;
/// A quantity of a named chemical, e.g. `12 ABC`.
#[derive(Clone, Debug, PartialEq, Eq)]
struct Ingredient {
    // Chemical name, e.g. "ORE" or "FUEL".
    chemical: String,
    // Number of units of the chemical.
    amount: usize,
}
impl Ingredient {
    fn new(chemical: String, amount: usize) -> Self {
        Self { chemical, amount }
    }
}
/// Error returned when an ingredient string cannot be parsed.
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)]
struct ParseIngredientError;
impl From<std::num::ParseIntError> for ParseIngredientError {
    fn from(_err: std::num::ParseIntError) -> Self {
        Self
    }
}
impl FromStr for Ingredient {
    type Err = ParseIngredientError;
    /// Parse `"12 ABC"` into `Ingredient { amount: 12, chemical: "ABC" }`.
    ///
    /// Uses `split_whitespace` so leading/repeated whitespace is tolerated;
    /// the original `split(' ')` produced empty tokens on a double space and
    /// failed to parse otherwise-valid input.
    fn from_str(s: &str) -> Result<Self, Self::Err> {
        let mut words = s.split_whitespace();
        let amount = words.next().ok_or(ParseIngredientError)?.parse()?;
        let chemical = words.next().ok_or(ParseIngredientError)?;
        Ok(Ingredient::new(chemical.to_string(), amount))
    }
}
/// A reaction: a set of input ingredients producing one output ingredient.
#[derive(Clone, Debug, PartialEq, Eq)]
struct Recipe {
    inputs: Vec<Ingredient>,
    output: Ingredient,
}
impl Recipe {
    fn new(inputs: Vec<Ingredient>, output: Ingredient) -> Self {
        Self { inputs, output }
    }
}
/// Error returned when a recipe line cannot be parsed.
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)]
struct ParseRecipeError;
impl From<ParseIngredientError> for ParseRecipeError {
    fn from(_err: ParseIngredientError) -> Self {
        Self
    }
}
impl FromStr for Recipe {
    type Err = ParseRecipeError;
    /// Parse a line of the form `"12 ABC, 34 DEF => 56 GHI"`.
    fn from_str(s: &str) -> Result<Self, Self::Err> {
        let mut segments = s.split("=>");
        let lhs = segments.next().ok_or(ParseRecipeError)?;
        let rhs = segments.next().ok_or(ParseRecipeError)?;
        // Comma-separated ingredients on the left of `=>`.
        let inputs = lhs
            .split(',')
            .map(|i| i.trim().parse())
            .collect::<Result<_, _>>()?;
        // Single output ingredient on the right.
        let output = rhs.trim().parse()?;
        Ok(Recipe::new(inputs, output))
    }
}
/// Solve Advent of Code 2019 day 14 from `data/day14.txt`.
pub(crate) fn day14() {
    let mut recipes = HashMap::new();
    let input = File::open("data/day14.txt").expect("Failed to open input");
    let buffered = BufReader::new(input);
    for line in buffered.lines().map(|line| line.unwrap()) {
        let recipe: Recipe = line.parse().expect("Couldn't parse recipe");
        recipes.insert(recipe.output.chemical.to_owned(), recipe);
    }
    // Part one: ore required for exactly one fuel.
    let mut needed = HashMap::new();
    needed.insert("FUEL".to_owned(), 1);
    let ore_needed = produce(&recipes, needed);
    println!("Part one answer is: {}", ore_needed);
    // Part two: binary-search the largest fuel amount producible from
    // 1_000_000_000_000 ore.
    let mut lower = 0;
    let mut upper = 10_000_000;
    // BUG FIX: track the best *feasible* amount explicitly. The original
    // printed the last probed `middle`, which is one too high whenever the
    // final probe exceeds the ore budget.
    let mut best = 0;
    while lower <= upper {
        let middle = (lower + upper) / 2;
        let mut needed = HashMap::new();
        needed.insert("FUEL".to_owned(), middle);
        let ore_needed = produce(&recipes, needed);
        if ore_needed > 1_000_000_000_000 {
            upper = middle - 1;
        } else {
            best = middle;
            lower = middle + 1;
        }
    }
    println!("Part two answer is: {}", best);
}
/// Expand `needed` (chemical -> amount) through `recipes` until only ORE
/// remains, returning the total ore consumed. Over-production is tracked in
/// a surplus map and reused before requesting more of a chemical.
fn produce(recipes: &HashMap<String, Recipe>, mut needed: HashMap<String, usize>) -> u64 {
    let mut surplus = HashMap::new();
    let mut ore_used = 0u64;
    while !needed.is_empty() {
        // Pick an arbitrary outstanding requirement (HashMap order is fine:
        // surplus accounting below corrects any over-ordering).
        let (output, needed_amount) = needed.iter().next().unwrap();
        let output = output.clone();
        let needed_amount = *needed_amount;
        let recipe = recipes.get(&output).unwrap();
        // Smallest whole number of reactions covering the requirement
        // (ceiling division).
        let multiplier = (needed_amount + recipe.output.amount - 1) / recipe.output.amount;
        for ingredient in &recipe.inputs {
            let mut ingredient_amount = multiplier * ingredient.amount;
            if ingredient.chemical == "ORE" {
                // ORE is primitive: just tally it.
                ore_used += ingredient_amount as u64;
            } else {
                // Consume any existing surplus of this ingredient first.
                surplus
                    .entry(ingredient.chemical.clone())
                    .and_modify(|surplus_amount| {
                        if *surplus_amount > ingredient_amount {
                            *surplus_amount -= ingredient_amount;
                            ingredient_amount = 0;
                        } else {
                            ingredient_amount -= *surplus_amount;
                            *surplus_amount = 0;
                        }
                    });
                // Whatever surplus didn't cover becomes a new requirement.
                if ingredient_amount > 0 {
                    let amount = needed.entry(ingredient.chemical.clone()).or_insert(0);
                    *amount += ingredient_amount;
                }
            }
        }
        needed.remove(&output);
        // Record over-production; any prior surplus entry for `output` is
        // zero/absent by this point (it was drained when `output` was
        // re-added to `needed`), so a plain insert is safe.
        let produced_amount = multiplier * recipe.output.amount;
        surplus.insert(output, produced_amount - needed_amount);
    }
    ore_used
}
|
use super::trait_simple_demo::Display;
use super::trait_simple_demo::Summary;
use super::trait_simple_demo::Tweet;
use std::fmt::Display as Display_std;
/// Announce an item that implements both `Summary` and `Display`.
pub fn notify<T: Summary + Display>(item: T) {
    // BUG FIX: message typo "Sumarray" -> "Summary".
    println!("Breaking news! Summary+Display {}", item.summarize());
}
//another style
// pub fn notify(item: impl Summary + Display) {
// }
/// Demo driver: build a summarizable value and pass it to `notify`.
pub fn trait_complex_test() {
    println!(
        "{}",
        "------------trait complex test start-------------------"
    );
    notify(returns_summarizable());
}
//fn some_function<T: Display + Clone, U: Clone + Copy>(_t: T, _u: U) -> i32 {
// 1
//}
// another writing style
//fn some_function(item1:impl Display + Clone,item2:impl Clone + Copy) -> i32{
// 1
//}
/// Produce a value usable only through `Summary + Display`, hiding `Tweet`.
fn returns_summarizable() -> impl Summary + Display {
    let username = String::from("horse_ebooks");
    let content = String::from("of course, as you probably already know, people");
    Tweet {
        username,
        content,
        reply: false,
        retweet: false,
    }
}
//为了只对实现了 PartialOrd+Copy 的类型调用这些代码
// better writing style
/// Return the largest element of `list` by `PartialOrd` comparison.
///
/// # Panics
/// Panics with an explicit message if `list` is empty (the original indexed
/// `list[0]` and panicked with an opaque out-of-bounds error).
fn largest<T: PartialOrd + Copy>(list: &[T]) -> T {
    let mut items = list.iter().copied();
    let mut largest = items.next().expect("largest: `list` must be non-empty");
    for item in items {
        if item > largest {
            largest = item;
        }
    }
    largest
}
/// Drive `largest` over a numeric slice and a char slice.
pub fn trait_bound_test() {
    println!(
        "{}",
        "------------trait complex bound test start-------------------"
    );
    let numbers = vec![34, 50, 25, 100, 65];
    println!("The largest number is {}", largest(&numbers));
    let chars = vec!['y', 'm', 'a', 'q'];
    println!("The largest char is {}", largest(&chars));
}
/// A homogeneous pair of values.
struct Pair<T> {
    x: T,
    y: T,
}
impl<T> Pair<T> {
    /// Construct a pair from its two components.
    fn new(x: T, y: T) -> Self {
        Pair { x, y }
    }
}
// `cmp_display` exists only for types that are both orderable and printable.
impl<T: Display_std + PartialOrd> Pair<T> {
    /// Print whichever member is larger (ties report `x`).
    fn cmp_display(&self) {
        if self.x < self.y {
            println!("The largest member is y = {}", self.y);
        } else {
            println!("The largest member is x = {}", self.x);
        }
    }
}
pub fn pair_bound_test() {
println!(
"{}",
"------------trait pair bound test start-------------------"
);
let pair = Pair::new(100, 200);
Pair::cmp_display(&pair);
}
|
// NOTE(review): svd2rust-generated register access code — prefer regenerating
// from the SVD over hand-editing.
#[doc = "Register `MACCSRSWCR` reader"]
pub type R = crate::R<MACCSRSWCR_SPEC>;
#[doc = "Register `MACCSRSWCR` writer"]
pub type W = crate::W<MACCSRSWCR_SPEC>;
#[doc = "Field `RCWE` reader - Register Clear on Write 1 Enable When this bit is set, the access mode to some register fields changes to rc_w1 (clear on write) meaning that the application needs to set that respective bit to 1 to clear it. When this bit is reset, the access mode to these register fields remains rc_r (clear on read)."]
pub type RCWE_R = crate::BitReader;
#[doc = "Field `RCWE` writer - Register Clear on Write 1 Enable When this bit is set, the access mode to some register fields changes to rc_w1 (clear on write) meaning that the application needs to set that respective bit to 1 to clear it. When this bit is reset, the access mode to these register fields remains rc_r (clear on read)."]
pub type RCWE_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `SEEN` reader - Slave Error Response Enable When this bit is set, the MAC responds with a Slave Error for accesses to reserved registers in CSR space. When this bit is reset, the MAC responds with an Okay response to any register accessed from CSR space."]
pub type SEEN_R = crate::BitReader;
#[doc = "Field `SEEN` writer - Slave Error Response Enable When this bit is set, the MAC responds with a Slave Error for accesses to reserved registers in CSR space. When this bit is reset, the MAC responds with an Okay response to any register accessed from CSR space."]
pub type SEEN_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
impl R {
    #[doc = "Bit 0 - Register Clear on Write 1 Enable When this bit is set, the access mode to some register fields changes to rc_w1 (clear on write) meaning that the application needs to set that respective bit to 1 to clear it. When this bit is reset, the access mode to these register fields remains rc_r (clear on read)."]
    #[inline(always)]
    pub fn rcwe(&self) -> RCWE_R {
        // Extract bit 0.
        RCWE_R::new((self.bits & 1) != 0)
    }
    #[doc = "Bit 8 - Slave Error Response Enable When this bit is set, the MAC responds with a Slave Error for accesses to reserved registers in CSR space. When this bit is reset, the MAC responds with an Okay response to any register accessed from CSR space."]
    #[inline(always)]
    pub fn seen(&self) -> SEEN_R {
        // Extract bit 8.
        SEEN_R::new(((self.bits >> 8) & 1) != 0)
    }
}
impl W {
    #[doc = "Bit 0 - Register Clear on Write 1 Enable When this bit is set, the access mode to some register fields changes to rc_w1 (clear on write) meaning that the application needs to set that respective bit to 1 to clear it. When this bit is reset, the access mode to these register fields remains rc_r (clear on read)."]
    #[inline(always)]
    #[must_use]
    pub fn rcwe(&mut self) -> RCWE_W<MACCSRSWCR_SPEC, 0> {
        RCWE_W::new(self)
    }
    #[doc = "Bit 8 - Slave Error Response Enable When this bit is set, the MAC responds with a Slave Error for accesses to reserved registers in CSR space. When this bit is reset, the MAC responds with an Okay response to any register accessed from CSR space."]
    #[inline(always)]
    #[must_use]
    pub fn seen(&mut self) -> SEEN_W<MACCSRSWCR_SPEC, 8> {
        SEEN_W::new(self)
    }
    #[doc = "Writes raw bits to the register."]
    #[inline(always)]
    // Unsafe: bypasses the typed field writers; the caller is responsible for
    // writing a bit pattern that is valid for this register.
    pub unsafe fn bits(&mut self, bits: u32) -> &mut Self {
        self.bits = bits;
        self
    }
}
#[doc = "CSR software control register\n\nYou can [`read`](crate::generic::Reg::read) this register and get [`maccsrswcr::R`](R). You can [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero) this register using [`maccsrswcr::W`](W). You can also [`modify`](crate::generic::Reg::modify) this register. See [API](https://docs.rs/svd2rust/#read--modify--write-api)."]
pub struct MACCSRSWCR_SPEC;
impl crate::RegisterSpec for MACCSRSWCR_SPEC {
    // 32-bit register.
    type Ux = u32;
}
#[doc = "`read()` method returns [`maccsrswcr::R`](R) reader structure"]
impl crate::Readable for MACCSRSWCR_SPEC {}
#[doc = "`write(|w| ..)` method takes [`maccsrswcr::W`](W) writer structure"]
impl crate::Writable for MACCSRSWCR_SPEC {
    // No write-0/write-1-to-modify fields on this register.
    const ZERO_TO_MODIFY_FIELDS_BITMAP: Self::Ux = 0;
    const ONE_TO_MODIFY_FIELDS_BITMAP: Self::Ux = 0;
}
#[doc = "`reset()` method sets MACCSRSWCR to value 0"]
impl crate::Resettable for MACCSRSWCR_SPEC {
    const RESET_VALUE: Self::Ux = 0;
}
|
use std::collections::HashMap;
struct Solution {}
impl Solution {
    /// LeetCode 1: return the indices of two numbers summing to `target`,
    /// as `[current_index, earlier_index]`, or an empty vec if none exist.
    ///
    /// Single pass with a value -> index map: O(n) time, O(n) space. The
    /// original stored `&i32` keys borrowed from `nums` for no benefit and
    /// carried a large commented-out draft, both removed.
    pub fn two_sum(nums: Vec<i32>, target: i32) -> Vec<i32> {
        let mut seen: HashMap<i32, usize> = HashMap::with_capacity(nums.len());
        for (i, &n) in nums.iter().enumerate() {
            if let Some(&j) = seen.get(&(target - n)) {
                return vec![i as i32, j as i32];
            }
            // Later duplicates overwrite earlier indices, matching the
            // original insert behavior.
            seen.insert(n, i);
        }
        vec![]
    }
}
// Exercises two_sum on a few sample inputs (commented-out calls to a
// nonexistent `fast_two_sum` removed).
fn main() {
    println!("{:?}", Solution::two_sum(vec![2,7,11,7,2], 9));
    println!("{:?}", Solution::two_sum(vec![3,2,4], 6));
    println!("{:?}", Solution::two_sum(vec![2,5,5,11], 10));
}
|
use std::fs::File;
use std::io::Read;
/// Advent of Code 2020 day 5: decode boarding-pass seat IDs, print the
/// highest and lowest, then report gaps (the missing seat).
fn main() {
    let mut file = File::open("d05-input").expect("file not found");
    let mut input = String::new();
    file.read_to_string(&mut input).expect("something went wrong reading file");
    // Each line is a 10-character binary number with F/L = 0 and B/R = 1;
    // seat ID = row * 8 + col is exactly that binary value, so decode it
    // directly instead of simulating the halving with floats.
    let mut ids: Vec<u32> = input
        .lines()
        .map(|line| {
            line.chars().fold(0u32, |id, c| match c {
                'F' | 'L' => id << 1,
                'B' | 'R' => (id << 1) | 1,
                _ => panic!("Invalid code"),
            })
        })
        .collect();
    ids.sort();
    println!("Highest ID found: {}, lowest: {}", ids[ids.len() - 1], ids[0]);
    // A gap between consecutive sorted IDs marks the missing seat.
    for pair in ids.windows(2) {
        if pair[1] - pair[0] > 1 {
            println!("Missing seat between {} & {}", pair[0], pair[1]);
        }
    }
}
use crate::io::*;
use crate::model::rnn::*;
use crate::model::seq2seq::*;
use crate::model::*;
use crate::optimizer::{AdaGrad, NewAdam, NewSGD, Optimizer, SGD};
use crate::params::Update;
use crate::types::*;
use crate::util::*;
use itertools::izip;
// extern crate ndarray;
// use ndarray::iter::AxisChunksIter;
use ndarray::{s, Array, Array1, Array2, Axis, Dim, Dimension, Ix2, RemoveAxis, Slice};
/// Trains a sequence-to-sequence model, recording loss and accuracy history.
pub struct Seq2SeqTrainer<'a, E: Encode, D: Decode<Dim = E::Dim>> {
    pub model: Seq2Seq<E, D>,
    optimizer: NewAdam,
    // Parameters updated by the optimizer (shared references into the model).
    params: Vec<&'a Update>,
    // Mean loss recorded every eval interval (name kept from the RNNLM trainer).
    ppl_list: Vec<f32>,
    // Accuracy recorded after each evaluation pass.
    acc_list: Vec<f32>,
    // Iterations per epoch; set by `fit`.
    max_iters: usize,
}
impl<'a, E: Encode, D: Decode<Dim = E::Dim>> Seq2SeqTrainer<'a, E, D> {
pub fn new(model: Seq2Seq<E, D>, params: Vec<&'a Update>, optimizer: NewAdam) -> Self {
Self {
model,
params,
optimizer,
ppl_list: Vec::new(),
acc_list: Vec::new(),
max_iters: 0,
}
}
pub fn print_ppl(&self) {
putsd!(self.ppl_list);
}
pub fn print_acc(&self) {
putsd!(self.acc_list);
}
fn print_progress(
&mut self,
epoch: usize,
iter: usize,
start_time: std::time::Instant,
loss: f32,
) {
let elapsed_time = std::time::Instant::now() - start_time;
println!(
"|epoch {}| iter {}/{} | time {}[s] | loss {}",
epoch,
iter + 1,
self.max_iters,
elapsed_time.as_secs(),
loss
);
}
pub fn fit(
&mut self,
x_train: Array2<usize>,
t_train: Array2<usize>,
max_epoch: usize,
batch_size: usize,
eval_interval: Option<usize>,
eval_problem: Option<(Seq, Seq, Vec<char>)>,
reversed: bool,
) {
let data_len = x_train.dim().0;
self.max_iters = data_len / batch_size;
let start_time = std::time::Instant::now();
let eval_interval = eval_interval.unwrap_or(self.max_iters);
let (x_test, t_test, chars) = eval_problem.unwrap_or_default();
let do_eval = x_test.len() > 0;
for epoch in 1..=max_epoch {
let epoch_idx = random_index(data_len);
let epoch_data = pickup(&x_train, Axis(0), &epoch_idx);
let epoch_target = pickup(&t_train, Axis(0), &epoch_idx);
let mut eval_loss = 0.0;
for (iter, (batch_x, batch_t)) in (izip![
epoch_data.axis_chunks_iter(Axis(0), batch_size),
epoch_target.axis_chunks_iter(Axis(0), batch_size)
])
.enumerate()
{
eval_loss += self.model.forward(batch_x.to_owned(), batch_t.to_owned());
self.model.backward();
self.optimizer.clipgrads(&self.params);
self.optimizer.update(&self.params);
if (iter + 1) % eval_interval == 0 {
// let ppl = (eval_loss / eval_interval as f32).exp();
let loss = (eval_loss / eval_interval as f32);
self.print_progress(epoch, iter, start_time, loss);
self.ppl_list.push(loss);
eval_loss = 0.0;
}
}
if do_eval {
self.eval(&x_test, &t_test, &chars, reversed);
}
}
}
pub fn eval(&mut self, x_test: &Seq, t_test: &Seq, chars: &Vec<char>, reversed: bool) {
let mut correct_count = 0.0;
let start_id = t_test[[0, 0]];
let sample_size = t_test.dim().1 - 1; // t_testの2つ目以降を予測する
let mut wrong_count = 0;
for (i, (_x, _t)) in x_test
.axis_chunks_iter(Axis(0), 1)
.zip(t_test.axis_chunks_iter(Axis(0), 1))
.enumerate()
{
let guess = self.model.generate(_x.to_owned(), start_id, sample_size);
self.params.iter().inspect(|p| p.reset_grads()); // generateでもgrads保存してしまう仕様なので、全部消す
let is_correct = _t.iter().zip(guess.iter()).all(|(a, g)| a == g); // answerとguessを比較
correct_count += if is_correct { 1.0 } else { 0.0 };
// 間違った場合を、最大10個まで出力する
if !is_correct && wrong_count < 10 {
wrong_count += 1;
let mut problem: String = _x.iter().map(|i| chars[*i]).collect();
let guess: String = guess.iter().map(|i| chars[*i]).collect();
let ans: String = _t.iter().map(|i| chars[*i]).collect();
if reversed {
problem = rev_string(problem)
}
println!("{}{}", problem, ans);
println!("{}{}", problem, guess);
println!("{}", if guess == ans { "collect!" } else { "wrong!" });
}
}
let acc = correct_count / x_test.dim().0 as f32;
putsd!(acc);
self.acc_list.push(acc);
}
}
/// Trains an RNN language model with truncated BPTT, recording perplexity.
pub struct RnnlmTrainer<'a, R: Rnnlm> {
    pub model: R,
    optimizer: NewSGD,
    // Parameters updated by the optimizer (shared references into the model).
    params: Vec<&'a Update>,
    // Perplexity recorded every eval interval.
    ppl_list: Vec<f32>,
    // NOTE(review): never written by this impl — appears unused here.
    acc_list: Vec<f32>,
    // Iterations per epoch; set by `fit`.
    max_iters: usize,
}
impl<'a, R: Rnnlm> RnnlmTrainer<'a, R> {
pub fn new(model: R, optimizer: NewSGD, params: Vec<&'a Update>) -> Self {
Self {
model,
params,
optimizer,
ppl_list: Vec::new(),
acc_list: Vec::new(),
max_iters: 0,
}
}
pub fn print_ppl(&self) {
putsd!(self.ppl_list);
}
fn print_progress(
&mut self,
epoch: usize,
iter: usize,
start_time: std::time::Instant,
ppl: f32,
) {
let elapsed_time = std::time::Instant::now() - start_time;
println!(
"|epoch {}| iter {}/{} | time {}[s] | perplexity {}",
epoch,
iter + 1,
self.max_iters,
elapsed_time.as_secs(),
ppl
);
self.ppl_list.push(ppl);
}
fn get_baches(
corpus: &'a Vec<usize>,
batch_size: usize,
time_size: usize,
time_position: &mut usize,
) -> (Array2<usize>, Array2<usize>) {
let data_size = corpus.len() - 1;
let batch_time_offset = data_size / batch_size; // 各列でどれだけずらすか
let time_shift = (batch_time_offset / time_size) * time_size; // time_sizeで割り切れるようにする
/// ↓各列の先頭
let time_offsets: Vec<_> = (0..batch_size)
.map(|i| *time_position + i * batch_time_offset)
.collect();
// 各列で、
let position = |i, j| (time_offsets[i] + j) % data_size;
let xsa = Array2::from_shape_fn((batch_size, time_shift), |(i, j)| corpus[position(i, j)]);
let tsa = Array2::from_shape_fn((batch_size, time_shift), |(i, j)| {
corpus[position(i, j) + 1]
});
*time_position += time_shift; // 次の1列目はこの位置に来る
(xsa, tsa)
}
pub fn fit(
&mut self,
corpus: &Vec<usize>,
max_epoch: usize,
batch_size: usize,
time_size: usize,
eval_interval: Option<usize>,
corpus_val: Option<&Vec<usize>>,
) {
let data_size = corpus.len() - 1;
// (batch_size, time_size)型のデータを学習に用いる
self.max_iters = (data_size / batch_size) / time_size;
let eval_interval = eval_interval.unwrap_or(self.max_iters);
let mut time_position = 0;
let start_time = std::time::Instant::now();
let mut best_ppl = std::f32::INFINITY;
for epoch in 1..=max_epoch {
let mut eval_loss = 0.0;
let (xsa, tsa) = Self::get_baches(corpus, batch_size, time_size, &mut time_position);
let x_batches = xsa.axis_chunks_iter(Axis(1), time_size);
let t_batches = tsa.axis_chunks_iter(Axis(1), time_size);
for (iter, (batch_x, batch_t)) in x_batches.zip(t_batches).enumerate() {
eval_loss += self.model.forward(batch_x.to_owned(), batch_t.to_owned());
self.model.backward();
self.optimizer.update_clip_lr(&self.params);
// self.optimizer.update();
if (iter + 1) % eval_interval == 0 {
let ppl = (eval_loss / eval_interval as f32).exp();
self.print_progress(epoch, iter, start_time, ppl);
eval_loss = 0.0;
}
}
match corpus_val {
None => {}
Some(_corpus_val) => {
self.model.reset_state(); // train_corpusでの記憶をなくす
let ppl = self.eval(_corpus_val, batch_size, time_size);
self.model.reset_state(); // evalでの記憶をなくす, TODO: 本当はここでtrain中の記憶を復活させるべきな気もする。
if best_ppl > ppl {
best_ppl = ppl;
} else {
self.optimizer.lr /= 4.0;
}
}
}
}
}
pub fn eval(&mut self, corpus_eval: &Vec<usize>, batch_size: usize, time_size: usize) -> f32 {
let data_size = corpus_eval.len() - 1;
// (batch_size, time_size)型のデータを学習に用いる
let mut eval_loss = 0.0;
let max_iters = data_size / (batch_size * time_size);
let time_shift = max_iters * time_size;
let mut time_position = 0;
let (xsa, tsa) = Self::get_baches(corpus_eval, batch_size, time_size, &mut time_position);
let x_batches = xsa.axis_chunks_iter(Axis(1), time_size);
let t_batches = tsa.axis_chunks_iter(Axis(1), time_size);
for (iter, (batch_x, batch_t)) in x_batches.zip(t_batches).enumerate() {
eval_loss += self
.model
.eval_forward(batch_x.to_owned(), batch_t.to_owned());
if (iter + 1) % 10 == 0 {
println!("|iter {}/{} |", iter + 1, max_iters);
}
}
(eval_loss / max_iters as f32).exp() // ppl
}
}
/// Trainer for models taking word-id (`usize`) input rather than `f32`;
/// switching the data type required a separate `Model2` trait, hence this
/// near-duplicate of `Trainer`.
pub struct Trainer2<M: Model2, T: Optimizer> {
    pub model: M,
    optimizer: T,
    // Average loss recorded every eval interval.
    loss_list: Vec<f32>,
}
impl<M: Model2, T: Optimizer> Trainer2<M, T> {
    /// Creates a trainer from a model and an optimizer.
    pub fn new(model: M, optimizer: T) -> Self {
        Self {
            model,
            optimizer,
            loss_list: Vec::new(),
        }
    }
    /// Trains the classifier on word-id input `x` against targets `t`,
    /// logging the average loss every `eval_interval` iterations.
    pub fn fit<D: RemoveAxis>(
        &mut self,
        x: Array<usize, D>, // word-id input
        t: Array1<usize>,   // classification targets
        max_epoch: usize,
        batch_size: usize,
        eval_interval: Option<usize>,
    ) {
        // Only the sample count is used here; the trailing dimension is kept
        // for symmetry with the 3-d case (underscore silences the warning).
        let (data_len, _input_dim) = match x.shape() {
            &[a, _, b] => (a, b),
            &[a, b] => (a, b),
            _ => panic!("KAMO: dimension of x must be 2 or 3 in model.fit!"),
        };
        let max_iters = data_len / batch_size;
        self.loss_list = Vec::<f32>::new();
        let eval_interval = eval_interval.unwrap_or(max_iters);
        for epoch in 1..=max_epoch {
            // Reshuffle the data every epoch.
            let idx = random_index(data_len);
            // Record the average loss every `eval_interval` iterations.
            let mut total_loss: f32 = 0.0;
            let mut loss_count: i32 = 0;
            for iters in 1..=max_iters {
                let batch_idx = &idx[(iters - 1) * batch_size..iters * batch_size];
                let batch_data = pickup(&x, Axis(0), batch_idx);
                let batch_target = pickup(&t, Axis(0), batch_idx);
                let loss = self.model.forward(batch_data, batch_target);
                self.model.backward();
                // Bind grads first so the borrow of `self.model` ends before
                // `self.optimizer` is borrowed mutably.
                let grads = self.model.grads();
                self.optimizer.update2d(self.model.params(), grads);
                total_loss += loss; // accumulate loss per batch
                loss_count += 1; // batches since the last report
                if iters % eval_interval == 0 {
                    let avg_loss = total_loss / loss_count as f32;
                    println!(
                        "|epoch {}| iter {}/{} | loss {}",
                        epoch, iters, max_iters, avg_loss
                    );
                    self.loss_list.push(avg_loss);
                    total_loss = 0.0;
                    loss_count = 0;
                }
            }
        }
    }
    /// Prints the recorded loss history.
    pub fn show_loss(&self) {
        println!("{:?}", self.loss_list)
    }
    /// Placeholder for parameter serialization.
    pub fn save_params(&mut self) {
        // TODO: not implemented — iterates the params without writing anything.
        for _p in self.model.params() {}
    }
}
/// Generic mini-batch trainer for `f32`-input models.
pub struct Trainer<M: Model, T: Optimizer> {
    pub model: M,
    optimizer: T,
    // Average loss recorded every eval interval.
    loss_list: Vec<f32>,
}
impl<M: Model, T: Optimizer> Trainer<M, T> {
    /// Creates a trainer from a model and an optimizer.
    pub fn new(model: M, optimizer: T) -> Self {
        Self {
            model,
            optimizer,
            loss_list: Vec::new(),
        }
    }
    /// Trains on `(x, t)` mini-batches for `max_epoch` epochs, logging the
    /// average loss every `eval_interval` iterations.
    ///
    /// NOTE(review): `max_grad` is currently unused — gradient clipping is
    /// not applied here.
    pub fn fit<D: RemoveAxis>(
        &mut self,
        x: Array<f32, D>,
        t: Arr2d,
        max_epoch: usize,
        batch_size: usize,
        max_grad: Option<f32>,
        eval_interval: Option<usize>,
    ) {
        // Only the sample count is used; the trailing dimension is kept for
        // symmetry with the 3-d case (underscore silences the warning).
        let (data_len, _input_dim) = match x.shape() {
            &[a, _, b] => (a, b),
            &[a, b] => (a, b),
            _ => panic!("KAMO: dimension of x must be 2 or 3 in model.fit!"),
        };
        let max_iters = data_len / batch_size;
        self.loss_list = Vec::<f32>::new();
        let eval_interval = eval_interval.unwrap_or(max_iters);
        for epoch in 1..=max_epoch {
            // Reshuffle the data every epoch.
            let idx = random_index(data_len);
            // Record the average loss every `eval_interval` iterations.
            let mut total_loss: f32 = 0.0;
            let mut loss_count: i32 = 0;
            for iters in 1..=max_iters {
                let batch_idx = &idx[(iters - 1) * batch_size..iters * batch_size];
                // `pickup` gathers the rows of `x`/`t` selected by `batch_idx`.
                let batch_data = pickup(&x, Axis(0), batch_idx);
                let batch_target = pickup(&t, Axis(0), batch_idx);
                let loss = self.model.forwardx(batch_data, batch_target);
                self.model.backward(batch_size);
                // Bind grads first so the borrow of `self.model` ends before
                // `self.optimizer` is borrowed mutably.
                let grads1d = self.model.grads1d();
                let grads2d = self.model.grads2d();
                self.optimizer.update1d(self.model.params1d(), grads1d);
                self.optimizer.update2d(self.model.params2d(), grads2d);
                total_loss += loss; // accumulate loss per batch
                loss_count += 1; // batches since the last report
                if iters % eval_interval == 0 {
                    let avg_loss = total_loss / loss_count as f32;
                    println!(
                        "|epoch {}| iter {}/{} | loss {}",
                        epoch, iters, max_iters, avg_loss
                    );
                    self.loss_list.push(avg_loss);
                    total_loss = 0.0;
                    loss_count = 0;
                }
            }
        }
    }
    /// Prints the recorded loss history.
    pub fn show_loss(&self) {
        println!("{:?}", self.loss_list)
    }
}
|
use crate::matrix::{Matrix, MatrixOps};
use std::error::Error;
/// Reads an MNIST-style CSV file (first column = digit label, remaining
/// columns = pixel values 0..=255) and returns `(labels, data)` as parallel
/// vectors of 1-row matrices.
///
/// Labels are encoded as soft one-hot vectors (0.01 everywhere, 0.99 at the
/// label index); pixels are rescaled into the range 0.01..=1.0.
pub fn read_csv_by_path(file_path: &str) -> Result<(Vec<Matrix>, Vec<Matrix>), Box<dyn Error>> {
    let mut rdr = csv::Reader::from_path(file_path)?;
    let mut label_matrix_vec = Vec::new();
    let mut data_matrix_vec = Vec::new();
    for result in rdr.records() {
        let record = result?;
        let mut label_vec = vec![0.01; 10];
        // Parse directly from the &str — the intermediate `to_string()` was
        // an unnecessary allocation. Panics if the label is not in 0..=9.
        let label: usize = record.get(0).unwrap().parse().unwrap();
        label_vec[label] = 0.99;
        label_matrix_vec.push(Matrix::new(vec![label_vec]));
        // Rescale each pixel from 0..=255 into 0.01..=1.0.
        let data_vec: Vec<f64> = (1..record.len())
            .map(|i| {
                let data: f64 = record.get(i).unwrap().parse().unwrap();
                data / 255.0 * 0.99 + 0.01
            })
            .collect();
        data_matrix_vec.push(Matrix::new(vec![data_vec]))
    }
    Ok((label_matrix_vec, data_matrix_vec))
}
/// Prints the predicted digit (argmax of the prediction row) next to the
/// true digit (index marked 0.99 in the label row).
pub fn show_result(predict: Matrix, label: Matrix) {
    // Argmax over the prediction row; the first occurrence of the maximum wins.
    let row = &predict.data[0];
    let mut predict_ans = 0;
    for (i, &value) in row.iter().enumerate().skip(1) {
        if value > row[predict_ans] {
            predict_ans = i;
        }
    }
    // The label row marks the true digit with 0.99 (exact value assigned by
    // the loader, so exact comparison is safe); last match wins, 0 if none.
    let label_ans = (0..10)
        .filter(|&i| label.data[0][i] == 0.99)
        .last()
        .unwrap_or(0);
    println!("Predict is {}, Label is {}", predict_ans, label_ans);
}
#[cfg(test)]
mod dataset_test {
    use super::read_csv_by_path;
    use crate::matrix::MatrixOps;
    /// Smoke test: load the 10-row sample file and print every matrix.
    #[test]
    fn test_read_csv_by_path() {
        println!("********[TEST] Test Dataset Read CSV By Path Function********");
        let (label, data) = read_csv_by_path("data/mnist_test_10.csv").unwrap();
        // Labels and data are built in lockstep, so zipping covers all rows.
        for (label_matrix, data_matrix) in label.iter().zip(data.iter()) {
            label_matrix.show();
            data_matrix.show();
        }
        println!("********************************");
    }
}
|
use crate::magnificent;
use std::fs;
lalrpop_mod!(pub m3); // generated parser
/// Parses an m3 program from source text.
///
/// NOTE(review): still a stub — always returns `Err("unimplemented")`.
/// Callers currently use the lalrpop-generated `m3::ProgramParser` directly.
pub fn parse_m3(_input: &str) -> Result<magnificent::Program, String> {
    // let raw_program = grammer::PredParser::new().parse(input)
    // .or_else(|e| panic!("m3 parser failed: {}", e))
    Err("unimplemented".to_string())
}
/// Checks that every rule in `prog` specifies exactly `num_tapes()` tape
/// deltas, reporting the first offending rule.
pub fn validate_raw_program(prog: &magnificent::Program) -> Result<(), String> {
    match prog.iter().find(|r| r.len() != prog.num_tapes()) {
        Some(r) => Err(format!("Rule {:?} specifies incorrect number of tapes", r)),
        None => Ok(()),
    }
}
// Helper function to read / parse programs
// Helper: read an m3 program from disk, parse it, and validate it.
// Panics (via `expect`) on I/O, parse, or validation failure.
pub fn read_program(filepath: &str) -> magnificent::Program {
    let source = fs::read_to_string(filepath).expect("failed to read program file");
    let parsed = m3::ProgramParser::new()
        .parse(&source)
        .expect("failed to parse program file");
    validate_raw_program(&parsed).expect("invalid program");
    parsed
}
#[cfg(test)]
mod test {
    use super::m3;
    use super::validate_raw_program;
    use crate::magnificent;
    use std::fs;
    // Programs are written as: a `tapes: N` header, then one rule per line in
    // the form `<state> [delta, ...] <next state>`.
    #[test]
    pub fn test_parse_m3() {
        // Minimal program: one rule, two tapes.
        let input = r"
tapes: 2
0 [1, -1] 0";
        let program = m3::ProgramParser::new()
            .parse(input)
            .expect("m3 parser failed");
        validate_raw_program(&program).expect("Invalid program");
        assert_eq!(program.num_tapes(), 2);
        let mut rules_iter = program.iter();
        assert_eq!(
            rules_iter.next().unwrap(),
            &magnificent::Rule::new(0, 0, vec![1, -1])
        );
        // Two rules, three tapes; rules must come back in source order.
        let input = r"
tapes: 3
0 [1, -1, 2] 1
1 [0, 1, 0] 2";
        let program = m3::ProgramParser::new()
            .parse(input)
            .expect("m3 parser failed");
        validate_raw_program(&program).expect("Invalid program");
        assert_eq!(program.num_tapes(), 3);
        let mut rules_iter = program.iter();
        assert_eq!(
            rules_iter.next().unwrap(),
            &magnificent::Rule::new(0, 1, vec![1, -1, 2])
        );
        assert_eq!(
            rules_iter.next().unwrap(),
            &magnificent::Rule::new(1, 2, vec![0, 1, 0])
        );
        // Larger program: just check it parses and validates.
        let input = r"
tapes: 5
0 [1, 2, 3, 4, 5] 1
1 [-1, 2, 3, 4, 5] 2
2 [-1, -2, 3, 4, 5] 0
0 [0, 0, 0, 4, 5] 4
4 [1, 2, 3, -4, -5] 5
5 [1, 2, 3, 0, 0] 0
";
        let program = m3::ProgramParser::new()
            .parse(input)
            .expect("m3 parser failed");
        validate_raw_program(&program).expect("Invalid program");
    }
    // Test parsing a program with malformed tapes statement
    #[test]
    #[should_panic(expected = "m3 parser failed")]
    pub fn test_bad_parse1() {
        // "tape 3" instead of "tapes: 3" — must be rejected by the parser.
        let input = r"
tape 3
0 [1, -1, 2] 1";
        let program = m3::ProgramParser::new()
            .parse(input)
            .expect("m3 parser failed");
        validate_raw_program(&program).expect("Invalid program");
    }
    // Test parsing a program with missing next state
    #[test]
    #[should_panic(expected = "m3 parser failed")]
    pub fn test_bad_parse2() {
        let input = r"
tapes: 3
0 [1, -1, 2]";
        let program = m3::ProgramParser::new()
            .parse(input)
            .expect("m3 parser failed");
        validate_raw_program(&program).expect("Invalid program");
    }
    // Test parsing a program with invalid rule width
    #[test]
    #[should_panic(expected = "Invalid program")]
    pub fn test_bad_parse3() {
        // Parses fine (4 deltas) but fails validation against `tapes: 1`.
        let input = r"
tapes: 1
0 [1, -1, 2, 0] 0";
        let program = m3::ProgramParser::new().parse(input).unwrap(); //.expect("m3 parser failed");
        validate_raw_program(&program).expect("Invalid program");
    }
    // Test parsing of a file on disk
    #[test]
    pub fn test_parse_adder() {
        const ADDER_PROGRAM: &str = "examples/adder.m3";
        let input = fs::read_to_string(ADDER_PROGRAM).expect("failed to read program file");
        let program = m3::ProgramParser::new()
            .parse(&input)
            .expect("failed to parse program file");
        validate_raw_program(&program).expect("invalid program");
        // Interpret the parsed program to make sure it works
        let machine = magnificent::Machine::new(0, vec![1, 1]);
        let end_machine = magnificent::interpret(machine, &program, 100);
        assert!(end_machine.is_ok());
        let (_, end_machine) = end_machine.unwrap();
        // 1 + 1 accumulates on tape 0.
        assert_eq!(end_machine.tape_pos(0), 2);
    }
    // Test parsing of a file on disk
    #[test]
    pub fn test_parse_mult() {
        const MULT_PROGRAM: &str = "examples/mult.m3";
        let input = fs::read_to_string(MULT_PROGRAM).expect("failed to read program file");
        let program = m3::ProgramParser::new()
            .parse(&input)
            .expect("failed to parse program file");
        validate_raw_program(&program).expect("invalid program");
        // Interpret the parsed program to make sure it works
        let machine = magnificent::Machine::new(0, vec![0, 2, 0, 3 - 1]);
        let end_machine = magnificent::interpret(machine, &program, 100);
        assert!(end_machine.is_ok());
        let (_, end_machine) = end_machine.unwrap();
        // 2 * 3 ends up on tape 0.
        assert_eq!(end_machine.tape_pos(0), 6);
    }
    // Test parsing of the 6-rule multiplier
    #[test]
    pub fn test_parse_6_rule_mult() {
        const MULT_PROGRAM: &str = "examples/6-rule-mult.m3";
        let input = fs::read_to_string(MULT_PROGRAM).expect("failed to read program file");
        let program = m3::ProgramParser::new()
            .parse(&input)
            .expect("failed to parse program file");
        validate_raw_program(&program).expect("invalid program");
        // Interpret the parsed program to make sure it works
        let x = 7;
        let y = 11;
        let machine = magnificent::Machine::new(0, vec![0, x, y, 0]);
        let end_machine = magnificent::interpret(machine, &program, 1000);
        assert!(end_machine.is_ok());
        let (_, end_machine) = end_machine.unwrap();
        assert_eq!(end_machine.tape_pos(0), x * y);
    }
}
|
fn main() {
proconio::input! {
n: usize,
store: [(i32, i32, i32); n],
}
let mut price_min = std::i32::MAX;
for s in store {
if s.2 - s.0 <= 0 {
continue;
}
if s.1 < price_min {
price_min = s.1;
}
}
if price_min == std::i32::MAX {
println!("-1");
return;
}
println!("{}", price_min);
} |
#[doc = r" Value read from the register"]
pub struct R {
    bits: u32, // raw register snapshot taken at read() time
}
#[doc = r" Value to write to the register"]
pub struct W {
    bits: u32, // raw value that will be committed to the register
}
impl super::CMDSTAT {
    #[doc = r" Modifies the contents of the register"]
    #[inline]
    pub fn modify<F>(&self, f: F)
    where
        for<'w> F: FnOnce(&R, &'w mut W) -> &'w mut W,
    {
        // Read-modify-write: seed both proxies with the current value,
        // let the closure edit the writer, then commit.
        let current = self.register.get();
        let r = R { bits: current };
        let mut w = W { bits: current };
        f(&r, &mut w);
        self.register.set(w.bits);
    }
    #[doc = r" Reads the contents of the register"]
    #[inline]
    pub fn read(&self) -> R {
        let bits = self.register.get();
        R { bits }
    }
    #[doc = r" Writes to the register"]
    #[inline]
    pub fn write<F>(&self, f: F)
    where
        F: FnOnce(&mut W) -> &mut W,
    {
        // Start from the reset value, not the current contents.
        let mut writer = W::reset_value();
        f(&mut writer);
        self.register.set(writer.bits);
    }
    #[doc = r" Writes the reset value to the register"]
    #[inline]
    pub fn reset(&self) {
        self.write(|w| w)
    }
}
#[doc = r" Value of the field"]
pub struct CTSIZER {
    bits: u16, // 12-bit field value, extracted from bits 8:19
}
impl CTSIZER {
    #[doc = r" Value of the field as raw bits"]
    #[inline]
    pub fn bits(&self) -> u16 {
        self.bits
    }
}
#[doc = "Possible values of the field `CMDSTAT`"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum CMDSTATR {
    #[doc = "Error encountered with command value."]
    ERR,
    #[doc = "Actively processing command value."]
    ACTIVE,
    #[doc = "Idle state, no active command, no error value."]
    IDLE,
    #[doc = "Command in progress, but waiting on data from host value."]
    WAIT,
    #[doc = r" Reserved"]
    _Reserved(u8),
}
impl CMDSTATR {
    #[doc = r" Value of the field as raw bits"]
    #[inline]
    pub fn bits(&self) -> u8 {
        use self::CMDSTATR::*;
        match *self {
            ERR => 1,
            ACTIVE => 2,
            IDLE => 4,
            WAIT => 6,
            _Reserved(bits) => bits,
        }
    }
    #[allow(missing_docs)]
    #[doc(hidden)]
    #[inline]
    pub fn _from(value: u8) -> CMDSTATR {
        // Inverse of `bits`: unknown encodings become `_Reserved`.
        match value {
            1 => CMDSTATR::ERR,
            2 => CMDSTATR::ACTIVE,
            4 => CMDSTATR::IDLE,
            6 => CMDSTATR::WAIT,
            other => CMDSTATR::_Reserved(other),
        }
    }
    #[doc = "Checks if the value of the field is `ERR`"]
    #[inline]
    pub fn is_err(&self) -> bool {
        match *self {
            CMDSTATR::ERR => true,
            _ => false,
        }
    }
    #[doc = "Checks if the value of the field is `ACTIVE`"]
    #[inline]
    pub fn is_active(&self) -> bool {
        match *self {
            CMDSTATR::ACTIVE => true,
            _ => false,
        }
    }
    #[doc = "Checks if the value of the field is `IDLE`"]
    #[inline]
    pub fn is_idle(&self) -> bool {
        match *self {
            CMDSTATR::IDLE => true,
            _ => false,
        }
    }
    #[doc = "Checks if the value of the field is `WAIT`"]
    #[inline]
    pub fn is_wait(&self) -> bool {
        match *self {
            CMDSTATR::WAIT => true,
            _ => false,
        }
    }
}
#[doc = r" Value of the field"]
pub struct CCMDR {
    bits: u8, // 5-bit field value, extracted from bits 0:4
}
impl CCMDR {
    #[doc = r" Value of the field as raw bits"]
    #[inline]
    pub fn bits(&self) -> u8 {
        self.bits
    }
}
#[doc = r" Proxy"]
pub struct _CTSIZEW<'a> {
    w: &'a mut W,
}
impl<'a> _CTSIZEW<'a> {
    #[doc = r" Writes raw bits to the field"]
    #[inline]
    pub unsafe fn bits(self, value: u16) -> &'a mut W {
        const MASK: u16 = 4095;
        const OFFSET: u8 = 8;
        // Clear the 12-bit field, then merge the masked value, in one expression.
        self.w.bits =
            (self.w.bits & !((MASK as u32) << OFFSET)) | (((value & MASK) as u32) << OFFSET);
        self.w
    }
}
#[doc = "Values that can be written to the field `CMDSTAT`"]
pub enum CMDSTATW {
    #[doc = "Error encountered with command value."]
    ERR,
    #[doc = "Actively processing command value."]
    ACTIVE,
    #[doc = "Idle state, no active command, no error value."]
    IDLE,
    #[doc = "Command in progress, but waiting on data from host value."]
    WAIT,
}
impl CMDSTATW {
    #[allow(missing_docs)]
    #[doc(hidden)]
    #[inline]
    pub fn _bits(&self) -> u8 {
        // Bit encoding of each writable variant.
        use self::CMDSTATW::*;
        match *self {
            WAIT => 6,
            IDLE => 4,
            ACTIVE => 2,
            ERR => 1,
        }
    }
}
#[doc = r" Proxy"]
pub struct _CMDSTATW<'a> {
    w: &'a mut W, // writer being built; each method returns it for chaining
}
impl<'a> _CMDSTATW<'a> {
    #[doc = r" Writes `variant` to the field"]
    #[inline]
    pub fn variant(self, variant: CMDSTATW) -> &'a mut W {
        // Safe wrapper: every enum variant encodes to a value within MASK.
        unsafe { self.bits(variant._bits()) }
    }
    #[doc = "Error encountered with command value."]
    #[inline]
    pub fn err(self) -> &'a mut W {
        self.variant(CMDSTATW::ERR)
    }
    #[doc = "Actively processing command value."]
    #[inline]
    pub fn active(self) -> &'a mut W {
        self.variant(CMDSTATW::ACTIVE)
    }
    #[doc = "Idle state, no active command, no error value."]
    #[inline]
    pub fn idle(self) -> &'a mut W {
        self.variant(CMDSTATW::IDLE)
    }
    #[doc = "Command in progress, but waiting on data from host value."]
    #[inline]
    pub fn wait(self) -> &'a mut W {
        self.variant(CMDSTATW::WAIT)
    }
    #[doc = r" Writes raw bits to the field"]
    #[inline]
    pub unsafe fn bits(self, value: u8) -> &'a mut W {
        const MASK: u8 = 7;
        const OFFSET: u8 = 5;
        // Clear bits 5:7, then insert the masked value.
        self.w.bits &= !((MASK as u32) << OFFSET);
        self.w.bits |= ((value & MASK) as u32) << OFFSET;
        self.w
    }
}
#[doc = r" Proxy"]
pub struct _CCMDW<'a> {
    w: &'a mut W,
}
impl<'a> _CCMDW<'a> {
    #[doc = r" Writes raw bits to the field"]
    #[inline]
    pub unsafe fn bits(self, value: u8) -> &'a mut W {
        const MASK: u8 = 31;
        const OFFSET: u8 = 0;
        // Clear the 5-bit field, then merge the masked value, in one expression.
        self.w.bits =
            (self.w.bits & !((MASK as u32) << OFFSET)) | (((value & MASK) as u32) << OFFSET);
        self.w
    }
}
impl R {
    #[doc = r" Value of the register as raw bits"]
    #[inline]
    pub fn bits(&self) -> u32 {
        self.bits
    }
    #[doc = "Bits 8:19 - The current number of bytes still to be transferred with this command. This field will count down to zero."]
    #[inline]
    pub fn ctsize(&self) -> CTSIZER {
        const MASK: u32 = 0x0fff;
        const OFFSET: u32 = 8;
        CTSIZER {
            bits: ((self.bits >> OFFSET) & MASK) as u16,
        }
    }
    #[doc = "Bits 5:7 - The current status of the command execution."]
    #[inline]
    pub fn cmdstat(&self) -> CMDSTATR {
        const MASK: u32 = 0x07;
        const OFFSET: u32 = 5;
        CMDSTATR::_from(((self.bits >> OFFSET) & MASK) as u8)
    }
    #[doc = "Bits 0:4 - current command that is being executed"]
    #[inline]
    pub fn ccmd(&self) -> CCMDR {
        const MASK: u32 = 0x1f;
        const OFFSET: u32 = 0;
        CCMDR {
            bits: ((self.bits >> OFFSET) & MASK) as u8,
        }
    }
}
impl W {
    #[doc = r" Reset value of the register"]
    #[inline]
    pub fn reset_value() -> W {
        W { bits: 0 }
    }
    #[doc = r" Writes raw bits to the register"]
    #[inline]
    pub unsafe fn bits(&mut self, bits: u32) -> &mut Self {
        self.bits = bits;
        self
    }
    #[doc = "Bits 8:19 - The current number of bytes still to be transferred with this command. This field will count down to zero."]
    #[inline]
    pub fn ctsize(&mut self) -> _CTSIZEW {
        _CTSIZEW { w: self }
    }
    #[doc = "Bits 5:7 - The current status of the command execution."]
    #[inline]
    pub fn cmdstat(&mut self) -> _CMDSTATW {
        _CMDSTATW { w: self }
    }
    #[doc = "Bits 0:4 - current command that is being executed"]
    #[inline]
    pub fn ccmd(&mut self) -> _CCMDW {
        _CCMDW { w: self }
    }
}
|
use fibers_transport::{PollRecv, PollSend, Result, TcpTransport, Transport};
use stun_codec::{Attribute, DecodedMessage, Message, TransactionId};
use super::StunTransport;
/// TCP transport layer that can be used for STUN.
#[derive(Debug)]
pub struct StunTcpTransporter<T> {
    // Underlying TCP transport; all send/recv operations delegate to it.
    inner: T,
}
impl<A, T> StunTcpTransporter<T>
where
    A: Attribute,
    T: TcpTransport<SendItem = Message<A>, RecvItem = DecodedMessage<A>>,
{
    /// Makes a new `StunTcpTransporter` instance wrapping `inner`.
    pub fn new(inner: T) -> Self {
        Self { inner }
    }
    /// Returns a reference to the inner transporter.
    pub fn inner_ref(&self) -> &T {
        &self.inner
    }
    /// Returns a mutable reference to the inner transporter.
    pub fn inner_mut(&mut self) -> &mut T {
        &mut self.inner
    }
}
/// Delegates the whole `Transport` interface to the wrapped TCP transport.
/// The peer address is `()` because a TCP connection has a single fixed peer.
impl<A, T> Transport for StunTcpTransporter<T>
where
    A: Attribute,
    T: TcpTransport<SendItem = Message<A>, RecvItem = DecodedMessage<A>>,
{
    type PeerAddr = ();
    type SendItem = Message<A>;
    type RecvItem = DecodedMessage<A>;
    fn start_send(&mut self, (): Self::PeerAddr, item: Self::SendItem) -> Result<()> {
        track!(self.inner.start_send((), item))
    }
    fn poll_send(&mut self) -> PollSend {
        track!(self.inner.poll_send())
    }
    fn poll_recv(&mut self) -> PollRecv<(Self::PeerAddr, Self::RecvItem)> {
        track!(self.inner.poll_recv())
    }
}
/// STUN over TCP needs no per-transaction bookkeeping in this transport,
/// so finishing a transaction is a no-op.
impl<A, T> StunTransport<A> for StunTcpTransporter<T>
where
    A: Attribute,
    T: TcpTransport<SendItem = Message<A>, RecvItem = DecodedMessage<A>>,
{
    fn finish_transaction(&mut self, _peer: &(), _transaction_id: TransactionId) -> Result<()> {
        Ok(())
    }
}
|
use abstract_ns;
use abstract_ns::HostResolve;
use domain;
use futures::prelude::*;
use ns_dns_tokio;
use std::net::IpAddr;
use std::path::Path;
use std::str::FromStr;
use tokio_core::reactor::Handle;
use transport;
/// DNS resolver configuration, wrapping the `domain` crate's `ResolvConf`.
#[derive(Clone, Debug)]
pub struct Config(domain::resolv::ResolvConf);
/// Asynchronous DNS resolver backed by `ns_dns_tokio`.
#[derive(Clone, Debug)]
pub struct Resolver(ns_dns_tokio::DnsResolver);
/// Future that resolves a host to a single `IpAddr`.
pub enum IpAddrFuture {
    /// In-flight DNS lookup.
    DNS(ns_dns_tokio::HostFuture),
    /// Host was already an IP literal; resolves immediately.
    Fixed(IpAddr),
    /// Host string was not a valid DNS name; fails when polled.
    InvalidDNSName(String),
}
/// Errors produced when polling an `IpAddrFuture`.
pub enum Error {
    InvalidDNSName(String),
    NoAddressesFound,
    ResolutionFailed(<ns_dns_tokio::HostFuture as Future>::Error),
}
impl Config {
    /// Builds a `Config` from a resolv.conf-style file.
    ///
    /// Note that this ignores any errors reading or parsing the resolve.conf
    /// file, just like the `domain` crate does.
    pub fn from_file(resolve_conf_path: &Path) -> Self {
        let mut conf = domain::resolv::ResolvConf::new();
        let _ = conf.parse_file(resolve_conf_path);
        conf.finalize();
        Config(conf)
    }
}
impl Resolver {
    /// Builds a resolver from `config`, running it on `executor`.
    pub fn new(config: Config, executor: &Handle) -> Self {
        Resolver(ns_dns_tokio::DnsResolver::new_from_resolver(
            domain::resolv::Resolver::from_conf(executor, config.0),
        ))
    }
    /// Resolves `host` to a future `IpAddr`.
    ///
    /// IP literals resolve immediately; DNS names start an async lookup,
    /// unless the name fails validation, in which case the returned future
    /// yields `Error::InvalidDNSName` when polled.
    pub fn resolve_host(&self, host: &transport::Host) -> IpAddrFuture {
        match *host {
            transport::Host::DnsName(ref name) => {
                trace!("resolve {}", name);
                match abstract_ns::Name::from_str(name) {
                    Ok(name) => IpAddrFuture::DNS(self.0.resolve_host(&name)),
                    Err(_) => IpAddrFuture::InvalidDNSName(name.clone()),
                }
            }
            transport::Host::Ip(addr) => IpAddrFuture::Fixed(addr),
        }
    }
}
/// futures-0.1 style polling: maps the inner DNS future's result to a single
/// address, or resolves/fails immediately for the fixed/invalid variants.
impl Future for IpAddrFuture {
    // TODO: Return the IpList so the user can try all of them.
    type Item = IpAddr;
    type Error = Error;
    fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
        match *self {
            IpAddrFuture::DNS(ref mut inner) => match inner.poll() {
                Ok(Async::NotReady) => Ok(Async::NotReady),
                // Lookup finished: pick one address, or fail if the list is empty.
                Ok(Async::Ready(ips)) => ips.pick_one()
                    .map(Async::Ready)
                    .ok_or(Error::NoAddressesFound),
                Err(e) => Err(Error::ResolutionFailed(e)),
            },
            IpAddrFuture::Fixed(addr) => Ok(Async::Ready(addr)),
            IpAddrFuture::InvalidDNSName(ref name) => Err(Error::InvalidDNSName(name.clone())),
        }
    }
}
|
use crate::grid::PbcInfo;
/// A `D`-dimensional point.
pub type Coord<const D: usize> = [f64; D];
/// Add two points of identical dimension, component-wise.
pub fn add_coords<const D: usize>(x0: &Coord<D>, x1: &Coord<D>) -> Coord<D> {
    // `Coord` is `Copy`: dereference instead of `.clone()` (clippy::clone_on_copy).
    let mut buf: Coord<D> = *x0;
    buf.iter_mut().zip(x1.iter()).for_each(|(a, b)| *a += b);
    buf
}
/// Calculate the Euclidean distance between two points of identical dimension.
pub fn calc_distance<const D: usize>(x0: &Coord<D>, x1: &Coord<D>) -> f64 {
    x0.iter()
        .zip(x1.iter())
        .map(|(a, b)| (a - b).powi(2))
        .sum::<f64>()
        .sqrt()
}
/// Return the distance between two coordinates, with periodic boundary conditions taken into account.
///
/// Each displacement component is wrapped to the nearest periodic image
/// (minimum-image convention), so each component ends up in
/// `[-size/2, size/2]`. Assumes `pbc.inv_box_size` holds the reciprocals of
/// `pbc.box_size` — confirm against `PbcInfo`'s constructor.
pub fn calc_distance_pbc<const D: usize>(x0: &Coord<D>, x1: &Coord<D>, pbc: &PbcInfo<D>) -> f64 {
    x0.iter()
        .zip(x1.iter())
        .zip(pbc.box_size.iter().zip(pbc.inv_box_size.iter()))
        .map(|((a, b), (size, inv_size))| {
            let dx = b - a;
            // Subtract the whole number of box lengths closest to dx.
            dx - size * (dx * inv_size).round()
        })
        .map(|v| v.powi(2))
        .sum::<f64>()
        .sqrt()
}
#[cfg(test)]
mod tests {
    use super::*;
    // Degenerate case: zero-dimensional points add to an empty array.
    #[test]
    fn add_coords_works_as_expected_0_dims() {
        assert_eq!(&add_coords(&[], &[]), &[]);
    }
    #[test]
    fn add_coords_works_as_expected_2_dims() {
        assert_eq!(&add_coords(&[3.0, 5.0], &[7.0, 11.0]), &[10.0, 16.0]);
    }
    #[test]
    fn add_coords_works_as_expected_6_dims() {
        assert_eq!(
            &add_coords(
                &[0.0, 1.0, 2.0, 3.0, 4.0, 5.0],
                &[6.0, 7.0, 8.0, 9.0, 10.0, 11.0]
            ),
            &[6.0, 8.0, 10.0, 12.0, 14.0, 16.0]
        );
    }
    // Degenerate case: distance between zero-dimensional points is 0.
    #[test]
    fn calc_distance_works_as_expected_0_dims() {
        assert_eq!(calc_distance(&[], &[]), 0.0);
    }
    #[test]
    fn calc_distance_works_as_expected_2_dims() {
        let x0 = [1.0, 2.0];
        let x1 = [2.0, 3.0];
        assert_eq!(calc_distance(&x0, &x1), 2.0_f64.sqrt());
    }
    // Shifting a point by whole multiples of the box size must not change
    // the PBC distance (exact float equality holds for these inputs).
    #[test]
    fn calc_distance_with_pbc_works_as_expected() {
        let dx = 2.0;
        let dy = 4.0;
        let box_size = [dx, dy];
        let pbc = PbcInfo::new(box_size);
        let x0 = 1.0;
        let y0 = 2.0;
        assert_eq!(
            calc_distance_pbc(&[x0, y0], &[2.0, 3.0], &pbc),
            2.0_f64.sqrt()
        );
        assert_eq!(
            calc_distance_pbc(&[x0 + dx, y0], &[2.0, 3.0], &pbc),
            2.0_f64.sqrt()
        );
        assert_eq!(
            calc_distance_pbc(&[x0 + 5.0 * dx, y0], &[2.0, 3.0], &pbc),
            2.0_f64.sqrt()
        );
        assert_eq!(
            calc_distance_pbc(&[x0 + 5.0 * dx, y0 + 3.0 * dy], &[2.0, 3.0], &pbc),
            2.0_f64.sqrt()
        );
        assert_eq!(
            calc_distance_pbc(&[x0 - 5.0 * dx, y0 - 3.0 * dy], &[2.0, 3.0], &pbc),
            2.0_f64.sqrt()
        );
    }
    #[test]
    fn calc_distance_works_as_expected_6_dims() {
        let x0 = [1.0, 2.0, 3.0, 4.0, 5.0, 6.0];
        let x1 = [2.0, 3.0, 4.0, 5.0, 6.0, 7.0];
        assert_eq!(calc_distance(&x0, &x1), 6.0_f64.sqrt());
    }
}
|
#[doc = "Control register\n\nThis register you can [`read`](crate::generic::Reg::read), [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero), [`modify`](crate::generic::Reg::modify). See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [ctl](ctl) module"]
pub type CTL = crate::Reg<u32, _CTL>;
#[allow(missing_docs)]
#[doc(hidden)]
pub struct _CTL;
#[doc = "`read()` method returns [ctl::R](ctl::R) reader structure"]
impl crate::Readable for CTL {}
#[doc = "`write(|w| ..)` method takes [ctl::W](ctl::W) writer structure"]
impl crate::Writable for CTL {}
#[doc = "Control register"]
pub mod ctl;
#[doc = "Synchronization control register\n\nThis register you can [`read`](crate::generic::Reg::read), [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero), [`modify`](crate::generic::Reg::modify). See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [sync_ctl](sync_ctl) module"]
pub type SYNC_CTL = crate::Reg<u32, _SYNC_CTL>;
#[allow(missing_docs)]
#[doc(hidden)]
pub struct _SYNC_CTL;
#[doc = "`read()` method returns [sync_ctl::R](sync_ctl::R) reader structure"]
impl crate::Readable for SYNC_CTL {}
#[doc = "`write(|w| ..)` method takes [sync_ctl::W](sync_ctl::W) writer structure"]
impl crate::Writable for SYNC_CTL {}
#[doc = "Synchronization control register"]
pub mod sync_ctl;
#[doc = "LUT component input selection\n\nThis register you can [`read`](crate::generic::Reg::read), [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero), [`modify`](crate::generic::Reg::modify). See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [lut_sel](lut_sel) module"]
pub type LUT_SEL = crate::Reg<u32, _LUT_SEL>;
#[allow(missing_docs)]
#[doc(hidden)]
pub struct _LUT_SEL;
#[doc = "`read()` method returns [lut_sel::R](lut_sel::R) reader structure"]
impl crate::Readable for LUT_SEL {}
#[doc = "`write(|w| ..)` method takes [lut_sel::W](lut_sel::W) writer structure"]
impl crate::Writable for LUT_SEL {}
#[doc = "LUT component input selection"]
pub mod lut_sel;
#[doc = "LUT component control register\n\nThis register you can [`read`](crate::generic::Reg::read), [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero), [`modify`](crate::generic::Reg::modify). See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [lut_ctl](lut_ctl) module"]
pub type LUT_CTL = crate::Reg<u32, _LUT_CTL>;
#[allow(missing_docs)]
#[doc(hidden)]
pub struct _LUT_CTL;
#[doc = "`read()` method returns [lut_ctl::R](lut_ctl::R) reader structure"]
impl crate::Readable for LUT_CTL {}
#[doc = "`write(|w| ..)` method takes [lut_ctl::W](lut_ctl::W) writer structure"]
impl crate::Writable for LUT_CTL {}
#[doc = "LUT component control register"]
pub mod lut_ctl;
#[doc = "Data unit component input selection\n\nThis register you can [`read`](crate::generic::Reg::read), [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero), [`modify`](crate::generic::Reg::modify). See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [du_sel](du_sel) module"]
pub type DU_SEL = crate::Reg<u32, _DU_SEL>;
#[allow(missing_docs)]
#[doc(hidden)]
pub struct _DU_SEL;
#[doc = "`read()` method returns [du_sel::R](du_sel::R) reader structure"]
impl crate::Readable for DU_SEL {}
#[doc = "`write(|w| ..)` method takes [du_sel::W](du_sel::W) writer structure"]
impl crate::Writable for DU_SEL {}
#[doc = "Data unit component input selection"]
pub mod du_sel;
#[doc = "Data unit component control register\n\nThis register you can [`read`](crate::generic::Reg::read), [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero), [`modify`](crate::generic::Reg::modify). See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [du_ctl](du_ctl) module"]
pub type DU_CTL = crate::Reg<u32, _DU_CTL>;
#[allow(missing_docs)]
#[doc(hidden)]
pub struct _DU_CTL;
#[doc = "`read()` method returns [du_ctl::R](du_ctl::R) reader structure"]
impl crate::Readable for DU_CTL {}
#[doc = "`write(|w| ..)` method takes [du_ctl::W](du_ctl::W) writer structure"]
impl crate::Writable for DU_CTL {}
#[doc = "Data unit component control register"]
pub mod du_ctl;
#[doc = "Data register\n\nThis register you can [`read`](crate::generic::Reg::read), [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero), [`modify`](crate::generic::Reg::modify). See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [data](data) module"]
pub type DATA = crate::Reg<u32, _DATA>;
#[allow(missing_docs)]
#[doc(hidden)]
pub struct _DATA;
#[doc = "`read()` method returns [data::R](data::R) reader structure"]
impl crate::Readable for DATA {}
#[doc = "`write(|w| ..)` method takes [data::W](data::W) writer structure"]
impl crate::Writable for DATA {}
#[doc = "Data register"]
pub mod data;
|
// By considering the terms in the Fibonacci sequence whose values do not exceed four million, find the sum of the even-valued terms.
fn main() {
let now = Instant::now();
println!("imperative: {}", e2_imperative());
println!("time:{:?}", now.elapsed());
let now = Instant::now();
println!("functional: {}", e2_functional());
println!("time:{:?}", now.elapsed());
let now = Instant::now();
println!("functional2: {}", e2_functional2());
println!("time:{:?}", now.elapsed());
let now = Instant::now(); // 1.
println!("recursive: {}", e2_recursive(4000000, 1, 1));
println!("time:{:?}", now.elapsed());
let now = Instant::now(); // 0.
println!(
"recursive2: {:?}",
e2_recursive2(4000000, 1, 1, [].to_vec())
.iter()
.sum::<u32>()
);
println!("time:{:?}", now.elapsed());
}
/// Fibonacci iteration state: `a` is the current term, `b` the previous one,
/// and `index` counts how many times the state has been advanced.
struct Fib {
    index: u32,
    a: u32,
    b: u32,
}

impl Fib {
    /// Start the sequence at 1, 1.
    fn new() -> Fib {
        Fib { index: 0, a: 1, b: 1 }
    }

    /// Advance the state to the next Fibonacci term in place.
    fn next(&mut self) {
        self.index += 1;
        let previous = self.a;
        self.a += self.b;
        self.b = previous;
    }
}
/// Sum the even Fibonacci terms below four million with an explicit loop.
fn e2_imperative() -> u32 {
    let mut total = 0u32;
    let mut fib = Fib::new();
    loop {
        if fib.a >= 4000000 {
            break;
        }
        if fib.a % 2 == 0 {
            total += fib.a;
        }
        fib.next();
    }
    total
}
/// Sum the even Fibonacci terms below four million using a `scan`-based chain.
fn e2_functional() -> u32 {
    // `scan` threads a (current, previous) pair through the iteration; the
    // `(1..)` driver only keeps the stream infinite (see the next fn for a
    // tighter variant).
    (1..)
        .scan((1, 1), |state, _| {
            let previous = state.0;
            state.0 += state.1;
            state.1 = previous;
            Some(state.0)
        })
        .take_while(|&x| x < 4000000)
        .filter(|x| x % 2 == 0)
        .sum()
}
// 0
/// Same as `e2_functional`, but driven by `repeat_with` over a moved closure
/// state — measurably faster in the recorded timings below.
fn e2_functional2() -> u32 {
    std::iter::repeat_with({
        // The pair (current, next) is moved into the closure and mutated there.
        let mut pair = (1, 1);
        move || {
            let advanced = (pair.1, pair.0 + pair.1);
            // Swap in the advanced pair and emit the term we just replaced.
            std::mem::replace(&mut pair, advanced).0
        }
    })
    .take_while(|&x| x < 4000000)
    .filter(|x| x % 2 == 0)
    .sum()
}
/// Recursively accumulate even Fibonacci terms up to `lim`, where `f0` is the
/// current term and `f1` the previous one.
fn e2_recursive(lim: u32, f0: u32, f1: u32) -> u32 {
    if f0 > lim {
        return 0;
    }
    let rest = e2_recursive(lim, f0 + f1, f0);
    if f0 % 2 == 0 { f0 + rest } else { rest }
}
/// Recursively collect the even Fibonacci terms up to `lim` into `v`,
/// returning the accumulated vector.
fn e2_recursive2(lim: u32, f0: u32, f1: u32, mut v: Vec<u32>) -> Vec<u32> {
    if f0 > lim {
        return v;
    }
    if f0 % 2 == 0 {
        v.push(f0);
    }
    e2_recursive2(lim, f0 + f1, f0, v)
}
// Learnings
/*
0. Recursive solution that adds, rather than creating a vector is interestingly much faster
1. Recursive solutions continue to be surprisingly fast
*/
// Output
/*
imperative: 4613732
time:34.237µs
functional: 4613732
time:13.705µs
functional2: 4613732
time:6.168µs
recursive: 4613732
time:1.869µs
recursive2: 4613732
time:15.49µs
*/
|
/// Demonstrate that reversing a `Vec` reverses element order only — it does
/// not reverse the strings themselves, so the "palindrome" falls flat.
pub fn palindrome() {
    // a palindrome!
    let mut words = vec!["a man", "a plan", "a canal", "panama"];
    println!("{:?}", words);
    words.reverse();
    // reasonable yet disappointing:
    println!("{:?}", words);
    assert_eq!(words, vec!["panama", "a canal", "a plan", "a man"]);
}
|
use utils;
use std::collections::HashSet;
use std::iter::FromIterator;
/// Project Euler 47: find the first of four consecutive integers that each
/// have four distinct prime factors.
pub fn problem_047() -> u64 {
    let target = 4;
    // Current run of consecutive qualifying integers.
    let mut run: Vec<u64> = Vec::new();
    for n in 2..1000000 {
        let factors = utils::prime_factors(n);
        let distinct: HashSet<&u64> = HashSet::from_iter(factors.iter());
        if distinct.len() == target {
            run.push(n);
            if run.len() == target {
                return run[0];
            }
        } else {
            // A non-qualifying integer breaks the run.
            run.clear();
        }
    }
    0
}
#[cfg(test)]
mod test {
    use super::*;
    // `Bencher` comes from the unstable `test` crate (nightly only).
    use test::Bencher;
    #[test]
    fn test_problem_047() {
        let ans: u64 = problem_047();
        println!("Answer to Problem 47: {}", ans);
        assert!(ans == 134043)
    }
    #[bench]
    fn bench_problem_047(b: &mut Bencher) {
        b.iter(|| problem_047());
    }
}
|
use std::collections::HashMap;
use std::fs;
/// Read the puzzle input, solve both parts, and sanity-check the known answers.
fn main() -> Result<(), String> {
    let input = fs::read_to_string("input/data.txt").map_err(|e| e.to_string())?;
    let numbers = get_as_array(&input);

    let ChildProcessResult { metadata_sum, .. } = part1(&numbers);
    assert_eq!(metadata_sum, 36891);
    println!("Part 1 = {}", metadata_sum);

    let ChildProcessResult { metadata_sum, .. } = part2(&numbers);
    assert_eq!(metadata_sum, 20083);
    println!("Part 2 = {}", metadata_sum);
    Ok(())
}
/// Parse the space-separated puzzle input into a vector of numbers.
/// Panics if any token is not a valid `usize`.
fn get_as_array(input: &str) -> Vec<usize> {
    input
        .split(' ')
        .map(|token| token.parse::<usize>().unwrap())
        .collect()
}
/// Result of walking one tree node: the number of input entries it spans and
/// the metadata total computed beneath it.
#[derive(Debug, PartialEq, Eq)]
struct ChildProcessResult {
    total_length: usize,
    metadata_sum: usize,
}

/// Part 1: sum every metadata entry in the tree rooted at `data[0]`.
/// Node format: [child count, metadata count, children..., metadata...].
fn part1(data: &[usize]) -> ChildProcessResult {
    let num_children = data[0];
    let num_metadata = data[1];
    let mut children_sum = 0;
    // Two header entries consumed so far.
    let mut consumed = 2;
    for _ in 0..num_children {
        let child = part1(&data[consumed..]);
        consumed += child.total_length;
        children_sum += child.metadata_sum;
    }
    let own_metadata: usize = data[consumed..consumed + num_metadata].iter().sum();
    ChildProcessResult {
        total_length: consumed + num_metadata,
        metadata_sum: children_sum + own_metadata,
    }
}
/// Part 2: a node's value is its own metadata sum when it has no children;
/// otherwise it is the sum of the child values selected by its metadata
/// entries (1-indexed; out-of-range references count as 0).
fn part2(data: &[usize]) -> ChildProcessResult {
    let num_children = data[0];
    let num_metadata = data[1];
    let mut consumed = 2;
    if num_children == 0 {
        // Leaf node: value is simply the metadata sum.
        return ChildProcessResult {
            total_length: 2 + num_metadata,
            metadata_sum: data[consumed..consumed + num_metadata].iter().sum(),
        };
    }
    // Map of 1-based child index -> that child's value.
    let mut child_values: HashMap<usize, usize> = HashMap::with_capacity(num_children);
    for index in 0..num_children {
        let child = part2(&data[consumed..]);
        consumed += child.total_length;
        child_values.insert(index + 1, child.metadata_sum);
    }
    let metadata_sum: usize = data[consumed..consumed + num_metadata]
        .iter()
        .map(|reference| *child_values.get(reference).unwrap_or(&0))
        .sum();
    ChildProcessResult {
        total_length: consumed + num_metadata,
        metadata_sum,
    }
}
#[cfg(test)]
mod test_process_child {
    // These tests referenced a `process_child` function that does not exist in
    // this file — the module did not compile. `part1` implements exactly the
    // contract the assertions describe (encoded length + metadata sum), so the
    // tests now exercise it directly.
    use super::{part1, ChildProcessResult};
    #[test]
    fn no_children() {
        assert_eq!(
            ChildProcessResult {
                total_length: 5,
                metadata_sum: 9 + 5 + 12
            },
            part1(&[0, 3, 9, 5, 12]),
        )
    }
    #[test]
    fn one_child() {
        assert_eq!(
            ChildProcessResult {
                total_length: 7,
                metadata_sum: 9 + 5 + 12
            },
            part1(&[1, 2, 0, 1, 9, 5, 12]),
        )
    }
    #[test]
    fn sub_children() {
        assert_eq!(
            ChildProcessResult {
                total_length: 16,
                metadata_sum: 1 + 1 + 2 + 10 + 11 + 12 + 2 + 99
            },
            part1(&[2, 3, 0, 3, 10, 11, 12, 1, 1, 0, 1, 99, 2, 1, 1, 2]),
        )
    }
    #[test]
    fn branched_children() {
        assert_eq!(
            ChildProcessResult {
                total_length: 16,
                metadata_sum: 1 + 1 + 6 + 8 + 4 + 0 + 2 + 9
            },
            part1(&[2, 3, 1, 1, 0, 3, 1, 1, 6, 8, 0, 1, 4, 0, 2, 9]),
            // [A, A, B, B, C, C, c, c, c, b, D, D, d, a, a, a]
        )
    }
}
|
/* required because there can be no os-level abstractions for an os :) */
/* println macro fails, panic handler unavailable, and many others */
/* the main function is pointless as there is no runtime that could call it :( */
#![no_std]
#![no_main]
/* overwriting the os entry point with this _start function. It's special! */
/* no_mangle would tell the compiler not to uniquefy the function and really output a function called _start */
#[no_mangle]
pub extern "C" fn _start() -> ! {
    // Bare-metal entry point: there is no runtime to return to, so spin forever.
    loop{}
}
use core::panic::PanicInfo;
/* the never type means that function should never return */
/* coz bruhh, to whom would an os return a value?? */
#[panic_handler]
fn panic(_info: &PanicInfo) -> ! {
    // No unwinding or reporting facilities exist in no_std: halt by spinning.
    loop{}
}
|
/// Naive O(n²) dynamic programming for Jump Game II: `min_jump[i]` is the
/// fewest jumps needed to reach the last index from position `i`.
/// (The original used a redundant `*&` deref-of-reference; removed.)
pub fn jump_naive(nums: Vec<i32>) -> i32 {
    use std::cmp::min;
    let n = nums.len();
    if n == 1 {
        return 0;
    } else if n == 2 {
        return 1;
    }
    let mut min_jump = vec![0; n];
    for i in (0..n - 1).rev() {
        // Best successor cost within reach of position i, capped at the last
        // index; `n` acts as an "unreachable" sentinel larger than any real count.
        let best = min_jump[i + 1..=min(i + nums[i] as usize, n - 1)]
            .iter()
            .fold(n as i32, |acc, &x| min(acc, x));
        min_jump[i] = best + 1;
    }
    min_jump[0]
}
// Reference: https://leetcode.com/problems/jump-game-ii/discuss/706435/C%2B%2B-o(n)-solution
// The idea is to maintain a range (start, end] s.t. every position in this range needs an exact s steps to reach
/// Greedy O(n) solution: grow the window of positions reachable in `steps`
/// jumps, bumping `steps` each time the scan crosses the window's end.
pub fn jump(nums: Vec<i32>) -> i32 {
    use std::cmp::max;
    let n = nums.len();
    if n == 1 {
        return 0;
    }
    if n == 2 {
        return 1;
    }
    if nums[0] >= n as i32 - 1 {
        return 1;
    }
    let mut current_end = nums[0];
    let mut furthest = nums[0];
    let mut steps = 1;
    for position in 1..n {
        furthest = max(furthest, position as i32 + nums[position]);
        if furthest >= n as i32 - 1 {
            // One more jump from inside the current window reaches the end.
            return steps + 1;
        }
        if position as i32 == current_end {
            current_end = furthest;
            steps += 1;
        }
    }
    steps
}
#[test]
fn test_jump() {
    // First jump already covers the whole array.
    assert_eq!(jump(vec![3, 2, 1]), 1);
    assert_eq!(jump(vec![2, 3, 1, 1, 4]), 2);
    assert_eq!(jump(vec![1, 2, 3]), 2);
}
use ::{ Asn1DerError, types::{ FromDerObject, IntoDerObject }, der::{ DerObject, DerTag} };
impl FromDerObject for () {
    /// Decode a DER NULL: the tag must be `Null` and the payload must be empty.
    fn from_der_object(der_object: DerObject) -> Result<Self, Asn1DerError> {
        if der_object.tag != DerTag::Null {
            return Err(Asn1DerError::InvalidTag);
        }
        if der_object.value.data.is_empty() {
            Ok(())
        } else {
            Err(Asn1DerError::InvalidEncoding)
        }
    }
}
impl IntoDerObject for () {
fn into_der_object(self) -> DerObject {
DerObject::new(DerTag::Null, Vec::new().into())
}
fn serialized_len(&self) -> usize {
DerObject::compute_serialized_len(0)
}
} |
use failure::Fallible;
use regex::Regex;
use rustberry::playback_requests::PlaybackRequest;
use rustberry::rfid::*;
fn derive_spotify_uri_from_url(url: &str) -> Fallible<String> {
let re = Regex::new(r"https://open.spotify.com/(?P<type>(track|album))/(?P<id>[a-zA-Z0-9]+)")
.expect("Failed to compile regex");
let uri = match re.captures(&url) {
Some(captures) => {
println!("ok");
format!("spotify:{}:{}", &captures["type"], &captures["id"])
}
None => {
println!("Failed to parse Spotify URL: {}", url);
std::process::exit(1);
}
};
Ok(uri)
}
/// Record of a completed write: the request sent and the UID of the tag it
/// was written to. Fields are underscore-prefixed because they are only kept
/// for ownership, not read back.
struct Written {
    _request: PlaybackRequest,
    _uid: String,
}
/// Prompt for a Spotify URL, build a play request, and write it to an RFID tag.
fn run_application() -> Fallible<Written> {
    let url = dialoguer::Input::<String>::new()
        .with_prompt("Spotify URL")
        .interact()?;
    let uri = derive_spotify_uri_from_url(&url)?;
    let request = PlaybackRequest::SpotifyUri(uri);
    println!("Play Request: {:?}", &request);
    // `to_string` *serializes* the request — the original binding was
    // misleadingly named `request_deserialized`.
    let request_serialized = serde_json::to_string(&request)?;
    let mut rc = RfidController::new()?;
    let tag = rc.open_tag().expect("Failed to open RFID tag").unwrap();
    let uid = format!("{:?}", tag.uid);
    println!("RFID Tag UID: {}", uid);
    let mut tag_writer = tag.new_writer();
    tag_writer.write_string(&request_serialized)?;
    Ok(Written {
        _request: request,
        _uid: uid,
    })
}
/// Entry point: report whether writing the play request succeeded.
fn main() {
    match run_application() {
        Ok(_) => println!("Successfully written play request to RFID tag."),
        Err(err) => println!("Failed to write the play request to RFID tag: {}", err),
    }
}
|
use std::collections::HashMap;
fn main() {
    const MAX_NUMBERS: u32 = 30000000;
    let input = "6,13,1,15,2,0"; // "0,3,6";
    // Reverse once so `next()` peels off the *last* starting number, then
    // reverse back so the remaining numbers are visited in original order.
    let mut input = input.split(',').rev().map(|n| n.parse().unwrap());
    let mut last_num = input.next().unwrap();
    let input = input.rev();
    let mut spoken_count = 0;
    let mut spoken_numbers = HashMap::new();
    // Record the turn index on which each starting number was spoken.
    for n in input {
        spoken_numbers.insert(n, spoken_count);
        spoken_count += 1;
    }
    spoken_count += 1; // last number isn't mapped
    // Memory-game step: if the last number is new, speak 0; otherwise speak
    // the gap since it was previously spoken. `insert` returns the old entry,
    // giving both the update and the lookup in one call.
    for i in spoken_count..MAX_NUMBERS {
        let last_num_idx = i - 1;
        last_num = spoken_numbers
            .insert(last_num, last_num_idx)
            .map_or(0, |prev| last_num_idx - prev);
    }
    let answer = last_num;
    println!("{}", answer);
}
|
extern crate libnanomsg;
extern crate libc;
extern crate core;
use std::str;
use std::fmt;
pub use self::NanoErrorKind::*;
pub type NanoResult<T> = Result<T, NanoError>;
// Error kinds mirroring nanomsg's errno values. The `#[deriving]`, `int`,
// `0i` and `FromPrimitive` syntax dates this file to pre-1.0 Rust (~2014);
// it will not build on a modern compiler.
#[deriving(Show, Clone, PartialEq, FromPrimitive)]
pub enum NanoErrorKind {
    Unknown = 0i,
    OperationNotSupported = libnanomsg::ENOTSUP as int,
    ProtocolNotSupported = libnanomsg::EPROTONOSUPPORT as int,
    NoBufferSpace = libnanomsg::ENOBUFS as int,
    NetworkDown = libnanomsg::ENETDOWN as int,
    AddressInUse = libnanomsg::EADDRINUSE as int,
    AddressNotAvailable = libnanomsg::EADDRNOTAVAIL as int,
    ConnectionRefused = libnanomsg::ECONNREFUSED as int,
    OperationNowInProgress = libnanomsg::EINPROGRESS as int,
    NotSocket = libnanomsg::ENOTSOCK as int,
    AddressFamilyNotSupported = libnanomsg::EAFNOSUPPORT as int,
    WrongProtocol = libnanomsg::EPROTO as int,
    TryAgain = libnanomsg::EAGAIN as int,
    BadFileDescriptor = libnanomsg::EBADF as int,
    InvalidArgument = libnanomsg::EINVAL as int,
    TooManyOpenFiles = libnanomsg::EMFILE as int,
    BadAddress = libnanomsg::EFAULT as int,
    PermisionDenied = libnanomsg::EACCESS as int,
    NetworkReset = libnanomsg::ENETRESET as int,
    NetworkUnreachable = libnanomsg::ENETUNREACH as int,
    HostUnreachable = libnanomsg::EHOSTUNREACH as int,
    NotConnected = libnanomsg::ENOTCONN as int,
    MessageTooLong = libnanomsg::EMSGSIZE as int,
    Timeout = libnanomsg::ETIMEDOUT as int,
    ConnectionAbort = libnanomsg::ECONNABORTED as int,
    ConnectionReset = libnanomsg::ECONNRESET as int,
    ProtocolNotAvailable = libnanomsg::ENOPROTOOPT as int,
    AlreadyConnected = libnanomsg::EISCONN as int,
    SocketTypeNotSupported = libnanomsg::ESOCKTNOSUPPORT as int
}
/// An error from the nanomsg library: a static description string plus its kind.
#[deriving(PartialEq)]
pub struct NanoError {
    pub description: &'static str,
    pub kind: NanoErrorKind
}
impl NanoError {
    /// Build an error from a static description and an error kind.
    pub fn new(description: &'static str, kind: NanoErrorKind) -> NanoError {
        NanoError {
            description: description,
            kind: kind
        }
    }
    /// Translate a raw nanomsg errno into a `NanoError`, falling back to
    /// `Unknown` for codes that do not map to a known kind.
    pub fn from_nn_errno(nn_errno: libc::c_int) -> NanoError {
        let maybe_error_kind = FromPrimitive::from_i64(nn_errno as i64);
        let error_kind = maybe_error_kind.unwrap_or(Unknown);
        unsafe {
            // nn_strerror returns a C string; presumably it is statically owned
            // by nanomsg so the &'static borrow is sound — TODO confirm.
            let c_desc = libnanomsg::nn_strerror(nn_errno);
            let desc = str::from_c_str(c_desc);
            NanoError::new(desc, error_kind)
        }
    }
}
impl fmt::Show for NanoError {
    /// Human-readable rendering of the error kind and description.
    fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
        // Fixed user-facing typo: "ocurred" -> "occurred".
        write!(formatter, "An error has occurred: Kind: {} Description: {}", self.kind, self.description)
    }
}
/// Fetch and translate the errno from the most recent failed nanomsg call.
pub fn last_nano_error() -> NanoError {
    let nn_errno = unsafe { libnanomsg::nn_errno() };
    NanoError::from_nn_errno(nn_errno)
}
#[cfg(test)]
mod tests {
    #![allow(unused_must_use)]
    #[phase(plugin, link)]
    extern crate log;
    extern crate libnanomsg;
    extern crate libc;
    use super::*;
    // Convert a raw errno through FromPrimitive and compare the resulting
    // variant with the expected NanoErrorKind.
    fn assert_convert_error_code_to_error_kind(error_code: libc::c_int, expected_error_kind: NanoErrorKind) {
        let i64_error_code = error_code as i64;
        let converted_error_kind = FromPrimitive::from_i64(i64_error_code);
        match converted_error_kind {
            Some(error_kind) => assert_eq!(expected_error_kind, error_kind),
            None => panic!("Failed to convert error code to NanoErrorKind")
        }
    }
    #[test]
    fn can_convert_error_code_to_error_kind() {
        assert_convert_error_code_to_error_kind(libnanomsg::ENOTSUP, OperationNotSupported);
        assert_convert_error_code_to_error_kind(libnanomsg::EPROTONOSUPPORT, ProtocolNotSupported);
        assert_convert_error_code_to_error_kind(libnanomsg::EADDRINUSE, AddressInUse);
        assert_convert_error_code_to_error_kind(libnanomsg::EHOSTUNREACH, HostUnreachable);
    }
}
|
use super::*;
/// Request that the history of the currently focused buffer be cleared.
pub(crate) fn clear_history_command(ctx: &Context) -> CommandResult {
    let (buffer_index, _) = ctx.state.buffers().current();
    ctx.request(Request::ClearHistory(buffer_index));
    Ok(Response::Nothing)
}
|
use anyhow::Error;
use clap::{Arg, ArgMatches};
use k8s_openapi::api::apps::v1::Deployment;
use k8s_openapi::api::core::v1::{Secret, ServiceAccount};
use k8s_openapi::api::rbac::v1::{ClusterRole, ClusterRoleBinding};
use kube::api::{DeleteParams, PostParams};
use kube::client::APIClient;
use kube::Api;
use async_trait::async_trait;
/// The namespace and registry URLs that authentication forwarding targets.
#[derive(Debug, PartialEq, Clone)]
pub struct Targets {
    namespace: String,
    // Renamed from the misspelled `registry_ulrs`; the field is private, so
    // this is invisible to callers.
    registry_urls: Vec<String>,
}
impl Targets {
    /// CLI arguments for selecting the target namespace and registry URLs.
    pub fn args() -> Vec<Arg<'static, 'static>> {
        vec![
            Arg::with_name("registry_url")
                .multiple(true)
                .short("r")
                .long("registry-url")
                .number_of_values(1)
                .default_value("https://gcr.io")
                .value_name("URL")
                .help("The registry to forward authentication for"),
            Arg::with_name("namespace")
                .short("n")
                .long("namespace")
                .takes_value(true)
                .value_name("NAMESPACE")
                .help("The namespace to create the secret in, if not current namespace"),
        ]
    }
    /// Build from explicit values.
    pub fn new(namespace: &str, registry_urls: &[&str]) -> Self {
        Self {
            namespace: namespace.into(),
            registry_urls: registry_urls.iter().map(|s| (*s).to_string()).collect(),
        }
    }
    /// Build from parsed CLI matches, falling back to `default_namespace`
    /// when no namespace flag was given.
    pub fn from_matches(matches: &ArgMatches, default_namespace: &str) -> Self {
        Self {
            namespace: matches
                .value_of("namespace")
                .map(From::from)
                .unwrap_or_else(|| default_namespace.into()),
            registry_urls: matches
                .values_of("registry_url")
                .map(|v| v.map(|s| s.to_string()).collect())
                .unwrap_or_else(Vec::new),
        }
    }
    /// The target namespace.
    pub fn namespace(&self) -> &str {
        &self.namespace
    }
    /// Borrowed view of the registry URLs.
    pub fn registry_urls(&self) -> Vec<&str> {
        self.registry_urls.iter().map(AsRef::as_ref).collect()
    }
}
/// Create-or-update and delete operations for a Kubernetes resource type.
#[async_trait]
pub trait KubeCrud {
    /// Create the resource, or replace it if one named `name` already exists.
    async fn upsert(&self, client: &APIClient, namespace: &str, name: &str) -> anyhow::Result<()>;
    /// Delete the resource; implementations treat a failed delete as non-fatal.
    async fn delete(client: &APIClient, namespace: &str, name: &str) -> anyhow::Result<()>;
}
#[async_trait]
impl KubeCrud for Secret {
    /// Replace the secret when it exists, otherwise create it.
    /// Note: this is check-then-act, not atomic — a concurrent writer could race.
    async fn upsert(&self, client: &APIClient, namespace: &str, name: &str) -> anyhow::Result<()> {
        let pp = PostParams::default();
        let secrets: Api<Secret> = Api::namespaced(client.clone(), namespace);
        if secrets.get(&name).await.is_ok() {
            secrets.replace(&name, &pp, &self).await?;
            debug!("Secret {} updated", name);
        } else {
            secrets.create(&pp, &self).await?;
            debug!("Secret {} created", name);
        }
        Ok(())
    }
    /// Best-effort delete: a failed delete (e.g. already gone) is ignored.
    async fn delete(client: &APIClient, namespace: &str, name: &str) -> anyhow::Result<()> {
        let dp = DeleteParams::default();
        let objects: Api<Secret> = Api::namespaced(client.clone(), namespace);
        if objects.delete(&name, &dp).await.is_ok() {
            debug!("Secret {} deleted", name);
        }
        Ok(())
    }
}
#[async_trait]
impl KubeCrud for ServiceAccount {
    /// Replace the account when it exists — merging in existing fields that
    /// the new object leaves unset — otherwise create it.
    async fn upsert(&self, client: &APIClient, namespace: &str, name: &str) -> anyhow::Result<()> {
        let pp = PostParams::default();
        let accounts: Api<ServiceAccount> = Api::namespaced(client.clone(), namespace);
        if let Ok(existing) = accounts.get(&name).await {
            let object = self.clone();
            // Field-wise merge: prefer the new value, fall back to the
            // existing one when the new object does not set it.
            let object = ServiceAccount {
                metadata: object.metadata.or(existing.metadata),
                automount_service_account_token: object
                    .automount_service_account_token
                    .or(existing.automount_service_account_token),
                secrets: object.secrets.or(existing.secrets),
                image_pull_secrets: object.image_pull_secrets.or(existing.image_pull_secrets),
            };
            accounts.replace(&name, &pp, &object).await?;
            debug!("ServiceAccount {} updated", name);
        } else {
            accounts.create(&pp, &self).await?;
            debug!("ServiceAccount {} created", name);
        }
        Ok(())
    }
    /// Best-effort delete: a failed delete (e.g. already gone) is ignored.
    async fn delete(client: &APIClient, namespace: &str, name: &str) -> anyhow::Result<()> {
        let dp = DeleteParams::default();
        let objects: Api<ServiceAccount> = Api::namespaced(client.clone(), namespace);
        if objects.delete(&name, &dp).await.is_ok() {
            debug!("ServiceAccount {} deleted", name);
        }
        Ok(())
    }
}
#[async_trait]
impl KubeCrud for Deployment {
    /// Replace the deployment when it exists; if the replace is rejected,
    /// fall back to deleting and recreating it.
    async fn upsert(&self, client: &APIClient, namespace: &str, name: &str) -> Result<(), Error> {
        let pp = PostParams::default();
        let deployments: Api<Deployment> = Api::namespaced(client.clone(), namespace);
        if deployments.get(&name).await.is_ok() {
            if deployments.replace(&name, &pp, &self).await.is_err() {
                debug!(
                    "Deployment {} cannot be updated, trying to delete and recreate",
                    name
                );
                deployments.delete(&name, &DeleteParams::default()).await?;
                deployments.create(&pp, &self).await?;
            };
            debug!("Deployment {} updated", name);
        } else {
            deployments.create(&pp, &self).await?;
            debug!("Deployment {} created", name);
        }
        Ok(())
    }
    /// Best-effort delete: a failed delete (e.g. already gone) is ignored.
    async fn delete(client: &APIClient, namespace: &str, name: &str) -> anyhow::Result<()> {
        let dp = DeleteParams::default();
        let objects: Api<Deployment> = Api::namespaced(client.clone(), namespace);
        if objects.delete(&name, &dp).await.is_ok() {
            debug!("Deployment {} deleted", name);
        }
        Ok(())
    }
}
#[async_trait]
impl KubeCrud for ClusterRoleBinding {
    /// Replace the binding when it exists, otherwise create it.
    /// Cluster-scoped resource (`Api::all`), so the namespace argument is unused.
    async fn upsert(&self, client: &APIClient, _namespace: &str, name: &str) -> Result<(), Error> {
        let pp = PostParams::default();
        let bindings: Api<ClusterRoleBinding> = Api::all(client.clone());
        if bindings.get(&name).await.is_ok() {
            bindings.replace(&name, &pp, &self).await?;
            debug!("ClusterRoleBinding {} updated", name);
        } else {
            bindings.create(&pp, &self).await?;
            debug!("ClusterRoleBinding {} created", name);
        }
        Ok(())
    }
    /// Best-effort delete: a failed delete (e.g. already gone) is ignored.
    async fn delete(client: &APIClient, _namespace: &str, name: &str) -> anyhow::Result<()> {
        let dp = DeleteParams::default();
        let objects: Api<ClusterRoleBinding> = Api::all(client.clone());
        if objects.delete(&name, &dp).await.is_ok() {
            debug!("ClusterRoleBinding {} deleted", name);
        }
        Ok(())
    }
}
#[async_trait]
impl KubeCrud for ClusterRole {
    /// Replace the role when it exists, otherwise create it.
    /// Cluster-scoped resource (`Api::all`), so the namespace argument is unused.
    async fn upsert(&self, client: &APIClient, _namespace: &str, name: &str) -> Result<(), Error> {
        let pp = PostParams::default();
        let bindings: Api<ClusterRole> = Api::all(client.clone());
        if bindings.get(&name).await.is_ok() {
            bindings.replace(&name, &pp, &self).await?;
            debug!("ClusterRole {} updated", name);
        } else {
            bindings.create(&pp, &self).await?;
            debug!("ClusterRole {} created", name);
        }
        Ok(())
    }
    /// Best-effort delete: a failed delete (e.g. already gone) is ignored.
    async fn delete(client: &APIClient, _namespace: &str, name: &str) -> anyhow::Result<()> {
        let dp = DeleteParams::default();
        let objects: Api<ClusterRole> = Api::all(client.clone());
        if objects.delete(&name, &dp).await.is_ok() {
            debug!("ClusterRole {} deleted", name);
        }
        Ok(())
    }
}
|
use bitflags::bitflags; // bitflags! macro
bitflags! {
    // CRUD permission set: one bit per operation.
    struct Permission: u8 {
        const NONE = 0b0000;
        const CREATE = 0b1000;
        const READ = 0b0100;
        const UPDATE = 0b0010;
        const DELETE = 0b0001;
    }
}
/// Caller categories that permissions are granted to.
enum Group {
    Guest,
    User,
    Admin,
    Owner,
}
/// A user record; fields are optional.
struct User {
    id: Option<u32>,
    name: Option<String>,
}
/// Per-field permission masks for `User`.
struct UserPermissions {
    id: Permission,
    name: Permission,
}
/// Per-model permission lookup and checking.
trait PermissionInfo {
    type Model;
    type ModelPermissions;
    /// The permissions granted to group `g` for this model.
    fn get_permissions(g: Group) -> Self::ModelPermissions;
    /// Check that `group` holds every permission bit in `desired`;
    /// the `Err` message names the offending field.
    fn check_permissions(
        self,
        group: Group,
        desired: Self::ModelPermissions,
    ) -> Result<(), String>;
}
impl PermissionInfo for User {
    type Model = Self;
    type ModelPermissions = UserPermissions;

    /// Not implemented yet: the group-to-permissions mapping is undecided.
    fn get_permissions(g: Group) -> Self::ModelPermissions {
        todo!();
    }

    /// Verify that the permissions granted to `group` cover every bit
    /// requested in `desired`, field by field.
    fn check_permissions(
        self,
        group: Group,
        desired: Self::ModelPermissions,
    ) -> Result<(), String> {
        let granted = Self::get_permissions(group);
        // A field passes when the granted mask contains all desired bits.
        if (granted.id & desired.id) != desired.id {
            return Err(String::from("id does not satisfy permissions"));
        }
        if (granted.name & desired.name) != desired.name {
            return Err(String::from("name does not satisfy permissions"));
        }
        Ok(())
    }
}
// #[permissions(guest:R,user:CR,owner:CRUD)]
// Action -> Requires some permissions
// Action -> Can derive permissions from subactions
// All actions should be compile-time known?
// // // // struct Action {
// // // // // instance
// // // // // Fields: ...
// // // // // fn action()
// // // // }
// or
// |
// v
// Permission requirements
// Something like: #[crud_enable(table_name:users)]
// #[action(permissions(StructName:CR, field1:C, field2:R))]
// fn some_action() -> StructName {
// }
// Maybe should be enclosed and within that encolsure to have permissions somehow???
// to be able to derive on the fly and then just call u.get_all()
// u : User // u.get_all() -> from db: get_all is action
// who asked for u.get_all() ?
// Does group G ((has Permissions P) for action A) on resource T
// trait Permissions<T> {
// fn has_permissions(g: Group, a: Action) {}
// }
fn main() {
    // it works!
    // Combine two permission bits with bitwise OR; Debug impl comes from `bitflags!`.
    let flags: Permission = Permission::CREATE | Permission::READ;
    println!("{:?}", flags);
}
// delete account should just disable account
|
//! Compute the binary representation of a type
use base_db::CrateId;
use chalk_ir::{AdtId, TyKind};
use hir_def::{
layout::{
Abi, FieldsShape, Integer, Layout, LayoutCalculator, LayoutError, Primitive, ReprOptions,
RustcEnumVariantIdx, Scalar, Size, StructKind, TargetDataLayout, Variants, WrappingRange,
},
LocalFieldId,
};
use stdx::never;
use crate::{db::HirDatabase, Interner, Substitution, Ty};
use self::adt::struct_variant_idx;
pub use self::{
adt::{layout_of_adt_query, layout_of_adt_recover},
target::target_data_layout_query,
};
/// Early-return from the surrounding function with a
/// `LayoutError::UserError` built from the given format string.
macro_rules! user_error {
    ($x: expr) => {
        return Err(LayoutError::UserError(format!($x)))
    };
}
mod adt;
mod target;
/// Context for layout computation: the crate being analyzed plus its target
/// data layout.
struct LayoutCx<'a> {
    krate: CrateId,
    target: &'a TargetDataLayout,
}
impl<'a> LayoutCalculator for LayoutCx<'a> {
    type TargetDataLayoutRef = &'a TargetDataLayout;
    /// Layout bugs are non-fatal here: report via `never!` instead of panicking.
    fn delay_bug(&self, txt: &str) {
        never!("{}", txt);
    }
    fn current_data_layout(&self) -> &'a TargetDataLayout {
        self.target
    }
}
/// A scalar primitive whose valid range spans its full size (no niche).
fn scalar_unit(dl: &TargetDataLayout, value: Primitive) -> Scalar {
    Scalar::Initialized { value, valid_range: WrappingRange::full(value.size(dl)) }
}
/// Layout for a single full-range scalar value.
fn scalar(dl: &TargetDataLayout, value: Primitive) -> Layout {
    Layout::scalar(dl, scalar_unit(dl, value))
}
/// Computes the memory layout of `ty` in `krate`, mirroring rustc's layout
/// algorithm: ADTs are delegated to their own (cacheable) query, scalars get
/// primitive layouts with niche-bearing valid ranges, tuples/arrays/slices
/// are aggregated, and potentially-wide pointers become (data, metadata)
/// scalar pairs. Types that still contain placeholders or are not yet
/// supported return a `LayoutError` instead.
pub fn layout_of_ty(db: &dyn HirDatabase, ty: &Ty, krate: CrateId) -> Result<Layout, LayoutError> {
    let Some(target) = db.target_data_layout(krate) else { return Err(LayoutError::TargetLayoutNotAvailable) };
    let cx = LayoutCx { krate, target: &target };
    let dl = &*cx.current_data_layout();
    Ok(match ty.kind(Interner) {
        // Struct/enum/union layout is computed by a dedicated query.
        TyKind::Adt(AdtId(def), subst) => db.layout_of_adt(*def, subst.clone())?,
        TyKind::Scalar(s) => match s {
            // `bool` is an i8 restricted to 0..=1 (leaves a niche).
            chalk_ir::Scalar::Bool => Layout::scalar(
                dl,
                Scalar::Initialized {
                    value: Primitive::Int(Integer::I8, false),
                    valid_range: WrappingRange { start: 0, end: 1 },
                },
            ),
            // `char` is a u32 restricted to 0..=0x10FFFF.
            chalk_ir::Scalar::Char => Layout::scalar(
                dl,
                Scalar::Initialized {
                    value: Primitive::Int(Integer::I32, false),
                    valid_range: WrappingRange { start: 0, end: 0x10FFFF },
                },
            ),
            // Signed integers; `isize` takes the target's pointer-sized int.
            chalk_ir::Scalar::Int(i) => scalar(
                dl,
                Primitive::Int(
                    match i {
                        chalk_ir::IntTy::Isize => dl.ptr_sized_integer(),
                        chalk_ir::IntTy::I8 => Integer::I8,
                        chalk_ir::IntTy::I16 => Integer::I16,
                        chalk_ir::IntTy::I32 => Integer::I32,
                        chalk_ir::IntTy::I64 => Integer::I64,
                        chalk_ir::IntTy::I128 => Integer::I128,
                    },
                    true,
                ),
            ),
            // Unsigned integers; same mapping with the sign flag cleared.
            chalk_ir::Scalar::Uint(i) => scalar(
                dl,
                Primitive::Int(
                    match i {
                        chalk_ir::UintTy::Usize => dl.ptr_sized_integer(),
                        chalk_ir::UintTy::U8 => Integer::I8,
                        chalk_ir::UintTy::U16 => Integer::I16,
                        chalk_ir::UintTy::U32 => Integer::I32,
                        chalk_ir::UintTy::U64 => Integer::I64,
                        chalk_ir::UintTy::U128 => Integer::I128,
                    },
                    false,
                ),
            ),
            chalk_ir::Scalar::Float(f) => scalar(
                dl,
                match f {
                    chalk_ir::FloatTy::F32 => Primitive::F32,
                    chalk_ir::FloatTy::F64 => Primitive::F64,
                },
            ),
        },
        TyKind::Tuple(len, tys) => {
            // Only the empty tuple is known sized up front; a non-empty tuple's
            // last element may be unsized.
            let kind = if *len == 0 { StructKind::AlwaysSized } else { StructKind::MaybeUnsized };
            let fields = tys
                .iter(Interner)
                .map(|k| layout_of_ty(db, k.assert_ty_ref(Interner), krate))
                .collect::<Result<Vec<_>, _>>()?;
            // Two `.iter()` passes are deliberate: `univariant` is generic over
            // a field type that derefs to `&Layout` (see the `&&Layout`
            // turbofish in `layout_of_unit`), so it needs `Vec<&&Layout>` here.
            // Do NOT "simplify" this to a single pass.
            let fields = fields.iter().collect::<Vec<_>>();
            let fields = fields.iter().collect::<Vec<_>>();
            cx.univariant(dl, &fields, &ReprOptions::default(), kind).ok_or(LayoutError::Unknown)?
        }
        TyKind::Array(element, count) => {
            // Only fully-evaluated, integral array lengths are supported.
            let count = match count.data(Interner).value {
                chalk_ir::ConstValue::Concrete(c) => match c.interned {
                    hir_def::type_ref::ConstScalar::Int(x) => x as u64,
                    hir_def::type_ref::ConstScalar::UInt(x) => x as u64,
                    hir_def::type_ref::ConstScalar::Unknown => {
                        user_error!("unknown const generic parameter")
                    }
                    _ => user_error!("mismatched type of const generic parameter"),
                },
                _ => return Err(LayoutError::HasPlaceholder),
            };
            let element = layout_of_ty(db, element, krate)?;
            let size = element.size.checked_mul(count, dl).ok_or(LayoutError::SizeOverflow)?;
            // A non-empty array of an uninhabited element is itself uninhabited.
            let abi = if count != 0 && matches!(element.abi, Abi::Uninhabited) {
                Abi::Uninhabited
            } else {
                Abi::Aggregate { sized: true }
            };
            // Empty arrays carry no niche (there is no element to host one).
            let largest_niche = if count != 0 { element.largest_niche } else { None };
            Layout {
                variants: Variants::Single { index: struct_variant_idx() },
                fields: FieldsShape::Array { stride: element.size, count },
                abi,
                largest_niche,
                align: element.align,
                size,
            }
        }
        TyKind::Slice(element) => {
            // `[T]` is unsized: element stride/align are known but the total
            // size is zero here (length lives in the fat-pointer metadata).
            let element = layout_of_ty(db, element, krate)?;
            Layout {
                variants: Variants::Single { index: struct_variant_idx() },
                fields: FieldsShape::Array { stride: element.size, count: 0 },
                abi: Abi::Aggregate { sized: false },
                largest_niche: None,
                align: element.align,
                size: Size::ZERO,
            }
        }
        // Potentially-wide pointers.
        TyKind::Ref(_, _, pointee) | TyKind::Raw(_, pointee) => {
            let mut data_ptr = scalar_unit(dl, Primitive::Pointer);
            // References are non-null, so exclude 0 from the valid range
            // (raw pointers stay fully unrestricted).
            if matches!(ty.kind(Interner), TyKind::Ref(..)) {
                data_ptr.valid_range_mut().start = 1;
            }
            // let pointee = tcx.normalize_erasing_regions(param_env, pointee);
            // if pointee.is_sized(tcx.at(DUMMY_SP), param_env) {
            //     return Ok(tcx.intern_layout(LayoutS::scalar(cx, data_ptr)));
            // }
            // Metadata depends on the unsized tail of the pointee.
            let unsized_part = struct_tail_erasing_lifetimes(db, pointee.clone());
            let metadata = match unsized_part.kind(Interner) {
                TyKind::Slice(_) | TyKind::Str => {
                    // Slices/str carry a usize length.
                    scalar_unit(dl, Primitive::Int(dl.ptr_sized_integer(), false))
                }
                TyKind::Dyn(..) => {
                    // Trait objects carry a non-null vtable pointer.
                    let mut vtable = scalar_unit(dl, Primitive::Pointer);
                    vtable.valid_range_mut().start = 1;
                    vtable
                }
                _ => {
                    // pointee is sized
                    return Ok(Layout::scalar(dl, data_ptr));
                }
            };
            // Effectively a (ptr, meta) tuple.
            cx.scalar_pair(data_ptr, metadata)
        }
        // Function items are zero-sized, like `()`.
        TyKind::FnDef(_, _) => layout_of_unit(&cx, dl)?,
        TyKind::Str => Layout {
            variants: Variants::Single { index: struct_variant_idx() },
            fields: FieldsShape::Array { stride: Size::from_bytes(1), count: 0 },
            abi: Abi::Aggregate { sized: false },
            largest_niche: None,
            align: dl.i8_align,
            size: Size::ZERO,
        },
        TyKind::Never => Layout {
            variants: Variants::Single { index: struct_variant_idx() },
            fields: FieldsShape::Primitive,
            abi: Abi::Uninhabited,
            largest_niche: None,
            align: dl.i8_align,
            size: Size::ZERO,
        },
        TyKind::Dyn(_) | TyKind::Foreign(_) => {
            // Unit layout, but marked unsized; anything else is a bug because
            // `layout_of_unit` always produces an aggregate.
            let mut unit = layout_of_unit(&cx, dl)?;
            match unit.abi {
                Abi::Aggregate { ref mut sized } => *sized = false,
                _ => user_error!("bug"),
            }
            unit
        }
        TyKind::Function(_) => {
            // Function pointers are thin, non-null pointers.
            let mut ptr = scalar_unit(dl, Primitive::Pointer);
            ptr.valid_range_mut().start = 1;
            Layout::scalar(dl, ptr)
        }
        TyKind::Closure(_, _)
        | TyKind::OpaqueType(_, _)
        | TyKind::Generator(_, _)
        | TyKind::GeneratorWitness(_, _) => return Err(LayoutError::NotImplemented),
        TyKind::AssociatedType(_, _)
        | TyKind::Error
        | TyKind::Alias(_)
        | TyKind::Placeholder(_)
        | TyKind::BoundVar(_)
        | TyKind::InferenceVar(_, _) => return Err(LayoutError::HasPlaceholder),
    })
}
/// Layout of the unit type `()`: an empty, always-sized univariant struct.
/// The `&&Layout` turbofish fixes `univariant`'s field type parameter even
/// though the field slice is empty (it could not be inferred otherwise).
fn layout_of_unit(cx: &LayoutCx<'_>, dl: &TargetDataLayout) -> Result<Layout, LayoutError> {
    cx.univariant::<RustcEnumVariantIdx, &&Layout>(
        dl,
        &[],
        &ReprOptions::default(),
        StructKind::AlwaysSized,
    )
    .ok_or(LayoutError::Unknown)
}
/// Peels `pointee` down to the type that determines its pointer metadata:
/// for a struct this is the type of its last field; any other type (or a
/// fieldless struct) is its own tail.
fn struct_tail_erasing_lifetimes(db: &dyn HirDatabase, pointee: Ty) -> Ty {
    if let TyKind::Adt(AdtId(hir_def::AdtId::StructId(i)), subst) = pointee.kind(Interner) {
        let data = db.struct_data(*i);
        if let Some((last_field, _)) = data.variant_data.fields().iter().rev().next() {
            return field_ty(db, (*i).into(), last_field, subst);
        }
    }
    pointee
}
/// Resolves the declared type of field `fd` of variant `def`, applying the
/// generic arguments in `subst`.
fn field_ty(
    db: &dyn HirDatabase,
    def: hir_def::VariantId,
    fd: LocalFieldId,
    subst: &Substitution,
) -> Ty {
    let field_types = db.field_types(def);
    field_types[fd].clone().substitute(Interner, subst)
}
#[cfg(test)]
mod tests;
|
use std::fmt;
use std::str::FromStr;
#[derive(Debug, PartialEq, Clone)]
/// A list of supported keys that we can query from the OS. Outside of mod.
pub enum Keycode {
    Key0,
    Key1,
    Key2,
    Key3,
    Key4,
    Key5,
    Key6,
    Key7,
    Key8,
    Key9,
    A,
    B,
    C,
    D,
    E,
    F,
    G,
    H,
    I,
    J,
    K,
    L,
    M,
    N,
    O,
    P,
    Q,
    R,
    S,
    T,
    U,
    V,
    W,
    X,
    Y,
    Z,
    F1,
    F2,
    F3,
    F4,
    F5,
    F6,
    F7,
    F8,
    F9,
    F10,
    F11,
    F12,
    Escape,
    Space,
    LControl,
    RControl,
    LShift,
    RShift,
    LAlt,
    RAlt,
    Meta,
    Enter,
    Up,
    Down,
    Left,
    Right,
    Backspace,
    CapsLock,
    Tab,
    Home,
    End,
    PageUp,
    PageDown,
    Insert,
    Delete,
    // The following keys names represent the position of the key in a US keyboard,
    // not the sign value. In a different keyboards and OS, the position can vary.
    Grave,
    Minus,
    Equal,
    LeftBracket,
    RightBracket,
    BackSlash,
    Semicolon,
    Apostrophe,
    Comma,
    Dot,
    Slash,
}
impl FromStr for Keycode {
    type Err = String;
    /// Parses a keycode from its exact variant name (e.g. `"LShift"`).
    ///
    /// Every variant of [`Keycode`] must have an arm here; a previous version
    /// of this table was missing `"Key0"`, making that variant unparseable.
    fn from_str(s: &str) -> Result<Self, Self::Err> {
        match s {
            "Key0" => Ok(Self::Key0),
            "Key1" => Ok(Self::Key1),
            "Key2" => Ok(Self::Key2),
            "Key3" => Ok(Self::Key3),
            "Key4" => Ok(Self::Key4),
            "Key5" => Ok(Self::Key5),
            "Key6" => Ok(Self::Key6),
            "Key7" => Ok(Self::Key7),
            "Key8" => Ok(Self::Key8),
            "Key9" => Ok(Self::Key9),
            "A" => Ok(Self::A),
            "B" => Ok(Self::B),
            "C" => Ok(Self::C),
            "D" => Ok(Self::D),
            "E" => Ok(Self::E),
            "F" => Ok(Self::F),
            "G" => Ok(Self::G),
            "H" => Ok(Self::H),
            "I" => Ok(Self::I),
            "J" => Ok(Self::J),
            "K" => Ok(Self::K),
            "L" => Ok(Self::L),
            "M" => Ok(Self::M),
            "N" => Ok(Self::N),
            "O" => Ok(Self::O),
            "P" => Ok(Self::P),
            "Q" => Ok(Self::Q),
            "R" => Ok(Self::R),
            "S" => Ok(Self::S),
            "T" => Ok(Self::T),
            "U" => Ok(Self::U),
            "V" => Ok(Self::V),
            "W" => Ok(Self::W),
            "X" => Ok(Self::X),
            "Y" => Ok(Self::Y),
            "Z" => Ok(Self::Z),
            "F1" => Ok(Self::F1),
            "F2" => Ok(Self::F2),
            "F3" => Ok(Self::F3),
            "F4" => Ok(Self::F4),
            "F5" => Ok(Self::F5),
            "F6" => Ok(Self::F6),
            "F7" => Ok(Self::F7),
            "F8" => Ok(Self::F8),
            "F9" => Ok(Self::F9),
            "F10" => Ok(Self::F10),
            "F11" => Ok(Self::F11),
            "F12" => Ok(Self::F12),
            "Escape" => Ok(Self::Escape),
            "Space" => Ok(Self::Space),
            "LControl" => Ok(Self::LControl),
            "RControl" => Ok(Self::RControl),
            "LShift" => Ok(Self::LShift),
            "RShift" => Ok(Self::RShift),
            "LAlt" => Ok(Self::LAlt),
            "RAlt" => Ok(Self::RAlt),
            "Meta" => Ok(Self::Meta),
            "Enter" => Ok(Self::Enter),
            "Up" => Ok(Self::Up),
            "Down" => Ok(Self::Down),
            "Left" => Ok(Self::Left),
            "Right" => Ok(Self::Right),
            "Backspace" => Ok(Self::Backspace),
            "CapsLock" => Ok(Self::CapsLock),
            "Tab" => Ok(Self::Tab),
            "Home" => Ok(Self::Home),
            "End" => Ok(Self::End),
            "PageUp" => Ok(Self::PageUp),
            "PageDown" => Ok(Self::PageDown),
            "Insert" => Ok(Self::Insert),
            "Delete" => Ok(Self::Delete),
            "Grave" => Ok(Self::Grave),
            "Minus" => Ok(Self::Minus),
            "Equal" => Ok(Self::Equal),
            "LeftBracket" => Ok(Self::LeftBracket),
            "RightBracket" => Ok(Self::RightBracket),
            "BackSlash" => Ok(Self::BackSlash),
            "Semicolon" => Ok(Self::Semicolon),
            "Apostrophe" => Ok(Self::Apostrophe),
            "Comma" => Ok(Self::Comma),
            "Dot" => Ok(Self::Dot),
            "Slash" => Ok(Self::Slash),
            _ => Err(String::from("failed to parse keycode")),
        }
    }
}
impl fmt::Display for Keycode {
    /// Renders the keycode as its variant name, delegating to `Debug`
    /// (exactly what `write!(f, "{:?}", self)` does).
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        fmt::Debug::fmt(self, f)
    }
}
|
/// Parses the comma-separated puzzle input into the starting numbers.
/// Panics if any token is not a valid unsigned integer.
#[aoc_generator(day15)]
pub fn input_generator(input: &str) -> Vec<usize> {
    input
        .split(',')
        .map(|token| token.parse().unwrap())
        .collect()
}
/// Plays the memory game for 2020 turns from the starting numbers.
#[aoc(day15, part1)]
pub fn part1(starting: &Vec<usize>) -> usize {
    const ITERATIONS: usize = 2020;
    play_game(starting.clone(), vec![None; ITERATIONS], ITERATIONS)
}
/// Same game as part 1 but for thirty million turns.
#[aoc(day15, part2)]
pub fn part2(starting: &Vec<usize>) -> usize {
    const ITERATIONS: usize = 30000000;
    play_game(starting.clone(), vec![None; ITERATIONS], ITERATIONS)
}
// Helpers
/// Plays the Van Eck "memory game": each turn speaks 0 if the previous
/// number is new, otherwise the gap since it was last spoken. Returns the
/// number spoken on turn `iterations`.
///
/// `last_index[n]` holds the 1-based turn on which `n` was last spoken;
/// the caller must size it to at least `iterations` entries.
fn play_game(
    mut numbers: Vec<usize>,
    mut last_index: Vec<Option<usize>>,
    iterations: usize,
) -> usize {
    // Seed the history with the starting numbers (1-based turns).
    for (turn, &n) in numbers.iter().enumerate() {
        last_index[n] = Some(turn + 1);
    }
    for turn in numbers.len()..iterations {
        let previous = numbers[turn - 1];
        // Look up the previous number's history BEFORE recording this turn.
        let next = match last_index[previous] {
            Some(seen_at) => turn - seen_at,
            None => 0,
        };
        numbers.push(next);
        last_index[previous] = Some(turn);
    }
    *numbers.last().unwrap()
}
#[cfg(test)]
mod tests {
    use super::*;
    // Worked example from the puzzle statement: starting from 0,3,6 the
    // 2020th number spoken is 436.
    #[test]
    fn test_part1() {
        assert_eq!(part1(&input_generator("0,3,6")), 436);
    }
    // Part 2 runs thirty million iterations, so the assertion stays disabled.
    #[test]
    fn test_part2() {
        // This test can be slow, leaving it commented for the future
        //assert_eq!(part2(&input_generator("0,3,6")), 175594);
    }
}
|
//! The CXX code generator for constructing and compiling C++ code.
//!
//! This is intended as a mechanism for embedding the `cxx` crate into
//! higher-level code generators. See [dtolnay/cxx#235] and
//! [https://github.com/google/autocxx].
//!
//! [dtolnay/cxx#235]: https://github.com/dtolnay/cxx/issues/235
//! [https://github.com/google/autocxx]: https://github.com/google/autocxx
#![doc(html_root_url = "https://docs.rs/cxx-gen/0.7.107")]
#![deny(missing_docs)]
#![allow(dead_code)]
#![allow(
clippy::cast_sign_loss,
clippy::default_trait_access,
clippy::derive_partial_eq_without_eq,
clippy::enum_glob_use,
clippy::if_same_then_else,
clippy::inherent_to_string,
clippy::items_after_statements,
clippy::match_bool,
clippy::match_on_vec_items,
clippy::match_same_arms,
clippy::missing_errors_doc,
clippy::module_name_repetitions,
clippy::must_use_candidate,
clippy::needless_pass_by_value,
clippy::new_without_default,
clippy::nonminimal_bool,
clippy::option_if_let_else,
clippy::or_fun_call,
clippy::redundant_else,
clippy::shadow_unrelated,
clippy::similar_names,
clippy::single_match_else,
clippy::struct_excessive_bools,
clippy::too_many_arguments,
clippy::too_many_lines,
clippy::toplevel_ref_arg,
clippy::uninlined_format_args,
// clippy bug: https://github.com/rust-lang/rust-clippy/issues/6983
clippy::wrong_self_convention
)]
mod error;
mod gen;
mod syntax;
pub use crate::error::Error;
pub use crate::gen::include::{Include, HEADER};
pub use crate::gen::{GeneratedCode, Opt};
pub use crate::syntax::IncludeKind;
use proc_macro2::TokenStream;
/// Generate C++ bindings code from a Rust token stream. This should be a Rust
/// token stream which somewhere contains a `#[cxx::bridge] mod {}`.
pub fn generate_header_and_cc(rust_source: TokenStream, opt: &Opt) -> Result<GeneratedCode, Error> {
let syntax = syn::parse2(rust_source)
.map_err(crate::gen::Error::from)
.map_err(Error::from)?;
gen::generate(syntax, opt).map_err(Error::from)
}
|
#![allow(dead_code)]
#![allow(non_snake_case)]
#![allow(unused_must_use)]
#![allow(unused_parens)]
use std::fs::File;
use std::io::prelude::*;
use std::io;
use std::os::unix::io::AsRawFd;
use std::mem;
use std::thread;
use std::sync::mpsc;
use regex::Regex;
use libc;
use std::os::unix::io::RawFd;
use std::ops::Drop;
const DEVICESDIR: &str = "/dev/input/";
/// Scans /proc/bus/input/devices for keyboard handlers ("sysrq ... kbd eventN")
/// and returns the matching /dev/input/eventN device paths.
/// Panics if the proc file cannot be read or the regex fails to compile.
fn get_keyboard_device_fnames() -> Vec<String> {
    let mut listing = String::new();
    File::open("/proc/bus/input/devices")
        .unwrap()
        .read_to_string(&mut listing)
        .unwrap();
    let reg = Regex::new(r"sysrq.*?kbd (event\d+)").unwrap();
    reg.captures_iter(&listing)
        .map(|capture| DEVICESDIR.to_string() + &capture[1])
        .collect()
}
/// Smoke test: just print whatever keyboard devices were detected.
#[test]
fn test_get_keyboard_device_fnames() {
    get_keyboard_device_fnames()
        .iter()
        .for_each(|fname| println!("{}", fname));
}
/// Binary-compatible mirror of the kernel's input event record, read raw
/// from /dev/input/eventN via read(2).
/// NOTE(review): the name is misspelled ("Evnet" for "Event"); kept as-is
/// because it is referenced throughout this module.
#[repr(C)]
struct InputEvnet {
    time: libc::timeval, // event timestamp
    ty: u16,             // event type (this module filters on 0, 1 and 4)
    code: u16,           // event code (key identifier for key events)
    value: i32,          // event value (1/0 treated as press/release, 2 skipped as repeat below)
}
impl InputEvnet {
    /// Returns a fully zeroed event, ready to be overwritten by read(2).
    fn new() -> InputEvnet {
        let zero_time = libc::timeval { tv_sec: 0, tv_usec: 0 };
        InputEvnet { time: zero_time, ty: 0, code: 0, value: 0 }
    }
}
// Reads ten raw events from the last detected keyboard and prints their
// types. Needs real hardware and root, hence #[ignore].
#[test]
#[ignore]
fn test_read_keyboard() {
    let fnames = get_keyboard_device_fnames();
    let mut file = File::open(fnames.last().unwrap()).unwrap();
    let mut event = InputEvnet {
        time: libc::timeval { tv_sec: 0, tv_usec: 0 },
        ty: 0,
        code: 0,
        value: 0,
    };
    unsafe {
        // `event` is #[repr(C)], so the kernel can fill it in place.
        // NOTE(review): the return value of read(2) is ignored here; a short
        // or failed read would print stale data.
        let event_p = &mut event as *mut InputEvnet as *mut libc::c_void;
        for i in 0..10 {
            libc::read(file.as_raw_fd(), event_p, mem::size_of::<InputEvnet>());
            println!("{}", &event.ty);
        }
    }
}
/// Aggregates every detected keyboard device: one reader thread per device
/// funnels raw (type, code, value) events into a single channel.
pub struct Keyboard {
    receiver: mpsc::Receiver<(u16, u16, i32)>,       // merged event stream from all reader threads
    keyboard_handles: Vec<thread::JoinHandle<()>>,   // one reader thread per device (never joined here)
    raw_fd: Vec<RawFd>,                              // raw fds kept for the grab/release ioctl
    options: u32,                                    // OpenOption bit flags (EVIOCGRAB)
}
/// Reader-thread body: blocks on read(2) for `device`, forwarding each event
/// as a (type, code, value) tuple over `sender`; returns when read fails.
fn read_keyboard(device: File, sender: mpsc::Sender<(u16, u16, i32)>) {
    let mut event = InputEvnet::new();
    let event_p = &mut event as *mut InputEvnet as *mut libc::c_void;
    loop {
        unsafe {
            // -1 means the read failed (e.g. the device went away) — stop.
            if libc::read(device.as_raw_fd(), event_p, mem::size_of::<InputEvnet>()) == -1 {
                return;
            }
        }
        // Send errors (receiver dropped) are deliberately ignored; see
        // #![allow(unused_must_use)] at the top of this file.
        sender.send((event.ty, event.code, event.value));
    }
}
// Presumably implemented in a C helper: wraps the EVIOCGRAB ioctl.
// mode 1 grabs the device for exclusive access, mode 0 releases it.
extern "C" {
    fn ioctl_eviocgrab(fd: libc::c_int, mode: libc::c_int) -> libc::c_int;
}
/// Bit flags recorded in `Keyboard::options` (checked on Drop to release grabs).
pub mod OpenOption {
    pub const EVIOCGRAB: u32 = 0x1;
}
// NOTE(review): a distinct constant from OpenOption::EVIOCGRAB; not referenced
// anywhere in this chunk — presumably mirrors a C-side value. Confirm before
// removing.
mod OpenOptionOfC {
    pub const EVIOCGRAB: u32 = 0x90;
}
impl Keyboard {
    /// Opens every detected keyboard device and spawns one reader thread per
    /// device; all events are merged into one mpsc channel.
    /// Fails with the underlying io::Error if any device cannot be opened
    /// (typically a permissions error — see the "use sudo" hints in the tests).
    pub fn open() -> Result<Keyboard, io::Error> {
        let mut keyboard_handles = Vec::new();
        let mut raw_fd: Vec<RawFd> = Vec::new();
        let (sender, receiver) = mpsc::channel();
        for fname in get_keyboard_device_fnames() {
            let device = File::open(fname)?;
            let child_sender = sender.clone();
            // Keep the raw fd before `device` moves into the thread;
            // the thread owns the File for its lifetime.
            raw_fd.push(device.as_raw_fd());
            keyboard_handles.push(
                thread::spawn(move || read_keyboard(device, child_sender))
            );
        }
        Ok(Keyboard {
            keyboard_handles: keyboard_handles,
            receiver: receiver,
            raw_fd: raw_fd,
            options: 0,
        })
    }
    /// Like `open`, but also grabs every device exclusively via EVIOCGRAB so
    /// no other process (including the display server) sees the events.
    /// NOTE(review): the ioctl's return value is ignored, so a failed grab is
    /// silent; Drop will still attempt the release.
    pub fn open_and_grab() -> Result<Keyboard, io::Error> {
        let mut kbd = Keyboard::open()?;
        let options = OpenOption::EVIOCGRAB;
        for fd in &kbd.raw_fd {
            unsafe {
                ioctl_eviocgrab(*fd, 1);
            }
        }
        // Record the grab so Drop knows to release it.
        kbd.options = options;
        Ok(kbd)
    }
    /// Blocks until the next raw event from any device.
    /// Panics if every reader thread has exited (channel closed).
    pub fn read(&self) -> (u16, u16, i32) {
        self.receiver.recv().unwrap()
    }
    // Discards events until one with event type `ty` arrives.
    fn read_when_state_is(&self, ty: u16) -> (u16, u16, i32) {
        loop {
            let (read_ty, code, state) = self.read();
            if(ty == read_ty){ return (ty, code, state) }
        }
    }
    /// Blocks until the next key event (type 1).
    pub fn read_key(&self) -> (u16, u16, i32) {
        return self.read_when_state_is(1);
    }
    /// Blocks until the next synchronization event (type 0).
    pub fn read_syn(&self) -> (u16, u16, i32) {
        return self.read_when_state_is(0);
    }
    /// Blocks until the next miscellaneous event (type 4).
    pub fn read_msc(&self) -> (u16, u16, i32) {
        return self.read_when_state_is(4);
    }
}
impl Drop for Keyboard {
    /// Releases any exclusive grab taken by `open_and_grab` so the devices
    /// become visible to other processes again.
    fn drop(&mut self) {
        let grabbed = self.options & OpenOption::EVIOCGRAB != 0;
        if grabbed {
            for fd in self.raw_fd.iter() {
                unsafe {
                    ioctl_eviocgrab(*fd, 0);
                }
            }
        }
    }
}
#[cfg(test)]
use std::time;
// Prints key presses/releases for three seconds. Needs real hardware and
// root, hence #[ignore].
#[test]
#[ignore]
fn test_keyboard() {
    let kbd = match Keyboard::open() {
        Ok(keyboard) => keyboard,
        Err(err) => {
            // Opening /dev/input devices requires elevated privileges.
            println!("You must use 'sudo'");
            return;
        }
    };
    thread::spawn(move || {
        loop {
            let (ty, code, value) = kbd.read();
            // value 2 is autorepeat — skip it, report only edges.
            if value == 2 {
                continue;
            }
            println!( "\t{} {}", code,
                if value == 1 { "push" } else { "leave" } );
        }
    });
    // Let the reader thread run for three seconds, then drop everything.
    let wait_duration = time::Duration::from_millis(3000);
    thread::sleep(wait_duration);
}
// Echoes raw events until the key with code 16 ('q' on a US layout, per the
// prompt below) is seen. Needs real hardware and root, hence #[ignore].
#[test]
#[ignore]
fn test_keyboard_loop() {
    let kbd = match Keyboard::open() {
        Ok(keyboard) => keyboard,
        Err(err) => {
            println!("You must use 'sudo'");
            return;
        }
    };
    println!("push q to exit");
    loop {
        let (ty, code, value) = kbd.read();
        if code == 16 {
            println!("end");
            break;
        }
        println!("\t{} {} {}", ty, code, value);
    }
}
|
/// Shows that a plain function, a fully annotated closure and an inferred
/// closure are interchangeable for the same one-argument computation.
fn closures() {
    fn increment(n: i32) -> i32 { n + 1 }
    let closure_annotated = |n: i32| -> i32 { n + 1 };
    let closure_inferred = |n| n + 1;
    let input = 15;
    println!("Regular function: {}", increment(input));
    println!("Annotated closure: {}", closure_annotated(input));
    println!("Inferred closure: {}", closure_inferred(input));
}
// Closures are able to capture variables from the outer scope three ways:
// 1) by reference: &T
// 2) by mutable reference: &mut T
// 3) by value: T
/// Walks through the three capture modes in order: shared borrow (`print`),
/// mutable borrow (`incr`), and by-value capture (`consume`). Statement
/// order matters here — several lines compile only because of where the
/// closures' last uses fall.
fn capturing() {
    use std::mem;
    let color = "green";
    // 'print' borrows a value of 'color'.
    // 'Color' remains borrowed until 'print' is called the last time.
    let print = || println!("Color: {}", color);
    print();
    // A shared reborrow is fine alongside the closure's shared borrow.
    let _reborrow = &color;
    print();
    // Allowed only because `print` is never called again after this point.
    let _color_moved = color;
    let mut count = 0;
    // 'mut' on the closure is required bc we use mutable variable inside it
    let mut incr = || {
        count += 1;
        println!("count is {}", count);
    };
    incr();
    // this borrow cannot be immutable since 'count' is declared as mutable
    // let _reborrow = &count;
    // incr();
    let movable = Box::new(3);
    // `mem::drop` takes `movable` by value, so the closure captures it by
    // value and can only be called once.
    let consume = || {
        println!("movable: {}", movable);
        mem::drop(movable)
    };
    consume();
    // You cannot call consume twice
    // consume();
}
/// Demonstrates `move` closures: `contains` takes ownership of `haystack`,
/// so the vector is unusable in this scope afterwards.
fn captures_cont() {
    let haystack = vec![1, 2, 3];
    let contains = move |needle| haystack.contains(needle);
    for probe in &[1, 2, 4] {
        println!("{}", contains(probe));
    }
    // Because `haystack` was moved into the closure, it cannot be touched here:
    // println!("Elements of the haystack: {}", haystack.len());
}
/// Runs each closure demo in turn.
fn main() {
    closures();
    capturing();
    captures_cont();
}
#![allow(dead_code)]
#[macro_use]
mod utils;
mod data_parser;
mod day_1;
mod day_10;
mod day_11;
mod day_12;
mod day_13;
mod day_2;
mod day_3;
mod day_4;
mod day_5;
mod day_6;
mod day_7;
mod day_8;
mod day_9;
// Intentionally empty entry point: the day modules above are compiled but
// not invoked here.
fn main() {}
|
//entrypoint to the program
// bring required dependencies into scope with use statement
use solana_program::{
account_info::AccountInfo, entrypoint, entrypoint::ProgramResult, msg, pubkey::Pubkey,
};
// declare process_instruction as the entrypoint to the program;
// all program calls are handled only though entrypoints
entrypoint!(process_instruction);
fn process_instruction(
program_id: &Pubkey,
accounts: &[AccountInfo],
instruction_data: &[u8],
) -> ProgramResult {
msg!(
"process_instruction: {}: {} accounts, data={:?}",
program_id,
accounts.len(),
instruction_data
);
Ok(())
} |
use super::atom::tag::{self, Tag};
use crate::arena::block::{self, BlockId};
use isaribi::{
style,
styled::{Style, Styled},
};
use kagura::prelude::*;
/// Construction-time properties for [`TagList`].
pub struct Props {
    pub block_arena: block::ArenaRef, // shared block storage the tags live in
    pub world_id: BlockId,            // the world block whose tags are listed
    pub removable: bool,              // forwarded to each child Tag
}
/// Internal messages; the only kind wraps an event forwarded to the parent.
pub enum Msg {
    Sub(On),
}
/// Events this component emits to its parent.
pub enum On {
    Click(BlockId), // a tag identified by its block id was clicked
}
/// Component state: mirrors `Props` one-to-one.
pub struct TagList {
    block_arena: block::ArenaRef,
    world_id: BlockId,
    removable: bool,
}
impl Constructor for TagList {
fn constructor(props: Self::Props, _: &mut ComponentBuilder<Self::Msg, Self::Sub>) -> Self {
Self {
block_arena: props.block_arena,
world_id: props.world_id,
removable: props.removable,
}
}
}
impl Component for TagList {
    type Props = Props;
    type Msg = Msg;
    type Sub = On;
    // Re-initialization overwrites all state from the new props.
    fn init(&mut self, props: Self::Props, _: &mut ComponentBuilder<Self::Msg, Self::Sub>) {
        self.block_arena = props.block_arena;
        self.world_id = props.world_id;
        self.removable = props.removable;
    }
    // The only message kind is forwarded to the parent unchanged.
    fn update(&mut self, msg: Self::Msg) -> Cmd<Self::Msg, Self::Sub> {
        match msg {
            Msg::Sub(sub) => Cmd::sub(sub),
        }
    }
    // Renders one `Tag` child per tag id on the world block; if the world id
    // cannot be resolved in the arena, the div is rendered empty.
    fn render(&self, _: Vec<Html>) -> Html {
        Self::styled(Html::div(
            Attributes::new(),
            Events::new(),
            self.block_arena
                .map(&self.world_id, |world: &block::world::World| {
                    world
                        .tags()
                        .map(|tag_id| {
                            Tag::empty(
                                tag::Props {
                                    block_arena: block::ArenaRef::clone(&self.block_arena),
                                    block_id: BlockId::clone(tag_id),
                                    removable: self.removable,
                                },
                                Subscription::none(),
                            )
                        })
                        .collect::<Vec<_>>()
                })
                .unwrap_or(vec![]),
        ))
    }
}
impl Styled for TagList {
    // No component-specific style rules; children style themselves.
    fn style() -> Style {
        style! {}
    }
}
|
use app::{
get_immutable_store, get_locales, get_mutable_store, get_static_aliases, get_templates_map,
get_templates_vec, get_translations_manager, APP_ROOT,
};
use fs_extra::dir::{copy as copy_dir, CopyOptions};
use futures::executor::block_on;
use perseus::{build_app, export_app, path_prefix::get_path_prefix_server, SsrNode};
use std::fs;
use std::path::PathBuf;
/// Process entry point: delegates to `real_main` and exits with its code.
fn main() {
    std::process::exit(real_main())
}
/// Builds and exports the site, then copies static assets into the export
/// package. Returns a process exit code: 0 on success, 1 on any failure
/// (each failure path prints its own diagnostic to stderr first).
fn real_main() -> i32 {
    let immutable_store = get_immutable_store();
    // We don't need this in exporting, but the build process does
    let mutable_store = get_mutable_store();
    let translations_manager = block_on(get_translations_manager());
    let locales = get_locales();
    // Build the site for all the common locales (done in parallel), denying any non-exportable features
    let build_fut = build_app(
        get_templates_vec::<SsrNode>(),
        &locales,
        (&immutable_store, &mutable_store),
        &translations_manager,
        // We use another binary to handle normal building
        true,
    );
    if let Err(err) = block_on(build_fut) {
        eprintln!("Static exporting failed: '{}'.", err);
        return 1;
    }
    // Turn the build artifacts into self-contained static files
    let export_fut = export_app(
        get_templates_map(),
        "../index.html",
        &locales,
        APP_ROOT,
        &immutable_store,
        &translations_manager,
        get_path_prefix_server(),
    );
    if let Err(err) = block_on(export_fut) {
        eprintln!("Static exporting failed: '{}'.", err);
        return 1;
    }
    // Copy the `static` directory into the export package if it exists
    // We don't use a config manager here because static files are always handled on-disk in Perseus (for now)
    let static_dir = PathBuf::from("../static");
    if static_dir.exists() {
        if let Err(err) = copy_dir(&static_dir, "dist/exported/.perseus/", &CopyOptions::new()) {
            eprintln!(
                "Static exporting failed: 'couldn't copy static directory: '{}''",
                err.to_string()
            );
            return 1;
        }
    }
    // Loop through any static aliases and copy them in too
    // Unlike with the server, these could override pages!
    // We'll copy from the alias to the path (it could be a directory or a file)
    // Remember: `alias` has a leading `/`!
    for (alias, path) in get_static_aliases() {
        let from = PathBuf::from(path);
        let to = format!("dist/exported{}", alias);
        if from.is_dir() {
            if let Err(err) = copy_dir(&from, &to, &CopyOptions::new()) {
                eprintln!(
                    "Static exporting failed: 'couldn't copy static alias directory: '{}''",
                    err.to_string()
                );
                return 1;
            }
        } else if let Err(err) = fs::copy(&from, &to) {
            eprintln!(
                "Static exporting failed: 'couldn't copy static alias file: '{}''",
                err.to_string()
            );
            return 1;
        }
    }
    println!("Static exporting successfully completed!");
    0
}
|
use std::error::Error;
use std::fs;
use std::env;
/// Runtime configuration parsed from the command line and environment.
pub struct Config {
    pub query: String,
    pub filename: String,
    pub case_sensitive: bool
}
impl Config {
    /// Parses argv-style arguments (program name first, then query and
    /// filename) into a `Config`. Returns an error message when fewer than
    /// two real arguments are supplied.
    pub fn new(args: &[String]) -> Result<Config, &str> {
        if args.len() < 3 {
            return Err("not enough arguments");
        }
        // Matching is case-sensitive unless the CASE_INSENSITIVE environment
        // variable is set (its value is never inspected, only its presence).
        // TODO: make UX clearer
        let case_sensitive = env::var("CASE_INSENSITIVE").is_err();
        let query = args[1].clone();
        let filename = args[2].clone();
        Ok(Config { query, filename, case_sensitive })
    }
}
pub fn run(config: Config) -> Result<(), Box<dyn Error>>{
let contents = fs::read_to_string(config.filename)?;
println!{"With text:\n{}", contents};
let matched_lines = if config.case_sensitive{
search(&config.query, &contents)
} else {
search_case_insensitive(&config.query, &contents)
};
println!{"\n Matching lines:"};
for line in matched_lines {
println!{"{}",line};
}
Ok(())
}
/// Returns every line of `contents` that contains `query`, case-sensitively.
///
/// Lines are produced by splitting on '\n' (so input with a trailing newline
/// yields a final empty segment, exactly as before). The previous version
/// collected every line into an intermediate Vec before filtering — a
/// drawback its own comment pointed out; filtering the split iterator
/// lazily removes that allocation without changing the result.
fn search<'a>(query: &str, contents: &'a str) -> Vec<&'a str> {
    contents
        .split('\n')
        .filter(|line| line.contains(query))
        .collect()
}
/// Returns every line of `contents` containing `query`, ignoring case.
/// The returned slices keep their original casing; only the comparison is
/// lowercased.
fn search_case_insensitive<'a>(query: &str, contents: &'a str) -> Vec<&'a str> {
    let needle = query.to_lowercase();
    contents
        .lines()
        .filter(|line| line.to_lowercase().contains(&needle))
        .collect()
}
# [cfg(test)]
mod tests {
use super::*;
#[test]
fn config_new_too_few_args(){
let input_args: Vec<String> = vec!["one_arg_only".to_string()];
let output = Config::new(&input_args);
assert!(output.is_err());
}
#[test]
fn config_correctly_parsed(){
let input_args: Vec<String> = vec![
"target/debug/minigrep".to_string(),
"to_parse".to_string(),
"file.txt".to_string()
];
let output = Config::new(&input_args).unwrap();
assert_eq!(output.filename, "file.txt".to_string());
}
#[test]
fn successful_grep_example(){
let query = "duct";
let contents = "\
Rust:
safe, fast, productive.
Pick three.";
assert_eq!(
vec!["safe, fast, productive."],
search(query, contents));
}
#[test]
fn case_sensitive() {
let query = "duct";
let contents = "\
Rust:
safe, fast, productive.
Pick three.
Duct tape.";
assert_eq!(vec!["safe, fast, productive."], search(query, contents));
}
#[test]
fn case_insensitive(){
let query = "rUsT";
let contents = "\
Rust:
safe, fast, productive.
Pick three.
Trust me.";
assert_eq!(
vec!["Rust:", "Trust me."],
search_case_insensitive(query, contents)
);
}
}
|
// Generates a family of backslash-escaping encoders from one byte classifier
// (`$parse_macro`): a Cow-returning function plus variants that append to a
// String, a Vec<u8>, or an io::Write sink. `$parse_macro!(byte, step, m1, m2)`
// runs `m1` or `m2` when an escape must be inserted; `step` is its running
// state across bytes. NOTE(review): `$l` appears to be how many already-copied
// bytes belong to the match when the classifier fires on its first branch —
// confirm against the classifier macros before changing.
macro_rules! encode_impl {
    ($l:expr; $(#[$attr: meta])* $parse_macro:ident; $(#[$encode_attr: meta])* $encode_name: ident; $(#[$encode_to_string_attr: meta])* $encode_to_string_name: ident; $(#[$encode_to_vec_attr: meta])* $encode_to_vec_name: ident; $(#[$encode_to_writer_attr: meta])* $encode_to_writer_name: ident $(;)*) => {
        $(#[$encode_attr])*
        ///
        $(#[$attr])*
        #[inline]
        pub fn $encode_name<S: ?Sized + AsRef<str>>(text: &S) -> Cow<str> {
            let text = text.as_ref();
            let text_bytes = text.as_bytes();
            let text_length = text_bytes.len();
            let mut p = 0;
            let mut e;
            let mut step = 0;
            // Fast path: scan until the first byte needing an escape; if none
            // is found, hand back the input unchanged without allocating.
            let (mut v, mut start) = loop {
                if p == text_length {
                    return Cow::from(text);
                }
                e = text_bytes[p];
                $parse_macro!(
                    e,
                    step,
                    {
                        let mut v = Vec::with_capacity(text_length + 1);
                        v.extend_from_slice(&text_bytes[..(p - $l)]);
                        break (v, p - $l);
                    },
                    {
                        let mut v = Vec::with_capacity(text_length + 1);
                        v.extend_from_slice(&text_bytes[..p]);
                        break (v, p);
                    }
                );
                p += 1;
            };
            // Insert the first escape, then continue copying span-by-span:
            // bytes between escapes are bulk-copied from `start`.
            v.push(b'\\');
            p += 1;
            for e in text_bytes[p..].iter().copied() {
                $parse_macro!(
                    e,
                    step,
                    {
                        v.extend_from_slice(&text_bytes[start..(p - $l)]);
                        start = p - $l;
                        v.push(b'\\');
                    },
                    {
                        v.extend_from_slice(&text_bytes[start..p]);
                        start = p;
                        v.push(b'\\');
                    }
                );
                p += 1;
            }
            v.extend_from_slice(&text_bytes[start..p]);
            // SAFETY-ish: `v` is the input's UTF-8 bytes with ASCII backslashes
            // inserted at classifier-chosen boundaries, so it stays valid UTF-8.
            Cow::from(unsafe { String::from_utf8_unchecked(v) })
        }
        $(#[$encode_to_string_attr])*
        ///
        $(#[$attr])*
        #[inline]
        pub fn $encode_to_string_name<S: AsRef<str>>(text: S, output: &mut String) -> &str {
            // Delegates to the Vec variant on the String's byte buffer; the
            // appended bytes are valid UTF-8 (see note above).
            unsafe { from_utf8_unchecked($encode_to_vec_name(text, output.as_mut_vec())) }
        }
        $(#[$encode_to_vec_attr])*
        ///
        $(#[$attr])*
        #[inline]
        pub fn $encode_to_vec_name<S: AsRef<str>>(text: S, output: &mut Vec<u8>) -> &[u8] {
            let text = text.as_ref();
            let text_bytes = text.as_bytes();
            let text_length = text_bytes.len();
            output.reserve(text_length);
            let current_length = output.len();
            let mut start = 0;
            let mut end = 0;
            let mut step = 0;
            // Same span-copy scheme as above, but appending to `output` and
            // returning only the newly written suffix.
            for e in text_bytes.iter().copied() {
                $parse_macro!(
                    e,
                    step,
                    {
                        output.extend_from_slice(&text_bytes[start..(end - $l)]);
                        start = end - $l;
                        output.push(b'\\');
                    },
                    {
                        output.extend_from_slice(&text_bytes[start..end]);
                        start = end;
                        output.push(b'\\');
                    }
                );
                end += 1;
            }
            output.extend_from_slice(&text_bytes[start..end]);
            &output[current_length..]
        }
        #[cfg(feature = "std")]
        $(#[$encode_to_writer_attr])*
        ///
        $(#[$attr])*
        #[inline]
        pub fn $encode_to_writer_name<S: AsRef<str>, W: Write>(text: S, output: &mut W) -> Result<(), io::Error> {
            let text = text.as_ref();
            let text_bytes = text.as_bytes();
            let mut start = 0;
            let mut end = 0;
            let mut step = 0;
            // Writer variant: identical scheme, propagating I/O errors.
            for e in text_bytes.iter().copied() {
                $parse_macro!(
                    e,
                    step,
                    {
                        output.write_all(&text_bytes[start..(end - $l)])?;
                        start = end - $l;
                        output.write_all(b"\\")?;
                    },
                    {
                        output.write_all(&text_bytes[start..end])?;
                        start = end;
                        output.write_all(b"\\")?;
                    }
                );
                end += 1;
            }
            output.write_all(&text_bytes[start..end])
        }
    };
}
|
pub mod intcpu;
pub mod halp; |
// NOTE(review): this register API appears svd2rust-generated (APB1LLPENR,
// low-power clock enables). The alias definitions below are mechanical —
// regenerate from the SVD rather than editing by hand.
#[doc = "Register `APB1LLPENR` reader"]
pub type R = crate::R<APB1LLPENR_SPEC>;
#[doc = "Register `APB1LLPENR` writer"]
pub type W = crate::W<APB1LLPENR_SPEC>;
#[doc = "Field `TIM2LPEN` reader - TIM2 clock enable during sleep mode Set and reset by software."]
pub type TIM2LPEN_R = crate::BitReader;
#[doc = "Field `TIM2LPEN` writer - TIM2 clock enable during sleep mode Set and reset by software."]
pub type TIM2LPEN_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `TIM3LPEN` reader - TIM3 clock enable during sleep mode Set and reset by software."]
pub type TIM3LPEN_R = crate::BitReader;
#[doc = "Field `TIM3LPEN` writer - TIM3 clock enable during sleep mode Set and reset by software."]
pub type TIM3LPEN_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `TIM4LPEN` reader - TIM4 clock enable during sleep mode Set and reset by software."]
pub type TIM4LPEN_R = crate::BitReader;
#[doc = "Field `TIM4LPEN` writer - TIM4 clock enable during sleep mode Set and reset by software."]
pub type TIM4LPEN_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `TIM5LPEN` reader - TIM5 clock enable during sleep mode Set and reset by software."]
pub type TIM5LPEN_R = crate::BitReader;
#[doc = "Field `TIM5LPEN` writer - TIM5 clock enable during sleep mode Set and reset by software."]
pub type TIM5LPEN_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `TIM6LPEN` reader - TIM6 clock enable during sleep mode Set and reset by software."]
pub type TIM6LPEN_R = crate::BitReader;
#[doc = "Field `TIM6LPEN` writer - TIM6 clock enable during sleep mode Set and reset by software."]
pub type TIM6LPEN_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `TIM7LPEN` reader - TIM7 clock enable during sleep mode Set and reset by software."]
pub type TIM7LPEN_R = crate::BitReader;
#[doc = "Field `TIM7LPEN` writer - TIM7 clock enable during sleep mode Set and reset by software."]
pub type TIM7LPEN_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `TIM12LPEN` reader - TIM12 clock enable during sleep mode Set and reset by software."]
pub type TIM12LPEN_R = crate::BitReader;
#[doc = "Field `TIM12LPEN` writer - TIM12 clock enable during sleep mode Set and reset by software."]
pub type TIM12LPEN_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `TIM13LPEN` reader - TIM13 clock enable during sleep mode Set and reset by software."]
pub type TIM13LPEN_R = crate::BitReader;
#[doc = "Field `TIM13LPEN` writer - TIM13 clock enable during sleep mode Set and reset by software."]
pub type TIM13LPEN_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `TIM14LPEN` reader - TIM14 clock enable during sleep mode Set and reset by software."]
pub type TIM14LPEN_R = crate::BitReader;
#[doc = "Field `TIM14LPEN` writer - TIM14 clock enable during sleep mode Set and reset by software."]
pub type TIM14LPEN_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `WWDGLPEN` reader - WWDG clock enable during sleep mode Set and reset by software."]
pub type WWDGLPEN_R = crate::BitReader;
#[doc = "Field `WWDGLPEN` writer - WWDG clock enable during sleep mode Set and reset by software."]
pub type WWDGLPEN_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `SPI2LPEN` reader - SPI2 clock enable during sleep mode Set and reset by software."]
pub type SPI2LPEN_R = crate::BitReader;
#[doc = "Field `SPI2LPEN` writer - SPI2 clock enable during sleep mode Set and reset by software."]
pub type SPI2LPEN_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `SPI3LPEN` reader - SPI3 clock enable during sleep mode Set and reset by software."]
pub type SPI3LPEN_R = crate::BitReader;
#[doc = "Field `SPI3LPEN` writer - SPI3 clock enable during sleep mode Set and reset by software."]
pub type SPI3LPEN_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `USART2LPEN` reader - USART2 clock enable during sleep mode Set and reset by software."]
pub type USART2LPEN_R = crate::BitReader;
#[doc = "Field `USART2LPEN` writer - USART2 clock enable during sleep mode Set and reset by software."]
pub type USART2LPEN_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `USART3LPEN` reader - USART3 clock enable during sleep mode Set and reset by software."]
pub type USART3LPEN_R = crate::BitReader;
#[doc = "Field `USART3LPEN` writer - USART3 clock enable during sleep mode Set and reset by software."]
pub type USART3LPEN_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `UART4LPEN` reader - UART4 clock enable during sleep mode Set and reset by software."]
pub type UART4LPEN_R = crate::BitReader;
#[doc = "Field `UART4LPEN` writer - UART4 clock enable during sleep mode Set and reset by software."]
pub type UART4LPEN_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `UART5LPEN` reader - UART5 clock enable during sleep mode Set and reset by software."]
pub type UART5LPEN_R = crate::BitReader;
#[doc = "Field `UART5LPEN` writer - UART5 clock enable during sleep mode Set and reset by software."]
pub type UART5LPEN_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `I2C1LPEN` reader - I2C1 clock enable during sleep mode Set and reset by software."]
pub type I2C1LPEN_R = crate::BitReader;
#[doc = "Field `I2C1LPEN` writer - I2C1 clock enable during sleep mode Set and reset by software."]
pub type I2C1LPEN_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `I2C2LPEN` reader - I2C2 clock enable during sleep mode Set and reset by software."]
pub type I2C2LPEN_R = crate::BitReader;
#[doc = "Field `I2C2LPEN` writer - I2C2 clock enable during sleep mode Set and reset by software."]
pub type I2C2LPEN_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `I3C1LPEN` reader - I3C1 clock enable during sleep mode Set and reset by software."]
pub type I3C1LPEN_R = crate::BitReader;
#[doc = "Field `I3C1LPEN` writer - I3C1 clock enable during sleep mode Set and reset by software."]
pub type I3C1LPEN_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `CRSLPEN` reader - CRS clock enable during sleep mode Set and reset by software."]
pub type CRSLPEN_R = crate::BitReader;
#[doc = "Field `CRSLPEN` writer - CRS clock enable during sleep mode Set and reset by software."]
pub type CRSLPEN_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `USART6LPEN` reader - USART6 clock enable during sleep mode Set and reset by software."]
pub type USART6LPEN_R = crate::BitReader;
#[doc = "Field `USART6LPEN` writer - USART6 clock enable during sleep mode Set and reset by software."]
pub type USART6LPEN_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `USART10LPEN` reader - USART10 clock enable during sleep mode Set and reset by software."]
pub type USART10LPEN_R = crate::BitReader;
#[doc = "Field `USART10LPEN` writer - USART10 clock enable during sleep mode Set and reset by software."]
pub type USART10LPEN_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `USART11LPEN` reader - USART11 clock enable during sleep mode Set and reset by software."]
pub type USART11LPEN_R = crate::BitReader;
#[doc = "Field `USART11LPEN` writer - USART11 clock enable during sleep mode Set and reset by software."]
pub type USART11LPEN_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `CECLPEN` reader - HDMI-CEC clock enable during sleep mode Set and reset by software."]
pub type CECLPEN_R = crate::BitReader;
#[doc = "Field `CECLPEN` writer - HDMI-CEC clock enable during sleep mode Set and reset by software."]
pub type CECLPEN_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `UART7LPEN` reader - UART7 clock enable during sleep mode Set and reset by software."]
pub type UART7LPEN_R = crate::BitReader;
#[doc = "Field `UART7LPEN` writer - UART7 clock enable during sleep mode Set and reset by software."]
pub type UART7LPEN_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `UART8LPEN` reader - UART8 clock enable during sleep mode Set and reset by software."]
pub type UART8LPEN_R = crate::BitReader;
#[doc = "Field `UART8LPEN` writer - UART8 clock enable during sleep mode Set and reset by software."]
pub type UART8LPEN_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
// Read proxy for APB1LLPENR (svd2rust-generated). Each accessor extracts one
// peripheral clock-enable-during-sleep bit at the bit position stated in its
// doc attribute; bit positions are fixed by the device SVD — do not edit by hand.
impl R {
    #[doc = "Bit 0 - TIM2 clock enable during sleep mode Set and reset by software."]
    #[inline(always)]
    pub fn tim2lpen(&self) -> TIM2LPEN_R {
        TIM2LPEN_R::new((self.bits & 1) != 0)
    }
    #[doc = "Bit 1 - TIM3 clock enable during sleep mode Set and reset by software."]
    #[inline(always)]
    pub fn tim3lpen(&self) -> TIM3LPEN_R {
        TIM3LPEN_R::new(((self.bits >> 1) & 1) != 0)
    }
    #[doc = "Bit 2 - TIM4 clock enable during sleep mode Set and reset by software."]
    #[inline(always)]
    pub fn tim4lpen(&self) -> TIM4LPEN_R {
        TIM4LPEN_R::new(((self.bits >> 2) & 1) != 0)
    }
    #[doc = "Bit 3 - TIM5 clock enable during sleep mode Set and reset by software."]
    #[inline(always)]
    pub fn tim5lpen(&self) -> TIM5LPEN_R {
        TIM5LPEN_R::new(((self.bits >> 3) & 1) != 0)
    }
    #[doc = "Bit 4 - TIM6 clock enable during sleep mode Set and reset by software."]
    #[inline(always)]
    pub fn tim6lpen(&self) -> TIM6LPEN_R {
        TIM6LPEN_R::new(((self.bits >> 4) & 1) != 0)
    }
    #[doc = "Bit 5 - TIM7 clock enable during sleep mode Set and reset by software."]
    #[inline(always)]
    pub fn tim7lpen(&self) -> TIM7LPEN_R {
        TIM7LPEN_R::new(((self.bits >> 5) & 1) != 0)
    }
    #[doc = "Bit 6 - TIM12 clock enable during sleep mode Set and reset by software."]
    #[inline(always)]
    pub fn tim12lpen(&self) -> TIM12LPEN_R {
        TIM12LPEN_R::new(((self.bits >> 6) & 1) != 0)
    }
    #[doc = "Bit 7 - TIM13 clock enable during sleep mode Set and reset by software."]
    #[inline(always)]
    pub fn tim13lpen(&self) -> TIM13LPEN_R {
        TIM13LPEN_R::new(((self.bits >> 7) & 1) != 0)
    }
    #[doc = "Bit 8 - TIM14 clock enable during sleep mode Set and reset by software."]
    #[inline(always)]
    pub fn tim14lpen(&self) -> TIM14LPEN_R {
        TIM14LPEN_R::new(((self.bits >> 8) & 1) != 0)
    }
    #[doc = "Bit 11 - WWDG clock enable during sleep mode Set and reset by software."]
    #[inline(always)]
    pub fn wwdglpen(&self) -> WWDGLPEN_R {
        WWDGLPEN_R::new(((self.bits >> 11) & 1) != 0)
    }
    #[doc = "Bit 14 - SPI2 clock enable during sleep mode Set and reset by software."]
    #[inline(always)]
    pub fn spi2lpen(&self) -> SPI2LPEN_R {
        SPI2LPEN_R::new(((self.bits >> 14) & 1) != 0)
    }
    #[doc = "Bit 15 - SPI3 clock enable during sleep mode Set and reset by software."]
    #[inline(always)]
    pub fn spi3lpen(&self) -> SPI3LPEN_R {
        SPI3LPEN_R::new(((self.bits >> 15) & 1) != 0)
    }
    #[doc = "Bit 17 - USART2 clock enable during sleep mode Set and reset by software."]
    #[inline(always)]
    pub fn usart2lpen(&self) -> USART2LPEN_R {
        USART2LPEN_R::new(((self.bits >> 17) & 1) != 0)
    }
    #[doc = "Bit 18 - USART3 clock enable during sleep mode Set and reset by software."]
    #[inline(always)]
    pub fn usart3lpen(&self) -> USART3LPEN_R {
        USART3LPEN_R::new(((self.bits >> 18) & 1) != 0)
    }
    #[doc = "Bit 19 - UART4 clock enable during sleep mode Set and reset by software."]
    #[inline(always)]
    pub fn uart4lpen(&self) -> UART4LPEN_R {
        UART4LPEN_R::new(((self.bits >> 19) & 1) != 0)
    }
    #[doc = "Bit 20 - UART5 clock enable during sleep mode Set and reset by software."]
    #[inline(always)]
    pub fn uart5lpen(&self) -> UART5LPEN_R {
        UART5LPEN_R::new(((self.bits >> 20) & 1) != 0)
    }
    #[doc = "Bit 21 - I2C1 clock enable during sleep mode Set and reset by software."]
    #[inline(always)]
    pub fn i2c1lpen(&self) -> I2C1LPEN_R {
        I2C1LPEN_R::new(((self.bits >> 21) & 1) != 0)
    }
    #[doc = "Bit 22 - I2C2 clock enable during sleep mode Set and reset by software."]
    #[inline(always)]
    pub fn i2c2lpen(&self) -> I2C2LPEN_R {
        I2C2LPEN_R::new(((self.bits >> 22) & 1) != 0)
    }
    #[doc = "Bit 23 - I3C1 clock enable during sleep mode Set and reset by software."]
    #[inline(always)]
    pub fn i3c1lpen(&self) -> I3C1LPEN_R {
        I3C1LPEN_R::new(((self.bits >> 23) & 1) != 0)
    }
    #[doc = "Bit 24 - CRS clock enable during sleep mode Set and reset by software."]
    #[inline(always)]
    pub fn crslpen(&self) -> CRSLPEN_R {
        CRSLPEN_R::new(((self.bits >> 24) & 1) != 0)
    }
    #[doc = "Bit 25 - USART6 clock enable during sleep mode Set and reset by software."]
    #[inline(always)]
    pub fn usart6lpen(&self) -> USART6LPEN_R {
        USART6LPEN_R::new(((self.bits >> 25) & 1) != 0)
    }
    #[doc = "Bit 26 - USART10 clock enable during sleep mode Set and reset by software."]
    #[inline(always)]
    pub fn usart10lpen(&self) -> USART10LPEN_R {
        USART10LPEN_R::new(((self.bits >> 26) & 1) != 0)
    }
    #[doc = "Bit 27 - USART11 clock enable during sleep mode Set and reset by software."]
    #[inline(always)]
    pub fn usart11lpen(&self) -> USART11LPEN_R {
        USART11LPEN_R::new(((self.bits >> 27) & 1) != 0)
    }
    #[doc = "Bit 28 - HDMI-CEC clock enable during sleep mode Set and reset by software."]
    #[inline(always)]
    pub fn ceclpen(&self) -> CECLPEN_R {
        CECLPEN_R::new(((self.bits >> 28) & 1) != 0)
    }
    #[doc = "Bit 30 - UART7 clock enable during sleep mode Set and reset by software."]
    #[inline(always)]
    pub fn uart7lpen(&self) -> UART7LPEN_R {
        UART7LPEN_R::new(((self.bits >> 30) & 1) != 0)
    }
    #[doc = "Bit 31 - UART8 clock enable during sleep mode Set and reset by software."]
    #[inline(always)]
    pub fn uart8lpen(&self) -> UART8LPEN_R {
        UART8LPEN_R::new(((self.bits >> 31) & 1) != 0)
    }
}
// Write proxy for APB1LLPENR (svd2rust-generated). Each method returns a
// bit-writer targeting the const bit offset in its return type; offsets are
// fixed by the device SVD — do not edit by hand.
impl W {
    #[doc = "Bit 0 - TIM2 clock enable during sleep mode Set and reset by software."]
    #[inline(always)]
    #[must_use]
    pub fn tim2lpen(&mut self) -> TIM2LPEN_W<APB1LLPENR_SPEC, 0> {
        TIM2LPEN_W::new(self)
    }
    #[doc = "Bit 1 - TIM3 clock enable during sleep mode Set and reset by software."]
    #[inline(always)]
    #[must_use]
    pub fn tim3lpen(&mut self) -> TIM3LPEN_W<APB1LLPENR_SPEC, 1> {
        TIM3LPEN_W::new(self)
    }
    #[doc = "Bit 2 - TIM4 clock enable during sleep mode Set and reset by software."]
    #[inline(always)]
    #[must_use]
    pub fn tim4lpen(&mut self) -> TIM4LPEN_W<APB1LLPENR_SPEC, 2> {
        TIM4LPEN_W::new(self)
    }
    #[doc = "Bit 3 - TIM5 clock enable during sleep mode Set and reset by software."]
    #[inline(always)]
    #[must_use]
    pub fn tim5lpen(&mut self) -> TIM5LPEN_W<APB1LLPENR_SPEC, 3> {
        TIM5LPEN_W::new(self)
    }
    #[doc = "Bit 4 - TIM6 clock enable during sleep mode Set and reset by software."]
    #[inline(always)]
    #[must_use]
    pub fn tim6lpen(&mut self) -> TIM6LPEN_W<APB1LLPENR_SPEC, 4> {
        TIM6LPEN_W::new(self)
    }
    #[doc = "Bit 5 - TIM7 clock enable during sleep mode Set and reset by software."]
    #[inline(always)]
    #[must_use]
    pub fn tim7lpen(&mut self) -> TIM7LPEN_W<APB1LLPENR_SPEC, 5> {
        TIM7LPEN_W::new(self)
    }
    #[doc = "Bit 6 - TIM12 clock enable during sleep mode Set and reset by software."]
    #[inline(always)]
    #[must_use]
    pub fn tim12lpen(&mut self) -> TIM12LPEN_W<APB1LLPENR_SPEC, 6> {
        TIM12LPEN_W::new(self)
    }
    #[doc = "Bit 7 - TIM13 clock enable during sleep mode Set and reset by software."]
    #[inline(always)]
    #[must_use]
    pub fn tim13lpen(&mut self) -> TIM13LPEN_W<APB1LLPENR_SPEC, 7> {
        TIM13LPEN_W::new(self)
    }
    #[doc = "Bit 8 - TIM14 clock enable during sleep mode Set and reset by software."]
    #[inline(always)]
    #[must_use]
    pub fn tim14lpen(&mut self) -> TIM14LPEN_W<APB1LLPENR_SPEC, 8> {
        TIM14LPEN_W::new(self)
    }
    #[doc = "Bit 11 - WWDG clock enable during sleep mode Set and reset by software."]
    #[inline(always)]
    #[must_use]
    pub fn wwdglpen(&mut self) -> WWDGLPEN_W<APB1LLPENR_SPEC, 11> {
        WWDGLPEN_W::new(self)
    }
    #[doc = "Bit 14 - SPI2 clock enable during sleep mode Set and reset by software."]
    #[inline(always)]
    #[must_use]
    pub fn spi2lpen(&mut self) -> SPI2LPEN_W<APB1LLPENR_SPEC, 14> {
        SPI2LPEN_W::new(self)
    }
    #[doc = "Bit 15 - SPI3 clock enable during sleep mode Set and reset by software."]
    #[inline(always)]
    #[must_use]
    pub fn spi3lpen(&mut self) -> SPI3LPEN_W<APB1LLPENR_SPEC, 15> {
        SPI3LPEN_W::new(self)
    }
    #[doc = "Bit 17 - USART2 clock enable during sleep mode Set and reset by software."]
    #[inline(always)]
    #[must_use]
    pub fn usart2lpen(&mut self) -> USART2LPEN_W<APB1LLPENR_SPEC, 17> {
        USART2LPEN_W::new(self)
    }
    #[doc = "Bit 18 - USART3 clock enable during sleep mode Set and reset by software."]
    #[inline(always)]
    #[must_use]
    pub fn usart3lpen(&mut self) -> USART3LPEN_W<APB1LLPENR_SPEC, 18> {
        USART3LPEN_W::new(self)
    }
    #[doc = "Bit 19 - UART4 clock enable during sleep mode Set and reset by software."]
    #[inline(always)]
    #[must_use]
    pub fn uart4lpen(&mut self) -> UART4LPEN_W<APB1LLPENR_SPEC, 19> {
        UART4LPEN_W::new(self)
    }
    #[doc = "Bit 20 - UART5 clock enable during sleep mode Set and reset by software."]
    #[inline(always)]
    #[must_use]
    pub fn uart5lpen(&mut self) -> UART5LPEN_W<APB1LLPENR_SPEC, 20> {
        UART5LPEN_W::new(self)
    }
    #[doc = "Bit 21 - I2C1 clock enable during sleep mode Set and reset by software."]
    #[inline(always)]
    #[must_use]
    pub fn i2c1lpen(&mut self) -> I2C1LPEN_W<APB1LLPENR_SPEC, 21> {
        I2C1LPEN_W::new(self)
    }
    #[doc = "Bit 22 - I2C2 clock enable during sleep mode Set and reset by software."]
    #[inline(always)]
    #[must_use]
    pub fn i2c2lpen(&mut self) -> I2C2LPEN_W<APB1LLPENR_SPEC, 22> {
        I2C2LPEN_W::new(self)
    }
    #[doc = "Bit 23 - I3C1 clock enable during sleep mode Set and reset by software."]
    #[inline(always)]
    #[must_use]
    pub fn i3c1lpen(&mut self) -> I3C1LPEN_W<APB1LLPENR_SPEC, 23> {
        I3C1LPEN_W::new(self)
    }
    #[doc = "Bit 24 - CRS clock enable during sleep mode Set and reset by software."]
    #[inline(always)]
    #[must_use]
    pub fn crslpen(&mut self) -> CRSLPEN_W<APB1LLPENR_SPEC, 24> {
        CRSLPEN_W::new(self)
    }
    #[doc = "Bit 25 - USART6 clock enable during sleep mode Set and reset by software."]
    #[inline(always)]
    #[must_use]
    pub fn usart6lpen(&mut self) -> USART6LPEN_W<APB1LLPENR_SPEC, 25> {
        USART6LPEN_W::new(self)
    }
    #[doc = "Bit 26 - USART10 clock enable during sleep mode Set and reset by software."]
    #[inline(always)]
    #[must_use]
    pub fn usart10lpen(&mut self) -> USART10LPEN_W<APB1LLPENR_SPEC, 26> {
        USART10LPEN_W::new(self)
    }
    #[doc = "Bit 27 - USART11 clock enable during sleep mode Set and reset by software."]
    #[inline(always)]
    #[must_use]
    pub fn usart11lpen(&mut self) -> USART11LPEN_W<APB1LLPENR_SPEC, 27> {
        USART11LPEN_W::new(self)
    }
    #[doc = "Bit 28 - HDMI-CEC clock enable during sleep mode Set and reset by software."]
    #[inline(always)]
    #[must_use]
    pub fn ceclpen(&mut self) -> CECLPEN_W<APB1LLPENR_SPEC, 28> {
        CECLPEN_W::new(self)
    }
    #[doc = "Bit 30 - UART7 clock enable during sleep mode Set and reset by software."]
    #[inline(always)]
    #[must_use]
    pub fn uart7lpen(&mut self) -> UART7LPEN_W<APB1LLPENR_SPEC, 30> {
        UART7LPEN_W::new(self)
    }
    #[doc = "Bit 31 - UART8 clock enable during sleep mode Set and reset by software."]
    #[inline(always)]
    #[must_use]
    pub fn uart8lpen(&mut self) -> UART8LPEN_W<APB1LLPENR_SPEC, 31> {
        UART8LPEN_W::new(self)
    }
    #[doc = "Writes raw bits to the register."]
    #[inline(always)]
    // Unsafe because raw bit patterns bypass the typed per-field writers;
    // the caller is responsible for writing a valid register value.
    pub unsafe fn bits(&mut self, bits: u32) -> &mut Self {
        self.bits = bits;
        self
    }
}
#[doc = "RCC APB1 sleep clock register\n\nYou can [`read`](crate::generic::Reg::read) this register and get [`apb1llpenr::R`](R). You can [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero) this register using [`apb1llpenr::W`](W). You can also [`modify`](crate::generic::Reg::modify) this register. See [API](https://docs.rs/svd2rust/#read--modify--write-api)."]
pub struct APB1LLPENR_SPEC;
impl crate::RegisterSpec for APB1LLPENR_SPEC {
    type Ux = u32;
}
#[doc = "`read()` method returns [`apb1llpenr::R`](R) reader structure"]
impl crate::Readable for APB1LLPENR_SPEC {}
#[doc = "`write(|w| ..)` method takes [`apb1llpenr::W`](W) writer structure"]
impl crate::Writable for APB1LLPENR_SPEC {
    // Both bitmaps are zero: no fields in this register are cleared/set by
    // writing 1 (no write-1-to-clear / write-1-to-set semantics).
    const ZERO_TO_MODIFY_FIELDS_BITMAP: Self::Ux = 0;
    const ONE_TO_MODIFY_FIELDS_BITMAP: Self::Ux = 0;
}
#[doc = "`reset()` method sets APB1LLPENR to value 0xdffe_c9ff"]
impl crate::Resettable for APB1LLPENR_SPEC {
    const RESET_VALUE: Self::Ux = 0xdffe_c9ff;
}
|
// Sub-modules re-exported at this level for a flat public API.
pub mod image_file;
pub use self::image_file::*;
pub mod texture;
pub use self::texture::*;
pub mod effect;
pub use self::effect::*;
// Raw FFI bindings into the host graphics C library.
extern {
    /// Resets the graphics subsystem's blend state.
    pub fn gs_reset_blend_state();
    /// Draws `this` texture as a sprite.
    /// NOTE(review): the semantics of `flip`, `width` and `height` are defined
    /// by the C API — confirm against the corresponding C header.
    pub fn gs_draw_sprite(this: *mut Texture, flip: u32, width: u32, height: u32);
}
use glob::glob;
use rayon::prelude::*;
use serde_json::Value;
use std::collections::HashMap;
use std::fs::{create_dir, File};
use std::io::BufReader;
use std::path::{Path, PathBuf};
use structopt::StructOpt;
// CLI arguments. structopt derives the parser; the `///` field comments below
// double as --help text, so no struct-level doc comment is added (it would
// change the generated "about" output).
#[derive(StructOpt, Debug)]
#[structopt(name = "sentry_events")]
struct Opt {
    /// Artifacts directory
    #[structopt(name = "DIRECTORY", parse(from_os_str))]
    directory: PathBuf,
    /// Sentry instance URL
    #[structopt(short, long)]
    url: String,
    /// Sentry API token
    #[structopt(short, long)]
    token: String,
}
fn main() -> Result<(), Box<dyn std::error::Error>> {
let opt = Opt::from_args();
let pattern = format!(
"{}/**/metadata.json",
&opt.directory.to_str().expect("Valid directory")
);
let paths: Vec<_> = glob(&pattern)
.expect("Invalid pattern")
.collect::<Result<Vec<_>, _>>()
.expect("Error during reading directories");
println!("Loading events for {} runs", paths.len());
let paths: Vec<(String, &Path, String)> =
paths.par_iter().filter_map(|p| load_metadata(p)).collect();
let mut map = HashMap::new();
for (target, path, run_id) in &paths {
let runs = map
.entry(target.to_string())
.or_insert_with(|| HashMap::with_capacity(30));
runs.insert(
run_id.to_owned(),
path.parent().expect("Has parent dir").to_path_buf(),
);
}
let Opt { url, token, .. } = opt;
map.par_iter()
.for_each(|(target, runs)| load_events(&url, &token, target, runs));
Ok(())
}
/// Reads the `metadata.json` at `path` and extracts `(target, path, run_id)`.
///
/// A `target` of the form `name:variant` is truncated to `name`. Returns
/// `None` when the metadata has no string `run_id` field.
///
/// # Panics
/// Panics when the file cannot be opened or parsed, or when `target` is not
/// a string.
fn load_metadata(path: &Path) -> Option<(String, &Path, String)> {
    let file = File::open(path).expect("Can't read file");
    let reader = BufReader::new(file);
    let data: Value = serde_json::from_reader(reader).expect("Can't read metadata");
    let target = data["target"]
        .as_str()
        .expect("`target` should be a string");
    // `?` replaces the original outer `if let`: no run id, no result.
    let run_id = data["run_id"].as_str()?;
    // Keep only the part before an optional `:variant` suffix; this replaces
    // the verbose `if let … { target } else { target }` in the original.
    let target = target.split_once(':').map_or(target, |(prefix, _)| prefix);
    Some((target.to_string(), path, run_id.to_string()))
}
/// Downloads all Sentry events for `target`, following pagination until
/// the API reports no further results, saving matching events under the
/// corresponding run directories (see `make_call`).
///
/// The `Link` headers returned by the server may drop the port, so the port
/// of the originally supplied `url` is re-applied to every pagination URL.
fn load_events(url: &str, token: &str, target: &str, runs: &HashMap<String, PathBuf>) {
    println!("Loading: {}", target);
    let client = reqwest::blocking::Client::new();
    let mut count = 1;
    let parsed = reqwest::Url::parse(url).expect("Invalid URL");
    if let (Some(next), true) = make_call(
        &client,
        &format!("{}api/0/projects/sentry/{}/events/?full=true", url, target),
        token,
        runs,
    ) {
        let mut next = reqwest::Url::parse(&next).expect("Invalid URL");
        next.set_port(parsed.port()).expect("Invalid port");
        let mut next = next.to_string();
        while let (Some(nxt), true) = make_call(&client, &next, token, runs) {
            println!("{} call #{}", target, count);
            let mut nxt = reqwest::Url::parse(&nxt).expect("Invalid URL");
            nxt.set_port(parsed.port()).expect("Invalid port");
            next = nxt.to_string();
            count += 1;
        }
    }
    // Previously duplicated in both the paginated and single-page branches.
    println!("Finished {}!", target);
}
/// Performs one page of the Sentry events API call.
///
/// Every event carrying a `wafp.run-id` tag that matches a known run is
/// written to `<run dir>/sentry_events/<event id>.json` (skipping files that
/// already exist). Returns `(next_page_url, has_more_results)` parsed from
/// the response's `Link` header; a 404 yields `(None, false)`.
///
/// # Panics
/// Panics on network failure, a missing/invalid `Link` header, or malformed
/// event JSON.
fn make_call(
    client: &reqwest::blocking::Client,
    url: &str,
    token: &str,
    runs: &HashMap<String, PathBuf>,
) -> (Option<String>, bool) {
    let response = client
        .get(url)
        .bearer_auth(token)
        .send()
        .expect("Valid response");
    if response.status() == 404 {
        println!("Got 404: {}", url);
        return (None, false);
    }
    // Read the pagination header before `json()` consumes the response.
    let link_header = response
        .headers()
        .get(reqwest::header::LINK)
        .expect("Link header");
    let links =
        parse_link_header::parse(link_header.to_str().expect("valid string")).expect("Valid links");
    let events: Value = response.json().expect("Valid json");
    if let Some(events) = events.as_array() {
        for event in events {
            for tag in event["tags"].as_array().expect("Array") {
                if let Some("wafp.run-id") = tag["key"].as_str() {
                    let run_id = tag["value"].as_str().expect("String");
                    if let Some(run_directory) = runs.get(run_id) {
                        let output_directory = run_directory.join("sentry_events");
                        // Best effort: the directory may already exist.
                        let _ = create_dir(&output_directory);
                        let event_id = event["id"].as_str().expect("Event ID is a string");
                        let output_path = output_directory.join(format!("{}.json", event_id));
                        // Skip events that were downloaded by an earlier run.
                        if !output_path.exists() {
                            let output_file =
                                File::create(output_path).expect("Can't create a file");
                            serde_json::to_writer(&output_file, &event)
                                .expect("Can't serialize event");
                        }
                    }
                }
            }
        }
    }
    // The `next` link's `results` query param says whether more pages remain.
    match links.get(&Some("next".to_string())) {
        Some(parse_link_header::Link {
            raw_uri, params, ..
        }) => {
            let results = match params.get("results").expect("Present").as_str() {
                "true" => true,
                "false" => false,
                value => panic!("Unknown value for `results`: {}", value),
            };
            (Some(raw_uri.to_string()), results)
        }
        None => (None, false),
    }
}
|
extern crate console;
use std::io;
use std::thread;
use std::time::Duration;
use console::Term;
/// Writes "Hello World!" to the terminal, waits two seconds, then clears
/// the line again.
fn print_sample() -> io::Result<()> {
    let terminal = Term::stdout();
    terminal.write_line("Hello World!")?;
    // Same duration as the original `from_millis(2000)`.
    thread::sleep(Duration::from_secs(2));
    terminal.clear_line()?;
    Ok(())
}
/// Runs the demo; terminal I/O is the whole program, so a failure is fatal.
fn main() {
    // `expect` instead of a bare `unwrap` so the panic explains what failed.
    print_sample().expect("failed to write sample output to the terminal");
}
|
use std::mem::size_of;
use math::{Rect, Point2, BaseNum, BaseIntExt};
/// Like [`blit`], but treats `brush` as a boolean mask: `f(x, y)` is invoked
/// only for positions whose mask entry is `true`.
pub fn mask<N, F>(r: Rect<N>, br: Rect<N>, brush: &[bool], mut f: F)
where
    F: FnMut(N, N),
    N: BaseIntExt,
{
    blit(r, br, brush, |x, y, set| {
        if set {
            f(x, y);
        }
    });
}
/// Visits the pixels of `brush` (a row-major buffer of row width `br.dx()`)
/// that fall inside `r`, invoking `f(x, y, pixel)` for each.
///
/// `r` must lie within `br` and `brush` must cover `br`'s full area;
/// otherwise the pointer walk below reads out of bounds.
pub fn blit<N, F, C>(r: Rect<N>, br: Rect<N>, brush: &[C], mut f: F)
where
    F: FnMut(N, N, C),
    N: BaseIntExt,
    C: Copy,
{
    let w = br.dx();
    // Element offset of r's top-left corner inside the brush buffer.
    let start = r.min - br.min;
    let start = (start.x + start.y * w).to_usize().unwrap();
    // Elements to skip at the end of each row to reach the next row.
    let stride = (w - r.dx()).to_usize().unwrap();
    unsafe {
        // SAFETY: relies on the caller guaranteeing `r` ⊆ `br` and that
        // `brush.len()` covers the `br` area, so every offset stays in bounds.
        let mut pix = brush.as_ptr().add(start);
        for y in r.min.y..r.max.y {
            for x in r.min.x..r.max.x {
                f(x, y, *pix);
                // BUG FIX: `ptr::add` counts in *elements*, not bytes. The
                // original `pix.add(size_of::<C>())` advanced size_of::<C>()
                // elements per pixel, walking past the row (and the buffer)
                // for any C larger than one byte.
                pix = pix.add(1);
            }
            pix = pix.add(stride);
        }
    }
}
/// Invokes `pixel` for every integer point inside `r` (half-open on `max`),
/// scanning row by row.
pub fn fill_rect<N, F>(r: Rect<N>, mut pixel: F)
where
    F: FnMut(Point2<N>),
    N: BaseIntExt,
{
    for row in r.min.y..r.max.y {
        for col in r.min.x..r.max.x {
            pixel(Point2::new(col, row));
        }
    }
}
/// Rasterizes the line segment from `start` to `end`, invoking `pixel` once
/// per point, by delegating to the shared Bresenham iterator.
///
/// The hand-rolled DDA implementation that used to sit here, commented out,
/// was superseded by `super::Bresenham` and has been removed.
pub fn draw_line<N, F>(start: Point2<N>, end: Point2<N>, pixel: F)
where
    F: FnMut(Point2<N>),
    N: BaseIntExt,
{
    super::Bresenham::new(start, end).for_each(pixel)
}
/// Rasterizes the ellipse inscribed in `r` using a Bresenham-style midpoint
/// algorithm. For each scanline pair it emits two horizontal spans through
/// `seg`: one for the upper half (`y0`) and one for the lower half (`y1`),
/// each given as a left/right endpoint pair.
pub fn draw_ellipse<N, F>(r: Rect<N>, mut seg: F)
where
    N: BaseNum,
    F: FnMut(Point2<N>, Point2<N>),
{
    // Work in i64 so the quartic error terms below cannot overflow N.
    let (mut x0, mut y0, mut x1, mut y1) = (
        r.min.x.to_i64().unwrap(),
        r.min.y.to_i64().unwrap(),
        r.max.x.to_i64().unwrap(),
        r.max.y.to_i64().unwrap(),
    );
    let a = (x1 - x0).abs();
    let b = (y1 - y0).abs();
    // values of diameter (b1 = 1 when the vertical diameter is odd)
    let mut b1 = b & 1;
    // error increment
    let mut dx = 4 * ( 1 - a) * b * b;
    let mut dy = 4 * (b1 + 1) * a * a;
    let mut err = dx + dy + b1 * a * a;
    // if called with swapped points
    if x0 > x1 {
        x0 = x1;
        x1 += a;
    }
    // .. exchange them
    if y0 > y1 {
        y0 = y1;
    }
    // starting pixel
    y0 += (b + 1) / 2;
    y1 = y0 - b1;
    let a = 8 * a * a;
    b1 = 8 * b * b;
    // do-while: the block runs at least once and repeats while `x0 <= x1`.
    while {
        let q1 = Point2::new(x1, y0).cast().unwrap();
        let q2 = Point2::new(x0, y0).cast().unwrap();
        let q3 = Point2::new(x0, y1).cast().unwrap();
        let q4 = Point2::new(x1, y1).cast().unwrap();
        seg(q2, q1);
        seg(q3, q4);
        let e2 = 2 * err; // error of 1.step
        // y step
        if e2 <= dy {
            y0 += 1;
            y1 -= 1;
            dy += a;
            err += dy;
        }
        // x step
        if e2 >= dx || 2 * err > dy {
            x0 += 1;
            x1 -= 1;
            dx += b1;
            err += dx;
        }
        x0 <= x1
    } {}
    // too early stop of flat ellipses a=1
    while y0 - y1 < b {
        // -> finish tip of ellipse
        let a = Point2::new(x0-1, y0).cast().unwrap();
        let b = Point2::new(x1+1, y0).cast().unwrap();
        seg(a, b);
        y0 += 1;
        let a = Point2::new(x0-1, y1).cast().unwrap();
        let b = Point2::new(x1+1, y1).cast().unwrap();
        seg(a, b);
        y1 -= 1;
    }
}
|
use super::Subscription;
use crate::env::Env;
use futures::prelude::*;
use futures::{
channel::mpsc,
stream::{FusedStream, Stream},
task::{self, Poll},
};
use gloo_events::EventListener;
use serde::Deserialize;
use std::{borrow::Cow, marker::PhantomData, pin::Pin};
use wasm_bindgen::prelude::*;
/// Creates a [`WindowEvent`] subscription for the given DOM event type,
/// deserializing each event into `T`.
pub fn window_event<T>(event_type: impl Into<Cow<'static, str>>) -> WindowEvent<T>
where
    T: for<'de> Deserialize<'de> + 'static,
{
    let event_type = event_type.into();
    WindowEvent {
        event_type,
        _marker: PhantomData,
    }
}
/// A subscription that listens for DOM events of `event_type` on the window
/// and emits each one deserialized into `T`.
pub struct WindowEvent<T> {
    /// DOM event type, e.g. `"resize"`.
    event_type: Cow<'static, str>,
    // `fn() -> T` ties the type parameter to the struct without owning a T.
    _marker: PhantomData<fn() -> T>,
}
impl<T> Subscription for WindowEvent<T>
where
    T: for<'de> Deserialize<'de> + 'static,
{
    type Msg = T;
    type Stream = WindowEventStream<T>;
    /// Attaches a DOM event listener to the window and forwards each event,
    /// deserialized into `T`, through an unbounded channel.
    fn subscribe(self, env: &Env) -> crate::Result<Self::Stream> {
        let (tx, rx) = mpsc::unbounded();
        let listener = EventListener::new(&env.window, self.event_type, move |event| {
            let event: &JsValue = event.as_ref();
            let de = serde_wasm_bindgen::Deserializer::from(event.clone());
            // Events that fail to deserialize into `T` are silently dropped.
            if let Ok(msg) = T::deserialize(de) {
                tx.unbounded_send(msg).unwrap_throw();
            }
        });
        // Storing the listener in the stream ties the DOM registration's
        // lifetime to the stream: dropping the stream detaches the listener.
        Ok(WindowEventStream {
            rx,
            _listener: Some(listener),
        })
    }
}
/// Stream half of a [`WindowEvent`] subscription: yields deserialized events
/// received from the DOM listener.
pub struct WindowEventStream<T> {
    rx: mpsc::UnboundedReceiver<T>,
    // Kept only for its Drop: detaches the DOM listener with the stream.
    _listener: Option<EventListener>,
}
impl<T> Stream for WindowEventStream<T>
where
    T: for<'de> Deserialize<'de>,
{
    type Item = T;
    /// Delegates polling to the inner receiver; the receiver is `Unpin`, so
    /// `get_mut` on the pinned self is fine here.
    fn poll_next(self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll<Option<Self::Item>> {
        self.get_mut().rx.poll_next_unpin(cx)
    }
    fn size_hint(&self) -> (usize, Option<usize>) {
        self.rx.size_hint()
    }
}
impl<T> FusedStream for WindowEventStream<T>
where
    T: for<'de> Deserialize<'de>,
{
    /// Terminated exactly when the underlying channel is closed and drained.
    fn is_terminated(&self) -> bool {
        self.rx.is_terminated()
    }
}
|
//! [Closures]: Anonymous Functions that Can Capture Their Environment
//!
//! [closures]: https://doc.rust-lang.org/book/ch13-01-closures.html
use std::error::Error;
use the_book::ch13::sec01::Cacher;
fn main() -> Result<(), Box<dyn Error>> {
let mut c = Cacher::new(|x| x);
for _ in { 1..1_000 } {
let got = c.get(1);
assert_eq!(1, got);
}
let got = c.get(2);
assert_eq!(1, got);
Ok(())
}
|
/// Demonstrates variable bindings: immutable and mutable bindings, a
/// constant, and tuple destructuring. Prints each step to stdout.
pub fn run() {
    let name = "Wulf";
    // Mutable binding: reassigned below.
    let mut age = 25;
    // Typo fix in the output text: "ang age" -> "and age".
    println!("My name is {} and age is {}", name, age);
    age = 26;
    println!("My name is {} and age is {}", name, age);
    // Define constant. `001` was a misleading zero-prefixed literal (it is
    // just 1 in Rust, not octal) — written plainly.
    const ID: i32 = 1;
    println!("ID is {}", ID);
    // Assign multiple variables via tuple destructuring
    let (my_name, my_age) = ("Wulf", 25);
    println!(" {} {} ", my_name, my_age);
}
extern crate lettre;
#[macro_use]
extern crate log;
extern crate rand;
#[macro_use]
extern crate serde_derive;
extern crate toml;
use std::path::Path;
use rand::Rng;
pub mod conf;
mod email;
pub mod file_utils;
pub mod kk_log;
/// A single allocation: `giver` gives a present to `receiver`.
#[derive(Debug)]
pub struct KkPair {
    giver: conf::Participants,
    receiver: conf::Participants,
}
impl KkPair {
    /// Returns a clone of the giver.
    pub fn get_giver(&self) -> conf::Participants {
        self.giver.clone()
    }
    /// Returns a clone of the receiver.
    pub fn get_receiver(&self) -> conf::Participants {
        self.receiver.clone()
    }
}
/// Serializable name-only view of a [`KkPair`], suitable for exporting the
/// allocation.
#[derive(Serialize)]
pub struct Pair {
    giver: String,
    receiver: String
}
impl From<&KkPair> for Pair {
    fn from(pair: &KkPair) -> Self {
        Pair {
            // `get_name()` already returns an owned String (see its other
            // call sites), so the `.clone()` the original added on top of it
            // was a redundant allocation.
            giver: pair.giver.get_name(),
            receiver: pair.receiver.get_name()
        }
    }
}
/// This is the parent type of this process. It contains all of the data required to allocate
/// and distribute the information.
#[derive(Debug)]
pub struct KrisKringles {
    // Parsed configuration; supplies the participant list and email settings.
    configuration: conf::KkConf,
    // The current giver -> receiver allocation.
    pairs: Vec<KkPair>,
}
impl KrisKringles {
    /// Constructs the matcher from a TOML configuration file following the
    /// structure seen in `tests/resources/full.toml`, and immediately
    /// performs the pairing.
    pub fn build_kks_from_file<P: AsRef<Path>>(path: P) -> KrisKringles {
        let conf = conf::KkConf::build(path);
        let pairs = perform_pairing(&conf.get_participants());
        KrisKringles {
            configuration: conf,
            pairs,
        }
    }
    /// Constructs the matcher from an already-loaded configuration and
    /// performs the pairing.
    pub fn from_config(conf: conf::KkConf) -> KrisKringles {
        let pairs = perform_pairing(&conf.get_participants());
        KrisKringles {
            configuration: conf,
            pairs,
        }
    }
    /// Returns the allocation as serializable [`Pair`]s.
    pub fn get_pairs(&self) -> Vec<Pair> {
        self.pairs.iter().map(|x| x.into()).collect()
    }
    /// Writes the full allocation to `path` (one "giver --> receiver" line
    /// per pair) and, for each giver, a `<giver>.kk` file containing only
    /// their receiver's name. (The original doc here described the pairing
    /// criteria, which belong to `perform_pairing`.)
    pub fn write_kks_to_file<P: AsRef<Path>>(&self, path: P) {
        let mut all_content = String::new();
        for pair in &self.pairs {
            let mut file_name: String = pair.get_giver().get_name();
            all_content.push_str(&file_name);
            all_content.push_str(" --> ");
            all_content.push_str(&pair.get_receiver().get_name());
            // push(char) over push_str("\n") for a single character.
            all_content.push('\n');
            file_name.push_str(".kk");
            file_utils::write_to_file(file_name, pair.get_receiver().get_name());
        }
        file_utils::write_to_file(path, all_content);
    }
    /// Given the name of the giver will find the name of the receiver.
    pub fn find_kk(&self, giver: &str) -> Option<String> {
        for pair in &self.pairs {
            if pair.giver.get_name().eq(giver) {
                // get_name() returns an owned String; the original cloned it
                // a second time for no benefit.
                return Some(pair.receiver.get_name());
            }
        }
        None
    }
    /// Returns all participants used in the Kris Kringle allocation.
    pub fn get_participants(&self) -> Vec<String> {
        self.configuration
            .get_participants()
            .iter()
            .map(|x| x.get_name())
            .collect()
    }
    /// Sends emails to the allocated giver of the Kris Kringle pair. This
    /// function fails if the allocation has not yet been performed.
    // TODO Add some error handling
    pub fn email_givers(&self) -> Result<(), String> {
        if invalid_map(&self.pairs) {
            return Err(String::from("The pairs have not yet been allocated!!!"));
        }
        if email::send_emails(self).is_ok() {
            Ok(())
        } else {
            Err(String::from("Unable to send emails"))
        }
    }
}
/// Performs the pairing of each giver to a receiver: everyone starts paired
/// with themselves, then receivers are shuffled until the allocation passes
/// `invalid_map` (no self-pairs, no same-group pairs).
///
/// NOTE(review): this rejection loop never terminates if the group layout
/// admits no valid allocation — TODO confirm callers guarantee one exists.
fn perform_pairing(all: &[conf::Participants]) -> Vec<KkPair> {
    // `&[_]` instead of `&Vec<_>`: callers passing `&Vec` still coerce.
    let mut pairs: Vec<KkPair> = all
        .iter()
        .map(|person| KkPair {
            giver: person.clone(),
            receiver: person.clone(),
        })
        .collect();
    while invalid_map(&pairs) {
        shuffle_pairs(all.len(), &mut pairs);
    }
    pairs
}
/// Mutates `pairs` by swapping the receivers of two randomly chosen givers
/// (the two indices may coincide, which leaves the vector unchanged).
fn shuffle_pairs(max_length: usize, pairs: &mut Vec<KkPair>) {
    let mut rng = rand::thread_rng();
    let first = rng.gen_range(0, max_length);
    let second = rng.gen_range(0, max_length);
    let swapped = pairs[first].receiver.clone();
    pairs[first].receiver = pairs[second].receiver.clone();
    pairs[second].receiver = swapped;
}
/// Determines whether the current allocation is invalid: a pairing is
/// rejected when any giver would give a present to themselves, or when giver
/// and receiver belong to the same group.
///
/// NOTE(review): the original doc said the group check applies "if enabled",
/// but no such flag exists here — the check is unconditional. Confirm intent.
fn invalid_map(pairs: &[KkPair]) -> bool {
    // `&[_]` instead of `&Vec<_>`: callers passing `&Vec` still coerce.
    for pair in pairs {
        info!("Comparing {:?} - {:?}", pair.giver, pair.receiver);
        if pair.giver.get_name().eq(&pair.receiver.get_name()) {
            info!("It is invalid");
            return true;
        }
        if pair.giver.get_group() == pair.receiver.get_group() {
            info!("It is invalid");
            return true;
        }
    }
    info!("It is valid");
    false
}
|
// **************************************************************************
// Copyright (c) 2015 Roland Ruckerbauer All Rights Reserved.
//
// This file is part of hidapi_rust.
//
// hidapi_rust is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// hidapi_rust is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with hidapi_rust. If not, see <http://www.gnu.org/licenses/>.
// *************************************************************************
extern crate gcc;
extern crate pkg_config;
/// Build-script entry point: delegates to the platform-specific `compile`
/// selected by the `#[cfg(target_os = …)]` attributes below.
fn main() {
    compile();
}
#[cfg(target_os = "linux")]
// Builds the bundled hidapi C source (libusb backend) into libhidapi.a,
// pulling libusb-1.0's include paths from pkg-config.
fn compile() {
    let mut config = gcc::Config::new();
    config.file("etc/hidapi/libusb/hid.c").include("etc/hidapi/hidapi");
    let lib = pkg_config::find_library("libusb-1.0").expect("Unable to find libusb-1.0");
    for path in lib.include_paths {
        config.include(path.to_str().expect("Failed to convert include path to str"));
    }
    config.compile("libhidapi.a");
}
#[cfg(target_os = "windows")]
// Builds the Windows hidapi backend and links the setupapi system library.
fn compile() {
    gcc::Config::new()
        .file("etc/hidapi/windows/hid.c")
        .include("etc/hidapi/hidapi")
        .compile("libhidapi.a");
    println!("cargo:rustc-link-lib=setupapi");
}
#[cfg(target_os = "macos")]
// Builds the macOS hidapi backend and links the IOKit and CoreFoundation
// frameworks it depends on.
fn compile() {
    gcc::Config::new()
        .file("etc/hidapi/mac/hid.c")
        .include("etc/hidapi/hidapi")
        .compile("libhidapi.a");
    println!("cargo:rustc-link-lib=framework=IOKit");
    println!("cargo:rustc-link-lib=framework=CoreFoundation");
}
|
/*
* Copyright 2019 Intel Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* ------------------------------------------------------------------------------
*/
use bincode::{deserialize, serialize};
use database::lmdb::LmdbDatabase;
use database::DatabaseError;
use engine::consensus_state::ConsensusState;
use poet2_util;
use sawtooth_sdk::consensus::engine::BlockId;
/// Errors surfaced by [`ConsensusStateStore`] operations.
#[derive(Debug)]
pub enum ConsensusStateStoreError {
    /// Generic failure with a description.
    Error(String),
    /// The requested consensus state does not exist.
    UnknownConsensusState,
}
/// LMDB-backed store mapping block ids to serialized `ConsensusState`.
pub struct ConsensusStateStore<'a> {
    consensus_state_db: LmdbDatabase<'a>,
}
impl<'a> ConsensusStateStore<'a> {
    /// Wraps the given LMDB database as a consensus state store.
    pub fn new(db: LmdbDatabase<'a>) -> Self {
        ConsensusStateStore {
            consensus_state_db: db,
        }
    }
    /// Fetches and deserializes the consensus state stored for `block_id`.
    ///
    /// # Errors
    /// `NotFoundError` when no state exists for the block, and
    /// `CorruptionError` when the stored bytes fail to deserialize.
    pub fn get(&self, block_id: &BlockId) -> Result<Box<ConsensusState>, DatabaseError> {
        let reader = self.consensus_state_db.reader()?;
        let state = reader.get(block_id).ok_or_else(|| {
            DatabaseError::NotFoundError(format!(
                "State not found for
                block_id: {}",
                poet2_util::to_hex_string(block_id)
            ))
        })?;
        debug!(
            "Found state for block_id : {}",
            poet2_util::to_hex_string(block_id)
        );
        let consensus_state: ConsensusState = deserialize(&state).map_err(|err| {
            DatabaseError::CorruptionError(format!(
                "Error in deserializing consensus state : {}",
                err
            ))
        })?;
        // `consensus_state` is already an owned value; the original cloned
        // it here for no reason before boxing.
        Ok(Box::new(consensus_state))
    }
    /// Deletes the state stored for `block_id`.
    ///
    /// # Panics
    /// Panics if the LMDB commit fails.
    pub fn delete(&mut self, block_id: &BlockId) -> Result<(), DatabaseError> {
        let mut writer = self.consensus_state_db.writer()?;
        writer.delete(&Vec::from(block_id.clone()))?;
        // `unwrap_or_else` instead of `expect(&format!(…))` so the panic
        // message is only built on the failure path (clippy::expect_fun_call).
        writer.commit().unwrap_or_else(|_| {
            panic!(
                "Failed to commit state deletion for block_id : {}",
                poet2_util::to_hex_string(block_id)
            )
        });
        debug!(
            "Deleted state for block_id : {}",
            poet2_util::to_hex_string(block_id)
        );
        Ok(())
    }
    /// Serializes `consensus_state` and stores it under `block_id`.
    ///
    /// # Panics
    /// Panics if the LMDB commit fails.
    pub fn put(
        &mut self,
        block_id: &BlockId,
        consensus_state: ConsensusState,
    ) -> Result<(), DatabaseError> {
        let mut writer = self.consensus_state_db.writer()?;
        let serialized_state = serialize(&consensus_state).map_err(|err| {
            DatabaseError::WriterError(format!("Failed to serialize state: {}", err))
        })?;
        writer.put(&Vec::from(block_id.clone()), &serialized_state)?;
        writer.commit().unwrap_or_else(|_| {
            panic!(
                "Failed to commit state write to db for block_id : {}",
                poet2_util::to_hex_string(block_id)
            )
        });
        debug!(
            "Stored state for block_id : {}",
            poet2_util::to_hex_string(block_id)
        );
        Ok(())
    }
    /// Walks the chain backwards from `head`, deleting stored state for each
    /// block until (but not including) `ancestor`.
    ///
    /// # Panics
    /// Panics when a block's state is missing or a deletion fails.
    pub fn delete_states_upto(&mut self, ancestor: BlockId, head: BlockId) {
        // (`-> ()` removed from the signature; it was redundant.)
        let mut next = head;
        let mut count = 0_u64;
        // `while` replaces the original `loop { if …  { break; } … }`.
        while ancestor != next {
            count += 1;
            // Fetch before deleting: the previous-block link inside the state
            // is needed to continue the walk. (The original's `&next.clone()`
            // cloned a value that is only borrowed here.)
            let state = self.get(&next);
            debug!("Deleting state for {}", poet2_util::to_hex_string(&next));
            if let Err(err) = self.delete(&next) {
                panic!(
                    "Could not delete state for block_id : {}. Error : {}",
                    poet2_util::to_hex_string(&next),
                    err
                );
            }
            let state = state.unwrap();
            next = BlockId::from(poet2_util::from_hex_string(
                state.estimate_info.previous_block_id,
            ));
        }
        debug!("Deleted states for {} blocks.", count);
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use database::config;
    use database::lmdb;
    use database::CliError;
    use std::fs;
    use std::path::Path;

    // NOTE(review): the six assertion helpers below are not referenced by
    // the current tests; kept for use by future test cases.

    /// Asserts that there are COUNT many objects in DB.
    fn assert_database_db_count(count: usize, db: &LmdbDatabase) {
        let reader = db.reader().unwrap();
        assert_eq!(reader.count().unwrap(), count,);
    }

    /// Asserts that there are COUNT many objects in DB's INDEX.
    fn assert_index_count(index: &str, count: usize, db: &LmdbDatabase) {
        let reader = db.reader().unwrap();
        assert_eq!(reader.index_count(index).unwrap(), count,);
    }

    /// Asserts that KEY is associated with VAL in DB.
    fn assert_key_value(key: u8, val: u8, db: &LmdbDatabase) {
        let reader = db.reader().unwrap();
        assert_eq!(reader.get(&[key]).unwrap(), [val],);
    }

    /// Asserts that KEY is associated with VAL in DB's INDEX.
    fn assert_index_key_value(index: &str, key: u8, val: u8, db: &LmdbDatabase) {
        let reader = db.reader().unwrap();
        assert_eq!(reader.index_get(index, &[key]).unwrap().unwrap(), [val],);
    }

    /// Asserts that KEY is not in DB.
    fn assert_not_in_database_db(key: u8, db: &LmdbDatabase) {
        let reader = db.reader().unwrap();
        assert!(reader.get(&[key]).is_none());
    }

    /// Asserts that KEY is not in DB's INDEX.
    fn assert_not_in_index(index: &str, key: u8, db: &LmdbDatabase) {
        let reader = db.reader().unwrap();
        assert!(reader.index_get(index, &[key]).unwrap().is_none());
    }

    /// Creates an LMDB context whose backing file name is prefixed with
    /// `file_prefix`, isolating the context and db files of each test
    /// case: unit-test threads may run in parallel, which would otherwise
    /// create conflicts on a shared db file.
    /// (Takes `&str` rather than `&String`; clippy::ptr_arg.)
    fn create_context(file_prefix: &str) -> lmdb::LmdbContext {
        let path_config = config::get_path_config();
        assert!(path_config.data_dir.exists());
        let statestore_path = &path_config.data_dir.join(Path::new(&format!(
            "{}{}",
            file_prefix,
            config::get_filename()
        )));
        let context = lmdb::LmdbContext::new(statestore_path, 1, None)
            .map_err(|err| CliError::EnvironmentError(format!("{}", err)))
            .expect("Error creating context.");
        assert!(statestore_path.exists());
        context
    }

    /// Removes the db file created for `file_prefix` and verifies that it
    /// is gone. Passing the path directly avoids the original
    /// `.to_str().unwrap()`, which could panic on non-UTF-8 paths, and the
    /// removal result is now checked instead of silently ignored.
    fn cleanup_dbfile(file_prefix: &str) {
        let path_config = config::get_path_config();
        let statestore_path = &path_config.data_dir.join(Path::new(&format!(
            "{}{}",
            file_prefix,
            config::get_filename()
        )));
        fs::remove_file(statestore_path).expect("Failed to remove db file");
        // Ensure the file has been deleted
        assert!(!statestore_path.exists());
    }

    #[test]
    fn test_state_store_get() {
        let file_name = String::from("test_state_store_get_");
        let ctx = create_context(&file_name);
        let statestore_db = lmdb::LmdbDatabase::new(&ctx, &["index_consensus_state"])
            .map_err(|err| CliError::EnvironmentError(format!("{}", err)))
            .unwrap();
        let mut state_store = ConsensusStateStore::new(statestore_db);
        // Taking random u8 vector as block_id
        assert!(state_store.get(&BlockId::from(vec![11])).is_err());
        state_store
            .put(&BlockId::from(vec![11]), ConsensusState::default())
            .expect("Failed to store state");
        assert!(state_store.get(&BlockId::from(vec![11])).is_ok());
        //cleanup
        cleanup_dbfile(&file_name);
    }

    #[test]
    fn test_state_store_put() {
        let file_name = String::from("test_state_store_put_");
        let ctx = create_context(&file_name);
        let statestore_db = lmdb::LmdbDatabase::new(&ctx, &["index_consensus_state"])
            .map_err(|err| CliError::EnvironmentError(format!("{}", err)))
            .unwrap();
        let mut state_store = ConsensusStateStore::new(statestore_db);
        // Taking random u8 vector as block_id
        assert!(state_store.get(&BlockId::from(vec![13])).is_err());
        state_store
            .put(&BlockId::from(vec![13]), ConsensusState::default())
            .expect("Failed to store state");
        assert!(state_store.get(&BlockId::from(vec![13])).is_ok());
        assert_eq!(
            *state_store.get(&BlockId::from(vec![13])).unwrap(),
            ConsensusState::default()
        );
        //cleanup
        cleanup_dbfile(&file_name);
    }

    #[test]
    fn test_state_store_delete() {
        let file_name = String::from("test_state_store_delete_");
        let ctx = create_context(&file_name);
        let statestore_db = lmdb::LmdbDatabase::new(&ctx, &["index_consensus_state"])
            .map_err(|err| CliError::EnvironmentError(format!("{}", err)))
            .unwrap();
        let mut state_store = ConsensusStateStore::new(statestore_db);
        // Taking random u8 vector as block_id
        state_store
            .put(&BlockId::from(vec![14]), ConsensusState::default())
            .expect("Failed to store state");
        state_store
            .put(&BlockId::from(vec![15]), ConsensusState::default())
            .expect("Failed to store state");
        assert_eq!(
            *state_store.get(&BlockId::from(vec![14])).unwrap(),
            ConsensusState::default()
        );
        assert_eq!(
            *state_store.get(&BlockId::from(vec![15])).unwrap(),
            ConsensusState::default()
        );
        state_store
            .delete(&BlockId::from(vec![14]))
            .expect("Failed to delete state");
        assert!(state_store.get(&BlockId::from(vec![14])).is_err());
        assert!(state_store.get(&BlockId::from(vec![15])).is_ok());
        //cleanup
        cleanup_dbfile(&file_name);
    }
}
|
use anyhow::{Context, Result};
use fs_err as fs;
use goblin::elf::Elf;
use regex::Regex;
use std::path::{Path, PathBuf};
use std::process::{Command, Stdio};
/// Find the dynamic loader path by reading the `PT_INTERP` interpreter
/// entry from `/bin/ls`'s ELF header; on musl systems this is the
/// `ld-musl-*` loader.
///
/// Returns `Ok(None)` when the binary has no interpreter entry (e.g. a
/// statically linked `/bin/ls`).
pub fn find_musl_libc() -> Result<Option<PathBuf>> {
    // Attach context so a failure identifies the probed binary.
    let buffer = fs::read("/bin/ls").context("failed to read /bin/ls")?;
    let elf = Elf::parse(&buffer).context("failed to parse /bin/ls as an ELF binary")?;
    Ok(elf.interpreter.map(PathBuf::from))
}
/// Read the musl version from the libc library's output.
///
/// Running the loader at `ld_path` with no arguments makes it print a
/// banner like the following to stderr:
///
///     musl libc (x86_64)
///     Version 1.2.2
///     Dynamic Program Loader
///
/// Returns `Ok(Some((major, minor)))` when a `Version X.Y` line is
/// found, `Ok(None)` otherwise.
pub fn get_musl_version(ld_path: impl AsRef<Path>) -> Result<Option<(u16, u16)>> {
    let ld_path = ld_path.as_ref();
    let output = Command::new(ld_path)
        .stdout(Stdio::null())
        .stderr(Stdio::piped())
        .output()?;
    let stderr = std::str::from_utf8(&output.stderr)?;
    let expr = Regex::new(r"Version (\d+)\.(\d+)")?;
    if let Some(capture) = expr.captures(stderr) {
        // Both groups are `\d+`, so `get(i)` is always `Some` here; the
        // parse can still fail on u16 overflow, hence the context.
        // Shared closure replaces the two duplicated parse chains.
        let parse_group = |i: usize| -> Result<u16> {
            capture
                .get(i)
                .unwrap()
                .as_str()
                .parse::<u16>()
                .context("Expected a digit")
        };
        return Ok(Some((parse_group(1)?, parse_group(2)?)));
    }
    Ok(None)
}
|
// ===============================================================================
// Authors: AFRL/RQQA
// Organization: Air Force Research Laboratory, Aerospace Systems Directorate, Power and Control Division
//
// Copyright (c) 2017 Government of the United States of America, as represented by
// the Secretary of the Air Force. No copyright is claimed in the United States under
// Title 17, U.S. Code. All Other Rights Reserved.
// ===============================================================================
// This file was auto-created by LmcpGen. Modifications will be overwritten.
use avtas::lmcp::{Error, ErrorType, Lmcp, LmcpSubscription, SrcLoc, Struct, StructInfo};
use std::fmt::Debug;
/// LMCP `CameraConfiguration` message (auto-generated by LmcpGen; do not
/// hand-edit — regeneration overwrites changes).
#[derive(Clone, Debug, Default)]
#[repr(C)]
pub struct CameraConfiguration {
    pub payload_id: i64,
    pub payload_kind: Vec<u8>,
    pub parameters: Vec<Box<::afrl::cmasi::key_value_pair::KeyValuePairT>>,
    pub supported_wavelength_band: ::afrl::cmasi::wavelength_band::WavelengthBand,
    pub field_of_view_mode: ::afrl::cmasi::fovoperation_mode::FOVOperationMode,
    pub min_horizontal_field_of_view: f32,
    pub max_horizontal_field_of_view: f32,
    pub discrete_horizontal_field_of_view_list: Vec<f32>,
    pub video_stream_horizontal_resolution: u32,
    pub video_stream_vertical_resolution: u32,
}

impl PartialEq for CameraConfiguration {
    // NOTE(review): equality compares only the camera-specific fields and
    // skips `payload_id`, `payload_kind`, and `parameters` — presumably a
    // generator choice (inherited payload fields excluded); confirm
    // against LmcpGen before relying on `==` for full structural equality.
    fn eq(&self, _other: &CameraConfiguration) -> bool {
        true
            && &self.supported_wavelength_band == &_other.supported_wavelength_band
            && &self.field_of_view_mode == &_other.field_of_view_mode
            && &self.min_horizontal_field_of_view == &_other.min_horizontal_field_of_view
            && &self.max_horizontal_field_of_view == &_other.max_horizontal_field_of_view
            && &self.discrete_horizontal_field_of_view_list == &_other.discrete_horizontal_field_of_view_list
            && &self.video_stream_horizontal_resolution == &_other.video_stream_horizontal_resolution
            && &self.video_stream_vertical_resolution == &_other.video_stream_vertical_resolution
    }
}

impl LmcpSubscription for CameraConfiguration {
    // Topic string used to subscribe to this message type.
    fn subscription() -> &'static str { "afrl.cmasi.CameraConfiguration" }
}

impl Struct for CameraConfiguration {
    // Series/version/type identifiers that tag this struct on the wire;
    // `deser` rejects buffers whose header does not match these values.
    fn struct_info() -> StructInfo {
        StructInfo {
            exist: 1,
            series: 4849604199710720000u64,
            version: 3,
            struct_ty: 19,
        }
    }
}
// Wire (de)serialization for CameraConfiguration (auto-generated by
// LmcpGen). The wire layout is: StructInfo header, then every field in
// declaration order. `ser`, `deser`, and `size` must stay in sync.
impl Lmcp for CameraConfiguration {
    // Serializes into `buf`, returning the number of bytes written.
    // `get!` yields the remaining slice at `pos` (presumably returning an
    // out-of-bounds error when the buffer is exhausted — macro defined
    // elsewhere; confirm in the avtas::lmcp support code).
    fn ser(&self, buf: &mut[u8]) -> Result<usize, Error> {
        let mut pos = 0;
        // Header first.
        {
            let x = Self::struct_info().ser(buf)?;
            pos += x;
        }
        {
            let r = get!(buf.get_mut(pos ..));
            let writeb: usize = self.payload_id.ser(r)?;
            pos += writeb;
        }
        {
            let r = get!(buf.get_mut(pos ..));
            let writeb: usize = self.payload_kind.ser(r)?;
            pos += writeb;
        }
        {
            let r = get!(buf.get_mut(pos ..));
            let writeb: usize = self.parameters.ser(r)?;
            pos += writeb;
        }
        {
            let r = get!(buf.get_mut(pos ..));
            let writeb: usize = self.supported_wavelength_band.ser(r)?;
            pos += writeb;
        }
        {
            let r = get!(buf.get_mut(pos ..));
            let writeb: usize = self.field_of_view_mode.ser(r)?;
            pos += writeb;
        }
        {
            let r = get!(buf.get_mut(pos ..));
            let writeb: usize = self.min_horizontal_field_of_view.ser(r)?;
            pos += writeb;
        }
        {
            let r = get!(buf.get_mut(pos ..));
            let writeb: usize = self.max_horizontal_field_of_view.ser(r)?;
            pos += writeb;
        }
        {
            let r = get!(buf.get_mut(pos ..));
            let writeb: usize = self.discrete_horizontal_field_of_view_list.ser(r)?;
            pos += writeb;
        }
        {
            let r = get!(buf.get_mut(pos ..));
            let writeb: usize = self.video_stream_horizontal_resolution.ser(r)?;
            pos += writeb;
        }
        {
            let r = get!(buf.get_mut(pos ..));
            let writeb: usize = self.video_stream_vertical_resolution.ser(r)?;
            pos += writeb;
        }
        Ok(pos)
    }
    // Deserializes from `buf`, returning the value and bytes consumed.
    // The StructInfo header is validated before any field is read.
    fn deser(buf: &[u8]) -> Result<(CameraConfiguration, usize), Error> {
        let mut pos = 0;
        let (si, u) = StructInfo::deser(buf)?;
        pos += u;
        if si == CameraConfiguration::struct_info() {
            let mut out: CameraConfiguration = Default::default();
            // Fields are read in the same order `ser` wrote them.
            {
                let r = get!(buf.get(pos ..));
                let (x, readb): (i64, usize) = Lmcp::deser(r)?;
                out.payload_id = x;
                pos += readb;
            }
            {
                let r = get!(buf.get(pos ..));
                let (x, readb): (Vec<u8>, usize) = Lmcp::deser(r)?;
                out.payload_kind = x;
                pos += readb;
            }
            {
                let r = get!(buf.get(pos ..));
                let (x, readb): (Vec<Box<::afrl::cmasi::key_value_pair::KeyValuePairT>>, usize) = Lmcp::deser(r)?;
                out.parameters = x;
                pos += readb;
            }
            {
                let r = get!(buf.get(pos ..));
                let (x, readb): (::afrl::cmasi::wavelength_band::WavelengthBand, usize) = Lmcp::deser(r)?;
                out.supported_wavelength_band = x;
                pos += readb;
            }
            {
                let r = get!(buf.get(pos ..));
                let (x, readb): (::afrl::cmasi::fovoperation_mode::FOVOperationMode, usize) = Lmcp::deser(r)?;
                out.field_of_view_mode = x;
                pos += readb;
            }
            {
                let r = get!(buf.get(pos ..));
                let (x, readb): (f32, usize) = Lmcp::deser(r)?;
                out.min_horizontal_field_of_view = x;
                pos += readb;
            }
            {
                let r = get!(buf.get(pos ..));
                let (x, readb): (f32, usize) = Lmcp::deser(r)?;
                out.max_horizontal_field_of_view = x;
                pos += readb;
            }
            {
                let r = get!(buf.get(pos ..));
                let (x, readb): (Vec<f32>, usize) = Lmcp::deser(r)?;
                out.discrete_horizontal_field_of_view_list = x;
                pos += readb;
            }
            {
                let r = get!(buf.get(pos ..));
                let (x, readb): (u32, usize) = Lmcp::deser(r)?;
                out.video_stream_horizontal_resolution = x;
                pos += readb;
            }
            {
                let r = get!(buf.get(pos ..));
                let (x, readb): (u32, usize) = Lmcp::deser(r)?;
                out.video_stream_vertical_resolution = x;
                pos += readb;
            }
            Ok((out, pos))
        } else {
            Err(error!(ErrorType::InvalidStructInfo))
        }
    }
    // Serialized size in bytes for this value.
    fn size(&self) -> usize {
        // NOTE(review): 15 is the generator's fixed overhead — presumably
        // the StructInfo header; confirm against LmcpGen's output rules.
        let mut size = 15;
        size += self.payload_id.size();
        size += self.payload_kind.size();
        size += self.parameters.size();
        size += self.supported_wavelength_band.size();
        size += self.field_of_view_mode.size();
        size += self.min_horizontal_field_of_view.size();
        size += self.max_horizontal_field_of_view.size();
        size += self.discrete_horizontal_field_of_view_list.size();
        size += self.video_stream_horizontal_resolution.size();
        size += self.video_stream_vertical_resolution.size();
        size
    }
}
/// Accessor trait for `CameraConfiguration`-like objects (auto-generated
/// by LmcpGen); extends the parent `PayloadConfigurationT` trait with the
/// camera-specific fields.
pub trait CameraConfigurationT: Debug + Send + ::afrl::cmasi::payload_configuration::PayloadConfigurationT {
    // Downcast hooks; the concrete type overrides these to return `Some`.
    fn as_afrl_cmasi_camera_configuration(&self) -> Option<&CameraConfiguration> { None }
    fn as_mut_afrl_cmasi_camera_configuration(&mut self) -> Option<&mut CameraConfiguration> { None }
    fn supported_wavelength_band(&self) -> ::afrl::cmasi::wavelength_band::WavelengthBand;
    fn supported_wavelength_band_mut(&mut self) -> &mut ::afrl::cmasi::wavelength_band::WavelengthBand;
    fn field_of_view_mode(&self) -> ::afrl::cmasi::fovoperation_mode::FOVOperationMode;
    fn field_of_view_mode_mut(&mut self) -> &mut ::afrl::cmasi::fovoperation_mode::FOVOperationMode;
    fn min_horizontal_field_of_view(&self) -> f32;
    fn min_horizontal_field_of_view_mut(&mut self) -> &mut f32;
    fn max_horizontal_field_of_view(&self) -> f32;
    fn max_horizontal_field_of_view_mut(&mut self) -> &mut f32;
    fn discrete_horizontal_field_of_view_list(&self) -> &Vec<f32>;
    fn discrete_horizontal_field_of_view_list_mut(&mut self) -> &mut Vec<f32>;
    fn video_stream_horizontal_resolution(&self) -> u32;
    fn video_stream_horizontal_resolution_mut(&mut self) -> &mut u32;
    fn video_stream_vertical_resolution(&self) -> u32;
    fn video_stream_vertical_resolution_mut(&mut self) -> &mut u32;
}

// The Box impls below delegate via the downcast hook; `unreachable!()`
// relies on every concrete implementor overriding
// `as_afrl_cmasi_camera_configuration` — true for the generated impl.
impl Clone for Box<CameraConfigurationT> {
    fn clone(&self) -> Box<CameraConfigurationT> {
        if let Some(x) = CameraConfigurationT::as_afrl_cmasi_camera_configuration(self.as_ref()) {
            Box::new(x.clone())
        } else {
            unreachable!()
        }
    }
}

impl Default for Box<CameraConfigurationT> {
    fn default() -> Box<CameraConfigurationT> { Box::new(CameraConfiguration::default()) }
}

impl PartialEq for Box<CameraConfigurationT> {
    // Two boxed values are equal only when both downcast successfully and
    // the downcast values compare equal.
    fn eq(&self, other: &Box<CameraConfigurationT>) -> bool {
        if let (Some(x), Some(y)) =
            (CameraConfigurationT::as_afrl_cmasi_camera_configuration(self.as_ref()),
             CameraConfigurationT::as_afrl_cmasi_camera_configuration(other.as_ref())) {
            x == y
        } else {
            false
        }
    }
}

impl Lmcp for Box<CameraConfigurationT> {
    fn ser(&self, buf: &mut[u8]) -> Result<usize, Error> {
        if let Some(x) = CameraConfigurationT::as_afrl_cmasi_camera_configuration(self.as_ref()) {
            x.ser(buf)
        } else {
            unreachable!()
        }
    }
    fn deser(buf: &[u8]) -> Result<(Box<CameraConfigurationT>, usize), Error> {
        // Peek the header to verify the struct type, then deserialize the
        // concrete value and box it.
        let (si, _) = StructInfo::deser(buf)?;
        if si == CameraConfiguration::struct_info() {
            let (x, readb) = CameraConfiguration::deser(buf)?;
            Ok((Box::new(x), readb))
        } else {
            Err(error!(ErrorType::InvalidStructInfo))
        }
    }
    fn size(&self) -> usize {
        if let Some(x) = CameraConfigurationT::as_afrl_cmasi_camera_configuration(self.as_ref()) {
            x.size()
        } else {
            unreachable!()
        }
    }
}

// Parent-trait accessors: direct field access plus downcast overrides.
impl ::afrl::cmasi::payload_configuration::PayloadConfigurationT for CameraConfiguration {
    fn as_afrl_cmasi_camera_configuration(&self) -> Option<&CameraConfiguration> { Some(self) }
    fn as_mut_afrl_cmasi_camera_configuration(&mut self) -> Option<&mut CameraConfiguration> { Some(self) }
    fn payload_id(&self) -> i64 { self.payload_id }
    fn payload_id_mut(&mut self) -> &mut i64 { &mut self.payload_id }
    fn payload_kind(&self) -> &Vec<u8> { &self.payload_kind }
    fn payload_kind_mut(&mut self) -> &mut Vec<u8> { &mut self.payload_kind }
    fn parameters(&self) -> &Vec<Box<::afrl::cmasi::key_value_pair::KeyValuePairT>> { &self.parameters }
    fn parameters_mut(&mut self) -> &mut Vec<Box<::afrl::cmasi::key_value_pair::KeyValuePairT>> { &mut self.parameters }
}

// Own-trait accessors: direct field access.
impl CameraConfigurationT for CameraConfiguration {
    fn as_afrl_cmasi_camera_configuration(&self) -> Option<&CameraConfiguration> { Some(self) }
    fn as_mut_afrl_cmasi_camera_configuration(&mut self) -> Option<&mut CameraConfiguration> { Some(self) }
    fn supported_wavelength_band(&self) -> ::afrl::cmasi::wavelength_band::WavelengthBand { self.supported_wavelength_band }
    fn supported_wavelength_band_mut(&mut self) -> &mut ::afrl::cmasi::wavelength_band::WavelengthBand { &mut self.supported_wavelength_band }
    fn field_of_view_mode(&self) -> ::afrl::cmasi::fovoperation_mode::FOVOperationMode { self.field_of_view_mode }
    fn field_of_view_mode_mut(&mut self) -> &mut ::afrl::cmasi::fovoperation_mode::FOVOperationMode { &mut self.field_of_view_mode }
    fn min_horizontal_field_of_view(&self) -> f32 { self.min_horizontal_field_of_view }
    fn min_horizontal_field_of_view_mut(&mut self) -> &mut f32 { &mut self.min_horizontal_field_of_view }
    fn max_horizontal_field_of_view(&self) -> f32 { self.max_horizontal_field_of_view }
    fn max_horizontal_field_of_view_mut(&mut self) -> &mut f32 { &mut self.max_horizontal_field_of_view }
    fn discrete_horizontal_field_of_view_list(&self) -> &Vec<f32> { &self.discrete_horizontal_field_of_view_list }
    fn discrete_horizontal_field_of_view_list_mut(&mut self) -> &mut Vec<f32> { &mut self.discrete_horizontal_field_of_view_list }
    fn video_stream_horizontal_resolution(&self) -> u32 { self.video_stream_horizontal_resolution }
    fn video_stream_horizontal_resolution_mut(&mut self) -> &mut u32 { &mut self.video_stream_horizontal_resolution }
    fn video_stream_vertical_resolution(&self) -> u32 { self.video_stream_vertical_resolution }
    fn video_stream_vertical_resolution_mut(&mut self) -> &mut u32 { &mut self.video_stream_vertical_resolution }
}
#[cfg(test)]
pub mod tests {
    use super::*;
    use quickcheck::*;

    // Random instance generation for property tests; `parameters` boxes
    // concrete KeyValuePairs as trait objects.
    impl Arbitrary for CameraConfiguration {
        fn arbitrary<G: Gen>(_g: &mut G) -> CameraConfiguration {
            CameraConfiguration {
                payload_id: Arbitrary::arbitrary(_g),
                payload_kind: Arbitrary::arbitrary(_g),
                parameters: Vec::<::afrl::cmasi::key_value_pair::KeyValuePair>::arbitrary(_g).into_iter().map(|x| Box::new(x) as Box<::afrl::cmasi::key_value_pair::KeyValuePairT>).collect(),
                supported_wavelength_band: Arbitrary::arbitrary(_g),
                field_of_view_mode: Arbitrary::arbitrary(_g),
                min_horizontal_field_of_view: Arbitrary::arbitrary(_g),
                max_horizontal_field_of_view: Arbitrary::arbitrary(_g),
                discrete_horizontal_field_of_view_list: Arbitrary::arbitrary(_g),
                video_stream_horizontal_resolution: Arbitrary::arbitrary(_g),
                video_stream_vertical_resolution: Arbitrary::arbitrary(_g),
            }
        }
    }

    quickcheck! {
        // ser fills exactly size() bytes; lists longer than u16::MAX are
        // discarded (the wire format's length prefix cannot hold them).
        fn serializes(x: CameraConfiguration) -> Result<TestResult, Error> {
            use std::u16;
            if x.parameters.len() > (u16::MAX as usize) { return Ok(TestResult::discard()); }
            if x.discrete_horizontal_field_of_view_list.len() > (u16::MAX as usize) { return Ok(TestResult::discard()); }
            let mut buf: Vec<u8> = vec![0; x.size()];
            let sx = x.ser(&mut buf)?;
            Ok(TestResult::from_bool(sx == x.size()))
        }
        // deser(ser(x)) reproduces x and consumes the same byte count.
        fn roundtrips(x: CameraConfiguration) -> Result<TestResult, Error> {
            use std::u16;
            if x.parameters.len() > (u16::MAX as usize) { return Ok(TestResult::discard()); }
            if x.discrete_horizontal_field_of_view_list.len() > (u16::MAX as usize) { return Ok(TestResult::discard()); }
            let mut buf: Vec<u8> = vec![0; x.size()];
            let sx = x.ser(&mut buf)?;
            let (y, sy) = CameraConfiguration::deser(&buf)?;
            Ok(TestResult::from_bool(sx == sy && x == y))
        }
    }
}
|
//! Comparison of sequences of values with a given violin.
//!
//! # Examples
//!
//! Quick plot.
//! ```no_run
//! use preexplorer::prelude::*;
//! let many_seq_err = (0..5).map(|_| pre::SequenceViolin::new((0..10).map(|i| (i..10 + i))));
//! pre::SequenceViolins::new(many_seq_err).plot("my_identifier").unwrap();
//! ```
//!
// Structs
use crate::errors::PreexplorerError;
use crate::SequenceViolin;
// Traits
pub use crate::traits::{Configurable, Plotable, Saveable};
use core::fmt::Display;
use core::ops::{Add, AddAssign};
/// Comparison counterpart of the ``SequenceViolin`` struct: collects
/// several violins so they can be rendered together in one plot.
///
#[derive(Debug, PartialEq)]
pub struct SequenceViolins<T>
where
    T: Display + Clone,
{
    /// Violins to compare, kept in insertion order.
    data_set: Vec<SequenceViolin<T>>,
    /// Plot configuration shared by the Configurable/Saveable/Plotable impls.
    config: crate::configuration::Configuration,
}
impl<T> SequenceViolins<T>
where
    T: Display + Clone,
{
    /// Builds a comparison set from any iterator of ``SequenceViolin``s,
    /// using the default plot configuration.
    pub fn new<I>(data_set: I) -> Self
    where
        I: IntoIterator<Item = SequenceViolin<T>>,
    {
        SequenceViolins {
            data_set: data_set.into_iter().collect(),
            config: crate::configuration::Configuration::default(),
        }
    }
}
impl<T> From<SequenceViolin<T>> for SequenceViolins<T>
where
    T: Display + Clone,
{
    /// Wraps a single violin into a one-element comparison set.
    fn from(sequence: SequenceViolin<T>) -> Self {
        Self::new(std::iter::once(sequence))
    }
}
impl<T> Add<SequenceViolin<T>> for SequenceViolins<T>
where
    T: Display + Clone,
{
    type Output = Self;

    /// Consumes the set, appends one violin, and returns the result;
    /// delegates to `AddAssign`.
    fn add(self, other: SequenceViolin<T>) -> Self {
        let mut sum = self;
        sum += other;
        sum
    }
}
impl<T> Add for SequenceViolins<T>
where
    T: Display + Clone,
{
    type Output = Self;

    /// Concatenates two comparison sets; delegates to `AddAssign`.
    fn add(self, other: Self) -> Self {
        let mut sum = self;
        sum += other;
        sum
    }
}
impl<T> AddAssign<SequenceViolin<T>> for SequenceViolins<T>
where
    T: Display + Clone,
{
    /// Appends a single violin to the end of the comparison set.
    fn add_assign(&mut self, other: SequenceViolin<T>) {
        self.data_set.extend(std::iter::once(other));
    }
}
impl<T> AddAssign for SequenceViolins<T>
where
    T: Display + Clone,
{
    /// Moves all violins out of `other` and appends them, preserving order.
    fn add_assign(&mut self, other: Self) {
        self.data_set.extend(other.data_set);
    }
}
impl<T> Configurable for SequenceViolins<T>
where
    T: Display + Clone,
{
    /// Mutable access to the shared plot configuration.
    fn configuration_mut(&mut self) -> &mut crate::configuration::Configuration {
        &mut self.config
    }
    /// Shared access to the plot configuration.
    fn configuration(&self) -> &crate::configuration::Configuration {
        &self.config
    }
}
impl<T> Saveable for SequenceViolins<T>
where
    T: Display + Clone,
{
    /// Concatenates every violin's plotable data, one per line.
    fn plotable_data(&self) -> String {
        self.data_set
            .iter()
            .map(|violin| format!("{}\n", violin.plotable_data()))
            .collect()
    }

    /// Saves each violin under the derived id ``{id}_{index}``,
    /// propagating the first error encountered.
    fn save_with_id<S: Display>(&self, id: S) -> Result<&Self, PreexplorerError> {
        for (counter, violin) in self.data_set.iter().enumerate() {
            violin.save_with_id(&format!("{}_{}", id, counter))?;
        }
        Ok(self)
    }
}
impl<T> Plotable for SequenceViolins<T>
where
    T: Display + Clone,
{
    /// Builds the gnuplot script that draws every violin of every
    /// sequence side by side (kernel-density "violin" shapes mirrored
    /// left/right around each data-point index).
    ///
    /// Panics with an explicit message when the data set is empty: the
    /// script indexes ``data_set[len - 1]``, so the original code would
    /// abort anyway — via an opaque usize-underflow panic.
    fn plot_script(&self) -> String {
        assert!(
            !self.data_set.is_empty(),
            "SequenceViolins::plot_script requires at least one sequence violin"
        );
        let id = self.checked_id();
        let mut gnuplot_script = self.config.opening_plot_script_comparison();
        // Per-sequence renormalization factors and violin counts as
        // gnuplot arrays.
        gnuplot_script += &format!("array RENORMALIZE[{}]\n", self.data_set.len());
        gnuplot_script += &format!("array DATA_POINTS[{}] = [", self.data_set.len());
        // `.len()` directly — the original `.iter().len()` was redundant.
        for counter in 0..self.data_set.len() - 1 {
            gnuplot_script += &format!("{}, ", self.data_set[counter].data.len());
        }
        gnuplot_script += &format!("{}]\n", self.data_set[self.data_set.len() - 1].data.len());
        // Precompute, per sequence, the kdensity tables and the shared
        // renormalization factor written to intermediate *_partial_plot files.
        for (counter, sequence_violin) in self.data_set.iter().enumerate() {
            let inner_id = format!("{}_{}", id, counter);
            let mut inner_path = self.data_path().to_path_buf();
            if let Some(extension) = self.data_extension() {
                inner_path.set_file_name(&inner_id);
                inner_path.set_extension(extension);
            } else {
                inner_path.set_file_name(&inner_id);
            }
            gnuplot_script += &format!("\
# Precomputation for violin sequence number {}
RENORMALIZE[{}] = 2
do for [i=0:{}] {{
# Computing some values
set table $_
plot {:?} index i using 2:(1) smooth kdensity
unset table
RENORMALIZE[{}] = (RENORMALIZE[{}] < 2 * GPVAL_Y_MAX) ? 2 * GPVAL_Y_MAX : RENORMALIZE[{}]
# Plotting a greater domain
set table {:?}.'_partial_plot'.i
x_min = (GPVAL_X_MIN < GPVAL_X_MIN - 5 * GPVAL_KDENSITY_BANDWIDTH)? GPVAL_X_MIN : GPVAL_X_MIN - 5 * GPVAL_KDENSITY_BANDWIDTH
x_max = (GPVAL_X_MAX > GPVAL_X_MAX + 5 * GPVAL_KDENSITY_BANDWIDTH)? GPVAL_X_MAX : GPVAL_X_MAX + 5 * GPVAL_KDENSITY_BANDWIDTH
set xrange [x_min:x_max]
plot {:?} index i using 2:(1) smooth kdensity
unset table
# Clean the plotting
unset xrange
unset yrange
}}
",
                counter,
                counter + 1,
                sequence_violin.data.len() - 1,
                inner_path,
                counter + 1,
                counter + 1,
                counter + 1,
                self.data_path().with_file_name(inner_id),
                inner_path,
            );
        }
        gnuplot_script += "set style fill transparent solid 0.5\n";
        // Plot with titles
        for (counter, sequence_violin) in self.data_set.iter().enumerate() {
            let inner_id = format!("{}_{}", id, counter);
            let mut inner_path = self.data_path().to_path_buf();
            inner_path.set_file_name(&inner_id);
            let legend = match sequence_violin.title() {
                Some(leg) => String::from(leg),
                None => counter.to_string(),
            };
            // First sequence uses `plot`, later ones `replot`.
            if counter > 0 { gnuplot_script += "re"; }
            gnuplot_script += &format!("\
plot '{}'.'_partial_plot'.'0' using (0 + $2/RENORMALIZE[{}]):1 with filledcurve x=0 linecolor {} title \"{}\"
",
                inner_path.display(),
                counter + 1,
                counter,
                legend,
            );
        }
        // Plot without titles
        let mut path = self.data_path().to_path_buf();
        path.set_file_name(&id);
        gnuplot_script += &format!("\
# Right side
replot for [j=0:{}] for [i=1:DATA_POINTS[j+1]-1] '{}_'.j.'_partial_plot'.i using (i + $2/RENORMALIZE[j+1]):1 with filledcurve x=i linecolor j notitle
# Left side
replot for [j=0:{}] for [i=0:DATA_POINTS[j+1]-1] '{}_'.j.'_partial_plot'.i using (i - $2/RENORMALIZE[j+1]):1 with filledcurve x=i linecolor j notitle
",
            self.data_set.len() - 1,
            path.display(),
            self.data_set.len() - 1,
            path.display(),
        );
        gnuplot_script += "\n";
        gnuplot_script += &self.ending_plot_script();
        gnuplot_script
    }
}
|
use crate::Transformer;
use std::marker::PhantomData;
/// Records the min/max of fitted data and clamps transformed values into
/// that range.
/// NOTE(review): despite the name, `transform` clamps rather than
/// rescaling to [0, 1] — confirm intended semantics against the
/// `Transformer` trait's contract.
pub struct MinMaxScaler<I, O> {
    /// Smallest value seen during `fit`; `None` until fitted.
    min: Option<I>,
    /// Largest value seen during `fit`; `None` until fitted.
    max: Option<I>,
    /// `O` appears in the `Transformer<I, O>` interface but is never stored.
    phantom: PhantomData<O>,
}
impl<I, O> MinMaxScaler<I, O> {
pub fn new() -> Self {
Self {
min: None,
max: None,
phantom: PhantomData,
}
}
pub fn max(&self) -> Option<&I> {
self.max.as_ref()
}
pub fn min(&self) -> Option<&I> {
self.min.as_ref()
}
}
impl<I, O> Transformer<I, O> for MinMaxScaler<I, O>
where
    I: Ord + Copy,
{
    type Input = I;
    type Output = I;

    /// Records the minimum and maximum of `x`.
    ///
    /// An empty `x` leaves both bounds unset instead of panicking (the
    /// original `unwrap` aborted on empty input).
    fn fit(&mut self, x: Vec<Self::Input>) {
        self.max = x.iter().max().copied();
        self.min = x.iter().min().copied();
    }

    /// Clamps each value into `[min, max]`.
    ///
    /// Any bound that has not been fitted is ignored, so values pass
    /// through unchanged on an unfitted scaler (the original compared
    /// `Some(v)` against `None` — where `Some(_) > None` holds — and then
    /// panicked on `unwrap` when `transform` ran before `fit`).
    fn transform(&self, x: Vec<Self::Input>) -> Vec<Self::Input> {
        x.into_iter()
            .map(|v| match (self.min, self.max) {
                // Check max first, matching the original branch order.
                (_, Some(max)) if v > max => max,
                (Some(min), _) if v < min => min,
                _ => v,
            })
            .collect()
    }
}
|
use crate::common::{Annot, Loc};
use std::fmt;
/// The eight token kinds of the language, one per source symbol
/// (see the `Display` impl for the symbol each kind renders as).
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub enum TokenKind {
    Incr,
    Decr,
    Next,
    Prev,
    Read,
    Write,
    LParen,
    RParen,
}

impl fmt::Display for TokenKind {
    /// Renders the token as its one-character source symbol.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        // Resolve the symbol once, then emit it with a single write_str —
        // output is identical to a per-variant write!.
        let symbol = match self {
            TokenKind::Incr => "+",
            TokenKind::Decr => "-",
            TokenKind::Next => ">",
            TokenKind::Prev => "<",
            TokenKind::Read => ",",
            TokenKind::Write => ".",
            TokenKind::LParen => "[",
            TokenKind::RParen => "]",
        };
        f.write_str(symbol)
    }
}
/// A lexical token: a `TokenKind` annotated with its source location.
pub type Token = Annot<TokenKind>;

impl Token {
    // Convenience constructors — one per token kind, each pairing the
    // kind with the location `loc` where it was lexed.

    pub fn incr(loc: Loc) -> Self {
        Self::new(TokenKind::Incr, loc)
    }
    pub fn decr(loc: Loc) -> Self {
        Self::new(TokenKind::Decr, loc)
    }
    pub fn next(loc: Loc) -> Self {
        Self::new(TokenKind::Next, loc)
    }
    pub fn prev(loc: Loc) -> Self {
        Self::new(TokenKind::Prev, loc)
    }
    pub fn read(loc: Loc) -> Self {
        Self::new(TokenKind::Read, loc)
    }
    pub fn write(loc: Loc) -> Self {
        Self::new(TokenKind::Write, loc)
    }
    pub fn lparen(loc: Loc) -> Self {
        Self::new(TokenKind::LParen, loc)
    }
    pub fn rparen(loc: Loc) -> Self {
        Self::new(TokenKind::RParen, loc)
    }
}
|
// svd2rust-style accessors for the CFG0 clock-configuration register:
// reader/writer aliases plus one read proxy and (where writable) one
// write proxy per field. Each write proxy masks its field's bits out of
// the register image and ORs the new value in at the field's offset.
#[doc = "Reader of register CFG0"]
pub type R = crate::R<u32, super::CFG0>;
#[doc = "Writer for register CFG0"]
pub type W = crate::W<u32, super::CFG0>;
#[doc = "Register CFG0 `reset()`'s with value 0"]
impl crate::ResetValue for super::CFG0 {
    type Type = u32;
    #[inline(always)]
    fn reset_value() -> Self::Type {
        0
    }
}
// SCS: bits 0:1 (mask 0x03, no shift).
#[doc = "Reader of field `SCS`"]
pub type SCS_R = crate::R<u8, u8>;
#[doc = "Write proxy for field `SCS`"]
pub struct SCS_W<'a> {
    w: &'a mut W,
}
impl<'a> SCS_W<'a> {
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub unsafe fn bits(self, value: u8) -> &'a mut W {
        self.w.bits = (self.w.bits & !0x03) | ((value as u32) & 0x03);
        self.w
    }
}
// SCSS: bits 2:3, read-only (no write proxy generated).
#[doc = "Reader of field `SCSS`"]
pub type SCSS_R = crate::R<u8, u8>;
// AHBPSC: bits 4:7.
#[doc = "Reader of field `AHBPSC`"]
pub type AHBPSC_R = crate::R<u8, u8>;
#[doc = "Write proxy for field `AHBPSC`"]
pub struct AHBPSC_W<'a> {
    w: &'a mut W,
}
impl<'a> AHBPSC_W<'a> {
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub unsafe fn bits(self, value: u8) -> &'a mut W {
        self.w.bits = (self.w.bits & !(0x0f << 4)) | (((value as u32) & 0x0f) << 4);
        self.w
    }
}
// APB1PSC: bits 8:10.
#[doc = "Reader of field `APB1PSC`"]
pub type APB1PSC_R = crate::R<u8, u8>;
#[doc = "Write proxy for field `APB1PSC`"]
pub struct APB1PSC_W<'a> {
    w: &'a mut W,
}
impl<'a> APB1PSC_W<'a> {
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub unsafe fn bits(self, value: u8) -> &'a mut W {
        self.w.bits = (self.w.bits & !(0x07 << 8)) | (((value as u32) & 0x07) << 8);
        self.w
    }
}
// APB2PSC: bits 11:13.
#[doc = "Reader of field `APB2PSC`"]
pub type APB2PSC_R = crate::R<u8, u8>;
#[doc = "Write proxy for field `APB2PSC`"]
pub struct APB2PSC_W<'a> {
    w: &'a mut W,
}
impl<'a> APB2PSC_W<'a> {
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub unsafe fn bits(self, value: u8) -> &'a mut W {
        self.w.bits = (self.w.bits & !(0x07 << 11)) | (((value as u32) & 0x07) << 11);
        self.w
    }
}
// ADCPSC_1_0: bits 14:15 (low two bits of ADCPSC; bit 2 is ADCPSC_2 below).
#[doc = "Reader of field `ADCPSC_1_0`"]
pub type ADCPSC_1_0_R = crate::R<u8, u8>;
#[doc = "Write proxy for field `ADCPSC_1_0`"]
pub struct ADCPSC_1_0_W<'a> {
    w: &'a mut W,
}
impl<'a> ADCPSC_1_0_W<'a> {
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub unsafe fn bits(self, value: u8) -> &'a mut W {
        self.w.bits = (self.w.bits & !(0x03 << 14)) | (((value as u32) & 0x03) << 14);
        self.w
    }
}
// PLLSEL: single bit 16; bool proxy offers set/clear/raw-bit writers.
#[doc = "Reader of field `PLLSEL`"]
pub type PLLSEL_R = crate::R<bool, bool>;
#[doc = "Write proxy for field `PLLSEL`"]
pub struct PLLSEL_W<'a> {
    w: &'a mut W,
}
impl<'a> PLLSEL_W<'a> {
    #[doc = r"Sets the field bit"]
    #[inline(always)]
    pub fn set_bit(self) -> &'a mut W {
        self.bit(true)
    }
    #[doc = r"Clears the field bit"]
    #[inline(always)]
    pub fn clear_bit(self) -> &'a mut W {
        self.bit(false)
    }
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub fn bit(self, value: bool) -> &'a mut W {
        self.w.bits = (self.w.bits & !(0x01 << 16)) | (((value as u32) & 0x01) << 16);
        self.w
    }
}
// PREDV0: single bit 17.
#[doc = "Reader of field `PREDV0`"]
pub type PREDV0_R = crate::R<bool, bool>;
#[doc = "Write proxy for field `PREDV0`"]
pub struct PREDV0_W<'a> {
    w: &'a mut W,
}
impl<'a> PREDV0_W<'a> {
    #[doc = r"Sets the field bit"]
    #[inline(always)]
    pub fn set_bit(self) -> &'a mut W {
        self.bit(true)
    }
    #[doc = r"Clears the field bit"]
    #[inline(always)]
    pub fn clear_bit(self) -> &'a mut W {
        self.bit(false)
    }
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub fn bit(self, value: bool) -> &'a mut W {
        self.w.bits = (self.w.bits & !(0x01 << 17)) | (((value as u32) & 0x01) << 17);
        self.w
    }
}
// PLLMF_3_0: bits 18:21 (low four bits of PLLMF; bits 4 and 5 are the
// separate PLLMF_4 / PLLMF_5 fields below).
#[doc = "Reader of field `PLLMF_3_0`"]
pub type PLLMF_3_0_R = crate::R<u8, u8>;
#[doc = "Write proxy for field `PLLMF_3_0`"]
pub struct PLLMF_3_0_W<'a> {
    w: &'a mut W,
}
impl<'a> PLLMF_3_0_W<'a> {
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub unsafe fn bits(self, value: u8) -> &'a mut W {
        self.w.bits = (self.w.bits & !(0x0f << 18)) | (((value as u32) & 0x0f) << 18);
        self.w
    }
}
// USBDPSC_1_0: bits 22:23 (low two bits of USBDPSC; bit 2 is USBDPSC_2 below).
#[doc = "Reader of field `USBDPSC_1_0`"]
pub type USBDPSC_1_0_R = crate::R<u8, u8>;
#[doc = "Write proxy for field `USBDPSC_1_0`"]
pub struct USBDPSC_1_0_W<'a> {
    w: &'a mut W,
}
impl<'a> USBDPSC_1_0_W<'a> {
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub unsafe fn bits(self, value: u8) -> &'a mut W {
        self.w.bits = (self.w.bits & !(0x03 << 22)) | (((value as u32) & 0x03) << 22);
        self.w
    }
}
// CKOUT0SEL: bits 24:26.
#[doc = "Reader of field `CKOUT0SEL`"]
pub type CKOUT0SEL_R = crate::R<u8, u8>;
#[doc = "Write proxy for field `CKOUT0SEL`"]
pub struct CKOUT0SEL_W<'a> {
    w: &'a mut W,
}
impl<'a> CKOUT0SEL_W<'a> {
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub unsafe fn bits(self, value: u8) -> &'a mut W {
        self.w.bits = (self.w.bits & !(0x07 << 24)) | (((value as u32) & 0x07) << 24);
        self.w
    }
}
// PLLMF_4: single bit 27.
#[doc = "Reader of field `PLLMF_4`"]
pub type PLLMF_4_R = crate::R<bool, bool>;
#[doc = "Write proxy for field `PLLMF_4`"]
pub struct PLLMF_4_W<'a> {
    w: &'a mut W,
}
impl<'a> PLLMF_4_W<'a> {
    #[doc = r"Sets the field bit"]
    #[inline(always)]
    pub fn set_bit(self) -> &'a mut W {
        self.bit(true)
    }
    #[doc = r"Clears the field bit"]
    #[inline(always)]
    pub fn clear_bit(self) -> &'a mut W {
        self.bit(false)
    }
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub fn bit(self, value: bool) -> &'a mut W {
        self.w.bits = (self.w.bits & !(0x01 << 27)) | (((value as u32) & 0x01) << 27);
        self.w
    }
}
// ADCPSC_2: single bit 28.
#[doc = "Reader of field `ADCPSC_2`"]
pub type ADCPSC_2_R = crate::R<bool, bool>;
#[doc = "Write proxy for field `ADCPSC_2`"]
pub struct ADCPSC_2_W<'a> {
    w: &'a mut W,
}
impl<'a> ADCPSC_2_W<'a> {
    #[doc = r"Sets the field bit"]
    #[inline(always)]
    pub fn set_bit(self) -> &'a mut W {
        self.bit(true)
    }
    #[doc = r"Clears the field bit"]
    #[inline(always)]
    pub fn clear_bit(self) -> &'a mut W {
        self.bit(false)
    }
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub fn bit(self, value: bool) -> &'a mut W {
        self.w.bits = (self.w.bits & !(0x01 << 28)) | (((value as u32) & 0x01) << 28);
        self.w
    }
}
// PLLMF_5: single bit 30 (bit 29 is not mapped to any field here).
#[doc = "Reader of field `PLLMF_5`"]
pub type PLLMF_5_R = crate::R<bool, bool>;
#[doc = "Write proxy for field `PLLMF_5`"]
pub struct PLLMF_5_W<'a> {
    w: &'a mut W,
}
impl<'a> PLLMF_5_W<'a> {
    #[doc = r"Sets the field bit"]
    #[inline(always)]
    pub fn set_bit(self) -> &'a mut W {
        self.bit(true)
    }
    #[doc = r"Clears the field bit"]
    #[inline(always)]
    pub fn clear_bit(self) -> &'a mut W {
        self.bit(false)
    }
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub fn bit(self, value: bool) -> &'a mut W {
        self.w.bits = (self.w.bits & !(0x01 << 30)) | (((value as u32) & 0x01) << 30);
        self.w
    }
}
// USBDPSC_2: single bit 31.
#[doc = "Reader of field `USBDPSC_2`"]
pub type USBDPSC_2_R = crate::R<bool, bool>;
#[doc = "Write proxy for field `USBDPSC_2`"]
pub struct USBDPSC_2_W<'a> {
    w: &'a mut W,
}
impl<'a> USBDPSC_2_W<'a> {
    #[doc = r"Sets the field bit"]
    #[inline(always)]
    pub fn set_bit(self) -> &'a mut W {
        self.bit(true)
    }
    #[doc = r"Clears the field bit"]
    #[inline(always)]
    pub fn clear_bit(self) -> &'a mut W {
        self.bit(false)
    }
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub fn bit(self, value: bool) -> &'a mut W {
        self.w.bits = (self.w.bits & !(0x01 << 31)) | (((value as u32) & 0x01) << 31);
        self.w
    }
}
// Read-side accessors: each method extracts one field from the cached
// register value (`self.bits`) and wraps it in its typed reader.
impl R {
    /// Bits 0:1 - System clock switch
    #[inline(always)]
    pub fn scs(&self) -> SCS_R {
        SCS_R::new((self.bits & 0b11) as u8)
    }
    /// Bits 2:3 - System clock switch status
    #[inline(always)]
    pub fn scss(&self) -> SCSS_R {
        SCSS_R::new(((self.bits >> 2) & 0b11) as u8)
    }
    /// Bits 4:7 - AHB prescaler selection
    #[inline(always)]
    pub fn ahbpsc(&self) -> AHBPSC_R {
        AHBPSC_R::new(((self.bits >> 4) & 0b1111) as u8)
    }
    /// Bits 8:10 - APB1 prescaler selection
    #[inline(always)]
    pub fn apb1psc(&self) -> APB1PSC_R {
        APB1PSC_R::new(((self.bits >> 8) & 0b111) as u8)
    }
    /// Bits 11:13 - APB2 prescaler selection
    #[inline(always)]
    pub fn apb2psc(&self) -> APB2PSC_R {
        APB2PSC_R::new(((self.bits >> 11) & 0b111) as u8)
    }
    /// Bits 14:15 - ADC clock prescaler selection
    #[inline(always)]
    pub fn adcpsc_1_0(&self) -> ADCPSC_1_0_R {
        ADCPSC_1_0_R::new(((self.bits >> 14) & 0b11) as u8)
    }
    /// Bit 16 - PLL Clock Source Selection
    #[inline(always)]
    pub fn pllsel(&self) -> PLLSEL_R {
        PLLSEL_R::new((self.bits >> 16) & 1 == 1)
    }
    /// Bit 17 - PREDV0 division factor
    #[inline(always)]
    pub fn predv0(&self) -> PREDV0_R {
        PREDV0_R::new((self.bits >> 17) & 1 == 1)
    }
    /// Bits 18:21 - The PLL clock multiplication factor
    #[inline(always)]
    pub fn pllmf_3_0(&self) -> PLLMF_3_0_R {
        PLLMF_3_0_R::new(((self.bits >> 18) & 0b1111) as u8)
    }
    /// Bits 22:23 - USBD clock prescaler selection
    #[inline(always)]
    pub fn usbdpsc_1_0(&self) -> USBDPSC_1_0_R {
        USBDPSC_1_0_R::new(((self.bits >> 22) & 0b11) as u8)
    }
    /// Bits 24:26 - CKOUT0 Clock Source Selection
    #[inline(always)]
    pub fn ckout0sel(&self) -> CKOUT0SEL_R {
        CKOUT0SEL_R::new(((self.bits >> 24) & 0b111) as u8)
    }
    /// Bit 27 - Bit 4 of PLLMF
    #[inline(always)]
    pub fn pllmf_4(&self) -> PLLMF_4_R {
        PLLMF_4_R::new((self.bits >> 27) & 1 == 1)
    }
    /// Bit 28 - Bit 2 of ADCPSC
    #[inline(always)]
    pub fn adcpsc_2(&self) -> ADCPSC_2_R {
        ADCPSC_2_R::new((self.bits >> 28) & 1 == 1)
    }
    /// Bit 30 - Bit 5 of PLLMF
    #[inline(always)]
    pub fn pllmf_5(&self) -> PLLMF_5_R {
        PLLMF_5_R::new((self.bits >> 30) & 1 == 1)
    }
    /// Bit 31 - Bit 2 of USBDPSC
    #[inline(always)]
    pub fn usbdpsc_2(&self) -> USBDPSC_2_R {
        USBDPSC_2_R::new((self.bits >> 31) & 1 == 1)
    }
}
// Write-side accessors: each method hands out a field write proxy that
// borrows the whole `W` writer. The proxy's `bit`/`bits` methods (defined
// above) write the field into `w.bits` and return `&mut W`, so field
// writes can be chained in one closure.
impl W {
    #[doc = "Bits 0:1 - System clock switch"]
    #[inline(always)]
    pub fn scs(&mut self) -> SCS_W {
        SCS_W { w: self }
    }
    #[doc = "Bits 4:7 - AHB prescaler selection"]
    #[inline(always)]
    pub fn ahbpsc(&mut self) -> AHBPSC_W {
        AHBPSC_W { w: self }
    }
    #[doc = "Bits 8:10 - APB1 prescaler selection"]
    #[inline(always)]
    pub fn apb1psc(&mut self) -> APB1PSC_W {
        APB1PSC_W { w: self }
    }
    #[doc = "Bits 11:13 - APB2 prescaler selection"]
    #[inline(always)]
    pub fn apb2psc(&mut self) -> APB2PSC_W {
        APB2PSC_W { w: self }
    }
    #[doc = "Bits 14:15 - ADC clock prescaler selection"]
    #[inline(always)]
    pub fn adcpsc_1_0(&mut self) -> ADCPSC_1_0_W {
        ADCPSC_1_0_W { w: self }
    }
    #[doc = "Bit 16 - PLL Clock Source Selection"]
    #[inline(always)]
    pub fn pllsel(&mut self) -> PLLSEL_W {
        PLLSEL_W { w: self }
    }
    #[doc = "Bit 17 - PREDV0 division factor"]
    #[inline(always)]
    pub fn predv0(&mut self) -> PREDV0_W {
        PREDV0_W { w: self }
    }
    #[doc = "Bits 18:21 - The PLL clock multiplication factor"]
    #[inline(always)]
    pub fn pllmf_3_0(&mut self) -> PLLMF_3_0_W {
        PLLMF_3_0_W { w: self }
    }
    #[doc = "Bits 22:23 - USBD clock prescaler selection"]
    #[inline(always)]
    pub fn usbdpsc_1_0(&mut self) -> USBDPSC_1_0_W {
        USBDPSC_1_0_W { w: self }
    }
    #[doc = "Bits 24:26 - CKOUT0 Clock Source Selection"]
    #[inline(always)]
    pub fn ckout0sel(&mut self) -> CKOUT0SEL_W {
        CKOUT0SEL_W { w: self }
    }
    #[doc = "Bit 27 - Bit 4 of PLLMF"]
    #[inline(always)]
    pub fn pllmf_4(&mut self) -> PLLMF_4_W {
        PLLMF_4_W { w: self }
    }
    #[doc = "Bit 28 - Bit 2 of ADCPSC"]
    #[inline(always)]
    pub fn adcpsc_2(&mut self) -> ADCPSC_2_W {
        ADCPSC_2_W { w: self }
    }
    #[doc = "Bit 30 - Bit 5 of PLLMF"]
    #[inline(always)]
    pub fn pllmf_5(&mut self) -> PLLMF_5_W {
        PLLMF_5_W { w: self }
    }
    #[doc = "Bit 31 - Bit 2 of USBDPSC"]
    #[inline(always)]
    pub fn usbdpsc_2(&mut self) -> USBDPSC_2_W {
        USBDPSC_2_W { w: self }
    }
}
|
use std::{fs, io};
/// Resolves the memory index of the `param`-th operand (1-based) of the
/// instruction starting at `index`.
///
/// `modes[param - 1]` selects the parameter mode:
/// * `0` (position mode): the operand cell holds an address — return it.
/// * `1` (immediate mode): the operand cell itself is the target — return
///   its index.
///
/// Takes `&[i32]` rather than `&Vec<i32>`; existing `&Vec<i32>` callers
/// still work through deref coercion.
///
/// # Panics
/// Panics on any mode other than 0 or 1, or if a position-mode operand is
/// out of bounds.
fn get_index(values: &[i32], modes: &[u32; 3], param: u32, index: usize) -> usize {
    let operand = index + param as usize;
    match modes[param as usize - 1] {
        0 => values[operand] as usize, // position: dereference the cell
        1 => operand,                  // immediate: use the cell directly
        _ => panic!("Mode not possible"),
    }
}
/// Blocks on one line from stdin and parses it as an `i32`.
///
/// # Panics
/// Panics if stdin cannot be read or the trimmed line is not a valid
/// integer.
fn read_input() -> i32 {
    let mut line = String::new();
    io::stdin()
        .read_line(&mut line)
        .expect("failed to read from stdin");
    line.trim().parse().unwrap()
}
/// Executes an Intcode program, mutating `values` (the program memory) in
/// place.
///
/// Each instruction is decoded as a zero-padded five-digit number `ABCDE`:
/// `DE` is the opcode and `C`, `B`, `A` are the modes of parameters 1-3
/// (0 = position, 1 = immediate; see `get_index`).
///
/// Opcodes: 1 add, 2 multiply, 3 store a value read from stdin, 4 print
/// a value (prefixed `--> `), 5 jump-if-true, 6 jump-if-false,
/// 7 less-than, 8 equals, 99 halt.
///
/// # Panics
/// Panics on an unknown opcode, an unsupported parameter mode, or an
/// out-of-bounds memory access.
fn int_code(values: &mut Vec<i32>) {
    let len = values.len();
    let mut i = 0;
    loop {
        // Fixed off-by-one: the guard was `i > len`, which let i == len
        // through and made `values[i]` below panic out of bounds.
        if i >= len {
            break;
        }
        let abcde = format!("{:0>5}", values[i].to_string());
        let de: u32 = abcde[3..5].parse().unwrap();
        let c: u32 = abcde[2..3].parse().unwrap();
        let b: u32 = abcde[1..2].parse().unwrap();
        let a: u32 = abcde[0..1].parse().unwrap();
        // Modes indexed by (parameter number - 1).
        let modes = [c, b, a];
        match de {
            // add: *p3 = *p1 + *p2
            1 => {
                let index_to_write = get_index(&values, &modes, 3, i);
                values[index_to_write] =
                    values[get_index(&values, &modes, 1, i)] + values[get_index(&values, &modes, 2, i)];
                i += 4;
            }
            // multiply: *p3 = *p1 * *p2
            2 => {
                let index_to_write = get_index(&values, &modes, 3, i);
                values[index_to_write] =
                    values[get_index(&values, &modes, 1, i)] * values[get_index(&values, &modes, 2, i)];
                i += 4;
            }
            // input: *p1 = value read from stdin
            3 => {
                let index_to_write = get_index(&values, &modes, 1, i);
                values[index_to_write] = read_input();
                i += 2;
            }
            // output: print *p1
            4 => {
                let index_to_read = get_index(&values, &modes, 1, i);
                println!("--> {}", values[index_to_read]);
                i += 2;
            }
            // jump-if-true: if *p1 != 0, set ip to *p2
            5 => {
                if values[get_index(&values, &modes, 1, i)] != 0 {
                    i = values[get_index(&values, &modes, 2, i)] as usize;
                } else {
                    i += 3;
                }
            }
            // jump-if-false: if *p1 == 0, set ip to *p2
            6 => {
                if values[get_index(&values, &modes, 1, i)] == 0 {
                    i = values[get_index(&values, &modes, 2, i)] as usize;
                } else {
                    i += 3;
                }
            }
            // less-than: *p3 = 1 if *p1 < *p2 else 0
            7 => {
                let index_to_write = get_index(&values, &modes, 3, i);
                if values[get_index(&values, &modes, 1, i)] < values[get_index(&values, &modes, 2, i)] {
                    values[index_to_write] = 1;
                } else {
                    values[index_to_write] = 0;
                }
                i += 4;
            }
            // equals: *p3 = 1 if *p1 == *p2 else 0
            8 => {
                let index_to_write = get_index(&values, &modes, 3, i);
                if values[get_index(&values, &modes, 1, i)] == values[get_index(&values, &modes, 2, i)] {
                    values[index_to_write] = 1;
                } else {
                    values[index_to_write] = 0;
                }
                i += 4;
            }
            // halt
            99 => {
                break;
            }
            _ => {
                panic!("Unknown OP code {}", de);
            }
        }
    }
}
/// Entry point: loads the comma-separated Intcode program from
/// `inputs.txt` and runs it. Opcode 3 instructions read their input from
/// stdin; opcode 4 instructions print to stdout.
fn main() {
    let text = fs::read_to_string("inputs.txt").expect("got an error opening the file");
    // Parse the single comma-separated line of integers directly into the
    // working memory. The previous version collected into `original` and
    // then cloned it, but `original` was never used again, so the clone
    // was redundant.
    let mut values: Vec<i32> = text
        .trim()
        .split(',')
        .map(|x| x.parse().unwrap())
        .collect();
    int_code(&mut values); // expected diagnostic output: 3419022
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.