text stringlengths 8 4.13M |
|---|
extern crate isosurface;
extern crate nalgebra;
use isosurface::*;
use nalgebra::{Vector3};
use std::cmp::{Ordering,PartialOrd};
/// Largest element of `v` under a *partial* order.
///
/// Returns `None` for an empty slice, or as soon as any element fails to
/// compare with the running maximum (e.g. a NaN among floats). A single
/// element is returned as-is without being compared against anything.
///
/// The parameter was generalized from `&Vec<T>` to `&[T]`; existing callers
/// passing `&vec![..]` still work via deref coercion.
fn partial_max<T: PartialOrd + Copy>(v: &[T]) -> Option<T> {
    // Seed the running maximum with the first element; empty input -> None.
    let (first, rest) = v.split_first()?;
    let mut best = *first;
    for x in rest {
        // `?` propagates None when the pair is incomparable (matches the
        // original early `return None`).
        if x.partial_cmp(&best)? == Ordering::Greater {
            best = *x;
        }
    }
    Some(best)
}
#[test]
fn test_basis_grid(){
    // A 3x3x3 box grid anchored at the origin with unit basis vectors.
    let grid = Grid::new_box_basis_grid(
        Vector3::new(0., 0., 0.),
        Vector3::new(1., 0., 0.),
        Vector3::new(0., 1., 0.),
        Vector3::new(0., 0., 1.),
        3, 3, 3);
    // 27 lattice points in total.
    assert_eq!(grid.points.len(), 27);
    let first: GIndex = 0;
    // The first point sits at the grid origin.
    assert_eq!(grid.points[first as usize].x, 0.0);
}
#[test]
fn test_relevant_points(){
    // 11x11x11 grid with 0.1 spacing starting from the corner (-1,-1,-1).
    let grid = Grid::new_box_basis_grid(
        Vector3::new(-1.0, -1.0, -1.0),
        Vector3::new(0.1, 0.0, 0.0),
        Vector3::new(0.0, 0.1, 0.0),
        Vector3::new(0.0, 0.0, 0.1),
        11, 11, 11);
    // Tiny sphere around the origin as the implicit surface.
    let values = OnTheFlyPointValues{
        points: &grid.points,
        function: &|v: Point| { v.norm() - 0.001 }
    };
    let flags = grid.relevant_points(&values);
    // First grid point flagged as relevant to the surface.
    let pos = flags.into_iter().position(|flag| flag).unwrap();
    let selected = &grid.points[pos];
    println!("P {} {:?} {}", pos, selected, selected.norm());
    // It must lie within one cell of the origin.
    assert!(selected.norm() <= 0.1);
}
#[test]
fn test_find_intersection() {
    // Search along the +x axis for the unit-sphere crossing.
    let surface = |v: Point| { v.norm() - 1.0 };
    let hit = find_intersection(
        Vector3::new(0., 0., 0.),
        Vector3::new(10., 0., 0.),
        &surface,
        0.001,
        1e-6
    );
    // The intersection must land on (1, 0, 0) within tolerance.
    assert!((hit - Vector3::new(1., 0., 0.)).norm() < 0.001)
}
#[test]
fn test_make_mesh() {
    // Sample the unit-sphere SDF on a 21^3 grid with 0.1 spacing covering
    // [-1, 1]^3.
    let g = Grid::new_box_basis_grid(
        Vector3::new(-1., -1., -1.),
        Vector3::new(0.1, 0., 0.),
        Vector3::new(0., 0.1, 0.),
        Vector3::new(0., 0., 0.1),
        21, 21, 21);
    let function = |v: Point| { v.norm() - 1.0 };
    let pv = OnTheFlyPointValues{
        points: &g.points,
        function: &function
    };
    let mesh = make_mesh(&g, &pv, &function);
    // Removed: an unused Vec of per-vertex norm deviations that was allocated
    // on every run, plus the stale commented-out debug prints using it.
    // Every generated vertex must lie close to the unit sphere.
    assert!(mesh.points.iter().all(|v| { (v.norm() - 1.0).abs() < 0.15 }));
}
#[test]
fn test_partial_max(){
    // A single comparable element is its own maximum.
    let single = vec![0.1];
    assert_eq!(partial_max(&single), Some(0.1));
    // The larger of two comparable elements wins.
    let pair = vec![0.1, 0.2];
    assert_eq!(partial_max(&pair), Some(0.2));
    // A NaN (0.0/0.0) anywhere poisons the comparison and yields None.
    let with_nan = vec![0.1, 0.2, 0.0 / 0.0];
    assert_eq!(partial_max(&with_nan), None);
}
|
use s3::ByteStream;
use s3::Region;
use std::error::Error;
use std::path::Path;
use tracing_subscriber::fmt::format::FmtSpan;
use tracing_subscriber::fmt::SubscriberBuilder;
// Target S3 bucket and object key for this demo — change these to your own
// bucket & key before running.
const BUCKET: &str = "demo-bucket";
const KEY: &str = "demo-object";
/// Demo: list buckets, upload this crate's Cargo.toml, then read it back.
#[tokio::main]
async fn main() -> Result<(), Box<dyn Error>> {
    // Tracing at "info", logging span close events.
    SubscriberBuilder::default()
        .with_env_filter("info")
        .with_span_events(FmtSpan::CLOSE)
        .init();
    let config = s3::Config::builder()
        .region(Region::new("us-east-2"))
        .build();
    let client = s3::Client::from_conf(config);
    // Enumerate all buckets visible to these credentials.
    let listing = client.list_buckets().send().await?;
    for bucket in listing.buckets.unwrap_or_default() {
        println!("bucket: {:?}", bucket.name.expect("buckets have names"))
    }
    // Upload Cargo.toml as the demo object.
    let body = ByteStream::from_path(Path::new("Cargo.toml")).await?;
    let put = client
        .put_object()
        .bucket(BUCKET)
        .key(KEY)
        .body(body)
        .send()
        .await?;
    println!("Upload success. Version: {:?}", put.version_id);
    // Read the object back and dump its bytes.
    let get = client.get_object().bucket(BUCKET).key(KEY).send().await?;
    let data = get.body.collect().await?;
    println!("data: {:?}", data.into_bytes());
    Ok(())
}
|
use crate::output::{ErrorKind, FailureKind, TestCase, TestCaseResult};
use regex::Regex;
use url::Url;
lazy_static::lazy_static! {
    // Matches reproduction lines like
    //   Curl command: curl -i -X GET ... 'http://host/path'
    // capture 1 = HTTP method, capture 2 = full URL.
    static ref CURL_RE: Regex = Regex::new(r"Curl command: curl -i -X (\w+) .+ '(http://.+)'").expect("Valid regex");
}
/// Split captured stdout into one `TestCase` per "Falsifying example:" section.
/// Text before the first marker is preamble and is discarded.
pub fn process_stdout(content: &str) -> impl Iterator<Item = TestCase> {
    let mut sections = content.split("Falsifying example:");
    sections.next(); // drop everything before the first marker
    sections.map(process_case)
}
/// Classify a single falsifying-example section into a `TestCase`.
/// Panics on output it does not recognize.
fn process_case(case: &str) -> TestCase {
    // Pull method and path out of the reproduction curl command, if present.
    let (method, path) = match CURL_RE.captures(case) {
        Some(caps) => {
            let method = caps.get(1).expect("Should always match").as_str().to_owned();
            let url = caps.get(2).expect("Should always match").as_str();
            let path = Url::parse(url).expect("A valid URL").path().to_owned();
            (Some(method), Some(path))
        }
        None => (None, None),
    };
    if case.contains("AssertionError: Response content-type") {
        TestCase::new(
            method,
            path,
            TestCaseResult::Failure {
                kind: FailureKind::ContentTypeConformance,
            },
        )
    } else if case.contains("Exception: ('Invalid',") {
        TestCase::error(method, path, ErrorKind::Schema)
    } else {
        panic!("Unknown case")
    }
}
|
/// LeetCode-style `atoi`: skip leading spaces, accept one optional sign,
/// accumulate digits, stop at the first non-digit, and clamp to i32 range.
///
/// Notable (preserved) quirks: a space *after* a sign returns 0, and any
/// character after the first digit run returns the number parsed so far.
///
/// Improvements over the original: iterates `str.chars()` directly instead of
/// collecting into a `Vec<char>`, uses `i32::MAX`/`i32::MIN` instead of the
/// hand-built `-((-1 << 31) + 1)` constants, and `char::to_digit` instead of
/// manual ASCII arithmetic. Behavior is unchanged.
pub fn my_atoi(str: String) -> i32 {
    let mut num: i32 = 0;
    let mut pos = true;                  // sign of the number being built
    let mut leading_white_space = true;  // still in the optional space prefix
    let mut has_sign = false;            // a '+'/'-' has been consumed
    let mut has_digit = false;           // at least one digit has been consumed
    for c in str.chars() {
        match c {
            ' ' => {
                // Spaces end the number once parsing has started.
                if !leading_white_space || has_digit {
                    return num;
                }
                // "+ 1" style input: sign followed by a space is invalid.
                if has_sign {
                    return 0;
                }
            }
            '+' | '-' => {
                leading_white_space = false;
                if has_digit {
                    return num; // sign after digits ends the number
                }
                if has_sign {
                    return 0; // two signs is invalid
                }
                pos = c == '+';
                has_sign = true;
            }
            '0'..='9' => {
                has_digit = true;
                // Safe unwrap: the match arm guarantees a decimal digit.
                let digit = c.to_digit(10).unwrap() as i64;
                // Widen to i64 so overflow can be detected before it happens.
                if pos {
                    let wide = num as i64 * 10 + digit;
                    if wide >= i32::MAX as i64 {
                        return i32::MAX;
                    }
                    num = wide as i32;
                } else {
                    let wide = num as i64 * 10 - digit;
                    if wide <= i32::MIN as i64 {
                        return i32::MIN;
                    }
                    num = wide as i32;
                }
            }
            // Any other character terminates parsing.
            _ => return num,
        }
    }
    num
}
#[test]
fn test_my_atoi() {
    // (input, expected) pairs covering signs, leading spaces, overflow
    // clamping, double signs and trailing garbage.
    let cases = [
        ("42", 42),
        (" -42", -42),
        ("42 as a word", 42),
        ("-91283472332", -2147483648),
        ("+-2", 0),
        ("2147483646", 2147483646),
        ("0-1", 0),
    ];
    for &(input, expected) in cases.iter() {
        assert_eq!(my_atoi(String::from(input)), expected);
    }
}
use serde::{Deserialize, Serialize};
/// Full article record (translated from: 文章详细信息, "article details").
/// `id` and `date` are optional — presumably assigned by the persistence
/// layer on creation; confirm against the handlers that insert articles.
#[derive(Debug, Clone, Deserialize, Serialize)]
pub struct Article {
    pub id: Option<u32>,
    pub title: String,
    pub content: String,
    pub date: Option<chrono::NaiveDate>,
}
/// Article preview (translated from: 文章预览): list-view projection with
/// id, title and date but no body content.
#[derive(Debug, Clone, Serialize)]
pub struct ArticlePreview {
    pub id: u32,
    pub title: String,
    pub date: chrono::NaiveDate,
}
|
use sqlx::{FromRow, SqlitePool};
use sqlx::sqlite::SqliteQueryAs;
use crate::db_utils::{get_last_inserted_id, DynUpdate};
use crate::errors::ApiError;
use crate::common::time_now;
use sqlx::arguments::Arguments;
/// A row of the `Shoes` table (database columns are PascalCase).
#[derive(Debug, Serialize, Deserialize, FromRow)]
pub struct Shoe {
    #[sqlx(rename = "Id")]
    #[serde(default)] // = optional
    pub id: i32,
    // Purchase date as an integer timestamp — units TODO confirm (see time_now()).
    #[sqlx(rename = "Bought")]
    pub bought: i64,
    #[sqlx(rename = "Comment")]
    pub comment: String,
    // Creation timestamp, set server-side on insert (see Shoe::create).
    #[sqlx(rename = "Created")]
    #[serde(default)] // = optional
    pub created: i64,
}
/// Read model for the shoe list: shoe columns plus usage aggregates
/// computed by Shoe::find_all.
#[serde(rename_all = "camelCase")]
#[derive(Debug, Serialize, FromRow)]
pub struct ShoeUsedView {
    #[sqlx(rename = "Id")]
    pub id: i32,
    #[sqlx(rename = "Bought")]
    pub bought: i64,
    #[sqlx(rename = "Comment")]
    pub comment: String,
    // Number of runs recorded with this shoe.
    #[sqlx(rename = "Used")]
    pub used: i32,
    // Sum of run lengths for this shoe.
    #[sqlx(rename = "TotalLength")]
    pub total_length: i32,
}
/// Patch payload for Shoe::update; `None` fields are left unchanged.
#[serde(rename_all = "camelCase")]
#[derive(Deserialize, Debug)]
pub struct UpdateShoe {
    pub bought: Option<i64>,
    pub comment: Option<String>,
}
impl Shoe {
    /// All shoes with usage stats; the LEFT JOIN keeps shoes without runs
    /// (Used/TotalLength come back as 0 via count/ifnull).
    pub async fn find_all(pool: &SqlitePool) -> Result<Vec<ShoeUsedView>, sqlx::Error> {
        let sql = r"
select S.Id, S.Bought, S.Comment, count(R.Id) Used, ifnull(sum(R.Length), 0) TotalLength
from Shoes S left join Runs R on S.Id = R.ShoeId
group by S.Id, S.Bought, S.Comment
order by S.Bought desc
";
        let shoes = sqlx::query_as::<_, ShoeUsedView>(sql)
            .fetch_all(pool)
            .await?;
        Ok(shoes)
    }
    /// Fetch one shoe by primary key; Ok(None) when the id does not exist.
    pub async fn find_by_id(id: i32, pool: &SqlitePool) -> Result<Option<Shoe>, sqlx::Error> {
        let shoe = sqlx::query_as::<_, Shoe>(
            "SELECT * FROM Shoes WHERE id = ?"
        )
        .bind(id)
        .fetch_optional(pool)
        .await?;
        Ok(shoe)
    }
    /// Insert a new shoe (Created is filled in via time_now()) and return the
    /// generated id.
    pub async fn create(shoe: Shoe, pool: &SqlitePool) -> Result<i32, sqlx::Error> {
        let mut tx = pool.begin().await?;
        sqlx::query("INSERT INTO Shoes (Bought, Comment, Created) VALUES (?, ?, ?)")
            .bind(shoe.bought)
            .bind(shoe.comment)
            .bind(time_now())
            .execute(&mut tx)
            .await?;
        tx.commit().await?;
        // NOTE(review): last-insert-id is read on the pool *after* commit; a
        // concurrent insert could race this — confirm that is acceptable.
        return get_last_inserted_id(pool).await;
    }
    /// Apply the optional fields of `update` to one shoe inside a
    /// transaction, then re-read and return the fresh row.
    pub async fn update(id: i32, update: UpdateShoe, pool: &SqlitePool) -> Result<Shoe, ApiError> {
        // Build "SET col = ?" clauses only for fields that are Some.
        let mut dyn_update = DynUpdate::new();
        dyn_update.add_option("Bought", update.bought);
        dyn_update.add_option("Comment", update.comment);
        let sql = dyn_update.to_query_str("UPDATE Shoes SET ", " WHERE id = ?");
        dyn_update.bind_values.add(id);
        let mut tx = pool.begin().await?;
        // `execute` here yields the affected-row count (older sqlx API —
        // TODO confirm crate version).
        let q = sqlx::query(sql.as_str())
            .bind_all(dyn_update.bind_values)
            .execute(&mut tx)
            .await?;
        if q != 1 {
            tx.rollback().await?;
            return Err(ApiError::NotFound("No shoe found for id.".to_string()));
        }
        tx.commit().await?;
        // get a fresh run from the db
        return Ok(Shoe::find_by_id(id, pool)
            .await?
            .unwrap());
    }
    /// Delete a shoe by id; NotFound error when no row was affected.
    pub async fn delete(id: i32, pool: &SqlitePool) -> Result<(), ApiError> {
        let mut tx = pool.begin().await?;
        let q = sqlx::query("DELETE FROM Shoes where Id = ?")
            .bind(id)
            .execute(&mut tx)
            .await?;
        if q != 1 {
            tx.rollback().await?;
            return Err(ApiError::NotFound("No shoe found for id.".to_string()));
        }
        tx.commit().await?;
        return Ok(());
    }
}
|
use crate::utilities::intcode::execution_error::ExecutionError;
/// Addressing mode of a single intcode instruction parameter.
#[derive(Debug, Clone, Copy, PartialEq)]
pub enum ParameterMode {
    /// The parameter is an address into program memory.
    Position,
    /// The parameter is a literal value.
    Immediate,
}
impl ParameterMode {
    /// Extract the mode digit for parameter `idx` from `opcode`.
    ///
    /// Modes are encoded as decimal digits above the two-digit opcode, so
    /// parameter 0's mode is the hundreds digit, parameter 1's the thousands,
    /// etc. Unknown digits become `ExecutionError::UnknownMode`.
    /// NOTE: `10i32.pow(idx + 2)` overflows (panics in debug) for idx >= 8 —
    /// callers pass small indices.
    pub fn try_from_opcode(opcode: i32, idx: u32) -> Result<ParameterMode, ExecutionError> {
        let divisor = 10i32.pow(idx + 2);
        match ((opcode / divisor) % 10) as u8 {
            0 => Ok(ParameterMode::Position),
            1 => Ok(ParameterMode::Immediate),
            other => Err(ExecutionError::UnknownMode(other)),
        }
    }
}
|
#[macro_use]
extern crate afl;
extern crate graph_harness;
use graph_harness::*;
/// AFL fuzzing entry point: feeds arbitrary `FromCsvHarnessParams` values
/// into the CSV-loading harness.
fn main() {
    fuzz!(|data: FromCsvHarnessParams| {
        // We ignore this error because we execute only the fuzzing to find
        // the panic situations that are NOT just errors, but unhandled errors.
        let _ = from_csv_harness(data);
    });
}
|
//
// Copyright (C) 2016-2020 Abstract Horizon
// All rights reserved. This program and the accompanying materials
// are made available under the terms of the Apache License v2.0
// which accompanies this distribution, and is available at
// https://www.apache.org/licenses/LICENSE-2.0
//
// Contributors:
// Daniel Sendula - initial API and implementation
//
use std::boxed::Box;
use std::slice::Iter;
use std::io::Write;
use byteorder::{WriteBytesExt, LittleEndian};
/// Describes how one telemetry field type is declared in stream metadata.
pub trait FieldType {
    /// Serialized size in bytes; 0 for types whose size comes from the
    /// field definition (string/bytes).
    fn size(&self) -> usize;
    /// One-letter type code used in the JSON definition.
    fn type_shortcode(&self) -> &'static str;
    /// Full JSON fragment for this field; `size` is the per-field size.
    fn definition_to_json(&self, size: usize) -> String;
    /// Shared `"type" : "<code>"` prefix of every definition.
    fn common_definition_to_json(&self) -> String {
        format!("\"type\" : \"{}\"", self.type_shortcode())
    }
}
// One unit struct per wire type. Numeric types emit a "signed" flag in their
// JSON definition; string/bytes emit the explicit "size" instead.
/// 8-bit unsigned integer field (shortcode "b", signed = false).
pub struct FieldTypeUnsignedByte;
impl FieldType for FieldTypeUnsignedByte {
    fn size(&self) -> usize { 1 }
    fn definition_to_json(&self, _: usize) -> String {
        format!("{}, \"signed\" : \"{}\"", self.common_definition_to_json(), "false")
    }
    fn type_shortcode(&self) -> &'static str { "b" }
}
/// 8-bit signed integer field. NOTE(review): shares shortcode "b" with the
/// unsigned variant — the "signed" flag is the only distinguishing metadata.
struct FieldTypeSignedByte;
impl FieldType for FieldTypeSignedByte {
    fn size(&self) -> usize { 1 }
    fn definition_to_json(&self, _: usize) -> String {
        format!("{}, \"signed\" : \"{}\"", self.common_definition_to_json(), "true")
    }
    fn type_shortcode(&self) -> &'static str { "b" }
}
/// 16-bit unsigned integer field (shortcode "w").
struct FieldTypeUnsignedWord;
impl FieldType for FieldTypeUnsignedWord {
    fn size(&self) -> usize { 2 }
    fn type_shortcode(&self) -> &'static str { "w" }
    fn definition_to_json(&self, _: usize) -> String {
        format!("{}, \"signed\" : \"{}\"", self.common_definition_to_json(), "false")
    }
}
/// 16-bit signed integer field (shortcode "w").
struct FieldTypeSignedWord;
impl FieldType for FieldTypeSignedWord {
    fn size(&self) -> usize { 2 }
    fn type_shortcode(&self) -> &'static str { "w" }
    fn definition_to_json(&self, _: usize) -> String {
        format!("{}, \"signed\" : \"{}\"", self.common_definition_to_json(), "true")
    }
}
/// 32-bit unsigned integer field (shortcode "i").
struct FieldTypeUnsignedInteger;
impl FieldType for FieldTypeUnsignedInteger {
    fn size(&self) -> usize { 4 }
    fn type_shortcode(&self) -> &'static str { "i" }
    fn definition_to_json(&self, _: usize) -> String {
        format!("{}, \"signed\" : \"{}\"", self.common_definition_to_json(), "false")
    }
}
/// 32-bit signed integer field (shortcode "i").
struct FieldTypeSignedInteger;
impl FieldType for FieldTypeSignedInteger {
    fn size(&self) -> usize { 4 }
    fn type_shortcode(&self) -> &'static str { "i" }
    fn definition_to_json(&self, _: usize) -> String {
        format!("{}, \"signed\" : \"{}\"", self.common_definition_to_json(), "true")
    }
}
/// 64-bit unsigned integer field (shortcode "l").
struct FieldTypeUnsignedLong;
impl FieldType for FieldTypeUnsignedLong {
    fn size(&self) -> usize { 8 }
    fn type_shortcode(&self) -> &'static str { "l" }
    fn definition_to_json(&self, _: usize) -> String {
        format!("{}, \"signed\" : \"{}\"", self.common_definition_to_json(), "false")
    }
}
/// 64-bit signed integer field (shortcode "l").
struct FieldTypeSignedLong;
impl FieldType for FieldTypeSignedLong {
    fn size(&self) -> usize { 8 }
    fn type_shortcode(&self) -> &'static str { "l" }
    fn definition_to_json(&self, _: usize) -> String {
        format!("{}, \"signed\" : \"{}\"", self.common_definition_to_json(), "true")
    }
}
/// 32-bit float field (shortcode "f"); no extra definition attributes.
struct FieldTypeFloat;
impl FieldType for FieldTypeFloat {
    fn size(&self) -> usize { 4 }
    fn type_shortcode(&self) -> &'static str { "f" }
    fn definition_to_json(&self, _: usize) -> String { self.common_definition_to_json() }
}
/// 64-bit float field (shortcode "d"); no extra definition attributes.
struct FieldTypeDouble;
impl FieldType for FieldTypeDouble {
    fn size(&self) -> usize { 8 }
    fn type_shortcode(&self) -> &'static str { "d" }
    fn definition_to_json(&self, _: usize) -> String { self.common_definition_to_json() }
}
/// Fixed-size string field (shortcode "s"); intrinsic size is 0, the actual
/// size comes from the field definition.
struct FieldTypeString;
impl FieldType for FieldTypeString {
    fn size(&self) -> usize { 0 }
    fn type_shortcode(&self) -> &'static str { "s" }
    fn definition_to_json(&self, size: usize) -> String {
        format!("{}, \"size\" : \"{}\"", self.common_definition_to_json(), size)
    }
}
/// Fixed-size raw byte-array field (shortcode "a"); sized like string.
struct FieldTypeBytes;
impl FieldType for FieldTypeBytes {
    fn size(&self) -> usize { 0 }
    fn type_shortcode(&self) -> &'static str { "a" }
    fn definition_to_json(&self, size: usize) -> String {
        format!("{}, \"size\" : \"{}\"", self.common_definition_to_json(), size)
    }
}
/// Serialize `self` onto the end of a little-endian byte buffer.
pub trait Storable {
    fn store(&self, buf: &mut Vec<u8>);
}
// Writes into a Vec cannot fail, so the io::Result values below are
// deliberately discarded with `let _ =`.
impl Storable for u8 {
    fn store(&self, buf: &mut Vec<u8>) { buf.push(*self); }
}
impl Storable for i8 {
    fn store(&self, buf: &mut Vec<u8>) { buf.push(*self as u8); }
}
impl Storable for u16 {
    fn store(&self, buf: &mut Vec<u8>) { let _ = buf.write_u16::<LittleEndian>(*self); }
}
impl Storable for i16 {
    fn store(&self, buf: &mut Vec<u8>) { let _ = buf.write_i16::<LittleEndian>(*self); }
}
impl Storable for u32 {
    fn store(&self, buf: &mut Vec<u8>) { let _ = buf.write_u32::<LittleEndian>(*self); }
}
impl Storable for i32 {
    fn store(&self, buf: &mut Vec<u8>) { let _ = buf.write_i32::<LittleEndian>(*self); }
}
impl Storable for u64 {
    fn store(&self, buf: &mut Vec<u8>) { let _ = buf.write_u64::<LittleEndian>(*self); }
}
impl Storable for i64 {
    fn store(&self, buf: &mut Vec<u8>) { let _ = buf.write_i64::<LittleEndian>(*self); }
}
impl Storable for f32 {
    fn store(&self, buf: &mut Vec<u8>) { let _ = buf.write_f32::<LittleEndian>(*self); }
}
impl Storable for f64 {
    fn store(&self, buf: &mut Vec<u8>) { let _ = buf.write_f64::<LittleEndian>(*self); }
}
// NOTE(review): strings/bytes are written raw, with no length prefix and no
// padding to the declared field size — confirm callers enforce field sizes.
impl Storable for &String {
    fn store(&self, buf: &mut Vec<u8>) { let _ = buf.write(self.as_bytes()); }
}
impl Storable for &Vec<u8> {
    fn store(&self, buf: &mut Vec<u8>) { let _ = buf.write(self); }
}
impl Storable for &[u8] {
    fn store(&self, buf: &mut Vec<u8>) { let _ = buf.write(self); }
}
// ----------------------------------------------------------------------------------------------------------
/// Pairs a concrete `FieldType` with its field name and serialized size.
/// For scalars `field_size` mirrors the type's intrinsic size; for
/// string/bytes fields it carries the explicit per-field size.
pub struct TelemetryStreamFieldStruct<T: FieldType> {
    field_name: &'static str,
    field_size: usize,
    pub field_type: T
}
/// Object-safe view over a stream field, independent of its concrete type.
pub trait TelemetryStreamField {
    fn name(&self) -> &'static str;
    fn to_json(&self) -> String;
    fn size(&self) -> usize;
}
impl<T: FieldType> TelemetryStreamField for TelemetryStreamFieldStruct<T> {
    fn name(&self) -> &'static str { self.field_name }
    fn to_json(&self) -> String {
        self.field_type.definition_to_json(self.field_size)
    }
    fn size(&self) -> usize { self.field_size }
}
// ----------------------------------------------------------------------------------------------------------
/// A named telemetry stream: its id, per-record fixed length (including the
/// leading 8-byte time field), the pre-built binary record header, and the
/// ordered field list.
pub struct TelemetryStreamDefinition {
    pub name: &'static str,
    stream_id: u32,
    fixed_length: usize,
    header: Vec<u8>,
    fields:Vec<Box<dyn TelemetryStreamField + Sync + Send>>
}
impl TelemetryStreamDefinition {
    /// Build a stream definition, pre-computing the fixed record length and
    /// the binary record header.
    ///
    /// Header layout: one flag byte (bit 0 set => 2-byte stream id;
    /// +2 => 2-byte record length; +4 => 4-byte record length), then the
    /// stream id, then the record length, all little-endian.
    pub fn new(name: &'static str, stream_id: u32, fields: Vec<Box<dyn TelemetryStreamField + Sync + Send>>) -> TelemetryStreamDefinition {
        let fixed_length: usize = fields.iter().map(|field| field.size()).sum();
        let fixed_length = fixed_length + 8; // extra time field at the beginning of record
        let mut header : Vec<u8> = Vec::new();
        let header_byte = if stream_id < 256 { 0 } else { 1 } + if fixed_length < 256 { 0 } else if fixed_length < 65536 { 2 } else { 4 };
        header.push(header_byte);
        if stream_id < 256 {
            header.push(stream_id as u8);
        } else {
            // NOTE(review): stream ids >= 65536 are silently truncated by
            // this u16 cast — confirm ids stay below 65536.
            let _ = header.write_u16::<LittleEndian>(stream_id as u16);
        }
        if fixed_length < 256 {
            header.push(fixed_length as u8);
        } else if fixed_length < 65536 {
            // BUG FIX: this threshold was 655356 (typo for 65536), which
            // disagreed with the cut-off used for `header_byte` above and
            // wrote a truncated u16 length for records whose flag byte
            // declared a u32 length.
            let _ = header.write_u16::<LittleEndian>(fixed_length as u16);
        } else {
            let _ = header.write_u32::<LittleEndian>(fixed_length as u32);
        }
        TelemetryStreamDefinition {
            name,
            stream_id,
            fields,
            fixed_length,
            header
        }
    }
    #[allow(dead_code)]
    pub fn name(&self) -> &'static str {
        self.name
    }
    /// JSON description of the stream: id, name and per-field definitions.
    pub fn to_json(&self) -> String {
        let mut s = String::from("");
        let mut first = true;
        for field in self.fields.iter() {
            if first { first = false; } else { s.push_str(", ") }
            s.push_str(format!("\"{}\" : {{ {} }}", field.name(), field.to_json()).as_str());
        }
        format!("{{ \"id\" : {}, \"name\" : \"{}\", \"fields\" : {{ {} }} }}", self.stream_id, self.name, s)
    }
    /// Total serialized record size: header bytes plus the fixed payload
    /// (which already includes the leading 8-byte time field).
    pub fn size(&self) -> usize {
        self.fixed_length + self.header.len()
    }
    /// Append the pre-built record header to `buf`.
    pub fn write_header(&self, buf: &mut Vec<u8>) {
        let _ = buf.write(&self.header);
    }
    /// Iterate the stream's fields in declaration order.
    pub fn fields(&self) -> Iter<Box<dyn TelemetryStreamField + Sync + Send>> {
        self.fields.iter()
    }
    // --- Factory helpers: one boxed field per wire type; string/bytes take
    // an explicit per-field size. ---
    #[allow(dead_code)]
    pub fn unsigned_byte_field(name: &'static str) -> Box<dyn TelemetryStreamField + Sync + Send> {
        Box::new(TelemetryStreamFieldStruct::<FieldTypeUnsignedByte> {
            field_name: name,
            field_type: FieldTypeUnsignedByte,
            field_size: FieldTypeUnsignedByte.size(),
        })
    }
    #[allow(dead_code)]
    pub fn signed_byte_field(name: &'static str) -> Box<dyn TelemetryStreamField + Sync + Send> {
        Box::new(TelemetryStreamFieldStruct::<FieldTypeSignedByte> {
            field_name: name,
            field_type: FieldTypeSignedByte,
            field_size: FieldTypeSignedByte.size(),
        })
    }
    #[allow(dead_code)]
    pub fn unsigned_word_field(name: &'static str) -> Box<dyn TelemetryStreamField + Sync + Send> {
        Box::new(TelemetryStreamFieldStruct::<FieldTypeUnsignedWord> {
            field_name: name,
            field_type: FieldTypeUnsignedWord,
            field_size: FieldTypeUnsignedWord.size(),
        })
    }
    #[allow(dead_code)]
    pub fn signed_word_field(name: &'static str) -> Box<dyn TelemetryStreamField + Sync + Send> {
        Box::new(TelemetryStreamFieldStruct::<FieldTypeSignedWord> {
            field_name: name,
            field_type: FieldTypeSignedWord,
            field_size: FieldTypeSignedWord.size(),
        })
    }
    #[allow(dead_code)]
    pub fn unsigned_integer_field(name: &'static str) -> Box<dyn TelemetryStreamField + Sync + Send> {
        Box::new(TelemetryStreamFieldStruct::<FieldTypeUnsignedInteger> {
            field_name: name,
            field_type: FieldTypeUnsignedInteger,
            field_size: FieldTypeUnsignedInteger.size(),
        })
    }
    #[allow(dead_code)]
    pub fn signed_integer_field(name: &'static str) -> Box<dyn TelemetryStreamField + Sync + Send> {
        Box::new(TelemetryStreamFieldStruct::<FieldTypeSignedInteger> {
            field_name: name,
            field_type: FieldTypeSignedInteger,
            field_size: FieldTypeSignedInteger.size(),
        })
    }
    #[allow(dead_code)]
    pub fn unsigned_long_field(name: &'static str) -> Box<dyn TelemetryStreamField + Sync + Send> {
        Box::new(TelemetryStreamFieldStruct::<FieldTypeUnsignedLong> {
            field_name: name,
            field_type: FieldTypeUnsignedLong,
            field_size: FieldTypeUnsignedLong.size(),
        })
    }
    #[allow(dead_code)]
    pub fn signed_long_field(name: &'static str) -> Box<dyn TelemetryStreamField + Sync + Send> {
        Box::new(TelemetryStreamFieldStruct::<FieldTypeSignedLong> {
            field_name: name,
            field_type: FieldTypeSignedLong,
            field_size: FieldTypeSignedLong.size(),
        })
    }
    #[allow(dead_code)]
    pub fn float_field(name: &'static str) -> Box<dyn TelemetryStreamField + Sync + Send> {
        Box::new(TelemetryStreamFieldStruct::<FieldTypeFloat> {
            field_name: name,
            field_type: FieldTypeFloat,
            field_size: FieldTypeFloat.size(),
        })
    }
    #[allow(dead_code)]
    pub fn double_field(name: &'static str) -> Box<dyn TelemetryStreamField + Sync + Send> {
        Box::new(TelemetryStreamFieldStruct::<FieldTypeDouble> {
            field_name: name,
            field_type: FieldTypeDouble,
            field_size: FieldTypeDouble.size(),
        })
    }
    #[allow(dead_code)]
    pub fn string_field(name: &'static str, string_size: usize) -> Box<dyn TelemetryStreamField + Sync + Send> {
        Box::new(TelemetryStreamFieldStruct::<FieldTypeString> {
            field_name: name,
            field_type: FieldTypeString,
            field_size: string_size,
        })
    }
    #[allow(dead_code)]
    pub fn bytes_field(name: &'static str, bytes_size: usize) -> Box<dyn TelemetryStreamField + Sync + Send> {
        Box::new(TelemetryStreamFieldStruct::<FieldTypeBytes> {
            field_name: name,
            field_type: FieldTypeBytes,
            field_size: bytes_size,
        })
    }
}
|
use super::*;
#[tokio::test]
async fn test_local_interfaces() -> Result<(), Error> {
    // Virtual network with default settings.
    let vnet = Arc::new(Net::new(None));
    let interfaces = vnet.get_interfaces().await;
    // Gather local IPs for UDP over IPv4 and IPv6 with no interface filter.
    // NOTE(review): this test only logs the results — it asserts nothing
    // beyond "does not error"; confirm that is intentional.
    let ips = local_interfaces(&vnet, &None, &[NetworkType::Udp4, NetworkType::Udp6]).await;
    log::info!("interfaces: {:?}, ips: {:?}", interfaces, ips);
    Ok(())
}
|
use sdl2::pixels::Color;
use sdl2::rect::Point;
use sdl2::render::WindowCanvas;
use super::constants::{WORLD_HEIGHT, WORLD_WIDTH, WORLD_X_UPPER_BOUND, WORLD_Y_UPPER_BOUND};
/// Every material the falling-sand world can contain; `Empty` marks a
/// vacant cell.
#[derive(Eq, PartialEq, Copy, Clone)]
pub enum ParticleType {
    Empty,
    Sand,
    Water,
    Wood,
    Iron,
    Fire,
    Acid,
    Smoke,
    Steam,
    Lava,
}
/// One cell of the world grid.
#[derive(Copy, Clone)]
pub struct Particle {
    pub pt: ParticleType,
    // Used by fire/acid as a one-tick arming delay before they act.
    pub should_burn: bool,
    // Set when the particle has already been moved/handled this tick.
    pub updated: bool,
}
/// Flat storage of the whole world; see point_to_index for the layout.
pub type Particles = Vec<Particle>;
/// Map world coordinates to the flat buffer index. Both axes are mirrored
/// against their upper bounds, so index 0 is the far corner.
pub fn point_to_index(x: u32, y: u32) -> usize {
    let row = WORLD_Y_UPPER_BOUND - y;
    let col = WORLD_X_UPPER_BOUND - x;
    (row * WORLD_WIDTH + col) as usize
}
/// Inverse of `point_to_index`: recover mirrored world coordinates from a
/// flat buffer index.
pub fn index_to_point(index: usize) -> (u32, u32) {
    let idx = index as u32;
    let col = idx % WORLD_WIDTH;
    let row = (idx - col) / WORLD_WIDTH;
    (WORLD_X_UPPER_BOUND - col, WORLD_Y_UPPER_BOUND - row)
}
/// True when the cell at (x, y) exists and holds a particle of type `pt`.
/// Out-of-bounds coordinates are simply "not that type".
fn check_if_type(x: u32, y: u32, particles: &Particles, pt: ParticleType) -> bool {
    if x >= WORLD_WIDTH || y >= WORLD_HEIGHT {
        return false;
    }
    particles[point_to_index(x, y)].pt == pt
}
/// True when the (in-bounds) cell at (x, y) is vacant.
fn check_if_empty(x: u32, y: u32, particles: &Particles) -> bool {
    check_if_type(x, y, particles, ParticleType::Empty)
}
/// Advance the whole particle world by one simulation tick.
///
/// BUG FIX: both loops previously started at index 1, so the particle at
/// index 0 (the far corner of the world) was never reset nor simulated.
pub fn update_particles(particles: &mut Particles) {
    // Clear the per-tick "already moved" flags.
    for particle in particles.iter_mut() {
        particle.updated = false;
    }
    for i in 0..particles.len() {
        let mut particle = particles[i];
        let (x, y) = index_to_point(i);
        // Skip particles that a `swap` already moved into place this tick.
        if particle.updated {
            continue;
        }
        // NOTE(review): this flag is set on a local copy and never written
        // back to the array; destinations are marked inside `swap` instead —
        // confirm this is intended.
        particle.updated = true;
        match particle.pt {
            ParticleType::Sand => update_sand(x, y, particles),
            ParticleType::Water => update_water(x, y, particles),
            ParticleType::Empty => {}
            ParticleType::Wood => {}
            ParticleType::Iron => {}
            ParticleType::Fire => update_fire(x, y, particles),
            ParticleType::Lava => update_lava(x, y, particles),
            ParticleType::Acid => update_acid(x, y, particles),
            ParticleType::Smoke => update_smoke(x, y, particles),
            ParticleType::Steam => update_steam(x, y, particles),
        }
    }
}
/// Render every non-empty particle as a single colored point.
pub fn draw_particles(particles: &Particles, canvas: &mut WindowCanvas) {
    for (i, particle) in particles.iter().enumerate() {
        let (x, y) = index_to_point(i);
        // Map the material to its display color; Empty draws nothing.
        let color = match particle.pt {
            ParticleType::Empty => None,
            ParticleType::Sand => Some(Color::YELLOW),
            ParticleType::Water => Some(Color::BLUE),
            ParticleType::Wood => Some(Color::RGB(165, 42, 42)),
            ParticleType::Iron => Some(Color::RGB(192, 192, 192)),
            ParticleType::Fire => Some(Color::RED),
            ParticleType::Acid => Some(Color::GREEN),
            ParticleType::Smoke => Some(Color::GRAY),
            ParticleType::Steam => Some(Color::WHITE),
            ParticleType::Lava => Some(Color::MAGENTA),
        };
        if let Some(c) = color {
            canvas.set_draw_color(c);
            canvas.draw_point(Point::new(x as i32, y as i32));
        }
    }
}
/// All in-bounds neighbours of (x, y): the four orthogonal cells first,
/// then the four diagonals, matching the original ordering.
fn gen_adjacent_cell_list(x: u32, y: u32) -> Vec<(u32, u32)> {
    let (cx, cy) = (x as i32, y as i32);
    let deltas = [
        (1, 0), (-1, 0), (0, 1), (0, -1),
        (1, 1), (1, -1), (-1, 1), (-1, -1),
    ];
    deltas
        .iter()
        .map(|&(dx, dy)| (cx + dx, cy + dy))
        .filter(|&(nx, ny)| {
            nx >= 0
                && ny >= 0
                && nx <= (WORLD_WIDTH as i32 - 1)
                && ny <= (WORLD_HEIGHT as i32 - 1)
        })
        .map(|(nx, ny)| (nx as u32, ny as u32))
        .collect()
}
/// Exchange the particles at indices `i` and `j`, marking the particle that
/// ends up at `j` as handled for this tick.
fn swap(i: usize, j: usize, particles: &mut Particles) {
    particles.swap(i, j);
    particles[j].updated = true;
}
/// Granular-solid motion: fall straight down through empty space or water,
/// otherwise slide diagonally down-left, then down-right.
fn solid_particle_movement(x: u32, y: u32, particles: &mut Particles) {
    let source = point_to_index(x, y);
    // Straight down, displacing water if present.
    if check_if_empty(x, y + 1, particles)
        || check_if_type(x, y + 1, particles, ParticleType::Water)
    {
        swap(source, point_to_index(x, y + 1), particles);
        return;
    }
    // Diagonal down-left (guard against u32 underflow at the left edge).
    if x > 0 && check_if_empty(x - 1, y + 1, particles) {
        swap(source, point_to_index(x - 1, y + 1), particles);
        return;
    }
    // Diagonal down-right; out-of-bounds checks happen inside check_if_empty.
    if check_if_empty(x + 1, y + 1, particles) {
        swap(source, point_to_index(x + 1, y + 1), particles);
    }
}
/// Liquid motion. Candidate destinations in priority order:
/// down, down-left, down-right, left, right; take the first empty one.
fn liquid_particle_movement(x: u32, y: u32, particles: &mut Particles) {
    let moves: [(i64, i64); 5] = [(0, 1), (-1, 1), (1, 1), (-1, 0), (1, 0)];
    for &(dx, dy) in moves.iter() {
        let nx = x as i64 + dx;
        let ny = y as i64 + dy;
        // Skip moves past the left edge; the right/bottom edges are rejected
        // inside check_if_empty.
        if nx < 0 {
            continue;
        }
        let (nx, ny) = (nx as u32, ny as u32);
        if check_if_empty(nx, ny, particles) {
            swap(point_to_index(x, y), point_to_index(nx, ny), particles);
            return;
        }
    }
}
/// Gas motion: rise instead of fall. Priority order: up, up-left, up-right,
/// left, right; take the first empty destination.
fn gas_particle_movement(x: u32, y: u32, particles: &mut Particles) {
    // A gas particle already at the top row stays put.
    if y == 0 {
        return;
    }
    let moves: [(i64, i64); 5] = [(0, -1), (-1, -1), (1, -1), (-1, 0), (1, 0)];
    for &(dx, dy) in moves.iter() {
        let nx = x as i64 + dx;
        let ny = y as i64 + dy;
        // Skip moves past the left edge; other bounds are handled by
        // check_if_empty. ny >= 0 is guaranteed by the y == 0 guard.
        if nx < 0 {
            continue;
        }
        let (nx, ny) = (nx as u32, ny as u32);
        if check_if_empty(nx, ny, particles) {
            swap(point_to_index(x, y), point_to_index(nx, ny), particles);
            return;
        }
    }
}
/// Sand behaves as a granular solid.
fn update_sand(x: u32, y: u32, particles: &mut Particles) {
    solid_particle_movement(x, y, particles);
}
/// Water behaves as a liquid.
fn update_water(x: u32, y: u32, particles: &mut Particles) {
    liquid_particle_movement(x, y, particles);
}
/// Fire arms itself for one tick (via `should_burn`), then ignites all
/// adjacent wood; if nothing was ignited this tick it decays into smoke.
fn update_fire(x: u32, y: u32, particles: &mut Particles) {
    let index = point_to_index(x, y);
    let mut particle = particles[index];
    // First tick after ignition: arm the particle and wait one frame.
    if !particle.should_burn {
        particle.should_burn = true;
        particles[index] = particle;
        return;
    }
    let cells = gen_adjacent_cell_list(x, y);
    let mut burnt = false;
    for (i, j) in cells {
        // Ignite neighbouring wood; the new fire starts un-armed.
        if check_if_type(i, j, particles, ParticleType::Wood) {
            let dest = point_to_index(i, j);
            particles[dest].pt = ParticleType::Fire;
            particles[dest].should_burn = false;
            burnt = true;
        }
    }
    // Nothing left to burn: decay into smoke.
    if !burnt {
        particle.pt = ParticleType::Smoke;
        particle.should_burn = false;
    }
    particles[index] = particle;
}
/// Lava ignites adjacent wood, flash-boils adjacent water (this cell becomes
/// steam, the water cell empties), and otherwise flows like a liquid.
fn update_lava(x: u32, y: u32, particles: &mut Particles) {
    let cells = gen_adjacent_cell_list(x, y);
    let mut evaporated = false;
    for (i, j) in cells {
        if check_if_type(i, j, particles, ParticleType::Wood) {
            // Neighbouring wood catches fire (un-armed).
            let dest = point_to_index(i, j);
            particles[dest].pt = ParticleType::Fire;
            particles[dest].should_burn = false;
        }
        if check_if_type(i, j, particles, ParticleType::Water) {
            // This lava cell turns to steam; the touching water vanishes.
            let index = point_to_index(x, y);
            particles[index].pt = ParticleType::Steam;
            let dest = point_to_index(i, j);
            particles[dest].pt = ParticleType::Empty;
            evaporated = true;
        }
    }
    // A cell that just became steam must not also flow as liquid this tick.
    if evaporated {
        return;
    }
    liquid_particle_movement(x, y, particles);
}
/// Acid arms itself for one tick (via `should_burn`), corrodes at most one
/// adjacent iron cell per tick (converting it to acid), then flows like a
/// liquid.
fn update_acid(x: u32, y: u32, particles: &mut Particles) {
    let index = point_to_index(x, y);
    let mut particle = particles[index];
    // First tick: arm the particle and wait one frame.
    if !particle.should_burn {
        particle.should_burn = true;
        particles[index] = particle;
        return;
    }
    let cells = gen_adjacent_cell_list(x, y);
    for (i, j) in cells {
        if check_if_type(i, j, particles, ParticleType::Iron) {
            // Convert the first adjacent iron cell into (un-armed) acid.
            let dest = point_to_index(i, j);
            particles[dest].pt = ParticleType::Acid;
            particles[dest].should_burn = false;
            break;
        }
    }
    liquid_particle_movement(x, y, particles);
}
/// Smoke rises like a gas.
fn update_smoke(x: u32, y: u32, particles: &mut Particles) {
    gas_particle_movement(x, y, particles);
}
/// Steam rises like a gas.
fn update_steam(x: u32, y: u32, particles: &mut Particles) {
    gas_particle_movement(x, y, particles);
}
|
use std::fmt::format;
use std::io::{Read, Write};
use std::net::{IpAddr, Ipv4Addr, SocketAddr, TcpListener};
use std::str::FromStr;
use std::string::ParseError;
/// The first line of an HTTP request, split into its three tokens.
/// Fields are `None` when the corresponding token was missing.
#[derive(Debug)]
struct RequestLine {
    method: Option<String>,
    path: Option<String>,
    protocol: Option<String>
}
impl RequestLine {
    /// HTTP method, or "" when absent.
    fn method(&self) -> String {
        self.method.clone().unwrap_or_default()
    }
    /// Request path, or "" when absent.
    fn path(&self) -> String {
        self.path.clone().unwrap_or_default()
    }
    /// Last path segment, e.g. "123" for "/order/status/123"; "" when the
    /// path is empty or ends with '/'.
    ///
    /// Simplified from the original, which collected every segment into a
    /// Vec<String> via a pointless infallible `parse().unwrap()` per token.
    fn get_order_number(&self) -> String {
        let path = self.path();
        // rsplit always yields at least one item, so unwrap_or is defensive.
        path.rsplit('/').next().unwrap_or("").to_string()
    }
}
impl FromStr for RequestLine {
    type Err = ParseError;
    /// Parse "METHOD PATH PROTOCOL" from a request line. Never fails;
    /// missing tokens simply become `None`.
    fn from_str(msg: &str) -> Result<Self, Self::Err> {
        let mut tokens = msg.split_ascii_whitespace();
        let method = tokens.next().map(String::from);
        let path = tokens.next().map(String::from);
        let protocol = tokens.next().map(String::from);
        Ok(Self {
            method,
            path,
            protocol,
        })
    }
}
/// Minimal blocking HTTP server answering order-status queries on 127.0.0.1:3111.
///
/// Only `GET /order/status/<n>` gets a 200; anything else gets a 404 with an
/// explanatory body. Runs forever; panics on socket or UTF-8 errors.
fn main() {
    let port = 3111;
    let socket_addr = SocketAddr::new(IpAddr::V4(
        Ipv4Addr::new(127, 0, 0, 1)), port);
    let listener = TcpListener::bind(
        socket_addr).unwrap();
    println!("Running on port: {}", port);
    for stream in listener.incoming() {
        let mut stream = stream.unwrap();
        let mut buff = [0; 200];
        // Only the bytes actually read belong to the request; the original
        // parsed the whole zero-padded buffer.
        let n = stream.read(&mut buff).unwrap();
        let string_request_line = std::str::from_utf8(&buff[..n])
            .unwrap()
            .lines()
            .next()
            .unwrap_or_else(|| {
                println!("Invalid request line received");
                ""
            });
        let req_line = RequestLine::from_str(string_request_line).unwrap();
        println!("len is {}", req_line.get_order_number().len());
        let order_status;
        let html_response_string;
        if req_line.method() != "GET"
            || !req_line.path().starts_with("/order/status")
            || req_line.get_order_number().is_empty()
        {
            order_status = if req_line.get_order_number().is_empty() {
                String::from("Please provide valid order number")
            } else {
                String::from("Sorry, this page is not found")
            };
            // Bug fix: the original embedded a literal line break and
            // indentation inside the Content-Type header, producing a
            // malformed HTTP response.
            html_response_string = format!(
                "HTTP/1.1 404 Not Found\nContent-Type: text/html\nContent-Length:{}\n\n{}",
                order_status.len(),
                order_status
            );
        } else {
            order_status = format!(
                "Order status for order number {} is: Shipped\n",
                req_line.get_order_number()
            );
            html_response_string = format!(
                "HTTP/1.1 200 OK\nContent-Type: text/html\nContent-Length:{}\n\n{}",
                order_status.len(),
                order_status
            );
        }
        // Typo fix: "resond" -> "respond".
        println!("\nGoing to respond to client with: \n\n{}", html_response_string);
        stream.write(html_response_string.as_bytes()).unwrap();
    }
} |
use std::rc::Rc;
fn main() {
    // The heap-allocated value now has three shared owners,
    // tracked by Rc's reference count.
    let a = Rc::new(1);
    let b = a.clone();
    let c = a.clone();
}
//
use std::cell::RefCell;
fn main() {
    let data = RefCell::new(1);
    {
        // Take a mutable borrow of the RefCell's interior value; the scope
        // ends this borrow before `data.borrow()` below, avoiding a
        // runtime borrow conflict.
        let mut v = data.borrow_mut();
        *v += 1;
    }
    println!("data: {:?}", data.borrow());
}
// =================================================================
|
// svd2rust-generated memory map of the OTG_FS host registers; field order and
// the _reserved padding arrays define the hardware offsets, so the layout must
// not be altered.
#[doc = r"Register block"]
#[repr(C)]
pub struct RegisterBlock {
    #[doc = "0x00 - OTG_FS host configuration register (OTG_FS_HCFG)"]
    pub hcfg: HCFG,
    #[doc = "0x04 - OTG_FS Host frame interval register"]
    pub hfir: HFIR,
    #[doc = "0x08 - OTG_FS host frame number/frame time remaining register (OTG_FS_HFNUM)"]
    pub hfnum: HFNUM,
    // Padding so `hptxsts` lands at its documented offset 0x10.
    _reserved3: [u8; 0x04],
    #[doc = "0x10 - OTG_FS_Host periodic transmit FIFO/queue status register (OTG_FS_HPTXSTS)"]
    pub hptxsts: HPTXSTS,
    #[doc = "0x14 - OTG_FS Host all channels interrupt register"]
    pub haint: HAINT,
    #[doc = "0x18 - OTG_FS host all channels interrupt mask register"]
    pub haintmsk: HAINTMSK,
    // Padding so `hprt` lands at offset 0x40.
    _reserved6: [u8; 0x24],
    #[doc = "0x40 - OTG_FS host port control and status register (OTG_FS_HPRT)"]
    pub hprt: HPRT,
    // Padding so the host-channel array starts at offset 0x100.
    _reserved7: [u8; 0xbc],
    #[doc = "0x100..0x280 - Host channel"]
    pub hc: [HC; 12],
}
#[doc = "HCFG (rw) register accessor: OTG_FS host configuration register (OTG_FS_HCFG)\n\nYou can [`read`](crate::generic::Reg::read) this register and get [`hcfg::R`]. You can [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero) this register using [`hcfg::W`]. You can also [`modify`](crate::generic::Reg::modify) this register. See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [`hcfg`]
module"]
pub type HCFG = crate::Reg<hcfg::HCFG_SPEC>;
#[doc = "OTG_FS host configuration register (OTG_FS_HCFG)"]
pub mod hcfg;
#[doc = "HFIR (rw) register accessor: OTG_FS Host frame interval register\n\nYou can [`read`](crate::generic::Reg::read) this register and get [`hfir::R`]. You can [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero) this register using [`hfir::W`]. You can also [`modify`](crate::generic::Reg::modify) this register. See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [`hfir`]
module"]
pub type HFIR = crate::Reg<hfir::HFIR_SPEC>;
#[doc = "OTG_FS Host frame interval register"]
pub mod hfir;
#[doc = "HFNUM (r) register accessor: OTG_FS host frame number/frame time remaining register (OTG_FS_HFNUM)\n\nYou can [`read`](crate::generic::Reg::read) this register and get [`hfnum::R`]. See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [`hfnum`]
module"]
pub type HFNUM = crate::Reg<hfnum::HFNUM_SPEC>;
#[doc = "OTG_FS host frame number/frame time remaining register (OTG_FS_HFNUM)"]
pub mod hfnum;
#[doc = "HPTXSTS (rw) register accessor: OTG_FS_Host periodic transmit FIFO/queue status register (OTG_FS_HPTXSTS)\n\nYou can [`read`](crate::generic::Reg::read) this register and get [`hptxsts::R`]. You can [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero) this register using [`hptxsts::W`]. You can also [`modify`](crate::generic::Reg::modify) this register. See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [`hptxsts`]
module"]
pub type HPTXSTS = crate::Reg<hptxsts::HPTXSTS_SPEC>;
#[doc = "OTG_FS_Host periodic transmit FIFO/queue status register (OTG_FS_HPTXSTS)"]
pub mod hptxsts;
#[doc = "HAINT (r) register accessor: OTG_FS Host all channels interrupt register\n\nYou can [`read`](crate::generic::Reg::read) this register and get [`haint::R`]. See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [`haint`]
module"]
pub type HAINT = crate::Reg<haint::HAINT_SPEC>;
#[doc = "OTG_FS Host all channels interrupt register"]
pub mod haint;
#[doc = "HAINTMSK (rw) register accessor: OTG_FS host all channels interrupt mask register\n\nYou can [`read`](crate::generic::Reg::read) this register and get [`haintmsk::R`]. You can [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero) this register using [`haintmsk::W`]. You can also [`modify`](crate::generic::Reg::modify) this register. See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [`haintmsk`]
module"]
pub type HAINTMSK = crate::Reg<haintmsk::HAINTMSK_SPEC>;
#[doc = "OTG_FS host all channels interrupt mask register"]
pub mod haintmsk;
#[doc = "HPRT (rw) register accessor: OTG_FS host port control and status register (OTG_FS_HPRT)\n\nYou can [`read`](crate::generic::Reg::read) this register and get [`hprt::R`]. You can [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero) this register using [`hprt::W`]. You can also [`modify`](crate::generic::Reg::modify) this register. See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [`hprt`]
module"]
pub type HPRT = crate::Reg<hprt::HPRT_SPEC>;
#[doc = "OTG_FS host port control and status register (OTG_FS_HPRT)"]
pub mod hprt;
#[doc = "Host channel"]
pub use self::hc::HC;
#[doc = r"Cluster"]
#[doc = "Host channel"]
pub mod hc;
|
use crate::common::Solution;
use itertools::Itertools;
use itertools::MinMaxResult::MinMax;
use std::collections::HashSet;
// Expected answer from part 1; part 2 searches for a contiguous window
// summing to this value.
static PART1_SOLUTION: usize = 542529149;
// Size of the rolling window of previous values checked by part 1.
static PRE_SIZE: usize = 25;
/// Scans `input_iter` left to right and returns `Some(a * b)` for the first
/// value `b` whose complement `a = target - b` was seen earlier; `None` when
/// no such pair exists.
///
/// Values larger than `target` are skipped as partners (they can never sum to
/// it); the original `target - val` underflowed `usize` and panicked on such
/// input.
fn find_combination_of2<'a, I>(input_iter: I, target: usize) -> Option<usize>
where
    I: Iterator<Item = &'a usize>,
{
    let mut complements: HashSet<usize> = HashSet::new();
    for val in input_iter {
        // checked_sub avoids the usize underflow panic when *val > target.
        if let Some(complement) = target.checked_sub(*val) {
            if complements.contains(&complement) {
                return Some(complement * val);
            }
        }
        complements.insert(*val);
    }
    None
}
/// Finds the first value after the initial window that is NOT a sum of two of
/// the previous `PRE_SIZE` values, returned as a string.
fn part1(input: &InputType) -> String {
    // Rolling window of candidate addends.
    let mut window = input.iter().copied().take(PRE_SIZE).collect::<HashSet<_>>();
    for (idx, &curr) in input.iter().enumerate().skip(PRE_SIZE) {
        let comb = find_combination_of2(window.iter(), curr);
        // Slide the window forward before checking, exactly as the original
        // map closure did.
        let oldest = input.get(idx - PRE_SIZE).unwrap();
        window.insert(curr);
        window.remove(oldest);
        if comb.is_none() {
            return curr.to_string();
        }
    }
    // The original unwrap() panicked here too when every value had a pair.
    unreachable!("no value without a valid pair was found")
}
/// Locates a contiguous run of input values summing to `PART1_SOLUTION` and
/// returns the sum of that run's smallest and largest elements; returns ""
/// if no such run exists.
fn part2(input: &InputType) -> String {
    let mut sum: usize = 0;
    let mut lo = 0;
    for (hi, x) in input.iter().enumerate() {
        // Shrink the window from the left while it overshoots the target.
        while sum > PART1_SOLUTION {
            sum -= input.get(lo).unwrap();
            lo += 1;
        }
        if sum == PART1_SOLUTION {
            // Window is input[lo..hi] (x itself not yet included).
            if let MinMax(mn, mx) = input.iter().skip(lo).take(hi - lo).minmax() {
                return (mx + mn).to_string();
            }
        }
        sum += x;
    }
    "".to_string()
}
type InputType = Vec<usize>;
/// Parses one unsigned integer per input line; panics on malformed input.
fn parse_input(raw_input: &[String]) -> InputType {
    let mut parsed = InputType::new();
    for line in raw_input {
        parsed.push(line.parse().unwrap());
    }
    parsed
}
/// Parses the raw input and times both parts; returns the pair of answers
/// together with the elapsed time. (`Solution` is a project type defined
/// elsewhere — presumably `((String, String), Duration)`, judging by the
/// returned tuple; confirm against `crate::common`.)
pub fn solve(raw_input: &[String]) -> Solution {
    let input = parse_input(raw_input);
    use std::time::Instant;
    let now = Instant::now();
    let solution = (part1(&input), part2(&input));
    let elapsed = now.elapsed();
    (solution, elapsed)
}
|
#[doc = "Register `WRP1AR` reader"]
pub type R = crate::R<WRP1AR_SPEC>;
#[doc = "Register `WRP1AR` writer"]
pub type W = crate::W<WRP1AR_SPEC>;
#[doc = "Field `WRP1A_PSTRT` reader - WRP1A_PSTRT"]
pub type WRP1A_PSTRT_R = crate::FieldReader;
#[doc = "Field `WRP1A_PSTRT` writer - WRP1A_PSTRT"]
pub type WRP1A_PSTRT_W<'a, REG, const O: u8> = crate::FieldWriter<'a, REG, 7, O>;
#[doc = "Field `WRP1A_PEND` reader - WRP1A_PEND"]
pub type WRP1A_PEND_R = crate::FieldReader;
#[doc = "Field `WRP1A_PEND` writer - WRP1A_PEND"]
pub type WRP1A_PEND_W<'a, REG, const O: u8> = crate::FieldWriter<'a, REG, 7, O>;
// svd2rust-generated read accessors; masks/shifts mirror the bit ranges in
// the #[doc] attributes.
impl R {
    #[doc = "Bits 0:6 - WRP1A_PSTRT"]
    #[inline(always)]
    pub fn wrp1a_pstrt(&self) -> WRP1A_PSTRT_R {
        // Low 7 bits of the register value.
        WRP1A_PSTRT_R::new((self.bits & 0x7f) as u8)
    }
    #[doc = "Bits 16:22 - WRP1A_PEND"]
    #[inline(always)]
    pub fn wrp1a_pend(&self) -> WRP1A_PEND_R {
        // 7-bit field starting at bit 16.
        WRP1A_PEND_R::new(((self.bits >> 16) & 0x7f) as u8)
    }
}
// svd2rust-generated write accessors for the same fields.
impl W {
    #[doc = "Bits 0:6 - WRP1A_PSTRT"]
    #[inline(always)]
    #[must_use]
    pub fn wrp1a_pstrt(&mut self) -> WRP1A_PSTRT_W<WRP1AR_SPEC, 0> {
        WRP1A_PSTRT_W::new(self)
    }
    #[doc = "Bits 16:22 - WRP1A_PEND"]
    #[inline(always)]
    #[must_use]
    pub fn wrp1a_pend(&mut self) -> WRP1A_PEND_W<WRP1AR_SPEC, 16> {
        WRP1A_PEND_W::new(self)
    }
    #[doc = "Writes raw bits to the register."]
    #[inline(always)]
    // Unsafe per svd2rust convention: the caller must ensure the raw value is
    // a valid bit pattern for this register.
    pub unsafe fn bits(&mut self, bits: u32) -> &mut Self {
        self.bits = bits;
        self
    }
}
#[doc = "Flash Bank 1 WRP area A address register\n\nYou can [`read`](crate::generic::Reg::read) this register and get [`wrp1ar::R`](R). You can [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero) this register using [`wrp1ar::W`](W). You can also [`modify`](crate::generic::Reg::modify) this register. See [API](https://docs.rs/svd2rust/#read--modify--write-api)."]
pub struct WRP1AR_SPEC;
impl crate::RegisterSpec for WRP1AR_SPEC {
    // 32-bit register.
    type Ux = u32;
}
#[doc = "`read()` method returns [`wrp1ar::R`](R) reader structure"]
impl crate::Readable for WRP1AR_SPEC {}
#[doc = "`write(|w| ..)` method takes [`wrp1ar::W`](W) writer structure"]
impl crate::Writable for WRP1AR_SPEC {
    // Per svd2rust these are the write-0/write-1-to-modify field bitmaps;
    // both zero here, i.e. no such fields in this register.
    const ZERO_TO_MODIFY_FIELDS_BITMAP: Self::Ux = 0;
    const ONE_TO_MODIFY_FIELDS_BITMAP: Self::Ux = 0;
}
#[doc = "`reset()` method sets WRP1AR to value 0xff00_ff00"]
impl crate::Resettable for WRP1AR_SPEC {
    const RESET_VALUE: Self::Ux = 0xff00_ff00;
}
|
use crate::constants::DATA_DIR;
use std::ffi::OsStr;
use std::path::PathBuf;
// Traits
use getset::Getters;
// Describes where and how output is saved: target path, header flag,
// timestamp, and an optional id (getset generates public getters).
#[derive(Getters, Debug, PartialOrd, PartialEq, Clone)]
#[getset(get = "pub")]
pub(crate) struct SaveConfiguration {
    // Target file location.
    path_buf: PathBuf,
    // Whether to emit a header when writing.
    header: bool,
    // Timestamp recorded for this save.
    date: chrono::DateTime<chrono::Local>,
    // Optional identifier; once set it is also used as the file stem
    // (see set_id).
    id: Option<String>,
}
impl SaveConfiguration {
    /// Replaces the file extension of the backing path.
    pub(crate) fn set_extension<S: AsRef<OsStr>>(&mut self, extension: S) -> &mut Self {
        self.path_buf.set_extension(extension);
        self
    }
    /// Sets whether a header should be written.
    pub(crate) fn set_header(&mut self, header: bool) -> &mut Self {
        self.header = header;
        self
    }
    /// Overrides the timestamp associated with this configuration.
    pub(crate) fn set_date(&mut self, date: chrono::DateTime<chrono::Local>) -> &mut Self {
        self.date = date;
        self
    }
    /// Renames the target file to `id`, preserving any existing extension,
    /// and records `id` for later retrieval via `checked_id`.
    pub(crate) fn set_id(&mut self, id: String) -> &mut Self {
        // Copy just the extension instead of cloning the whole PathBuf as the
        // original did; set_file_name would otherwise drop it.
        let extension = self.path_buf.extension().map(|e| e.to_os_string());
        self.path_buf.set_file_name(&id);
        if let Some(extension) = extension {
            self.path_buf.set_extension(extension);
        }
        self.id = Some(id);
        self
    }
    /// Returns the current file extension, if one is set.
    pub(crate) fn extension(&self) -> Option<&OsStr> {
        self.path_buf.extension()
    }
    /// Returns the id, panicking if none has been assigned yet.
    pub(crate) fn checked_id(&self) -> &String {
        match &self.id {
            Some(id) => id,
            None => panic!("Uninitialized id. Consider giving an id before processing."),
        }
    }
}
impl Default for SaveConfiguration {
    /// Default target is `<DATA_DIR>/none.txt`, header enabled, timestamped
    /// with the current local time, and no id assigned.
    fn default() -> Self {
        let mut path_buf: PathBuf = DATA_DIR.iter().collect();
        path_buf.push("none");
        path_buf.set_extension("txt");
        SaveConfiguration {
            path_buf,
            header: true,
            date: chrono::Local::now(),
            id: None,
        }
    }
}
|
//! Using Windows Runtime APIs from Rust.
//!
//! ## Example
//! ```
//! # // THIS IS THE SAME CODE THAT IS SHOWN IN README.md
//! # // PLEASE KEEP THEM IN SYNC SO WE CAN RELY ON DOCTESTS!
//! extern crate winrt;
//!
//! use winrt::*; // import various helper types
//! use winrt::windows::system::diagnostics::*; // import namespace Windows.System.Diagnostics
//!
//! fn main() {
//! let rt = RuntimeContext::init(); // initialize the Windows Runtime
//! let mut infos = ProcessDiagnosticInfo::get_for_processes().unwrap();
//! println!("Currently executed processes ({}):", unsafe { infos.get_size().unwrap() });
//! for mut p in infos.into_iter() {
//! let pid = unsafe { p.get_process_id().unwrap() };
//! let exe = unsafe { p.get_executable_file_name().unwrap() };
//! println!("[{}] {}", pid, exe);
//! }
//! }
//! ```
#![cfg(windows)]
#![cfg_attr(test,feature(test))]
#![cfg_attr(feature = "nightly", feature(specialization))]
#![cfg_attr(feature = "nightly", feature(associated_consts))]
#![allow(dead_code,non_upper_case_globals,non_snake_case)]
extern crate winapi as w;
extern crate runtimeobject;
extern crate ole32;
extern crate oleaut32;
mod guid;
pub use guid::Guid;
///Represents the trust level of an activatable class (re-export from WinAPI crate)
pub type TrustLevel = ::w::TrustLevel;
// Compared to the DEFINE_GUID macro from winapi, this one creates a private const
// Expands to a private `Guid` constant assembled from the standard GUID
// components (Data1..Data4), mirroring winapi's DEFINE_GUID but without
// making the constant public.
macro_rules! DEFINE_IID {
    (
        $name:ident, $l:expr, $w1:expr, $w2:expr, $b1:expr, $b2:expr, $b3:expr, $b4:expr, $b5:expr,
        $b6:expr, $b7:expr, $b8:expr
    ) => {
        const $name: &'static ::Guid = &::Guid {
            Data1: $l,
            Data2: $w1,
            Data3: $w2,
            Data4: [$b1, $b2, $b3, $b4, $b5, $b6, $b7, $b8],
        };
    }
}
mod hstring;
pub use hstring::{HString, FastHString, HStringReference, HStringArg};
mod bstr;
pub use bstr::BStr;
mod comptr;
pub use comptr::{ComPtr, ComArray};
mod cominterfaces;
pub use cominterfaces::{ComInterface, ComIid, IUnknown, IRestrictedErrorInfo, IAgileObject};
mod rt;
pub use rt::{RtInterface, RtClassInterface, RtNamedClass, RtValueType, RtType, RtActivatable, IInspectable, IInspectableVtbl, IActivationFactory, Char, RuntimeContext};
pub use rt::async::{RtAsyncAction, RtAsyncOperation};
mod result;
pub use result::{Result, Error, HRESULT};
pub mod windows {
pub use rt::gen::windows::*;
}
/// This is only for internal use within the generated code
mod prelude {
    // Re-exports shared by the generated bindings so they can `use prelude::*`.
    pub use ::rt::{RtType, RtActivatable, IInspectable, IInspectableVtbl, IActivationFactory, Char};
    pub use ::rt::handler::IntoInterface;
    pub use ::cominterfaces::{ComInterface, ComIid, IUnknown};
    pub use ::comptr::{ComPtr, ComArray};
    pub use ::hstring::{HString, HStringArg};
    pub use ::result::{Result, HRESULT};
    pub use ::w::{IUnknownVtbl, S_OK, HSTRING};
    pub use ::std::ptr::null_mut;
    pub use ::std::mem::zeroed;
    pub use ::guid::Guid;
    /// Wraps a failure `HRESULT` into the crate's `Result` type.
    #[inline]
    pub fn err<T>(hr: ::result::HRESULT) -> ::result::Result<T> {
        Err(::result::Error::from_hresult(hr))
    }
} |
/// Fast polynomial approximation of sin(x) for f32 (see test_sin for the
/// measured error bound over [-pi, pi]).
fn sin(x: f32) -> f32 {
    // Map x onto one period of the unit-frequency wave...
    let x = x * (1.0 / (std::f32::consts::PI * 2.0));
    // ...then center the fractional part on zero: x in [-0.5, 0.5).
    let x = x - x.floor() - 0.5;
    // Hoisted: the odd polynomial is evaluated in powers of x^2 (Horner form);
    // computing x*x once is bitwise-identical and avoids repeating it 7 times.
    let x2 = x * x;
    (0.6150599377704147_f32)
        .mul_add(x2, -3.776312346215613_f32)
        .mul_add(x2, 15.084843206874782_f32)
        .mul_add(x2, -42.05746026953019_f32)
        .mul_add(x2, 76.70577449290244_f32)
        .mul_add(x2, -81.60524634871001_f32)
        .mul_add(x2, 41.34170220158861_f32)
        .mul_add(x2, -6.283185307093742_f32)
        * x
}
/// Fast polynomial approximation of cos(x) for f32 (see test_cos for the
/// measured error bound over [-pi, pi]).
fn cos(x: f32) -> f32 {
    // Map x onto one period, then center the fractional part on zero.
    let x = x * (1.0 / (std::f32::consts::PI * 2.0));
    let x = x - x.floor() - 0.5;
    // Hoisted: the even polynomial is evaluated in powers of x^2; computing
    // x*x once is bitwise-identical to the original repeated products.
    let x2 = x * x;
    (-0.2437628622134172_f32)
        .mul_add(x2, 1.6969999270888276_f32)
        .mul_add(x2, -7.899269307802109_f32)
        .mul_add(x2, 26.42565411950429_f32)
        .mul_add(x2, -60.24459263234794_f32)
        .mul_add(x2, 85.45681509594338_f32)
        .mul_add(x2, -64.93939398117197_f32)
        .mul_add(x2, 19.739208801937608_f32)
        .mul_add(x2, -0.9999999999999996_f32)
}
/// Fast approximation of e^x: builds 2^floor(t) directly in the f32 exponent
/// bits, then corrects with a degree-6 polynomial on the fraction.
fn exp(x: f32) -> f32 {
    // e^x == 2^(x * log2(e)); work in base-2 exponent space.
    let t = x * std::f32::consts::LOG2_E;
    // 2^floor(t) assembled from the IEEE-754 bit layout.
    let scale = f32::from_bits((t.floor() * 0x00800000 as f32 + 0x3f800000 as f32) as u32);
    // Fractional part, centered on zero.
    let f = t - t.floor() - 0.5;
    let poly = (0.00021877504780304022_f32)
        .mul_add(f, 0.0018964605237938004_f32)
        .mul_add(f, 0.01360194957589631_f32)
        .mul_add(f, 0.07849305736942819_f32)
        .mul_add(f, 0.3397315896731585_f32)
        .mul_add(f, 0.980258206874906_f32)
        .mul_add(f, 1.414213562373095_f32);
    poly * scale
}
/// Fast approximation of 2^x: like `exp` but without the log2(e) rescale.
fn exp2(x: f32) -> f32 {
    // 2^floor(x) assembled from the IEEE-754 bit layout.
    let scale = f32::from_bits((x.floor() * 0x00800000 as f32 + 0x3f800000 as f32) as u32);
    // Fractional part, centered on zero.
    let f = x - x.floor() - 0.5;
    let poly = (0.00021877504780304022_f32)
        .mul_add(f, 0.0018964605237938004_f32)
        .mul_add(f, 0.01360194957589631_f32)
        .mul_add(f, 0.07849305736942819_f32)
        .mul_add(f, 0.3397315896731585_f32)
        .mul_add(f, 0.980258206874906_f32)
        .mul_add(f, 1.414213562373095_f32);
    poly * scale
}
/// Fast approximation of e^x - 1, arranged so the "-1" is applied to the
/// power-of-two scale term to preserve precision near zero.
fn exp_m1(x: f32) -> f32 {
    // e^x == 2^(x * log2(e)); work in base-2 exponent space.
    let t = x * std::f32::consts::LOG2_E;
    // 2^floor(t) assembled from the IEEE-754 bit layout.
    let scale = f32::from_bits((t.floor() * 0x00800000 as f32 + 0x3f800000 as f32) as u32);
    // Fractional part, centered on zero.
    let f = t - t.floor() - 0.5;
    // Same polynomial as `exp` except the constant term is reduced by 1.
    let poly = (0.00021877504780304022_f32)
        .mul_add(f, 0.0018964605237938004_f32)
        .mul_add(f, 0.01360194957589631_f32)
        .mul_add(f, 0.07849305736942819_f32)
        .mul_add(f, 0.3397315896731585_f32)
        .mul_add(f, 0.980258206874906_f32)
        .mul_add(f, 0.414213562373095_f32);
    poly * scale + (scale - 1.0)
}
/// Fast approximation of ln(x): splits x into binary exponent and mantissa
/// via the bit layout, approximates log2 of the mantissa with a polynomial,
/// then converts the base-2 result to natural log.
fn ln(x: f32) -> f32 {
    // Unbiased binary exponent extracted from the IEEE-754 bits.
    let exponent = (x.to_bits() >> 23) as i32 - 0x7f;
    // Mantissa normalized into [1, 2), then centered around 1.5.
    let m = f32::from_bits((x.to_bits() & 0x7fffff) | 0x3f800000) - 1.5;
    let poly: f32 = (-0.008874696649735232_f32)
        .mul_add(m, 0.01511708903698527_f32)
        .mul_add(m, -0.020698917295047616_f32)
        .mul_add(m, 0.03731541074473215_f32)
        .mul_add(m, -0.07127813300215657_f32)
        .mul_add(m, 0.14254471630463159_f32)
        .mul_add(m, -0.32059812016799316_f32)
        .mul_add(m, 0.9617954032360934_f32)
        .mul_add(m, 0.5849625007211562_f32);
    // log2 -> ln conversion.
    (poly + (exponent as f32)) * (1.0 / std::f32::consts::LOG2_E)
}
#[test]
fn test_sin() {
    // Sweeps one full period and checks sin()'s worst-case error in f64.
    const N: i32 = 0x100000;
    let tmin = -3.1415926535897931;
    let tmax = 3.1415926535897931;
    let mut max_error = 0.0_f64;
    for i in 0..=N {
        let x = i as f64 * (tmax - tmin) / N as f64 + tmin;
        let y1 = x.sin();
        let y2 = sin(x as f32) as f64;
        max_error = max_error.max((y1 - y2).abs());
        if i % (N / 16) == 0 {
            println!("y1={:20.16}\ny2={:20.16} e={:20.16}", y1, y2, y2 - y1);
        }
    }
    println!("sin me={:20}", max_error);
    // Empirically chosen error bound.
    assert!(max_error < 0.0000005006790161132813);
}
#[test]
fn test_cos() {
    // Sweeps one full period and checks cos()'s worst-case error in f64.
    const N: i32 = 0x100000;
    let tmin = -3.1415926535897931;
    let tmax = 3.1415926535897931;
    let mut max_error = 0.0_f64;
    for i in 0..=N {
        let x = i as f64 * (tmax - tmin) / N as f64 + tmin;
        let y1 = x.cos();
        let y2 = cos(x as f32) as f64;
        max_error = max_error.max((y1 - y2).abs());
        if i % (N / 16) == 0 {
            println!("y1={:20.16}\ny2={:20.16} e={:20.16}", y1, y2, y2 - y1);
        }
    }
    println!("cos me={:20}", max_error);
    // Empirically chosen error bound.
    assert!(max_error < 0.00000035762786865234375);
}
#[test]
fn test_exp_a() {
    // Checks exp() over [0, 1], where the result stays near 1..e.
    const N: i32 = 0x100000;
    let tmin = 0.0000000000000000;
    let tmax = 1.0000000000000000;
    let mut max_error = 0.0_f64;
    for i in 0..=N {
        let x = i as f64 * (tmax - tmin) / N as f64 + tmin;
        let y1 = x.exp();
        let y2 = exp(x as f32) as f64;
        max_error = max_error.max((y1 - y2).abs());
        if i % (N / 16) == 0 {
            println!("y1={:20.16}\ny2={:20.16} e={:20.16}", y1, y2, y2 - y1);
        }
    }
    println!("exp me={:20}", max_error);
    // Empirically chosen error bound.
    assert!(max_error < 0.00000035762786865234375);
}
#[test]
fn test_exp_b() {
    // Checks exp() over [1, 2]; larger outputs allow a looser absolute bound.
    const N: i32 = 0x100000;
    let tmin = 1.0000000000000000;
    let tmax = 2.0000000000000000;
    let mut max_error = 0.0_f64;
    for i in 0..=N {
        let x = i as f64 * (tmax - tmin) / N as f64 + tmin;
        let y1 = x.exp();
        let y2 = exp(x as f32) as f64;
        max_error = max_error.max((y1 - y2).abs());
        if i % (N / 16) == 0 {
            println!("y1={:20.16}\ny2={:20.16} e={:20.16}", y1, y2, y2 - y1);
        }
    }
    println!("exp me={:20}", max_error);
    // Empirically chosen error bound.
    assert!(max_error < 0.0000011920928955078125);
}
#[test]
fn test_exp_m1() {
    // Checks exp_m1() over [0, 1] against the std reference.
    const N: i32 = 0x100000;
    let tmin = 0.0000000000000000;
    let tmax = 1.0000000000000000;
    let mut max_error = 0.0_f64;
    for i in 0..=N {
        let x = i as f64 * (tmax - tmin) / N as f64 + tmin;
        let y1 = x.exp_m1();
        let y2 = exp_m1(x as f32) as f64;
        max_error = max_error.max((y1 - y2).abs());
        if i % (N / 16) == 0 {
            println!("y1={:20.16}\ny2={:20.16} e={:20.16}", y1, y2, y2 - y1);
        }
    }
    println!("exp_m1 me={:20}", max_error);
    // Empirically chosen error bound.
    assert!(max_error < 0.0000002384185791015625);
}
#[test]
fn test_exp2() {
    // Checks exp2() over [0, 1] against the std reference.
    const N: i32 = 0x100000;
    let tmin = 0.0000000000000000;
    let tmax = 1.0000000000000000;
    let mut max_error = 0.0_f64;
    for i in 0..=N {
        let x = i as f64 * (tmax - tmin) / N as f64 + tmin;
        let y1 = x.exp2();
        let y2 = exp2(x as f32) as f64;
        max_error = max_error.max((y1 - y2).abs());
        if i % (N / 16) == 0 {
            println!("y1={:20.16}\ny2={:20.16} e={:20.16}", y1, y2, y2 - y1);
        }
    }
    println!("exp2 me={:20}", max_error);
    // Empirically chosen error bound.
    assert!(max_error < 0.0000002384185791015625);
}
#[test]
fn test_ln() {
    // Checks ln() over [1, e] against the std reference.
    const N: i32 = 0x100000;
    let tmin = 1.0000000000000000;
    let tmax = 2.7182818284590451;
    let mut max_error = 0.0_f64;
    for i in 0..=N {
        let x = i as f64 * (tmax - tmin) / N as f64 + tmin;
        let y1 = x.ln();
        let y2 = ln(x as f32) as f64;
        max_error = max_error.max((y1 - y2).abs());
        if i % (N / 16) == 0 {
            println!("y1={:20.16}\ny2={:20.16} e={:20.16}", y1, y2, y2 - y1);
        }
    }
    println!("ln me={:20}", max_error);
    // Empirically chosen error bound.
    assert!(max_error < 0.0000002384185791015625);
}
|
#![feature(plugin)]
#![plugin(flow)]
extern crate tangle;
use tangle::{Future, Async};
#[test]
fn compile() {
    // flow! comes from the `flow` compiler plugin (see #![plugin(flow)]);
    // `let a: bool <- foobar` is plugin DSL, not plain Rust — presumably it
    // desugars to the and_then chain sketched below (confirm against the
    // plugin's docs).
    flow!{
        let a: bool<-foobar
    };
    // foobar.and_then(move |a| {
    //     Async::Ok(())
    // })
}
|
#[macro_use]
extern crate serde_derive;
extern crate serde;
extern crate serde_json;
// Demo type for serde round-tripping.
#[derive(Serialize, Deserialize, Debug)]
struct Point {
    x: i32,
    y: i32,
}
/// Round-trips a `Point` through its JSON representation and prints both
/// directions of the conversion.
fn main() {
    let point = Point { x: 1, y: 2 };
    // Serialize: prints serialized = {"x":1,"y":2}
    let json_text = serde_json::to_string(&point).unwrap();
    println!("serialized = {}", json_text);
    // Deserialize back: prints deserialized = Point { x: 1, y: 2 }
    let restored: Point = serde_json::from_str(&json_text).unwrap();
    println!("deserialized = {:?}", restored);
}
|
//! Available user interface controls and related functionality.
//!
//! Note that `Control` and all specific control types are references to memory which is owned by the UI library.
use ui::UI;
use ui_sys::{self, uiControl};
use std::ptr;
#[macro_use]
mod create_macro;
mod label;
pub use self::label::*;
mod button;
pub use self::button::*;
mod window;
pub use self::window::*;
mod layout;
pub use self::layout::*;
mod entry;
pub use self::entry::*;
mod progressbar;
pub use self::progressbar::*;
mod area;
pub use self::area::*;
/// A generic UI control. Any UI control can be turned into this type.
///
/// Note that `Control` and all specific control types are references
/// whose memory is owned by the UI library.
pub struct Control {
    // Raw handle owned by libui; this struct never frees it (see Drop below).
    ui_control: *mut uiControl,
}
impl Drop for Control {
    /// Intentionally a no-op: the UI library owns the underlying memory.
    fn drop(&mut self) {
        // For now this does nothing, but in the future, when `libui` supports proper memory
        // management, this will likely need to twiddle reference counts.
    }
}
impl Clone for Control {
    /// Clones the handle only: both values refer to the same underlying
    /// `uiControl`; no UI-library state is duplicated.
    fn clone(&self) -> Control {
        let ui_control = self.ui_control;
        Control { ui_control }
    }
}
impl Control {
    /// Creates a new `Control` object from an existing `*mut uiControl`.
    ///
    /// # Safety
    /// `ui_control` must be a pointer obtained from the UI library
    /// (not verifiable here).
    pub unsafe fn from_ui_control(ui_control: *mut uiControl) -> Control {
        Control { ui_control }
    }
    /// Returns the underlying `*mut uiControl`.
    pub fn as_ui_control(&self) -> *mut uiControl {
        self.ui_control
    }
    /// Destroys a control. Any use of the control after this is use-after-free; therefore, this
    /// is marked unsafe.
    pub unsafe fn destroy(&self) {
        // Don't check for initialization here since this can be run during deinitialization.
        ui_sys::uiControlDestroy(self.ui_control)
    }
}
impl UI {
    /// Returns the parent control of the given control, or `None` if the
    /// control is orphaned. (Was a plain `//` comment; promoted to a doc
    /// comment so it shows in rustdoc.)
    pub fn parent_of<T: Into<Control>>(&self, control: T) -> Option<Control> {
        unsafe {
            let ptr = ui_sys::uiControlParent(control.into().ui_control);
            if ptr.is_null() {
                None
            } else {
                Some(Control::from_ui_control(ptr))
            }
        }
    }
    /// Set the parent control of this control, "moving" it to a new place in
    /// the UI tree or, if passed `None`, removing it from the tree.
    // TODO: Does this actually need to be unsafe? I don't really see why it is.
    pub unsafe fn set_parent_of<T: Into<Control>>(&mut self, control: T, parent: Option<T>) {
        ui_sys::uiControlSetParent(
            control.into().ui_control,
            match parent {
                // A null parent detaches the control from the tree.
                None => ptr::null_mut(),
                Some(parent) => parent.into().ui_control,
            },
        )
    }
    /// Returns true if this control is a top-level control; the root of
    /// the UI tree.
    pub fn is_toplevel<T: Into<Control>>(&self, control: T) -> bool {
        unsafe { ui_sys::uiControlToplevel(control.into().ui_control) != 0 }
    }
    /// Returns true if this control is currently set to be displayed.
    pub fn is_shown<T: Into<Control>>(&self, control: T) -> bool {
        unsafe { ui_sys::uiControlVisible(control.into().ui_control) != 0 }
    }
    /// Sets whether or not the control should be displayed.
    pub fn set_shown<T: Into<Control>>(&mut self, control: T, show: bool) {
        if show {
            unsafe { ui_sys::uiControlShow(control.into().ui_control) }
        } else {
            unsafe { ui_sys::uiControlHide(control.into().ui_control) }
        }
    }
    /// Returns true if the control is enabled (can be interacted with).
    pub fn is_enabled<T: Into<Control>>(&self, control: T) -> bool {
        unsafe { ui_sys::uiControlEnabled(control.into().ui_control) != 0 }
    }
    /// Sets the enable/disable state of the control. If disabled, a control
    /// cannot be interacted with, and visual cues to that effect are presented
    /// to the user.
    pub fn set_enabled<T: Into<Control>>(&mut self, control: T, enabled: bool) {
        if enabled {
            unsafe { ui_sys::uiControlEnable(control.into().ui_control) }
        } else {
            unsafe { ui_sys::uiControlDisable(control.into().ui_control) }
        }
    }
}
|
//! Common Set operations for SegmentSet
use crate::{map::Key, RangeBounds, SegmentMap, SegmentSet};
pub mod difference;
pub mod intersection;
pub mod symmetric_difference;
pub mod union;
impl<T: Ord> SegmentSet<T> {
    /// Check whether `self` and `other` are disjoint sets
    ///
    /// That is, the intersection between `self` and `other` is empty
    pub fn is_disjoint(&self, other: &Self) -> bool {
        self.iter_intersection(other).next().is_none()
    }
    /// Check whether `self` is a subset of `other`
    ///
    /// That is, all elements of `self` exist in `other`, or (as implemented)
    /// `self.difference(other)` is empty
    pub fn is_subset(&self, other: &Self) -> bool {
        self.iter_difference(other).next().is_none()
    }
    /// Check whether `self` is a superset of `other`, i.e. `other` is a
    /// subset of `self`
    pub fn is_superset(&self, other: &Self) -> bool {
        other.is_subset(self)
    }
    // TODO: No Clone
    // TODO: subset(&self, range: R) -> Self; slightly faster than ranges(..).filter().collect() because it doesn't need to check insertions
    /// Returns a new set containing only the portion of `self` that falls
    /// within `range`
    pub fn subset<R: RangeBounds<T>>(&self, range: R) -> SegmentSet<T>
    where
        T: Clone + Ord,
    {
        SegmentSet {
            map: SegmentMap {
                map: self
                    .map
                    .iter_subset(range)
                    .map(|(r, _)| (Key(r), ()))
                    .collect(),
                // NOTE(review): `store` starts empty here — presumably a
                // free-node pool private to SegmentMap; confirm in map.rs.
                store: alloc::vec::Vec::new(),
            },
        }
    }
    // as_complement / into_complement?
    /// Returns the complement of `self`, borrowing its bound values
    pub fn complement(&self) -> SegmentSet<&T>
    where
        T: Ord,
    {
        self.map.complement()
    }
}
/// Set Complement
impl<'a, T: Ord + Clone> core::ops::Not for &'a SegmentSet<T> {
    type Output = SegmentSet<&'a T>;
    /// Returns the complement of this set, borrowing its bound values
    /// (delegates to [`SegmentSet::complement`]).
    fn not(self) -> Self::Output {
        self.complement()
    }
}
impl<T: Ord + Clone> core::ops::Not for SegmentSet<T> {
    type Output = SegmentSet<T>;
    /// Returns an owned complement of this set: complements by reference,
    /// then clones the borrowed bound values.
    fn not(self) -> Self::Output {
        self.complement().cloned()
    }
}
|
extern crate futures;
extern crate ocl;
extern crate ocl_extras;
use std::thread::{JoinHandle, Builder as ThreadBuilder};
use futures::Future;
use ocl::{ProQue, Buffer, MemFlags};
use ocl::r#async::{BufferSink, WriteGuard};
// Our arbitrary data set size (about a million) and coefficent:
const WORK_SIZE: usize = 1 << 20;
const COEFF: i32 = 321;
// Number of loop iterations below; each spawns two writer threads.
const THREAD_COUNT: usize = 32;
// Our kernel source code:
static KERNEL_SRC: &'static str = r#"
__kernel void multiply_by_scalar(
__private int const coeff,
__global int const* const src,
__global int* const res)
{
uint const idx = get_global_id(0);
res[idx] = src[idx] * coeff;
}
"#;
/// Demonstrates filling a device buffer through `BufferSink` from multiple
/// threads, then running the kernel and checking results.
///
/// NOTE(review): this snippet has several problems flagged inline; it appears
/// to be work-in-progress example code.
fn buffer_sink() -> ocl::Result<()> {
    let ocl_pq = ProQue::builder()
        .src(KERNEL_SRC)
        .dims(WORK_SIZE)
        .build().expect("Build ProQue");
    let source_buffer = Buffer::<i32>::builder()
        .queue(ocl_pq.queue().clone())
        .flags(MemFlags::new().read_write().alloc_host_ptr())
        .len(WORK_SIZE)
        .build()?;
    let mut vec_result = vec![0i32; WORK_SIZE];
    let result_buffer: Buffer<i32> = ocl_pq.create_buffer()?;
    let kern = ocl_pq.kernel_builder("multiply_by_scalar")
        .arg(COEFF)
        .arg(&source_buffer)
        .arg(&result_buffer)
        .build()?;
    assert_eq!(kern.default_global_work_size().to_len(), WORK_SIZE);
    let buffer_sink = unsafe {
        BufferSink::from_buffer(source_buffer.clone(), Some(ocl_pq.queue().clone()), 0,
            WORK_SIZE)?
    };
    // let source_data = ocl_extras::scrambled_vec((0, 20), ocl_pq.dims().to_len());
    // One random data set per iteration of the loop below.
    let source_datas: Vec<_> = (0..THREAD_COUNT).map(|_| {
        ocl_extras::scrambled_vec((0, 20), ocl_pq.dims().to_len())
    }).collect();
    let mut threads = Vec::<JoinHandle<()>>::with_capacity(THREAD_COUNT * 2);
    for i in 0..THREAD_COUNT {
        let writer_0 = buffer_sink.clone().write();
        // NOTE(review): `spawn` returns `io::Result<JoinHandle<_>>`, which is
        // pushed into `Vec<JoinHandle<()>>` without unwrapping, and the thread
        // closure returns an `async move` block that is never polled — so the
        // zero-fill below never actually runs. Needs `.unwrap()` plus an
        // executor (e.g. futures::executor::block_on) inside the thread.
        threads.push(ThreadBuilder::new().name(format!("thread_{}", i)).spawn(|| async move {
            let mut write_guard = writer_0.await;
            write_guard.copy_from_slice(&[0i32; WORK_SIZE]);
            let buffer_sink: BufferSink<_> = WriteGuard::release(write_guard).into();
            buffer_sink.flush().enq().unwrap().await;
        }));
        let source_data = source_datas[i].clone();
        let writer_1 = buffer_sink.clone().write();
        // NOTE(review): same issue — the returned future is dropped unpolled,
        // so `source_data` is never copied into the sink.
        threads.push(ThreadBuilder::new().name(format!("thread_{}", i)).spawn(|| async move{
            let mut write_guard = writer_1.await;
            write_guard.copy_from_slice(&source_data);
            let buffer_sink: BufferSink<_> = WriteGuard::release(write_guard).into();
            buffer_sink.flush().enq().unwrap().await;
        }));
        // NOTE(review): the kernel is enqueued and results read without
        // waiting for the writer threads (the join loop below is commented
        // out), so this check races with the writes.
        unsafe { kern.enq()?; }
        result_buffer.read(&mut vec_result).enq()?;
        // Check results:
        for (&src, &res) in source_data.iter().zip(vec_result.iter()) {
            assert_eq!(src * COEFF, res);
        }
    }
    // for thread in threads {
    //     thread.join().unwrap();
    // }
    Ok(())
}
/// Entry point: runs the buffer-sink demo and prints any OpenCL error.
pub fn main() {
    if let Err(err) = buffer_sink() {
        println!("{}", err);
    }
} |
#![cfg_attr(feature = "nightly", feature(try_from))]
#[cfg(feature = "nightly")]
use std::convert::TryFrom;
use std::fmt;
use std::io;
/// A raw operating-system error code (`errno`-style on Unix,
/// `GetLastError`-style on Windows), comparable and hashable.
#[derive(Clone, Copy, PartialEq, PartialOrd, Eq, Ord, Hash)]
pub struct OsError {
    code: i32,
}
/// Returned when converting an `io::Error` that carries no raw OS error code.
#[derive(Clone, Copy, PartialEq, PartialOrd, Eq, Ord, Debug, Hash)]
pub struct NoOsError;
impl OsError {
    /// Creates a new instance of an `OsError` from a particular OS error code.
    ///
    /// # Examples
    ///
    /// On Linux:
    ///
    /// ```
    /// # if cfg!(target_os = "linux") {
    /// use std::io;
    ///
    /// let error = os_error::OsError::new(98);
    /// assert_eq!(error.kind(), io::ErrorKind::AddrInUse);
    /// # }
    /// ```
    ///
    /// On Windows:
    ///
    /// ```
    /// # if cfg!(windows) {
    /// use std::io;
    ///
    /// let error = os_error::OsError::new(10048);
    /// assert_eq!(error.kind(), io::ErrorKind::AddrInUse);
    /// # }
    /// ```
    pub fn new(code: i32) -> OsError {
        // Field-init shorthand; `code: code` was redundant.
        OsError { code }
    }
    /// Returns an error representing the last OS error which occurred.
    ///
    /// This function reads the value of `errno` for the target platform (e.g.
    /// `GetLastError` on Windows) and will return a corresponding instance of
    /// `OsError` for the error code.
    ///
    /// # Examples
    ///
    /// ```
    /// use os_error::OsError;
    ///
    /// println!("last OS error: {:?}", OsError::last_os_error());
    /// ```
    pub fn last_os_error() -> OsError {
        OsError::new(io::Error::last_os_error().raw_os_error().unwrap())
    }
    /// Returns the OS error that this error represents.
    ///
    /// # Examples
    ///
    /// ```
    /// use os_error::OsError;
    ///
    /// fn main() {
    ///     // Will print "raw OS error: ...".
    ///     println!("raw OS error: {:?}", OsError::last_os_error().code());
    /// }
    /// ```
    pub fn code(&self) -> i32 {
        self.code
    }
    /// Returns the corresponding `ErrorKind` for this error.
    ///
    /// # Examples
    ///
    /// ```
    /// use os_error::OsError;
    ///
    /// fn main() {
    ///     // Prints the `io::ErrorKind` of the last OS error.
    ///     println!("{:?}", OsError::last_os_error().kind());
    /// }
    /// ```
    pub fn kind(&self) -> io::ErrorKind {
        self.to_error().kind()
    }
    /// Converts to the equivalent `io::Error`, which backs the `kind`,
    /// `Debug`, and `Display` implementations.
    fn to_error(&self) -> io::Error {
        io::Error::from_raw_os_error(self.code)
    }
}
impl fmt::Debug for OsError {
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
let error: io::Error = self.to_error();
fmt.debug_struct("OsError")
.field("code", &self.code)
.field("kind", &error.kind())
.finish()
}
}
impl fmt::Display for OsError {
    /// Delegates to the `Display` implementation of the equivalent
    /// `io::Error`.
    fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
        fmt::Display::fmt(&self.to_error(), fmt)
    }
}
#[cfg(feature = "nightly")]
impl TryFrom<io::Error> for OsError {
    type Error = NoOsError;
    /// Succeeds only when the `io::Error` wraps a raw OS error code;
    /// otherwise yields `NoOsError`.
    fn try_from(error: io::Error) -> Result<OsError, NoOsError> {
        error
            .raw_os_error()
            .map(|code| OsError { code })
            .ok_or(NoOsError)
    }
}
impl Into<io::Error> for OsError {
fn into(self) -> io::Error {
self.to_error()
}
}
#[cfg(test)]
mod tests {
    use std::io;
    use super::OsError;
    #[cfg(feature = "nightly")]
    use super::NoOsError;
    #[cfg(feature = "nightly")]
    use std::convert::TryFrom;
    #[cfg(feature = "nightly")]
    use std::convert::TryInto;
    // An arbitrary valid OS error code used across all tests.
    const CODE: i32 = 6;
    #[test]
    fn test_fmt_display() {
        // `Display` must match the underlying `io::Error`'s rendering.
        let err = OsError::new(CODE);
        let io_error = io::Error::from_raw_os_error(CODE);
        assert_eq!(format!("{}", err), format!("{}", io_error));
    }
    #[test]
    fn test_fmt_debug() {
        // NOTE(review): assumes code 6 maps to `ErrorKind::Other` on the
        // test platform — confirm if CI targets change.
        let kind = io::ErrorKind::Other;
        let err = OsError::new(CODE);
        let expected = format!("OsError {{ code: {:?}, kind: {:?} }}", CODE, kind);
        assert_eq!(format!("{:?}", err), expected);
    }
    #[test]
    #[cfg(feature = "nightly")]
    fn from_io_error() {
        // An io::Error carrying a raw code converts; one without fails.
        let os_error = OsError::try_from(io::Error::from_raw_os_error(CODE));
        assert_eq!(os_error, Ok(OsError{ code: CODE }));
        let os_error = OsError::try_from(io::Error::new(io::ErrorKind::AddrInUse, "NoOsError"));
        assert_eq!(os_error, Err(NoOsError));
    }
    #[test]
    #[cfg(feature = "nightly")]
    fn into_os_error() {
        // Same conversions, exercised through the blanket `TryInto`.
        let os_error: Result<OsError, _> = io::Error::from_raw_os_error(CODE).try_into();
        assert_eq!(os_error, Ok(OsError{ code: CODE }));
        let os_error: Result<OsError, _> =
            io::Error::new(io::ErrorKind::AddrInUse, "NoOsError").try_into();
        assert_eq!(os_error, Err(NoOsError));
    }
}
|
/// A two-dimensional point whose coordinates may have different types.
struct Point<T,U> {
    // First coordinate.
    x: T,
    // Second coordinate.
    y: U,
}
impl<T> Point<T,T> {
fn swap(&mut self) {
std::mem::swap(&mut self.x,&mut self.y);
}
}
/// Entry point; demonstrates an `Option` binding.
fn main() {
    // Underscore prefix marks the binding as intentionally unused and
    // silences the `unused_variables` warning.
    let _x: Option<i32> = Some(5);
}
|
use aide::openapi::v3::macros::api;
use thiserror::Error;
use time::{Duration, OffsetDateTime};
use uuid::Uuid;
/// Request body for creating a new image record (metadata only; the
/// binary upload happens in a separate step — see `UploadImageError`).
#[api]
#[serde(rename_all = "camelCase")]
pub struct CreateImageRequest {
    /// Human-readable title of the image.
    pub title: String,
    /// Optional free-text description.
    pub description: Option<String>,
    /// Ids of the categories this image belongs to.
    pub categories: Vec<Uuid>,
}
/// Response returned after an image record was created.
#[api]
#[serde(rename_all = "camelCase")]
pub struct CreateImageResponse {
    /// Id of the newly created image.
    pub id: Uuid,
}
#[derive(Debug, Error)]
pub enum CreateImageError {
#[error(r#"the the image category "{0}" was not found"#)]
CategoryNotFound(Uuid),
#[error("there was an unexpected error")]
Unexpected,
}
/// Query parameters for the image search endpoint.
#[api]
pub struct SearchImagesQuery {
    /// Optional free-text search term.
    pub search: Option<String>,
    /// Maximum number of results to return.
    pub limit: Option<u64>,
    /// Number of results to skip (for pagination).
    pub offset: Option<u64>,
}
/// A stored image's metadata as returned by the API.
#[api]
pub struct Image {
    pub id: Uuid,
    pub title: String,
    pub description: Option<String>,
    /// Ids of the categories this image belongs to.
    pub categories: Vec<Uuid>,
    // Serialized as an RFC 3339 timestamp via the project helpers.
    #[serde(serialize_with = "crate::util::serialize_rfc3339")]
    #[serde(deserialize_with = "crate::util::deserialize_rfc3339")]
    pub date: OffsetDateTime
}
/// Response body for the image search endpoint.
#[api]
pub struct SearchImagesResponse {
    pub images: Vec<Image>,
}
/// Response body for fetching a single image's metadata.
#[api]
pub struct GetImageResponse {
    pub image: Image,
}
/// Errors that can occur while searching images.
#[derive(Debug, Error)]
pub enum SearchImagesError {
    #[error("there was an unexpected error")]
    Unexpected,
}
/// Errors that can occur while fetching a single image's metadata.
#[derive(Debug, Error)]
pub enum GetImageInfoError {
    #[error("the image was not found")]
    NotFound,
    #[error("there was an unexpected error")]
    Unexpected,
}
/// Errors that can occur while uploading an image's binary data.
#[derive(Debug, Error)]
pub enum UploadImageError {
    #[error("the given identifier is invalid")]
    InvalidId,
    #[error("the image was already uploaded")]
    AlreadyUploaded,
    // The payload is the timeout duration; rendered in whole seconds.
    #[error("the image upload exceeded the {}s timeout, please create a new image", .0.whole_seconds())]
    TimeOut(Duration),
    #[error("expected a file, but got none")]
    ExpectedFile,
    #[error("there was an unexpected error during the upload process")]
    Unexpected,
}
/// Request body for rating an image.
#[api]
pub struct RateImageRequest {
    /// The rating value; see `RateImageError::InvalidRating` for bounds.
    pub rating: u32,
}
/// Errors that can occur while rating an image.
#[derive(Debug, Error)]
pub enum RateImageError {
    #[error("the image was not found")]
    ImageNotFound,
    #[error("own image cannot be rated")]
    OwnImage,
    #[error("the rating must be between 1 and 5")]
    InvalidRating,
    #[error("there was an unexpected error")]
    Unexpected,
}
/// Response body carrying the aggregated rating of an image.
#[api]
pub struct GetImageRatingResponse {
    /// Mean of all ratings for this image.
    pub average: f32,
    /// How many ratings the average is based on.
    pub rating_count: u32,
}
/// Errors that can occur while fetching an image's ratings.
#[derive(Debug, Error)]
pub enum GetImageRatingsError {
    #[error("the image was not found")]
    ImageNotFound,
    #[error("there was an unexpected error")]
    Unexpected,
}
/// An image category with a count of the images it contains.
#[api]
pub struct Category {
    pub id: Uuid,
    pub name: String,
    /// Number of images assigned to this category.
    pub image_count: u32,
}
/// Response body listing all categories.
#[api]
pub struct GetCategoriesResponse {
    pub categories: Vec<Category>,
}
/// Errors that can occur while listing categories.
#[derive(Debug, Error)]
pub enum GetCategoriesError {
    #[error("there was an unexpected error")]
    Unexpected,
}
/// Request body for creating a category.
#[api]
pub struct CreateCategoryRequest {
    pub name: String,
}
/// Response returned after a category was created.
#[api]
pub struct CreateCategoryResponse {
    pub id: Uuid,
}
/// Errors that can occur while creating a category.
#[derive(Debug, Error)]
pub enum CreateCategoryError {
    #[error("only admins are allowed to create categories")]
    NotAllowed,
    // The payload is the validation pattern the name must match.
    #[error("the category name must match the following pattern: {0}")]
    InvalidName(String),
    #[error("the category already exists")]
    AlreadyExists,
    #[error("there was an unexpected error")]
    Unexpected,
}
/// Request body for renaming an existing category.
#[api]
pub struct RenameCategoryRequest {
    /// The new name for the category.
    pub name: String,
}
#[derive(Debug, Error)]
pub enum RenameCategoryError {
#[error("only admins are allowed to create categories")]
NotAllowed,
#[error("there category was not found")]
CategoryNotFound,
#[error("the category name must match the following pattern: {0}")]
InvalidName(String),
#[error("the category already exists")]
AlreadyExists,
#[error("there was an unexpected error")]
Unexpected,
}
#[derive(Debug, Error)]
pub enum DeleteCategoryError {
#[error("only admins are allowed to create categories")]
NotAllowed,
#[error("there category was not found")]
CategoryNotFound,
#[error("there was an unexpected error")]
Unexpected,
}
/// A user's name together with the average rating their images received.
#[api]
pub struct UserRating {
    pub name: String,
    /// Mean rating over this user's images.
    pub average_rating: f64
}
/// Errors that can occur while fetching user ratings.
#[derive(Debug, Error)]
pub enum GetUserRatingsError {
    #[error("there was an unexpected error")]
    Unexpected,
}
/// Response body listing per-user average ratings.
#[api]
pub struct GetUserRatingsResponse {
    pub ratings: Vec<UserRating>
}
use rand::{thread_rng, Rng};
/// An inclusive-lower / exclusive-upper numeric range used as a source of
/// random values (see the `impl` for the sampling semantics).
#[derive(Clone, Copy)]
pub struct RandomRange {
    // Lower bound of the range.
    pub min: f32,
    // Upper bound of the range.
    pub max: f32,
}
impl RandomRange {
    /// Constructs a range from the given bounds.
    pub fn new(min: f32, max: f32) -> RandomRange {
        RandomRange {
            min, max
        }
    }
    /// Samples a uniform random value in `[min, max)`.
    ///
    /// NOTE(review): the two-argument `gen_range(low, high)` form (rand
    /// pre-0.8 API) panics when `min >= max` — confirm callers guarantee
    /// `min < max`.
    pub fn get(&self) -> f32 {
        let mut rng = thread_rng();
        rng.gen_range(self.min, self.max)
    }
    /// Returns `true` with probability `min / max`.
    ///
    /// NOTE(review): `gen_bool(p)` panics unless `0 <= p <= 1`, so this
    /// implicitly assumes `0 <= min <= max` and `max != 0` — verify at
    /// call sites.
    pub fn bool(&self) -> bool {
        let mut rng = thread_rng();
        rng.gen_bool(self.min as f64 / self.max as f64)
    }
    /// Returns `1.0` with probability `min / max`, otherwise `-1.0`
    /// (intended as a random sign multiplier).
    pub fn positive_or_negative(&self) -> f32 {
        if self.bool() {
            1.0
        } else {
            -1.0
        }
    }
}
// svd2rust-generated accessors for the RCC_MCO1CFGR register (MCO1 clock
// output configuration). `R`/`W` are the read/write proxies over the raw
// 32-bit value; do not edit by hand.
#[doc = "Reader of register RCC_MCO1CFGR"]
pub type R = crate::R<u32, super::RCC_MCO1CFGR>;
#[doc = "Writer for register RCC_MCO1CFGR"]
pub type W = crate::W<u32, super::RCC_MCO1CFGR>;
#[doc = "Register RCC_MCO1CFGR `reset()`'s with value 0"]
impl crate::ResetValue for super::RCC_MCO1CFGR {
    type Type = u32;
    #[inline(always)]
    fn reset_value() -> Self::Type {
        // Hardware reset value of the register.
        0
    }
}
// --- MCO1SEL field (bits 0:2): selects which clock drives the MCO1 pin ---
#[doc = "MCO1SEL\n\nValue on reset: 0"]
#[derive(Clone, Copy, Debug, PartialEq)]
#[repr(u8)]
pub enum MCO1SEL_A {
    #[doc = "0: HSI clock selected (hsi_ck) (default\r\n after reset)"]
    B_0X0 = 0,
    #[doc = "1: HSE clock selected\r\n (hse_ck)"]
    B_0X1 = 1,
    #[doc = "2: CSI clock selected\r\n (csi_ck)"]
    B_0X2 = 2,
    #[doc = "3: LSI clock selected\r\n (lsi_ck)"]
    B_0X3 = 3,
    #[doc = "4: LSE oscillator clock selected\r\n (lse_ck)"]
    B_0X4 = 4,
}
impl From<MCO1SEL_A> for u8 {
    #[inline(always)]
    fn from(variant: MCO1SEL_A) -> Self {
        // Safe: the enum is `repr(u8)` with explicit discriminants.
        variant as _
    }
}
#[doc = "Reader of field `MCO1SEL`"]
pub type MCO1SEL_R = crate::R<u8, MCO1SEL_A>;
impl MCO1SEL_R {
    #[doc = r"Get enumerated values variant"]
    #[inline(always)]
    pub fn variant(&self) -> crate::Variant<u8, MCO1SEL_A> {
        use crate::Variant::*;
        // Values 5..=7 are reserved and reported as `Res(raw)`.
        match self.bits {
            0 => Val(MCO1SEL_A::B_0X0),
            1 => Val(MCO1SEL_A::B_0X1),
            2 => Val(MCO1SEL_A::B_0X2),
            3 => Val(MCO1SEL_A::B_0X3),
            4 => Val(MCO1SEL_A::B_0X4),
            i => Res(i),
        }
    }
    #[doc = "Checks if the value of the field is `B_0X0`"]
    #[inline(always)]
    pub fn is_b_0x0(&self) -> bool {
        *self == MCO1SEL_A::B_0X0
    }
    #[doc = "Checks if the value of the field is `B_0X1`"]
    #[inline(always)]
    pub fn is_b_0x1(&self) -> bool {
        *self == MCO1SEL_A::B_0X1
    }
    #[doc = "Checks if the value of the field is `B_0X2`"]
    #[inline(always)]
    pub fn is_b_0x2(&self) -> bool {
        *self == MCO1SEL_A::B_0X2
    }
    #[doc = "Checks if the value of the field is `B_0X3`"]
    #[inline(always)]
    pub fn is_b_0x3(&self) -> bool {
        *self == MCO1SEL_A::B_0X3
    }
    #[doc = "Checks if the value of the field is `B_0X4`"]
    #[inline(always)]
    pub fn is_b_0x4(&self) -> bool {
        *self == MCO1SEL_A::B_0X4
    }
}
#[doc = "Write proxy for field `MCO1SEL`"]
pub struct MCO1SEL_W<'a> {
    w: &'a mut W,
}
impl<'a> MCO1SEL_W<'a> {
    #[doc = r"Writes `variant` to the field"]
    #[inline(always)]
    pub fn variant(self, variant: MCO1SEL_A) -> &'a mut W {
        unsafe { self.bits(variant.into()) }
    }
    #[doc = "HSI clock selected (hsi_ck) (default after reset)"]
    #[inline(always)]
    pub fn b_0x0(self) -> &'a mut W {
        self.variant(MCO1SEL_A::B_0X0)
    }
    #[doc = "HSE clock selected (hse_ck)"]
    #[inline(always)]
    pub fn b_0x1(self) -> &'a mut W {
        self.variant(MCO1SEL_A::B_0X1)
    }
    #[doc = "CSI clock selected (csi_ck)"]
    #[inline(always)]
    pub fn b_0x2(self) -> &'a mut W {
        self.variant(MCO1SEL_A::B_0X2)
    }
    #[doc = "LSI clock selected (lsi_ck)"]
    #[inline(always)]
    pub fn b_0x3(self) -> &'a mut W {
        self.variant(MCO1SEL_A::B_0X3)
    }
    #[doc = "LSE oscillator clock selected (lse_ck)"]
    #[inline(always)]
    pub fn b_0x4(self) -> &'a mut W {
        self.variant(MCO1SEL_A::B_0X4)
    }
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub unsafe fn bits(self, value: u8) -> &'a mut W {
        // Mask 0x07 confines the write to bits 0:2.
        self.w.bits = (self.w.bits & !0x07) | ((value as u32) & 0x07);
        self.w
    }
}
// --- MCO1DIV field (bits 4:7): prescaler applied to the MCO1 clock ---
#[doc = "MCO1DIV\n\nValue on reset: 0"]
#[derive(Clone, Copy, Debug, PartialEq)]
#[repr(u8)]
pub enum MCO1DIV_A {
    #[doc = "0: bypass (default after\r\n reset)"]
    B_0X0 = 0,
    #[doc = "1: division by 2"]
    B_0X1 = 1,
    #[doc = "2: division by 3"]
    B_0X2 = 2,
    #[doc = "15: division by 16"]
    B_0XF = 15,
}
impl From<MCO1DIV_A> for u8 {
    #[inline(always)]
    fn from(variant: MCO1DIV_A) -> Self {
        // Safe: the enum is `repr(u8)` with explicit discriminants.
        variant as _
    }
}
#[doc = "Reader of field `MCO1DIV`"]
pub type MCO1DIV_R = crate::R<u8, MCO1DIV_A>;
impl MCO1DIV_R {
    #[doc = r"Get enumerated values variant"]
    #[inline(always)]
    pub fn variant(&self) -> crate::Variant<u8, MCO1DIV_A> {
        use crate::Variant::*;
        // Only 0, 1, 2 and 15 are enumerated in the SVD; other divider
        // values come back as `Res(raw)`.
        match self.bits {
            0 => Val(MCO1DIV_A::B_0X0),
            1 => Val(MCO1DIV_A::B_0X1),
            2 => Val(MCO1DIV_A::B_0X2),
            15 => Val(MCO1DIV_A::B_0XF),
            i => Res(i),
        }
    }
    #[doc = "Checks if the value of the field is `B_0X0`"]
    #[inline(always)]
    pub fn is_b_0x0(&self) -> bool {
        *self == MCO1DIV_A::B_0X0
    }
    #[doc = "Checks if the value of the field is `B_0X1`"]
    #[inline(always)]
    pub fn is_b_0x1(&self) -> bool {
        *self == MCO1DIV_A::B_0X1
    }
    #[doc = "Checks if the value of the field is `B_0X2`"]
    #[inline(always)]
    pub fn is_b_0x2(&self) -> bool {
        *self == MCO1DIV_A::B_0X2
    }
    #[doc = "Checks if the value of the field is `B_0XF`"]
    #[inline(always)]
    pub fn is_b_0x_f(&self) -> bool {
        *self == MCO1DIV_A::B_0XF
    }
}
#[doc = "Write proxy for field `MCO1DIV`"]
pub struct MCO1DIV_W<'a> {
    w: &'a mut W,
}
impl<'a> MCO1DIV_W<'a> {
    #[doc = r"Writes `variant` to the field"]
    #[inline(always)]
    pub fn variant(self, variant: MCO1DIV_A) -> &'a mut W {
        unsafe { self.bits(variant.into()) }
    }
    #[doc = "bypass (default after reset)"]
    #[inline(always)]
    pub fn b_0x0(self) -> &'a mut W {
        self.variant(MCO1DIV_A::B_0X0)
    }
    #[doc = "division by 2"]
    #[inline(always)]
    pub fn b_0x1(self) -> &'a mut W {
        self.variant(MCO1DIV_A::B_0X1)
    }
    #[doc = "division by 3"]
    #[inline(always)]
    pub fn b_0x2(self) -> &'a mut W {
        self.variant(MCO1DIV_A::B_0X2)
    }
    #[doc = "division by 16"]
    #[inline(always)]
    pub fn b_0x_f(self) -> &'a mut W {
        self.variant(MCO1DIV_A::B_0XF)
    }
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub unsafe fn bits(self, value: u8) -> &'a mut W {
        // Field occupies bits 4:7; mask then shift into position.
        self.w.bits = (self.w.bits & !(0x0f << 4)) | (((value as u32) & 0x0f) << 4);
        self.w
    }
}
// --- MCO1ON field (bit 12): enables/disables the MCO1 output ---
#[doc = "MCO1ON\n\nValue on reset: 0"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum MCO1ON_A {
    #[doc = "0: The MCO1 output is\r\n disabled"]
    B_0X0 = 0,
    #[doc = "1: The MCO1 output is\r\n enabled"]
    B_0X1 = 1,
}
impl From<MCO1ON_A> for bool {
    #[inline(always)]
    fn from(variant: MCO1ON_A) -> Self {
        // Single-bit field: nonzero discriminant means "enabled".
        variant as u8 != 0
    }
}
#[doc = "Reader of field `MCO1ON`"]
pub type MCO1ON_R = crate::R<bool, MCO1ON_A>;
impl MCO1ON_R {
    #[doc = r"Get enumerated values variant"]
    #[inline(always)]
    pub fn variant(&self) -> MCO1ON_A {
        // Exhaustive: a bool field has no reserved values.
        match self.bits {
            false => MCO1ON_A::B_0X0,
            true => MCO1ON_A::B_0X1,
        }
    }
    #[doc = "Checks if the value of the field is `B_0X0`"]
    #[inline(always)]
    pub fn is_b_0x0(&self) -> bool {
        *self == MCO1ON_A::B_0X0
    }
    #[doc = "Checks if the value of the field is `B_0X1`"]
    #[inline(always)]
    pub fn is_b_0x1(&self) -> bool {
        *self == MCO1ON_A::B_0X1
    }
}
#[doc = "Write proxy for field `MCO1ON`"]
pub struct MCO1ON_W<'a> {
    w: &'a mut W,
}
impl<'a> MCO1ON_W<'a> {
    #[doc = r"Writes `variant` to the field"]
    #[inline(always)]
    pub fn variant(self, variant: MCO1ON_A) -> &'a mut W {
        {
            self.bit(variant.into())
        }
    }
    #[doc = "The MCO1 output is disabled"]
    #[inline(always)]
    pub fn b_0x0(self) -> &'a mut W {
        self.variant(MCO1ON_A::B_0X0)
    }
    #[doc = "The MCO1 output is enabled"]
    #[inline(always)]
    pub fn b_0x1(self) -> &'a mut W {
        self.variant(MCO1ON_A::B_0X1)
    }
    #[doc = r"Sets the field bit"]
    #[inline(always)]
    pub fn set_bit(self) -> &'a mut W {
        self.bit(true)
    }
    #[doc = r"Clears the field bit"]
    #[inline(always)]
    pub fn clear_bit(self) -> &'a mut W {
        self.bit(false)
    }
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub fn bit(self, value: bool) -> &'a mut W {
        // Bit 12; a bool write cannot produce invalid values, so no `unsafe`.
        self.w.bits = (self.w.bits & !(0x01 << 12)) | (((value as u32) & 0x01) << 12);
        self.w
    }
}
// Field accessors on the read proxy; each extracts its span from the
// cached 32-bit register value.
impl R {
    #[doc = "Bits 0:2 - MCO1SEL"]
    #[inline(always)]
    pub fn mco1sel(&self) -> MCO1SEL_R {
        MCO1SEL_R::new((self.bits & 0x07) as u8)
    }
    #[doc = "Bits 4:7 - MCO1DIV"]
    #[inline(always)]
    pub fn mco1div(&self) -> MCO1DIV_R {
        MCO1DIV_R::new(((self.bits >> 4) & 0x0f) as u8)
    }
    #[doc = "Bit 12 - MCO1ON"]
    #[inline(always)]
    pub fn mco1on(&self) -> MCO1ON_R {
        MCO1ON_R::new(((self.bits >> 12) & 0x01) != 0)
    }
}
// Field write-proxy constructors on the write proxy; each returned proxy
// mutates this `W`'s bits when written.
impl W {
    #[doc = "Bits 0:2 - MCO1SEL"]
    #[inline(always)]
    pub fn mco1sel(&mut self) -> MCO1SEL_W {
        MCO1SEL_W { w: self }
    }
    #[doc = "Bits 4:7 - MCO1DIV"]
    #[inline(always)]
    pub fn mco1div(&mut self) -> MCO1DIV_W {
        MCO1DIV_W { w: self }
    }
    #[doc = "Bit 12 - MCO1ON"]
    #[inline(always)]
    pub fn mco1on(&mut self) -> MCO1ON_W {
        MCO1ON_W { w: self }
    }
}
|
use crate::input::{Command, KeyInput, KeyMap};
use crate::line_cache::Style;
use cairo::{FontFace, FontOptions, FontSlant, FontWeight, Matrix, ScaledFont};
use druid::shell::piet;
use piet::{CairoText, Font, FontBuilder, Text};
use std::collections::HashMap;
use std::sync::{Arc, Mutex, Weak};
use syntect::highlighting::ThemeSettings;
/// Shared, thread-safe application configuration.
///
/// Each member is wrapped in `Arc<Mutex<_>>` so a `Config` clone is cheap
/// and all clones observe the same mutable state.
#[derive(Clone)]
pub struct Config {
    /// Font metrics used for text layout.
    pub font: Arc<Mutex<AppFont>>,
    /// Text styles keyed by numeric style id.
    pub styles: Arc<Mutex<HashMap<usize, Style>>>,
    /// Color theme settings.
    pub theme: Arc<Mutex<ThemeSettings>>,
    /// Active key bindings.
    pub keymaps: Arc<Mutex<KeyMap>>,
}
impl Config {
    /// Builds a config around `font`, with empty styles, an empty keymap,
    /// and default theme settings.
    pub fn new(font: AppFont) -> Config {
        Config {
            font: Arc::new(Mutex::new(font)),
            styles: Arc::new(Mutex::new(HashMap::new())),
            // `Arc<Mutex<ThemeSettings>>` implements `Default`, so this
            // wraps `ThemeSettings::default()`.
            theme: Default::default(),
            keymaps: Arc::new(Mutex::new(KeyMap::new())),
        }
    }
    /// Registers (or replaces) the style associated with `style_id`.
    ///
    /// Panics if the styles mutex is poisoned (`lock().unwrap()`).
    pub fn insert_style(&self, style_id: usize, style: Style) {
        self.styles.lock().unwrap().insert(style_id, style);
    }
}
/// Builds a cairo transform matrix that scales uniformly by `scale`,
/// with no rotation, shear, or translation.
fn scale_matrix(scale: f64) -> Matrix {
    Matrix {
        xx: scale,
        yx: 0.0,
        xy: 0.0,
        yy: scale,
        x0: 0.0,
        y0: 0.0,
    }
}
/// Precomputed font metrics used for text layout.
#[derive(Clone)]
pub struct AppFont {
    // font_face: FontFace,
    // font: Box<Font + Send>,
    /// Horizontal advance of one glyph cell (measured from 'W').
    pub width: f64,
    /// Distance from the baseline to the top of the tallest glyph.
    pub ascent: f64,
    /// Distance from the baseline to the bottom of the lowest glyph.
    pub descent: f64,
    /// Extra vertical spacing added between lines.
    pub linespace: f64,
}
impl AppFont {
    /// Computes metrics for `family` rendered at `size` points, with
    /// `linespace` extra spacing between lines.
    ///
    /// Fix: the `family` and `size` parameters were previously ignored —
    /// the face was hard-coded to "Cascadia Code" at 13.0 — so callers
    /// could not actually choose a font. They are now honored. The
    /// leftover debug `println!` of the extents was also removed.
    pub fn new(family: &str, size: f64, linespace: f64) -> AppFont {
        let font_face = FontFace::toy_create(family, FontSlant::Normal, FontWeight::Normal);
        // Font matrix scales glyph space by the point size; CTM is identity.
        let font_matrix = scale_matrix(size);
        let ctm = scale_matrix(1.0);
        let options = FontOptions::default();
        let scaled_font = ScaledFont::new(&font_face, &font_matrix, &ctm, &options);
        let extents = scaled_font.extents();
        // Kept (underscored) so the piet backend still validates the face;
        // the `unwrap`s panic early if the font cannot be loaded.
        let _font = CairoText::new()
            .new_font_by_name(family, size)
            .unwrap()
            .build()
            .unwrap();
        AppFont {
            // font_face,
            // font: Box::new(font) as Box<Font + Send>,
            // Monospace assumption: the advance of 'W' is the cell width.
            width: scaled_font.text_extents("W").x_advance,
            ascent: extents.ascent,
            descent: extents.descent,
            linespace,
        }
    }
    /// Total height of one text line: ascent + descent + configured spacing.
    pub fn lineheight(&self) -> f64 {
        self.ascent + self.descent + self.linespace
    }
}
|
// svd2rust-generated reader/writer aliases for the TAMP CR1 register.
// Each `*_R` reads one enable bit; each `*_W` writes it at bit offset `O`.
// NOTE: internal tampers 10 and 14 have no aliases here — they are absent
// from this device's SVD description.
#[doc = "Register `CR1` reader"]
pub type R = crate::R<CR1_SPEC>;
#[doc = "Register `CR1` writer"]
pub type W = crate::W<CR1_SPEC>;
#[doc = "Field `TAMP1E` reader - Tamper detection on TAMP_IN1 enable"]
pub type TAMP1E_R = crate::BitReader;
#[doc = "Field `TAMP1E` writer - Tamper detection on TAMP_IN1 enable"]
pub type TAMP1E_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `TAMP2E` reader - Tamper detection on TAMP_IN2 enable"]
pub type TAMP2E_R = crate::BitReader;
#[doc = "Field `TAMP2E` writer - Tamper detection on TAMP_IN2 enable"]
pub type TAMP2E_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `ITAMP1E` reader - Internal tamper 1 enable"]
pub type ITAMP1E_R = crate::BitReader;
#[doc = "Field `ITAMP1E` writer - Internal tamper 1 enable"]
pub type ITAMP1E_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `ITAMP2E` reader - Internal tamper 2 enable"]
pub type ITAMP2E_R = crate::BitReader;
#[doc = "Field `ITAMP2E` writer - Internal tamper 2 enable"]
pub type ITAMP2E_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `ITAMP3E` reader - Internal tamper 3 enable"]
pub type ITAMP3E_R = crate::BitReader;
#[doc = "Field `ITAMP3E` writer - Internal tamper 3 enable"]
pub type ITAMP3E_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `ITAMP4E` reader - Internal tamper 4 enable"]
pub type ITAMP4E_R = crate::BitReader;
#[doc = "Field `ITAMP4E` writer - Internal tamper 4 enable"]
pub type ITAMP4E_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `ITAMP5E` reader - Internal tamper 5 enable"]
pub type ITAMP5E_R = crate::BitReader;
#[doc = "Field `ITAMP5E` writer - Internal tamper 5 enable"]
pub type ITAMP5E_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `ITAMP6E` reader - Internal tamper 6 enable"]
pub type ITAMP6E_R = crate::BitReader;
#[doc = "Field `ITAMP6E` writer - Internal tamper 6 enable"]
pub type ITAMP6E_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `ITAMP7E` reader - Internal tamper 7 enable"]
pub type ITAMP7E_R = crate::BitReader;
#[doc = "Field `ITAMP7E` writer - Internal tamper 7 enable"]
pub type ITAMP7E_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `ITAMP8E` reader - Internal tamper 8 enable"]
pub type ITAMP8E_R = crate::BitReader;
#[doc = "Field `ITAMP8E` writer - Internal tamper 8 enable"]
pub type ITAMP8E_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `ITAMP9E` reader - Internal tamper 9 enable"]
pub type ITAMP9E_R = crate::BitReader;
#[doc = "Field `ITAMP9E` writer - Internal tamper 9 enable"]
pub type ITAMP9E_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `ITAMP11E` reader - Internal tamper 11 enable"]
pub type ITAMP11E_R = crate::BitReader;
#[doc = "Field `ITAMP11E` writer - Internal tamper 11 enable"]
pub type ITAMP11E_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `ITAMP12E` reader - Internal tamper 12 enable"]
pub type ITAMP12E_R = crate::BitReader;
#[doc = "Field `ITAMP12E` writer - Internal tamper 12 enable"]
pub type ITAMP12E_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `ITAMP13E` reader - Internal tamper 13 enable"]
pub type ITAMP13E_R = crate::BitReader;
#[doc = "Field `ITAMP13E` writer - Internal tamper 13 enable"]
pub type ITAMP13E_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `ITAMP15E` reader - Internal tamper 15 enable"]
pub type ITAMP15E_R = crate::BitReader;
#[doc = "Field `ITAMP15E` writer - Internal tamper 15 enable"]
pub type ITAMP15E_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
// Read accessors: each extracts its enable bit from the cached register
// value (bit offsets match the `impl W` writers below).
impl R {
    #[doc = "Bit 0 - Tamper detection on TAMP_IN1 enable"]
    #[inline(always)]
    pub fn tamp1e(&self) -> TAMP1E_R {
        TAMP1E_R::new((self.bits & 1) != 0)
    }
    #[doc = "Bit 1 - Tamper detection on TAMP_IN2 enable"]
    #[inline(always)]
    pub fn tamp2e(&self) -> TAMP2E_R {
        TAMP2E_R::new(((self.bits >> 1) & 1) != 0)
    }
    #[doc = "Bit 16 - Internal tamper 1 enable"]
    #[inline(always)]
    pub fn itamp1e(&self) -> ITAMP1E_R {
        ITAMP1E_R::new(((self.bits >> 16) & 1) != 0)
    }
    #[doc = "Bit 17 - Internal tamper 2 enable"]
    #[inline(always)]
    pub fn itamp2e(&self) -> ITAMP2E_R {
        ITAMP2E_R::new(((self.bits >> 17) & 1) != 0)
    }
    #[doc = "Bit 18 - Internal tamper 3 enable"]
    #[inline(always)]
    pub fn itamp3e(&self) -> ITAMP3E_R {
        ITAMP3E_R::new(((self.bits >> 18) & 1) != 0)
    }
    #[doc = "Bit 19 - Internal tamper 4 enable"]
    #[inline(always)]
    pub fn itamp4e(&self) -> ITAMP4E_R {
        ITAMP4E_R::new(((self.bits >> 19) & 1) != 0)
    }
    #[doc = "Bit 20 - Internal tamper 5 enable"]
    #[inline(always)]
    pub fn itamp5e(&self) -> ITAMP5E_R {
        ITAMP5E_R::new(((self.bits >> 20) & 1) != 0)
    }
    #[doc = "Bit 21 - Internal tamper 6 enable"]
    #[inline(always)]
    pub fn itamp6e(&self) -> ITAMP6E_R {
        ITAMP6E_R::new(((self.bits >> 21) & 1) != 0)
    }
    #[doc = "Bit 22 - Internal tamper 7 enable"]
    #[inline(always)]
    pub fn itamp7e(&self) -> ITAMP7E_R {
        ITAMP7E_R::new(((self.bits >> 22) & 1) != 0)
    }
    #[doc = "Bit 23 - Internal tamper 8 enable"]
    #[inline(always)]
    pub fn itamp8e(&self) -> ITAMP8E_R {
        ITAMP8E_R::new(((self.bits >> 23) & 1) != 0)
    }
    #[doc = "Bit 24 - Internal tamper 9 enable"]
    #[inline(always)]
    pub fn itamp9e(&self) -> ITAMP9E_R {
        ITAMP9E_R::new(((self.bits >> 24) & 1) != 0)
    }
    #[doc = "Bit 26 - Internal tamper 11 enable"]
    #[inline(always)]
    pub fn itamp11e(&self) -> ITAMP11E_R {
        ITAMP11E_R::new(((self.bits >> 26) & 1) != 0)
    }
    #[doc = "Bit 27 - Internal tamper 12 enable"]
    #[inline(always)]
    pub fn itamp12e(&self) -> ITAMP12E_R {
        ITAMP12E_R::new(((self.bits >> 27) & 1) != 0)
    }
    #[doc = "Bit 28 - Internal tamper 13 enable"]
    #[inline(always)]
    pub fn itamp13e(&self) -> ITAMP13E_R {
        ITAMP13E_R::new(((self.bits >> 28) & 1) != 0)
    }
    #[doc = "Bit 30 - Internal tamper 15 enable"]
    #[inline(always)]
    pub fn itamp15e(&self) -> ITAMP15E_R {
        ITAMP15E_R::new(((self.bits >> 30) & 1) != 0)
    }
}
// Write proxies: each returns a bit writer whose const generic parameter
// is the bit offset (matching the shifts in `impl R` above).
impl W {
    #[doc = "Bit 0 - Tamper detection on TAMP_IN1 enable"]
    #[inline(always)]
    #[must_use]
    pub fn tamp1e(&mut self) -> TAMP1E_W<CR1_SPEC, 0> {
        TAMP1E_W::new(self)
    }
    #[doc = "Bit 1 - Tamper detection on TAMP_IN2 enable"]
    #[inline(always)]
    #[must_use]
    pub fn tamp2e(&mut self) -> TAMP2E_W<CR1_SPEC, 1> {
        TAMP2E_W::new(self)
    }
    #[doc = "Bit 16 - Internal tamper 1 enable"]
    #[inline(always)]
    #[must_use]
    pub fn itamp1e(&mut self) -> ITAMP1E_W<CR1_SPEC, 16> {
        ITAMP1E_W::new(self)
    }
    #[doc = "Bit 17 - Internal tamper 2 enable"]
    #[inline(always)]
    #[must_use]
    pub fn itamp2e(&mut self) -> ITAMP2E_W<CR1_SPEC, 17> {
        ITAMP2E_W::new(self)
    }
    #[doc = "Bit 18 - Internal tamper 3 enable"]
    #[inline(always)]
    #[must_use]
    pub fn itamp3e(&mut self) -> ITAMP3E_W<CR1_SPEC, 18> {
        ITAMP3E_W::new(self)
    }
    #[doc = "Bit 19 - Internal tamper 4 enable"]
    #[inline(always)]
    #[must_use]
    pub fn itamp4e(&mut self) -> ITAMP4E_W<CR1_SPEC, 19> {
        ITAMP4E_W::new(self)
    }
    #[doc = "Bit 20 - Internal tamper 5 enable"]
    #[inline(always)]
    #[must_use]
    pub fn itamp5e(&mut self) -> ITAMP5E_W<CR1_SPEC, 20> {
        ITAMP5E_W::new(self)
    }
    #[doc = "Bit 21 - Internal tamper 6 enable"]
    #[inline(always)]
    #[must_use]
    pub fn itamp6e(&mut self) -> ITAMP6E_W<CR1_SPEC, 21> {
        ITAMP6E_W::new(self)
    }
    #[doc = "Bit 22 - Internal tamper 7 enable"]
    #[inline(always)]
    #[must_use]
    pub fn itamp7e(&mut self) -> ITAMP7E_W<CR1_SPEC, 22> {
        ITAMP7E_W::new(self)
    }
    #[doc = "Bit 23 - Internal tamper 8 enable"]
    #[inline(always)]
    #[must_use]
    pub fn itamp8e(&mut self) -> ITAMP8E_W<CR1_SPEC, 23> {
        ITAMP8E_W::new(self)
    }
    #[doc = "Bit 24 - Internal tamper 9 enable"]
    #[inline(always)]
    #[must_use]
    pub fn itamp9e(&mut self) -> ITAMP9E_W<CR1_SPEC, 24> {
        ITAMP9E_W::new(self)
    }
    #[doc = "Bit 26 - Internal tamper 11 enable"]
    #[inline(always)]
    #[must_use]
    pub fn itamp11e(&mut self) -> ITAMP11E_W<CR1_SPEC, 26> {
        ITAMP11E_W::new(self)
    }
    #[doc = "Bit 27 - Internal tamper 12 enable"]
    #[inline(always)]
    #[must_use]
    pub fn itamp12e(&mut self) -> ITAMP12E_W<CR1_SPEC, 27> {
        ITAMP12E_W::new(self)
    }
    #[doc = "Bit 28 - Internal tamper 13 enable"]
    #[inline(always)]
    #[must_use]
    pub fn itamp13e(&mut self) -> ITAMP13E_W<CR1_SPEC, 28> {
        ITAMP13E_W::new(self)
    }
    #[doc = "Bit 30 - Internal tamper 15 enable"]
    #[inline(always)]
    #[must_use]
    pub fn itamp15e(&mut self) -> ITAMP15E_W<CR1_SPEC, 30> {
        ITAMP15E_W::new(self)
    }
    #[doc = "Writes raw bits to the register."]
    #[inline(always)]
    pub unsafe fn bits(&mut self, bits: u32) -> &mut Self {
        // Caller must ensure the value is valid for this register.
        self.bits = bits;
        self
    }
}
// Marker type tying the generic `R`/`W` proxies to this register's
// properties (32-bit, readable, writable, resets to 0).
#[doc = "TAMP control register 1\n\nYou can [`read`](crate::generic::Reg::read) this register and get [`cr1::R`](R). You can [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero) this register using [`cr1::W`](W). You can also [`modify`](crate::generic::Reg::modify) this register. See [API](https://docs.rs/svd2rust/#read--modify--write-api)."]
pub struct CR1_SPEC;
impl crate::RegisterSpec for CR1_SPEC {
    type Ux = u32;
}
#[doc = "`read()` method returns [`cr1::R`](R) reader structure"]
impl crate::Readable for CR1_SPEC {}
#[doc = "`write(|w| ..)` method takes [`cr1::W`](W) writer structure"]
impl crate::Writable for CR1_SPEC {
    // No fields require writing 0/1 to be left unmodified.
    const ZERO_TO_MODIFY_FIELDS_BITMAP: Self::Ux = 0;
    const ONE_TO_MODIFY_FIELDS_BITMAP: Self::Ux = 0;
}
#[doc = "`reset()` method sets CR1 to value 0"]
impl crate::Resettable for CR1_SPEC {
    const RESET_VALUE: Self::Ux = 0;
}
|
use super::{contains_return, BIND_INSTEAD_OF_MAP};
use crate::utils::{
in_macro, match_qpath, match_type, method_calls, multispan_sugg_with_applicability, paths, remove_blocks, snippet,
snippet_with_macro_callsite, span_lint_and_sugg, span_lint_and_then,
};
use if_chain::if_chain;
use rustc_errors::Applicability;
use rustc_hir as hir;
use rustc_hir::intravisit::{self, Visitor};
use rustc_lint::LateContext;
use rustc_middle::hir::map::Map;
use rustc_span::Span;
/// Lint case: `Option::and_then(|x| Some(y))` should be `Option::map(|x| y)`.
pub(crate) struct OptionAndThenSome;
impl BindInsteadOfMap for OptionAndThenSome {
    const TYPE_NAME: &'static str = "Option";
    const TYPE_QPATH: &'static [&'static str] = &paths::OPTION;
    const BAD_METHOD_NAME: &'static str = "and_then";
    const BAD_VARIANT_NAME: &'static str = "Some";
    const BAD_VARIANT_QPATH: &'static [&'static str] = &paths::OPTION_SOME;
    const GOOD_METHOD_NAME: &'static str = "map";
}
/// Lint case: `Result::and_then(|x| Ok(y))` should be `Result::map(|x| y)`.
pub(crate) struct ResultAndThenOk;
impl BindInsteadOfMap for ResultAndThenOk {
    const TYPE_NAME: &'static str = "Result";
    const TYPE_QPATH: &'static [&'static str] = &paths::RESULT;
    const BAD_METHOD_NAME: &'static str = "and_then";
    const BAD_VARIANT_NAME: &'static str = "Ok";
    const BAD_VARIANT_QPATH: &'static [&'static str] = &paths::RESULT_OK;
    const GOOD_METHOD_NAME: &'static str = "map";
}
/// Lint case: `Result::or_else(|e| Err(y))` should be `Result::map_err(|e| y)`.
pub(crate) struct ResultOrElseErrInfo;
impl BindInsteadOfMap for ResultOrElseErrInfo {
    const TYPE_NAME: &'static str = "Result";
    const TYPE_QPATH: &'static [&'static str] = &paths::RESULT;
    const BAD_METHOD_NAME: &'static str = "or_else";
    const BAD_VARIANT_NAME: &'static str = "Err";
    const BAD_VARIANT_QPATH: &'static [&'static str] = &paths::RESULT_ERR;
    const GOOD_METHOD_NAME: &'static str = "map_err";
}
/// Shared implementation of the `bind_instead_of_map` family of lints.
///
/// Each implementor (see the marker structs above) configures the type
/// being matched (`Option`/`Result`), the "bind"-style method
/// (`and_then`/`or_else`), the re-wrapping variant (`Some`/`Ok`/`Err`),
/// and the simpler method the suggestion should use (`map`/`map_err`).
pub(crate) trait BindInsteadOfMap {
    const TYPE_NAME: &'static str;
    const TYPE_QPATH: &'static [&'static str];
    const BAD_METHOD_NAME: &'static str;
    const BAD_VARIANT_NAME: &'static str;
    const BAD_VARIANT_QPATH: &'static [&'static str];
    const GOOD_METHOD_NAME: &'static str;
    // Message for the `_.and_then(Some)`-style no-op case.
    fn no_op_msg() -> String {
        format!(
            "using `{}.{}({})`, which is a no-op",
            Self::TYPE_NAME,
            Self::BAD_METHOD_NAME,
            Self::BAD_VARIANT_NAME
        )
    }
    // Message for the closure case that can be rewritten with GOOD_METHOD_NAME.
    fn lint_msg() -> String {
        format!(
            "using `{}.{}(|x| {}(y))`, which is more succinctly expressed as `{}(|x| y)`",
            Self::TYPE_NAME,
            Self::BAD_METHOD_NAME,
            Self::BAD_VARIANT_NAME,
            Self::GOOD_METHOD_NAME
        )
    }
    /// Fast path: the closure body is literally `Variant(expr)`, so the
    /// whole call can be machine-rewritten. Returns `true` if it linted.
    fn lint_closure_autofixable(
        cx: &LateContext<'_>,
        expr: &hir::Expr<'_>,
        args: &[hir::Expr<'_>],
        closure_expr: &hir::Expr<'_>,
        closure_args_span: Span,
    ) -> bool {
        if_chain! {
            if let hir::ExprKind::Call(ref some_expr, ref some_args) = closure_expr.kind;
            if let hir::ExprKind::Path(ref qpath) = some_expr.kind;
            if match_qpath(qpath, Self::BAD_VARIANT_QPATH);
            if some_args.len() == 1;
            then {
                let inner_expr = &some_args[0];
                // A `return` inside the wrapped expression changes control
                // flow, so the rewrite would not be equivalent.
                if contains_return(inner_expr) {
                    return false;
                }
                let some_inner_snip = if inner_expr.span.from_expansion() {
                    snippet_with_macro_callsite(cx, inner_expr.span, "_")
                } else {
                    snippet(cx, inner_expr.span, "_")
                };
                let closure_args_snip = snippet(cx, closure_args_span, "..");
                let option_snip = snippet(cx, args[0].span, "..");
                let note = format!("{}.{}({} {})", option_snip, Self::GOOD_METHOD_NAME, closure_args_snip, some_inner_snip);
                span_lint_and_sugg(
                    cx,
                    BIND_INSTEAD_OF_MAP,
                    expr.span,
                    Self::lint_msg().as_ref(),
                    "try this",
                    note,
                    Applicability::MachineApplicable,
                );
                true
            } else {
                false
            }
        }
    }
    /// Slow path: walk every return-position expression of the closure
    /// body; lint only if each one is `Variant(expr)` (multi-span fix).
    fn lint_closure(cx: &LateContext<'_>, expr: &hir::Expr<'_>, closure_expr: &hir::Expr<'_>) {
        let mut suggs = Vec::new();
        let can_sugg = find_all_ret_expressions(cx, closure_expr, |ret_expr| {
            if_chain! {
                if !in_macro(ret_expr.span);
                if let hir::ExprKind::Call(ref func_path, ref args) = ret_expr.kind;
                if let hir::ExprKind::Path(ref qpath) = func_path.kind;
                if match_qpath(qpath, Self::BAD_VARIANT_QPATH);
                if args.len() == 1;
                if !contains_return(&args[0]);
                then {
                    // Replace `Variant(inner)` with just `inner`.
                    suggs.push((ret_expr.span, args[0].span.source_callsite()));
                    true
                } else {
                    false
                }
            }
        });
        if can_sugg {
            span_lint_and_sugg(cx, BIND_INSTEAD_OF_MAP, expr.span, Self::lint_msg().as_ref(), |diag| {
                multispan_sugg_with_applicability(
                    diag,
                    "try this",
                    Applicability::MachineApplicable,
                    // First span renames the method itself, the rest strip
                    // the variant wrappers inside the closure body.
                    std::iter::once((*method_calls(expr, 1).2.get(0).unwrap(), Self::GOOD_METHOD_NAME.into())).chain(
                        suggs
                            .into_iter()
                            .map(|(span1, span2)| (span1, snippet(cx, span2, "_").into())),
                    ),
                )
            });
        }
    }
    /// Lint use of `_.and_then(|x| Some(y))` for `Option`s
    fn lint(cx: &LateContext<'_>, expr: &hir::Expr<'_>, args: &[hir::Expr<'_>]) {
        // Bail out unless the receiver actually has the configured type.
        if !match_type(cx, cx.tables().expr_ty(&args[0]), Self::TYPE_QPATH) {
            return;
        }
        match args[1].kind {
            hir::ExprKind::Closure(_, _, body_id, closure_args_span, _) => {
                let closure_body = cx.tcx.hir().body(body_id);
                let closure_expr = remove_blocks(&closure_body.value);
                if !Self::lint_closure_autofixable(cx, expr, args, closure_expr, closure_args_span) {
                    Self::lint_closure(cx, expr, closure_expr);
                }
            },
            // `_.and_then(Some)` case, which is no-op.
            hir::ExprKind::Path(ref qpath) if match_qpath(qpath, Self::BAD_VARIANT_QPATH) => {
                span_lint_and_sugg(
                    cx,
                    BIND_INSTEAD_OF_MAP,
                    expr.span,
                    Self::no_op_msg().as_ref(),
                    "use the expression directly",
                    snippet(cx, args[0].span, "..").into(),
                    Applicability::MachineApplicable,
                );
            },
            _ => {},
        }
    }
}
/// returns `true` if expr contains match expr desugared from try
fn contains_try(expr: &hir::Expr<'_>) -> bool {
    // Local visitor that stops descending once a `?`-desugared match is seen.
    struct TryFinder {
        found: bool,
    }
    impl<'hir> intravisit::Visitor<'hir> for TryFinder {
        type Map = Map<'hir>;
        fn nested_visit_map(&mut self) -> intravisit::NestedVisitorMap<Self::Map> {
            // Do not descend into nested bodies (closures, consts).
            intravisit::NestedVisitorMap::None
        }
        fn visit_expr(&mut self, expr: &'hir hir::Expr<'hir>) {
            if self.found {
                return;
            }
            match expr.kind {
                // `expr?` desugars to a match with `MatchSource::TryDesugar`.
                hir::ExprKind::Match(_, _, hir::MatchSource::TryDesugar) => self.found = true,
                _ => intravisit::walk_expr(self, expr),
            }
        }
    }
    let mut visitor = TryFinder { found: false };
    visitor.visit_expr(expr);
    visitor.found
}
/// Invokes `callback` on every expression in return position inside `expr`
/// (tail expressions, `return`s, match-arm bodies). Returns `true` only if
/// the callback accepted all of them and `expr` contains no `?` desugaring.
fn find_all_ret_expressions<'hir, F>(_cx: &LateContext<'_>, expr: &'hir hir::Expr<'hir>, callback: F) -> bool
where
    F: FnMut(&'hir hir::Expr<'hir>) -> bool,
{
    // Tracks whether we are currently inside a statement (where only
    // explicit `return`s count) vs. in tail/return position.
    struct RetFinder<F> {
        in_stmt: bool,
        failed: bool,
        cb: F,
    }
    // RAII guard that restores `in_stmt` when a scope is left.
    struct WithStmtGuarg<'a, F> {
        val: &'a mut RetFinder<F>,
        prev_in_stmt: bool,
    }
    impl<F> RetFinder<F> {
        fn inside_stmt(&mut self, in_stmt: bool) -> WithStmtGuarg<'_, F> {
            let prev_in_stmt = std::mem::replace(&mut self.in_stmt, in_stmt);
            WithStmtGuarg {
                val: self,
                prev_in_stmt,
            }
        }
    }
    impl<F> std::ops::Deref for WithStmtGuarg<'_, F> {
        type Target = RetFinder<F>;
        fn deref(&self) -> &Self::Target {
            self.val
        }
    }
    impl<F> std::ops::DerefMut for WithStmtGuarg<'_, F> {
        fn deref_mut(&mut self) -> &mut Self::Target {
            self.val
        }
    }
    impl<F> Drop for WithStmtGuarg<'_, F> {
        fn drop(&mut self) {
            // Restore the saved flag on scope exit.
            self.val.in_stmt = self.prev_in_stmt;
        }
    }
    impl<'hir, F: FnMut(&'hir hir::Expr<'hir>) -> bool> intravisit::Visitor<'hir> for RetFinder<F> {
        type Map = Map<'hir>;
        fn nested_visit_map(&mut self) -> intravisit::NestedVisitorMap<Self::Map> {
            intravisit::NestedVisitorMap::None
        }
        fn visit_stmt(&mut self, stmt: &'hir hir::Stmt<'_>) {
            // Statements are not in return position.
            intravisit::walk_stmt(&mut *self.inside_stmt(true), stmt)
        }
        fn visit_expr(&mut self, expr: &'hir hir::Expr<'_>) {
            if self.failed {
                return;
            }
            if self.in_stmt {
                match expr.kind {
                    // `return e;` inside a statement: `e` is in return position.
                    hir::ExprKind::Ret(Some(expr)) => self.inside_stmt(false).visit_expr(expr),
                    _ => intravisit::walk_expr(self, expr),
                }
            } else {
                match expr.kind {
                    hir::ExprKind::Match(cond, arms, _) => {
                        // The scrutinee is not in return position; arm
                        // bodies are.
                        self.inside_stmt(true).visit_expr(cond);
                        for arm in arms {
                            self.visit_expr(arm.body);
                        }
                    },
                    hir::ExprKind::Block(..) => intravisit::walk_expr(self, expr),
                    hir::ExprKind::Ret(Some(expr)) => self.visit_expr(expr),
                    // Leaf in return position: ask the callback.
                    _ => self.failed |= !(self.cb)(expr),
                }
            }
        }
    }
    // `?` hides early returns the visitor cannot model, so bail out.
    !contains_try(expr) && {
        let mut ret_finder = RetFinder {
            in_stmt: false,
            failed: false,
            cb: callback,
        };
        ret_finder.visit_expr(expr);
        !ret_finder.failed
    }
}
|
/// Functions to render cursors on a graphics backend independently from its rendering technique.
///
/// In most cases this will be the fastest available implementation utilizing hardware composing
/// where possible. This may however be quite restrictive in terms of supported formats.
///
/// For those reasons you may always choose to render your cursor(s) (partially) in software instead.
pub trait CursorBackend<'a> {
    /// Format representing the image drawn for the cursor.
    type CursorFormat: 'a;
    /// Error the underlying backend throws if operations fail
    type Error;
    /// Sets the cursor position and therefore updates the drawn cursor's position.
    /// Useful as well for e.g. pointer wrapping.
    ///
    /// Not guaranteed to be supported on every backend. The result usually
    /// depends on the backend, the cursor might be "owned" by another more privileged
    /// compositor (running nested).
    ///
    /// In these cases setting the position is actually not required, as movement is done
    /// by the higher compositor and not by the backend. It is still good practice to update
    /// the position after every received event, but don't rely on pointer wrapping working.
    ///
    fn set_cursor_position(&self, x: u32, y: u32) -> Result<(), Self::Error>;
    /// Set the cursor drawn on the [`CursorBackend`].
    ///
    /// The format is entirely dictated by the concrete implementation and might range
    /// from raw image buffers over a fixed list of possible cursor types to simply the
    /// void type () to represent no possible customization of the cursor itself.
    fn set_cursor_representation<'b>(
        &'b self,
        cursor: Self::CursorFormat,
        hotspot: (u32, u32),
    ) -> Result<(), Self::Error>
    where
        'a: 'b;
}
|
use enum_primitive::FromPrimitive;
use fal::{read_u32, read_u64};
use crate::{
btree::BTreeNode, read_block, reaper::ReaperPhys, spacemanager::SpacemanagerPhys,
superblock::NxSuperblock, ObjPhys, ObjectIdentifier, ObjectType, ObjectTypeAndFlags,
};
#[derive(Debug)]
/// One checkpoint mapping record, tying an object identifier to the
/// physical address it is stored at (see [`CheckpointMapping::parse`] for
/// the 40-byte on-disk layout).
pub struct CheckpointMapping {
    // Raw object type plus flag bits.
    pub ty: ObjectTypeAndFlags,
    // Object subtype.
    pub subtype: ObjectType,
    // Size field of the record (presumably bytes — TODO confirm against spec).
    pub size: u32,
    // Reserved/padding field, stored verbatim.
    pub padding: u32,
    // Filesystem (volume) object identifier.
    pub fs_oid: ObjectIdentifier,
    // Object identifier being mapped.
    pub oid: ObjectIdentifier,
    // Physical address the object is mapped to.
    pub paddr: ObjectIdentifier,
}
#[derive(Debug)]
/// On-disk checkpoint-map object: a common object header followed by
/// `count` fixed-size [`CheckpointMapping`] records.
pub struct CheckpointMappingPhys {
    /// Common object header.
    pub header: ObjPhys,
    /// Raw flags field, stored verbatim.
    pub flags: u32,
    /// Number of entries in `mappings`.
    pub count: u32,
    /// The parsed mapping records.
    pub mappings: Box<[CheckpointMapping]>,
}
impl CheckpointMapping {
    /// Size in bytes of one on-disk mapping record.
    pub const LEN: usize = 40;
    /// Parse one mapping from the first [`Self::LEN`] bytes of `bytes`.
    ///
    /// # Panics
    /// Panics if `bytes` is shorter than [`Self::LEN`], or if the subtype
    /// field does not correspond to a known [`ObjectType`] (the `unwrap`
    /// below).
    pub fn parse(bytes: &[u8]) -> Self {
        Self {
            ty: ObjectTypeAndFlags::from_raw(read_u32(bytes, 0)),
            subtype: ObjectType::from_u32(read_u32(bytes, 4)).unwrap(),
            size: read_u32(bytes, 8),
            padding: read_u32(bytes, 12),
            fs_oid: read_u64(bytes, 16).into(),
            oid: read_u64(bytes, 24).into(),
            paddr: read_u64(bytes, 32).into(),
        }
    }
}
impl CheckpointMappingPhys {
    /// Size in bytes of the fixed part of the object (header + flags + count).
    pub const BASE_LEN: usize = ObjPhys::LEN + 8;
    /// Parse a checkpoint-map object: the common header, the flags and count
    /// fields, then `count` consecutive [`CheckpointMapping`] records.
    ///
    /// # Panics
    /// Panics if `bytes` is too short for the advertised `count`, or if any
    /// record holds an unknown subtype (see [`CheckpointMapping::parse`]).
    pub fn parse(bytes: &[u8]) -> Self {
        let header = ObjPhys::parse(&bytes);
        let flags = read_u32(bytes, 32);
        let count = read_u32(bytes, 36);
        // Mapping records start right after the fixed header (offset 40) and
        // are CheckpointMapping::LEN (40) bytes each.
        // (The previous version also computed an unused total length here.)
        let mappings = (0..count as usize)
            .map(|i| CheckpointMapping::parse(&bytes[40 + i * 40..40 + (i + 1) * 40]))
            .collect();
        Self {
            header,
            flags,
            count,
            mappings,
        }
    }
}
#[derive(Debug)]
/// An entry read from the checkpoint descriptor area: either a container
/// superblock or a checkpoint-map object.
pub enum CheckpointDescAreaEntry {
    Superblock(NxSuperblock),
    Mapping(CheckpointMappingPhys),
}
#[derive(Debug)]
/// Any object that can be read from the checkpoint data area.
pub enum GenericObject {
    SpaceManager(SpacemanagerPhys),
    Reaper(ReaperPhys),
    BTreeNode(BTreeNode),
    /// Placeholder for object types that are not (yet) handled.
    Null,
}
impl GenericObject {
    /// The common object header of the wrapped object.
    ///
    /// # Panics
    /// Panics when called on [`GenericObject::Null`], which carries no header.
    pub fn header(&self) -> &ObjPhys {
        match self {
            Self::SpaceManager(s) => &s.header,
            Self::Reaper(r) => &r.header,
            Self::BTreeNode(n) => &n.header,
            // Was a bare `panic!()`; give the caller a useful diagnostic.
            Self::Null => panic!("GenericObject::Null has no object header"),
        }
    }
    /// Returns the inner value if this is a `SpaceManager`, `None` otherwise.
    pub fn as_spacemanager(&self) -> Option<&SpacemanagerPhys> {
        match self {
            Self::SpaceManager(s) => Some(s),
            _ => None,
        }
    }
    /// Returns the inner value if this is a `Reaper`, `None` otherwise.
    pub fn as_reaper(&self) -> Option<&ReaperPhys> {
        match self {
            Self::Reaper(r) => Some(r),
            _ => None,
        }
    }
    /// Returns the inner value if this is a `BTreeNode`, `None` otherwise.
    pub fn as_btree_node(&self) -> Option<&BTreeNode> {
        match self {
            Self::BTreeNode(n) => Some(n),
            _ => None,
        }
    }
}
impl CheckpointDescAreaEntry {
    /// Consume the entry, yielding the superblock if that is what it holds.
    pub fn into_superblock(self) -> Option<NxSuperblock> {
        if let Self::Superblock(superblock) = self {
            Some(superblock)
        } else {
            None
        }
    }
    /// Consume the entry, yielding the checkpoint mapping if that is what it holds.
    pub fn into_mapping(self) -> Option<CheckpointMappingPhys> {
        if let Self::Mapping(mapping) = self {
            Some(mapping)
        } else {
            None
        }
    }
}
/// Read and parse entry `index` of the checkpoint descriptor area.
///
/// # Panics
/// Panics if the block holds neither a superblock nor a checkpoint map.
pub fn read_from_desc_area<D: fal::Device>(
    device: &mut D,
    superblock: &NxSuperblock,
    index: u32,
) -> CheckpointDescAreaEntry {
    let block_bytes = read_block(
        superblock,
        device,
        superblock.chkpnt_desc_base + i64::from(index),
    );
    // Peek at the common object header to decide how to parse the block.
    let obj_phys = ObjPhys::parse(&block_bytes);
    match obj_phys.object_type.ty {
        ObjectType::NxSuperblock => {
            CheckpointDescAreaEntry::Superblock(NxSuperblock::parse(&block_bytes))
        }
        ObjectType::CheckpointMap => {
            CheckpointDescAreaEntry::Mapping(CheckpointMappingPhys::parse(&block_bytes))
        }
        other => panic!("Unexpected checkpoint desc area entry type: {:?}.", other),
    }
}
/// Read and parse entry `index` of the checkpoint data area.
///
/// Unknown object types yield `Some(GenericObject::Null)`; as written the
/// function never returns `None` (the `Option` leaves room for callers to
/// skip blocks in the future).
pub fn read_from_data_area<D: fal::Device>(
    device: &mut D,
    superblock: &NxSuperblock,
    index: u32,
) -> Option<GenericObject> {
    let block_bytes = read_block(
        superblock,
        device,
        superblock.chkpnt_data_base + i64::from(index),
    );
    // Dispatch on the object type stored in the common header.
    let obj_phys = ObjPhys::parse(&block_bytes);
    match obj_phys.object_type.ty {
        ObjectType::SpaceManager => Some(GenericObject::SpaceManager(SpacemanagerPhys::parse(
            &block_bytes,
        ))),
        ObjectType::NxReaper => Some(GenericObject::Reaper(ReaperPhys::parse(&block_bytes))),
        ObjectType::Btree | ObjectType::BtreeNode => {
            Some(GenericObject::BTreeNode(BTreeNode::parse(&block_bytes)))
        }
        _ => {
            // NOTE(review): leftover `dbg!` debug output for unhandled types —
            // consider switching to a logging macro before release.
            dbg!(index);
            dbg!(obj_phys.object_type.ty, obj_phys.object_subtype);
            Some(GenericObject::Null)
        }
    }
}
|
use crate::{
ast_types::{
ast_base::AstBase,
result::ResultExpression,
},
utils::Ops,
};
use serde::Serialize;
use std::any::Any;
/* WHILE BLOCK */
#[derive(Clone, Debug, Serialize)]
/// AST node for a `while` loop.
pub struct While {
    /// Statements forming the loop body.
    pub body: Vec<Box<dyn self::AstBase>>,
    /// Conditions controlling the loop (evaluated by the interpreter).
    pub conditions: Vec<ResultExpression>,
}
impl AstBase for While {
    /// Identifies this node as a while-definition to the generic AST machinery.
    fn get_type(&self) -> Ops {
        Ops::WhileDef
    }
    /// Downcast support: expose the concrete node as `&dyn Any`.
    fn as_self(&self) -> &dyn Any {
        self
    }
}
/// Constructor interface for [`While`] nodes.
pub trait WhileBase {
    /// Build a node from its conditions and body statements.
    fn new(conditions: Vec<ResultExpression>, body: Vec<Box<dyn self::AstBase>>) -> Self;
}
impl WhileBase for While {
    /// Build a `While` node from its conditions and body.
    fn new(conditions: Vec<ResultExpression>, body: Vec<Box<dyn self::AstBase>>) -> Self {
        Self { body, conditions }
    }
}
|
extern crate env_logger;
use rand::Rng;
use liars::agent;
use liars::play;
use liars::playexpert;
use liars::start;
#[tokio::main]
/// Command-line entry point: parses the CLI and dispatches to the `start`,
/// `agent`, `play` and `playexpert` subcommands.
///
/// Fixes over the previous revision: the panic/expect diagnostics now name
/// the correct argument and "Invalud" typos are corrected.
async fn main() {
    env_logger::init();
    use clap::{Arg, SubCommand};
    let app = clap::App::new("Liars lie")
        .subcommand(
            SubCommand::with_name("start")
                .about("Start a number of agents, generate file agents.conf")
                .arg(
                    Arg::with_name("value")
                        .long("value")
                        .possible_value("true")
                        .possible_value("false"),
                )
                .arg(
                    Arg::with_name("num-agents")
                        .long("num-agents")
                        .value_name("number")
                        .default_value("10")
                        // Reject anything that does not parse as a usize.
                        .validator(|s| {
                            s.parse::<usize>().map(|_| ()).map_err(|e| format!("{}", e))
                        }),
                )
                .arg(
                    Arg::with_name("liar-ratio")
                        .long("liar-ratio")
                        .value_name("ratio")
                        .default_value("0.1")
                        // Must be a float in the half-open interval [0, 0.5).
                        .validator(|s| match s.parse::<f64>() {
                            Err(e) => Err(format!("{}", e)),
                            Ok(v) if (0.0..0.5).contains(&v) => Ok(()),
                            Ok(v) => Err(format!("Expected a value in [0., 0.5[, got {}", v)),
                        }),
                ),
        )
        .subcommand(
            SubCommand::with_name("play")
                .about("Play a single round of 'guess the original value'")
                .arg(
                    Arg::with_name("agents")
                        .long("agents")
                        .value_name("FILE")
                        .default_value("agents.conf"),
                ),
        )
        .subcommand(
            SubCommand::with_name("agent")
                .about("Start a single agent, print its port number on stdout")
                .arg(
                    Arg::with_name("value")
                        .long("value")
                        .takes_value(true)
                        .possible_value("true")
                        .possible_value("false")
                        .required(true),
                ),
        )
        .subcommand(
            SubCommand::with_name("playexpert")
                .about("Play a single round of 'guess the original value', only talking to some agents")
                .arg(
                    Arg::with_name("agents")
                        .long("agents")
                        .value_name("FILE")
                        .default_value("agents.conf")
                )
                .arg(
                    Arg::with_name("liar-ratio")
                        .long("liar-ratio")
                        .value_name("ratio")
                        .default_value("0.1")
                        .validator(|s| match s.parse::<f64>() {
                            Err(e) => Err(format!("{}", e)),
                            Ok(v) if (0.0..0.5).contains(&v) => Ok(()),
                            Ok(v) => Err(format!("Expected a value in [0., 0.5[, got {}", v)),
                        }),
                ),
        );
    match app.get_matches().subcommand() {
        ("start", Some(args)) => {
            let start_args = start::StartArgs {
                // When no --value was given, pick true/false uniformly at random.
                value: match args.value_of("value") {
                    None => rand::thread_rng().gen_bool(0.5),
                    Some(option) => option.parse::<bool>().expect("Invalid value: value"),
                },
                num_agents: args
                    .value_of("num-agents")
                    .expect("Missing arg: num-agents")
                    .parse::<usize>()
                    .expect("Invalid value: num-agents"),
                liar_ratio: args
                    .value_of("liar-ratio")
                    .expect("Missing arg: liar-ratio")
                    .parse::<f64>()
                    .expect("Invalid value: liar-ratio"),
                exe: std::env::current_exe().expect("Could not get executable"),
            };
            // The clap validator already enforced this; keep as a defensive invariant.
            assert!(start_args.liar_ratio >= 0.);
            assert!(start_args.liar_ratio < 0.5);
            start::start(&start_args).await;
        }
        ("agent", Some(args)) => {
            let agent_args = agent::AgentArgs {
                value: match args.value_of("value").expect("Missing arg: value") {
                    "true" => true,
                    "false" => false,
                    v => panic!("Invalid boolean {}", v),
                },
            };
            // `agent` runs forever serving requests.
            agent::agent(&agent_args).await;
            unreachable!();
        }
        ("play", Some(args)) => {
            let play_args = play::PlayArgs {
                path: args
                    .value_of("agents")
                    .expect("Missing arg: agents")
                    .parse::<std::path::PathBuf>()
                    .expect("Invalid value: agents"),
            };
            play::play(&play_args).await;
        }
        ("playexpert", Some(args)) => {
            let play_args = playexpert::PlayExpertArgs {
                path: args
                    .value_of("agents")
                    .expect("Missing arg: agents")
                    .parse::<std::path::PathBuf>()
                    .expect("Invalid value: agents"),
                liar_ratio: args
                    .value_of("liar-ratio")
                    .expect("Missing arg: liar-ratio")
                    .parse::<f64>()
                    .expect("Invalid value: liar-ratio"),
            };
            playexpert::play(&play_args).await;
        }
        _ => {
            panic!("Missing command");
        }
    }
}
|
// Copyright 2019-2020 Parity Technologies (UK) Ltd.
// This file is part of Substrate.
// Substrate is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Substrate is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Substrate. If not, see <http://www.gnu.org/licenses/>.
//! Global cache state.
use crate::{stats::StateUsageStats, utils::Meta};
use hash_db::Hasher;
use linked_hash_map::{Entry, LinkedHashMap};
use log::trace;
use parking_lot::{Mutex, RwLock, RwLockUpgradableReadGuard};
use sp_core::hexdisplay::HexDisplay;
use sp_core::storage::ChildInfo;
use sp_runtime::traits::{Block as BlockT, HashFor, Header, NumberFor};
use sp_state_machine::{
backend::Backend as StateBackend, ChildStorageCollection, StorageCollection, StorageKey,
StorageValue, TrieBackend,
};
use std::collections::{HashMap, HashSet, VecDeque};
use std::hash::Hash as StdHash;
use std::sync::Arc;
/// Maximum number of recent blocks whose modification sets are tracked.
const STATE_CACHE_BLOCKS: usize = 12;
/// Key of a child-trie entry: (child storage key, key inside the child trie).
type ChildStorageKey = (Vec<u8>, Vec<u8>);
/// Shared canonical state cache.
pub struct Cache<B: BlockT> {
    /// Storage cache. `None` indicates that key is known to be missing.
    lru_storage: LRUMap<StorageKey, Option<StorageValue>>,
    /// Storage hashes cache. `None` indicates that key is known to be missing.
    lru_hashes: LRUMap<StorageKey, OptionHOut<B::Hash>>,
    /// Storage cache for child trie. `None` indicates that key is known to be missing.
    lru_child_storage: LRUMap<ChildStorageKey, Option<StorageValue>>,
    /// Information on the modifications in recently committed blocks; specifically which keys
    /// changed in which block. Ordered by block number. Bounded to
    /// `STATE_CACHE_BLOCKS` entries by `CacheChanges::sync_cache`.
    modifications: VecDeque<BlockChanges<B::Header>>,
}
/// LRU map with byte-size-based eviction:
/// (entries in LRU order, current estimated size in bytes, size limit in bytes).
struct LRUMap<K, V>(LinkedHashMap<K, V>, usize, usize);
/// Internal trait similar to `heapsize` but using
/// a simple estimation.
///
/// This should not be made public, it is an implementation
/// detail trait. If it needs to become public please
/// consider using `malloc_size_of`.
trait EstimateSize {
    /// Return a size estimation of the additional size needed
    /// to cache this struct (in bytes).
    fn estimate_size(&self) -> usize;
}
impl EstimateSize for Vec<u8> {
    fn estimate_size(&self) -> usize {
        // Use the allocation size, not `len()`: capacity is what is actually held.
        self.capacity()
    }
}
impl EstimateSize for Option<Vec<u8>> {
    /// Allocation size of the inner buffer, or zero for `None`.
    fn estimate_size(&self) -> usize {
        match self {
            Some(buffer) => buffer.capacity(),
            None => 0,
        }
    }
}
/// Newtype giving optional hash outputs an [`EstimateSize`] impl.
struct OptionHOut<T: AsRef<[u8]>>(Option<T>);
impl<T: AsRef<[u8]>> EstimateSize for OptionHOut<T> {
    fn estimate_size(&self) -> usize {
        // capacity would be better
        self.0.as_ref().map(|v| v.as_ref().len()).unwrap_or(0)
    }
}
impl<T: EstimateSize> EstimateSize for (T, T) {
    fn estimate_size(&self) -> usize {
        // A pair costs the sum of its halves (used for child-storage keys).
        self.0.estimate_size() + self.1.estimate_size()
    }
}
impl<K: EstimateSize + Eq + StdHash, V: EstimateSize> LRUMap<K, V> {
    /// Remove `k`, crediting the estimated size of both key and value back
    /// to the accounted used size.
    fn remove(&mut self, k: &K) {
        let map = &mut self.0;
        let storage_used_size = &mut self.1;
        if let Some(v) = map.remove(k) {
            *storage_used_size -= k.estimate_size();
            *storage_used_size -= v.estimate_size();
        }
    }
    /// Insert (or replace) `k -> v`, then evict least-recently-used entries
    /// until the accounted size fits the limit again.
    fn add(&mut self, k: K, v: V) {
        let lmap = &mut self.0;
        let storage_used_size = &mut self.1;
        let limit = self.2;
        let klen = k.estimate_size();
        // Account the new value up front; key size is added only for new entries.
        *storage_used_size += v.estimate_size();
        // TODO assert k v size fit into limit?? to avoid insert remove?
        match lmap.entry(k) {
            Entry::Occupied(mut entry) => {
                // note that in this case we are not running pure lru as
                // it would require to remove first
                *storage_used_size -= entry.get().estimate_size();
                entry.insert(v);
            },
            Entry::Vacant(entry) => {
                *storage_used_size += klen;
                entry.insert(v);
            },
        };
        // Evict from the front (least recently used) until within limit.
        while *storage_used_size > limit {
            if let Some((k, v)) = lmap.pop_front() {
                *storage_used_size -= k.estimate_size();
                *storage_used_size -= v.estimate_size();
            } else {
                // can happen fairly often as we get value from multiple lru
                // and only remove from a single lru
                break
            }
        }
    }
    /// Look up `k`, refreshing its LRU position on a hit.
    fn get<Q: ?Sized>(&mut self, k: &Q) -> Option<&mut V>
    where
        K: std::borrow::Borrow<Q>,
        Q: StdHash + Eq,
    {
        self.0.get_refresh(k)
    }
    /// Currently accounted size in bytes.
    fn used_size(&self) -> usize {
        self.1
    }
    /// Drop all entries and reset the size accounting.
    fn clear(&mut self) {
        self.0.clear();
        self.1 = 0;
    }
}
impl<B: BlockT> Cache<B> {
    /// Returns the used memory size of the storage cache in bytes.
    pub fn used_storage_cache_size(&self) -> usize {
        self.lru_storage.used_size() + self.lru_child_storage.used_size()
        // ignore small hashes storage and self.lru_hashes.used_size()
    }
    /// Synchronize the shared cache with the best block state.
    ///
    /// This function updates the shared cache by removing entries
    /// that are invalidated by chain reorganization. It should be called
    /// externally when chain reorg happens without importing a new block.
    pub fn sync(&mut self, enacted: &[B::Hash], retracted: &[B::Hash]) {
        trace!("Syncing shared cache, enacted = {:?}, retracted = {:?}", enacted, retracted);
        // Purge changes from re-enacted and retracted blocks.
        // `clear` becomes true as soon as any block is unknown to `modifications`,
        // in which case we cannot invalidate selectively and wipe everything.
        let mut clear = false;
        for block in enacted {
            clear = clear || {
                if let Some(m) = self.modifications.iter_mut().find(|m| &m.hash == block) {
                    trace!("Reverting enacted block {:?}", block);
                    m.is_canon = true;
                    for a in &m.storage {
                        trace!("Reverting enacted key {:?}", HexDisplay::from(a));
                        self.lru_storage.remove(a);
                    }
                    for a in &m.child_storage {
                        trace!("Reverting enacted child key {:?}", a);
                        self.lru_child_storage.remove(a);
                    }
                    false
                } else {
                    true
                }
            };
        }
        for block in retracted {
            clear = clear || {
                if let Some(m) = self.modifications.iter_mut().find(|m| &m.hash == block) {
                    trace!("Retracting block {:?}", block);
                    m.is_canon = false;
                    for a in &m.storage {
                        trace!("Retracted key {:?}", HexDisplay::from(a));
                        self.lru_storage.remove(a);
                    }
                    for a in &m.child_storage {
                        trace!("Retracted child key {:?}", a);
                        self.lru_child_storage.remove(a);
                    }
                    false
                } else {
                    true
                }
            };
        }
        if clear {
            // We don't know anything about the block; clear everything
            trace!("Wiping cache");
            self.lru_storage.clear();
            self.lru_child_storage.clear();
            self.lru_hashes.clear();
            self.modifications.clear();
        }
    }
}
/// Shared canonical state cache, protected by a mutex.
pub type SharedCache<B> = Arc<Mutex<Cache<B>>>;
/// Fixed LRU storage size for hashes (small, 64 KiB).
const FIX_LRU_HASH_SIZE: usize = 65_536;
/// Create a new shared cache instance with given max memory usage.
pub fn new_shared_cache<B: BlockT>(
shared_cache_size: usize,
child_ratio: (usize, usize),
) -> SharedCache<B> {
let top = child_ratio.1.saturating_sub(child_ratio.0);
Arc::new(Mutex::new(Cache {
lru_storage: LRUMap(LinkedHashMap::new(), 0, shared_cache_size * top / child_ratio.1),
lru_hashes: LRUMap(LinkedHashMap::new(), 0, FIX_LRU_HASH_SIZE),
lru_child_storage: LRUMap(
LinkedHashMap::new(),
0,
shared_cache_size * child_ratio.0 / child_ratio.1,
),
modifications: VecDeque::new(),
}))
}
#[derive(Debug)]
/// Accumulates a list of storage changed in a block.
struct BlockChanges<B: Header> {
    /// Block number.
    number: B::Number,
    /// Block hash.
    hash: B::Hash,
    /// Parent block hash.
    parent: B::Hash,
    /// A set of modified storage keys.
    storage: HashSet<StorageKey>,
    /// A set of modified child storage keys.
    child_storage: HashSet<ChildStorageKey>,
    /// Block is part of the canonical chain.
    is_canon: bool,
}
/// Cached values specific to a state.
///
/// Filled on cache misses; drained into the shared cache by
/// `CacheChanges::sync_cache` when committing on the best chain.
struct LocalCache<H: Hasher> {
    /// Storage cache.
    ///
    /// `None` indicates that key is known to be missing.
    storage: HashMap<StorageKey, Option<StorageValue>>,
    /// Storage hashes cache.
    ///
    /// `None` indicates that key is known to be missing.
    hashes: HashMap<StorageKey, Option<H::Out>>,
    /// Child storage cache.
    ///
    /// `None` indicates that key is known to be missing.
    child_storage: HashMap<ChildStorageKey, Option<StorageValue>>,
}
/// Cache changes.
pub struct CacheChanges<B: BlockT> {
    /// Shared canonical state cache.
    shared_cache: SharedCache<B>,
    /// Local cache of values for this state.
    local_cache: RwLock<LocalCache<HashFor<B>>>,
    /// Hash of the block on top of which this instance was created or
    /// `None` if cache is disabled
    pub parent_hash: Option<B::Hash>,
}
/// State cache abstraction.
///
/// Manages shared global state cache which reflects the canonical
/// state as it is on the disk.
///
/// An instance of `CachingState` may be created as canonical or not.
/// For canonical instances local cache is accumulated and applied
/// in `sync_cache` along with the change overlay.
/// For non-canonical clones local cache and changes are dropped.
pub struct CachingState<S, B: BlockT> {
    /// Usage statistics
    usage: StateUsageStats,
    /// State machine registered stats
    overlay_stats: sp_state_machine::StateMachineStats,
    /// Backing state.
    state: S,
    /// Cache data.
    cache: CacheChanges<B>,
}
impl<S, B: BlockT> std::fmt::Debug for CachingState<S, B> {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        // Identify the state by the block it was created on top of.
        write!(f, "Block {:?}", self.cache.parent_hash)
    }
}
impl<B: BlockT> CacheChanges<B> {
    /// Propagate local cache into the shared cache and synchronize
    /// the shared cache with the best block state.
    ///
    /// This function updates the shared cache by removing entries
    /// that are invalidated by chain reorganization. `sync_cache`
    /// should be called after the block has been committed and the
    /// blockchain route has been calculated.
    pub fn sync_cache(
        &mut self,
        enacted: &[B::Hash],
        retracted: &[B::Hash],
        changes: StorageCollection,
        child_changes: ChildStorageCollection,
        commit_hash: Option<B::Hash>,
        commit_number: Option<NumberFor<B>>,
        is_best: bool,
    ) {
        let mut cache = self.shared_cache.lock();
        trace!(
            "Syncing cache, id = (#{:?}, {:?}), parent={:?}, best={}",
            commit_number,
            commit_hash,
            self.parent_hash,
            is_best,
        );
        let cache = &mut *cache;
        // Filter out committing block if any.
        let enacted: Vec<_> = enacted
            .iter()
            .filter(|h| commit_hash.as_ref().map_or(true, |p| *h != p))
            .cloned()
            .collect();
        cache.sync(&enacted, retracted);
        // Propagate cache only if committing on top of the latest canonical state
        // blocks are ordered by number and only one block with a given number is marked as
        // canonical (contributed to canonical state cache)
        // (was `if let Some(_) = ...` — clippy::redundant_pattern_matching)
        if self.parent_hash.is_some() {
            let mut local_cache = self.local_cache.write();
            if is_best {
                trace!(
                    "Committing {} local, {} hashes, {} modified root entries, {} modified child entries",
                    local_cache.storage.len(),
                    local_cache.hashes.len(),
                    changes.len(),
                    child_changes.iter().map(|v|v.1.len()).sum::<usize>(),
                );
                // Drain the per-state local caches into the shared LRUs.
                for (k, v) in local_cache.storage.drain() {
                    cache.lru_storage.add(k, v);
                }
                for (k, v) in local_cache.child_storage.drain() {
                    cache.lru_child_storage.add(k, v);
                }
                for (k, v) in local_cache.hashes.drain() {
                    cache.lru_hashes.add(k, OptionHOut(v));
                }
            }
        }
        if let (Some(ref number), Some(ref hash), Some(ref parent)) =
            (commit_number, commit_hash, self.parent_hash)
        {
            // Cap the modification history at STATE_CACHE_BLOCKS entries.
            if cache.modifications.len() == STATE_CACHE_BLOCKS {
                cache.modifications.pop_back();
            }
            let mut modifications = HashSet::new();
            let mut child_modifications = HashSet::new();
            child_changes.into_iter().for_each(|(sk, changes)| {
                for (k, v) in changes.into_iter() {
                    let k = (sk.clone(), k);
                    if is_best {
                        cache.lru_child_storage.add(k.clone(), v);
                    }
                    child_modifications.insert(k);
                }
            });
            for (k, v) in changes.into_iter() {
                if is_best {
                    // The cached hash for a modified key is now stale.
                    cache.lru_hashes.remove(&k);
                    cache.lru_storage.add(k.clone(), v);
                }
                modifications.insert(k);
            }
            // Save modified storage. These are ordered by the block number in reverse.
            let block_changes = BlockChanges {
                storage: modifications,
                child_storage: child_modifications,
                number: *number,
                hash: hash.clone(),
                is_canon: is_best,
                parent: parent.clone(),
            };
            let insert_at = cache
                .modifications
                .iter()
                .enumerate()
                .find(|(_, m)| m.number < *number)
                .map(|(i, _)| i);
            trace!("Inserting modifications at {:?}", insert_at);
            if let Some(insert_at) = insert_at {
                cache.modifications.insert(insert_at, block_changes);
            } else {
                cache.modifications.push_back(block_changes);
            }
        }
    }
}
impl<S: StateBackend<HashFor<B>>, B: BlockT> CachingState<S, B> {
    /// Create a new instance wrapping generic State and shared cache.
    pub(crate) fn new(
        state: S,
        shared_cache: SharedCache<B>,
        parent_hash: Option<B::Hash>,
    ) -> Self {
        CachingState {
            usage: StateUsageStats::new(),
            overlay_stats: sp_state_machine::StateMachineStats::default(),
            state,
            cache: CacheChanges {
                shared_cache,
                local_cache: RwLock::new(LocalCache {
                    storage: Default::default(),
                    hashes: Default::default(),
                    child_storage: Default::default(),
                }),
                parent_hash,
            },
        }
    }
    /// Check if the key can be returned from cache by matching current block parent hash against
    /// canonical state and filtering out entries modified in later blocks.
    fn is_allowed(
        key: Option<&[u8]>,
        child_key: Option<&ChildStorageKey>,
        parent_hash: &Option<B::Hash>,
        modifications: &VecDeque<BlockChanges<B::Header>>,
    ) -> bool {
        // Without a parent hash the cache is effectively disabled.
        let mut parent = match *parent_hash {
            None => {
                trace!(
                    "Cache lookup skipped for {:?}: no parent hash",
                    key.as_ref().map(HexDisplay::from)
                );
                return false
            },
            Some(ref parent) => parent,
        };
        // Ignore all storage entries modified in later blocks.
        // Modifications contains block ordered by the number
        // We search for our parent in that list first and then for
        // all its parents until we hit the canonical block,
        // checking against all the intermediate modifications.
        for m in modifications {
            if &m.hash == parent {
                if m.is_canon {
                    // Reached a canonical ancestor with no conflicting
                    // modification along the way: the cache entry is usable.
                    return true
                }
                parent = &m.parent;
            }
            if let Some(key) = key {
                if m.storage.contains(key) {
                    trace!(
                        "Cache lookup skipped for {:?}: modified in a later block",
                        HexDisplay::from(&key)
                    );
                    return false
                }
            }
            if let Some(child_key) = child_key {
                if m.child_storage.contains(child_key) {
                    trace!("Cache lookup skipped for {:?}: modified in a later block", child_key);
                    return false
                }
            }
        }
        // Walked the whole modification list without meeting a canonical ancestor.
        trace!(
            "Cache lookup skipped for {:?}: parent hash is unknown",
            key.as_ref().map(HexDisplay::from),
        );
        false
    }
}
impl<S: StateBackend<HashFor<B>>, B: BlockT> StateBackend<HashFor<B>> for CachingState<S, B> {
    type Error = S::Error;
    type Transaction = S::Transaction;
    type TrieBackendStorage = S::TrieBackendStorage;
    /// Read a top-level storage value: local cache first, then the shared
    /// cache (when valid for this fork), then the backing state.
    fn storage(&self, key: &[u8]) -> Result<Option<Vec<u8>>, Self::Error> {
        let local_cache = self.cache.local_cache.upgradable_read();
        // Note that local cache makes that lru is not refreshed
        if let Some(entry) = local_cache.storage.get(key).cloned() {
            trace!("Found in local cache: {:?}", HexDisplay::from(&key));
            self.usage.tally_key_read(key, entry.as_ref(), true);
            return Ok(entry)
        }
        let mut cache = self.cache.shared_cache.lock();
        // Only trust the shared cache if the entry was not modified in a
        // block later than our parent.
        if Self::is_allowed(Some(key), None, &self.cache.parent_hash, &cache.modifications) {
            if let Some(entry) = cache.lru_storage.get(key).map(|a| a.clone()) {
                trace!("Found in shared cache: {:?}", HexDisplay::from(&key));
                self.usage.tally_key_read(key, entry.as_ref(), true);
                return Ok(entry)
            }
        }
        trace!("Cache miss: {:?}", HexDisplay::from(&key));
        let value = self.state.storage(key)?;
        // Remember the result (including `None`) in the local cache.
        RwLockUpgradableReadGuard::upgrade(local_cache).storage.insert(key.to_vec(), value.clone());
        self.usage.tally_key_read(key, value.as_ref(), false);
        Ok(value)
    }
    /// Like `storage`, but for the storage hash; same two-level lookup.
    fn storage_hash(&self, key: &[u8]) -> Result<Option<B::Hash>, Self::Error> {
        let local_cache = self.cache.local_cache.upgradable_read();
        if let Some(entry) = local_cache.hashes.get(key).cloned() {
            trace!("Found hash in local cache: {:?}", HexDisplay::from(&key));
            return Ok(entry)
        }
        let mut cache = self.cache.shared_cache.lock();
        if Self::is_allowed(Some(key), None, &self.cache.parent_hash, &cache.modifications) {
            if let Some(entry) = cache.lru_hashes.get(key).map(|a| a.0.clone()) {
                trace!("Found hash in shared cache: {:?}", HexDisplay::from(&key));
                return Ok(entry)
            }
        }
        trace!("Cache hash miss: {:?}", HexDisplay::from(&key));
        let hash = self.state.storage_hash(key)?;
        RwLockUpgradableReadGuard::upgrade(local_cache).hashes.insert(key.to_vec(), hash);
        Ok(hash)
    }
    /// Read a child-trie value; keyed by (child storage key, key).
    fn child_storage(
        &self,
        child_info: &ChildInfo,
        key: &[u8],
    ) -> Result<Option<Vec<u8>>, Self::Error> {
        let key = (child_info.storage_key().to_vec(), key.to_vec());
        let local_cache = self.cache.local_cache.upgradable_read();
        if let Some(entry) = local_cache.child_storage.get(&key).cloned() {
            trace!("Found in local cache: {:?}", key);
            return Ok(self.usage.tally_child_key_read(&key, entry, true))
        }
        let mut cache = self.cache.shared_cache.lock();
        if Self::is_allowed(None, Some(&key), &self.cache.parent_hash, &cache.modifications) {
            if let Some(entry) = cache.lru_child_storage.get(&key).map(|a| a.clone()) {
                trace!("Found in shared cache: {:?}", key);
                return Ok(self.usage.tally_child_key_read(&key, entry, true))
            }
        }
        trace!("Cache miss: {:?}", key);
        let value = self.state.child_storage(child_info, &key.1[..])?;
        // just pass it through the usage counter
        let value = self.usage.tally_child_key_read(&key, value, false);
        RwLockUpgradableReadGuard::upgrade(local_cache).child_storage.insert(key, value.clone());
        Ok(value)
    }
    // Existence checks reuse the cached `storage` path.
    fn exists_storage(&self, key: &[u8]) -> Result<bool, Self::Error> {
        Ok(self.storage(key)?.is_some())
    }
    // The remaining operations are uncached and delegate to the backing state.
    fn exists_child_storage(
        &self,
        child_info: &ChildInfo,
        key: &[u8],
    ) -> Result<bool, Self::Error> {
        self.state.exists_child_storage(child_info, key)
    }
    fn for_keys_in_child_storage<F: FnMut(&[u8])>(&self, child_info: &ChildInfo, f: F) {
        self.state.for_keys_in_child_storage(child_info, f)
    }
    fn next_storage_key(&self, key: &[u8]) -> Result<Option<Vec<u8>>, Self::Error> {
        self.state.next_storage_key(key)
    }
    fn next_child_storage_key(
        &self,
        child_info: &ChildInfo,
        key: &[u8],
    ) -> Result<Option<Vec<u8>>, Self::Error> {
        self.state.next_child_storage_key(child_info, key)
    }
    fn for_keys_with_prefix<F: FnMut(&[u8])>(&self, prefix: &[u8], f: F) {
        self.state.for_keys_with_prefix(prefix, f)
    }
    fn for_key_values_with_prefix<F: FnMut(&[u8], &[u8])>(&self, prefix: &[u8], f: F) {
        self.state.for_key_values_with_prefix(prefix, f)
    }
    fn for_child_keys_with_prefix<F: FnMut(&[u8])>(
        &self,
        child_info: &ChildInfo,
        prefix: &[u8],
        f: F,
    ) {
        self.state.for_child_keys_with_prefix(child_info, prefix, f)
    }
    fn storage_root<'a>(
        &self,
        delta: impl Iterator<Item = (&'a [u8], Option<&'a [u8]>)>,
    ) -> (B::Hash, Self::Transaction)
    where
        B::Hash: Ord,
    {
        self.state.storage_root(delta)
    }
    fn child_storage_root<'a>(
        &self,
        child_info: &ChildInfo,
        delta: impl Iterator<Item = (&'a [u8], Option<&'a [u8]>)>,
    ) -> (B::Hash, bool, Self::Transaction)
    where
        B::Hash: Ord,
    {
        self.state.child_storage_root(child_info, delta)
    }
    fn pairs(&self) -> Vec<(Vec<u8>, Vec<u8>)> {
        self.state.pairs()
    }
    fn keys(&self, prefix: &[u8]) -> Vec<Vec<u8>> {
        self.state.keys(prefix)
    }
    fn child_keys(&self, child_info: &ChildInfo, prefix: &[u8]) -> Vec<Vec<u8>> {
        self.state.child_keys(child_info, prefix)
    }
    fn as_trie_backend(&mut self) -> Option<&TrieBackend<Self::TrieBackendStorage, HashFor<B>>> {
        self.state.as_trie_backend()
    }
    fn register_overlay_stats(&mut self, stats: &sp_state_machine::StateMachineStats) {
        self.overlay_stats.add(stats);
    }
    fn usage_info(&self) -> sp_state_machine::UsageInfo {
        let mut info = self.usage.take();
        info.include_state_machine_states(&self.overlay_stats);
        info
    }
}
/// Extended [`CachingState`] that will sync the caches on drop.
pub struct SyncingCachingState<S, Block: BlockT> {
    /// The usage statistics of the backend. These will be updated on drop.
    state_usage: Arc<StateUsageStats>,
    /// Reference to the meta db.
    meta: Arc<RwLock<Meta<NumberFor<Block>, Block::Hash>>>,
    /// Mutex to lock to get exclusive access to the backend.
    lock: Arc<RwLock<()>>,
    /// The wrapped caching state.
    ///
    /// This is required to be a `Option`, because sometimes we want to extract
    /// the cache changes and Rust does not allow to move fields from types that
    /// implement `Drop`.
    caching_state: Option<CachingState<S, Block>>,
    /// Disable syncing of the cache. This is by default always `false`. However,
    /// we need to disable syncing when this is a state in a
    /// [`BlockImportOperation`](crate::BlockImportOperation). The import operation
    /// takes care to sync the cache and more importantly we want to prevent a dead
    /// lock.
    disable_syncing: bool,
}
impl<S, B: BlockT> SyncingCachingState<S, B> {
    /// Create new automatic syncing state.
    pub fn new(
        caching_state: CachingState<S, B>,
        state_usage: Arc<StateUsageStats>,
        meta: Arc<RwLock<Meta<NumberFor<B>, B::Hash>>>,
        lock: Arc<RwLock<()>>,
    ) -> Self {
        Self { caching_state: Some(caching_state), state_usage, meta, lock, disable_syncing: false }
    }
    /// Returns the reference to the internal [`CachingState`].
    fn caching_state(&self) -> &CachingState<S, B> {
        // `caching_state` is only `None` transiently (see field docs).
        self.caching_state
            .as_ref()
            .expect("`caching_state` is always valid for the lifetime of the object; qed")
    }
    /// Convert `Self` into the cache changes.
    pub fn into_cache_changes(mut self) -> CacheChanges<B> {
        // Take the state out so the `Drop` impl cannot sync it afterwards.
        self.caching_state
            .take()
            .expect("`caching_state` is always valid for the lifetime of the object; qed")
            .cache
    }
    /// Disable syncing the cache on drop.
    pub fn disable_syncing(&mut self) {
        self.disable_syncing = true;
    }
}
impl<S, B: BlockT> std::fmt::Debug for SyncingCachingState<S, B> {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        // Delegate to the wrapped caching state's Debug output.
        self.caching_state().fmt(f)
    }
}
// Every method below delegates verbatim to the wrapped [`CachingState`];
// this impl only exists so `SyncingCachingState` can be used wherever a
// `StateBackend` is expected. Note that `as_trie_backend` is the one method
// needing mutable access to the wrapped state.
impl<S: StateBackend<HashFor<B>>, B: BlockT> StateBackend<HashFor<B>>
    for SyncingCachingState<S, B>
{
    type Error = S::Error;
    type Transaction = S::Transaction;
    type TrieBackendStorage = S::TrieBackendStorage;

    fn storage(&self, key: &[u8]) -> Result<Option<Vec<u8>>, Self::Error> {
        self.caching_state().storage(key)
    }

    fn storage_hash(&self, key: &[u8]) -> Result<Option<B::Hash>, Self::Error> {
        self.caching_state().storage_hash(key)
    }

    fn child_storage(
        &self,
        child_info: &ChildInfo,
        key: &[u8],
    ) -> Result<Option<Vec<u8>>, Self::Error> {
        self.caching_state().child_storage(child_info, key)
    }

    fn exists_storage(&self, key: &[u8]) -> Result<bool, Self::Error> {
        self.caching_state().exists_storage(key)
    }

    fn exists_child_storage(
        &self,
        child_info: &ChildInfo,
        key: &[u8],
    ) -> Result<bool, Self::Error> {
        self.caching_state().exists_child_storage(child_info, key)
    }

    fn for_keys_in_child_storage<F: FnMut(&[u8])>(&self, child_info: &ChildInfo, f: F) {
        self.caching_state().for_keys_in_child_storage(child_info, f)
    }

    fn next_storage_key(&self, key: &[u8]) -> Result<Option<Vec<u8>>, Self::Error> {
        self.caching_state().next_storage_key(key)
    }

    fn next_child_storage_key(
        &self,
        child_info: &ChildInfo,
        key: &[u8],
    ) -> Result<Option<Vec<u8>>, Self::Error> {
        self.caching_state().next_child_storage_key(child_info, key)
    }

    fn for_keys_with_prefix<F: FnMut(&[u8])>(&self, prefix: &[u8], f: F) {
        self.caching_state().for_keys_with_prefix(prefix, f)
    }

    fn for_key_values_with_prefix<F: FnMut(&[u8], &[u8])>(&self, prefix: &[u8], f: F) {
        self.caching_state().for_key_values_with_prefix(prefix, f)
    }

    fn for_child_keys_with_prefix<F: FnMut(&[u8])>(
        &self,
        child_info: &ChildInfo,
        prefix: &[u8],
        f: F,
    ) {
        self.caching_state().for_child_keys_with_prefix(child_info, prefix, f)
    }

    fn storage_root<'a>(
        &self,
        delta: impl Iterator<Item = (&'a [u8], Option<&'a [u8]>)>,
    ) -> (B::Hash, Self::Transaction)
    where
        B::Hash: Ord,
    {
        self.caching_state().storage_root(delta)
    }

    fn child_storage_root<'a>(
        &self,
        child_info: &ChildInfo,
        delta: impl Iterator<Item = (&'a [u8], Option<&'a [u8]>)>,
    ) -> (B::Hash, bool, Self::Transaction)
    where
        B::Hash: Ord,
    {
        self.caching_state().child_storage_root(child_info, delta)
    }

    fn pairs(&self) -> Vec<(Vec<u8>, Vec<u8>)> {
        self.caching_state().pairs()
    }

    fn keys(&self, prefix: &[u8]) -> Vec<Vec<u8>> {
        self.caching_state().keys(prefix)
    }

    fn child_keys(&self, child_info: &ChildInfo, prefix: &[u8]) -> Vec<Vec<u8>> {
        self.caching_state().child_keys(child_info, prefix)
    }

    fn as_trie_backend(&mut self) -> Option<&TrieBackend<Self::TrieBackendStorage, HashFor<B>>> {
        self.caching_state
            .as_mut()
            .expect("`caching_state` is valid for the lifetime of the object; qed")
            .as_trie_backend()
    }

    fn register_overlay_stats(&mut self, stats: &sp_state_machine::StateMachineStats) {
        self.caching_state().register_overlay_stats(stats);
    }

    fn usage_info(&self) -> sp_state_machine::UsageInfo {
        self.caching_state().usage_info()
    }
}
impl<S, B: BlockT> Drop for SyncingCachingState<S, B> {
    /// Syncs the local cache changes into the shared cache when the state is
    /// dropped (unless syncing was explicitly disabled).
    fn drop(&mut self) {
        // Syncing is disabled while the state is owned by a block import
        // operation, which performs the sync itself (see the field docs).
        if self.disable_syncing {
            return
        }

        if let Some(mut caching_state) = self.caching_state.take() {
            // Take the lock protecting the backend before touching the caches.
            let _lock = self.lock.read();

            self.state_usage.merge_sm(caching_state.usage.take());
            // Only a state with a known parent block can be synced into the
            // shared cache; whether it was best is read from the meta db.
            if let Some(hash) = caching_state.cache.parent_hash.clone() {
                let is_best = self.meta.read().best_hash == hash;
                caching_state.cache.sync_cache(&[], &[], vec![], vec![], None, None, is_best);
            }
        }
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use sp_runtime::{
        testing::{Block as RawBlock, ExtrinsicWrapper, H256},
        traits::BlakeTwo256,
    };
    use sp_state_machine::InMemoryBackend;

    type Block = RawBlock<ExtrinsicWrapper<u32>>;

    #[test]
    fn smoke() {
        //init_log();
        let root_parent = H256::random();
        let key = H256::random()[..].to_vec();
        let h0 = H256::random();
        let h1a = H256::random();
        let h1b = H256::random();
        let h2a = H256::random();
        let h2b = H256::random();
        let h3a = H256::random();
        let h3b = H256::random();

        let shared = new_shared_cache::<Block>(256 * 1024, (0, 1));

        // `sync_cache` argument order throughout this module: enacted blocks,
        // retracted blocks, storage changes, child storage changes,
        // committed block hash, committed block number, is_best.
        // blocks [ 3a(c) 2a(c) 2b 1b 1a(c) 0 ]
        // state [ 5 5 4 3 2 2 ]
        let mut s = CachingState::new(
            InMemoryBackend::<BlakeTwo256>::default(),
            shared.clone(),
            Some(root_parent),
        );
        s.cache.sync_cache(
            &[],
            &[],
            vec![(key.clone(), Some(vec![2]))],
            vec![],
            Some(h0),
            Some(0),
            true,
        );

        let mut s =
            CachingState::new(InMemoryBackend::<BlakeTwo256>::default(), shared.clone(), Some(h0));
        s.cache.sync_cache(&[], &[], vec![], vec![], Some(h1a), Some(1), true);

        let mut s =
            CachingState::new(InMemoryBackend::<BlakeTwo256>::default(), shared.clone(), Some(h0));
        s.cache.sync_cache(
            &[],
            &[],
            vec![(key.clone(), Some(vec![3]))],
            vec![],
            Some(h1b),
            Some(1),
            false,
        );

        let mut s =
            CachingState::new(InMemoryBackend::<BlakeTwo256>::default(), shared.clone(), Some(h1b));
        s.cache.sync_cache(
            &[],
            &[],
            vec![(key.clone(), Some(vec![4]))],
            vec![],
            Some(h2b),
            Some(2),
            false,
        );

        let mut s =
            CachingState::new(InMemoryBackend::<BlakeTwo256>::default(), shared.clone(), Some(h1a));
        s.cache.sync_cache(
            &[],
            &[],
            vec![(key.clone(), Some(vec![5]))],
            vec![],
            Some(h2a),
            Some(2),
            true,
        );

        let mut s =
            CachingState::new(InMemoryBackend::<BlakeTwo256>::default(), shared.clone(), Some(h2a));
        s.cache.sync_cache(&[], &[], vec![], vec![], Some(h3a), Some(3), true);

        // The canonical chain head (3a) sees the latest value.
        let s =
            CachingState::new(InMemoryBackend::<BlakeTwo256>::default(), shared.clone(), Some(h3a));
        assert_eq!(s.storage(&key).unwrap().unwrap(), vec![5]);

        // Non-head / non-canonical states must not see values cached for the head.
        let s =
            CachingState::new(InMemoryBackend::<BlakeTwo256>::default(), shared.clone(), Some(h1a));
        assert!(s.storage(&key).unwrap().is_none());

        let s =
            CachingState::new(InMemoryBackend::<BlakeTwo256>::default(), shared.clone(), Some(h2b));
        assert!(s.storage(&key).unwrap().is_none());

        let s =
            CachingState::new(InMemoryBackend::<BlakeTwo256>::default(), shared.clone(), Some(h1b));
        assert!(s.storage(&key).unwrap().is_none());

        // reorg to 3b
        // blocks [ 3b(c) 3a 2a 2b(c) 1b 1a 0 ]
        let mut s =
            CachingState::new(InMemoryBackend::<BlakeTwo256>::default(), shared.clone(), Some(h2b));
        s.cache.sync_cache(
            &[h1b, h2b, h3b],
            &[h1a, h2a, h3a],
            vec![],
            vec![],
            Some(h3b),
            Some(3),
            true,
        );
        // After the reorg the old branch's cached value must be gone.
        let s =
            CachingState::new(InMemoryBackend::<BlakeTwo256>::default(), shared.clone(), Some(h3a));
        assert!(s.storage(&key).unwrap().is_none());
    }

    #[test]
    fn simple_fork() {
        sp_tracing::try_init_simple();

        let root_parent = H256::random();
        let key = H256::random()[..].to_vec();
        let h1 = H256::random();
        let h2a = H256::random();
        let h2b = H256::random();
        let h3b = H256::random();

        let shared = new_shared_cache::<Block>(256 * 1024, (0, 1));

        let mut s = CachingState::new(
            InMemoryBackend::<BlakeTwo256>::default(),
            shared.clone(),
            Some(root_parent),
        );
        s.cache.sync_cache(
            &[],
            &[],
            vec![(key.clone(), Some(vec![2]))],
            vec![],
            Some(h1),
            Some(1),
            true,
        );

        let mut s =
            CachingState::new(InMemoryBackend::<BlakeTwo256>::default(), shared.clone(), Some(h1));
        s.cache.sync_cache(&[], &[], vec![], vec![], Some(h2a), Some(2), true);

        // A non-best fork writes a different value for the same key.
        let mut s =
            CachingState::new(InMemoryBackend::<BlakeTwo256>::default(), shared.clone(), Some(h1));
        s.cache.sync_cache(
            &[],
            &[],
            vec![(key.clone(), Some(vec![3]))],
            vec![],
            Some(h2b),
            Some(2),
            false,
        );

        let mut s =
            CachingState::new(InMemoryBackend::<BlakeTwo256>::default(), shared.clone(), Some(h2b));
        s.cache.sync_cache(
            &[],
            &[],
            vec![(key.clone(), Some(vec![3]))],
            vec![],
            Some(h3b),
            Some(2),
            false,
        );

        // The canonical branch still sees its own value.
        let s =
            CachingState::new(InMemoryBackend::<BlakeTwo256>::default(), shared.clone(), Some(h2a));
        assert_eq!(s.storage(&key).unwrap().unwrap(), vec![2]);
    }

    #[test]
    fn double_fork() {
        let root_parent = H256::random();
        let key = H256::random()[..].to_vec();
        let h1 = H256::random();
        let h2a = H256::random();
        let h2b = H256::random();
        let h3a = H256::random();
        let h3b = H256::random();

        let shared = new_shared_cache::<Block>(256 * 1024, (0, 1));

        let mut s = CachingState::new(
            InMemoryBackend::<BlakeTwo256>::default(),
            shared.clone(),
            Some(root_parent),
        );
        s.cache.sync_cache(&[], &[], vec![], vec![], Some(h1), Some(1), true);

        let mut s =
            CachingState::new(InMemoryBackend::<BlakeTwo256>::default(), shared.clone(), Some(h1));
        s.cache.sync_cache(&[], &[], vec![], vec![], Some(h2a), Some(2), true);

        let mut s =
            CachingState::new(InMemoryBackend::<BlakeTwo256>::default(), shared.clone(), Some(h2a));
        s.cache.sync_cache(
            &[],
            &[],
            vec![(key.clone(), Some(vec![2]))],
            vec![],
            Some(h3a),
            Some(3),
            true,
        );

        // Second fork off the same ancestor, never canonical.
        let mut s =
            CachingState::new(InMemoryBackend::<BlakeTwo256>::default(), shared.clone(), Some(h1));
        s.cache.sync_cache(&[], &[], vec![], vec![], Some(h2b), Some(2), false);

        let mut s =
            CachingState::new(InMemoryBackend::<BlakeTwo256>::default(), shared.clone(), Some(h2b));
        s.cache.sync_cache(
            &[],
            &[],
            vec![(key.clone(), Some(vec![3]))],
            vec![],
            Some(h3b),
            Some(3),
            false,
        );

        let s =
            CachingState::new(InMemoryBackend::<BlakeTwo256>::default(), shared.clone(), Some(h3a));
        assert_eq!(s.storage(&key).unwrap().unwrap(), vec![2]);
    }

    #[test]
    fn should_track_used_size_correctly() {
        let root_parent = H256::random();
        // Cache sized so the byte accounting below is exact.
        let shared = new_shared_cache::<Block>(109, ((109 - 36), 109));
        let h0 = H256::random();

        let mut s = CachingState::new(
            InMemoryBackend::<BlakeTwo256>::default(),
            shared.clone(),
            Some(root_parent.clone()),
        );

        let key = H256::random()[..].to_vec();
        let s_key = H256::random()[..].to_vec();
        s.cache.sync_cache(
            &[],
            &[],
            vec![(key.clone(), Some(vec![1, 2, 3]))],
            vec![],
            Some(h0),
            Some(0),
            true,
        );
        // 32 key, 3 byte size
        assert_eq!(shared.lock().used_storage_cache_size(), 35 /* bytes */);

        let key = H256::random()[..].to_vec();
        s.cache.sync_cache(
            &[],
            &[],
            vec![],
            vec![(s_key.clone(), vec![(key.clone(), Some(vec![1, 2]))])],
            Some(h0),
            Some(0),
            true,
        );
        // 35 + (2 * 32) key, 2 byte size
        assert_eq!(shared.lock().used_storage_cache_size(), 101 /* bytes */);
    }

    #[test]
    fn should_remove_lru_items_based_on_tracking_used_size() {
        let root_parent = H256::random();
        // Cache can hold at most two 36-byte entries within its limit.
        let shared = new_shared_cache::<Block>(36 * 3, (2, 3));
        let h0 = H256::random();

        let mut s = CachingState::new(
            InMemoryBackend::<BlakeTwo256>::default(),
            shared.clone(),
            Some(root_parent),
        );

        let key = H256::random()[..].to_vec();
        s.cache.sync_cache(
            &[],
            &[],
            vec![(key.clone(), Some(vec![1, 2, 3, 4]))],
            vec![],
            Some(h0),
            Some(0),
            true,
        );
        // 32 key, 4 byte size
        assert_eq!(shared.lock().used_storage_cache_size(), 36 /* bytes */);

        let key = H256::random()[..].to_vec();
        s.cache.sync_cache(
            &[],
            &[],
            vec![(key.clone(), Some(vec![1, 2]))],
            vec![],
            Some(h0),
            Some(0),
            true,
        );
        // 32 key, 2 byte size — the first entry was evicted by the LRU.
        assert_eq!(shared.lock().used_storage_cache_size(), 34 /* bytes */);
    }

    #[test]
    fn fix_storage_mismatch_issue() {
        sp_tracing::try_init_simple();
        let root_parent = H256::random();

        let key = H256::random()[..].to_vec();

        let h0 = H256::random();
        let h1 = H256::random();

        let shared = new_shared_cache::<Block>(256 * 1024, (0, 1));
        let mut s = CachingState::new(
            InMemoryBackend::<BlakeTwo256>::default(),
            shared.clone(),
            Some(root_parent.clone()),
        );
        s.cache.sync_cache(
            &[],
            &[],
            vec![(key.clone(), Some(vec![2]))],
            vec![],
            Some(h0.clone()),
            Some(0),
            true,
        );

        let mut s =
            CachingState::new(InMemoryBackend::<BlakeTwo256>::default(), shared.clone(), Some(h0));
        s.cache.sync_cache(
            &[],
            &[],
            vec![(key.clone(), Some(vec![3]))],
            vec![],
            Some(h1),
            Some(1),
            true,
        );

        let mut s =
            CachingState::new(InMemoryBackend::<BlakeTwo256>::default(), shared.clone(), Some(h1));
        assert_eq!(s.storage(&key).unwrap(), Some(vec![3]));

        // Restart (or unknown block?), clear caches.
        {
            let mut cache = s.cache.shared_cache.lock();
            let cache = &mut *cache;
            cache.lru_storage.clear();
            cache.lru_hashes.clear();
            cache.lru_child_storage.clear();
            cache.modifications.clear();
        }

        // New value is written because of cache miss.
        s.cache.local_cache.write().storage.insert(key.clone(), Some(vec![42]));

        // New value is propagated.
        s.cache.sync_cache(&[], &[], vec![], vec![], None, None, true);

        // The stale local value must not leak into reads at block h1.
        let s =
            CachingState::new(InMemoryBackend::<BlakeTwo256>::default(), shared.clone(), Some(h1));
        assert_eq!(s.storage(&key).unwrap(), None);
    }
}
/// Model-based randomized tests: a [`Mutator`] maintains a reference model of
/// the canonical chain plus its forks, applies each random [`Action`] to both
/// the model and the real shared cache, and then compares their contents.
#[cfg(test)]
mod qc {
    use std::collections::{hash_map::Entry, HashMap};

    use quickcheck::{quickcheck, Arbitrary, TestResult};

    use super::*;
    use sp_runtime::{
        testing::{Block as RawBlock, ExtrinsicWrapper, H256},
        traits::BlakeTwo256,
    };
    use sp_state_machine::InMemoryBackend;

    type Block = RawBlock<ExtrinsicWrapper<u32>>;
    // A list of (key, optional value) changes and a full key -> value map.
    type KeySet = Vec<(Vec<u8>, Option<Vec<u8>>)>;
    type KeyMap = HashMap<Vec<u8>, Option<Vec<u8>>>;

    /// A block in the reference model: its hash, parent hash, the full
    /// expected state at this block and the changes it introduced.
    #[derive(Debug, Clone)]
    struct Node {
        hash: H256,
        parent: H256,
        state: KeyMap,
        changes: KeySet,
    }

    impl Node {
        /// Builds the child of `self` with the given `changes` applied on top
        /// of this node's state.
        fn new_next(&self, hash: H256, changes: KeySet) -> Self {
            let mut state = self.state.clone();

            // NOTE(review): this loop re-inserts exactly the entries already
            // obtained by the `clone()` above, so it appears redundant —
            // confirm before removing.
            for (k, v) in self.state.iter() {
                state.insert(k.clone(), v.clone());
            }

            for (k, v) in changes.clone().into_iter() {
                state.insert(k, v);
            }

            Self { hash, parent: self.hash, changes, state }
        }

        /// Builds a root-level node whose state is exactly `changes`.
        fn new(hash: H256, parent: H256, changes: KeySet) -> Self {
            let mut state = KeyMap::new();

            for (k, v) in changes.clone().into_iter() {
                state.insert(k, v);
            }

            Self { hash, parent, state, changes }
        }

        /// Drops from this node's expected state every key touched by
        /// `other_changes` (the cache can no longer be assumed to hold them).
        fn purge(&mut self, other_changes: &KeySet) {
            for (k, _) in other_changes.iter() {
                self.state.remove(k);
            }
        }
    }

    /// One randomized operation applied to both the model and the cache.
    #[derive(Debug, Clone)]
    enum Action {
        Next { hash: H256, changes: KeySet },
        Fork { depth: usize, hash: H256, changes: KeySet },
        ReorgWithImport { depth: usize, hash: H256 },
        FinalizationReorg { fork_depth: usize, depth: usize },
    }

    impl Arbitrary for Action {
        fn arbitrary<G: quickcheck::Gen>(gen: &mut G) -> Self {
            // `path` selects the action kind with a fixed weighting.
            let path = gen.next_u32() as u8;
            let mut buf = [0u8; 32];

            match path {
                // ~69%: extend the canonical chain.
                0..=175 => {
                    gen.fill_bytes(&mut buf[..]);
                    Action::Next {
                        hash: H256::from(&buf),
                        changes: {
                            let mut set = Vec::new();
                            for _ in 0..gen.next_u32() / (64 * 256 * 256 * 256) {
                                set.push((
                                    vec![gen.next_u32() as u8],
                                    Some(vec![gen.next_u32() as u8]),
                                ));
                            }
                            set
                        },
                    }
                },
                // ~18%: create or extend a fork.
                176..=220 => {
                    gen.fill_bytes(&mut buf[..]);
                    Action::Fork {
                        hash: H256::from(&buf),
                        depth: ((gen.next_u32() as u8) / 32) as usize,
                        changes: {
                            let mut set = Vec::new();
                            for _ in 0..gen.next_u32() / (64 * 256 * 256 * 256) {
                                set.push((
                                    vec![gen.next_u32() as u8],
                                    Some(vec![gen.next_u32() as u8]),
                                ));
                            }
                            set
                        },
                    }
                },
                // ~8%: reorg onto an existing fork plus one new import.
                221..=240 => {
                    gen.fill_bytes(&mut buf[..]);
                    Action::ReorgWithImport {
                        hash: H256::from(&buf),
                        depth: ((gen.next_u32() as u8) / 32) as usize, // 0-7
                    }
                },
                // remainder: finalization-triggered reorg.
                _ => {
                    gen.fill_bytes(&mut buf[..]);
                    Action::FinalizationReorg {
                        fork_depth: ((gen.next_u32() as u8) / 32) as usize, // 0-7
                        depth: ((gen.next_u32() as u8) / 64) as usize, // 0-3
                    }
                },
            }
        }
    }

    /// Drives a shared cache and the reference model in lock-step.
    struct Mutator {
        shared: SharedCache<Block>,
        // Canonical chain of model nodes, oldest first.
        canon: Vec<Node>,
        // Forks keyed by the canonical block hash they branched from.
        forks: HashMap<H256, Vec<Node>>,
    }

    impl Mutator {
        fn new_empty() -> Self {
            let shared = new_shared_cache::<Block>(256 * 1024, (0, 1));

            Self { shared, canon: vec![], forks: HashMap::new() }
        }

        /// A fresh cache-backed state positioned at `hash`.
        fn head_state(&self, hash: H256) -> CachingState<InMemoryBackend<BlakeTwo256>, Block> {
            CachingState::new(
                InMemoryBackend::<BlakeTwo256>::default(),
                self.shared.clone(),
                Some(hash),
            )
        }

        fn canon_head_state(&self) -> CachingState<InMemoryBackend<BlakeTwo256>, Block> {
            self.head_state(self.canon.last().expect("Expected to be one commit").hash)
        }

        /// Like [`Self::mutate`], but panics if the action is not applicable.
        fn mutate_static(
            &mut self,
            action: Action,
        ) -> CachingState<InMemoryBackend<BlakeTwo256>, Block> {
            self.mutate(action)
                .expect("Expected to provide only valid actions to the mutate_static")
        }

        fn canon_len(&self) -> usize {
            return self.canon.len()
        }

        /// The model's expected state at the canonical head.
        fn head_storage_ref(&self) -> &KeyMap {
            &self.canon.last().expect("Expected to be one commit").state
        }

        /// All single-byte keys (0..=254) used to compare cache vs. model.
        fn key_permutations() -> Vec<Vec<u8>> {
            (0u8..255).map(|x| vec![x]).collect()
        }

        /// Applies `action` to both the model and the shared cache, returning
        /// the resulting state, or `Err(())` when the action does not apply to
        /// the current model (quickcheck discards such runs).
        fn mutate(
            &mut self,
            action: Action,
        ) -> Result<CachingState<InMemoryBackend<BlakeTwo256>, Block>, ()> {
            let state = match action {
                Action::Fork { depth, hash, changes } => {
                    let pos = self.canon.len() as isize - depth as isize;
                    if pos < 0 || self.canon.len() == 0 || pos >= (self.canon.len() - 1) as isize
                    // no fork on top also, thus len-1
                    {
                        return Err(())
                    }

                    let pos = pos as usize;
                    let fork_at = self.canon[pos].hash;
                    let (total_h, parent) = match self.forks.entry(fork_at) {
                        Entry::Occupied(occupied) => {
                            // Extend an existing fork chain.
                            let chain = occupied.into_mut();
                            let parent =
                                chain.last().expect("No empty forks are ever created").clone();
                            let mut node = parent.new_next(hash, changes.clone());

                            for earlier in chain.iter() {
                                node.purge(&earlier.changes.clone());
                            }

                            chain.push(node);

                            (pos + chain.len(), parent.hash)
                        },
                        Entry::Vacant(vacant) => {
                            // Start a new fork off the canonical chain.
                            let canon_parent = &self.canon[pos];
                            vacant.insert(vec![canon_parent.new_next(hash, changes.clone())]);

                            (pos + 1, fork_at)
                        },
                    };

                    let mut state = CachingState::new(
                        InMemoryBackend::<BlakeTwo256>::default(),
                        self.shared.clone(),
                        Some(parent),
                    );

                    state.cache.sync_cache(
                        &[],
                        &[],
                        changes,
                        vec![],
                        Some(hash),
                        Some(total_h as u64),
                        false,
                    );

                    state
                },
                Action::Next { hash, changes } => {
                    let (next, parent_hash) = match self.canon.last() {
                        None => {
                            // Genesis-like first block: all-zero parent hash.
                            let parent_hash = H256::from(&[0u8; 32]);
                            (Node::new(hash, parent_hash, changes.clone()), parent_hash)
                        },
                        Some(parent) => (parent.new_next(hash, changes.clone()), parent.hash),
                    };

                    // delete cache entries for earlier
                    for node in self.canon.iter_mut() {
                        node.purge(&next.changes);
                        if let Some(fork) = self.forks.get_mut(&node.hash) {
                            for node in fork.iter_mut() {
                                node.purge(&next.changes);
                            }
                        }
                    }

                    let mut state = CachingState::new(
                        InMemoryBackend::<BlakeTwo256>::default(),
                        self.shared.clone(),
                        Some(parent_hash),
                    );

                    state.cache.sync_cache(
                        &[],
                        &[],
                        next.changes.clone(),
                        vec![],
                        Some(hash),
                        Some(self.canon.len() as u64 + 1),
                        true,
                    );

                    self.canon.push(next);

                    state
                },
                Action::ReorgWithImport { depth, hash } => {
                    let pos = self.canon.len() as isize - depth as isize;
                    if pos < 0 || pos + 1 >= self.canon.len() as isize {
                        return Err(())
                    }
                    let fork_at = self.canon[pos as usize].hash;
                    let pos = pos as usize;

                    match self.forks.get_mut(&fork_at) {
                        Some(chain) => {
                            // Swap the canonical tail with the fork chain,
                            // then import one new block on the new head.
                            let mut new_fork = self.canon.drain(pos + 1..).collect::<Vec<Node>>();

                            let retracted: Vec<H256> =
                                new_fork.iter().map(|node| node.hash).collect();
                            let enacted: Vec<H256> = chain.iter().map(|node| node.hash).collect();

                            std::mem::swap(chain, &mut new_fork);

                            let mut node = new_fork
                                .last()
                                .map(|node| node.new_next(hash, vec![]))
                                .expect("No empty fork ever created!");
                            for invalidators in chain.iter().chain(new_fork.iter()) {
                                node.purge(&invalidators.changes);
                            }

                            self.canon.extend(new_fork.into_iter());
                            self.canon.push(node);

                            let mut state = CachingState::new(
                                InMemoryBackend::<BlakeTwo256>::default(),
                                self.shared.clone(),
                                Some(fork_at),
                            );

                            let height = pos as u64 + enacted.len() as u64 + 2;
                            state.cache.sync_cache(
                                &enacted[..],
                                &retracted[..],
                                vec![],
                                vec![],
                                Some(hash),
                                Some(height),
                                true,
                            );

                            state
                        },
                        None => {
                            return Err(()) // no reorg without a fork atm!
                        },
                    }
                },
                Action::FinalizationReorg { fork_depth, depth } => {
                    let pos = self.canon.len() as isize - fork_depth as isize;
                    if pos < 0 || pos + 1 >= self.canon.len() as isize {
                        return Err(())
                    }
                    let fork_at = self.canon[pos as usize].hash;
                    let pos = pos as usize;

                    match self.forks.get_mut(&fork_at) {
                        Some(fork_chain) => {
                            // NOTE(review): the first two terms cancel, so
                            // `sync_pos == -depth` and the guard below only
                            // lets `depth == 0` through — confirm whether
                            // `fork_chain.len() - depth` was intended.
                            let sync_pos = fork_chain.len() as isize -
                                fork_chain.len() as isize - depth as isize;
                            if sync_pos < 0 || sync_pos >= fork_chain.len() as isize {
                                return Err(())
                            }
                            let sync_pos = sync_pos as usize;

                            let mut new_fork = self.canon.drain(pos + 1..).collect::<Vec<Node>>();

                            let retracted: Vec<H256> =
                                new_fork.iter().map(|node| node.hash).collect();
                            let enacted: Vec<H256> = fork_chain
                                .iter()
                                .take(sync_pos + 1)
                                .map(|node| node.hash)
                                .collect();

                            std::mem::swap(fork_chain, &mut new_fork);

                            self.shared.lock().sync(&retracted, &enacted);

                            self.head_state(
                                self.canon
                                    .last()
                                    .expect("wasn't forking to emptiness so there should be one!")
                                    .hash,
                            )
                        },
                        None => {
                            return Err(()) // no reorg to nothing pls!
                        },
                    }
                },
            };

            Ok(state)
        }
    }

    #[test]
    fn smoke() {
        let key = H256::random()[..].to_vec();
        let h0 = H256::random();
        let h1a = H256::random();
        let h1b = H256::random();
        let h2a = H256::random();
        let h2b = H256::random();
        let h3a = H256::random();
        let h3b = H256::random();

        let mut mutator = Mutator::new_empty();
        mutator
            .mutate_static(Action::Next { hash: h0, changes: vec![(key.clone(), Some(vec![2]))] });
        mutator.mutate_static(Action::Next { hash: h1a, changes: vec![] });
        mutator.mutate_static(Action::Fork {
            depth: 2,
            hash: h1b,
            changes: vec![(key.clone(), Some(vec![3]))],
        });
        mutator.mutate_static(Action::Fork {
            depth: 2,
            hash: h2b,
            changes: vec![(key.clone(), Some(vec![4]))],
        });
        mutator
            .mutate_static(Action::Next { hash: h2a, changes: vec![(key.clone(), Some(vec![5]))] });
        mutator.mutate_static(Action::Next { hash: h3a, changes: vec![] });

        assert_eq!(
            mutator.head_state(h3a).storage(&key).unwrap().expect("there should be a value"),
            vec![5]
        );
        assert!(mutator.head_state(h1a).storage(&key).unwrap().is_none());
        assert!(mutator.head_state(h2b).storage(&key).unwrap().is_none());
        assert!(mutator.head_state(h1b).storage(&key).unwrap().is_none());

        mutator.mutate_static(Action::ReorgWithImport { depth: 4, hash: h3b });
        assert!(mutator.head_state(h3a).storage(&key).unwrap().is_none());
    }

    /// Compares the cache-backed head state against the model head state for
    /// every probe key.
    fn is_head_match(mutator: &Mutator) -> bool {
        let head_state = mutator.canon_head_state();

        for key in Mutator::key_permutations() {
            match (head_state.storage(&key).unwrap(), mutator.head_storage_ref().get(&key)) {
                (Some(x), Some(y)) =>
                    if Some(&x) != y.as_ref() {
                        eprintln!("{:?} != {:?}", x, y);
                        return false
                    },
                (None, Some(_y)) => {
                    // TODO: cache miss is not tracked atm
                },
                (Some(x), None) => {
                    eprintln!("{:?} != <missing>", x);
                    return false
                },
                _ => continue,
            }
        }
        true
    }

    /// Compares every canonical block's cache view against the model state.
    fn is_canon_match(mutator: &Mutator) -> bool {
        for node in mutator.canon.iter() {
            let head_state = mutator.head_state(node.hash);
            for key in Mutator::key_permutations() {
                match (head_state.storage(&key).unwrap(), node.state.get(&key)) {
                    (Some(x), Some(y)) =>
                        if Some(&x) != y.as_ref() {
                            eprintln!("at [{}]: {:?} != {:?}", node.hash, x, y);
                            return false
                        },
                    (None, Some(_y)) => {
                        // cache miss is not tracked atm
                    },
                    (Some(x), None) => {
                        eprintln!("at [{}]: {:?} != <missing>", node.hash, x);
                        return false
                    },
                    _ => continue,
                }
            }
        }
        true
    }

    #[test]
    fn reorg() {
        let key = H256::random()[..].to_vec();
        let h0 = H256::random();
        let h1 = H256::random();
        let h2 = H256::random();
        let h1b = H256::random();
        let h2b = H256::random();

        let mut mutator = Mutator::new_empty();
        mutator.mutate_static(Action::Next { hash: h0, changes: vec![] });
        mutator.mutate_static(Action::Next { hash: h1, changes: vec![] });
        mutator
            .mutate_static(Action::Next { hash: h2, changes: vec![(key.clone(), Some(vec![2]))] });
        mutator.mutate_static(Action::Fork {
            depth: 2,
            hash: h1b,
            changes: vec![(key.clone(), Some(vec![3]))],
        });
        mutator.mutate_static(Action::ReorgWithImport { depth: 2, hash: h2b });

        assert!(is_head_match(&mutator))
    }

    // Shorthand constructors for single-byte keys/values/change sets.
    fn key(k: u8) -> Vec<u8> {
        vec![k]
    }
    fn val(v: u8) -> Option<Vec<u8>> {
        Some(vec![v])
    }
    fn keyval(k: u8, v: u8) -> KeySet {
        vec![(key(k), val(v))]
    }

    #[test]
    fn reorg2() {
        let h0 = H256::random();
        let h1a = H256::random();
        let h1b = H256::random();
        let h2b = H256::random();
        let h2a = H256::random();
        let h3a = H256::random();

        let mut mutator = Mutator::new_empty();
        mutator.mutate_static(Action::Next { hash: h0, changes: keyval(1, 1) });
        mutator.mutate_static(Action::Next { hash: h1a, changes: keyval(1, 1) });
        mutator.mutate_static(Action::Fork { depth: 2, hash: h1b, changes: keyval(2, 2) });

        mutator.mutate_static(Action::Next { hash: h2a, changes: keyval(3, 3) });
        mutator.mutate_static(Action::Next { hash: h3a, changes: keyval(4, 4) });
        mutator.mutate_static(Action::ReorgWithImport { depth: 4, hash: h2b });

        assert!(is_head_match(&mutator))
    }

    #[test]
    fn fork2() {
        let h1 = H256::random();
        let h2a = H256::random();
        let h2b = H256::random();
        let h3a = H256::random();
        let h3b = H256::random();

        let mut mutator = Mutator::new_empty();
        mutator.mutate_static(Action::Next { hash: h1, changes: vec![] });
        mutator.mutate_static(Action::Next { hash: h2a, changes: vec![] });
        mutator.mutate_static(Action::Next { hash: h3a, changes: keyval(1, 1) });

        mutator.mutate_static(Action::Fork { depth: 2, hash: h2b, changes: vec![] });
        mutator.mutate_static(Action::Fork { depth: 2, hash: h3b, changes: keyval(1, 2) });

        assert!(is_head_match(&mutator))
    }

    #[test]
    fn fork3() {
        let h1 = H256::random();
        let h2a = H256::random();
        let h2b = H256::random();
        let h3a = H256::random();

        let mut mutator = Mutator::new_empty();
        mutator.mutate_static(Action::Next { hash: h1, changes: keyval(1, 1) });
        mutator.mutate_static(Action::Next { hash: h2a, changes: keyval(2, 2) });
        mutator.mutate_static(Action::Next { hash: h3a, changes: keyval(3, 3) });

        mutator.mutate_static(Action::Fork { depth: 2, hash: h2b, changes: keyval(1, 3) });

        assert!(is_canon_match(&mutator))
    }

    quickcheck! {
        fn head_complete(actions: Vec<Action>) -> TestResult {
            let mut mutator = Mutator::new_empty();

            for action in actions.into_iter() {
                if let Err(_) = mutator.mutate(action) {
                    return TestResult::discard();
                }
            }

            if mutator.canon_len() == 0 {
                return TestResult::discard();
            }

            TestResult::from_bool(is_head_match(&mutator))
        }

        fn canon_complete(actions: Vec<Action>) -> TestResult {
            let mut mutator = Mutator::new_empty();

            for action in actions.into_iter() {
                if let Err(_) = mutator.mutate(action) {
                    return TestResult::discard();
                }
            }

            if mutator.canon_len() == 0 {
                return TestResult::discard();
            }

            TestResult::from_bool(is_canon_match(&mutator))
        }
    }
}
|
/// Computes the n-th Fibonacci number (F(0) = 0, F(1) = 1) and fills
/// `result` with the intermediate values as a memo table.
///
/// Entries already holding a non-zero value are treated as previously
/// computed and are not recomputed (0 never occurs for indices >= 2, so it
/// safely marks "unset"). Panics if `result.len() <= n`.
///
/// The original two-branch recursion is replaced with an iterative fill:
/// same values and same memo semantics, but no stack-overflow risk for
/// large `n`.
fn fibo(result: &mut [i64], n: usize) -> i64 {
    // Base cases: F(0) = 0, F(1) = 1.
    if n == 0 {
        result[0] = 0;
        return 0;
    }
    if n == 1 {
        result[1] = 1;
        return 1;
    }
    // Memo hit: this entry was filled by an earlier call.
    if result[n] != 0 {
        return result[n];
    }
    result[1] = 1;
    for i in 2..=n {
        // Keep any memoized entry; compute only the missing ones.
        if result[i] == 0 {
            result[i] = result[i - 1] + result[i - 2];
        }
    }
    result[n]
}
fn main() {
    // Fill a fixed-size memo table with Fibonacci numbers up to F(n).
    let n: usize = 20;
    let mut result: [i64; 100] = [0; 100];
    fibo(&mut result, n);

    // Print F(0) .. F(n-1), framed by blank lines, one value per line.
    println!();
    for value in result.iter().take(n) {
        println!("{}", value);
    }
    println!();
}
|
use std::f64::consts;
use rand::{Rng, SeedableRng, StdRng};
use std::mem;
use std::cmp;
use std::collections::HashMap;
use geom::{Ray,Scalar,Point2,Vector2};
use geom as g;
use nalgebra;
/// Physical properties carried along with each traced ray.
#[derive(Copy, Clone, Debug)]
pub struct LightProperties {
    pub wavelength: Scalar, // um
    /// Ray intensity; rays whose intensity falls below
    /// `TracingProperties::intensity_threshold` are discarded during tracing.
    pub intensity: Scalar
}
/// Global parameters controlling a ray-tracing run.
pub struct TracingProperties {
    /// Seed for the tracing RNG (consumed by `RayTraceState::initial`),
    /// making runs reproducible.
    pub random_seed: [usize; 1],
    // If a new ray is generated with intensity below
    // this threshold, it will be discarded.
    pub intensity_threshold: Scalar
}
/// Events reported to an [`EventHandler`] during tracing.
pub enum Event<'a> {
    /// A ray hit a segment. Only segments with a registered name are
    /// reported (see the lookup in `trace_ray`).
    Hit {
        /// Index of the segment that was hit.
        segment_index: usize,
        /// Registered name of the segment.
        segment_name: &'a str,
        /// Intersection point of the ray with the segment.
        point: Point2
    }
}
/// Callback invoked for each tracing [`Event`]; returning `Err` aborts the
/// trace (`trace_ray` propagates the error with `?`).
pub trait EventHandler<E> where Self: FnMut(&Event) -> Result<(), E> { }
// Blanket impl: any closure with the matching signature is an `EventHandler`.
impl <'a,E,F> EventHandler<E> for F where F: FnMut(&Event) -> Result<(), E> { }
/// Optical properties of a material.
///
/// The three `*_fraction` fields act as relative weights when choosing
/// between diffuse reflection, specular reflection, and refraction for each
/// hit; `total_fraction` caches their sum.
#[derive(Debug, Clone)]
pub struct MaterialProperties {
    pub diffuse_reflect_fraction: Scalar,
    /// Fraction of intensity absorbed on diffuse reflection.
    pub diffuse_reflect_absorption: Scalar,
    pub specular_reflect_fraction: Scalar,
    /// Fraction of intensity absorbed on specular reflection.
    pub specular_reflect_absorption: Scalar,
    pub refraction_fraction: Scalar,
    pub total_fraction: Scalar, // Sum of 'diffuse_reflect_fraction', 'specular_reflect_fraction', 'refraction_fraction'
    /// Intensity lost per squared unit of distance traveled through the
    /// material (see the attenuation computation in `trace_ray`).
    pub attenuation_coeff: Scalar,
    /// Cauchy-equation coefficients for the wavelength-dependent refractive
    /// index; must be non-empty (asserted in `add_refraction`).
    pub cauchy_coeffs: Vec<Scalar>
}
impl MaterialProperties {
pub fn default() -> MaterialProperties {
MaterialProperties {
diffuse_reflect_fraction: 0.5,
diffuse_reflect_absorption: 0.0,
specular_reflect_fraction: 0.5,
specular_reflect_absorption: 0.0,
refraction_fraction: 0.0,
total_fraction: 1.0,
attenuation_coeff: 0.0,
cauchy_coeffs: vec![ 1.0 ]
}
}
}
/// Per-segment payload stored in the quad tree: the segment index, used to
/// look up names and per-side material properties during tracing.
pub type RayTraceSegmentInfo = usize;
/// Double buffer of in-flight rays.
///
/// Invariant (asserted in `get_rays`): at most one of the two lists is
/// non-empty at any time. NOTE(review): presumably the lists are swapped
/// between tracing passes — confirm against the caller.
pub struct RayBuffer {
    pub old_rays: Vec<(Ray, LightProperties, usize)>, // usize gives traces remaining.
    pub new_rays: Vec<(Ray, LightProperties, usize)>
}
impl RayBuffer {
    /// Returns whichever ray list is currently populated.
    ///
    /// At most one of `old_rays`/`new_rays` is non-empty at any time (the
    /// debug assertion enforces this); when both are empty either answer is
    /// equivalent.
    pub fn get_rays(&self) -> &Vec<(Ray, LightProperties, usize)> {
        // Idiomatic emptiness checks (`is_empty`) instead of `len() == 0`.
        debug_assert!(self.old_rays.is_empty() || self.new_rays.is_empty());
        if self.old_rays.is_empty() { &self.new_rays } else { &self.old_rays }
    }

    /// Number of rays in the populated list. Since the other list is empty,
    /// the maximum of the two lengths is exactly that list's length.
    pub fn get_n_rays(&self) -> usize {
        cmp::max(self.old_rays.len(), self.new_rays.len())
    }
}
/// Immutable inputs shared across a whole ray-tracing run.
#[derive(Clone)]
pub struct RayTraceInitArgs<'a> {
    pub tracing_properties: &'a TracingProperties,
    /// Quad tree over scene segments, used for ray/segment intersection queries.
    pub qtree: &'a g::QTree<'a, RayTraceSegmentInfo>,
    /// Optional display names keyed by segment index; only named segments
    /// generate `Hit` events.
    pub segment_names: &'a HashMap<usize, String>,
    /// Material table; the two per-side vectors below hold indices into it.
    pub materials: &'a Vec<MaterialProperties>,
    /// Material index (into `materials`) for the left side of each segment.
    pub left_material_properties: &'a Vec<u8>,
    /// Material index (into `materials`) for the right side of each segment.
    pub right_material_properties: &'a Vec<u8>
}
/// Mutable state threaded through a ray-tracing run.
#[derive(Clone)]
pub struct RayTraceState<'a> {
    /// Shared immutable inputs for the run.
    args: &'a RayTraceInitArgs<'a>,
    // NOTE(review): neither counter is updated in the code visible here —
    // confirm where they are maintained.
    ray_count: usize,
    recursion_level: usize,
    /// RNG used for diffuse-direction and branch-selection sampling.
    rng: StdRng
}
impl<'a> RayTraceState<'a> {
    /// Creates the initial state for a trace, seeding the RNG from
    /// `args.tracing_properties.random_seed` so runs are reproducible.
    pub fn initial(args: &'a RayTraceInitArgs) -> RayTraceState<'a> {
        RayTraceState {
            args: args,
            ray_count: 0,
            recursion_level: 0,
            rng: SeedableRng::from_seed(&(args.tracing_properties.random_seed)[..])
        }
    }

    /// Mutable access to the tracing RNG.
    pub fn get_rng(&mut self) -> &mut StdRng {
        &mut self.rng
    }
}
/// Per-ray arguments for `trace_ray`.
struct TraceRayArgs<'a> {
    /// The ray being traced.
    ray: &'a Ray,
    /// Light properties (wavelength/intensity) of `ray`.
    ray_props: &'a LightProperties,
    /// Output buffer: rays spawned by this trace are pushed here.
    new_rays: &'a mut Vec<(Ray,LightProperties,usize)>
}
/// Traces a single ray through the scene: reports `Hit` events for named
/// segments, then for each touched segment spawns at most one new ray
/// (diffuse, specular, or refracted — chosen at random, weighted by the
/// material fractions), pushing it onto `args.new_rays`.
///
/// Returns the number of new rays generated, or the error produced by
/// `handle_event` if the handler aborts the trace.
fn trace_ray<F,E>(st: &mut RayTraceState, args: &mut TraceRayArgs, mut handle_event: &mut F)
-> Result<usize,E> // Returns number of new rays traced
where F: EventHandler<E> {
    let rayline = args.ray.p2 - args.ray.p1;

    let mut num_new_rays = 0;
    if let Some((segs_with_info, intersect, _)) = st.args.qtree.get_segments_touched_by_ray(args.ray) {
        for (seg, segi) in segs_with_info {
            // Only segments with a registered name produce `Hit` events;
            // `?` propagates a handler error and aborts the trace.
            if let Some(ref name) = st.args.segment_names.get(&segi) {
                handle_event(&Event::Hit {
                    segment_index: segi,
                    segment_name: name.as_str(),
                    point: intersect
                })?;
            }

            // Is the ray hitting the left surface or the right surface of
            // the segment?
            let side = g::point_side_of_line_segment(seg.p1, seg.p2, args.ray.p1);

            // If the ray actually originates on this segment, ignore it.
            if side == 0
                { continue; }

            //println!("SIDE: ({}, {}, {}, {}) segi={} {}", seg.p1.coords[0], seg.p1.coords[1], seg.p2.coords[0], seg.p2.coords[1], segi, side);

            let segline = seg.p2 - seg.p1;
            // The left normal (looking "along" the line from the origin.)
            let mut surface_normal = Vector2::new(-segline.data[1], segline.data[0]);
            // Ensure that surface normal is pointing in opposite direction to ray.
            if side == 1 {
                surface_normal = -surface_normal;
            }

            // `side` decides which per-side material index applies: the ray
            // travels out of the "from" material and into the "into" material.
            let into_matprops_i;
            let from_matprops_i;
            if side == -1 {
                into_matprops_i = st.args.right_material_properties[segi];
                from_matprops_i = st.args.left_material_properties[segi];
            }
            else {
                into_matprops_i = st.args.left_material_properties[segi];
                from_matprops_i = st.args.right_material_properties[segi];
            }
            let ref into_matprops = st.args.materials[into_matprops_i as usize];
            let ref from_matprops = st.args.materials[from_matprops_i as usize];

            // We need to calculate the extent to which the ray's intensity has been attenuated
            // by traveling through the relevant material for whatever distance.
            // NOTE(review): attenuation scales with the *squared* distance here
            // (`distance_squared`) — confirm this is intended rather than
            // linear distance.
            let distance2 = nalgebra::distance_squared(&intersect, &(args.ray.p1));
            let att = from_matprops.attenuation_coeff * distance2;
            let mut new_intensity = args.ray_props.intensity - att;

            // Decide whether we're going to do diffuse reflection, specular reflection,
            // or refraction, based on the relative amount of intensity they preserve.
            let rnd = st.rng.next_f64() * into_matprops.total_fraction;
            if rnd < into_matprops.diffuse_reflect_fraction {
                new_intensity *= 1.0 - into_matprops.diffuse_reflect_absorption;
                num_new_rays += add_diffuse(st, args, new_intensity, &segline, &into_matprops, &intersect, &surface_normal);
            }
            else if rnd < into_matprops.diffuse_reflect_fraction + into_matprops.specular_reflect_fraction {
                new_intensity *= 1.0 - into_matprops.specular_reflect_absorption;
                num_new_rays += add_specular(st, args, new_intensity, &rayline, &into_matprops, &intersect, &surface_normal);
            }
            else if rnd < into_matprops.total_fraction {
                num_new_rays += add_refraction(st, args, new_intensity, &rayline, &from_matprops, &into_matprops, &intersect, &surface_normal, side);
            }
        }
    }

    Ok(num_new_rays)
}
/// Spawns a single diffusely-reflected ray at `intersect`, heading in a
/// random direction within the half-plane on the `surface_normal` side of
/// the segment. Returns the number of rays added (0 or 1).
fn add_diffuse(
    st: &mut RayTraceState,
    args: &mut TraceRayArgs,
    new_intensity: Scalar,
    segline: &Vector2,
    matprops: &MaterialProperties,
    intersect: &Point2,
    surface_normal: &Vector2
)
-> usize
{
    let _ = matprops; // Not used currently; suppress compiler warning.
    //print!("DIFFMAT {:?} {:?}", matprops, segline);

    let mut num_new_rays = 0;
    // If the intensity of the reflected ray is above the threshold,
    // then cast it in a randomly chosen direction.
    if new_intensity > st.args.tracing_properties.intensity_threshold {
        num_new_rays += 1;

        let mut new_diffuse_ray_props = *(args.ray_props);
        new_diffuse_ray_props.intensity = new_intensity;

        // Random angle in (0, pi), decomposed into a component along the
        // segment and a component along the surface normal.
        let angle = (st.rng.next_f64() as Scalar) * consts::PI;
        let along_seg = angle.cos();
        let normal_to_seg = angle.sin();
        let new_ray_p2 = intersect + (along_seg * segline) + (normal_to_seg * surface_normal);
        let new_ray = Ray {
            p1: *intersect,
            p2: new_ray_p2
        };
        //println!("NEW RAY {} {} {} {}", intersect.coords[0], intersect.coords[1], new_ray_p2.coords[0], new_ray_p2.coords[1]);
        // The trailing 1 is the remaining trace count for the new ray.
        args.new_rays.push((new_ray, new_diffuse_ray_props, 1));
    }
    num_new_rays
}
/// Spawn a specularly (mirror) reflected ray at `intersect`.
/// Returns the number of rays added (0 or 1).
fn add_specular(
    st: &mut RayTraceState,
    args: &mut TraceRayArgs,
    new_intensity: Scalar,
    rayline: &Vector2,
    matprops: &MaterialProperties,
    intersect: &Point2,
    surface_normal: &Vector2
)
-> usize
{
    // `matprops` is currently unused; keep the parameter (interface stability)
    // but silence the compiler warning.
    let _ = matprops;
    // Rays whose intensity has fallen to the threshold or below die here.
    if new_intensity <= st.args.tracing_properties.intensity_threshold {
        return 0;
    }
    // Mirror the incoming direction about the surface normal:
    //   r = d - 2 (d . n) n, with both vectors normalized.
    let n = surface_normal.normalize();
    let d = rayline.normalize();
    let reflected = d - ((2.0 * nalgebra::dot(&d, &n)) * n);
    let mut props = *(args.ray_props);
    props.intensity = new_intensity;
    args.new_rays.push((Ray { p1: *intersect, p2: intersect + reflected }, props, 1));
    1
}
/// Spawn a refracted ray at `intersect`, bending the incoming ray per
/// Snell's law with wavelength-dependent refractive indices computed from
/// each material's Cauchy coefficients. Returns the number of rays added
/// (0 or 1).
fn add_refraction(
    st: &mut RayTraceState,
    args: &mut TraceRayArgs,
    new_intensity: Scalar,
    rayline: &Vector2,
    from_matprops: &MaterialProperties,
    into_matprops: &MaterialProperties,
    intersect: &Point2,
    surface_normal: &Vector2,
    side: i32
)
-> usize
{
    debug_assert!(side != 0);
    debug_assert!(from_matprops.cauchy_coeffs.len() > 0);
    debug_assert!(into_matprops.cauchy_coeffs.len() > 0);
    let mut num_new_rays = 0;
    if new_intensity > st.args.tracing_properties.intensity_threshold {
        num_new_rays += 1;
        // Calculate the refractive index for each material given the
        // wavelength, using Cauchy's equation:
        //   n(lambda) = A + B/lambda^2 + C/lambda^4 + ...
        let wavelength = args.ray_props.wavelength;
        let from_ri = {
            let mut ri = from_matprops.cauchy_coeffs[0];
            let mut pow: i32 = 2;
            for c in from_matprops.cauchy_coeffs.iter().skip(1) {
                ri += c / wavelength.powi(pow);
                pow += 2;
            }
            ri
        };
        // BUG FIX: the exponent series must restart at 2 for the second
        // material. Previously `pow` carried over from the first loop, so
        // whenever both materials had more than one Cauchy coefficient the
        // "into" material's higher-order terms used the wrong wavelength
        // powers.
        let into_ri = {
            let mut ri = into_matprops.cauchy_coeffs[0];
            let mut pow: i32 = 2;
            for c in into_matprops.cauchy_coeffs.iter().skip(1) {
                ri += c / wavelength.powi(pow);
                pow += 2;
            }
            ri
        };
        let ri = from_ri / into_ri;
        let nsn = surface_normal.normalize();
        let rayline = rayline.normalize();
        let n_1 = -nsn;
        let c = nalgebra::dot(&n_1, &rayline);
        debug_assert!(c >= 0.0);
        // Vector form of Snell's law:
        //   t = ri * d + (ri * c - sqrt(1 - ri^2 (1 - c^2))) * n
        // NOTE(review): when ri^2 (1 - c^2) > 1 (total internal reflection)
        // the sqrt argument is negative and this yields NaN — confirm that
        // callers never reach that regime or handle it upstream.
        let vrefract =
            (ri * rayline) +
            (((ri * c) -
              (1.0 - ri*ri*(1.0 - c*c)).sqrt())
             *nsn);
        let mut new_refracted_ray_props = *(args.ray_props);
        new_refracted_ray_props.intensity = new_intensity;
        let new_ray = Ray {
            p1: *intersect,
            p2: intersect + vrefract
        };
        args.new_rays.push((new_ray, new_refracted_ray_props, 1));
    }
    num_new_rays
}
/// Result of one call to `ray_trace_step`.
pub struct TraceStepResult {
// Cumulative number of rays traced so far (copied from the state's counter).
pub ray_count: usize,
// Number of completed trace steps (incremented once per step).
pub recursion_level: usize
}
/// Trace every ray in `rayb.old_rays` once, pushing any spawned rays into
/// `rayb.new_rays`, then swap the two buffers so the spawned rays become the
/// next step's input. Rays that still have traces remaining are retained at
/// the front of the (swapped-out) buffer.
///
/// Returns the cumulative ray count and recursion level after this step, or
/// the first error produced by `handle_event` (propagated out of `trace_ray`).
pub fn ray_trace_step<F,E>(st: &mut RayTraceState, rayb: &mut RayBuffer, mut handle_event: F)
-> Result<TraceStepResult, E>
where F: EventHandler<E> {
// Old rays that still have additional traces left will be at the beginning of the buffer.
// So to ensure a depth-first trace, we iterate through it in reverse.
// We know that any rays with additional traces left will end up together at the beginning
// of the buffer, so once we're done we can just truncate it.
let mut first_keep_index = 0;
// `i` tracks the (1-based) position of the current ray while walking in reverse.
let mut i = rayb.old_rays.len();
for &mut(ref ray, ref ray_props, ref mut traces_remaining) in rayb.old_rays.iter_mut().rev() {
let n_new_rays = trace_ray(
st,
&mut TraceRayArgs {
ray: ray,
ray_props: ray_props,
new_rays: &mut rayb.new_rays
},
&mut handle_event
)?;
st.ray_count += n_new_rays;
*traces_remaining -= 1;
// Remember the highest index whose ray still has traces left; everything
// at or below it is kept by the truncate below.
if first_keep_index == 0 && *traces_remaining > 0 {
first_keep_index = i;
}
i -= 1;
}
rayb.old_rays.truncate(first_keep_index);
// New rays become the input of the next step; kept old rays move to new_rays.
mem::swap(&mut (rayb.old_rays), &mut (rayb.new_rays));
st.recursion_level += 1;
Ok(TraceStepResult {
recursion_level: st.recursion_level,
ray_count: st.ray_count
})
}
|
use futures::stream::Stream;
use futures::TryStreamExt; // body.map_ok(|mut buf|
use warp::http::header::HeaderMap;
use warp::Filter;
use std::collections::HashMap;
#[macro_use]
extern crate log;
lazy_static::lazy_static! {
/// Routing table: request path -> upstream host the proxy forwards to
/// (consulted by `handler_proxy`). Unknown paths are rejected.
static ref ENDPOINTS: HashMap<&'static str, &'static str> = {
let mut endpoints = HashMap::new();
endpoints.insert("/v1/tablet/events", "calendar.ipsumlorem.net");
endpoints.insert("/login/password","loremipsum.ipsumlorem.net");
endpoints.insert("/api/v1/structure/rooms","loremipsum.ipsumlorem.net");
endpoints.insert("/api/v2/support_reports","loremipsum.ipsumlorem.net");
// Gitlab
endpoints.insert("/gitlab-org/gitlab-foss/issues/62077","gitlab.com");
// GitHub
endpoints.insert("/users/octocat/orgs","api.github.com");
endpoints
};
}
/// Rejection used for every proxy failure: unknown endpoint path or an
/// error from the upstream hyper request (see `handler_proxy`).
#[derive(Debug)]
struct HyperClientError;
impl warp::reject::Reject for HyperClientError {}
/// Forward an incoming request to the upstream host registered for its
/// path in `ENDPOINTS`, streaming the request body through unchanged.
///
/// Rejects with `HyperClientError` when the path is not in the table or
/// the upstream request fails.
pub async fn handler_proxy(
path: warp::path::FullPath,
method: warp::http::Method,
headers: HeaderMap,
body: impl Stream<Item = Result<impl hyper::body::Buf, warp::Error>> + Send + Sync + 'static,
client: hyper::Client<hyper_rustls::HttpsConnector<hyper::client::HttpConnector>>,
) -> Result<impl warp::Reply, warp::Rejection> {
// Get host based on endpoint
let host = match ENDPOINTS.get(&path.as_str()) {
Some(host) => host,
None => return Err(warp::reject::custom(HyperClientError)),
};
let url = format!("https://{}{}", &host, path.as_str());
// Map stream from buf to bytes
let body = body.map_ok(|mut buf| buf.to_bytes());
debug!("{:?} {}", &method, &url);
let mut request = hyper::Request::builder()
.uri(url)
.method(method)
.body(hyper::Body::wrap_stream(body))
.unwrap();
// NOTE(review): this replaces *all* request headers wholesale, including
// the client's original Host header — confirm the upstream hosts accept that.
*request.headers_mut() = headers;
// NOTE(review): despite the "resp" label, this logs the outgoing request.
trace!("resp: {:?}", request);
// Get data from server
let response = client.request(request).await;
debug!("client finished");
match response {
// Return response data
Ok(response) => Ok(response),
Err(_) => Err(warp::reject::custom(HyperClientError)),
}
}
/// Entry point: wire up the warp filter chain and serve the TLS proxy on
/// 127.0.0.1:3030.
#[tokio::main]
async fn main() {
    std::env::set_var("RUST_LOG", "warpsslproxyhyper1=trace");
    env_logger::init();
    // One HTTPS-capable hyper client, cloned into every request handler.
    let connector = hyper_rustls::HttpsConnector::new();
    let proxy_client = hyper::Client::builder().build::<_, hyper::Body>(connector);
    let client_filter = warp::any().map(move || proxy_client.clone());
    // Every request is handed to handler_proxy along with its full path,
    // method, headers, streamed body, and the shared client.
    let routes = warp::any()
        .and(warp::path::full())
        .and(warp::method())
        .and(warp::header::headers_cloned())
        .and(warp::body::stream())
        .and(client_filter)
        .and_then(handler_proxy);
    warp::serve(routes)
        .tls()
        .cert_path("ssl-keys/rustasync.crt")
        .key_path("ssl-keys/rustasync.key")
        .run(([127, 0, 0, 1], 3030))
        .await;
}
|
// Public submodules of this crate/module.
pub mod io;
pub mod string;
|
use radmin::uuid::Uuid;
use serde::{Deserialize, Serialize};
use super::ContactInfo as Contact;
use crate::models::Address;
use crate::schema::contact_addresses;
/// Join-table row linking a `Contact` to an `Address` (diesel model over
/// the `contact_addresses` table).
#[derive(
Debug,
PartialEq,
Clone,
Serialize,
Deserialize,
Queryable,
Identifiable,
AsChangeset,
Associations,
)]
#[belongs_to(Contact)]
#[belongs_to(Address)]
#[table_name = "contact_addresses"]
pub struct ContactAddress {
pub id: Uuid,
pub contact_id: Uuid,
pub address_id: Uuid,
// Free-form label for the link, e.g. distinguishing kinds of addresses.
// NOTE(review): valid values aren't visible here — check call sites.
pub address_type: String,
}
|
use crate::config::{Config, KeyAction, Search as SearchConfig, SearchMatcher};
use crate::persistent::WindowState;
use anyhow::{Error, Result};
use serde::{Deserialize, Serialize};
use std::collections::HashMap;
use std::fmt;
use std::io;
use std::path::{Path, PathBuf};
/// Messages sent to the renderer, serialized as JSON tagged by a
/// snake_case `kind` field.
#[derive(Serialize)]
#[serde(tag = "kind")]
#[serde(rename_all = "snake_case")]
pub enum MessageToRenderer<'a> {
// Initial configuration push: keymaps, search settings, theme, and the
// recently opened paths.
Config {
keymaps: &'a HashMap<String, KeyAction>,
search: &'a SearchConfig,
theme: Theme,
recent: Vec<&'a Path>,
},
Search,
SearchNext,
SearchPrevious,
Welcome,
Outline,
// A new file should be displayed.
NewFile {
path: &'a Path,
},
History,
Help,
// Current zoom expressed as an integer percentage.
Zoom {
percent: u16,
},
Reload,
Debug,
// Current always-on-top (pinned) state.
AlwaysOnTop {
pinned: bool,
},
}
/// Direction of a zoom request.
#[derive(Clone, Copy, Deserialize, Debug)]
pub enum Zoom {
In,
Out,
}
/// Messages received from the renderer, deserialized from JSON tagged by
/// a snake_case `kind` field.
#[derive(Deserialize, Debug)]
#[serde(tag = "kind")]
#[serde(rename_all = "snake_case")]
pub enum MessageFromRenderer {
Init,
Reload,
FileDialog,
DirDialog,
Forward,
Back,
Quit,
// Search request: query text, optional match index, and matcher kind.
Search { query: String, index: Option<usize>, matcher: SearchMatcher },
OpenFile { path: String },
Zoom { zoom: Zoom },
// Error reported by the renderer side.
Error { message: String },
}
/// Application-level events delivered to the event loop
/// (see `App::handle_user_event`).
#[derive(Debug)]
pub enum UserEvent {
IpcMessage(MessageFromRenderer),
FileDrop(PathBuf),
WatchedFilesChanged(Vec<PathBuf>),
OpenLocalPath(PathBuf),
OpenExternalLink(String),
Error(Error),
}
/// Logical identifiers for the application's menu entries, decoupled from
/// any platform-specific menu item id (see `MenuItems::item_from_id`).
#[derive(Clone, Copy, Debug)]
pub enum MenuItem {
Quit,
Forward,
Back,
Reload,
OpenFile,
WatchDir,
Search,
SearchNext,
SearchPrevious,
Outline,
Print,
ZoomIn,
ZoomOut,
History,
Help,
OpenRepo,
ToggleAlwaysOnTop,
}
/// Translates a platform-specific menu item id into a logical `MenuItem`.
pub trait MenuItems {
// Backend-specific id type for a menu entry.
type ItemId: fmt::Debug;
fn item_from_id(&self, id: Self::ItemId) -> Result<MenuItem>;
}
/// Writes a pre-serialized ("raw") message straight into a writer,
/// bypassing `MessageToRenderer` serialization
/// (see `Renderer::send_message_raw`).
pub trait RawMessageWriter {
type Output;
fn write_to(self, writer: impl io::Write) -> io::Result<Self::Output>;
}
/// UI color theme, serialized to the renderer as part of the config.
#[derive(Clone, Copy, PartialEq, Eq, Debug, Serialize)]
pub enum Theme {
Dark,
Light,
}
/// Discrete zoom level (0..=14) mapping onto Chrome's zoom factor steps.
#[repr(transparent)]
#[derive(Clone, Copy, PartialEq, Eq, Serialize, Deserialize, Debug)]
pub struct ZoomLevel(u8);
impl ZoomLevel {
    const MAX: u8 = 14;
    // Following the same zoom factors in Chrome; index == level.
    const FACTORS: [f64; 15] = [
        0.25, 0.33, 0.50, 0.67, 0.75, 0.80, 0.90, 1.00, 1.10, 1.25, 1.50, 1.75, 2.00, 2.50, 3.00,
    ];
    /// Multiplicative zoom factor for this level.
    pub fn factor(self) -> f64 {
        Self::FACTORS
            .get(usize::from(self.0))
            .copied()
            .unwrap_or_else(|| unreachable!("Invalid zoom level {:?}", self.0))
    }
    /// Zoom factor as an integer percentage (e.g. 100 for factor 1.0).
    pub fn percent(self) -> u16 {
        let scaled = self.factor() * 100.0;
        scaled as u16
    }
    /// One level up, or `None` when already at the maximum.
    pub fn zoom_in(self) -> Option<Self> {
        (self.0 < Self::MAX).then(|| Self(self.0 + 1))
    }
    /// One level down, or `None` when already at the minimum.
    pub fn zoom_out(self) -> Option<Self> {
        (self.0 > 0).then(|| Self(self.0 - 1))
    }
}
impl Default for ZoomLevel {
/// Default level 7, i.e. zoom factor 1.0 (100%).
fn default() -> Self {
Self(7) // Zoom factor 1.0
}
}
/// Whether the event loop should keep running after handling an event.
#[derive(Debug)]
pub enum AppControl {
Continue,
Exit,
}
/// Application callbacks driven by the event loop: user events, menu
/// events, and a final exit hook.
pub trait App<M: MenuItems> {
fn handle_user_event(&mut self, event: UserEvent) -> Result<AppControl>;
fn handle_menu_event(&mut self, id: M::ItemId) -> Result<AppControl>;
fn handle_exit(&self) -> Result<()>;
}
/// Sendable handle for injecting `UserEvent`s into the event loop from
/// other threads.
pub trait EventChannel: 'static + Send {
fn send_event(&self, event: UserEvent);
}
/// Platform event loop abstraction. `start` takes ownership of the loop
/// and the app and never returns (`-> !`).
pub trait EventLoop {
type Channel: EventChannel;
type Menu: MenuItems;
fn create_channel(&self) -> Self::Channel;
fn start<A>(self, app: A) -> !
where
A: App<Self::Menu> + 'static;
}
/// Platform renderer backend: owns the window, forwards messages to the
/// UI, and exposes window-level state (title, theme, zoom, pinning).
pub trait Renderer: Sized {
    type EventLoop: EventLoop;
    type Menu: MenuItems;
    /// Create a renderer from the config, the event loop, and an optional
    /// previously persisted window state.
    fn new(
        config: &Config,
        event_loop: &Self::EventLoop,
        window_state: Option<WindowState>,
    ) -> Result<Self>;
    fn menu(&self) -> &Self::Menu;
    /// Send a typed message to the renderer.
    fn send_message(&self, message: MessageToRenderer<'_>) -> Result<()>;
    /// Send a pre-serialized message via a raw writer.
    fn send_message_raw<W: RawMessageWriter>(&self, writer: W) -> Result<W::Output>;
    fn set_title(&self, title: &str);
    fn window_state(&self) -> Option<WindowState>;
    fn theme(&self) -> Theme;
    fn show(&self);
    // Fixed the parameter-name typo `rbga` -> `rgba`; Rust arguments are
    // positional, so callers and implementors are unaffected.
    fn set_background_color(&self, rgba: (u8, u8, u8, u8)) -> Result<()>;
    fn print(&self) -> Result<()>;
    fn zoom(&mut self, level: ZoomLevel);
    fn zoom_level(&self) -> ZoomLevel;
    fn set_always_on_top(&mut self, enabled: bool);
    fn always_on_top(&self) -> bool;
}
|
// Copyright 2021 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
extern crate http;
extern crate hyper;
extern crate serde;
extern crate serde_json;
extern crate serde_with;
extern crate tokio;
extern crate url;
use chrono::{DateTime, Utc};
use std::convert::TryInto;
use url::Url;
use crate::auth::add_auth_header;
use crate::http::{
do_gcs_request, new_client, request_with_gcs_retry, GcsHttpClient, GCS_DEFAULT_MAX_BACKOFF,
GCS_DEFAULT_TIMEOUT,
};
use crate::errors::HttpError;
/// Subset of the GCS JSON API Bucket resource that this client uses.
#[derive(Serialize, Deserialize, Debug)]
#[serde(rename_all = "camelCase")]
pub struct Bucket {
// Kept private: callers address buckets by `name`.
id: String,
pub name: String,
pub location: String,
self_link: String,
}
/// Subset of the GCS JSON API Object resource that this client uses.
/// `self_link` is used as the base URL for media (byte-range) reads.
#[derive(Serialize, Deserialize, Debug)]
#[serde(rename_all = "camelCase")]
pub struct Object {
id: String,
pub name: String,
pub bucket: String,
self_link: String,
// Size arrives as a decimal string in the JSON; parsed via display_fromstr.
#[serde(with = "serde_with::rust::display_fromstr")]
pub size: u64,
pub time_created: DateTime<Utc>,
pub updated: DateTime<Utc>,
// The docs at
// https://cloud.google.com/storage/docs/json_api/v1/objects#resource
// refer to generation and metageneration as "long" represented as
// a strings.
#[serde(with = "serde_with::rust::display_fromstr")]
pub generation: i64,
#[serde(with = "serde_with::rust::display_fromstr")]
pub metageneration: i64,
}
/// One page of an Objects.list response; `next_page_token` is present
/// when more pages remain (followed by `list_objects`).
#[derive(Serialize, Deserialize, Debug)]
#[serde(rename_all = "camelCase")]
pub struct ListObjectsResponse {
next_page_token: Option<String>,
prefixes: Option<Vec<String>>,
items: Option<Vec<Object>>,
}
/// State of an in-progress resumable upload, created by
/// `create_object_with_client` and advanced by `append_bytes_with_client`.
pub struct ResumableUploadCursor {
pub name: String,
pub bucket: String,
// The session URI
pub session_uri: String,
// The amount written so far.
pub offset: u64,
// We need to have a buffer to build up writes in multiples of 256 KiB. Sigh.
pub buffer: Vec<u8>,
}
#[allow(dead_code)]
// get_bucket isn't used by fs, but I don't want to mark it test only.
/// Fetch bucket metadata for `bucket_str` from the GCS JSON API using a
/// fresh client. Panics (unwrap) if the response body isn't valid JSON.
async fn get_bucket(bucket_str: &str) -> Result<Bucket, HttpError> {
debug!("Looking to request: {:#?}", bucket_str);
let client = new_client();
let base_url = "https://www.googleapis.com/storage/v1/b";
let bucket_url = format!("{}/{}", base_url, bucket_str);
let uri: hyper::Uri = bucket_url.parse()?;
let mut builder = hyper::Request::builder().uri(uri);
add_auth_header(&mut builder).await?;
let body = hyper::Body::empty();
let request = builder.body(body).expect("Failed to construct request");
debug!("{:#?}", request);
let bytes = do_gcs_request(&client, request).await?;
let bucket: Bucket = serde_json::from_slice(bytes.as_ref()).unwrap();
debug!("{:#?}", bucket);
Ok(bucket)
}
#[allow(dead_code)]
// get_object isn't used by fs, but I don't want to mark it test only.
/// Fetch object metadata from the given JSON API `url` using a fresh
/// client. Panics (unwrap) if the response body isn't valid JSON.
async fn get_object(url: Url) -> Result<Object, HttpError> {
debug!("Looking to request: {:#?}", url);
let client = new_client();
let uri: hyper::Uri = url.into_string().parse()?;
let body = hyper::Body::empty();
let mut builder = hyper::Request::builder().uri(uri);
add_auth_header(&mut builder).await?;
let request = builder.body(body).expect("Failed to construct request");
let bytes = do_gcs_request(&client, request).await?;
let object: Object = serde_json::from_slice(&bytes).unwrap();
debug!("{:#?}", object);
Ok(object)
}
#[allow(dead_code)]
// get_bytes isn't used by fs, but I don't want to mark it test only.
/// Convenience wrapper: build a one-off client and delegate to
/// `get_bytes_with_client`.
async fn get_bytes(obj: &Object, offset: u64, how_many: u64) -> Result<Vec<u8>, HttpError> {
    let client = new_client();
    get_bytes_with_client(&client, obj, offset, how_many).await
}
/// Fetch `how_many` bytes of `obj` starting at `offset` via an HTTP Range
/// request against the object's self link (`?alt=media`), with
/// generation/metageneration preconditions so we only read the version
/// described by `obj`.
///
/// Asking for zero bytes is an error (`HttpError::Body`); overfetching
/// past the end is allowed and yields a short read (or an error from GCS
/// when `offset` itself is past the end).
pub async fn get_bytes_with_client(
    client: &GcsHttpClient,
    obj: &Object,
    offset: u64,
    how_many: u64,
) -> Result<Vec<u8>, HttpError> {
    debug!(
        "Asking for {} bytes at {} from the origin for {} (self link = {}",
        how_many, offset, obj.name, obj.self_link
    );
    if how_many == 0 {
        debug!("how_many = 0. You must ask for at least one byte");
        return Err(HttpError::Body);
    }
    // TODO(boulos): Should we add logic here to test that offset +
    // how_many <= obj.size? It's *likely* a client error, but who are
    // we to decide? (Note: obj.size shouldn't be out of date, because
    // we also enforce generation/meta-generation match, but we want
    // to make that optional). Leaving it free allows clients to
    // "overfetch" and let GCS decide and return the short read, or an
    // error if offset > obj.size.
    // Use the self_link from the object as the url, but add ?alt=media
    let mut object_url = Url::parse(&obj.self_link).unwrap();
    // HTTP Range headers name an *inclusive* final byte, hence the -1.
    let byte_range = format!("bytes={}-{}", offset, offset + how_many - 1);
    // Make sure we're getting the data from the version we intend.
    // TODO(boulos): Allow people to read stale data (generation=) if
    // they prefer, rather than require that the latest version is up
    // to date.
    let generation_str = format!("{}", obj.generation);
    let metageneration_str = format!("{}", obj.metageneration);
    object_url.query_pairs_mut().append_pair("alt", "media");
    object_url
        .query_pairs_mut()
        .append_pair("ifGenerationMatch", &generation_str);
    object_url
        .query_pairs_mut()
        .append_pair("ifMetagenerationMatch", &metageneration_str);
    let now = std::time::Instant::now();
    let uri: hyper::Uri = object_url.into_string().parse()?;
    let body = hyper::Body::empty();
    let mut builder = http::Request::builder()
        .uri(uri)
        // NOTE(boulos): RANGE *not* CONTENT-RANGE.
        // https://cloud.google.com/storage/docs/xml-api/reference-headers#range
        .header(http::header::RANGE, byte_range);
    add_auth_header(&mut builder).await?;
    let request = builder.body(body).expect("Failed to construct request");
    debug!("Performing range request {:#?}", request);
    // Do the request (reliably) but bail on error.
    let written = do_gcs_request(client, request).await?;
    debug!(
        "Got back {} bytes. Took {:#?}",
        written.len(),
        now.elapsed()
    );
    // Range requests *can* be ignored and given a 200 "here's the
    // whole thing". If we got back more bytes than expected, trim to
    // the requested window.
    if written.len() > how_many.try_into().unwrap() {
        let start: usize = offset.try_into().unwrap();
        let how_many_usize: usize = how_many.try_into().unwrap();
        // BUG FIX: Rust ranges are half-open, so the requested window is
        // start..start + how_many; the previous `start + how_many - 1`
        // end silently dropped the last requested byte. Clamp to the
        // bytes we actually received so a short full-body response can't
        // panic the slice.
        let end: usize = (start + how_many_usize).min(written.len());
        Ok(written.slice(start..end).to_vec())
    } else {
        Ok(written.to_vec())
    }
}
/// Start a resumable upload for `name` in `bucket`.
///
/// Sends the initial `uploadType=resumable` POST and returns a
/// `ResumableUploadCursor` holding the session URI that GCS returns in
/// the LOCATION header. Bytes are then added with
/// `append_bytes_with_client` and the object is completed with
/// `finalize_upload_with_client`.
pub async fn create_object_with_client(
client: &GcsHttpClient,
bucket: &str,
name: &str,
) -> Result<ResumableUploadCursor, HttpError> {
debug!(
"Going to start a resumable upload for object {} in bucket {}",
name, bucket
);
let base_url = "https://storage.googleapis.com/upload/storage/v1/b";
let full_url = format!(
"{base_url}/{bucket}/o?uploadType=resumable",
base_url = base_url,
bucket = bucket
);
let upload_url: hyper::Uri = full_url.parse()?;
// For uploads, you just need the object name as JSON.
let object_json = serde_json::json!({
"name": name,
});
let mut builder = hyper::Request::builder()
.method(hyper::Method::POST)
.uri(upload_url)
.header(http::header::CONTENT_TYPE, "application/json");
add_auth_header(&mut builder).await?;
let request = builder
.body(hyper::Body::from(object_json.to_string()))
.expect("Failed to construct upload request");
debug!("{:#?}", request);
let response = request_with_gcs_retry(
&client,
request,
GCS_DEFAULT_MAX_BACKOFF,
GCS_DEFAULT_TIMEOUT,
)
.await?;
debug!("{:#?}", response);
if response.status() != hyper::StatusCode::OK {
return Err(HttpError::Status(response.status()));
}
// The session URI for subsequent segment uploads comes back in LOCATION.
if !response.headers().contains_key(hyper::header::LOCATION) {
debug!("Didn't get back a LOCATION header!");
return Err(HttpError::UploadFailed);
}
let session_uri = response.headers().get(hyper::header::LOCATION).unwrap();
debug!("Got resumable upload URI {:#?}", session_uri);
Ok(ResumableUploadCursor {
name: name.to_string(),
bucket: bucket.to_string(),
session_uri: session_uri.to_str().unwrap().to_string(),
offset: 0,
// Our 256 KiB buffer.
buffer: Vec::with_capacity(256 * 1024),
})
}
/// Upload one segment of a resumable upload session.
///
/// For non-final segments, `data` must be a non-empty multiple of 256 KiB
/// and GCS answers 308 "Resume Incomplete" — treated as success here,
/// returning `Ok(None)`. When `finalize` is true, the total object size is
/// sent in the Content-Range and a successful response carries the
/// completed `Object` (returned as `Ok(Some(..))`).
async fn _do_resumable_upload(
client: &GcsHttpClient,
session_uri: &str,
offset: u64,
data: &[u8],
finalize: bool,
) -> Result<Option<Object>, HttpError> {
if data.is_empty() && !finalize {
error!("Empty data for non-finalize");
return Err(HttpError::Body);
}
if data.len() % (256 * 1024) != 0 && !finalize {
error!(
"Asked to append {} bytes which isn't a multiple of 256 KiB",
data.len()
);
return Err(HttpError::Body);
}
// Inclusive index of the last byte in this segment (== offset for an
// empty finalize).
let last_byte = match data.len() {
0 => offset,
_ => offset + (data.len() as u64) - 1,
};
// "*" means "total size unknown" until the finalizing segment.
let object_size = if finalize {
format!("{}", offset + data.len() as u64)
} else {
String::from("*")
};
// NOTE(boulos): *CONTENT_RANGE* has the format bytes X-Y/Z. While RANGE is bytes=X-Y.
let byte_range = format!("bytes {}-{}/{}", offset, last_byte, object_size);
let verb = if finalize { "finalize" } else { "issue" };
debug!(
"Going to {} resumable upload with range {}",
verb, byte_range
);
let upload_url: hyper::Uri = session_uri.parse()?;
// Hopefully this works with empty bodies.
let body = hyper::body::Bytes::copy_from_slice(data);
let chunked: Vec<Result<_, std::io::Error>> = vec![Ok(body)];
let chunk_stream = futures::stream::iter(chunked);
let body = hyper::Body::wrap_stream(chunk_stream);
let request = hyper::Request::builder()
.method(hyper::Method::POST)
.uri(upload_url)
.header(http::header::CONTENT_RANGE, byte_range)
.header(
http::header::CONTENT_TYPE,
"application/x-www-form-urlencoded",
)
.body(body)
.expect("Failed to construct upload request");
debug!("{:#?}", request);
let response = do_gcs_request(&client, request).await;
if !finalize {
// Check that our upload worked. We're actually looking for a
// 308 "Resume Incomplete" error.
if !response.is_err() {
debug!("Unexpected Ok() from multi-part upload!");
return Err(HttpError::UploadFailed);
}
let err = response.unwrap_err();
match err {
HttpError::Status(status) => {
if status.as_u16() == 308 {
// We got a 308! That's "success" for us.
// TODO(boulos): We need to check that the Range
// header response says we uploaded all our
// bytes. But we should do that inside of
// do_gcs_request (somehow) or just use
// request_with_gcs_retry directly here as well.
return Ok(None);
}
return Err(err);
}
_ => {
// Anything else is also an error.
return Err(err);
}
};
}
// This was our final segment. The response should be an Object.
let bytes = response?;
debug!("response bytes {:#?}", bytes);
let object: Object = serde_json::from_slice(&bytes).unwrap();
debug!("{:#?}", object);
Ok(Some(object))
}
/// Append `data` to the resumable upload behind `cursor`, flushing
/// complete 256 KiB chunks to GCS and buffering any remainder until a
/// later append or the finalize call.
///
/// Returns the number of bytes accepted (always `data.len()` on success).
pub async fn append_bytes_with_client(
client: &GcsHttpClient,
cursor: &mut ResumableUploadCursor,
data: &[u8],
) -> Result<usize, HttpError> {
debug!("Asking to append {} bytes to our cursor", data.len());
let buffer_remaining = cursor.buffer.capacity() - cursor.buffer.len();
if data.len() <= buffer_remaining {
// Just append.
cursor.buffer.extend_from_slice(data);
return Ok(data.len());
}
let remaining = if !cursor.buffer.is_empty() {
// First fill up the buffer.
let (left, right) = data.split_at(buffer_remaining);
cursor.buffer.extend_from_slice(left);
debug!("Flushing the buffer of size {} to GCS", cursor.buffer.len());
let flush = _do_resumable_upload(
client,
&cursor.session_uri,
cursor.offset,
cursor.buffer.as_slice(),
false, /* not finalizing */
)
.await;
match flush {
Err(e) => return Err(e),
_ => debug!("Flush succeeded!"),
}
// Move the offset forward and clear our buffer.
cursor.offset += cursor.buffer.len() as u64;
cursor.buffer.clear();
right
} else {
// Don't do anything, so we can catch the full chunks without buffering.
data
};
// The current buffer is empty and we might have several chunks we can
// ship without buffering.
let num_chunks: usize = remaining.len() / (256 * 1024);
let chunked_bytes = 256 * 1024 * num_chunks;
let (full_chunks, final_append) = remaining.split_at(chunked_bytes);
if num_chunks > 0 {
// Write out the full chunks in one shot.
debug!(
"Shipping {} full chunks ({} total bytes)",
num_chunks, chunked_bytes
);
let flush = _do_resumable_upload(
client,
&cursor.session_uri,
cursor.offset,
full_chunks,
false, /* not finalizing */
)
.await;
match flush {
Err(e) => return Err(e),
_ => debug!("Flushing full chunks succeeded!"),
}
cursor.offset += chunked_bytes as u64;
}
// Push whatever is left over (if any) into our buffer.
cursor.buffer.extend_from_slice(final_append);
Ok(data.len())
}
/// Flush whatever is left in `cursor.buffer` as the final segment of the
/// resumable upload and return the completed `Object`.
///
/// The cursor's buffer is cleared even when the upload fails, so the
/// cursor cannot be used to retry the final segment.
pub async fn finalize_upload_with_client(
    client: &GcsHttpClient,
    cursor: &mut ResumableUploadCursor,
) -> Result<Object, HttpError> {
    // (Fixed the "Finializing" typo in the log message.)
    debug!(
        "Finalizing our object. {} bytes left!",
        cursor.buffer.len()
    );
    let result = _do_resumable_upload(
        client,
        &cursor.session_uri,
        cursor.offset,
        cursor.buffer.as_slice(),
        true, /* We're the last ones! */
    )
    .await;
    // Clear our buffer, even if we had an error.
    cursor.buffer.clear();
    // Propagate errors with `?` instead of the manual is_err()/unwrap()
    // dance; a finalizing upload yields Some(Object) on success.
    let obj = result?.expect("finalizing upload should return an Object");
    Ok(obj)
}
// Do a single list object request (i.e., don't follow the next page token).
/// Issues one Objects.list call with optional `prefix`, `delimiter`, and
/// `pageToken` query parameters. Panics (unwrap) if the response body
/// isn't valid JSON for `ListObjectsResponse`.
async fn _do_one_list_object(
client: &GcsHttpClient,
bucket: &str,
prefix: Option<&str>,
delim: Option<&str>,
token: Option<&str>,
) -> Result<ListObjectsResponse, HttpError> {
let base_url = "https://www.googleapis.com/storage/v1/b";
let bucket_url = format!("{}/{}/o", base_url, bucket);
let mut list_url = Url::parse(&bucket_url).unwrap();
if let Some(prefix_str) = prefix {
list_url.query_pairs_mut().append_pair("prefix", prefix_str);
}
if let Some(delim_str) = delim {
list_url
.query_pairs_mut()
.append_pair("delimiter", delim_str);
}
if let Some(token_str) = token {
list_url
.query_pairs_mut()
.append_pair("pageToken", token_str);
}
let uri: hyper::Uri = list_url.into_string().parse()?;
let mut builder = http::Request::builder().uri(uri);
add_auth_header(&mut builder).await?;
let body = hyper::Body::empty();
let request = builder
.body(body)
.expect("Failed to construct list request");
debug!("{:#?}", request);
let bytes = do_gcs_request(client, request).await?;
let list_response: ListObjectsResponse = serde_json::from_slice(&bytes).unwrap();
debug!("{:#?}", list_response);
Ok(list_response)
}
// Perform a list objects request, including following any
// nextPageToken responses, so that we get the full set of Objects and
// list of Prefixes (as a Vec<String>).
/// Returns all objects and (delimiter-collapsed) prefixes under `prefix`
/// in `bucket`, following pagination until no next page token remains.
pub async fn list_objects(
    client: &GcsHttpClient,
    bucket: &str,
    prefix: Option<&str>,
    delim: Option<&str>,
) -> Result<(Vec<Object>, Vec<String>), HttpError> {
    debug!(
        "Asking for a list from bucket '{}' with prefix '{:#?}' and delim = '{:#?}'",
        bucket, prefix, delim
    );
    let mut objects: Vec<Object> = vec![];
    let mut prefixes: Vec<String> = vec![];
    // Use Option instead of the previous "" sentinel for "no token yet".
    let mut page_token: Option<String> = None;
    loop {
        let resp =
            _do_one_list_object(client, bucket, prefix, delim, page_token.as_deref()).await?;
        // Fold this page's items and prefixes into the accumulated results
        // (if let avoids the is_some()/unwrap() pattern).
        if let Some(mut items) = resp.items {
            objects.append(&mut items);
        }
        if let Some(mut page_prefixes) = resp.prefixes {
            prefixes.append(&mut page_prefixes);
        }
        match resp.next_page_token {
            // The token is moved out of the response; the old `.clone()`
            // was redundant.
            Some(token) => page_token = Some(token),
            None => break,
        }
    }
    Ok((objects, prefixes))
}
#[cfg(test)]
mod tests {
use super::*;
extern crate env_logger;
const LANDSAT_BUCKET: &str = "gcp-public-data-landsat";
const LANDSAT_PREFIX: &str = "LC08/01/044/034/";
const LANDSAT_SUBDIR: &str = "LC08_L1GT_044034_20130330_20170310_01_T2";
const LANDSAT_B7_TIF: &str = "LC08_L1GT_044034_20130330_20170310_01_T2_B7.TIF";
const LANDSAT_B7_MTL: &str = "LC08_L1GT_044034_20130330_20170310_01_T2_MTL.txt";
fn init() {
// https://docs.rs/env_logger/0.8.2/env_logger/index.html#capturing-logs-in-tests
let _ = env_logger::builder().is_test(true).try_init();
}
fn test_bucket() -> String {
std::env::var("GCSFUSER_TEST_BUCKET").expect("You must provide a read/write bucket")
}
fn landsat_obj_url(object_str: &str) -> Url {
let mut object_url = Url::parse("https://www.googleapis.com/storage/v1/b").unwrap();
// Each *push* url encodeds the argument and then also adds a
// *real* slash *before* appending.
object_url
.path_segments_mut()
.unwrap()
.push(LANDSAT_BUCKET)
.push("o")
.push(object_str);
object_url
}
fn landsat_big_obj_url() -> Url {
// Make the full object "name".
let object_str = format!("{}{}/{}", LANDSAT_PREFIX, LANDSAT_SUBDIR, LANDSAT_B7_TIF);
landsat_obj_url(&object_str)
}
fn landsat_small_obj_url() -> Url {
// Make the full object "name".
let object_str = format!("{}{}/{}", LANDSAT_PREFIX, LANDSAT_SUBDIR, LANDSAT_B7_MTL);
landsat_obj_url(&object_str)
}
#[tokio::test(flavor = "multi_thread")]
async fn get_landsat_bucket() {
init();
let bucket = get_bucket(LANDSAT_BUCKET).await.unwrap();
println!("Got back bucket {:#?}", bucket)
}
#[tokio::test(flavor = "multi_thread")]
async fn get_private_bucket() {
init();
let private_bucket = test_bucket();
let bucket = get_bucket(&private_bucket).await.unwrap();
println!("Got back bucket {:#?}", bucket)
}
#[tokio::test(flavor = "multi_thread")]
async fn get_private_object() {
init();
let private_bucket = test_bucket();
let filename = "get_private_object.txt";
let url = format!(
"https://www.googleapis.com/storage/v1/b/{}/o/{}",
private_bucket, filename
);
let object_url = Url::parse(&url).unwrap();
let object: Object = get_object(object_url).await.unwrap();
println!("Object has {} bytes", object.size);
let bytes: Vec<u8> = get_bytes(&object, 0, 769).await.unwrap();
println!("Got back:\n {}", String::from_utf8(bytes).unwrap());
let offset_bytes: Vec<u8> = get_bytes(&object, 6, 769).await.unwrap();
println!("Got back:\n {}", String::from_utf8(offset_bytes).unwrap());
}
#[tokio::test(flavor = "multi_thread")]
async fn get_public_object() {
init();
let object_url = landsat_small_obj_url();
println!("Going to request object {}", object_url.as_str());
let object: Object = get_object(object_url).await.unwrap();
println!("Object has {} bytes", object.size);
println!("Object debug is {:#?}", object);
let bytes: Vec<u8> = get_bytes(&object, 0, 4096).await.unwrap();
println!("Got back:\n {}", String::from_utf8(bytes).unwrap());
let offset_bytes: Vec<u8> = get_bytes(&object, 4099, 1024).await.unwrap();
println!("Got back:\n {}", String::from_utf8(offset_bytes).unwrap());
}
#[tokio::test(flavor = "multi_thread")]
async fn get_object_invalid() {
init();
let object_url = landsat_small_obj_url();
let object: Object = get_object(object_url).await.unwrap();
println!("Object has {} bytes", object.size);
println!("Object debug is {:#?}", object);
let bytes: Vec<u8> = get_bytes(&object, 0, 4096).await.unwrap();
println!("Got back:\n {}", String::from_utf8(bytes).unwrap());
// Now, make a copy of that object and change the self link.
let mut modified: Object = object;
// Take the last character off.
modified.self_link.pop();
let expect_404 = get_bytes(&modified, 0, 4096).await;
assert_eq!(expect_404.is_err(), true);
// I cannot figure out how to make this work. But I wish I could.
//assert_eq!(expect_404.unwrap_err(),
// HttpError::Status(http::StatusCode::NOT_FOUND));
}
#[tokio::test(flavor = "multi_thread")]
async fn get_public_bytes_bad_range() {
init();
let object_url = landsat_small_obj_url();
let object: Object = get_object(object_url).await.unwrap();
// If we don't ask for any bytes, we get an error.
let result = get_bytes(&object, 0, 0).await;
assert!(result.is_err());
// Changing the offset but no bytes, still gets an error.
let result = get_bytes(&object, 100, 0).await;
assert!(result.is_err());
// But *overfetching* (the small obj is 8454 bytes), we shouldn't get an error.
let result = get_bytes(&object, 0, 10000).await;
assert!(result.is_ok());
// Trying to *start* past the end => 416 "Range Not Satisifiable".
let result = get_bytes(&object, 10000, 1).await;
assert!(result.is_err());
}
#[tokio::test(flavor = "multi_thread")]
async fn get_public_bytes_large_read() {
init();
let client = new_client();
let object_url = landsat_big_obj_url();
println!("Going to request obj (url) {}", object_url);
let object: Object = get_object(object_url).await.unwrap();
println!("Object has {} bytes", object.size);
assert!(
object.size > 1024 * 1024,
"Object must be at least 1MB in size!"
);
// Issue a 1MB read. TODO(boulos): Ensure that we're doing it
// in "one shot" (we aren't currently! hyper is breaking it up
// into 16 KiB reads).
let bytes: Vec<u8> = get_bytes_with_client(&client, &object, 0, 1024 * 1024)
.await
.unwrap();
// Make sure we got back the entire 1MB read.
assert_eq!(bytes.len(), 1024 * 1024);
}
// Uploads a small private object through the resumable-upload API, then
// finalizes it and reports the resulting object metadata.
#[tokio::test(flavor = "multi_thread")]
async fn write_private_object() {
    init();
    let client = new_client();
    let bucket_str = test_bucket();
    let filename = "write_private_obj.txt";
    // Get us a handle to a resumable upload.
    let mut cursor = create_object_with_client(&client, &bucket_str, filename)
        .await
        .unwrap();
    let bytes = "Hello, GCS!";
    let num_written = append_bytes_with_client(&client, &mut cursor, bytes.as_bytes())
        .await
        .unwrap();
    // The append must report exactly the payload length.
    assert_eq!(num_written, bytes.len());
    // Finalize, panicking (with the error) if the upload did not complete.
    match finalize_upload_with_client(&client, &mut cursor).await {
        Err(e) => panic!("Got error {:#?}", e),
        Ok(obj) => println!(
            "Obj has size {} and generation {}",
            obj.size, obj.generation
        ),
    };
}
// Appends several unevenly sized chunks to one resumable upload and checks
// that the finalized object's size equals the sum of all appends.
#[tokio::test(flavor = "multi_thread")]
async fn write_object_chunks() {
    init();
    let client = new_client();
    let bucket_str = test_bucket();
    let filename = "write_chunk_obj.txt";
    // Get us a handle to a resumable upload.
    let mut cursor = create_object_with_client(&client, &bucket_str, filename)
        .await
        .unwrap();
    // A mix of tiny, unaligned, and exactly-1MiB appends.
    let lengths = vec![
        20,
        350 * 1024,
        512 * 1024 - (350 * 1024 - 20),
        1024 * 1024,
        384 * 1024,
    ];
    let total_length: u64 = lengths.iter().sum();
    for length in lengths.iter() {
        // Make ascii text
        let bytes: Vec<u8> = (0..*length).map(|x| (48 + (x % 10)) as u8).collect();
        let written = append_bytes_with_client(&client, &mut cursor, &bytes)
            .await
            .unwrap();
        // Make sure we get back the right length.
        assert_eq!(written, bytes.len());
    }
    // Now finalize
    let obj = finalize_upload_with_client(&client, &mut cursor)
        .await
        .expect("Expected an object!");
    // Check that the length is the same as all our appends.
    assert_eq!(obj.size, total_length);
}
// Overwrites an object and verifies that reads through the *old* generation
// fail, while reads through the new generation return the new contents.
#[tokio::test(flavor = "multi_thread")]
async fn write_object_race() {
    init();
    let client = new_client();
    let bucket_str = test_bucket();
    let filename = "write_object_race.txt";
    let original = "Original value";
    let new_value = "New values";
    let original_obj = {
        // Write out the original value to our file.
        let mut cursor = create_object_with_client(&client, &bucket_str, filename)
            .await
            .unwrap();
        let written = append_bytes_with_client(&client, &mut cursor, original.as_bytes())
            .await
            .unwrap();
        // Make sure we get back the right length.
        assert_eq!(written, original.len());
        // Now finalize
        let result = finalize_upload_with_client(&client, &mut cursor).await;
        match result {
            Ok(obj) => obj,
            Err(_) => panic!("Expected to get back an object..."),
        }
    };
    println!("Wrote {} with object {:#?}", filename, original_obj);
    let new_obj = {
        // Now, write over the object again with the new data.
        let mut cursor = create_object_with_client(&client, &bucket_str, filename)
            .await
            .unwrap();
        let written = append_bytes_with_client(&client, &mut cursor, new_value.as_bytes())
            .await
            .unwrap();
        // Make sure we get back the right length.
        assert_eq!(written, new_value.len());
        // And finalize
        let result = finalize_upload_with_client(&client, &mut cursor).await;
        match result {
            Ok(obj) => obj,
            Err(_) => panic!("Expected to get back an object..."),
        }
    };
    println!("Overwrote {} with object {:#?}", filename, new_obj);
    // Now, if we try to read the original one, it's gone.
    let read_orig = get_bytes_with_client(&client, &original_obj, 0, original_obj.size).await;
    // We should have gotten some sort of error.
    assert_eq!(read_orig.is_err(), true);
    let read_new = get_bytes_with_client(&client, &new_obj, 0, new_obj.size).await;
    // We shouldn't have gotten an error.
    assert_eq!(read_new.is_err(), false);
    let read_result = read_new.unwrap();
    // Make sure we got the correct bytes out.
    assert_eq!(read_result, new_value.as_bytes());
}
// Lists objects with and without prefix/delimiter and dumps the results.
// NOTE(review): network-dependent; relies on the public landsat bucket.
#[tokio::test(flavor = "multi_thread")]
async fn test_list_paginated() {
    init();
    let client = new_client();
    let bucket = LANDSAT_BUCKET;
    let prefix = LANDSAT_PREFIX;
    let delim = "/";
    // Prefix + delimiter: like `ls` of a single "directory".
    let (objects, prefixes) = list_objects(&client, bucket, Some(prefix), Some(delim))
        .await
        .unwrap();
    println!("Got {} objects", objects.len());
    println!("prefixes: {:#?}", prefixes);
    println!("objects: {:#?}", objects);
    // No prefix: only top-level entries plus their prefixes.
    let (only_top_level, prefixes) = list_objects(&client, bucket, None, Some(delim))
        .await
        .unwrap();
    println!("Got {} objects", only_top_level.len());
    println!("Dump:\n\n{:#?}", only_top_level);
    println!("Prefixes:\n\n{:#?}", prefixes);
    // No delimiter: full recursive listing under the prefix.
    let (all_objects, prefixes) = list_objects(&client, bucket, Some(prefix), None)
        .await
        .unwrap();
    // BUG FIX: this previously printed `objects.len()` (the count from the
    // first listing) instead of the recursive listing's own count.
    println!("Got {} objects", all_objects.len());
    println!("prefixes: {:#?}", prefixes);
    println!("objects: {:#?}", all_objects);
}
}
|
// svd2rust-generated accessors for the SBS FPUIMR (FPU interrupt mask)
// register. NOTE(review): machine-generated — regenerate from the SVD rather
// than editing by hand.
#[doc = "Register `FPUIMR` reader"]
pub type R = crate::R<FPUIMR_SPEC>;
#[doc = "Register `FPUIMR` writer"]
pub type W = crate::W<FPUIMR_SPEC>;
#[doc = "Field `FPU_IE` reader - FPU interrupt enable Set and cleared by software to enable the Cortex-M33 FPU interrupts FPU_IE\\[5\\]: inexact interrupt enable (interrupt disabled at reset) FPU_IE\\[4\\]: input abnormal interrupt enable FPU_IE\\[3\\]: overflow interrupt enable FPU_IE\\[2\\]: underflow interrupt enable FPU_IE\\[1\\]: divide-by-zero interrupt enable FPU_IE\\[0\\]: invalid operation interrupt enable"]
pub type FPU_IE_R = crate::FieldReader;
#[doc = "Field `FPU_IE` writer - FPU interrupt enable Set and cleared by software to enable the Cortex-M33 FPU interrupts FPU_IE\\[5\\]: inexact interrupt enable (interrupt disabled at reset) FPU_IE\\[4\\]: input abnormal interrupt enable FPU_IE\\[3\\]: overflow interrupt enable FPU_IE\\[2\\]: underflow interrupt enable FPU_IE\\[1\\]: divide-by-zero interrupt enable FPU_IE\\[0\\]: invalid operation interrupt enable"]
pub type FPU_IE_W<'a, REG, const O: u8> = crate::FieldWriter<'a, REG, 6, O>;
impl R {
    #[doc = "Bits 0:5 - FPU interrupt enable Set and cleared by software to enable the Cortex-M33 FPU interrupts FPU_IE\\[5\\]: inexact interrupt enable (interrupt disabled at reset) FPU_IE\\[4\\]: input abnormal interrupt enable FPU_IE\\[3\\]: overflow interrupt enable FPU_IE\\[2\\]: underflow interrupt enable FPU_IE\\[1\\]: divide-by-zero interrupt enable FPU_IE\\[0\\]: invalid operation interrupt enable"]
    #[inline(always)]
    pub fn fpu_ie(&self) -> FPU_IE_R {
        // The six enable flags live in the low six bits of the register.
        FPU_IE_R::new((self.bits & 0x3f) as u8)
    }
}
impl W {
    #[doc = "Bits 0:5 - FPU interrupt enable Set and cleared by software to enable the Cortex-M33 FPU interrupts FPU_IE\\[5\\]: inexact interrupt enable (interrupt disabled at reset) FPU_IE\\[4\\]: input abnormal interrupt enable FPU_IE\\[3\\]: overflow interrupt enable FPU_IE\\[2\\]: underflow interrupt enable FPU_IE\\[1\\]: divide-by-zero interrupt enable FPU_IE\\[0\\]: invalid operation interrupt enable"]
    #[inline(always)]
    #[must_use]
    pub fn fpu_ie(&mut self) -> FPU_IE_W<FPUIMR_SPEC, 0> {
        FPU_IE_W::new(self)
    }
    #[doc = "Writes raw bits to the register."]
    #[inline(always)]
    pub unsafe fn bits(&mut self, bits: u32) -> &mut Self {
        self.bits = bits;
        self
    }
}
#[doc = "SBS FPU interrupt mask register\n\nYou can [`read`](crate::generic::Reg::read) this register and get [`fpuimr::R`](R). You can [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero) this register using [`fpuimr::W`](W). You can also [`modify`](crate::generic::Reg::modify) this register. See [API](https://docs.rs/svd2rust/#read--modify--write-api)."]
pub struct FPUIMR_SPEC;
impl crate::RegisterSpec for FPUIMR_SPEC {
    type Ux = u32;
}
#[doc = "`read()` method returns [`fpuimr::R`](R) reader structure"]
impl crate::Readable for FPUIMR_SPEC {}
#[doc = "`write(|w| ..)` method takes [`fpuimr::W`](W) writer structure"]
impl crate::Writable for FPUIMR_SPEC {
    const ZERO_TO_MODIFY_FIELDS_BITMAP: Self::Ux = 0;
    const ONE_TO_MODIFY_FIELDS_BITMAP: Self::Ux = 0;
}
#[doc = "`reset()` method sets FPUIMR to value 0x1f"]
impl crate::Resettable for FPUIMR_SPEC {
    const RESET_VALUE: Self::Ux = 0x1f;
}
|
use matrix_nio::events::collections::all::{RoomEvent, StateEvent};
use matrix_nio::events::room::member::{MemberEvent, MembershipState};
use matrix_nio::events::room::message::{
MessageEvent, MessageEventContent, TextMessageEventContent,
};
use matrix_nio::Room;
use url::Url;
use crate::executor::spawn_weechat;
use crate::server::Connection;
use crate::{Config, PLUGIN_NAME};
use std::borrow::Cow;
use std::cell::RefCell;
use std::rc::{Rc, Weak};
use weechat::{Buffer, Weechat};
/// A member of a Matrix room as tracked by this plugin.
/// NOTE(review): none of these fields are read anywhere in this file —
/// confirm they are used elsewhere before relying on their meaning.
pub(crate) struct RoomMember {
    // Presumably the display name shown in the buffer — TODO confirm.
    nick: String,
    // Presumably the fully-qualified Matrix user id — TODO confirm.
    user_id: String,
    prefix: String,
    color: String,
}
/// Per-room state tying a matrix-nio `Room` to its Weechat buffer.
///
/// The Weechat buffer handle is not stored here; it is re-fetched by name
/// (the room id) via `get_weechat_buffer`.
pub(crate) struct RoomBuffer {
    // Name of the server this room belongs to.
    server_name: String,
    // Homeserver URL the room was joined through.
    homeserver: Url,
    // Matrix room id; also used as the Weechat buffer name.
    room_id: String,
    // Presumably the sync batch token for backfill — TODO confirm; unset here.
    prev_batch: Option<String>,
    // Presumably when the last typing notice was sent — TODO confirm units.
    typing_notice_time: Option<u64>,
    // The matrix-nio room state machine; fed by the handle_* methods below.
    room: Room,
    // Lines printed locally before the server acknowledged them.
    printed_before_ack_queue: Vec<String>,
}
impl RoomBuffer {
    /// Create the bookkeeping state for a Matrix room and open its Weechat
    /// buffer (named after the room id).
    ///
    /// The created buffer handle is intentionally not stored: Weechat owns
    /// it, and `get_weechat_buffer` looks it up by name when needed.
    pub fn new(
        server_name: &str,
        connected_state: &Rc<RefCell<Option<Connection>>>,
        homeserver: &Url,
        config: &Config,
        room_id: &str,
        own_user_id: &str,
    ) -> Self {
        // NOTE(review): assumes the Weechat singleton is initialized before
        // any RoomBuffer is constructed; `config` and the returned `buffer`
        // are currently unused in this constructor.
        let weechat = unsafe { Weechat::weechat() };
        let buffer = weechat.buffer_new(
            room_id,
            Some(RoomBuffer::input_callback),
            // A Weak reference so the buffer callback cannot keep the
            // connection alive on its own.
            Some((Rc::downgrade(connected_state), room_id.to_owned())),
            Some(RoomBuffer::close_callback),
            Some(room_id.to_string()),
        );
        RoomBuffer {
            server_name: server_name.to_owned(),
            homeserver: homeserver.clone(),
            room_id: room_id.to_owned(),
            prev_batch: None,
            typing_notice_time: None,
            room: Room::new(room_id, &own_user_id.to_string()),
            printed_before_ack_queue: Vec::new(),
        }
    }
    /// Look up this room's Weechat buffer by name; `None` if it was closed.
    pub fn get_weechat_buffer(&self) -> Option<Buffer> {
        let weechat = unsafe { Weechat::weechat() };
        weechat.buffer_search(PLUGIN_NAME, &self.room_id)
    }
    /// Weechat input callback: send the typed line to the room.
    ///
    /// A live connection is checked synchronously first (so the error is
    /// printed into the buffer immediately); the actual send happens in a
    /// spawned async task that re-checks the connection.
    pub fn input_callback(
        data: &mut (Weak<RefCell<Option<Connection>>>, String),
        buffer: &Buffer,
        input: Cow<str>,
    ) {
        let (state, room_id) = data;
        let state = state.clone();
        let room_id = room_id.clone();
        let input = input.into_owned();
        {
            let client_rc = state
                .upgrade()
                .expect("Can't upgrade server, server has been deleted?");
            let client = client_rc.borrow();
            if client.is_none() {
                buffer.print("Error not connected");
                return;
            }
        }
        let task = async move {
            let client_rc = state
                .upgrade()
                .expect("Can't upgrade server, server has been deleted?");
            let client = client_rc.borrow();
            if let Some(s) = client.as_ref() {
                s.send_message(&room_id, &input).await;
            }
        };
        spawn_weechat(task);
    }
    /// Buffer-close callback; currently a no-op.
    pub fn close_callback(data: &String, buffer: &Buffer) {}
    /// TODO: membership-state handling is not implemented yet.
    pub fn handle_membership_state(&mut self, event: MembershipState) {}
    /// Print a join/leave notice for a membership event, then feed the event
    /// to the nio room state. Other membership transitions are ignored.
    pub fn handle_membership_event(&mut self, event: &MemberEvent) {
        let buffer = self.get_weechat_buffer().unwrap();
        let content = &event.content;
        let message = match content.membership {
            MembershipState::Join => "joined",
            MembershipState::Leave => "left",
            _ => return,
        };
        let message = format!(
            "{} ({}) has {} the room",
            content.displayname.as_ref().unwrap_or(&"".to_string()),
            event.state_key,
            message
        );
        // origin_server_ts is in milliseconds; Weechat wants seconds.
        let timestamp: u64 = event.origin_server_ts.into();
        let timestamp = timestamp / 1000;
        buffer.print_date_tags(timestamp as i64, &[], &message);
        self.room.handle_membership(&event);
    }
    /// Forward a state event to the nio room state machine.
    pub fn handle_state_event(&mut self, event: StateEvent) {
        self.room.receive_state_event(&event);
    }
    /// Print a plain-text message into the buffer, sender-prefixed.
    /// `timestamp` is milliseconds since the epoch.
    pub fn handle_text_message(
        &mut self,
        sender: &str,
        timestamp: u64,
        content: &TextMessageEventContent,
    ) {
        let buffer = self.get_weechat_buffer().unwrap();
        let timestamp = timestamp / 1000;
        let message = format!("{}\t{}", sender, content.body);
        buffer.print_date_tags(timestamp as i64, &[], &message);
    }
    /// Dispatch an m.room.message event; only text bodies are rendered.
    pub fn handle_room_message(&mut self, event: &MessageEvent) {
        let sender = &event.sender;
        let timestamp: u64 = event.origin_server_ts.into();
        match &event.content {
            MessageEventContent::Text(t) => {
                self.handle_text_message(&sender.to_string(), timestamp, t)
            }
            _ => (),
        }
    }
    /// Dispatch a timeline event: render what we understand, and hand
    /// everything else to the nio room state machine.
    pub fn handle_room_event(&mut self, event: RoomEvent) {
        match &event {
            RoomEvent::RoomMember(e) => self.handle_membership_event(e),
            RoomEvent::RoomMessage(m) => self.handle_room_message(m),
            event => {
                self.room.receive_timeline_event(event);
            }
        }
    }
}
|
// Well-known entry names inside an Android APK archive.
pub(crate) const ANDROID_MANIFEST_XML: &str = "AndroidManifest.xml";
pub(crate) const RESOURCES_ARSC: &str = "resources.arsc";
|
use super::{super::guild::GuildEntity, CategoryChannelEntity, MessageEntity};
use crate::{
repository::{GetEntityFuture, Repository},
utils, Backend, Entity,
};
use twilight_model::{
channel::{permission_overwrite::PermissionOverwrite, ChannelType, TextChannel},
id::{ChannelId, GuildId, MessageId},
};
/// Cached representation of a guild text channel, mirroring the fields of
/// [`twilight_model::channel::TextChannel`].
#[cfg_attr(feature = "serde", derive(serde::Deserialize, serde::Serialize))]
#[derive(Clone, Debug, Eq, PartialEq)]
pub struct TextChannelEntity {
    // None for channels not attached to a guild.
    pub guild_id: Option<GuildId>,
    pub id: ChannelId,
    pub kind: ChannelType,
    pub last_message_id: Option<MessageId>,
    pub last_pin_timestamp: Option<String>,
    pub name: String,
    pub nsfw: bool,
    pub permission_overwrites: Vec<PermissionOverwrite>,
    // The parent category channel, if the channel is in one.
    pub parent_id: Option<ChannelId>,
    pub position: i64,
    pub rate_limit_per_user: Option<u64>,
    pub topic: Option<String>,
}
impl From<TextChannel> for TextChannelEntity {
fn from(channel: TextChannel) -> Self {
Self {
guild_id: channel.guild_id,
id: channel.id,
kind: channel.kind,
last_message_id: channel.last_message_id,
last_pin_timestamp: channel.last_pin_timestamp,
name: channel.name,
nsfw: channel.nsfw,
permission_overwrites: channel.permission_overwrites,
parent_id: channel.parent_id,
position: channel.position,
rate_limit_per_user: channel.rate_limit_per_user,
topic: channel.topic,
}
}
}
impl Entity for TextChannelEntity {
    type Id = ChannelId;
    /// Return the text channel's ID.
    fn id(&self) -> Self::Id {
        self.id
    }
}
/// Repository to work with guild text channels and their associated entities.
pub trait TextChannelRepository<B: Backend>: Repository<TextChannelEntity, B> {
    /// Retrieve the guild associated with a guild text channel.
    fn guild(&self, channel_id: ChannelId) -> GetEntityFuture<'_, GuildEntity, B::Error> {
        utils::relation_and_then(
            self.backend().text_channels(),
            self.backend().guilds(),
            channel_id,
            |channel| channel.guild_id,
        )
    }
    /// Retrieve the last message of a text channel.
    fn last_message(&self, channel_id: ChannelId) -> GetEntityFuture<'_, MessageEntity, B::Error> {
        utils::relation_and_then(
            self.backend().text_channels(),
            self.backend().messages(),
            channel_id,
            |channel| channel.last_message_id,
        )
    }
    /// Retrieve the parent category channel of the text channel.
    // (doc fix: this previously said "voice channel" — likely a copy-paste
    // from a sibling repository; the body clearly reads text_channels().)
    fn parent(
        &self,
        channel_id: ChannelId,
    ) -> GetEntityFuture<'_, CategoryChannelEntity, B::Error> {
        utils::relation_and_then(
            self.backend().text_channels(),
            self.backend().category_channels(),
            channel_id,
            |channel| channel.parent_id,
        )
    }
}
|
use parking_lot::RwLock;
use std::collections::{HashMap, VecDeque};
use std::hash::Hash;
use std::sync::Arc;
/// A clonable, thread-safe map of id -> value in which entries can also be
/// looked up by one or more names. Cloning shares the underlying storage.
#[derive(Clone)]
pub struct NamedMap<K, V>
where
    K: Clone + Eq + Hash,
    V: Clone,
{
    // All state lives behind one lock so id/name bookkeeping stays consistent.
    inner: Arc<RwLock<InnerMap<K, V>>>,
}
/// Lock-free inner state of [`NamedMap`].
/// NOTE(review): `derive(Default)` also requires `K: Default, V: Default` on
/// the generated impl — `new()` below avoids those bounds; confirm the derive
/// is actually needed.
#[derive(Default)]
struct InnerMap<K, V>
where
    K: Clone + Eq + Hash,
    V: Clone,
{
    // id -> value.
    map: HashMap<u64, V>,
    // name -> id (each name resolves to exactly one id).
    name_id: HashMap<K, u64>,
    // id -> all names registered for it.
    id_name: HashMap<u64, Vec<K>>,
    // Recycled ids available for reuse.
    free: VecDeque<u64>,
}
impl<K, V> InnerMap<K, V>
where
    K: Clone + Eq + Hash,
    V: Clone,
{
    /// Create an empty map with no allocated ids.
    fn new() -> Self {
        Self {
            map: HashMap::new(),
            name_id: HashMap::new(),
            id_name: HashMap::new(),
            free: VecDeque::new(),
        }
    }
    /// Insert `value` and return its numeric id.
    ///
    /// Recycled ids from `free` are reused first; otherwise a fresh id is
    /// derived from the current size. Ids start at 1, so 0 is never valid.
    pub fn insert(&mut self, value: V) -> u64 {
        let id = self
            .free
            .pop_front()
            .unwrap_or_else(|| self.map.len() as u64 + 1);
        self.map.insert(id, value);
        id
    }
    /// Insert `value` and immediately register `name` for the new id.
    pub fn insert_with_name(&mut self, value: V, name: K) -> u64 {
        let id = self.insert(value);
        self.set_name(id, name);
        id
    }
    /// Register `key` as a name for `id`. An id may carry several names;
    /// each name maps to exactly one id (re-registering overwrites).
    pub fn set_name(&mut self, id: u64, key: K) {
        self.name_id.insert(key.clone(), id);
        // entry().or_default() needs only one clone of the key, unlike the
        // previous and_modify/or_insert_with pair which cloned it twice.
        self.id_name.entry(id).or_default().push(key);
    }
    // pub fn remove(&mut self, id: &u64) -> Option<V> {
    //     if let Some(item) = self.map.remove(id) {
    //         if let Some(names) = self.id_name.get(id) {
    //             for name in names {
    //                 self.name_id.remove(name);
    //             }
    //             self.id_name.remove(id);
    //         }
    //         self.free.push_back(*id);
    //         Some(item)
    //     } else {
    //         None
    //     }
    // }
}
impl<K, V> NamedMap<K, V>
where
    K: Clone + Eq + Hash,
    V: Clone,
{
    /// Create an empty, shareable named map.
    pub fn new() -> Self {
        let inner = InnerMap::new();
        let inner = Arc::new(RwLock::new(inner));
        Self { inner }
    }
    // pub fn insert(&mut self, value: V) -> u64 {
    //     self.inner.write().insert(value)
    // }
    /// Insert `value` under `name`, returning the assigned id.
    pub fn insert_with_name(&mut self, value: V, name: K) -> u64 {
        self.inner.write().insert_with_name(value, name)
    }
    /// Clone out the value stored under `id`, if any.
    pub fn get(&self, id: u64) -> Option<V> {
        // .cloned() replaces the previous .map(|v| v.clone()).
        self.inner.read().map.get(&id).cloned()
    }
    /// Resolve `key` to an id, then clone out the value, if both exist.
    pub fn get_by_name(&self, key: &K) -> Option<V> {
        let inner = self.inner.read();
        // `get(key)` (not `get(&key)`): key is already a reference.
        inner
            .name_id
            .get(key)
            .and_then(|id| inner.map.get(id))
            .cloned()
    }
    /// Register an additional name `key` for the entry identified by `id`.
    pub fn set_name(&mut self, id: u64, key: K) {
        let mut inner = self.inner.write();
        inner.set_name(id, key);
    }
    // pub fn remove(&mut self, id: &u64) -> Option<V> {
    //     self.inner.write().remove(id)
    // }
}
|
#![feature(libc)]
extern crate libc;
// Raw FFI declarations for libpg_query.
// NOTE(review): these #[repr(C)] layouts must match the C headers exactly;
// regenerate/verify against pg_query.h before changing anything.
mod ffi {
    use libc::{c_char, c_int};
    #[derive(Debug)]
    #[repr(C)]
    pub struct PgQueryError {
        pub message: *const c_char,  // exception message
        pub funcname: *const c_char, // source function of exception (e.g. SearchSysCache)
        pub filename: *const c_char, // source of exception (e.g. parse.l)
        pub lineno: c_int,           // source of exception (e.g. 104)
        pub cursorpos: c_int,        // char in query at which exception occurred
        pub context: *const c_char,  // additional context (optional, can be NULL)
    }
    #[derive(Debug)]
    #[repr(C)]
    pub struct PgQueryParseResult {
        pub parse_tree: *const c_char,
        pub stderr_buffer: *const c_char,
        // NULL on success; owned by the result and freed with it.
        pub error: *mut PgQueryError
    }
    #[link(name = "pg_query")]
    extern "C" {
        pub fn pg_query_parse(input: *const c_char) -> PgQueryParseResult;
        // Must be called exactly once per result to release C-side memory.
        pub fn pg_query_free_parse_result(result: PgQueryParseResult);
    }
}
/// Owned, safe mirror of the C `PgQueryError` struct.
#[derive(Debug)]
pub struct PgQueryError {
    pub message: String,
    pub funcname: String,
    pub filename: String,
    pub lineno: i32,
    pub cursorpos: i32,
    // The C field is optional (may be NULL).
    pub context: Option<String>,
}
/// Owned, safe mirror of the C `PgQueryParseResult` struct.
#[derive(Debug)]
pub struct PgQueryParseResult {
    pub parse_tree: String,
    pub stderr_buffer: Option<String>,
    // None when the parse succeeded.
    pub error: Option<PgQueryError>
}
pub fn pg_query_parse(input: &str) -> PgQueryParseResult {
use std::ffi::{CString, CStr};
use std::str;
let c_input = CString::new(input).unwrap();
unsafe {
let result = ffi::pg_query_parse(c_input.as_ptr());
let query_error = if !result.error.is_null() {
let ref error = *(result.error);
let message = {
let bytes = CStr::from_ptr(error.message).to_bytes();
str::from_utf8(bytes).unwrap().to_string()
};
let funcname = {
let bytes = CStr::from_ptr(error.funcname).to_bytes();
str::from_utf8(bytes).unwrap().to_string()
};
let filename = {
let bytes = CStr::from_ptr(error.filename).to_bytes();
str::from_utf8(bytes).unwrap().to_string()
};
let context = if !error.context.is_null() {
let bytes = CStr::from_ptr(error.context).to_bytes();
Some(str::from_utf8(bytes).unwrap().to_string())
} else {
None
};
let query_error = PgQueryError {
message: message,
funcname: funcname,
filename: filename,
lineno: error.lineno,
cursorpos: error.cursorpos,
context: context
};
Some(query_error)
} else {
None
};
let parse_tree = {
let parse_tree_bytes = CStr::from_ptr(result.parse_tree).to_bytes();
str::from_utf8(parse_tree_bytes).unwrap().to_string()
};
let stderr_buffer = if !result.stderr_buffer.is_null() {
let stderr_buffer_bytes = CStr::from_ptr(result.stderr_buffer).to_bytes();
Some(str::from_utf8(stderr_buffer_bytes).unwrap().to_string())
} else {
None
};
ffi::pg_query_free_parse_result(result);
PgQueryParseResult {
parse_tree: parse_tree,
stderr_buffer: stderr_buffer,
error: query_error
}
}
}
#[cfg(test)]
mod tests {
    use super::pg_query_parse;

    // A trivially valid statement parses without error.
    #[test]
    fn it_works() {
        let parsed = pg_query_parse("SELECT 1");
        println!("{:?}", parsed);
        assert!(parsed.error.is_none());
    }

    // Garbage input surfaces a parse error instead of panicking.
    #[test]
    fn it_does_not_work() {
        let parsed = pg_query_parse("INSERT FROM DOES NOT WORK");
        println!("{:?}", parsed);
        assert!(parsed.error.is_some());
    }
    // TODO: more tests
}
|
extern crate proc_macro;
extern crate proc_macro2;
extern crate latte_verify;
extern crate latte_lib;
use latte_verify::*;
extern crate quote;
extern crate syn;
use proc_macro2::{Ident, Span};
// use regex::Regex;
use proc_macro::TokenStream;
mod utils;
use utils::*;
use quote::quote;
use syn::DeriveInput;
use latte_lib::*;
use latte_verify::Verify;
/// Return the token string of the first attribute whose single-segment path
/// equals `name`, or `""` when no such attribute exists.
///
/// The parameter is now a slice; existing `&Vec<syn::Attribute>` callers
/// still work via deref coercion.
fn getAttrStr(attrs: &[syn::Attribute], name: String) -> String {
    attrs
        .iter()
        .find(|attr| {
            attr.path.segments.len() == 1 && attr.path.segments[0].ident.to_string() == name
        })
        .map(|attr| attr.tts.to_string())
        .unwrap_or_default()
}
/// Generate (as a source-code string) an `impl` block containing a validating
/// setter and a getter for one named struct field.
///
/// The tokens of the field's `#[verify(...)]` attribute are embedded into the
/// generated setter and interpreted by `latte_verify` at runtime.
fn parseAttr(name: &syn::Ident, field: &syn::Field) -> String {
    let ty = field.ty.clone();
    let ident = field.ident.clone();
    let attrs = field.attrs.clone();
    // Raw token string of the field's #[verify] attribute ("" when absent).
    let attrStr = getAttrStr(&attrs, "verify".to_string());
    let identStr = ident.unwrap().to_string();
    // Capitalize the first character to build the setX/getX method names.
    let mut v: Vec<char> = identStr.chars().collect();
    v[0] = v[0].to_uppercase().nth(0).unwrap();
    let mut fnameStr: String = v.into_iter().collect();
    let setStr = "set".to_string() + &fnameStr;
    // NOTE(review): the `set`/`get` Idents below appear unused — the string
    // template substitutes setStr/getStr directly; confirm before removing.
    let set = Ident::new(&setStr, Span::call_site());
    let getStr = "get".to_string() + &fnameStr;
    let get = Ident::new(&getStr, Span::call_site());
    // Code template; {{...}} placeholders are substituted below. {{r#}} and
    // {{#}} expand to raw-string delimiters so attrStr may contain quotes.
    let mut result = r#"
impl {{name}} {
fn {{set}}(mut self,value: {{ty}}) -> Result<bool, &'static str> {
let str = {{r#}}"{{attrStr}}"{{#}};
let copy = value.clone();
if (str.len() == 0) {
self.{{ident}} = copy;
return Ok(true);
}
let result = value.verify(latte_verify::VerifyConfig::String(str.to_string()));
match result {
Ok(v) => {
if v {
self.{{ident}} = copy;
}
return Ok(v);
}
Err(_) => {
return result;
}
}
}
fn {{get}}(self) -> {{ty}} {
self.{{ident}}
}
}"#.to_string();
// result = str::replace(&result, "{{name}}", "e!(#name).to_string());
// result = str::replace(&result, "{{ty}}", "e!(#ty).to_string());
// result = str::replace(&result, "{{set}}", &setStr);
// result = str::replace(&result, "{{get}}", &getStr);
// result = str::replace(&result, "{{ident}}", &identStr);
// result = str::replace(&result, "{{attrStr}}", &toEscape(attrStr));
result = replace!(result, {
"{{name}}" => "e!(#name).to_string(),
"{{ty}}" => "e!(#ty).to_string(),
"{{set}}" => &setStr,
"{{get}}" => &getStr,
"{{ident}}" => &identStr,
"{{attrStr}}" => &toEscape(attrStr),
"{{r#}}" => "r#",
"{{#}}" => "#"
});
    // println!("hello {:?}",result);
    result
}
/// Derive `setX`/`getX` accessors (with optional `#[verify(...)]` validation)
/// for every named field of a struct.
#[proc_macro_derive(Set, attributes(verify))]
pub fn set_derive(input: TokenStream) -> TokenStream {
    // Parse the incoming tokens into a syntax tree.
    let input: DeriveInput = syn::parse(input).unwrap();
    // (was `let mut name` — it is never mutated)
    let name = input.ident;
    let output: proc_macro2::TokenStream = if let syn::Data::Struct(data) = input.data {
        // Generate one accessor impl per named field; unit/tuple structs get
        // nothing (the debug prints are kept for parity with prior behavior).
        let fs: Vec<String> = match data.fields {
            syn::Fields::Named(ref fields) => {
                fields.named.iter().map(|f| parseAttr(&name, f)).collect()
            }
            syn::Fields::Unit => {
                println!("unit");
                Vec::new()
            }
            syn::Fields::Unnamed(_) => {
                println!("unamed");
                Vec::new()
            }
        };
        let ss = fs.join(" ");
        println!("over: {:?}", ss);
        (&ss).parse().unwrap()
    } else {
        panic!("Only impl to struct")
    };
    output.into()
}
// NOTE(review): a proc-macro crate normally cannot apply its own derive in
// its unit tests; confirm this test actually compiles (an integration test
// under tests/ is the usual place for it).
#[cfg(test)]
mod tests {
    use super::*;
    #[test]
    fn test_class() {
        #[derive(Set)]
        struct Struct {
            #[verify{"min":10, "max":100}]
            x: i32,
            #[verify{"len":3}]
            y: String
        }
        let s = Struct {
            x: 1,
            y: "200".to_string(),
        };
        // setX(2) should pass the {"min":10,"max":100} check? — verify.
        println!("Hello, world! {:?}", s.setX(2));
    }
} |
use super::{ConnectionPath, ConsensusStatePath};
/// Render a `ConnectionPath` as its string form — presumably the on-chain
/// store key; confirm against the path-format spec when implementing.
/// TODO: unimplemented — calling this currently panics via `todo!()`.
pub fn connection_path(_path: &ConnectionPath) -> String {
    todo!()
}
/// Render a `ConsensusStatePath` as its string form — same caveat as above.
/// TODO: unimplemented — calling this currently panics via `todo!()`.
pub fn consensus_state_path(_path: &ConsensusStatePath) -> String {
    todo!()
}
|
// svd2rust-generated accessors for the ETH MTL Tx queue 1 operating mode
// register. NOTE(review): machine-generated — regenerate from the SVD rather
// than editing by hand.
#[doc = "Register `ETH_MTLTxQ1OMR` reader"]
pub type R = crate::R<ETH_MTLTX_Q1OMR_SPEC>;
#[doc = "Register `ETH_MTLTxQ1OMR` writer"]
pub type W = crate::W<ETH_MTLTX_Q1OMR_SPEC>;
#[doc = "Field `FTQ` reader - FTQ"]
pub type FTQ_R = crate::BitReader;
#[doc = "Field `FTQ` writer - FTQ"]
pub type FTQ_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `TSF` reader - TSF"]
pub type TSF_R = crate::BitReader;
#[doc = "Field `TSF` writer - TSF"]
pub type TSF_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `TXQEN` reader - TXQEN"]
pub type TXQEN_R = crate::FieldReader;
#[doc = "Field `TXQEN` writer - TXQEN"]
pub type TXQEN_W<'a, REG, const O: u8> = crate::FieldWriter<'a, REG, 2, O>;
#[doc = "Field `TTC` reader - TTC"]
pub type TTC_R = crate::FieldReader;
#[doc = "Field `TTC` writer - TTC"]
pub type TTC_W<'a, REG, const O: u8> = crate::FieldWriter<'a, REG, 2, O>;
#[doc = "Field `TQS` reader - TQS"]
pub type TQS_R = crate::FieldReader<u16>;
#[doc = "Field `TQS` writer - TQS"]
pub type TQS_W<'a, REG, const O: u8> = crate::FieldWriter<'a, REG, 9, O, u16>;
impl R {
    #[doc = "Bit 0 - FTQ"]
    #[inline(always)]
    pub fn ftq(&self) -> FTQ_R {
        FTQ_R::new((self.bits & 1) != 0)
    }
    #[doc = "Bit 1 - TSF"]
    #[inline(always)]
    pub fn tsf(&self) -> TSF_R {
        TSF_R::new(((self.bits >> 1) & 1) != 0)
    }
    #[doc = "Bits 2:3 - TXQEN"]
    #[inline(always)]
    pub fn txqen(&self) -> TXQEN_R {
        TXQEN_R::new(((self.bits >> 2) & 3) as u8)
    }
    #[doc = "Bits 4:5 - TTC"]
    #[inline(always)]
    pub fn ttc(&self) -> TTC_R {
        TTC_R::new(((self.bits >> 4) & 3) as u8)
    }
    #[doc = "Bits 16:24 - TQS"]
    #[inline(always)]
    pub fn tqs(&self) -> TQS_R {
        // 9-bit field, hence the 0x01ff mask.
        TQS_R::new(((self.bits >> 16) & 0x01ff) as u16)
    }
}
impl W {
    #[doc = "Bit 0 - FTQ"]
    #[inline(always)]
    #[must_use]
    pub fn ftq(&mut self) -> FTQ_W<ETH_MTLTX_Q1OMR_SPEC, 0> {
        FTQ_W::new(self)
    }
    #[doc = "Bit 1 - TSF"]
    #[inline(always)]
    #[must_use]
    pub fn tsf(&mut self) -> TSF_W<ETH_MTLTX_Q1OMR_SPEC, 1> {
        TSF_W::new(self)
    }
    #[doc = "Bits 2:3 - TXQEN"]
    #[inline(always)]
    #[must_use]
    pub fn txqen(&mut self) -> TXQEN_W<ETH_MTLTX_Q1OMR_SPEC, 2> {
        TXQEN_W::new(self)
    }
    #[doc = "Bits 4:5 - TTC"]
    #[inline(always)]
    #[must_use]
    pub fn ttc(&mut self) -> TTC_W<ETH_MTLTX_Q1OMR_SPEC, 4> {
        TTC_W::new(self)
    }
    #[doc = "Bits 16:24 - TQS"]
    #[inline(always)]
    #[must_use]
    pub fn tqs(&mut self) -> TQS_W<ETH_MTLTX_Q1OMR_SPEC, 16> {
        TQS_W::new(self)
    }
    #[doc = "Writes raw bits to the register."]
    #[inline(always)]
    pub unsafe fn bits(&mut self, bits: u32) -> &mut Self {
        self.bits = bits;
        self
    }
}
#[doc = "Tx queue 1 operating mode Register\n\nYou can [`read`](crate::generic::Reg::read) this register and get [`eth_mtltx_q1omr::R`](R). You can [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero) this register using [`eth_mtltx_q1omr::W`](W). You can also [`modify`](crate::generic::Reg::modify) this register. See [API](https://docs.rs/svd2rust/#read--modify--write-api)."]
pub struct ETH_MTLTX_Q1OMR_SPEC;
impl crate::RegisterSpec for ETH_MTLTX_Q1OMR_SPEC {
    type Ux = u32;
}
#[doc = "`read()` method returns [`eth_mtltx_q1omr::R`](R) reader structure"]
impl crate::Readable for ETH_MTLTX_Q1OMR_SPEC {}
#[doc = "`write(|w| ..)` method takes [`eth_mtltx_q1omr::W`](W) writer structure"]
impl crate::Writable for ETH_MTLTX_Q1OMR_SPEC {
    const ZERO_TO_MODIFY_FIELDS_BITMAP: Self::Ux = 0;
    const ONE_TO_MODIFY_FIELDS_BITMAP: Self::Ux = 0;
}
#[doc = "`reset()` method sets ETH_MTLTxQ1OMR to value 0"]
impl crate::Resettable for ETH_MTLTX_Q1OMR_SPEC {
    const RESET_VALUE: Self::Ux = 0;
}
|
pub use super::crc32::compute_crc4_remainder_demo;
pub use super::crc32::compute_crc32_remainder;
// NOTE(review): this test only prints the CRC-4 demo remainder and asserts
// nothing — add an expected-value assertion once the reference value is known.
#[test]
fn test_compute_crc4_demo() {
    let crc4_rem = compute_crc4_remainder_demo();
    println!("crc4 remainder: {:b}", crc4_rem);
}
// Table-driven check of CRC-32 remainders for small messages. The value for
// input 1 matches the standard CRC-32 generator polynomial 0x04C11DB7.
#[test]
fn test_compute_crc32() {
    let cases = [
        (0, 0),
        (1, 0x04c11db7),
        (2, 0x09823b6e),
        (3, 0x0d4326d9),
        (4, 0x130476dc),
        (5, 0x17c56b6b),
        (6, 0x1a864db2),
        (7, 0x1e475005),
        (8, 0x2608edb8),
        (15, 0x384fbdbd),
        (23, 0x52568b75),
        (31, 0x745e66cd),
    ];
    for &(input, expected) in cases.iter() {
        assert_eq!(compute_crc32_remainder(input), expected);
    }
}
|
use worker::schema::GeneratedSchema;
/// Handle locating an entity in chunked storage, plus the bit field of the
/// components it carries (one bit per component type in the schema).
#[derive(Debug, Default)]
pub struct Entity<S: GeneratedSchema> {
    // Which components this entity has, encoded per the schema.
    pub bit_field: S::ComponentBitField,
    // Index of the chunk that stores the entity's data.
    pub chunk_index: usize,
    // The entity's slot within that chunk.
    pub index_in_chunk: usize,
}
impl<S: GeneratedSchema> Entity<S> {
    /// Construct an entity handle from its storage coordinates and its
    /// component bit field.
    pub fn new(
        chunk_index: usize,
        index_in_chunk: usize,
        bit_field: S::ComponentBitField,
    ) -> Entity<S> {
        Entity {
            chunk_index,
            index_in_chunk,
            bit_field,
        }
    }
}
|
use std::path::PathBuf;
use structopt::StructOpt;
use omnicolor_rust::palettes::*;
use omnicolor_rust::{Error, GrowthImageBuilder, PixelLoc, RGB};
// Command-line options for the two-color growth-image demo.
// (Plain `//` comments on purpose: `///` doc comments on structopt fields
// would be emitted into the generated --help text.)
#[derive(Debug, StructOpt)]
struct Options {
    // Output image path.
    #[structopt(short = "o", long)]
    output: PathBuf,
    #[structopt(short, long, default_value = "1920")]
    width: u32,
    #[structopt(short, long, default_value = "1080")]
    height: u32,
    // Fraction of all pixels filled during the first stage.
    #[structopt(short, long, default_value = "0.5")]
    proportion_first_color: f32,
    #[structopt(
        long,
        default_value = "1.0",
        help = "Size of the color palette relative to the number of pixels in each stage"
    )]
    proportion_excess_colors: f32,
    // Palette centers, parsed from hex RGB.
    #[structopt(long, default_value = "ff6680")]
    first_color: RGB,
    #[structopt(long, default_value = "80ff66")]
    second_color: RGB,
    // Radius of each spherical palette around its central color.
    #[structopt(long, default_value = "50")]
    color_radius: f32,
    #[structopt(long)]
    reset_frontier_for_second: bool,
    #[structopt(long)]
    num_additional_seeds: Option<u32>,
    #[structopt(
        long,
        help = "(x,y), location of the first point",
        min_values = 2,
        max_values = 2
    )]
    initial_point: Vec<i32>,
    #[structopt(
        long,
        help = "(x1,y1,x2,y2), endpoints of a wall during the first stage",
        min_values = 4,
        max_values = 4
    )]
    wall_location: Vec<i32>,
    #[structopt(
        long,
        help = "(x1,y1,x2,y2), endpoints of a portal during first stage",
        min_values = 4,
        max_values = 4
    )]
    portal_location: Vec<i32>,
}
/// Grow a two-stage image: stage one fills around a seed with colors near
/// `first_color` (optionally constrained by a wall and a portal); stage two
/// continues with colors near `second_color`.
fn main() -> Result<(), Error> {
    let opt = Options::from_args();
    // Split the pixel budget between the two stages.
    let num_pixels_first =
        ((opt.width * opt.height) as f32 * opt.proportion_first_color) as usize;
    let num_pixels_second =
        (opt.width * opt.height) as usize - num_pixels_first;
    // The number of colors to generate can be automatically
    // determined from the size of the image, or can be specified
    // directly. A stage ends either when the palette runs out of
    // colors, when the max number of pixels for that stage is
    // reached, or when no further pixels are available to be filled.
    let num_colors_first =
        ((num_pixels_first as f32) * opt.proportion_excess_colors) as u32;
    let num_colors_second =
        ((num_pixels_second as f32) * opt.proportion_excess_colors) as u32;
    let first_palette = SphericalPalette {
        central_color: opt.first_color,
        color_radius: opt.color_radius,
    };
    let second_palette = SphericalPalette {
        central_color: opt.second_color,
        color_radius: opt.color_radius,
    };
    let mut builder = GrowthImageBuilder::new();
    builder
        .show_progress_bar()
        .add_layer(opt.width, opt.height)
        .epsilon(5.0);
    // First stage: first palette, capped at the stage's pixel budget.
    let stage_builder = builder
        .new_stage()
        .palette(first_palette)
        .n_colors(num_colors_first)
        .max_iter(num_pixels_first);
    // Optional explicit seed point (structopt enforces exactly 2 values).
    if opt.initial_point.len() == 2 {
        let v = &opt.initial_point;
        stage_builder.seed_points(vec![PixelLoc {
            layer: 0,
            i: v[0],
            j: v[1],
        }]);
    }
    // Optional wall: forbid every pixel on the segment (x1,y1)-(x2,y2).
    if opt.wall_location.len() == 4 {
        let v = &opt.wall_location;
        stage_builder.forbidden_points(
            PixelLoc {
                layer: 0,
                i: v[0],
                j: v[1],
            }
            .line_to(PixelLoc {
                layer: 0,
                i: v[2],
                j: v[3],
            }),
        );
    }
    // Optional portal: connect the two endpoints so growth can jump between.
    if opt.portal_location.len() == 4 {
        let v = &opt.portal_location;
        stage_builder.connected_points(vec![(
            PixelLoc {
                layer: 0,
                i: v[0],
                j: v[1],
            },
            PixelLoc {
                layer: 0,
                i: v[2],
                j: v[3],
            },
        )]);
    }
    // Second stage: second palette; optionally restart the growth frontier.
    let stage_builder = builder
        .new_stage()
        .palette(second_palette)
        .n_colors(num_colors_second)
        .grow_from_previous(!opt.reset_frontier_for_second);
    if let Some(random_seeds) = opt.num_additional_seeds {
        stage_builder.num_random_seed_points(random_seeds);
    }
    let mut image = builder.build()?;
    image.fill_until_done();
    // NOTE(review): if write() returns a Result it is silently dropped here —
    // confirm the API and propagate the error if so.
    image.write(opt.output);
    Ok(())
}
|
#[no_mangle]
pub extern fn physics_single_chain_ufjc_morse_thermodynamics_isotensional_asymptotic_end_to_end_length(number_of_links: u8, link_length: f64, link_stiffness: f64, link_energy: f64, force: f64, temperature: f64) -> f64
{
super::end_to_end_length(&number_of_links, &link_length, &link_stiffness, &link_energy, &force, &temperature)
}
#[no_mangle]
pub extern fn physics_single_chain_ufjc_morse_thermodynamics_isotensional_asymptotic_end_to_end_length_per_link(link_length: f64, link_stiffness: f64, link_energy: f64, force: f64, temperature: f64) -> f64
{
super::end_to_end_length_per_link(&link_length, &link_stiffness, &link_energy, &force, &temperature)
}
#[no_mangle]
pub extern fn physics_single_chain_ufjc_morse_thermodynamics_isotensional_asymptotic_nondimensional_end_to_end_length(number_of_links: u8, nondimensional_link_stiffness: f64, nondimensional_link_energy: f64, nondimensional_force: f64) -> f64
{
super::nondimensional_end_to_end_length(&number_of_links, &nondimensional_link_stiffness, &nondimensional_link_energy, &nondimensional_force)
}
#[no_mangle]
pub extern fn physics_single_chain_ufjc_morse_thermodynamics_isotensional_asymptotic_nondimensional_end_to_end_length_per_link(nondimensional_link_stiffness: f64, nondimensional_link_energy: f64, nondimensional_force: f64) -> f64
{
super::nondimensional_end_to_end_length_per_link(&nondimensional_link_stiffness, &nondimensional_link_energy, &nondimensional_force)
}
#[no_mangle]
pub extern fn physics_single_chain_ufjc_morse_thermodynamics_isotensional_asymptotic_gibbs_free_energy(number_of_links: u8, link_length: f64, hinge_mass: f64, link_stiffness: f64, link_energy: f64, force: f64, temperature: f64) -> f64
{
super::gibbs_free_energy(&number_of_links, &link_length, &hinge_mass, &link_stiffness, &link_energy, &force, &temperature)
}
#[no_mangle]
pub extern fn physics_single_chain_ufjc_morse_thermodynamics_isotensional_asymptotic_gibbs_free_energy_per_link(link_length: f64, hinge_mass: f64, link_stiffness: f64, link_energy: f64, force: f64, temperature: f64) -> f64
{
super::gibbs_free_energy_per_link(&link_length, &hinge_mass, &link_stiffness, &link_energy, &force, &temperature)
}
#[no_mangle]
pub extern fn physics_single_chain_ufjc_morse_thermodynamics_isotensional_asymptotic_relative_gibbs_free_energy(number_of_links: u8, link_length: f64, link_stiffness: f64, link_energy: f64, force: f64, temperature: f64) -> f64
{
super::relative_gibbs_free_energy(&number_of_links, &link_length, &link_stiffness, &link_energy, &force, &temperature)
}
#[no_mangle]
pub extern fn physics_single_chain_ufjc_morse_thermodynamics_isotensional_asymptotic_relative_gibbs_free_energy_per_link(link_length: f64, link_stiffness: f64, link_energy: f64, force: f64, temperature: f64) -> f64
{
super::relative_gibbs_free_energy_per_link(&link_length, &link_stiffness, &link_energy, &force, &temperature)
}
#[no_mangle]
pub extern fn physics_single_chain_ufjc_morse_thermodynamics_isotensional_asymptotic_nondimensional_gibbs_free_energy(number_of_links: u8, link_length: f64, hinge_mass: f64, nondimensional_link_stiffness: f64, nondimensional_link_energy: f64, nondimensional_force: f64, temperature: f64) -> f64
{
super::nondimensional_gibbs_free_energy(&number_of_links, &link_length, &hinge_mass, &nondimensional_link_stiffness, &nondimensional_link_energy, &nondimensional_force, &temperature)
}
#[no_mangle]
pub extern fn physics_single_chain_ufjc_morse_thermodynamics_isotensional_asymptotic_nondimensional_gibbs_free_energy_per_link(link_length: f64, hinge_mass: f64, nondimensional_link_stiffness: f64, nondimensional_link_energy: f64, nondimensional_force: f64, temperature: f64) -> f64
{
super::nondimensional_gibbs_free_energy_per_link(&link_length, &hinge_mass, &nondimensional_link_stiffness, &nondimensional_link_energy, &nondimensional_force, &temperature)
}
#[no_mangle]
pub extern fn physics_single_chain_ufjc_morse_thermodynamics_isotensional_asymptotic_nondimensional_relative_gibbs_free_energy(number_of_links: u8, nondimensional_link_stiffness: f64, nondimensional_link_energy: f64, nondimensional_force: f64) -> f64
{
super::nondimensional_relative_gibbs_free_energy(&number_of_links, &nondimensional_link_stiffness, &nondimensional_link_energy, &nondimensional_force)
}
#[no_mangle]
pub extern fn physics_single_chain_ufjc_morse_thermodynamics_isotensional_asymptotic_nondimensional_relative_gibbs_free_energy_per_link(nondimensional_link_stiffness: f64, nondimensional_link_energy: f64, nondimensional_force: f64) -> f64
{
super::nondimensional_relative_gibbs_free_energy_per_link(&nondimensional_link_stiffness, &nondimensional_link_energy, &nondimensional_force)
}
|
use cfg_if::cfg_if;
use serde::{Deserialize, Serialize};
use std::os::unix::process::ExitStatusExt;
use std::process::{Command, Output};
use crate::Error;
/// Python code submitted for execution: either a single line or a list of
/// lines that are joined with newlines before being run.
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(untagged)]
pub enum ExecCode {
    /// A single line of code.
    Line(String),
    /// Multiple lines, executed together as one script.
    Multi(Vec<String>),
}
/// Request to execute a snippet of Python code.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ExecRequest {
    /// The code to run (see `ExecCode`).
    pub code: ExecCode,
}
/// Result of running a code snippet: captured output plus how the
/// interpreter process terminated.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ExecResponse {
    /// Captured standard output (lossily decoded as UTF-8).
    pub stdout: String,
    /// Captured standard error (lossily decoded as UTF-8).
    pub stderr: String,
    /// Process exit code, if it exited normally.
    pub code: Option<i32>,
    /// Terminating signal, if the process was killed by one (unix only).
    pub signal: Option<i32>,
}
impl From<Output> for ExecResponse {
fn from(output: Output) -> Self {
ExecResponse {
stdout: String::from_utf8_lossy(&output.stdout).to_string(),
stderr: String::from_utf8_lossy(&output.stderr).to_string(),
code: output.status.code(),
signal: output.status.signal(),
}
}
}
pub fn exec(req: &ExecRequest) -> Result<ExecResponse, Error> {
let code = match &req.code {
ExecCode::Line(line) => line.to_owned(),
ExecCode::Multi(lines) => {
let mut code = String::new();
for line in lines {
code.push_str(&line);
code.push('\n');
}
code
}
};
cfg_if! {
if #[cfg(feature = "lambda")] {
let output = Command::new("/usr/bin/python3.8").arg("-c").arg(code).output()?;
} else {
let output = Command::new("/usr/bin/python").arg("-c").arg(code).output()?;
}
}
Ok(output.into())
}
|
use log::{debug, trace};
use super::{astroplant_capnp, Error};
use capnp::serialize_packed;
use futures::channel::oneshot;
use futures::future::{BoxFuture, FutureExt};
use ratelimit_meter::{algorithms::NonConformanceExt, KeyedRateLimiter};
/// RPC requests the server answers on behalf of kits. Each variant carries a
/// oneshot sender through which the in-process handler delivers the result.
#[derive(Debug)]
pub enum ServerRpcRequest {
    /// Request for the server's version string.
    Version {
        response: oneshot::Sender<String>,
    },
    /// Request for the kit's active configuration (JSON), if any.
    GetActiveConfiguration {
        kit_serial: String,
        response: oneshot::Sender<Option<serde_json::Value>>,
    },
    /// Request for the list of known quantity types (JSON values).
    GetQuantityTypes {
        response: oneshot::Sender<Vec<serde_json::Value>>,
    },
}
/// A fully serialized RPC response addressed to a kit.
#[derive(Debug)]
pub struct ServerRpcResponse {
    /// Serial of the kit this response should be delivered to.
    pub kit_serial: String,
    /// Packed capnp-encoded `server_rpc_response` message.
    pub bytes: Vec<u8>,
}
/// Incrementally builds a capnp `server_rpc_response` message for one kit.
struct ServerRpcResponseBuilder {
    kit_serial: String,
    message_builder: capnp::message::Builder<capnp::message::HeapAllocator>,
}
impl ServerRpcResponseBuilder {
    /// Starts a response message, echoing the request `id` so the kit can
    /// correlate the response with its original request.
    pub fn new(kit_serial: String, id: u64) -> Self {
        let mut message_builder = capnp::message::Builder::new_default();
        let mut response_builder =
            message_builder.init_root::<astroplant_capnp::server_rpc_response::Builder>();
        response_builder.set_id(id);
        Self {
            kit_serial,
            message_builder,
        }
    }

    /// Marks the response as a "method not found" error.
    pub fn set_error_method_not_found(mut self) -> Self {
        let response_builder = self
            .message_builder
            .get_root::<astroplant_capnp::server_rpc_response::Builder>()
            .expect("could not get root");
        response_builder.init_error().set_method_not_found(());
        self
    }

    /// Marks the response as a rate-limit error; `millis` is the time the kit
    /// should wait before retrying.
    pub fn set_error_rate_limit(mut self, millis: u64) -> Self {
        let response_builder = self
            .message_builder
            .get_root::<astroplant_capnp::server_rpc_response::Builder>()
            .expect("could not get root");
        response_builder.init_error().set_rate_limit(millis);
        self
    }

    /// Sets the payload to the server version string.
    pub fn set_version(mut self, version: String) -> Self {
        let mut response_builder = self
            .message_builder
            .get_root::<astroplant_capnp::server_rpc_response::Builder>()
            .expect("could not get root");
        response_builder.set_version(&version);
        self
    }

    /// Sets the payload to the kit's active configuration: the JSON rendered
    /// to a string when present, or an explicit "none" marker otherwise.
    pub fn set_active_configuration(mut self, configuration: Option<serde_json::Value>) -> Self {
        let response_builder = self
            .message_builder
            .get_root::<astroplant_capnp::server_rpc_response::Builder>()
            .expect("could not get root");
        match configuration {
            Some(configuration) => {
                response_builder
                    .init_get_active_configuration()
                    .set_configuration(&configuration.to_string());
            }
            None => {
                response_builder
                    .init_get_active_configuration()
                    .set_none(());
            }
        }
        self
    }

    /// Sets the payload to the quantity-type list, JSON-serialized to a string.
    pub fn set_quantity_types(mut self, quantity_types: Vec<serde_json::Value>) -> Self {
        let mut response_builder = self
            .message_builder
            .get_root::<astroplant_capnp::server_rpc_response::Builder>()
            .expect("could not get root");
        response_builder.set_get_quantity_types(&serde_json::to_string(&quantity_types).unwrap());
        self
    }

    /// Serializes the message (packed encoding) into a deliverable response.
    pub fn create(self) -> ServerRpcResponse {
        let mut bytes = Vec::new();
        serialize_packed::write_message(&mut bytes, &self.message_builder).unwrap();
        ServerRpcResponse {
            kit_serial: self.kit_serial,
            bytes,
        }
    }
}
/// Future resolving to the response to send back to the kit, or `None` when
/// the in-process handler dropped its end of the oneshot channel.
pub type ServerRpcResponder<'a> = BoxFuture<'a, Option<ServerRpcResponse>>;
/// Decodes and dispatches kit RPC requests, enforcing a per-kit rate limit.
pub struct ServerRpcHandler {
    // Keyed by kit serial.
    rate_limiter: KeyedRateLimiter<String>,
}
impl ServerRpcHandler {
pub fn new() -> Self {
const NUM_REQUESTS: u32 = 15u32;
const PER: std::time::Duration = std::time::Duration::from_secs(60);
let rate_limiter =
KeyedRateLimiter::<String>::new(std::num::NonZeroU32::new(NUM_REQUESTS).unwrap(), PER);
Self { rate_limiter }
}
fn check_rate_limit(&mut self, kit_serial: String, request_id: u64) -> Result<(), Error> {
debug!(
"request id {} of kit {} was rate limited",
request_id, kit_serial
);
match self.rate_limiter.check(kit_serial.clone()) {
Ok(_) => Ok(()),
Err(neg) => {
let response = ServerRpcResponseBuilder::new(kit_serial.clone(), request_id)
.set_error_rate_limit(neg.wait_time().as_millis() as u64)
.create();
Err(Error::ServerRpcError(response))
}
}
}
pub fn handle_rpc_request(
&mut self,
kit_serial: String,
mut payload: &[u8],
) -> Result<(ServerRpcRequest, Option<ServerRpcResponder<'static>>), Error> {
let message_reader =
serialize_packed::read_message(&mut payload, capnp::message::ReaderOptions::default())
.unwrap();
let rpc_request = message_reader
.get_root::<astroplant_capnp::server_rpc_request::Reader>()
.map_err(Error::Capnp)?;
let id: u64 = rpc_request.get_id();
self.check_rate_limit(kit_serial.clone(), id)?;
match rpc_request.which().map_err(|_| {
let response = ServerRpcResponseBuilder::new(kit_serial.clone(), id)
.set_error_method_not_found()
.create();
Error::ServerRpcError(response)
})? {
astroplant_capnp::server_rpc_request::Which::Version(_) => {
trace!("received server RPC version request");
let (sender, receiver) = oneshot::channel();
let request = ServerRpcRequest::Version { response: sender };
let receiver = receiver.map(move |version| match version {
Ok(version) => Some(
ServerRpcResponseBuilder::new(kit_serial, id)
.set_version(version)
.create(),
),
Err(_) => None,
});
Ok((request, Some(receiver.boxed())))
}
astroplant_capnp::server_rpc_request::Which::GetActiveConfiguration(_) => {
trace!("received server RPC active configuration request");
let (sender, receiver) = oneshot::channel();
let request = ServerRpcRequest::GetActiveConfiguration {
kit_serial: kit_serial.clone(),
response: sender,
};
let receiver = receiver.map(move |configuration| match configuration {
Ok(configuration) => Some(
ServerRpcResponseBuilder::new(kit_serial, id)
.set_active_configuration(configuration)
.create(),
),
Err(_) => None,
});
Ok((request, Some(receiver.boxed())))
}
astroplant_capnp::server_rpc_request::Which::GetQuantityTypes(_) => {
trace!("received server RPC quantity types");
let (sender, receiver) = oneshot::channel();
let request = ServerRpcRequest::GetQuantityTypes { response: sender };
let receiver = receiver.map(move |quantity_types| match quantity_types {
Ok(quantity_types) => Some(
ServerRpcResponseBuilder::new(kit_serial, id)
.set_quantity_types(quantity_types)
.create(),
),
Err(_) => None,
});
Ok((request, Some(receiver.boxed())))
}
}
}
}
|
// Copyright 2018-2020 Parity Technologies (UK) Ltd.
// This file is part of Substrate.
// Substrate is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Substrate is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Substrate. If not, see <http://www.gnu.org/licenses/>.
//! This module takes care of loading, checking and preprocessing of a
//! wasm module before execution. It also extracts some essential information
//! from a module.
use crate::wasm::env_def::ImportSatisfyCheck;
use crate::wasm::PrefabWasmModule;
use crate::Schedule;
use parity_wasm::elements::{self, External, Internal, MemoryType, Type, ValueType};
use pwasm_utils;
use pwasm_utils::rules;
use sp_runtime::traits::SaturatedConversion;
use sp_std::prelude::*;
/// Currently, all imported functions must be located inside this module. We might support
/// additional modules for versioning later.
pub const IMPORT_MODULE_FN: &str = "seal0";

/// Imported memory must be located inside this module. The reason for this is that current
/// compiler toolchains might not support specifying other modules than "env" for memory imports.
pub const IMPORT_MODULE_MEMORY: &str = "env";
/// A wasm module undergoing validation and instrumentation.
struct ContractModule<'a> {
    /// A deserialized module. The module is valid (this is guaranteed by the `new` method).
    module: elements::Module,
    /// Schedule supplying instrumentation costs and limits.
    schedule: &'a Schedule,
}
impl<'a> ContractModule<'a> {
    /// Creates a new instance of `ContractModule`.
    ///
    /// Returns `Err` if the `original_code` couldn't be decoded or
    /// if it contains an invalid module.
    fn new(original_code: &[u8], schedule: &'a Schedule) -> Result<Self, &'static str> {
        use wasmi_validation::{validate_module, PlainValidator};

        let module =
            elements::deserialize_buffer(original_code).map_err(|_| "Can't decode wasm code")?;

        // Make sure that the module is valid.
        validate_module::<PlainValidator>(&module).map_err(|_| "Module is not valid")?;

        // Return a `ContractModule` instance with
        // __valid__ module.
        Ok(ContractModule { module, schedule })
    }

    /// Ensures that module doesn't declare internal memories.
    ///
    /// In this runtime we only allow wasm module to import memory from the environment.
    /// Memory section contains declarations of internal linear memories, so if we find one
    /// we reject such a module.
    fn ensure_no_internal_memory(&self) -> Result<(), &'static str> {
        if self.module.memory_section().map_or(false, |ms| ms.entries().len() > 0) {
            return Err("module declares internal memory")
        }
        Ok(())
    }

    /// Ensures that tables declared in the module are not too big.
    fn ensure_table_size_limit(&self, limit: u32) -> Result<(), &'static str> {
        if let Some(table_section) = self.module.table_section() {
            // In Wasm MVP spec, there may be at most one table declared. Double check this
            // explicitly just in case the Wasm version changes.
            if table_section.entries().len() > 1 {
                return Err("multiple tables declared")
            }
            if let Some(table_type) = table_section.entries().first() {
                // Check the table's initial size as there is no instruction or environment function
                // capable of growing the table.
                if table_type.limits().initial() > limit {
                    return Err("table exceeds maximum size allowed")
                }
            }
        }
        Ok(())
    }

    /// Ensures that no floating point types are in use.
    ///
    /// Note: this inspects globals, function locals and function signatures.
    /// Float *instructions* inside function bodies are instead rejected later
    /// by the gas rules (`with_forbidden_floats` in `inject_gas_metering`).
    fn ensure_no_floating_types(&self) -> Result<(), &'static str> {
        if let Some(global_section) = self.module.global_section() {
            for global in global_section.entries() {
                match global.global_type().content_type() {
                    ValueType::F32 | ValueType::F64 =>
                        return Err("use of floating point type in globals is forbidden"),
                    _ => {},
                }
            }
        }

        if let Some(code_section) = self.module.code_section() {
            for func_body in code_section.bodies() {
                for local in func_body.locals() {
                    match local.value_type() {
                        ValueType::F32 | ValueType::F64 =>
                            return Err("use of floating point type in locals is forbidden"),
                        _ => {},
                    }
                }
            }
        }

        if let Some(type_section) = self.module.type_section() {
            for wasm_type in type_section.types() {
                match wasm_type {
                    Type::Function(func_type) => {
                        let return_type = func_type.return_type();
                        for value_type in func_type.params().iter().chain(return_type.iter()) {
                            match value_type {
                                ValueType::F32 | ValueType::F64 =>
                                    return Err(
                                        "use of floating point type in function types is forbidden",
                                    ),
                                _ => {},
                            }
                        }
                    },
                }
            }
        }

        Ok(())
    }

    /// Injects gas metering: `regular_op_cost` per instruction and
    /// `grow_mem_cost` per memory-grow, with float instructions forbidden.
    fn inject_gas_metering(self) -> Result<Self, &'static str> {
        let gas_rules = rules::Set::new(
            self.schedule.regular_op_cost.clone().saturated_into(),
            Default::default(),
        )
        .with_grow_cost(self.schedule.grow_mem_cost.clone().saturated_into())
        .with_forbidden_floats();

        let contract_module =
            pwasm_utils::inject_gas_counter(self.module, &gas_rules, IMPORT_MODULE_FN)
                .map_err(|_| "gas instrumentation failed")?;
        Ok(ContractModule { module: contract_module, schedule: self.schedule })
    }

    /// Injects instrumentation limiting the stack to `max_stack_height`.
    fn inject_stack_height_metering(self) -> Result<Self, &'static str> {
        let contract_module =
            pwasm_utils::stack_height::inject_limiter(self.module, self.schedule.max_stack_height)
                .map_err(|_| "stack height instrumentation failed")?;
        Ok(ContractModule { module: contract_module, schedule: self.schedule })
    }

    /// Check that the module has required exported functions. For now
    /// these are just entrypoints:
    ///
    /// - 'call'
    /// - 'deploy'
    ///
    /// Any other exports are not allowed.
    fn scan_exports(&self) -> Result<(), &'static str> {
        let mut deploy_found = false;
        let mut call_found = false;

        let module = &self.module;

        let types = module.type_section().map(|ts| ts.types()).unwrap_or(&[]);
        let export_entries = module.export_section().map(|is| is.entries()).unwrap_or(&[]);
        let func_entries = module.function_section().map(|fs| fs.entries()).unwrap_or(&[]);

        // Function index space consists of imported function following by
        // declared functions. Calculate the total number of imported functions so
        // we can use it to convert indexes from function space to declared function space.
        let fn_space_offset = module
            .import_section()
            .map(|is| is.entries())
            .unwrap_or(&[])
            .iter()
            .filter(|entry| match *entry.external() {
                External::Function(_) => true,
                _ => false,
            })
            .count();

        for export in export_entries {
            match export.field() {
                "call" => call_found = true,
                "deploy" => deploy_found = true,
                _ => return Err("unknown export: expecting only deploy and call functions"),
            }

            // Then check the export kind. "call" and "deploy" are
            // functions.
            let fn_idx = match export.internal() {
                Internal::Function(ref fn_idx) => *fn_idx,
                _ => return Err("expected a function"),
            };

            // convert index from function index space to declared index space.
            let fn_idx = match fn_idx.checked_sub(fn_space_offset as u32) {
                Some(fn_idx) => fn_idx,
                None => {
                    // Underflow here means fn_idx points to imported function which we don't allow!
                    return Err("entry point points to an imported function")
                },
            };

            // Then check the signature.
            // Both "call" and "deploy" has a () -> () function type.
            let func_ty_idx = func_entries
                .get(fn_idx as usize)
                .ok_or_else(|| "export refers to non-existent function")?
                .type_ref();
            let Type::Function(ref func_ty) = types
                .get(func_ty_idx as usize)
                .ok_or_else(|| "function has a non-existent type")?;
            if !func_ty.params().is_empty() ||
                !(func_ty.return_type().is_none() ||
                    func_ty.return_type() == Some(ValueType::I32))
            {
                return Err("entry point has wrong signature")
            }
        }

        if !deploy_found {
            return Err("deploy function isn't exported")
        }

        if !call_found {
            return Err("call function isn't exported")
        }

        Ok(())
    }

    /// Scan an import section if any.
    ///
    /// This accomplishes two tasks:
    ///
    /// - checks any imported function against defined host functions set, incl. their signatures.
    /// - if there is a memory import, returns its descriptor
    fn scan_imports<C: ImportSatisfyCheck>(&self) -> Result<Option<&MemoryType>, &'static str> {
        let module = &self.module;

        let types = module.type_section().map(|ts| ts.types()).unwrap_or(&[]);
        let import_entries = module.import_section().map(|is| is.entries()).unwrap_or(&[]);

        let mut imported_mem_type = None;

        for import in import_entries {
            let type_idx = match import.external() {
                &External::Table(_) => return Err("Cannot import tables"),
                &External::Global(_) => return Err("Cannot import globals"),
                &External::Function(ref type_idx) => {
                    if import.module() != IMPORT_MODULE_FN {
                        return Err("Invalid module for imported function")
                    }
                    type_idx
                },
                &External::Memory(ref memory_type) => {
                    if import.module() != IMPORT_MODULE_MEMORY {
                        return Err("Invalid module for imported memory")
                    }
                    if import.field() != "memory" {
                        return Err("Memory import must have the field name 'memory'")
                    }
                    if imported_mem_type.is_some() {
                        return Err("Multiple memory imports defined")
                    }
                    imported_mem_type = Some(memory_type);
                    continue
                },
            };

            let Type::Function(ref func_ty) = types
                .get(*type_idx as usize)
                .ok_or_else(|| "validation: import entry points to a non-existent type")?;

            // We disallow importing `seal_println` unless debug features are enabled,
            // which should only be allowed on a dev chain
            if !self.schedule.enable_println && import.field().as_bytes() == b"seal_println" {
                return Err("module imports `seal_println` but debug features disabled")
            }

            // We disallow importing `gas` function here since it is treated as implementation
            // detail.
            if import.field().as_bytes() == b"gas" ||
                !C::can_satisfy(import.field().as_bytes(), func_ty)
            {
                return Err("module imports a non-existent function")
            }
        }
        Ok(imported_mem_type)
    }

    /// Serializes the (instrumented) module back to wasm bytes.
    fn into_wasm_code(self) -> Result<Vec<u8>, &'static str> {
        elements::serialize(self.module).map_err(|_| "error serializing instrumented module")
    }
}
/// Loads the given module given in `original_code`, performs some checks on it and
/// does some preprocessing.
///
/// The checks are:
///
/// - provided code is a valid wasm module.
/// - the module doesn't define an internal memory instance,
/// - imported memory (if any) doesn't reserve more memory than permitted by the `schedule`,
/// - all imported functions from the external environment matches defined by `env` module,
///
/// The preprocessing includes injecting code for gas metering and metering the height of stack.
pub fn prepare_contract<C: ImportSatisfyCheck>(
    original_code: &[u8],
    schedule: &Schedule,
) -> Result<PrefabWasmModule, &'static str> {
    let mut contract_module = ContractModule::new(original_code, schedule)?;
    contract_module.scan_exports()?;
    contract_module.ensure_no_internal_memory()?;
    contract_module.ensure_table_size_limit(schedule.max_table_size)?;
    contract_module.ensure_no_floating_types()?;

    // Initial and maximum page counts of the (imported) linear memory.
    struct MemoryDefinition {
        initial: u32,
        maximum: u32,
    }

    let memory_def = if let Some(memory_type) = contract_module.scan_imports::<C>()? {
        // Inspect the module to extract the initial and maximum page count.
        let limits = memory_type.limits();
        match (limits.initial(), limits.maximum()) {
            (initial, Some(maximum)) if initial > maximum => {
                return Err(
                    "Requested initial number of pages should not exceed the requested maximum",
                )
            },
            (_, Some(maximum)) if maximum > schedule.max_memory_pages => {
                return Err("Maximum number of pages should not exceed the configured maximum.")
            },
            (initial, Some(maximum)) => MemoryDefinition { initial, maximum },
            (_, None) => {
                // Maximum number of pages should be always declared.
                // This isn't a hard requirement and can be treated as a maximum set
                // to configured maximum.
                return Err("Maximum number of pages should be always declared.")
            },
        }
    } else {
        // If no memory was imported then just create an empty placeholder.
        // Any access to it will lead to out of bounds trap.
        MemoryDefinition { initial: 0, maximum: 0 }
    };

    contract_module = contract_module.inject_gas_metering()?.inject_stack_height_metering()?;

    Ok(PrefabWasmModule {
        schedule_version: schedule.version,
        initial: memory_def.initial,
        maximum: memory_def.maximum,
        _reserved: None,
        code: contract_module.into_wasm_code()?,
    })
}
#[cfg(test)]
mod tests {
    use super::*;
    use crate::exec::Ext;
    use assert_matches::assert_matches;
    use std::fmt;

    impl fmt::Debug for PrefabWasmModule {
        fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
            write!(f, "PreparedContract {{ .. }}")
        }
    }

    // Define test environment for tests. We need ImportSatisfyCheck
    // implementation from it. So actual implementations doesn't matter.
    define_env!(TestEnv, <E: Ext>,
        panic(_ctx) => { unreachable!(); },

        // gas is an implementation defined function and a contract can't import it.
        gas(_ctx, _amount: u32) => { unreachable!(); },

        nop(_ctx, _unused: u64) => { unreachable!(); },

        seal_println(_ctx, _ptr: u32, _len: u32) => { unreachable!(); },
    );

    // Declares a test that runs `prepare_contract::<TestEnv>` on the given
    // wat source and matches the result against the expected pattern.
    macro_rules! prepare_test {
        ($name:ident, $wat:expr, $($expected:tt)*) => {
            #[test]
            fn $name() {
                let wasm = wat::parse_str($wat).unwrap();
                let schedule = Schedule::default();
                let r = prepare_contract::<TestEnv>(wasm.as_ref(), &schedule);
                assert_matches!(r, $($expected)*);
            }
        };
    }

    // Float *instructions* pass `ensure_no_floating_types` (it only checks
    // globals/locals/signatures) and are rejected by gas instrumentation.
    prepare_test!(
        no_floats,
        r#"
        (module
            (func (export "call")
                (drop
                    (f32.add
                        (f32.const 0)
                        (f32.const 1)
                    )
                )
            )
            (func (export "deploy"))
        )"#,
        Err("gas instrumentation failed")
    );

    mod memories {
        use super::*;

        // Tests below assumes that maximum page number is configured to a certain number.
        #[test]
        fn assume_memory_size() {
            assert_eq!(Schedule::default().max_memory_pages, 16);
        }

        prepare_test!(
            memory_with_one_page,
            r#"
            (module
                (import "env" "memory" (memory 1 1))
                (func (export "call"))
                (func (export "deploy"))
            )
            "#,
            Ok(_)
        );

        prepare_test!(
            internal_memory_declaration,
            r#"
            (module
                (memory 1 1)
                (func (export "call"))
                (func (export "deploy"))
            )
            "#,
            Err("module declares internal memory")
        );

        prepare_test!(
            no_memory_import,
            r#"
            (module
                ;; no memory imported
                (func (export "call"))
                (func (export "deploy"))
            )"#,
            Ok(_)
        );

        prepare_test!(
            initial_exceeds_maximum,
            r#"
            (module
                (import "env" "memory" (memory 16 1))
                (func (export "call"))
                (func (export "deploy"))
            )
            "#,
            Err("Module is not valid")
        );

        prepare_test!(
            no_maximum,
            r#"
            (module
                (import "env" "memory" (memory 1))
                (func (export "call"))
                (func (export "deploy"))
            )
            "#,
            Err("Maximum number of pages should be always declared.")
        );

        prepare_test!(
            requested_maximum_exceeds_configured_maximum,
            r#"
            (module
                (import "env" "memory" (memory 1 17))
                (func (export "call"))
                (func (export "deploy"))
            )
            "#,
            Err("Maximum number of pages should not exceed the configured maximum.")
        );

        prepare_test!(
            field_name_not_memory,
            r#"
            (module
                (import "env" "forgetit" (memory 1 1))
                (func (export "call"))
                (func (export "deploy"))
            )
            "#,
            Err("Memory import must have the field name 'memory'")
        );

        prepare_test!(
            multiple_memory_imports,
            r#"
            (module
                (import "env" "memory" (memory 1 1))
                (import "env" "memory" (memory 1 1))
                (func (export "call"))
                (func (export "deploy"))
            )
            "#,
            Err("Module is not valid")
        );

        prepare_test!(
            table_import,
            r#"
            (module
                (import "seal0" "table" (table 1 anyfunc))
                (func (export "call"))
                (func (export "deploy"))
            )
            "#,
            Err("Cannot import tables")
        );

        prepare_test!(
            global_import,
            r#"
            (module
                (global $g (import "seal0" "global") i32)
                (func (export "call"))
                (func (export "deploy"))
            )
            "#,
            Err("Cannot import globals")
        );
    }

    mod tables {
        use super::*;

        // Tests below assumes that maximum table size is configured to a certain number.
        #[test]
        fn assume_table_size() {
            assert_eq!(Schedule::default().max_table_size, 16384);
        }

        prepare_test!(
            no_tables,
            r#"
            (module
                (func (export "call"))
                (func (export "deploy"))
            )
            "#,
            Ok(_)
        );

        prepare_test!(
            table_valid_size,
            r#"
            (module
                (table 10000 funcref)
                (func (export "call"))
                (func (export "deploy"))
            )
            "#,
            Ok(_)
        );

        prepare_test!(
            table_too_big,
            r#"
            (module
                (table 20000 funcref)
                (func (export "call"))
                (func (export "deploy"))
            )"#,
            Err("table exceeds maximum size allowed")
        );
    }

    mod imports {
        use super::*;

        prepare_test!(
            can_import_legit_function,
            r#"
            (module
                (import "seal0" "nop" (func (param i64)))
                (func (export "call"))
                (func (export "deploy"))
            )
            "#,
            Ok(_)
        );

        // even though gas is defined the contract can't import it since
        // it is an implementation defined.
        prepare_test!(
            can_not_import_gas_function,
            r#"
            (module
                (import "seal0" "gas" (func (param i32)))
                (func (export "call"))
                (func (export "deploy"))
            )
            "#,
            Err("module imports a non-existent function")
        );

        // memory is in "env" and not in "seal0"
        prepare_test!(
            memory_not_in_seal0,
            r#"
            (module
                (import "seal0" "memory" (memory 1 1))
                (func (export "call"))
                (func (export "deploy"))
            )
            "#,
            Err("Invalid module for imported memory")
        );

        // memory is in "env" and not in some arbitrary module
        prepare_test!(
            memory_not_in_arbitrary_module,
            r#"
            (module
                (import "any_module" "memory" (memory 1 1))
                (func (export "call"))
                (func (export "deploy"))
            )
            "#,
            Err("Invalid module for imported memory")
        );

        // functions are in "seal0" and not in "env"
        prepare_test!(
            function_not_in_env,
            r#"
            (module
                (import "env" "nop" (func (param i64)))
                (func (export "call"))
                (func (export "deploy"))
            )
            "#,
            Err("Invalid module for imported function")
        );

        // functions are in "seal0" and not in in some arbitrary module
        prepare_test!(
            function_not_arbitrary_module,
            r#"
            (module
                (import "any_module" "nop" (func (param i64)))
                (func (export "call"))
                (func (export "deploy"))
            )
            "#,
            Err("Invalid module for imported function")
        );

        // wrong signature
        prepare_test!(
            wrong_signature,
            r#"
            (module
                (import "seal0" "gas" (func (param i64)))
                (func (export "call"))
                (func (export "deploy"))
            )
            "#,
            Err("module imports a non-existent function")
        );

        prepare_test!(
            unknown_func_name,
            r#"
            (module
                (import "seal0" "unknown_func" (func))
                (func (export "call"))
                (func (export "deploy"))
            )
            "#,
            Err("module imports a non-existent function")
        );

        prepare_test!(
            seal_println_debug_disabled,
            r#"
            (module
                (import "seal0" "seal_println" (func $seal_println (param i32 i32)))
                (func (export "call"))
                (func (export "deploy"))
            )
            "#,
            Err("module imports `seal_println` but debug features disabled")
        );

        // With `enable_println` on, `seal_println` may be imported.
        #[test]
        fn seal_println_debug_enabled() {
            let wasm = wat::parse_str(
                r#"
                (module
                    (import "seal0" "seal_println" (func $seal_println (param i32 i32)))
                    (func (export "call"))
                    (func (export "deploy"))
                )
                "#,
            )
            .unwrap();
            let mut schedule = Schedule::default();
            schedule.enable_println = true;
            let r = prepare_contract::<TestEnv>(wasm.as_ref(), &schedule);
            assert_matches!(r, Ok(_));
        }
    }

    mod entrypoints {
        use super::*;

        prepare_test!(
            it_works,
            r#"
            (module
                (func (export "call"))
                (func (export "deploy"))
            )
            "#,
            Ok(_)
        );

        prepare_test!(
            omit_deploy,
            r#"
            (module
                (func (export "call"))
            )
            "#,
            Err("deploy function isn't exported")
        );

        prepare_test!(
            omit_call,
            r#"
            (module
                (func (export "deploy"))
            )
            "#,
            Err("call function isn't exported")
        );

        // Try to use imported function as an entry point.
        prepare_test!(
            try_sneak_export_as_entrypoint,
            r#"
            (module
                (import "seal0" "panic" (func))
                (func (export "deploy"))
                (export "call" (func 0))
            )
            "#,
            Err("entry point points to an imported function")
        );

        // Try to use a global as an entry point.
        prepare_test!(
            try_sneak_export_as_global,
            r#"
            (module
                (func (export "deploy"))
                (global (export "call") i32 (i32.const 0))
            )
            "#,
            Err("expected a function")
        );

        prepare_test!(
            wrong_signature,
            r#"
            (module
                (func (export "deploy"))
                (func (export "call") (param i32))
            )
            "#,
            Err("entry point has wrong signature")
        );

        prepare_test!(
            unknown_exports,
            r#"
            (module
                (func (export "call"))
                (func (export "deploy"))
                (func (export "whatevs"))
            )
            "#,
            Err("unknown export: expecting only deploy and call functions")
        );

        prepare_test!(
            global_float,
            r#"
            (module
                (global $x f32 (f32.const 0))
                (func (export "call"))
                (func (export "deploy"))
            )
            "#,
            Err("use of floating point type in globals is forbidden")
        );

        prepare_test!(
            local_float,
            r#"
            (module
                (func $foo (local f32))
                (func (export "call"))
                (func (export "deploy"))
            )
            "#,
            Err("use of floating point type in locals is forbidden")
        );

        prepare_test!(
            param_float,
            r#"
            (module
                (func $foo (param f32))
                (func (export "call"))
                (func (export "deploy"))
            )
            "#,
            Err("use of floating point type in function types is forbidden")
        );

        prepare_test!(
            result_float,
            r#"
            (module
                (func $foo (result f32) (f32.const 0))
                (func (export "call"))
                (func (export "deploy"))
            )
            "#,
            Err("use of floating point type in function types is forbidden")
        );
    }
}
|
use tower_http::trace::{MakeSpan, OnResponse};
use tracing::Level;
/// Zero-sized marker type that implements both `MakeSpan` (span creation per
/// request) and `OnResponse` (span completion) for tower-http's trace layer.
#[derive(Debug, Clone, Copy)]
pub(crate) struct Tracer;
impl<Body> MakeSpan<Body> for Tracer {
    /// Builds the per-request tracing span from the request's URI, method,
    /// version and a fixed set of headers.
    ///
    /// Response fields are declared `Empty` here so `on_response` can fill
    /// them in later via `Span::record`.
    fn make_span(&mut self, request: &axum::http::Request<Body>) -> tracing::Span {
        tracing::span!(
            Level::INFO,
            "request",
            kind = "server",
            uri = %request.uri(),
            // Fixed: was misspelled `ulr.path`.
            url.path = %request.uri().path(),
            url.query = request.uri().query(),
            url.scheme = request.uri().scheme_str(),
            server.address = request.uri().host(),
            server.port = request.uri().port_u16(),
            http_version = ?request.version(),
            user_agent.original = request.headers().get("user-agent").and_then(|h| h.to_str().ok()),
            http.request.method = %request.method(),
            http.request.header.host = request.headers().get("host").and_then(|h| h.to_str().ok()),
            http.request.header.forwarded_for = request.headers().get("x-forwarded-for").and_then(|h| h.to_str().ok()),
            http.request.header.forwarded_proto = request.headers().get("x-forwarded-proto").and_then(|h| h.to_str().ok()),
            // Fixed: this field was a second `http.request.header.host`, which
            // clobbered the real Host header with the x-forwarded-ssl value.
            http.request.header.forwarded_ssl = request.headers().get("x-forwarded-ssl").and_then(|h| h.to_str().ok()),
            http.request.header.referer = request.headers().get("referer").and_then(|h| h.to_str().ok()),
            http.request.header.fly_forwarded_port = request.headers().get("fly-forwarded-port").and_then(|h| h.to_str().ok()),
            http.request.header.fly_region = request.headers().get("fly-region").and_then(|h| h.to_str().ok()),
            http.request.header.via = request.headers().get("via").and_then(|h| h.to_str().ok()),
            http.response.status_code = tracing::field::Empty,
            http.response.header.content_type = tracing::field::Empty,
        )
    }
}
impl<Body> OnResponse<Body> for Tracer {
    /// Emits a completion event with the status and latency, then back-fills
    /// the response fields that `make_span` declared as `Empty`.
    fn on_response(
        self,
        response: &axum::http::Response<Body>,
        latency: std::time::Duration,
        span: &tracing::Span,
    ) {
        let status = response.status().as_u16();
        let content_type = response
            .headers()
            .get("content-type")
            .and_then(|value| value.to_str().ok());
        tracing::event!(
            Level::INFO,
            status = status,
            latency = format_args!("{} ms", latency.as_millis()),
            "finished processing request"
        );
        span.record("http.response.status_code", status);
        span.record("http.response.header.content_type", content_type);
    }
}
|
use super::message::Error as MessageError;
use super::*;
use crossbeam_channel as channel;
use std::collections::VecDeque;
use std::io::{BufRead, BufReader};
use std::net::TcpStream;
use std::sync::{Arc, Mutex, RwLock};
use std::thread;
use std::time::Instant;
/// Errors reported asynchronously by the client's background read thread.
#[derive(Debug, PartialEq)]
pub enum Error {
// TCP connect to the server address failed.
CannotConnect,
// A line was received but could not be parsed as an IRC message.
ParseError(MessageError),
// Reading a line from the socket failed.
CannotRead,
// The server closed the connection (read loop finished).
EndOfStream,
}
/// IRC client handle. The actual socket is owned by a background thread
/// spawned in `connect`; this handle shares state with it via `Arc`s.
pub struct Client {
// Shared client state, updated by the read thread.
state: Arc<State>,
// Write half + pre-connect message queue, shared with the read thread.
inner: Arc<Inner>,
// Receiver for errors produced by the read thread.
errors: channel::Receiver<Error>,
}
impl Client {
pub fn connect(addr: impl Into<String>) -> Result<Self, Error> {
let (err_tx, err_rx) = channel::bounded(8);
let this = Self {
state: Arc::new(State::new()),
inner: Arc::new(Inner {
stream: Mutex::new(None),
buf: RwLock::new(VecDeque::new()),
}),
errors: err_rx,
};
let state = Arc::clone(&this.state);
let inner = Arc::clone(&this.inner);
let addr = addr.into();
thread::spawn(move || {
let conn = match TcpStream::connect(&addr).map_err(|_err| Error::CannotConnect) {
Ok(conn) => conn,
Err(err) => {
err_tx.send(err);
return;
}
};
let read = conn.try_clone().expect("conn clone for read");
let write = conn.try_clone().expect("conn clone for write");
{
*inner.stream.lock().unwrap() = Some(write);
inner.flush();
}
for line in BufReader::new(read).lines() {
let line = match line.map_err(|_err| Error::CannotRead) {
Ok(line) => line,
Err(err) => {
debug!("error reading: {:?}", err);
err_tx.send(err);
return;
}
};
trace!("<< {}", line.trim());
let msg = match Message::parse(&line).map_err(Error::ParseError) {
Ok(msg) => msg,
Err(err) => {
debug!("error parsing: {:?}", err);
err_tx.send(err);
return;
}
};
inner.update(&msg, &Arc::clone(&state));
state.push_message((Instant::now(), msg));
}
err_tx.send(Error::EndOfStream);
trace!("end of read loop");
});
Ok(this)
}
pub fn errors(&self) -> channel::Receiver<Error> {
self.errors.clone()
}
pub fn state(&self) -> Arc<State> {
Arc::clone(&self.state)
}
}
// `IrcClient` for the public handle simply delegates to the shared `Inner`,
// which owns the socket and the pre-connect queue.
impl IrcClient for Client {
fn write(&self, data: &[u8]) {
self.inner.write(data);
}
fn close(&self) {
self.inner.close();
}
}
/// Connection internals shared between the handle and the read thread.
struct Inner {
// Write half of the socket; `None` until the connection is established.
stream: Mutex<Option<TcpStream>>,
// Messages queued while `stream` is still `None`, flushed on connect.
buf: RwLock<VecDeque<Vec<u8>>>,
}
impl Inner {
/// Applies a parsed server message to the shared state.
///
/// `from_self` is true when the message's prefix nick matches our current
/// nickname, i.e. the server is echoing one of our own actions.
fn update(&self, msg: &Message, state: &Arc<State>) {
let mut from_self = false;
if let Some(Prefix::User { nick, .. }) = &msg.prefix {
if let Some(current) = &state.nickname() {
from_self = current == nick
}
}
match &msg.command {
// Keep the connection alive by answering PING with PONG.
Command::Ping { token } => self.pong(token),
Command::Join { channel, key: _key } => {
// Our own JOIN creates the channel entry; anyone else's JOIN must
// refer to a channel we already track (panics otherwise).
let channel = if from_self {
state.channels().add(channel.clone())
} else {
state.channels().get(&channel).expect("existing channel")
};
channel.add(msg.get_nick());
}
Command::Part {
channel,
reason: _reason,
} => {
// Our own PART drops the whole channel; others just leave it.
if from_self {
state.channels().remove(channel.clone());
return;
}
state
.channels()
.get(&channel)
.expect("existing channel")
.remove(msg.get_nick());
}
Command::Quit { reason: _reason } => {
if from_self {
// let the client clean up
return;
}
// Someone else quit: remove their nick from every channel.
state.channels().clear_nick(msg.get_nick());
}
Command::Nick { nickname } => {
if from_self {
state.set_nickname(nickname.clone());
return;
}
state
.channels()
.update_nick(msg.get_nick(), nickname.clone());
}
Command::Other {
command: _command,
params: _params,
} => {
// need to periodically do a /who or /names #channel
}
Command::Reply { numeric, params } => match numeric {
// 001 RPL_WELCOME: the server confirms our nickname.
1 => state.set_nickname(params[0].clone()),
// 433 ERR_NICKNAMEINUSE: retry with a trailing underscore.
433 => self.nick(format!("{}_", params[1])),
// TODO more numerics
_ => {}
},
_ => {
// what should be done here?
}
};
}
/// Sends everything queued in `buf` — messages written before the
/// connection was established.
fn flush(&self) {
// NOTE(review): the `buf` write guard lives for the whole loop (the drain
// borrows it), so `write` is called while holding it — confirm this lock
// ordering against `write`'s own locking.
for msg in { self.buf.write().unwrap().drain(..) } {
self.write(&msg);
}
}
}
impl IrcClient for Inner {
    /// Sends `data` on the socket, or queues it if not yet connected.
    ///
    /// `data` is expected to end in CRLF; the trace messages strip the final
    /// two bytes when logging.
    fn write(&self, data: &[u8]) {
        use std::io::Write;
        use std::str;
        // Lock the stream once instead of the original lock-check-unlock-relock
        // (which also needed an `as_ref().unwrap()` on the second acquisition).
        let mut guard = self.stream.lock().unwrap();
        if let Some(w) = guard.as_mut() {
            trace!(
                ">> {}",
                str::from_utf8(&data[..data.len() - 2]).expect("valid utf-8")
            );
            // TODO split this as 510 chunks (512 - CRLF)
            w.write_all(data).expect("write");
            return;
        }
        // Not connected yet: queue for `flush`. Drop the stream guard before
        // taking the buffer lock so the lock order stays compatible with
        // `flush`, which holds the buffer lock while calling `write`.
        drop(guard);
        trace!(
            "queueing: {}",
            str::from_utf8(&data[..data.len() - 2]).expect("valid utf-8")
        );
        // NOTE(review): as in the original, a connect completing between the
        // check and the push could leave this message queued until the next
        // flush — confirm whether that window matters in practice.
        self.buf.write().unwrap().push_back(data.to_vec());
    }
    /// Shuts down both directions of the socket if connected; no-op otherwise.
    fn close(&self) {
        use std::net::Shutdown;
        if let Some(writer) = &*self.stream.lock().unwrap() {
            writer.shutdown(Shutdown::Both).expect("shutdown TcpStream");
        }
    }
}
|
// Quiet diesel warnings https://github.com/diesel-rs/diesel/issues/1785
#![allow(proc_macro_derive_resolution_fallback)]
// Force these as errors so that they are not lost in all the diesel warnings
#![deny(unreachable_patterns)]
#![deny(unknown_lints)]
#![deny(unused_variables)]
#![deny(unused_imports)]
// Unused results is more often than not an error
#![deny(unused_must_use)]
#![deny(unused_extern_crates)]
#![deny(dead_code)]
extern crate bigneon_db;
extern crate chrono;
extern crate diesel;
extern crate rand;
#[macro_use]
extern crate serde_json;
extern crate time;
extern crate uuid;
extern crate validator;
#[macro_use]
extern crate macros;
mod unit;
|
#![doc = "generated by AutoRust 0.1.0"]
#![allow(non_camel_case_types)]
#![allow(unused_imports)]
use serde::{Deserialize, Serialize};
/// Page of `ActionResponse` items; `next_link` is present when more pages exist.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ActionsList {
#[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")]
pub next_link: Option<String>,
pub value: Vec<ActionResponse>,
}
/// Request body for creating/updating an action; flattens `ResourceWithEtag`.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ActionRequest {
#[serde(flatten)]
pub resource_with_etag: ResourceWithEtag,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub properties: Option<ActionRequestProperties>,
}
/// Action request payload: base properties plus the required trigger URI.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ActionRequestProperties {
#[serde(flatten)]
pub action_properties_base: ActionPropertiesBase,
#[serde(rename = "triggerUri")]
pub trigger_uri: String,
}
/// Action as returned by the service; flattens `Resource` and carries an etag.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ActionResponse {
#[serde(flatten)]
pub resource: Resource,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub etag: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub properties: Option<ActionResponseProperties>,
}
/// Action response payload: base properties plus the backing workflow id.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ActionResponseProperties {
#[serde(flatten)]
pub action_properties_base: ActionPropertiesBase,
#[serde(rename = "workflowId", default, skip_serializing_if = "Option::is_none")]
pub workflow_id: Option<String>,
}
/// Common action properties: the Logic App resource the action points at.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ActionPropertiesBase {
#[serde(rename = "logicAppResourceId")]
pub logic_app_resource_id: String,
}
/// Base alert rule resource; `kind` selects the concrete rule variant.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct AlertRule {
#[serde(flatten)]
pub resource_with_etag: ResourceWithEtag,
pub kind: AlertRuleKind,
}
/// Discriminator for the concrete alert-rule type.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum AlertRuleKind {
Scheduled,
MicrosoftSecurityIncidentCreation,
Fusion,
}
/// Base alert rule template resource; `kind` selects the template variant.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct AlertRuleTemplate {
#[serde(flatten)]
pub resource: Resource,
pub kind: AlertRuleKind,
}
/// Data connector a template depends on, with the data types it needs.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct AlertRuleTemplateDataSource {
#[serde(rename = "connectorId", default, skip_serializing_if = "Option::is_none")]
pub connector_id: Option<String>,
#[serde(rename = "dataTypes", default, skip_serializing_if = "Vec::is_empty")]
pub data_types: Vec<String>,
}
/// Availability status of an alert rule template.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum AlertRuleTemplateStatus {
Installed,
Available,
NotAvailable,
}
/// Comparison operator applied to a scheduled rule's trigger threshold.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum AlertRuleTriggerOperator {
GreaterThan,
LessThan,
Equal,
NotEqual,
}
/// Page of `AlertRule` items; `next_link` is present when more pages exist.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct AlertRulesList {
#[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")]
pub next_link: Option<String>,
pub value: Vec<AlertRule>,
}
/// Page of `AlertRuleTemplate` items; `next_link` is present when more pages exist.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct AlertRuleTemplatesList {
#[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")]
pub next_link: Option<String>,
pub value: Vec<AlertRuleTemplate>,
}
/// Severity of an alert.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum AlertSeverity {
High,
Medium,
Low,
Informational,
}
/// Attack tactic categories (MITRE ATT&CK-style names).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum AttackTactic {
InitialAccess,
Execution,
Persistence,
PrivilegeEscalation,
DefenseEvasion,
CredentialAccess,
Discovery,
LateralMovement,
Collection,
Exfiltration,
CommandAndControl,
Impact,
}
/// Fusion-kind alert rule: base `AlertRule` plus fusion-specific properties.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct FusionAlertRule {
#[serde(flatten)]
pub alert_rule: AlertRule,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub properties: Option<FusionAlertRuleProperties>,
}
/// Properties of a fusion alert rule; only `alert_rule_template_name` and
/// `enabled` are required on the wire.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct FusionAlertRuleProperties {
#[serde(rename = "alertRuleTemplateName")]
pub alert_rule_template_name: String,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub description: Option<String>,
#[serde(rename = "displayName", default, skip_serializing_if = "Option::is_none")]
pub display_name: Option<String>,
pub enabled: bool,
#[serde(rename = "lastModifiedUtc", default, skip_serializing_if = "Option::is_none")]
pub last_modified_utc: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub severity: Option<AlertSeverity>,
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub tactics: Vec<AttackTactic>,
}
/// Fusion-kind alert rule template.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct FusionAlertRuleTemplate {
#[serde(flatten)]
pub alert_rule_template: AlertRuleTemplate,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub properties: Option<FusionAlertRuleTemplateProperties>,
}
/// Properties of a fusion alert rule template; all fields optional/defaulted.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct FusionAlertRuleTemplateProperties {
#[serde(rename = "alertRulesCreatedByTemplateCount", default, skip_serializing_if = "Option::is_none")]
pub alert_rules_created_by_template_count: Option<i32>,
#[serde(rename = "createdDateUTC", default, skip_serializing_if = "Option::is_none")]
pub created_date_utc: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub description: Option<String>,
#[serde(rename = "displayName", default, skip_serializing_if = "Option::is_none")]
pub display_name: Option<String>,
#[serde(rename = "requiredDataConnectors", default, skip_serializing_if = "Vec::is_empty")]
pub required_data_connectors: Vec<AlertRuleTemplateDataSource>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub status: Option<AlertRuleTemplateStatus>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub severity: Option<AlertSeverity>,
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub tactics: Vec<AttackTactic>,
}
/// Incident-creation-kind alert rule.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct MicrosoftSecurityIncidentCreationAlertRule {
#[serde(flatten)]
pub alert_rule: AlertRule,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub properties: Option<MicrosoftSecurityIncidentCreationAlertRuleProperties>,
}
/// Filter settings shared by incident-creation rules and their templates;
/// `product_filter` is the only required field.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct MicrosoftSecurityIncidentCreationAlertRuleCommonProperties {
#[serde(rename = "displayNamesFilter", default, skip_serializing_if = "Vec::is_empty")]
pub display_names_filter: Vec<String>,
#[serde(rename = "displayNamesExcludeFilter", default, skip_serializing_if = "Vec::is_empty")]
pub display_names_exclude_filter: Vec<String>,
#[serde(rename = "productFilter")]
pub product_filter: MicrosoftSecurityProductName,
#[serde(rename = "severitiesFilter", default, skip_serializing_if = "Vec::is_empty")]
pub severities_filter: Vec<AlertSeverity>,
}
/// Incident-creation rule properties: common filters plus rule metadata;
/// `display_name` and `enabled` are required.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct MicrosoftSecurityIncidentCreationAlertRuleProperties {
#[serde(flatten)]
pub microsoft_security_incident_creation_alert_rule_common_properties: MicrosoftSecurityIncidentCreationAlertRuleCommonProperties,
#[serde(rename = "alertRuleTemplateName", default, skip_serializing_if = "Option::is_none")]
pub alert_rule_template_name: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub description: Option<String>,
#[serde(rename = "displayName")]
pub display_name: String,
pub enabled: bool,
#[serde(rename = "lastModifiedUtc", default, skip_serializing_if = "Option::is_none")]
pub last_modified_utc: Option<String>,
}
/// Incident-creation-kind alert rule template.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct MicrosoftSecurityIncidentCreationAlertRuleTemplate {
#[serde(flatten)]
pub alert_rule_template: AlertRuleTemplate,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub properties: Option<MicrosoftSecurityIncidentCreationAlertRuleTemplateProperties>,
}
/// Incident-creation template properties: template metadata plus the same
/// filter fields as the rule (here inlined rather than flattened);
/// `product_filter` is required.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct MicrosoftSecurityIncidentCreationAlertRuleTemplateProperties {
#[serde(rename = "alertRulesCreatedByTemplateCount", default, skip_serializing_if = "Option::is_none")]
pub alert_rules_created_by_template_count: Option<i32>,
#[serde(rename = "createdDateUTC", default, skip_serializing_if = "Option::is_none")]
pub created_date_utc: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub description: Option<String>,
#[serde(rename = "displayName", default, skip_serializing_if = "Option::is_none")]
pub display_name: Option<String>,
#[serde(rename = "requiredDataConnectors", default, skip_serializing_if = "Vec::is_empty")]
pub required_data_connectors: Vec<AlertRuleTemplateDataSource>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub status: Option<AlertRuleTemplateStatus>,
#[serde(rename = "displayNamesFilter", default, skip_serializing_if = "Vec::is_empty")]
pub display_names_filter: Vec<String>,
#[serde(rename = "displayNamesExcludeFilter", default, skip_serializing_if = "Vec::is_empty")]
pub display_names_exclude_filter: Vec<String>,
#[serde(rename = "productFilter")]
pub product_filter: MicrosoftSecurityProductName,
#[serde(rename = "severitiesFilter", default, skip_serializing_if = "Vec::is_empty")]
pub severities_filter: Vec<AlertSeverity>,
}
/// Microsoft security product a rule filters on; serialized as the
/// product's human-readable name.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum MicrosoftSecurityProductName {
#[serde(rename = "Microsoft Cloud App Security")]
MicrosoftCloudAppSecurity,
#[serde(rename = "Azure Security Center")]
AzureSecurityCenter,
#[serde(rename = "Azure Advanced Threat Protection")]
AzureAdvancedThreatProtection,
#[serde(rename = "Azure Active Directory Identity Protection")]
AzureActiveDirectoryIdentityProtection,
#[serde(rename = "Azure Security Center for IoT")]
AzureSecurityCenterForIoT,
}
/// Scheduled-kind alert rule.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ScheduledAlertRule {
#[serde(flatten)]
pub alert_rule: AlertRule,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub properties: Option<ScheduledAlertRuleProperties>,
}
/// Query/trigger settings shared by scheduled rules and their templates.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ScheduledAlertRuleCommonProperties {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub query: Option<String>,
#[serde(rename = "queryFrequency", default, skip_serializing_if = "Option::is_none")]
pub query_frequency: Option<String>,
#[serde(rename = "queryPeriod", default, skip_serializing_if = "Option::is_none")]
pub query_period: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub severity: Option<AlertSeverity>,
#[serde(rename = "triggerOperator", default, skip_serializing_if = "Option::is_none")]
pub trigger_operator: Option<AlertRuleTriggerOperator>,
#[serde(rename = "triggerThreshold", default, skip_serializing_if = "Option::is_none")]
pub trigger_threshold: Option<i32>,
}
/// Scheduled rule properties: common query settings plus rule metadata;
/// `display_name`, `enabled` and the suppression fields are required.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ScheduledAlertRuleProperties {
#[serde(flatten)]
pub scheduled_alert_rule_common_properties: ScheduledAlertRuleCommonProperties,
#[serde(rename = "alertRuleTemplateName", default, skip_serializing_if = "Option::is_none")]
pub alert_rule_template_name: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub description: Option<String>,
#[serde(rename = "displayName")]
pub display_name: String,
pub enabled: bool,
#[serde(rename = "lastModifiedUtc", default, skip_serializing_if = "Option::is_none")]
pub last_modified_utc: Option<String>,
#[serde(rename = "suppressionDuration")]
pub suppression_duration: String,
#[serde(rename = "suppressionEnabled")]
pub suppression_enabled: bool,
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub tactics: Vec<AttackTactic>,
}
/// Scheduled template properties: template metadata plus the query/trigger
/// fields (here inlined rather than flattened); all optional/defaulted.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ScheduledAlertRuleTemplateProperties {
#[serde(rename = "alertRulesCreatedByTemplateCount", default, skip_serializing_if = "Option::is_none")]
pub alert_rules_created_by_template_count: Option<i32>,
#[serde(rename = "createdDateUTC", default, skip_serializing_if = "Option::is_none")]
pub created_date_utc: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub description: Option<String>,
#[serde(rename = "displayName", default, skip_serializing_if = "Option::is_none")]
pub display_name: Option<String>,
#[serde(rename = "requiredDataConnectors", default, skip_serializing_if = "Vec::is_empty")]
pub required_data_connectors: Vec<AlertRuleTemplateDataSource>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub status: Option<AlertRuleTemplateStatus>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub query: Option<String>,
#[serde(rename = "queryFrequency", default, skip_serializing_if = "Option::is_none")]
pub query_frequency: Option<String>,
#[serde(rename = "queryPeriod", default, skip_serializing_if = "Option::is_none")]
pub query_period: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub severity: Option<AlertSeverity>,
#[serde(rename = "triggerOperator", default, skip_serializing_if = "Option::is_none")]
pub trigger_operator: Option<AlertRuleTriggerOperator>,
#[serde(rename = "triggerThreshold", default, skip_serializing_if = "Option::is_none")]
pub trigger_threshold: Option<i32>,
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub tactics: Vec<AttackTactic>,
}
/// Scheduled-kind alert rule template.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ScheduledAlertRuleTemplate {
#[serde(flatten)]
pub alert_rule_template: AlertRuleTemplate,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub properties: Option<ScheduledAlertRuleTemplateProperties>,
}
/// Identity of a client/user (e.g. a comment author); all fields optional.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ClientInfo {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub email: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub name: Option<String>,
#[serde(rename = "objectId", default, skip_serializing_if = "Option::is_none")]
pub object_id: Option<String>,
#[serde(rename = "userPrincipalName", default, skip_serializing_if = "Option::is_none")]
pub user_principal_name: Option<String>,
}
/// Top-level error envelope returned by the service.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct CloudError {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub error: Option<ErrorResponse>,
}
/// Incident resource.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct Incident {
#[serde(flatten)]
pub resource_with_etag: ResourceWithEtag,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub properties: Option<IncidentProperties>,
}
/// Derived counts and metadata attached to an incident.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct IncidentAdditionalData {
#[serde(rename = "alertsCount", default, skip_serializing_if = "Option::is_none")]
pub alerts_count: Option<i32>,
#[serde(rename = "bookmarksCount", default, skip_serializing_if = "Option::is_none")]
pub bookmarks_count: Option<i32>,
#[serde(rename = "commentsCount", default, skip_serializing_if = "Option::is_none")]
pub comments_count: Option<i32>,
#[serde(rename = "alertProductNames", default, skip_serializing_if = "Vec::is_empty")]
pub alert_product_names: Vec<String>,
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub tactics: Vec<AttackTactic>,
}
/// Comment attached to an incident.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct IncidentComment {
#[serde(flatten)]
pub resource: Resource,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub properties: Option<IncidentCommentProperties>,
}
/// Page of `IncidentComment` items; `next_link` is present when more pages exist.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct IncidentCommentList {
#[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")]
pub next_link: Option<String>,
pub value: Vec<IncidentComment>,
}
/// Incident comment payload; only `message` is required.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct IncidentCommentProperties {
#[serde(rename = "createdTimeUtc", default, skip_serializing_if = "Option::is_none")]
pub created_time_utc: Option<String>,
pub message: String,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub author: Option<ClientInfo>,
}
/// Label attached to an incident; `label_name` is required.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct IncidentLabel {
#[serde(rename = "labelName")]
pub label_name: String,
#[serde(rename = "labelType", default, skip_serializing_if = "Option::is_none")]
pub label_type: Option<incident_label::LabelType>,
}
/// Enum values scoped to `IncidentLabel`.
pub mod incident_label {
use super::*;
/// Whether the label was applied by a user or by the system.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum LabelType {
User,
System,
}
}
/// Page of `Incident` items; `next_link` is present when more pages exist.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct IncidentList {
#[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")]
pub next_link: Option<String>,
pub value: Vec<Incident>,
}
/// Identity the incident is assigned to; all fields optional.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct IncidentOwnerInfo {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub email: Option<String>,
#[serde(rename = "assignedTo", default, skip_serializing_if = "Option::is_none")]
pub assigned_to: Option<String>,
#[serde(rename = "objectId", default, skip_serializing_if = "Option::is_none")]
pub object_id: Option<String>,
#[serde(rename = "userPrincipalName", default, skip_serializing_if = "Option::is_none")]
pub user_principal_name: Option<String>,
}
/// Incident payload; `severity`, `status` and `title` are required, the rest
/// is optional/defaulted.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct IncidentProperties {
#[serde(rename = "additionalData", default, skip_serializing_if = "Option::is_none")]
pub additional_data: Option<IncidentAdditionalData>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub classification: Option<incident_properties::Classification>,
#[serde(rename = "classificationComment", default, skip_serializing_if = "Option::is_none")]
pub classification_comment: Option<String>,
#[serde(rename = "classificationReason", default, skip_serializing_if = "Option::is_none")]
pub classification_reason: Option<incident_properties::ClassificationReason>,
#[serde(rename = "createdTimeUtc", default, skip_serializing_if = "Option::is_none")]
pub created_time_utc: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub description: Option<String>,
#[serde(rename = "firstActivityTimeUtc", default, skip_serializing_if = "Option::is_none")]
pub first_activity_time_utc: Option<String>,
#[serde(rename = "incidentUrl", default, skip_serializing_if = "Option::is_none")]
pub incident_url: Option<String>,
#[serde(rename = "incidentNumber", default, skip_serializing_if = "Option::is_none")]
pub incident_number: Option<i32>,
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub labels: Vec<IncidentLabel>,
#[serde(rename = "lastActivityTimeUtc", default, skip_serializing_if = "Option::is_none")]
pub last_activity_time_utc: Option<String>,
#[serde(rename = "lastModifiedTimeUtc", default, skip_serializing_if = "Option::is_none")]
pub last_modified_time_utc: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub owner: Option<IncidentOwnerInfo>,
#[serde(rename = "relatedAnalyticRuleIds", default, skip_serializing_if = "Vec::is_empty")]
pub related_analytic_rule_ids: Vec<String>,
pub severity: IncidentSeverityEnum,
pub status: incident_properties::Status,
pub title: String,
}
/// Enum values scoped to `IncidentProperties`.
pub mod incident_properties {
use super::*;
/// Outcome classification applied when an incident is closed.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum Classification {
Undetermined,
TruePositive,
BenignPositive,
FalsePositive,
}
/// Reason backing the chosen classification.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum ClassificationReason {
SuspiciousActivity,
SuspiciousButExpected,
IncorrectAlertLogic,
InaccurateData,
}
/// Lifecycle status of an incident.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum Status {
New,
Active,
Closed,
}
}
/// Severity of an incident (same levels as `AlertSeverity`).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum IncidentSeverityEnum {
High,
Medium,
Low,
Informational,
}
/// Office 365 consent resource.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct OfficeConsent {
#[serde(flatten)]
pub resource: Resource,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub properties: Option<OfficeConsentProperties>,
}
/// Page of `OfficeConsent` items; `next_link` is present when more pages exist.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct OfficeConsentList {
#[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")]
pub next_link: Option<String>,
pub value: Vec<OfficeConsent>,
}
/// Tenant the consent was granted for.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct OfficeConsentProperties {
#[serde(rename = "tenantId", default, skip_serializing_if = "Option::is_none")]
pub tenant_id: Option<String>,
#[serde(rename = "tenantName", default, skip_serializing_if = "Option::is_none")]
pub tenant_name: Option<String>,
}
/// REST API operation descriptor (for the operations listing endpoint).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct Operation {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub display: Option<operation::Display>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub name: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub origin: Option<String>,
}
/// Types scoped to `Operation`.
pub mod operation {
use super::*;
/// Human-readable display strings for an operation.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct Display {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub description: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub operation: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub provider: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub resource: Option<String>,
}
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct OperationsList {
#[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")]
pub next_link: Option<String>,
pub value: Vec<Operation>,
}
/// Common resource identity fields (id, name, type).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct Resource {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub id: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub name: Option<String>,
// `type` is a Rust keyword, hence the `type_` field renamed via serde.
#[serde(rename = "type", default, skip_serializing_if = "Option::is_none")]
pub type_: Option<String>,
}
/// Resource identity fields plus an `etag` for optimistic concurrency.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ResourceWithEtag {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub id: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub name: Option<String>,
#[serde(rename = "type", default, skip_serializing_if = "Option::is_none")]
pub type_: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub etag: Option<String>,
}
/// Threat-intelligence details attached to an entity or alert.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ThreatIntelligence {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub confidence: Option<f64>,
#[serde(rename = "providerName", default, skip_serializing_if = "Option::is_none")]
pub provider_name: Option<String>,
#[serde(rename = "reportLink", default, skip_serializing_if = "Option::is_none")]
pub report_link: Option<String>,
#[serde(rename = "threatDescription", default, skip_serializing_if = "Option::is_none")]
pub threat_description: Option<String>,
#[serde(rename = "threatName", default, skip_serializing_if = "Option::is_none")]
pub threat_name: Option<String>,
#[serde(rename = "threatType", default, skip_serializing_if = "Option::is_none")]
pub threat_type: Option<String>,
}
/// A user reference; only `object_id` is required on the wire.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct UserInfo {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub email: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub name: Option<String>,
#[serde(rename = "objectId")]
pub object_id: String,
}
/// A bookmark resource; the base `ResourceWithEtag` fields are flattened in.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct Bookmark {
#[serde(flatten)]
pub resource_with_etag: ResourceWithEtag,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub properties: Option<BookmarkProperties>,
}
/// One page of `Bookmark` results; `next_link` points at the next page, if any.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct BookmarkList {
#[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")]
pub next_link: Option<String>,
pub value: Vec<Bookmark>,
}
/// Payload of a `Bookmark`; `display_name` and `query` are required on the wire.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct BookmarkProperties {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub created: Option<String>,
#[serde(rename = "createdBy", default, skip_serializing_if = "Option::is_none")]
pub created_by: Option<UserInfo>,
#[serde(rename = "displayName")]
pub display_name: String,
// An empty label list is omitted when serializing.
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub labels: Vec<Label>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub notes: Option<String>,
pub query: String,
#[serde(rename = "queryResult", default, skip_serializing_if = "Option::is_none")]
pub query_result: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub updated: Option<String>,
#[serde(rename = "updatedBy", default, skip_serializing_if = "Option::is_none")]
pub updated_by: Option<UserInfo>,
#[serde(rename = "eventTime", default, skip_serializing_if = "Option::is_none")]
pub event_time: Option<String>,
#[serde(rename = "queryStartTime", default, skip_serializing_if = "Option::is_none")]
pub query_start_time: Option<String>,
#[serde(rename = "queryEndTime", default, skip_serializing_if = "Option::is_none")]
pub query_end_time: Option<String>,
#[serde(rename = "incidentInfo", default, skip_serializing_if = "Option::is_none")]
pub incident_info: Option<IncidentInfo>,
}
/// Reference linking a bookmark to an incident.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct IncidentInfo {
#[serde(rename = "incidentId", default, skip_serializing_if = "Option::is_none")]
pub incident_id: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub severity: Option<IncidentSeverityEnum>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub title: Option<String>,
#[serde(rename = "relationName", default, skip_serializing_if = "Option::is_none")]
pub relation_name: Option<String>,
}
/// A bookmark label; plain text on the wire.
pub type Label = String;
/// Azure Active Directory data connector; base `DataConnector` fields flattened in.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct AadDataConnector {
#[serde(flatten)]
pub data_connector: DataConnector,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub properties: Option<AadDataConnectorProperties>,
}
/// Payload of an `AadDataConnector`: tenant and alert data types.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct AadDataConnectorProperties {
#[serde(rename = "tenantId", default, skip_serializing_if = "Option::is_none")]
pub tenant_id: Option<String>,
#[serde(rename = "dataTypes", default, skip_serializing_if = "Option::is_none")]
pub data_types: Option<AlertsDataTypeOfDataConnector>,
}
/// Azure Advanced Threat Protection data connector.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct AatpDataConnector {
#[serde(flatten)]
pub data_connector: DataConnector,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub properties: Option<AatpDataConnectorProperties>,
}
/// Payload of an `AatpDataConnector`: tenant and alert data types.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct AatpDataConnectorProperties {
#[serde(rename = "tenantId", default, skip_serializing_if = "Option::is_none")]
pub tenant_id: Option<String>,
#[serde(rename = "dataTypes", default, skip_serializing_if = "Option::is_none")]
pub data_types: Option<AlertsDataTypeOfDataConnector>,
}
/// Azure Security Center data connector.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct AscDataConnector {
#[serde(flatten)]
pub data_connector: DataConnector,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub properties: Option<AscDataConnectorProperties>,
}
/// Payload of an `AscDataConnector`: common alert properties plus a subscription id.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct AscDataConnectorProperties {
#[serde(flatten)]
pub data_connector_with_alerts_properties: DataConnectorWithAlertsProperties,
#[serde(rename = "subscriptionId", default, skip_serializing_if = "Option::is_none")]
pub subscription_id: Option<String>,
}
/// Alert data-type settings shared by several connector property types.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct AlertsDataTypeOfDataConnector {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub alerts: Option<DataConnectorDataTypeCommon>,
}
/// Amazon Web Services CloudTrail data connector.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct AwsCloudTrailDataConnector {
#[serde(flatten)]
pub data_connector: DataConnector,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub properties: Option<AwsCloudTrailDataConnectorProperties>,
}
/// Data types available to the CloudTrail connector; `logs` is schemaless JSON.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct AwsCloudTrailDataConnectorDataTypes {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub logs: Option<serde_json::Value>,
}
/// Payload of an `AwsCloudTrailDataConnector`: role ARN and data types.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct AwsCloudTrailDataConnectorProperties {
#[serde(rename = "awsRoleArn", default, skip_serializing_if = "Option::is_none")]
pub aws_role_arn: Option<String>,
#[serde(rename = "dataTypes", default, skip_serializing_if = "Option::is_none")]
pub data_types: Option<AwsCloudTrailDataConnectorDataTypes>,
}
/// Base data connector: resource identity plus the connector `kind` discriminant.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct DataConnector {
#[serde(flatten)]
pub resource_with_etag: ResourceWithEtag,
pub kind: DataConnectorKind,
}
/// Discriminant naming the concrete connector type.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum DataConnectorKind {
AzureActiveDirectory,
AzureSecurityCenter,
MicrosoftCloudAppSecurity,
ThreatIntelligence,
Office365,
AmazonWebServicesCloudTrail,
AzureAdvancedThreatProtection,
MicrosoftDefenderAdvancedThreatProtection,
}
/// One page of `DataConnector` results; `next_link` points at the next page, if any.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct DataConnectorList {
#[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")]
pub next_link: Option<String>,
pub value: Vec<DataConnector>,
}
/// Common on/off state wrapper for a connector data type.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct DataConnectorDataTypeCommon {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub state: Option<data_connector_data_type_common::State>,
}
/// Types nested under `DataConnectorDataTypeCommon`.
pub mod data_connector_data_type_common {
use super::*;
/// Whether the data type is being collected.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum State {
Enabled,
Disabled,
}
}
/// Tenant-id-only fragment shared by connector property types.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct DataConnectorTenantId {
#[serde(rename = "tenantId", default, skip_serializing_if = "Option::is_none")]
pub tenant_id: Option<String>,
}
/// Alert data-types fragment shared by connector property types.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct DataConnectorWithAlertsProperties {
#[serde(rename = "dataTypes", default, skip_serializing_if = "Option::is_none")]
pub data_types: Option<AlertsDataTypeOfDataConnector>,
}
/// Microsoft Cloud App Security data connector.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct McasDataConnector {
#[serde(flatten)]
pub data_connector: DataConnector,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub properties: Option<McasDataConnectorProperties>,
}
/// Data types for MCAS: base alert types plus discovery logs.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct McasDataConnectorDataTypes {
#[serde(flatten)]
pub alerts_data_type_of_data_connector: AlertsDataTypeOfDataConnector,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub alerts: Option<DataConnectorDataTypeCommon>,
#[serde(rename = "discoveryLogs", default, skip_serializing_if = "Option::is_none")]
pub discovery_logs: Option<DataConnectorDataTypeCommon>,
}
/// Payload of an `McasDataConnector`: tenant and data types.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct McasDataConnectorProperties {
#[serde(rename = "tenantId", default, skip_serializing_if = "Option::is_none")]
pub tenant_id: Option<String>,
#[serde(rename = "dataTypes", default, skip_serializing_if = "Option::is_none")]
pub data_types: Option<McasDataConnectorDataTypes>,
}
/// Microsoft Defender Advanced Threat Protection data connector.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct MdatpDataConnector {
#[serde(flatten)]
pub data_connector: DataConnector,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub properties: Option<MdatpDataConnectorProperties>,
}
/// Payload of an `MdatpDataConnector`: tenant and alert data types.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct MdatpDataConnectorProperties {
#[serde(rename = "tenantId", default, skip_serializing_if = "Option::is_none")]
pub tenant_id: Option<String>,
#[serde(rename = "dataTypes", default, skip_serializing_if = "Option::is_none")]
pub data_types: Option<AlertsDataTypeOfDataConnector>,
}
/// Threat Intelligence data connector.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct TiDataConnector {
#[serde(flatten)]
pub data_connector: DataConnector,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub properties: Option<TiDataConnectorProperties>,
}
/// Data types for the TI connector; `indicators` is schemaless JSON.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct TiDataConnectorDataTypes {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub indicators: Option<serde_json::Value>,
}
/// Payload of a `TiDataConnector`: tenant, lookback period and data types.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct TiDataConnectorProperties {
#[serde(rename = "tenantId", default, skip_serializing_if = "Option::is_none")]
pub tenant_id: Option<String>,
#[serde(rename = "tipLookbackPeriod", default, skip_serializing_if = "Option::is_none")]
pub tip_lookback_period: Option<String>,
#[serde(rename = "dataTypes", default, skip_serializing_if = "Option::is_none")]
pub data_types: Option<TiDataConnectorDataTypes>,
}
/// Office 365 data connector.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct OfficeDataConnector {
#[serde(flatten)]
pub data_connector: DataConnector,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub properties: Option<OfficeDataConnectorProperties>,
}
/// Data types for the Office connector; each is schemaless JSON.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct OfficeDataConnectorDataTypes {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub exchange: Option<serde_json::Value>,
#[serde(rename = "sharePoint", default, skip_serializing_if = "Option::is_none")]
pub share_point: Option<serde_json::Value>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub teams: Option<serde_json::Value>,
}
/// Payload of an `OfficeDataConnector`: tenant and data types.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct OfficeDataConnectorProperties {
#[serde(rename = "tenantId", default, skip_serializing_if = "Option::is_none")]
pub tenant_id: Option<String>,
#[serde(rename = "dataTypes", default, skip_serializing_if = "Option::is_none")]
pub data_types: Option<OfficeDataConnectorDataTypes>,
}
/// Service error body; `details` may nest further errors of the same shape.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ErrorResponse {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub code: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub message: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub target: Option<String>,
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub details: Vec<ErrorResponse>,
#[serde(rename = "additionalInfo", default, skip_serializing_if = "Vec::is_empty")]
pub additional_info: Vec<ErrorAdditionalInfo>,
}
/// Extra typed info attached to an `ErrorResponse`; `info` is schemaless JSON.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ErrorAdditionalInfo {
#[serde(rename = "type", default, skip_serializing_if = "Option::is_none")]
pub type_: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub info: Option<serde_json::Value>,
}
|
/// Demo entry point: prints the final element of a small vector
/// (no trailing newline), matching the original's indexed access.
fn main() {
    let numbers = vec![1, 1, 2];
    // The vector is non-empty by construction, so `last()` always succeeds.
    let last = numbers.last().expect("vector is non-empty");
    print!("{}", last);
}
use alloc::vec::Vec;
use core::cmp::Ordering;
use util::{impl_md_flow, uint_from_bytes, Hash};
use crate::consts::{ch, maj, parity, IV, K};
// Expands the first 16 message words into the remaining entries of the
// 80-word schedule `w`: each listed index `t` becomes
// w[t-3] ^ w[t-8] ^ w[t-14] ^ w[t-16].
// Note there is no 1-bit left rotation of the result here — SHA-1 adds one;
// its absence is what makes this SHA-0.
macro_rules! init_w {
( $w:expr, $( $t:expr ),* ) => {
$(
$w[$t] = $w[$t - 3] ^ $w[$t - 8] ^ $w[$t - 14] ^ $w[$t - 16];
)*
};
}
// One compression round per listed index `$t`:
//   temp = rotl5(a) + f(b, c, d) + e + w[t] + K[t / 20]   (all wrapping)
// then the working variables shift down (e<-d, d<-c, c<-rotl30(b), b<-a,
// a<-temp). `$f` selects the round function (ch, parity or maj) and
// K[t / 20] picks the constant for the current group of 20 rounds.
macro_rules! round {
($temp:expr, $a:expr, $b:expr, $c:expr, $d:expr, $e:expr, $f:ident, $w:expr, $( $t:expr ),+) => {
$(
$temp = $a
.rotate_left(5)
.wrapping_add($f($b, $c, $d))
.wrapping_add($e)
.wrapping_add($w[$t])
.wrapping_add(K[$t / 20]);
$e = $d;
$d = $c;
$c = $b.rotate_left(30);
$b = $a;
$a = $temp;
)*
};
}
/// SHA-0 hasher; `status` holds the five 32-bit state registers (a..e).
pub struct Sha0 {
status: [u32; 5],
}
impl Sha0 {
/// Creates a hasher with the state set to the standard IV (see `Default`).
pub fn new() -> Self {
Self::default()
}
/// Processes one 512-bit message block (sixteen 32-bit words),
/// folding the result back into `self.status`.
#[allow(clippy::many_single_char_names, clippy::needless_late_init)]
fn compress(&mut self, m: &[u32; 16]) {
// Load the working variables a..e from the current state.
let [mut a, mut b, mut c, mut d, mut e] = self.status;
let mut temp;
// Build the 80-word schedule: the block itself, then the expansion.
let mut w = [0; 80];
w[..16].copy_from_slice(m);
init_w!(
w, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36,
37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58,
59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79
);
// Round 1 (t = 0..=19): `ch` round function.
round!(
temp, a, b, c, d, e, ch, w, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,
17, 18, 19
);
// Round 2 (t = 20..=39): `parity` round function.
round!(
temp, a, b, c, d, e, parity, w, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33,
34, 35, 36, 37, 38, 39
);
// Round 3 (t = 40..=59): `maj` round function.
round!(
temp, a, b, c, d, e, maj, w, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53,
54, 55, 56, 57, 58, 59
);
// Round 4 (t = 60..=79): `parity` round function again.
round!(
temp, a, b, c, d, e, parity, w, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73,
74, 75, 76, 77, 78, 79
);
// Add the working variables back into the state (mod 2^32).
self.status[0] = self.status[0].wrapping_add(a);
self.status[1] = self.status[1].wrapping_add(b);
self.status[2] = self.status[2].wrapping_add(c);
self.status[3] = self.status[3].wrapping_add(d);
self.status[4] = self.status[4].wrapping_add(e);
}
}
impl Default for Sha0 {
    /// Creates a `Sha0` whose state registers hold the initialization vector `IV`.
    fn default() -> Self {
        let status = IV;
        Self { status }
    }
}
impl Hash for Sha0 {
/// Hashes `message` and returns the 20-byte digest.
fn hash_to_bytes(&mut self, message: &[u8]) -> Vec<u8> {
// `impl_md_flow!` (from `util`) drives the block iteration over `message`,
// feeding `self` with big-endian 32-bit words.
impl_md_flow!(u32=> self, message, from_be_bytes, to_be_bytes);
// Serialise the five state words big-endian to form the digest.
self.status
.iter()
.flat_map(|word| word.to_be_bytes().to_vec())
.collect()
}
}
|
//! Items related to receiving CBM 8032 frame data over serial.
use crate::fps::Fps;
use crate::vis;
use serialport::prelude::*;
use std::cell::RefCell;
use std::io::{self, Write};
use std::sync::atomic::{self, AtomicBool};
use std::sync::{mpsc, Arc};
// Serial link speed in bits per second.
const BAUD_RATE: u32 = 1_500_000;
// Payload bytes carried by each protocol buffer.
const DATA_PER_BUFFER: u32 = 40;
// Buffers per complete frame: 50 screen-data buffers plus one mode buffer
// (see `handle_received_buffer`).
const TOTAL_BUFFERS_PER_FRAME: u32 = 51;
/// A handle to the receiving serial thread.
pub struct Handle {
// Shared flag used both to request shutdown and to observe thread death.
is_closed: Arc<AtomicBool>,
// Join handle for the serial read thread.
thread: std::thread::JoinHandle<()>,
// Receiving end of the frame channel fed by the serial thread.
rx: ChannelRx,
// Frame rate that accompanied the most recently received frame.
last_recorded_frame_hz: RefCell<FrameHz>,
// Description of the port the thread reads from.
port_info: SerialPortInfo,
}
/// Synchronisation state of the byte-level protocol parser.
enum State {
// Scanning for a run of zero bytes marking a frame boundary.
CountingZeros,
// Synchronised: collecting 40-byte buffers, each followed by its buffer number.
InSync,
}
/// Mutable state carried across reads from the serial port.
struct ReceiverContext {
// Raw bytes read from the port, consumed via the index/count pair below.
rx_buffer: [u8; 256],
rx_buffer_index: u32,
rx_buffer_count: u32,
// Current parser synchronisation state.
state: State,
// Number of the protocol buffer currently being received (1-based).
bufnum: u32,
// Bytes accumulated into `buffer` so far (or the zero-run length while syncing).
count: u32,
// Payload of the protocol buffer currently being received.
buffer: [u8; 40],
// Screen data accumulated for the frame under construction.
screen_buffer: Box<vis::Cbm8032FrameData>,
// Frame mode decoded from the frame's final buffer.
graphic: vis::Cbm8032FrameMode,
}
/// Builds a `ReceiverContext` in its initial state: empty read buffer,
/// zeroed counters, waiting to synchronise on a zero run, and a blank
/// graphics-mode screen buffer.
fn init_receiver_context() -> ReceiverContext {
    let screen_buffer = Box::new([0u8; vis::CBM_8032_FRAME_DATA_LEN]);
    ReceiverContext {
        state: State::CountingZeros,
        rx_buffer: [0u8; 256],
        rx_buffer_index: 0,
        rx_buffer_count: 0,
        bufnum: 0,
        count: 0,
        buffer: [0u8; 40],
        screen_buffer,
        graphic: vis::Cbm8032FrameMode::Graphics,
    }
}
// A decoded frame paired with the measured frame rate at the time it was sent.
type Message = (vis::Cbm8032Frame, FrameHz);
type ChannelRx = mpsc::Receiver<Message>;
type ChannelTx = mpsc::Sender<Message>;
// Trait object alias for any opened serial port.
type SerialPortObj = dyn SerialPort;
/// The rate at which the serial stream is producing frames.
#[derive(Clone, Copy, Default)]
pub struct FrameHz {
pub avg: f64,
pub min: f64,
pub max: f64,
}
impl Handle {
    /// Drains the channel and returns the most recent pending frame, if any,
    /// recording the frame rate that arrived with it.
    pub fn try_recv_frame(&self) -> Option<vis::Cbm8032Frame> {
        self.rx.try_iter().last().map(|(frame, hz)| {
            *self.last_recorded_frame_hz.borrow_mut() = hz;
            frame
        })
    }
    /// Produces the last frame sending rate sent by the serial thread.
    pub fn frame_hz(&self) -> FrameHz {
        *self.last_recorded_frame_hz.borrow()
    }
    /// Close the receiving thread: signal it to stop, then wait for it.
    pub fn close(self) {
        self.is_closed.store(true, atomic::Ordering::SeqCst);
        match self.thread.join() {
            Ok(()) => (),
            Err(e) => eprintln!("failed to join serial thread: {:?}", e),
        }
    }
    /// Information about the connected serial port.
    pub fn port_info(&self) -> &SerialPortInfo {
        &self.port_info
    }
    /// Whether or not the stream has closed.
    ///
    /// This can happen if a serious error occurs on the serial thread.
    pub fn is_closed(&self) -> bool {
        self.is_closed.load(atomic::Ordering::SeqCst)
    }
}
/// Returns the first available serial port backed by USB, or `None` when no
/// USB port is present.
///
/// # Errors
///
/// Forwards any error from enumerating the system's serial ports.
fn find_usb_port() -> Result<Option<SerialPortInfo>, serialport::Error> {
    let infos = serialport::available_ports()?;
    // `find` + `matches!` replaces the previous `filter(..).next()` chain and
    // bool-producing match (clippy: `filter_next`, `match_like_matches_macro`).
    let info = infos
        .into_iter()
        .find(|info| matches!(info.port_type, serialport::SerialPortType::UsbPort(_)));
    Ok(info)
}
fn port_settings() -> SerialPortSettings {
let mut settings = SerialPortSettings::default();
settings.baud_rate = BAUD_RATE.into();
settings.timeout = std::time::Duration::from_secs(1);
settings
}
/// Opens the named serial port using `port_settings()`.
///
/// On Linux, first attempts to put the port into low-latency mode via the
/// external `setserial` utility; failures there are reported to stderr but
/// are non-fatal.
fn open_port(name: &str) -> Result<Box<SerialPortObj>, serialport::Error> {
if cfg!(target_os = "linux") {
let res = std::process::Command::new("setserial")
.arg(&name)
.arg("low_latency")
.output();
match res {
Ok(output) => {
// The command ran: surface a non-zero exit status and any stderr output.
if !output.status.success() {
eprintln!("`setserial {} low_latency` was unsuccessful", name);
}
if !output.stderr.is_empty() {
io::stderr().write_all(&output.stderr).ok();
}
}
Err(err) => {
// The command could not be launched at all (e.g. not installed).
eprintln!("failed to execute command `setserial {} low_latency`: {}", name, err);
}
}
}
let settings = port_settings();
serialport::open_with_settings(&name, &settings)
}
/// Decodes the frame's mode byte: zero selects graphics mode, any other
/// value selects text mode.
fn byte_to_mode(byte: u8) -> vis::Cbm8032FrameMode {
    if byte == 0 {
        vis::Cbm8032FrameMode::Graphics
    } else {
        vis::Cbm8032FrameMode::Text
    }
}
/// Commits the just-completed protocol buffer to the frame being built.
///
/// Buffers 1..TOTAL_BUFFERS_PER_FRAME carry screen data and are copied into
/// the matching 40-byte slot of the screen buffer; the final buffer carries
/// the frame mode in its first byte. A `bufnum` of zero is ignored.
fn handle_received_buffer(context: &mut ReceiverContext) {
    match context.bufnum {
        0 => (),
        n if n < TOTAL_BUFFERS_PER_FRAME => {
            let start = ((n - 1) * DATA_PER_BUFFER) as usize;
            let end = start + DATA_PER_BUFFER as usize;
            context.screen_buffer[start..end].copy_from_slice(&context.buffer);
        }
        _ => context.graphic = byte_to_mode(context.buffer[0]),
    }
}
fn handle_sync_loss(context: &ReceiverContext, byte: u8) {
eprintln!(
"out of sync at bufnum {} count {} - received {}\n",
context.bufnum, context.count, byte
);
}
/// Feeds one byte into the protocol state machine.
///
/// Returns `true` exactly when the byte completes a full screen, at which
/// point the parser resets to searching for the next frame boundary.
fn handle_received_byte(context: &mut ReceiverContext, byte: u8) -> bool {
let mut screen_complete = false;
match context.state {
State::CountingZeros => {
// Wait for 41 consecutive zero bytes (a 40-byte buffer plus its
// number byte, all zero) which marks a frame boundary.
if byte == 0 {
context.count += 1;
if context.count == 41 {
context.state = State::InSync;
context.bufnum = 1;
context.count = 0;
}
} else {
// Zero run broken; start counting again.
context.count = 0;
}
}
State::InSync => {
if context.count < 40 {
// Payload byte of the current buffer.
context.buffer[context.count as usize] = byte;
context.count += 1;
} else {
// 41st byte: must match the expected buffer number.
if byte == context.bufnum as u8 {
handle_received_buffer(context);
context.bufnum += 1;
context.count = 0;
// Buffers 1..=51 all received: the screen is complete.
if context.bufnum == 52 {
context.state = State::CountingZeros;
screen_complete = true;
}
} else {
// Unexpected buffer number: report it and resynchronise.
handle_sync_loss(context, byte);
context.state = State::CountingZeros;
context.count = 0;
}
}
}
}
screen_complete
}
/// Reads bytes from `port` until a complete screen has been parsed into
/// `context`, refilling the local receive buffer from the port as needed.
///
/// A `WouldBlock` read is treated as "no data yet"; any other I/O error is
/// returned to the caller.
fn receive_screen(port: &mut Box<SerialPortObj>, context: &mut ReceiverContext) -> io::Result<()> {
loop {
if context.rx_buffer_index == context.rx_buffer_count {
// Local buffer exhausted: pull the next chunk from the serial port.
context.rx_buffer_index = 0;
context.rx_buffer_count = match port.read(&mut context.rx_buffer) {
Err(ref err) if err.kind() == io::ErrorKind::WouldBlock => 0,
Err(err) => return Err(err),
Ok(len) => len as _,
};
} else {
// Feed the next buffered byte into the parser.
let ix = context.rx_buffer_index;
context.rx_buffer_index += 1;
let received_byte = context.rx_buffer[ix as usize];
if handle_received_byte(context, received_byte) {
return Ok(());
}
}
}
}
// Open the serial port and run the read loop.
//
// Repeatedly parses full screens and sends them (with the measured frame
// rate) to the main thread until `is_closed` is set or the channel
// disconnects. On a non-timeout read error, up to three reconnection
// attempts are made before the thread marks the handle closed and gives up.
fn run(mut port: Box<SerialPortObj>, vis_frame_tx: ChannelTx, is_closed: Arc<AtomicBool>) {
let port_name = port.name();
let fps = Fps::default();
let mut context = init_receiver_context();
while !is_closed.load(atomic::Ordering::Relaxed) {
if let Err(e) = receive_screen(&mut port, &mut context) {
// A timeout (see `port_settings`) just means no data arrived; keep waiting.
if let io::ErrorKind::TimedOut = e.kind() {
eprintln!("No serial data received in the last second");
continue;
}
eprintln!("An error occurred while reading from the serial port: {}", e);
if let Some(ref port_name) = port_name {
// Try to re-open the same port a few times before giving up.
let attempts = 3;
for attempt in 0..attempts {
println!("Attempting to re-establish connection with {:?}", port_name);
std::thread::sleep(std::time::Duration::from_secs(1));
match open_port(&port_name) {
Ok(new_port) => port = new_port,
Err(err) => {
eprintln!("failed to connect to port: {}", err);
if attempt < attempts - 1 {
continue;
}
// Out of attempts: signal the handle that this thread is done.
is_closed.store(true, atomic::Ordering::SeqCst);
}
};
break;
}
}
continue;
}
// Construct the frame.
let frame = vis::Cbm8032Frame::new(context.graphic, context.screen_buffer.clone());
// Sample the rate at which serial data is producing frames.
fps.sample();
let avg = fps.avg();
let min = fps.min();
let max = fps.max();
let hz = FrameHz { avg, min, max };
// Send the frame to the main thread.
if vis_frame_tx.send((frame, hz)).is_err() {
eprintln!("lost connecton to main thread, closing serial thread");
return;
}
}
}
/// Spawn a thread for receiving serial data.
///
/// Finds the first USB serial port, opens it, and starts the read loop on a
/// dedicated thread. Returns a `Handle` for receiving frames from that
/// thread, or an error when no USB port is available or it cannot be opened.
pub fn spawn() -> Result<Handle, serialport::Error> {
let is_closed = Arc::new(AtomicBool::new(false));
let is_closed2 = is_closed.clone();
let (tx, rx) = mpsc::channel();
// Fail early when no USB serial port is present.
let info = match find_usb_port()? {
Some(info) => info,
None => {
let desc = "no available serial USB ports".to_string();
let kind = serialport::ErrorKind::NoDevice;
return Err(serialport::Error::new(kind, desc));
}
};
let port = open_port(&info.port_name)?;
let thread = std::thread::Builder::new()
.name("serial_rx_thread".into())
.spawn(move || run(port, tx, is_closed2))
.expect("failed to spawn serial rx thread");
let last_recorded_frame_hz = RefCell::new(FrameHz::default());
Ok(Handle {
is_closed,
rx,
thread,
last_recorded_frame_hz,
port_info: info,
})
}
|
//! This module contains a scheduler.
use std::cell::RefCell;
use std::collections::VecDeque;
use std::rc::Rc;
/// Alias for single-threaded shared mutable state.
pub(crate) type Shared<T> = Rc<RefCell<T>>;
thread_local! {
// One scheduler instance per thread.
static SCHEDULER: Rc<Scheduler> =
Rc::new(Scheduler::new());
}
/// Returns a handle to this thread's scheduler instance.
pub(crate) fn scheduler() -> Rc<Scheduler> {
SCHEDULER.with(Rc::clone)
}
/// A routine which could be run.
pub(crate) trait Runnable {
/// Runs the routine, consuming the boxed instance.
fn run(self: Box<Self>);
}
/// This is a global scheduler suitable to schedule and run any tasks.
#[derive(Clone)]
pub(crate) struct Scheduler {
/// This lock is used to prevent recursion in [Scheduler#start()](Scheduler#start())
lock: Rc<RefCell<()>>,
// Queue for general (non-component) runnables.
main: Shared<VecDeque<Box<dyn Runnable>>>,
// Component-specific queues, drained with priority over `main`.
pub(crate) component: ComponentScheduler,
}
/// Which component queue (or stack) a runnable should be pushed onto.
pub(crate) enum ComponentRunnableType {
Create,
Update,
Render,
Rendered,
Destroy,
}
/// Per-lifecycle-stage work queues for components.
#[derive(Clone)]
pub(crate) struct ComponentScheduler {
// Queues (FIFO), drained in the priority order of `next_runnable`.
destroy: Shared<VecDeque<Box<dyn Runnable>>>,
create: Shared<VecDeque<Box<dyn Runnable>>>,
update: Shared<VecDeque<Box<dyn Runnable>>>,
render: Shared<VecDeque<Box<dyn Runnable>>>,
// Stack (LIFO) of post-render callbacks.
rendered: Shared<Vec<Box<dyn Runnable>>>,
}
impl ComponentScheduler {
fn new() -> Self {
ComponentScheduler {
destroy: Rc::new(RefCell::new(VecDeque::new())),
create: Rc::new(RefCell::new(VecDeque::new())),
update: Rc::new(RefCell::new(VecDeque::new())),
render: Rc::new(RefCell::new(VecDeque::new())),
rendered: Rc::new(RefCell::new(Vec::new())),
}
}
pub(crate) fn push_update_batch(&self, it: impl IntoIterator<Item = Box<dyn Runnable>>) {
self.update.borrow_mut().extend(it);
}
pub(crate) fn push(&self, run_type: ComponentRunnableType, runnable: Box<dyn Runnable>) {
match run_type {
ComponentRunnableType::Create => self.create.borrow_mut().push_back(runnable),
ComponentRunnableType::Update => self.update.borrow_mut().push_back(runnable),
ComponentRunnableType::Render => self.render.borrow_mut().push_back(runnable),
ComponentRunnableType::Rendered => self.rendered.borrow_mut().push(runnable),
ComponentRunnableType::Destroy => self.destroy.borrow_mut().push_back(runnable),
};
}
fn next_runnable(&self) -> Option<Box<dyn Runnable>> {
self.destroy
.borrow_mut()
.pop_front()
.or_else(|| self.create.borrow_mut().pop_front())
.or_else(|| self.update.borrow_mut().pop_front())
.or_else(|| self.render.borrow_mut().pop_front())
.or_else(|| self.rendered.borrow_mut().pop())
}
}
impl Scheduler {
/// Creates an empty scheduler.
fn new() -> Self {
Scheduler {
lock: Rc::new(RefCell::new(())),
main: Rc::new(RefCell::new(VecDeque::new())),
component: ComponentScheduler::new(),
}
}
/// Enqueues a runnable on the main queue, then immediately attempts to
/// drain all queues (a no-op when a drain is already in progress; see `start`).
pub(crate) fn push(&self, runnable: Box<dyn Runnable>) {
self.main.borrow_mut().push_back(runnable);
self.start();
}
/// Takes the next runnable, giving component work priority over the main queue.
fn next_runnable(&self) -> Option<Box<dyn Runnable>> {
self.component
.next_runnable()
.or_else(|| self.main.borrow_mut().pop_front())
}
/// Runs queued runnables until every queue is empty.
pub(crate) fn start(&self) {
// The lock is used to prevent recursion. If the lock
// cannot be acquired, it is because the `start()` method
// is being called recursively as part of a `runnable.run()`.
if let Ok(_lock) = self.lock.try_borrow_mut() {
while let Some(runnable) = self.next_runnable() {
runnable.run();
}
}
}
}
|
//! Generate G-Code with functional operations describing the motion of the machine that the created G-code should produce
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn it_works() {
assert_eq!(2 + 2, 4);
}
#[test]
fn test_move_xy() {
let p = Point2d { x: 10.0, y: 5.0 };
let gcode = move_xy(p, None, None);
assert_eq!("G0 X10 Y5\n", gcode);
}
#[test]
fn test_move_xy_with_f() {
let p = Point2d { x: 10.0, y: 5.0 };
let gcode = move_xy(p, Some(400), None);
assert_eq!("G0 X10 Y5 F400\n", gcode);
}
#[test]
fn test_move_xy_with_e() {
let p = Point2d { x: 10.0, y: 5.0 };
let gcode = move_xy(p, None, Some(5.));
assert_eq!("G1 X10 Y5 E5\n", gcode);
}
#[test]
fn test_move_xy_with_f_and_e() {
let p = Point2d { x: 10.0, y: 5.0 };
let gcode = move_xy(p, Some(400), Some(5.));
assert_eq!("G1 X10 Y5 E5 F400\n", gcode);
}
#[test]
fn test_move_xyz() {
let p = Point3d { x: 10.0, y: 5.0, z: 0.2 };
let gcode = move_xyz(p, None, None);
assert_eq!("G0 X10 Y5 Z0.2\n", gcode);
}
#[test]
fn test_move_xyz_with_f() {
let p = Point3d { x: 10.0, y: 5.0, z: 0.17 };
let gcode = move_xyz(p, Some(400), None);
assert_eq!("G0 X10 Y5 Z0.17 F400\n", gcode);
}
#[test]
fn test_move_xyz_with_e() {
let p = Point3d { x: 10.0, y: 5.0, z: 0.17 };
let gcode = move_xyz(p, None, Some(5.));
assert_eq!("G1 X10 Y5 Z0.17 E5\n", gcode);
}
#[test]
fn test_move_xyz_with_f_and_e() {
let p = Point3d { x: 10.0, y: 5.0, z: 0.17 };
let gcode = move_xyz(p, Some(400), Some(5.));
assert_eq!("G1 X10 Y5 Z0.17 E5 F400\n", gcode);
}
#[test]
fn test_arc_ij_i() {
let p = Point2d { x: 125.0, y: 0.0 };
let gcode = move_xy_arc_ij(Some(p), Some(62.5), None, None, false);
assert_eq!("G2 X125 Y0 I62.5\n", gcode);
}
#[test]
fn test_arc_ij_j() {
let p = Point2d { x: 125.0, y: 0.0 };
let gcode = move_xy_arc_ij(Some(p), None, Some(62.5), None, false);
assert_eq!("G2 X125 Y0 J62.5\n", gcode);
}
#[test]
fn test_arc_ij_full_circle() {
let p = Point2d { x: 220.0, y: 110.0 };
// Move to the point (220,110)
let _ = move_xy(p, None, None);
// Make a counter clockwise circle with centerpoint at (110,110)
let gcode = move_xy_arc_ij(None, Some(110.0), Some(110.0), Some(920.0), true);
assert_eq!("G3 I110 J110 E920\n", gcode);
}
#[test]
fn test_set_pos_2d() {
let p = Point2d { x: 125.0, y: 125.0 };
let gcode = set_pos_2d(p, None);
assert_eq!("G92 X125 Y125\n", gcode);
}
#[test]
fn test_set_pos_2d_with_e() {
let p = Point2d { x: 125.0, y: 125.0 };
let gcode = set_pos_2d(p, Some(90.0));
assert_eq!("G92 X125 Y125 E90\n", gcode);
}
#[test]
fn test_set_pos_3d() {
let p = Point3d { x: 125.0, y: 125.0, z: 25.0};
let gcode = set_pos_3d(p, None);
assert_eq!("G92 X125 Y125 Z25\n", gcode);
}
#[test]
fn test_set_pos_3d_with_e() {
let p = Point3d { x: 125.0, y: 125.0, z: 25.0 };
let gcode = set_pos_3d(p, Some(90.0));
assert_eq!("G92 X125 Y125 Z25 E90\n", gcode);
}
#[test]
fn test_reset_extruder() {
let gcode = reset_extruder(0.0);
assert_eq!("G92 E0\n", gcode);
}
#[test]
fn test_rest_pos() {
let gcode = reset_pos();
assert_eq!("G92.1\n", gcode);
}
#[test]
fn test_set_hotend_temp() {
let gcode = set_hotend_temp(210, None);
assert_eq!("M104 S210\n", gcode);
}
#[test]
fn test_set_hotend_temp_non_default_tool() {
let gcode = set_hotend_temp(210, Some(2));
assert_eq!("M104 S210 T2\n", gcode);
}
#[test]
fn test_wait_hotend_temp() {
let gcode = wait_hotend_temp(210, None);
assert_eq!("M109 S210\n", gcode);
}
#[test]
fn test_wait_hotend_temp_non_default_tool() {
let gcode = wait_hotend_temp(210, Some(2));
assert_eq!("M109 S210 T2\n", gcode);
}
#[test]
fn test_set_fan_speed() {
//set default fan to half speed
let gcode = set_fan_speed(128, None);
assert_eq!("M106 S128\n", gcode);
}
#[test]
fn test_set_fan_speed_atl_fan() {
//set alternate fan to full speed
let gcode = set_fan_speed(u8::MAX, Some(1));
assert_eq!("M106 S255 P1\n", gcode);
}
#[test]
fn test_fan_off() {
let gcode = fan_off(None);
assert_eq!("M107\n", gcode);
}
#[test]
fn test_fan_off_alt_fan() {
let gcode = fan_off(Some(3));
assert_eq!("M107 P3\n", gcode);
}
#[test]
fn test_test_bed_temp() {
let gcode = set_bed_temp(210);
assert_eq!("M140 S210\n", gcode);
}
#[test]
fn test_wait_bed_temp() {
let gcode = wait_bed_temp(210);
assert_eq!("M190 S210\n", gcode);
}
#[test]
fn test_test_chamber_temp() {
let gcode = set_chamber_temp(50);
assert_eq!("M141 S50\n", gcode);
}
#[test]
fn test_wait_chamber_temp() {
let gcode = wait_chamber_temp(50);
assert_eq!("M191 S50\n", gcode);
}
#[test]
fn test_auto_home() {
let gcode = auto_home();
assert_eq!("G28\n", gcode);
}
#[test]
fn test_absolute_extrution() {
let gcode = absolute_extrution();
assert_eq!("M82\n", gcode);
}
#[test]
fn test_relative_extrution() {
let gcode = relative_extrution();
assert_eq!("M83\n", gcode);
}
#[test]
fn test_move_z() {
let gcode = move_z(1.8);
assert_eq!("G0 Z1.8\n", gcode);
}
#[test]
fn test_relative_positioning() {
let gcode = relative_positioning();
assert_eq!("G91\n", gcode);
}
#[test]
fn test_absolute_positioning() {
let gcode = absolute_positioning();
assert_eq!("G90\n", gcode);
}
#[test]
fn test_use_inches() {
    // G20 selects inch units.
    let line = use_inches();
    assert_eq!(line, "G20\n");
}
#[test]
fn test_use_millimeters() {
    // G21 selects millimeter units.
    let line = use_millimeters();
    assert_eq!(line, "G21\n");
}
}
/// Defines a 2-dimensional point in the XY Cartesian coordinate system
///
/// # Examples
/// ```
/// extern crate gen_gcode;
/// use gen_gcode::Point2d;
///
/// let p1 = Point2d { x: 0.0, y: 0.0 };
/// let p2 = Point2d { x: 10.0, y: 0.0 };
/// let p3 = Point2d { x: 10.0, y: 10.0 };
/// let p4 = Point2d { x: 0.0, y: 10.0 };
/// let square: Vec<Point2d> = vec!(p1, p2, p3, p4);
/// ```
#[derive(Debug, Copy, Clone)]
pub struct Point2d {
/// X coordinate.
pub x: f32,
/// Y coordinate.
pub y: f32,
}
/// Defines a 3-dimensional point in the XYZ Cartesian coordinate system
///
/// # Examples
/// ```
/// extern crate gen_gcode;
/// use gen_gcode::Point3d;
///
/// let p1 = Point3d { x: 0.0, y: 0.0, z: 0.0 };
/// let p2 = Point3d { x: 10.0, y: 0.0, z: 0.0 };
/// let p3 = Point3d { x: 10.0, y: 10.0, z: 0.0 };
/// let p4 = Point3d { x: 0.0, y: 10.0, z: 0.0 };
/// let p5 = Point3d { x: 0.0, y: 10.0, z: 10.0 };
/// let p6 = Point3d { x: 10.0, y: 10.0, z: 10.0 };
/// let p7 = Point3d { x: 10.0, y: 0.0, z: 10.0 };
/// let p8 = Point3d { x: 0.0, y: 0.0, z: 10.0 };
/// let cube: Vec<Point3d> = vec!(p1, p2, p3, p4, p5, p6, p7, p8);
/// ```
#[derive(Debug, Copy, Clone)]
pub struct Point3d {
/// X coordinate.
pub x: f32,
/// Y coordinate.
pub y: f32,
/// Z coordinate.
pub z: f32,
}
/// Returns a G1 or G0 command as a String
///
/// Emits `G1` (printing move) when `flow_rate` is given, otherwise `G0`
/// (travel move). `feed_rate` adds an ` F<n>` word when present.
///
/// # Examples
/// ```
/// extern crate gen_gcode;
/// use gen_gcode::{Point2d, move_xy};
///
/// let p = Point2d { x: 10.0, y: 5.0 };
/// // move without extruding
/// let gcode = move_xy(p, None, None);
/// assert_eq!("G0 X10 Y5\n", gcode);
/// ```
///
/// ```
/// extern crate gen_gcode;
/// use gen_gcode::{Point2d, move_xy};
///
/// let p = Point2d { x: 10.0, y: 5.0 };
/// // move with extrude
/// let gcode = move_xy(p, None, Some(5.0));
/// assert_eq!("G1 X10 Y5 E5\n", gcode);
/// ```
///
pub fn move_xy(dest: Point2d, feed_rate: Option<u32>, flow_rate: Option<f32>) -> String {
    // Optional feed rate contributes an " F<n>" word only when present.
    let f_str = feed_rate.map_or(String::new(), |f| format!(" F{}", f));
    match flow_rate {
        // Extruding move: G1 with an E word (E precedes F, matching the
        // original output format relied on by the doctests).
        Some(e) => format!("G1 X{} Y{} E{}{}\n", dest.x, dest.y, e, f_str),
        // Travel move: G0, no E word.
        None => format!("G0 X{} Y{}{}\n", dest.x, dest.y, f_str),
    }
}
/// Takes a [Point3d] as input; returns a G1 or G0 command to move in 3 dimensions as a String
///
/// Emits `G1` (printing move) when `flow_rate` is given, otherwise `G0`
/// (travel move). `feed_rate` adds an ` F<n>` word when present.
///
/// # Examples
/// ```
/// extern crate gen_gcode;
/// use gen_gcode::{Point3d, move_xyz};
///
/// let p = Point3d { x: 10.0, y: 5.0, z: 15.0 };
/// // move without extruding
/// let gcode = move_xyz(p, None, None);
/// assert_eq!("G0 X10 Y5 Z15\n", gcode);
/// ```
///
/// ```
/// extern crate gen_gcode;
/// use gen_gcode::{Point3d, move_xyz};
///
/// let p = Point3d { x: 10.0, y: 5.0, z: 0.2 };
/// // move with extrude
/// let gcode = move_xyz(p, None, Some(5.0));
/// assert_eq!("G1 X10 Y5 Z0.2 E5\n", gcode);
/// ```
///
pub fn move_xyz(dest: Point3d, feed_rate: Option<u32>, flow_rate: Option<f32>) -> String {
    // Optional feed rate contributes an " F<n>" word only when present.
    let f_str = feed_rate.map_or(String::new(), |f| format!(" F{}", f));
    match flow_rate {
        // Extruding move: G1 with an E word (E precedes F, matching the
        // original output format relied on by the doctests).
        Some(e) => format!("G1 X{} Y{} Z{} E{}{}\n", dest.x, dest.y, dest.z, e, f_str),
        // Travel move: G0, no E word.
        None => format!("G0 X{} Y{} Z{}{}\n", dest.x, dest.y, dest.z, f_str),
    }
}
/// Takes an [f32] value as a location on the Z axis to move to; returns a G0 command.
/// Useful in layer changes and z-hops, as this function takes no extrusion arguments.
///
/// # Examples
/// ```
/// extern crate gen_gcode;
/// use gen_gcode::move_z;
///
/// let gcode = move_z(1.8);
/// assert_eq!("G0 Z1.8\n", gcode);
/// ```
pub fn move_z(z: f32) -> String {
    format!("G0 Z{}\n", z)
}
/// Returns a G2 or G3 command as a String
///
/// Each optional parameter contributes its G-code word only when present:
/// `dest` -> X/Y, `x_offset` -> I, `y_offset` -> J, `flow_rate` -> E.
/// `ccw` selects G3 (counter-clockwise) over G2 (clockwise).
///
/// # Examples
/// ```
/// extern crate gen_gcode;
/// use gen_gcode::{Point2d, move_xy_arc_ij};
///
/// let p = Point2d { x: 125.0, y: 0.0 };
/// // Create a Clockwise 180 degree Arc starting at 0,0 ending at 125,0 with center point 62.5,0
/// let gcode = move_xy_arc_ij(Some(p), Some(62.5), None, None, false);
/// assert_eq!("G2 X125 Y0 I62.5\n", gcode);
/// ```
///
/// ```
/// extern crate gen_gcode;
/// use gen_gcode::{Point2d, move_xy, move_xy_arc_ij};
///
/// let p = Point2d { x: 220.0, y: 110.0 };
/// // Move to the point (220,110)
/// let _ = move_xy(p, None, None);
/// // Make a counter clockwise circle with centerpoint at (110,110)
/// let gcode = move_xy_arc_ij(None, Some(110.0), Some(110.0), Some(920.0), true);
/// assert_eq!("G3 I110 J110 E920\n", gcode);
/// ```
pub fn move_xy_arc_ij(dest: Option<Point2d>, x_offset: Option<f32>, y_offset: Option<f32>, flow_rate: Option<f32>, ccw: bool) -> String {
    // Point2d is Copy, so `dest` can be consumed by both map_or calls.
    let x_str = dest.map_or(String::new(), |d| format!(" X{}", d.x));
    let y_str = dest.map_or(String::new(), |d| format!(" Y{}", d.y));
    let i_str = x_offset.map_or(String::new(), |i| format!(" I{}", i));
    let j_str = y_offset.map_or(String::new(), |j| format!(" J{}", j));
    let e_str = flow_rate.map_or(String::new(), |e| format!(" E{}", e));
    // G3 = counter-clockwise arc, G2 = clockwise arc.
    let cmd = if ccw { "G3" } else { "G2" };
    format!("{}{}{}{}{}{}\n", cmd, x_str, y_str, i_str, j_str, e_str)
}
/// Returns a G21 command as a String
///
/// Sets units to millimeters
///
/// # Examples
/// ```
/// extern crate gen_gcode;
/// use gen_gcode::use_millimeters;
///
/// let gcode = use_millimeters();
/// assert_eq!("G21\n", gcode);
/// ```
pub fn use_millimeters() -> String {
    "G21\n".to_string()
}
/// Returns a G20 command as a String
///
/// Sets units to inches
///
/// # Examples
/// ```
/// extern crate gen_gcode;
/// use gen_gcode::use_inches;
///
/// let gcode = use_inches();
/// assert_eq!("G20\n", gcode);
/// ```
pub fn use_inches() -> String {
    "G20\n".to_string()
}
/// Returns a G90 command as a String
///
/// Sets all axes to absolute positioning (relative to home, i.e. (0,0))
///
/// # Examples
/// ```
/// extern crate gen_gcode;
/// use gen_gcode::absolute_positioning;
///
/// let gcode = absolute_positioning();
/// assert_eq!("G90\n", gcode);
/// ```
pub fn absolute_positioning() -> String {
    "G90\n".to_string()
}
/// Returns a G91 command as a String
///
/// Sets all axes to relative positioning (relative to the nozzle/tool position)
///
/// # Examples
/// ```
/// extern crate gen_gcode;
/// use gen_gcode::relative_positioning;
///
/// let gcode = relative_positioning();
/// assert_eq!("G91\n", gcode);
/// ```
pub fn relative_positioning() -> String {
    "G91\n".to_string()
}
/// Returns a G92 command to set the current nozzle/tool position in the XY plane as a String
///
/// `extrude_pos` optionally sets the logical E-axis position as well.
///
/// # Examples
/// ```
/// extern crate gen_gcode;
/// use gen_gcode::{Point2d, set_pos_2d};
///
/// let p = Point2d { x: 125.0, y: 125.0 };
/// let gcode = set_pos_2d(p, None);
/// assert_eq!("G92 X125 Y125\n", gcode);
/// ```
pub fn set_pos_2d(pos: Point2d, extrude_pos: Option<f32>) -> String {
    // Optional extruder position contributes an " E<n>" word only when present.
    let e_str = extrude_pos.map_or(String::new(), |e| format!(" E{}", e));
    format!("G92 X{} Y{}{}\n", pos.x, pos.y, e_str)
}
/// Returns a G92 command to set the current nozzle/tool position in 3 dimensions (XYZ) as a String
///
/// `extrude_pos` optionally sets the logical E-axis position as well.
///
/// # Examples
/// ```
/// extern crate gen_gcode;
/// use gen_gcode::{Point3d, set_pos_3d};
///
/// let p = Point3d { x: 125.0, y: 125.0, z: 25.0};
/// let gcode = set_pos_3d(p, None);
/// assert_eq!("G92 X125 Y125 Z25\n", gcode);
/// ```
pub fn set_pos_3d(pos: Point3d, extrude_pos: Option<f32>) -> String {
    // Optional extruder position contributes an " E<n>" word only when present.
    let e_str = extrude_pos.map_or(String::new(), |e| format!(" E{}", e));
    format!("G92 X{} Y{} Z{}{}\n", pos.x, pos.y, pos.z, e_str)
}
/// Returns a G92 command to set the extruder position (E axis) as a String
///
/// # Examples
/// ```
/// extern crate gen_gcode;
/// use gen_gcode::reset_extruder;
///
/// let gcode = reset_extruder(0.0);
/// assert_eq!("G92 E0\n", gcode);
/// ```
pub fn reset_extruder(extrude_pos: f32) -> String {
    format!("G92 E{}\n", extrude_pos)
}
/// Returns a G92.1 command to reset to the machine's native positioning offsets as a String
///
/// # Examples
/// ```
/// extern crate gen_gcode;
/// use gen_gcode::reset_pos;
///
/// let gcode = reset_pos();
/// assert_eq!("G92.1\n", gcode);
/// ```
pub fn reset_pos() -> String {
    "G92.1\n".to_string()
}
/// Returns a M104 command to set target hotend temp as a String
///
/// # Examples
/// ```
/// extern crate gen_gcode;
/// use gen_gcode::set_hotend_temp;
///
/// let gcode = set_hotend_temp(210, None);
/// assert_eq!("M104 S210\n", gcode);
/// ```
///
/// To specify an extruder other than default (last active):
/// ```
/// extern crate gen_gcode;
/// use gen_gcode::set_hotend_temp;
/// // Some(2) is the extruder index on the printer
/// let gcode = set_hotend_temp(210, Some(2));
/// assert_eq!("M104 S210 T2\n", gcode);
/// ```
pub fn set_hotend_temp(temp: u16, hotend: Option<u8>) -> String {
    // Optional hotend index contributes a " T<n>" word only when present.
    let t_str = hotend.map_or(String::new(), |t| format!(" T{}", t));
    format!("M104 S{}{}\n", temp, t_str)
}
/// Returns a M109 command to set target hotend temp to wait to reach as a String
///
/// # Examples
/// ```
/// extern crate gen_gcode;
/// use gen_gcode::wait_hotend_temp;
///
/// let gcode = wait_hotend_temp(210, None);
/// assert_eq!("M109 S210\n", gcode);
/// ```
///
/// To specify an extruder other than default (last active):
/// ```
/// extern crate gen_gcode;
/// use gen_gcode::wait_hotend_temp;
/// // Some(2) is the extruder index on the printer
/// let gcode = wait_hotend_temp(210, Some(2));
/// assert_eq!("M109 S210 T2\n", gcode);
/// ```
pub fn wait_hotend_temp(temp: u16, hotend: Option<u8>) -> String {
    // Optional hotend index contributes a " T<n>" word only when present.
    let t_str = hotend.map_or(String::new(), |t| format!(" T{}", t));
    format!("M109 S{}{}\n", temp, t_str)
}
/// Returns a M106 command to set the fan speed, with optional fan index, as a String
///
/// `speed` is 0-255 (full range of `u8`).
///
/// # Examples
/// ```
/// extern crate gen_gcode;
/// use gen_gcode::set_fan_speed;
///
/// //set default fan to half speed
/// let gcode = set_fan_speed(128, None);
/// assert_eq!("M106 S128\n", gcode);
/// ```
///
/// ```
/// extern crate gen_gcode;
/// use gen_gcode::set_fan_speed;
///
/// //set alternate fan to full speed
/// let gcode = set_fan_speed(u8::MAX, Some(1));
/// assert_eq!("M106 S255 P1\n", gcode);
/// ```
pub fn set_fan_speed(speed: u8, fan: Option<u8>) -> String {
    // Optional fan index contributes a " P<n>" word only when present.
    let p_str = fan.map_or(String::new(), |p| format!(" P{}", p));
    format!("M106 S{}{}\n", speed, p_str)
}
/// Returns a M107 command to disable the fan, with optional fan index, as a String
///
/// # Examples
/// ```
/// extern crate gen_gcode;
/// use gen_gcode::fan_off;
///
/// let gcode = fan_off(None);
/// assert_eq!("M107\n", gcode);
/// ```
///
/// ```
/// extern crate gen_gcode;
/// use gen_gcode::fan_off;
///
/// let gcode = fan_off(Some(3));
/// assert_eq!("M107 P3\n", gcode);
/// ```
pub fn fan_off(fan: Option<u8>) -> String {
    // Optional fan index contributes a " P<n>" word only when present.
    let p_str = fan.map_or(String::new(), |p| format!(" P{}", p));
    format!("M107{}\n", p_str)
}
/// Returns a M140 command to set the target bed temp as a String
///
/// # Examples
/// ```
/// extern crate gen_gcode;
/// use gen_gcode::set_bed_temp;
///
/// let gcode = set_bed_temp(210);
/// assert_eq!("M140 S210\n", gcode);
/// ```
pub fn set_bed_temp(temp: u8) -> String {
    format!("M140 S{}\n", temp)
}
/// Returns a M190 command to set the target bed temp to wait to reach as a String
///
/// # Examples
/// ```
/// extern crate gen_gcode;
/// use gen_gcode::wait_bed_temp;
///
/// let gcode = wait_bed_temp(210);
/// assert_eq!("M190 S210\n", gcode);
/// ```
pub fn wait_bed_temp(temp: u8) -> String {
    format!("M190 S{}\n", temp)
}
/// Returns a M141 command to set the target chamber temp as a String
///
/// # Examples
/// ```
/// extern crate gen_gcode;
/// use gen_gcode::set_chamber_temp;
///
/// let gcode = set_chamber_temp(50);
/// assert_eq!("M141 S50\n", gcode);
/// ```
pub fn set_chamber_temp(temp: u8) -> String {
    format!("M141 S{}\n", temp)
}
/// Returns a M191 command to set the target chamber temp to wait to reach as a String
///
/// # Examples
/// ```
/// extern crate gen_gcode;
/// use gen_gcode::wait_chamber_temp;
///
/// let gcode = wait_chamber_temp(50);
/// assert_eq!("M191 S50\n", gcode);
/// ```
pub fn wait_chamber_temp(temp: u8) -> String {
    format!("M191 S{}\n", temp)
}
/// Returns a G28 command to trigger the autohome procedure, using default parameters set in machine firmware, as a String
///
/// # Examples
/// ```
/// extern crate gen_gcode;
/// use gen_gcode::auto_home;
///
/// let gcode = auto_home();
/// assert_eq!("G28\n", gcode);
/// ```
pub fn auto_home() -> String {
    "G28\n".to_string()
}
/// Returns a M82 command to set the extruder axis to absolute mode, independent of other axes, as a String
///
/// # Examples
/// ```
/// extern crate gen_gcode;
/// use gen_gcode::absolute_extrution;
///
/// let gcode = absolute_extrution();
/// assert_eq!("M82\n", gcode);
/// ```
pub fn absolute_extrution() -> String {
    // NOTE: the doc example above was previously missing its closing code
    // fence, which breaks rustdoc rendering and the doctest.
    format!("M82\n")
}
/// Returns a M83 command to set the extruder axis to relative mode, independent of other axes, as a String
///
/// # Examples
/// ```
/// extern crate gen_gcode;
/// use gen_gcode::relative_extrution;
///
/// let gcode = relative_extrution();
/// assert_eq!("M83\n", gcode);
/// ```
pub fn relative_extrution() -> String {
    // NOTE: the doc example above was previously missing its closing code
    // fence, which breaks rustdoc rendering and the doctest.
    format!("M83\n")
}
pub mod auto;
pub mod control;
pub mod drawable;
pub mod member;
pub mod container;
pub mod container_multi;
pub mod container_single;
pub mod has_image;
pub mod has_label;
pub mod has_layout;
pub mod has_native_id;
pub mod has_orientation;
pub mod has_progress;
pub mod has_size;
pub mod has_visibility;
pub mod clickable;
pub mod closeable;
pub mod item_clickable;
pub mod adapted;
pub mod adapter;
pub mod application;
pub mod button;
pub mod frame;
pub mod image;
pub mod layout_linear;
pub mod list;
pub mod tree;
pub mod message;
pub mod progress_bar;
pub mod splitted;
pub mod text;
pub mod tray;
pub mod window;
/// Sealed-trait helper: `Sealed` is public inside a `pub(crate)` module, so
/// crates downstream cannot name it and therefore cannot implement traits
/// that require it as a supertrait.
pub(crate) mod seal {
/// Marker trait preventing downstream implementations of crate traits.
pub trait Sealed {}
}
|
#[cfg(test)]use error::*;
#[cfg(test)]use engine;
#[cfg(test)]use engine::*;
#[cfg(test)]use il;
#[cfg(test)]use loader;
#[cfg(test)]use loader::Loader;
#[cfg(test)]use platform::*;
#[cfg(test)]use std::path::Path;
#[cfg(test)]use std::sync::Arc;
#[cfg(test)]
/// Symbolically executes the `simple-0` test binary from its ELF entry point
/// until a driver reaches `target_address`, then concretizes the symbolic
/// stdin bytes that satisfy the collected path constraints.
///
/// Returns the concrete stdin bytes, or an error if no driver ever reaches
/// the target address.
fn simple_0_test () -> Result<Vec<u8>> {
// let filename = Path::new("test_binaries/Palindrome/Palindrome.json");
// let elf = ::falcon::loader::json::Json::from_file(filename)?;
let filename = Path::new("test_binaries/simple-0/simple-0");
let elf = loader::elf::ElfLinker::new(filename)?;
// let mut elf = ::falcon::loader::elf::Elf::from_file(filename)?;
// Lift only the entry-point function into the IL program.
let mut program = il::Program::new();
program.add_function(elf.function(elf.program_entry())?);
// Initialize memory.
let mut memory = SymbolicMemory::new(engine::Endian::Little);
// Load all memory as given by the loader.
// Each byte is stored as an 8-bit constant expression at its load address.
for (address, segment) in elf.memory()?.segments() {
let bytes = segment.bytes();
for i in 0..bytes.len() {
memory.store(*address + i as u64, il::expr_const(bytes[i] as u64, 8))?;
}
}
let mut platform = linux_x86::LinuxX86::new();
// Create the engine
let mut engine = SymbolicEngine::new(memory);
platform.initialize(&mut engine)?;
// Get the first instruction we care about
let pl = ProgramLocation::from_address(elf.program_entry(), &program).unwrap();
// let pl = ProgramLocation::from_address(0x804880f, &program).unwrap();
let translator = elf.translator()?;
let driver = EngineDriver::new(
Arc::new(program),
pl,
engine,
&translator,
Arc::new(platform)
);
let mut drivers = vec![driver];
// NOTE(review): presumably an address inside the success path of simple-0;
// confirm against the test binary's disassembly.
let target_address: u64 = 0x8048512;
// Breadth-first exploration: each step may fork a driver per feasible
// successor; loop until a driver hits the target or all paths die out.
loop {
let mut new_drivers = Vec::new();
for driver in drivers {
{
let location = driver.location();
let program = driver.program();
let function_location = location.function_location();
// Only instruction locations carry a concrete address to compare.
if let FunctionLocation::Instruction{block_index, instruction_index} = *function_location {
let function = program.function(location.function_index()).unwrap();
let block = function.block(block_index).unwrap();
let instruction = block.instruction(instruction_index).unwrap();
let address = instruction.address().unwrap();
if address == target_address {
println!("Reached Target Address");
for constraint in driver.engine().constraints() {
println!("Constraint: {}", constraint);
}
// Concretize each symbolic stdin byte under the current
// path constraints; each must evaluate to exactly 8 bits.
let mut stdin: Vec<u8> = Vec::new();
for scalar in driver.platform().symbolic_scalars() {
let byte = driver.engine().eval(&scalar.clone().into(), None)?.unwrap();
assert!(byte.bits() == 8);
stdin.push(byte.value() as u8);
}
return Ok(stdin);
}
}
}
// Advance this driver; step() may return multiple forked drivers.
new_drivers.append(&mut driver.step()?);
}
drivers = new_drivers;
if drivers.is_empty() {
break;
}
}
bail!("Did not find result")
}
#[test]
/// End-to-end check: symbolic execution of `simple-0` must recover the stdin
/// bytes "abcdefgh" (0x61..0x68).
pub fn engine_test() {
    let expected: Vec<u8> = vec![0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68];
    let found = simple_0_test().unwrap();
    // Compare lengths explicitly (the old index loop would silently accept a
    // longer `found`, and panic rather than fail cleanly on a shorter one).
    assert_eq!(expected.len(), found.len());
    for (e, f) in expected.iter().zip(found.iter()) {
        println!("{} {}", e, f);
        assert_eq!(e, f);
    }
}
|
use core::any::Any;
use crate::{Asset, AssetInfo, ResolverContext, ResolverError};
mod wrapper;
/// Trait for the asset resolution system. An asset resolver is
/// responsible for resolving asset information (including the asset's
/// physical path) from a logical path.
pub trait Resolver {
/// Configures the resolver for a given asset path
///
/// NOTE(review): the signature takes no path argument — presumably the
/// target asset is implicit resolver state; confirm intended contract.
fn configure_resolver_for_asset(&mut self);
/// Returns the path formed by anchoring `path` to `anchor_path`.
///
/// If `anchor_path` ends with a trailing '/', it is treated as
/// a directory to which `path` will be anchored. Otherwise, it
/// is treated as a file and `path` will be anchored to its
/// containing directory.
///
/// If `anchor_path` is empty, `path` will be returned as-is.
///
/// If `path` is empty or not a relative path, it will be
/// returned as-is.
fn anchor_relative_path(&self, anchor_path: &str, path: &str) -> &str;
/// Returns true if the given path is a relative path.
///
/// NOTE(review): this and the following query methods document a `path`
/// that is absent from their signatures (they take only `&self`);
/// confirm whether the path is implicit resolver state or a missing
/// parameter.
fn is_relative_path(&self) -> bool;
/// Returns true if the given path is a repository path.
fn is_repository_path(&self) -> bool;
/// Returns whether this path is a search path.
fn is_search_path(&self) -> bool;
/// Returns the normalized extension for the given `path`.
fn get_extension(&self) -> &str;
/// Returns a normalized version of the given `path`.
fn compute_normalized_path(&self) -> &str;
/// Returns the computed repository path using the current resolver.
fn compute_repository_path(&self) -> &str;
/// Returns the local path for the given `path`.
fn compute_local_path(&self, path: &str) -> &str;
/// Returns the resolved filesystem path for the file identified by
/// the given `path` if it exists. If the file does not exist,
/// returns an empty string.
fn resolve(&self, path: &str) -> &str;
/// Binds the given context to this resolver.
///
/// Clients should generally use [`ResolverContextBinder`] instead of calling
/// this function directly.
fn bind_context(&mut self, context: &ResolverContext, binding_data: &dyn Any);
/// Unbind the given context from this resolver.
///
/// Clients should generally use [`ResolverContextBinder`] instead of calling
/// this function directly.
fn unbind_context(&mut self, context: &ResolverContext, binding_data: &dyn Any);
/// Return a default [`ResolverContext`] that may be bound to this resolver
/// to resolve assets when no other context is explicitly specified.
///
/// This function should not automatically bind this context, but should
/// create one that may be used later.
fn create_default_context(&self) -> ResolverContext;
/// Return a default [`ResolverContext`] that may be bound to this resolver
/// to resolve the asset located at `file_path` when no other context is
/// explicitly specified.
///
/// This function should not automatically bind this context, but should
/// create one that may be used later.
fn create_default_context_for_asset(&self, file_path: &str) -> ResolverContext;
/// Refresh any caches associated with the given context.
fn refresh_context(&mut self, context: ResolverContext);
/// Returns the currently-bound asset resolver context.
///
/// see [`Self::bind_context()`], [`Self::unbind_context()`]
fn get_current_context(&self) -> ResolverContext;
/// Returns the resolved filesystem path for the file identified
/// by `path` following the same path resolution behavior as in
/// [`Self::resolve()`].
///
/// If the file identified by `path` represents an asset and
/// `asset_info` is not [`None`], the resolver should populate
/// `asset_info` with whatever additional metadata it knows or can
/// reasonably compute about the asset without actually opening it.
///
/// NOTE(review): `asset_info` is taken by value, so the populated value
/// cannot reach the caller — confirm whether `&mut Option<AssetInfo>`
/// was intended.
///
/// see [`Self::resolve()`].
fn resolve_with_asset_info(&self, path: &str, asset_info: Option<AssetInfo>) -> &str;
/// Update `asset_info` with respect to the given `file_version`.
/// Note: This API is currently in flux. In general, you should prefer
/// to call [`Self::resolve_with_asset_info()`]
fn update_asset_info(
&self,
identifier: &str,
file_path: &str,
file_version: &str,
asset_info: &mut AssetInfo,
);
/// Returns a value representing the last time the asset identified
/// by `path` was modified. `resolved_path` is the resolved path
/// of the asset.
///
/// Implementations may use whatever value is most appropriate
/// for this timestamp. The value must be equality comparable,
/// and this function must return a different timestamp whenever
/// an asset has been modified. For instance, if an asset is stored
/// as a file on disk, the timestamp may simply be that file's mtime.
///
/// If a timestamp cannot be retrieved, returns an [`ResolverError`].
fn get_modification_timestamp(
&self,
path: &str,
resolved_path: &str,
) -> Result<i64, ResolverError>;
/// Returns an [`Asset`] object for the asset located at `resolved_path`.
/// Returns an error if the object could not be created.
///
/// The returned [`Asset`] object provides functions for accessing the
/// contents of the specified asset.
///
/// Note that clients may still be using the data associated with
/// this object even after the last reference has been dropped. For
/// example, a client may have created a memory mapping using the file
/// presented by the [`Asset`] object; this would preclude truncating or
/// overwriting any of the contents of that file.
fn open_asset(&self, resolved_path: &str) -> Result<&dyn Asset, ResolverError>;
/// Create path needed to write a file to the given `path`.
///
/// For example:
/// - A filesystem-based resolver might create the directories specified
/// in `path`.
/// - A database-based resolver might create a new table, or it might
/// ignore this altogether.
///
/// In practice, when writing a layer, [`Self::can_write_layer_to_path()`] will be called
/// first to check if writing is permitted. If this returns success, then
/// [`Self::create_path_for_layer()`] will be called before writing the layer out.
fn create_path_for_layer(&self, path: &str) -> Result<(), ResolverError>;
/// Returns [`Result::Ok`] if a file may be written to the given `path`, [`ResolverError::CannotWriteLayerToPath`]
/// otherwise.
///
/// In practice, when writing a layer, [`Self::can_write_layer_to_path()`] will be called
/// first to check if writing is permitted. If this returns success, then
/// [`Self::create_path_for_layer()`] will be called before writing the layer out.
fn can_write_layer_to_path(&self) -> Result<(), ResolverError>;
/// Returns success if a new file may be created using the given
/// `identifier`, [`ResolverError::CannotCreateNewLayerWithIdentifier`] otherwise.
fn can_create_new_layer_with_identifier(&self, identifier: &str) -> Result<(), ResolverError>;
/// A scoped resolution cache indicates to the resolver that results of
/// calls to resolve should be cached for a certain scope. This is
/// important for performance and also for consistency -- it ensures
/// that repeated calls to resolve with the same parameters will
/// return the same result.
///
/// A resolution cache scope is opened by a call to `begin_cache_scope` and
/// must be closed with a matching call to `end_cache_scope`. The resolver must
/// cache the results of resolve until the scope is closed. Note that these
/// calls may be nested.
///
/// Cache scopes are thread-specific: if multiple threads are running and
/// a cache scope is opened in one of those threads, caching should be
/// enabled in that thread only.
///
/// When opening a scope, a resolver may return additional data for
/// implementation-specific purposes. This data may be shared across
/// threads, so long as it is safe to access this data concurrently.
///
/// [`ResolverScopedCache`] is an RAII object for managing cache scope
/// lifetimes and data. Clients should generally use that class rather
/// than calling the `begin_cache_scope` and `end_cache_scope` functions manually.
///
/// see [`ResolverScopedCache`]
/// Mark the start of a resolution caching scope.
///
/// Clients should generally use [`ResolverScopedCache`] instead of calling
/// this function directly.
///
/// Resolvers may fill `cache_scope_data` with arbitrary data. Clients may
/// also pass in a `cache_scope_data` populated by an earlier call to
/// [`Self::begin_cache_scope()`] to allow the resolver access to that information.
///
/// see [`ResolverScopedCache`]
fn begin_cache_scope(&mut self, cache_scope_data: Option<&dyn Any>);
/// Mark the end of a resolution caching scope.
///
/// Clients should generally use [`ResolverScopedCache`] instead of calling
/// this function directly.
///
/// `cache_scope_data` should contain the data that was populated by the
/// previous corresponding call to begin_cache_scope.
///
/// see [`ResolverScopedCache`]
fn end_cache_scope(&mut self, cache_scope_data: &dyn Any);
}
// /// Returns the configured asset resolver.
// ///
// /// When first called, this function will determine the ArResolver subclass
// /// to use for asset resolution via the following process:
// ///
// /// - If a preferred resolver has been set via \ref ArSetPreferredResolver,
// /// it will be selected.
// ///
// /// - Otherwise, a list of available ArResolver subclasses in plugins will
// /// be generated. If multiple ArResolver subclasses are found, the list
// /// will be sorted by typename. ArDefaultResolver will be added as the last
// /// element of this list, and the first resolver in the list will be
// /// selected.
// ///
// /// - The plugin for the selected subclass will be loaded and an instance
// /// of the subclass will be constructed.
// ///
// /// - If an error occurs, an ArDefaultResolver will be constructed.
// ///
// /// The constructed ArResolver subclass will be cached and used to service
// /// function calls made on the returned resolver.
// ///
// /// Note that this function may not return the constructed subclass itself,
// /// meaning that dynamic casts to the subclass type may fail. See
// /// ArGetUnderlyingResolver if access to this object is needed.
// pub fn get_resolver() -> impl Resolver {
// todo!()
// }
//
// /// Set the preferred [`Resolver`] subclass used by [`get_resolver`].
// ///
// /// Consumers may override [`get_resolver`]'s plugin resolver discovery and
// /// force the use of a specific resolver subclass by calling this
// /// function with the typename of the implementation to use.
// ///
// /// If the subclass specified by `resolver_type_name` cannot be found,
// /// `get_resolver` will issue a warning and fall back to using
// /// [`DefaultResolver`].
// ///
// /// This must be called before the first call to ArGetResolver.
// pub fn set_preferred_resolver(resolver_type_name: &str) {
// todo!()
// }
//
// /// # Advanced API
// ///
// /// <section class="warning">
// /// These functions should typically not be used by consumers except
// /// in very specific cases. Consumers who want to retrieve an ArResolver to
// /// perform asset resolution should use \ref ArGetResolver.
// /// </section>
//
// /// Returns the underlying ArResolver instance used by ArGetResolver.
// ///
// /// This function returns the instance of the ArResolver subclass used by
// /// ArGetResolver and can be dynamic_cast to that type.
// ///
// /// <section class="warning">
// /// This functions should typically not be used by consumers except
// /// in very specific cases. Consumers who want to retrieve an ArResolver to
// /// perform asset resolution should use \ref ArGetResolver.
// /// </section>
// pub fn get_underlying_resolver() -> impl Resolver {
// todo!()
// }
//
// /// Returns list of TfTypes for available ArResolver subclasses.
// ///
// /// This function returns the list of ArResolver subclasses used to determine
// /// the resolver implementation returned by [`get_resolver`]. See
// /// documentation on that function for more details.
// ///
// /// If this function is called from within a call (or calls) to
// /// [`create_resolver`], the [`Resolver`] subclass(es) being created will
// /// be removed from the returned list.
// ///
// /// <section class="warning">
// /// This functions should typically not be used by consumers except
// /// in very specific cases. Consumers who want to retrieve a [`Resolver`] to
// /// perform asset resolution should use [`get_resolver`].
// /// </section>
// pub fn get_available_resolvers() -> Vec<String> {
// todo!()
// }
//
// /// Construct an instance of the [`Resolver`] subclass specified by
// /// `resolver_type`.
// ///
// /// This function will load the plugin for the given `resolver_type` and
// /// construct and return a new instance of the specified [`Resolver`] subclass.
// /// If an error occurs, coding errors will be emitted and this function
// /// will return an [`DefaultResolver`] instance.
// ///
// /// Note that this function *does not* change the resolver used by
// /// [`get_resolver`] to an instance of `resolver_type`.
// ///
// /// This function is not safe to call concurrently with itself or
// /// [`get_available_resolvers`].
// ///
// /// <section class="warning">
// /// This functions should typically not be used by consumers except
// /// in very specific cases. Consumers who want to retrieve an ArResolver to
// /// perform asset resolution should use [`get_resolver`].
// /// </section>
// pub fn create_resolver(resolver_type: &str) -> impl Resolver {
// todo!()
// }
|
use crate::Result;
use byteorder::*;
/// ELF program header segment type (`p_type`), ELF64 encoding.
///
/// Discriminant values follow the ELF specification.
/// NOTE(review): non-CamelCase name kept for API stability; consider
/// `#[allow(non_camel_case_types)]` to silence the compiler warning.
#[derive(Copy, Clone)]
pub enum Phdr_type {
NULL = 0x0,
LOAD = 0x1,
DYNAMIC = 0x2,
INTERP = 0x3,
NOTE = 0x4,
SHLIB = 0x5,
PHDR = 0x6,
TLS = 0x7,
LOOS = 0x60000000,
HIOS = 0x6FFFFFFF,
LOPROC = 0x70000000,
HIPROC = 0x7FFFFFFF,
// GNU options missing here
// Currently we are dropping all foreign formats
// This might not be optimal.
}
/// A single ELF64 program header entry, as parsed by [`ProgramHeader::parse`].
pub struct ProgramHeader {
/// Segment type (ELF `p_type`).
pub p_type: Phdr_type,
// Segment flags (ELF `p_flags`, the 32-bit word at offset 0x04).
flags: u32,
/// Offset of the segment in the file image (ELF `p_offset`).
pub offset: u64,
// Virtual address of the segment in memory (ELF `p_vaddr`).
vaddr: u64,
// Physical address, where relevant (ELF `p_paddr`).
paddr: u64,
/// Size of the segment in the file image (ELF `p_filesz`).
pub filesz: u64,
// Size of the segment in memory (ELF `p_memsz`).
memsz: u64,
// NOTE(review): always set to 0 by `parse` and never serialized by
// `to_le_offset`; appears unused — confirm before relying on it.
p_flags: u64,
/// Segment alignment (ELF `p_align`).
pub p_align: u64
}
impl ProgramHeader {
// Parse programheaders
pub fn parse(phdr: &[u8]) -> Result< ProgramHeader > {
Ok(ProgramHeader{
p_type: parse_phdr_type(&phdr),
flags: LittleEndian::read_u32(&phdr[0x04..0x08]),
offset: LittleEndian::read_u64(&phdr[0x8..0x10]),
vaddr: LittleEndian::read_u64(&phdr[0x10..0x18]),
paddr: LittleEndian::read_u64(&phdr[0x18..0x20]),
filesz: LittleEndian::read_u64(&phdr[0x20..0x28]),
memsz: LittleEndian::read_u64(&phdr[0x28..0x30]),
p_flags: 0,
p_align: LittleEndian::read_u64(&phdr[0x30..0x38]),
})
}
pub fn to_le(&self) -> Vec<u8> {
self.to_le_offset(0)
}
pub fn to_le_offset(&self, offset:usize) -> Vec<u8> {
// bin.append([1,2,3].to_vec())
let mut bin = vec![];
// do i end up owning this data, thus preventing me from using sh_type elsewhere?
bin.extend_from_slice(&(self.p_type as u32).to_le_bytes());
bin.extend_from_slice(&self.flags.to_le_bytes());
bin.extend_from_slice(&(self.offset + offset as u64).to_le_bytes());
bin.extend_from_slice(&self.vaddr.to_le_bytes());
bin.extend_from_slice(&self.paddr.to_le_bytes());
bin.extend_from_slice(&self.filesz.to_le_bytes());
bin.extend_from_slice(&self.memsz.to_le_bytes());
// bin.extend_from_slice(&self.p_flags.to_le_bytes()); used in 32-bit
bin.extend_from_slice(&self.p_align.to_le_bytes());
// ProgramHeader::add_padding(40, &mut bin);
return bin;
}
fn add_padding(target_size: u32, bin: &mut Vec<u8>) {
while bin.len() < 40 {
bin.push(b'\0');
}
}
}
/// Decode the 32-bit `p_type` field at the start of a program header.
/// Any value outside the known set (including the GNU/OS extensions the
/// comments in `Phdr_type` mention) collapses to `Phdr_type::NULL`.
fn parse_phdr_type(phdr: &[u8]) -> Phdr_type {
    match LittleEndian::read_u32(&phdr[0x0..0x4]) {
        0x0 => Phdr_type::NULL,
        0x1 => Phdr_type::LOAD,
        0x2 => Phdr_type::DYNAMIC,
        0x3 => Phdr_type::INTERP,
        0x4 => Phdr_type::NOTE,
        0x5 => Phdr_type::SHLIB,
        0x6 => Phdr_type::PHDR,
        0x7 => Phdr_type::TLS,
        0x60000000 => Phdr_type::LOOS,
        0x6FFFFFFF => Phdr_type::HIOS,
        0x70000000 => Phdr_type::LOPROC,
        0x7FFFFFFF => Phdr_type::HIPROC,
        _ => Phdr_type::NULL,
    }
}
/// Parse all program headers out of a complete 64-bit little-endian ELF image.
///
/// Reads `e_phoff` (offset 0x20), `e_phentsize` (0x36) and `e_phnum` (0x38)
/// from the ELF file header, then parses `e_phnum` consecutive entries.
/// Panics if `bin` is shorter than the region those fields describe.
/// Takes `&[u8]` so both slices and `&Vec<u8>` callers work via deref coercion.
pub fn parse_program_header(bin: &[u8]) -> Result<Vec<ProgramHeader>> {
    let phdr_offset = LittleEndian::read_u64(&bin[0x20..0x28]);
    let phdr_size = LittleEndian::read_u16(&bin[0x36..0x38]);
    let phdr_num = LittleEndian::read_u16(&bin[0x38..0x3A]);
    let mut phdrs: Vec<ProgramHeader> = Vec::with_capacity(phdr_num as usize);
    // loop through all programheaders
    for i in 0..phdr_num {
        let start = (phdr_offset + (phdr_size as u64 * i as u64)) as usize;
        let end = start + phdr_size as usize;
        phdrs.push(ProgramHeader::parse(&bin[start..end])?)
    }
    Ok(phdrs)
}
pub fn to_le(phdrs: Vec<ProgramHeader> ) -> Vec<u8> {
let mut bin = vec![];
for phdr in phdrs {
bin.extend(phdr.to_le());
}
return bin;
} |
#[macro_use]
extern crate bencher;
use bencher::Bencher;
use vowpalwabbit;
// Benchmark `uniform_hash` over a 10-byte input with seed 0.
fn uniform_hash_10chars(bench: &mut Bencher) {
    bench.iter(|| vowpalwabbit::hash::uniform_hash(b"abcdefghij", 0))
}
// Benchmark `uniform_hash` over a 100-byte input (the 10-byte string
// repeated ten times) with seed 0, to expose per-byte scaling.
fn uniform_hash_100chars(bench: &mut Bencher) {
    bench.iter(|| {
        vowpalwabbit::hash::uniform_hash(b"abcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghij", 0)
    })
}
// Register both hash benchmarks and emit the bencher harness `main`.
benchmark_group!(benches, uniform_hash_10chars, uniform_hash_100chars);
benchmark_main!(benches);
|
use std::collections::BTreeMap;
use std::io;
use std::io::prelude::*;
use std::fs::File;
use rustc_serialize::json::{ToJson, Json};
use super::super::engine::{CheckResult, CheckSuiteResult, PropertyResult};
use super::Report;
/// JSON renderer for a check-suite result: the outcome can be rendered to a
/// pretty-printed string or written to `filename`.
pub struct JsonReport<'a> {
    // The suite outcome this report serializes (borrowed for the report's lifetime).
    check_suite_result: &'a CheckSuiteResult<'a>,
    // Destination path used by `write_to_file`.
    filename: &'a str,
}
impl<'a> JsonReport<'a> {
    /// Build a JSON report over `check_suite_result`, destined for `filename`.
    pub fn new(check_suite_result: &'a CheckSuiteResult, filename: &'a str) -> JsonReport<'a> {
        let report = JsonReport {
            filename: filename,
            check_suite_result: check_suite_result,
        };
        report
    }
}
impl<'a> Report<'a> for JsonReport<'a> {
    /// Render the whole check-suite result as pretty-printed JSON.
    fn as_string(&self) -> String {
        // `.pretty()` yields a Display-able pretty printer; `to_string`
        // replaces the redundant `format!("{}", ...)` round-trip.
        self.check_suite_result.to_json().pretty().to_string()
    }
    /// Write the pretty-printed JSON to `self.filename`, truncating any
    /// existing file; returns any I/O error from creating or writing.
    fn write_to_file(&self) -> io::Result<()> {
        let mut f = try!(File::create(self.filename));
        f.write_all(self.as_string().as_bytes())
    }
}
impl<'a> ToJson for PropertyResult<'a> {
    /// Serialize one property outcome as
    /// `{host, property: {name, module, params}, property_result}`.
    fn to_json(&self) -> Json {
        let mut property = BTreeMap::new();
        property.insert("name".to_string(), self.property.name.to_json());
        property.insert("module".to_string(), self.property.module.to_json());
        property.insert("params".to_string(), self.property.params.to_json());
        // "Success" on Ok, otherwise the error's display text.
        let outcome = match self.result {
            Ok(()) => "Success".to_string(),
            Err(ref err) => err.to_string(),
        };
        let mut doc = BTreeMap::new();
        doc.insert("host".to_string(), self.host.to_json());
        doc.insert("property".to_string(), property.to_json());
        doc.insert("property_result".to_string(), outcome.to_json());
        Json::Object(doc)
    }
}
impl<'a> ToJson for CheckResult<'a> {
    /// Serialize one check as `{inventory_name, property_results}`.
    fn to_json(&self) -> Json {
        let mut map = BTreeMap::new();
        map.insert("inventory_name".to_string(), self.check.inventory_name.to_json());
        map.insert("property_results".to_string(), self.results.to_json());
        Json::Object(map)
    }
}
impl<'a> ToJson for CheckSuiteResult<'a> {
    /// Serialize the whole suite as `{inventory, check_results}`.
    fn to_json(&self) -> Json {
        let mut map = BTreeMap::new();
        map.insert("inventory".to_string(), self.check_suite.inventory.to_json());
        map.insert("check_results".to_string(), self.results.to_json());
        Json::Object(map)
    }
}
|
use super::*;
/// Reader for an edge-list CSV file: wraps a `CSVFileReader` together with
/// the column layout and parsing options for sources, destinations,
/// optional edge types and optional weights.
///
/// # Attributes
#[derive(Clone)]
pub struct EdgeFileReader {
    // Underlying CSV reader (path, separator, header flag, ...).
    pub(crate) reader: CSVFileReader,
    // Index of the column holding source node names (default 0).
    pub(crate) sources_column_number: usize,
    // Index of the column holding destination node names (default 1).
    pub(crate) destinations_column_number: usize,
    // Optional column index for edge types.
    pub(crate) edge_types_column_number: Option<usize>,
    // Fallback edge type used when the edge-type cell is empty.
    pub(crate) default_edge_type: Option<String>,
    // Optional column index for edge weights.
    pub(crate) weights_column_number: Option<usize>,
    // Fallback weight used when the weight cell is empty.
    pub(crate) default_weight: Option<WeightT>,
    // Whether to skip self loops — NOTE(review): not consumed in this file;
    // presumably read by the graph builder.
    pub(crate) skip_self_loops: bool,
    // Whether edge type names are numeric ids — NOTE(review): not consumed here.
    pub(crate) numeric_edge_type_ids: bool,
    // Whether node names are numeric ids — NOTE(review): not consumed here.
    pub(crate) numeric_node_ids: bool,
    // Tolerate a missing weights column instead of erroring.
    pub(crate) skip_weights_if_unavailable: bool,
    // Tolerate a missing edge-types column instead of erroring.
    pub(crate) skip_edge_types_if_unavailable: bool,
}
impl EdgeFileReader {
    /// Return new EdgeFileReader object.
    ///
    /// # Arguments
    ///
    /// * path: String - Path where to store/load the file.
    ///
    pub fn new(path: String) -> Result<EdgeFileReader, String> {
        Ok(EdgeFileReader {
            reader: CSVFileReader::new(path)?,
            // By convention the first two columns are sources and destinations.
            sources_column_number: 0,
            destinations_column_number: 1,
            edge_types_column_number: None,
            default_edge_type: None,
            weights_column_number: None,
            default_weight: None,
            skip_self_loops: false,
            numeric_edge_type_ids: false,
            numeric_node_ids: false,
            skip_weights_if_unavailable: false,
            skip_edge_types_if_unavailable: false,
        })
    }
    /// Set the column of the source nodes.
    ///
    /// # Arguments
    ///
    /// * sources_column: Option<String> - The source nodes column to use for the file.
    ///
    pub fn set_sources_column(
        mut self,
        sources_column: Option<String>,
    ) -> Result<EdgeFileReader, String> {
        if let Some(column) = sources_column {
            if column.is_empty() {
                // Fixed error message: previously said "node types column".
                return Err("The given sources column is empty.".to_owned());
            }
            self.sources_column_number = self.reader.get_column_number(column)?;
        }
        Ok(self)
    }
    /// Set the column_number of the source nodes.
    ///
    /// # Arguments
    ///
    /// * sources_column_number: Option<usize> - The sources column number to use for the file.
    ///
    pub fn set_sources_column_number(
        mut self,
        sources_column_number: Option<usize>,
    ) -> Result<EdgeFileReader, String> {
        if let Some(column) = sources_column_number {
            let expected_elements = self.reader.get_elements_per_line()?;
            if column >= expected_elements {
                return Err(format!(
                    concat!(
                        "The source column number passed was {} but ",
                        "the first parsable line has {} values."
                    ),
                    column, expected_elements
                ));
            }
            self.sources_column_number = column;
        }
        Ok(self)
    }
    /// Set the column of the destination nodes.
    ///
    /// # Arguments
    ///
    /// * destinations_column: Option<String> - The destination nodes column to use for the file.
    ///
    pub fn set_destinations_column(
        mut self,
        destinations_column: Option<String>,
    ) -> Result<EdgeFileReader, String> {
        if let Some(column) = destinations_column {
            if column.is_empty() {
                // Fixed error message: previously said "node types column".
                return Err("The given destinations column is empty.".to_owned());
            }
            self.destinations_column_number = self.reader.get_column_number(column)?;
        }
        Ok(self)
    }
    /// Set the column_number of the destination nodes.
    ///
    /// # Arguments
    ///
    /// * destinations_column_number: Option<usize> - The destinations column number to use for the file.
    ///
    pub fn set_destinations_column_number(
        mut self,
        destinations_column_number: Option<usize>,
    ) -> Result<EdgeFileReader, String> {
        if let Some(column) = destinations_column_number {
            let expected_elements = self.reader.get_elements_per_line()?;
            if column >= expected_elements {
                return Err(format!(
                    concat!(
                        "The destinations column number passed was {} but ",
                        "the first parsable line has {} values."
                    ),
                    column, expected_elements
                ));
            }
            self.destinations_column_number = column;
        }
        Ok(self)
    }
    /// Set the column of the edge types.
    ///
    /// # Arguments
    ///
    /// * edge_type_column: Option<String> - The edge types column to use for the file.
    ///
    pub fn set_edge_types_column(
        mut self,
        edge_type_column: Option<String>,
    ) -> Result<EdgeFileReader, String> {
        if let Some(column) = edge_type_column {
            if column.is_empty() {
                // Fixed error message: previously said "node types column".
                return Err("The given edge types column is empty.".to_owned());
            }
            match self.reader.get_column_number(column) {
                Ok(ecn) => {
                    self.edge_types_column_number = Some(ecn);
                }
                Err(e) => {
                    // A missing column is tolerated when explicitly requested.
                    if !self.skip_edge_types_if_unavailable {
                        return Err(e);
                    }
                }
            }
        }
        Ok(self)
    }
    /// Set the column_number of the edge types.
    ///
    /// # Arguments
    ///
    /// * edge_types_column_number: Option<usize> - The edge_types column number to use for the file.
    ///
    pub fn set_edge_types_column_number(
        mut self,
        edge_types_column_number: Option<usize>,
    ) -> Result<EdgeFileReader, String> {
        if let Some(etcn) = &edge_types_column_number {
            let expected_elements = self.reader.get_elements_per_line()?;
            if *etcn >= expected_elements {
                if !self.skip_edge_types_if_unavailable {
                    return Err(format!(
                        concat!(
                            "The edge types column number passed was {} but ",
                            "the first parsable line has {} values."
                        ),
                        etcn, expected_elements
                    ));
                }
            } else {
                self.edge_types_column_number = edge_types_column_number;
            }
        }
        Ok(self)
    }
    /// Set the column of the edge weights.
    ///
    /// # Arguments
    ///
    /// * weights_column: Option<String> - The edge weights column to use for the file.
    ///
    pub fn set_weights_column(
        mut self,
        weights_column: Option<String>,
    ) -> Result<EdgeFileReader, String> {
        if let Some(column) = weights_column {
            if column.is_empty() {
                return Err("The given edge weights column is empty.".to_owned());
            }
            match self.reader.get_column_number(column) {
                Ok(wcn) => {
                    self.weights_column_number = Some(wcn);
                }
                Err(e) => {
                    // A missing column is tolerated when explicitly requested.
                    if !self.skip_weights_if_unavailable {
                        return Err(e);
                    }
                }
            }
        }
        Ok(self)
    }
    /// Set the column_number of the edge weights.
    ///
    /// # Arguments
    ///
    /// * weights_column_number: Option<usize> - The weights column number to use for the file.
    ///
    pub fn set_weights_column_number(
        mut self,
        weights_column_number: Option<usize>,
    ) -> Result<EdgeFileReader, String> {
        if let Some(wcn) = &weights_column_number {
            let expected_elements = self.reader.get_elements_per_line()?;
            if *wcn >= expected_elements {
                // Fixed copy-paste bug: this previously consulted
                // `skip_edge_types_if_unavailable` instead of the weights flag.
                if !self.skip_weights_if_unavailable {
                    return Err(format!(
                        concat!(
                            "The weights column number passed was {} but ",
                            "the first parsable line has {} values."
                        ),
                        wcn, expected_elements
                    ));
                }
            } else {
                self.weights_column_number = weights_column_number;
            }
        }
        Ok(self)
    }
    /// Set whether to automatically skip weights if they are not available instead of raising an exception.
    ///
    /// # Arguments
    ///
    /// * skip_weights_if_unavailable: Option<bool> - Whether to skip weights if they are not available.
    ///
    pub fn set_skip_weights_if_unavailable(
        mut self,
        skip_weights_if_unavailable: Option<bool>,
    ) -> Result<EdgeFileReader, String> {
        if let Some(skip) = skip_weights_if_unavailable {
            self.skip_weights_if_unavailable = skip;
        }
        Ok(self)
    }
    /// Set whether to automatically skip edge types if they are not available instead of raising an exception.
    ///
    /// # Arguments
    ///
    /// * skip_edge_types_if_unavailable: Option<bool> - Whether to skip edge types if they are not available.
    ///
    pub fn set_skip_edge_types_if_unavailable(
        mut self,
        skip_edge_types_if_unavailable: Option<bool>,
    ) -> Result<EdgeFileReader, String> {
        if let Some(skip) = skip_edge_types_if_unavailable {
            self.skip_edge_types_if_unavailable = skip;
        }
        Ok(self)
    }
    /// Set the default weight.
    ///
    /// # Arguments
    ///
    /// * default_weight: Option<WeightT> - The weight to use when the weight cell is missing.
    ///
    pub fn set_default_weight(mut self, default_weight: Option<WeightT>) -> EdgeFileReader {
        self.default_weight = default_weight;
        self
    }
    /// Set the default edge type.
    ///
    /// # Arguments
    ///
    /// * default_edge_type: Option<String> - The edge type to use when edge type is missing.
    ///
    pub fn set_default_edge_type(mut self, default_edge_type: Option<String>) -> EdgeFileReader {
        self.default_edge_type = default_edge_type;
        self
    }
    /// Set if the reader should ignore or not self loops.
    ///
    /// # Arguments
    ///
    /// * skip_self_loops: Option<bool> - Whether the reader should skip self loops.
    ///
    pub fn set_skip_self_loops(mut self, skip_self_loops: Option<bool>) -> EdgeFileReader {
        if let Some(i) = skip_self_loops {
            self.skip_self_loops = i;
        }
        self
    }
    /// Set the comment symbol to use to skip the lines.
    ///
    /// # Arguments
    ///
    /// * comment_symbol: Option<String> - Lines starting with this symbol are skipped.
    ///
    pub fn set_comment_symbol(
        mut self,
        comment_symbol: Option<String>,
    ) -> Result<EdgeFileReader, String> {
        if let Some(cs) = comment_symbol {
            if cs.is_empty() {
                return Err("The given comment symbol is empty.".to_string());
            }
            self.reader.comment_symbol = Some(cs);
        }
        Ok(self)
    }
    /// Set the verbose.
    ///
    /// # Arguments
    ///
    /// * verbose: Option<bool> - Whether to show the loading bar or not.
    ///
    pub fn set_verbose(mut self, verbose: Option<bool>) -> EdgeFileReader {
        if let Some(v) = verbose {
            self.reader.verbose = v;
        }
        self
    }
    /// Set the numeric_edge_type_ids flag.
    ///
    /// # Arguments
    ///
    /// * numeric_edge_type_ids: Option<bool> - Whether to load edge types as numeric ids.
    ///
    pub fn set_numeric_edge_type_ids(
        mut self,
        numeric_edge_type_ids: Option<bool>,
    ) -> EdgeFileReader {
        if let Some(neti) = numeric_edge_type_ids {
            self.numeric_edge_type_ids = neti;
        }
        self
    }
    /// Set the numeric_node_ids flag.
    ///
    /// # Arguments
    ///
    /// * numeric_node_ids: Option<bool> - Whether to load node names as numeric ids.
    ///
    pub fn set_numeric_node_ids(mut self, numeric_node_ids: Option<bool>) -> EdgeFileReader {
        if let Some(nni) = numeric_node_ids {
            self.numeric_node_ids = nni;
        }
        self
    }
    /// Set the ignore_duplicates.
    ///
    /// # Arguments
    ///
    /// * ignore_duplicates: Option<bool> - Whether to ignore detected duplicates or raise exception.
    ///
    pub fn set_ignore_duplicates(mut self, ignore_duplicates: Option<bool>) -> EdgeFileReader {
        if let Some(v) = ignore_duplicates {
            self.reader.ignore_duplicates = v;
        }
        self
    }
    /// Set the separator.
    ///
    /// # Arguments
    ///
    /// * separator: Option<String> - The separator to use for the file.
    ///
    pub fn set_separator(mut self, separator: Option<String>) -> Result<EdgeFileReader, String> {
        if let Some(sep) = separator {
            if sep.is_empty() {
                return Err("The separator cannot be empty.".to_owned());
            }
            self.reader.separator = sep;
        }
        Ok(self)
    }
    /// Set the header.
    ///
    /// # Arguments
    ///
    /// * header: Option<bool> - Whether to expect a header or not.
    ///
    pub fn set_header(mut self, header: Option<bool>) -> EdgeFileReader {
        if let Some(v) = header {
            self.reader.header = v;
        }
        self
    }
    /// Set number of rows to be skipped when starting to read file.
    ///
    /// # Arguments
    ///
    /// * rows_to_skip: Option<usize> - Number of rows to skip before parsing.
    ///
    pub fn set_rows_to_skip(mut self, rows_to_skip: Option<usize>) -> EdgeFileReader {
        if let Some(v) = rows_to_skip {
            self.reader.rows_to_skip = v;
        }
        self
    }
    /// Set the maximum number of rows to load from the file.
    ///
    /// # Arguments
    ///
    /// * max_rows_number: Option<u64> - The maximum number of rows to read.
    ///
    pub fn set_max_rows_number(mut self, max_rows_number: Option<u64>) -> EdgeFileReader {
        self.reader.max_rows_number = max_rows_number;
        self
    }
    /// Parse a single line (vector of strings already split).
    ///
    /// # Arguments
    ///
    /// * vals: Vec<String> - Vector of the values of the line to be parsed
    fn parse_edge_line(&self, vals: Vec<String>) -> Result<StringQuadruple, String> {
        // extract the values
        let source_node_name = vals[self.sources_column_number].to_owned();
        let destination_node_name = vals[self.destinations_column_number].to_owned();
        // extract the edge type if present, falling back to the default;
        // an empty cell with no default is an error.
        let edge_type: Option<String> = match self.edge_types_column_number {
            None => Ok(None),
            Some(idx) => {
                let curr = vals[idx].to_owned();
                if !curr.is_empty() {
                    Ok(Some(curr))
                } else if let Some(def) = &self.default_edge_type {
                    Ok(Some(def.clone()))
                } else {
                    Err(format!(
                        concat!(
                            "Found empty edge type but no default edge ",
                            "type to use was provided.",
                            "The source node name is {source_node_name}.\n",
                            "The destination node name is {destination_node_name}.\n",
                            "The path of the document was {path}.\n"
                        ),
                        source_node_name = source_node_name,
                        destination_node_name = destination_node_name,
                        path = self.reader.path
                    ))
                }
            }
        }?;
        // extract the weight if present, falling back to the default;
        // an empty cell with no default is an error.
        let edge_weight = match self.weights_column_number {
            None => Ok(None),
            Some(idx) => {
                let curr = vals[idx].to_owned();
                if !curr.is_empty() {
                    match parse_weight(Some(curr)) {
                        Ok(v) => Ok(v),
                        Err(e) => Err(e),
                    }
                } else if let Some(def) = &self.default_weight {
                    Ok(Some(*def))
                } else {
                    Err(format!(
                        concat!(
                            "Found empty weight but no default weight ",
                            "to use was provided. ",
                            "The source node name is {source_node_name}.\n",
                            "The destination node name is {destination_node_name}.\n",
                            "The path of the document was {path}.\n"
                        ),
                        source_node_name = source_node_name,
                        destination_node_name = destination_node_name,
                        path = self.reader.path
                    ))
                }
            }
        }?;
        Ok((
            source_node_name,
            destination_node_name,
            edge_type,
            edge_weight,
        ))
    }
    /// Return iterator of rows of the edge file.
    ///
    /// Validates the configured column numbers (no two roles may share a
    /// column, and every configured column must exist in the first parsable
    /// line) before yielding parsed `StringQuadruple`s.
    pub fn read_lines(
        &self,
    ) -> Result<impl Iterator<Item = Result<StringQuadruple, String>> + '_, String> {
        if self.destinations_column_number == self.sources_column_number {
            return Err("The destinations column is the same as the sources one.".to_string());
        }
        if Some(self.destinations_column_number) == self.weights_column_number {
            return Err("The destinations column is the same as the weights one.".to_string());
        }
        if Some(self.sources_column_number) == self.weights_column_number {
            return Err("The sources column is the same as the weights one.".to_string());
        }
        if Some(self.sources_column_number) == self.edge_types_column_number {
            return Err("The sources column is the same as the edge types one.".to_string());
        }
        if Some(self.destinations_column_number) == self.edge_types_column_number {
            return Err("The destinations column is the same as the edge types one.".to_string());
        }
        if self.weights_column_number.is_some()
            && self.weights_column_number == self.edge_types_column_number
        {
            return Err("The weights column is the same as the edge types one.".to_string());
        }
        let expected_elements = self.reader.get_elements_per_line()?;
        if self.sources_column_number >= expected_elements {
            return Err(format!(
                concat!(
                    "The sources column number passed was {} but ",
                    "the first parsable line has {} values."
                ),
                self.sources_column_number, expected_elements
            ));
        }
        if self.destinations_column_number >= expected_elements {
            return Err(format!(
                concat!(
                    "The destinations column number passed was {} but ",
                    "the first parsable line has {} values."
                ),
                self.destinations_column_number, expected_elements
            ));
        }
        Ok(self.reader.read_lines()?.map(move |values| match values {
            Ok(vals) => self.parse_edge_line(vals),
            Err(e) => Err(e),
        }))
    }
}
|
// Based on stuff from: https://www.snip2code.com/Snippet/1473242/Rust-Hexdump
use std::io::{self, Read, BufRead};
use std::cmp;
use std::fmt::{self, Write};
const HR_BYTES_PER_LINE: usize = 16;
/// Adapter wrapping any `Read` source and producing a classic hexdump
/// rendering of it (offset column, hex bytes, ASCII gutter), one output
/// line per `HR_BYTES_PER_LINE` input bytes.
pub struct HexReader<T> {
    // Wrapped byte source.
    inner: T,
    // Rendered-but-not-yet-consumed output line (ASCII only).
    buf: String,
    // Read cursor into `buf`.
    buf_pos: usize,
    // Number of lines rendered so far; drives the offset column.
    line_count: usize,
}
impl<T: Read> HexReader<T> {
    /// Wrap `inner`, starting at offset 0 with an empty output buffer.
    pub fn new(inner: T) -> HexReader<T> {
        HexReader {
            inner: inner,
            buf: String::new(),
            buf_pos: 0,
            line_count: 0
        }
    }
    /// Append one hexdump line for `bytes` (at most one line's worth) to
    /// the output buffer and advance the line counter.
    pub fn render_bytes(&mut self, bytes: &[u8]) -> fmt::Result {
        write!(&mut self.buf, "${:08x}  ", HR_BYTES_PER_LINE * self.line_count)?;
        for (count, b) in bytes.iter().enumerate() {
            // Extra gap between the two 8-byte halves.
            if count == 8 {
                write!(&mut self.buf, " ")?;
            }
            write!(&mut self.buf, " {:02x}", b)?;
        }
        // Pad short (partial) lines so the ASCII gutter always lines up.
        loop {
            if self.buf.len() > 60 { break }
            write!(&mut self.buf, " ")?;
        }
        write!(&mut self.buf, "|")?;
        for b in bytes.iter() {
            // Printable ASCII is 0x20..=0x7E. Fixed: the old bound `<= 127`
            // rendered DEL (0x7F) raw instead of as '.'.
            if *b >= 32 && *b < 127 {
                write!(&mut self.buf, "{}", *b as char)?;
                continue;
            }
            write!(&mut self.buf, ".")?;
        }
        write!(&mut self.buf, "|")?;
        write!(&mut self.buf, "\n")?;
        self.line_count += 1;
        Ok(())
    }
}
impl<T: Read> Read for HexReader<T> {
    /// Read rendered hexdump text out of the internal line buffer,
    /// refilling it from the wrapped reader as needed.
    fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
        // Inner scope ends the `fill_buf` borrow before `consume` re-borrows.
        let copied = {
            let mut pending = self.fill_buf()?;
            pending.read(buf)?
        };
        self.consume(copied);
        Ok(copied)
    }
}
impl<R: Read> BufRead for HexReader<R> {
    // Refill `buf` with one rendered hexdump line once the previous line is
    // fully consumed; returns the unconsumed remainder as bytes.
    fn fill_buf(&mut self) -> io::Result<&[u8]> {
        if self.buf_pos >= self.buf.len() {
            let mut nread: usize = 0;
            let mut tmp: [u8; HR_BYTES_PER_LINE] = [0; HR_BYTES_PER_LINE];
            // Accumulate up to one full line of input, tolerating short reads.
            loop {
                nread += match self.inner.read(&mut tmp[nread..]) {
                    // EOF before any byte of this line: end of stream.
                    Ok(0) if nread == 0 => return Ok(&[]),
                    // EOF mid-line: render the partial line below.
                    Ok(0) => break,
                    Ok(n) => n,
                    // NOTE(review): a transient error here discards the
                    // `nread` bytes already buffered in `tmp` — confirm
                    // callers never retry after an error.
                    Err(e) => return Err(e),
                };
                if nread >= HR_BYTES_PER_LINE { break }
            }
            self.buf.clear();
            self.render_bytes(&tmp[..nread]).expect("TODO:");
            self.buf_pos = 0;
        }
        // The rendered line is pure ASCII, so byte slicing at `buf_pos` is safe.
        Ok(self.buf[self.buf_pos..].as_bytes())
    }
    // Advance the read cursor, clamped to the end of the rendered line.
    fn consume(&mut self, count: usize) {
        self.buf_pos = cmp::min(self.buf_pos + count, self.buf.len());
    }
}
|
#[doc = r"Register block"]
// svd2rust-generated memory map of the AXI interconnect (AXIMC) config space.
// Field order and the `_reservedN` padding arrays place each register at the
// absolute offset stated in its `#[doc]` line; do not reorder or resize.
#[repr(C)]
pub struct RegisterBlock {
    _reserved0: [u8; 8144usize],
    #[doc = "0x1fd0 - AXIMC peripheral ID4 register"]
    pub periph_id_4: PERIPH_ID_4,
    #[doc = "0x1fd4 - AXIMC peripheral ID5 register"]
    pub periph_id_5: PERIPH_ID_5,
    #[doc = "0x1fd8 - AXIMC peripheral ID6 register"]
    pub periph_id_6: PERIPH_ID_6,
    _reserved3: [u8; 4usize],
    #[doc = "0x1fe0 - AXIMC peripheral ID0 register"]
    pub periph_id_0: PERIPH_ID_0,
    #[doc = "0x1fe4 - AXIMC peripheral ID1 register"]
    pub periph_id_1: PERIPH_ID_1,
    #[doc = "0x1fe8 - AXIMC peripheral ID2 register"]
    pub periph_id_2: PERIPH_ID_2,
    #[doc = "0x1fec - AXIMC peripheral ID3 register"]
    pub periph_id_3: PERIPH_ID_3,
    #[doc = "0x1ff0 - AXIMC component ID0 register"]
    pub comp_id_0: COMP_ID_0,
    #[doc = "0x1ff4 - AXIMC component ID1 register"]
    pub comp_id_1: COMP_ID_1,
    #[doc = "0x1ff8 - AXIMC component ID2 register"]
    pub comp_id_2: COMP_ID_2,
    #[doc = "0x1ffc - AXIMC component ID3 register"]
    pub comp_id_3: COMP_ID_3,
    _reserved11: [u8; 122312usize],
    #[doc = "0x1fdc8 - AXIMC peripheral ID7 register"]
    pub periph_id_7: PERIPH_ID_7,
    _reserved12: [u8; 139864usize],
    #[doc = "0x42024 - AXIMC master 0 packing functionality register"]
    pub m0_fn_mod2: M0_FN_MOD2,
    #[doc = "0x42028 - AXIMC master 0 AHB conversion override functionality register"]
    pub m0_fn_mod_ahb: M0_FN_MOD_AHB,
    _reserved14: [u8; 212usize],
    #[doc = "0x42100 - AXIMC master 0 read priority register"]
    pub m0_read_qos: M0_READ_QOS,
    #[doc = "0x42104 - AXIMC master 0 write priority register"]
    pub m0_write_qos: M0_WRITE_QOS,
    #[doc = "0x42108 - AXIMC master 0 issuing capability override functionality register"]
    pub m0_fn_mod: M0_FN_MOD,
    _reserved17: [u8; 3864usize],
    #[doc = "0x43024 - AXIMC master 1 packing functionality register"]
    pub m1_fn_mod2: M1_FN_MOD2,
    #[doc = "0x43028 - AXIMC master 1 AHB conversion override functionality register"]
    pub m1_fn_mod_ahb: M1_FN_MOD_AHB,
    _reserved19: [u8; 212usize],
    #[doc = "0x43100 - AXIMC master 1 read priority register"]
    pub m1_read_qos: M1_READ_QOS,
    #[doc = "0x43104 - AXIMC master 1 write priority register"]
    pub m1_write_qos: M1_WRITE_QOS,
    #[doc = "0x43108 - AXIMC master 1 issuing capability override functionality register"]
    pub m1_fn_mod: M1_FN_MOD,
    _reserved22: [u8; 3864usize],
    #[doc = "0x44024 - AXIMC master 2 packing functionality register"]
    pub m2_fn_mod2: M2_FN_MOD2,
    #[doc = "0x44028 - AXIMC master 2 AHB conversion override functionality register"]
    pub m2_fn_mod_ahb: M2_FN_MOD_AHB,
    _reserved24: [u8; 212usize],
    #[doc = "0x44100 - AXIMC master 2 read priority register"]
    pub m2_read_qos: M2_READ_QOS,
    #[doc = "0x44104 - AXIMC master 2 write priority register"]
    pub m2_write_qos: M2_WRITE_QOS,
    #[doc = "0x44108 - AXIMC master 2 issuing capability override functionality register"]
    pub m2_fn_mod: M2_FN_MOD,
    _reserved27: [u8; 3864usize],
    #[doc = "0x45024 - AXIMC master 5 packing functionality register"]
    pub m5_fn_mod2: M5_FN_MOD2,
    #[doc = "0x45028 - AXIMC master 5 AHB conversion override functionality register"]
    pub m5_fn_mod_ahb: M5_FN_MOD_AHB,
    _reserved29: [u8; 212usize],
    #[doc = "0x45100 - AXIMC master 5 read priority register"]
    pub m5_read_qos: M5_READ_QOS,
    #[doc = "0x45104 - AXIMC master 5 write priority register"]
    pub m5_write_qos: M5_WRITE_QOS,
    #[doc = "0x45108 - AXIMC master 5 issuing capability override functionality register"]
    pub m5_fn_mod: M5_FN_MOD,
    _reserved32: [u8; 4084usize],
    #[doc = "0x46100 - AXIMC master 3 read priority register"]
    pub m3_read_qos: M3_READ_QOS,
    #[doc = "0x46104 - AXIMC master 3 write priority register"]
    pub m3_write_qos: M3_WRITE_QOS,
    #[doc = "0x46108 - AXIMC master 3 packing functionality register"]
    pub m3_fn_mod: M3_FN_MOD,
    _reserved35: [u8; 4084usize],
    #[doc = "0x47100 - AXIMC master 7 read priority register"]
    pub m7_read_qos: M7_READ_QOS,
    #[doc = "0x47104 - AXIMC master 7 write priority register"]
    pub m7_write_qos: M7_WRITE_QOS,
    #[doc = "0x47108 - AXIMC master 7 issuing capability override functionality register"]
    pub m7_fn_mod: M7_FN_MOD,
    _reserved38: [u8; 4084usize],
    #[doc = "0x48100 - AXIMC master 8 read priority register"]
    pub m8_read_qos: M8_READ_QOS,
    #[doc = "0x48104 - AXIMC master 8 write priority register"]
    pub m8_write_qos: M8_WRITE_QOS,
    #[doc = "0x48108 - AXIMC master 8 issuing capability override functionality register"]
    pub m8_fn_mod: M8_FN_MOD,
    _reserved41: [u8; 7968usize],
    #[doc = "0x4a02c - AXIMC long burst capability inhibition register"]
    pub fn_mod_lb: FN_MOD_LB,
    _reserved42: [u8; 208usize],
    #[doc = "0x4a100 - AXIMC master 4 read priority register"]
    pub m4_read_qos: M4_READ_QOS,
    #[doc = "0x4a104 - AXIMC master 4 write priority register"]
    pub m4_write_qos: M4_WRITE_QOS,
    #[doc = "0x4a108 - AXIMC master 4 packing functionality register"]
    pub m4_fn_mod: M4_FN_MOD,
    _reserved45: [u8; 4084usize],
    #[doc = "0x4b100 - AXIMC master 9 read priority register"]
    pub m9_read_qos: M9_READ_QOS,
    #[doc = "0x4b104 - AXIMC master 9 write priority register"]
    pub m9_write_qos: M9_WRITE_QOS,
    #[doc = "0x4b108 - AXIMC master 9 issuing capability override functionality register"]
    pub m9_fn_mod: M9_FN_MOD,
    _reserved48: [u8; 4084usize],
    #[doc = "0x4c100 - AXIMC master 10 read priority register"]
    pub m10_read_qos: M10_READ_QOS,
    #[doc = "0x4c104 - AXIMC master 10 write priority register"]
    pub m10_write_qos: M10_WRITE_QOS,
    #[doc = "0x4c108 - AXIMC master 10 issuing capability override functionality register"]
    pub m10_fn_mod: M10_FN_MOD,
    _reserved51: [u8; 3868usize],
    #[doc = "0x4d028 - AXIMC master 6 AHB conversion override functionality register"]
    pub m6_fn_mod_ahb: M6_FN_MOD_AHB,
    _reserved52: [u8; 212usize],
    #[doc = "0x4d100 - AXIMC master 6 read priority register"]
    pub m6_read_qos: M6_READ_QOS,
    #[doc = "0x4d104 - AXIMC master 6 write priority register"]
    pub m6_write_qos: M6_WRITE_QOS,
    #[doc = "0x4d108 - AXIMC master 6 issuing capability override functionality register"]
    pub m6_fn_mod: M6_FN_MOD,
}
#[doc = "AXIMC peripheral ID4 register\n\nThis register you can [`read`](crate::generic::Reg::read). See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [periph_id_4](periph_id_4) module"]
pub type PERIPH_ID_4 = crate::Reg<u32, _PERIPH_ID_4>;
#[allow(missing_docs)]
#[doc(hidden)]
pub struct _PERIPH_ID_4;
#[doc = "`read()` method returns [periph_id_4::R](periph_id_4::R) reader structure"]
impl crate::Readable for PERIPH_ID_4 {}
#[doc = "AXIMC peripheral ID4 register"]
pub mod periph_id_4;
#[doc = "AXIMC peripheral ID5 register\n\nThis register you can [`read`](crate::generic::Reg::read). See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [periph_id_5](periph_id_5) module"]
pub type PERIPH_ID_5 = crate::Reg<u32, _PERIPH_ID_5>;
#[allow(missing_docs)]
#[doc(hidden)]
pub struct _PERIPH_ID_5;
#[doc = "`read()` method returns [periph_id_5::R](periph_id_5::R) reader structure"]
impl crate::Readable for PERIPH_ID_5 {}
#[doc = "AXIMC peripheral ID5 register"]
pub mod periph_id_5;
#[doc = "AXIMC peripheral ID6 register\n\nThis register you can [`read`](crate::generic::Reg::read). See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [periph_id_6](periph_id_6) module"]
pub type PERIPH_ID_6 = crate::Reg<u32, _PERIPH_ID_6>;
#[allow(missing_docs)]
#[doc(hidden)]
pub struct _PERIPH_ID_6;
#[doc = "`read()` method returns [periph_id_6::R](periph_id_6::R) reader structure"]
impl crate::Readable for PERIPH_ID_6 {}
#[doc = "AXIMC peripheral ID6 register"]
pub mod periph_id_6;
#[doc = "AXIMC peripheral ID7 register\n\nThis register you can [`read`](crate::generic::Reg::read). See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [periph_id_7](periph_id_7) module"]
pub type PERIPH_ID_7 = crate::Reg<u32, _PERIPH_ID_7>;
#[allow(missing_docs)]
#[doc(hidden)]
pub struct _PERIPH_ID_7;
#[doc = "`read()` method returns [periph_id_7::R](periph_id_7::R) reader structure"]
impl crate::Readable for PERIPH_ID_7 {}
#[doc = "AXIMC peripheral ID7 register"]
pub mod periph_id_7;
#[doc = "AXIMC peripheral ID0 register\n\nThis register you can [`read`](crate::generic::Reg::read). See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [periph_id_0](periph_id_0) module"]
pub type PERIPH_ID_0 = crate::Reg<u32, _PERIPH_ID_0>;
#[allow(missing_docs)]
#[doc(hidden)]
pub struct _PERIPH_ID_0;
#[doc = "`read()` method returns [periph_id_0::R](periph_id_0::R) reader structure"]
impl crate::Readable for PERIPH_ID_0 {}
#[doc = "AXIMC peripheral ID0 register"]
pub mod periph_id_0;
#[doc = "AXIMC peripheral ID1 register\n\nThis register you can [`read`](crate::generic::Reg::read). See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [periph_id_1](periph_id_1) module"]
pub type PERIPH_ID_1 = crate::Reg<u32, _PERIPH_ID_1>;
#[allow(missing_docs)]
#[doc(hidden)]
pub struct _PERIPH_ID_1;
#[doc = "`read()` method returns [periph_id_1::R](periph_id_1::R) reader structure"]
impl crate::Readable for PERIPH_ID_1 {}
#[doc = "AXIMC peripheral ID1 register"]
pub mod periph_id_1;
#[doc = "AXIMC peripheral ID2 register\n\nThis register you can [`read`](crate::generic::Reg::read). See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [periph_id_2](periph_id_2) module"]
pub type PERIPH_ID_2 = crate::Reg<u32, _PERIPH_ID_2>;
#[allow(missing_docs)]
#[doc(hidden)]
pub struct _PERIPH_ID_2;
#[doc = "`read()` method returns [periph_id_2::R](periph_id_2::R) reader structure"]
impl crate::Readable for PERIPH_ID_2 {}
#[doc = "AXIMC peripheral ID2 register"]
pub mod periph_id_2;
#[doc = "AXIMC peripheral ID3 register\n\nThis register you can [`read`](crate::generic::Reg::read). See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [periph_id_3](periph_id_3) module"]
pub type PERIPH_ID_3 = crate::Reg<u32, _PERIPH_ID_3>;
#[allow(missing_docs)]
#[doc(hidden)]
pub struct _PERIPH_ID_3;
#[doc = "`read()` method returns [periph_id_3::R](periph_id_3::R) reader structure"]
impl crate::Readable for PERIPH_ID_3 {}
#[doc = "AXIMC peripheral ID3 register"]
pub mod periph_id_3;
// svd2rust-generated declarations for the read-only AXIMC component ID registers
// (COMP_ID_0..3). Read-only: each gets an `impl crate::Readable` but no
// `Writable`, so only `read()` is available on the handle type.
#[doc = "AXIMC component ID0 register\n\nThis register you can [`read`](crate::generic::Reg::read). See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [comp_id_0](comp_id_0) module"]
pub type COMP_ID_0 = crate::Reg<u32, _COMP_ID_0>;
#[allow(missing_docs)]
#[doc(hidden)]
pub struct _COMP_ID_0;
#[doc = "`read()` method returns [comp_id_0::R](comp_id_0::R) reader structure"]
impl crate::Readable for COMP_ID_0 {}
#[doc = "AXIMC component ID0 register"]
pub mod comp_id_0;
#[doc = "AXIMC component ID1 register\n\nThis register you can [`read`](crate::generic::Reg::read). See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [comp_id_1](comp_id_1) module"]
pub type COMP_ID_1 = crate::Reg<u32, _COMP_ID_1>;
#[allow(missing_docs)]
#[doc(hidden)]
pub struct _COMP_ID_1;
#[doc = "`read()` method returns [comp_id_1::R](comp_id_1::R) reader structure"]
impl crate::Readable for COMP_ID_1 {}
#[doc = "AXIMC component ID1 register"]
pub mod comp_id_1;
#[doc = "AXIMC component ID2 register\n\nThis register you can [`read`](crate::generic::Reg::read). See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [comp_id_2](comp_id_2) module"]
pub type COMP_ID_2 = crate::Reg<u32, _COMP_ID_2>;
#[allow(missing_docs)]
#[doc(hidden)]
pub struct _COMP_ID_2;
#[doc = "`read()` method returns [comp_id_2::R](comp_id_2::R) reader structure"]
impl crate::Readable for COMP_ID_2 {}
#[doc = "AXIMC component ID2 register"]
pub mod comp_id_2;
#[doc = "AXIMC component ID3 register\n\nThis register you can [`read`](crate::generic::Reg::read). See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [comp_id_3](comp_id_3) module"]
pub type COMP_ID_3 = crate::Reg<u32, _COMP_ID_3>;
#[allow(missing_docs)]
#[doc(hidden)]
pub struct _COMP_ID_3;
#[doc = "`read()` method returns [comp_id_3::R](comp_id_3::R) reader structure"]
impl crate::Readable for COMP_ID_3 {}
#[doc = "AXIMC component ID3 register"]
pub mod comp_id_3;
// svd2rust-generated declarations for the read/write AXIMC master 0..2 packing
// functionality registers (Mx_FN_MOD2). Read/write: both `Readable` and
// `Writable` are implemented, enabling read/reset/write/write_with_zero/modify
// on the handle type; the `pub mod` holds the generated field R/W API.
#[doc = "AXIMC master 0 packing functionality register\n\nThis register you can [`read`](crate::generic::Reg::read), [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero), [`modify`](crate::generic::Reg::modify). See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [m0_fn_mod2](m0_fn_mod2) module"]
pub type M0_FN_MOD2 = crate::Reg<u32, _M0_FN_MOD2>;
#[allow(missing_docs)]
#[doc(hidden)]
pub struct _M0_FN_MOD2;
#[doc = "`read()` method returns [m0_fn_mod2::R](m0_fn_mod2::R) reader structure"]
impl crate::Readable for M0_FN_MOD2 {}
#[doc = "`write(|w| ..)` method takes [m0_fn_mod2::W](m0_fn_mod2::W) writer structure"]
impl crate::Writable for M0_FN_MOD2 {}
#[doc = "AXIMC master 0 packing functionality register"]
pub mod m0_fn_mod2;
#[doc = "AXIMC master 1 packing functionality register\n\nThis register you can [`read`](crate::generic::Reg::read), [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero), [`modify`](crate::generic::Reg::modify). See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [m1_fn_mod2](m1_fn_mod2) module"]
pub type M1_FN_MOD2 = crate::Reg<u32, _M1_FN_MOD2>;
#[allow(missing_docs)]
#[doc(hidden)]
pub struct _M1_FN_MOD2;
#[doc = "`read()` method returns [m1_fn_mod2::R](m1_fn_mod2::R) reader structure"]
impl crate::Readable for M1_FN_MOD2 {}
#[doc = "`write(|w| ..)` method takes [m1_fn_mod2::W](m1_fn_mod2::W) writer structure"]
impl crate::Writable for M1_FN_MOD2 {}
#[doc = "AXIMC master 1 packing functionality register"]
pub mod m1_fn_mod2;
#[doc = "AXIMC master 2 packing functionality register\n\nThis register you can [`read`](crate::generic::Reg::read), [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero), [`modify`](crate::generic::Reg::modify). See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [m2_fn_mod2](m2_fn_mod2) module"]
pub type M2_FN_MOD2 = crate::Reg<u32, _M2_FN_MOD2>;
#[allow(missing_docs)]
#[doc(hidden)]
pub struct _M2_FN_MOD2;
#[doc = "`read()` method returns [m2_fn_mod2::R](m2_fn_mod2::R) reader structure"]
impl crate::Readable for M2_FN_MOD2 {}
#[doc = "`write(|w| ..)` method takes [m2_fn_mod2::W](m2_fn_mod2::W) writer structure"]
impl crate::Writable for M2_FN_MOD2 {}
#[doc = "AXIMC master 2 packing functionality register"]
pub mod m2_fn_mod2;
// svd2rust-generated declarations for the read/write AXIMC master 0..2 AHB
// conversion override functionality registers (Mx_FN_MOD_AHB). Same generated
// pattern: handle type alias, hidden tag struct, Readable + Writable impls,
// and a field-API module per register.
#[doc = "AXIMC master 0 AHB conversion override functionality register\n\nThis register you can [`read`](crate::generic::Reg::read), [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero), [`modify`](crate::generic::Reg::modify). See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [m0_fn_mod_ahb](m0_fn_mod_ahb) module"]
pub type M0_FN_MOD_AHB = crate::Reg<u32, _M0_FN_MOD_AHB>;
#[allow(missing_docs)]
#[doc(hidden)]
pub struct _M0_FN_MOD_AHB;
#[doc = "`read()` method returns [m0_fn_mod_ahb::R](m0_fn_mod_ahb::R) reader structure"]
impl crate::Readable for M0_FN_MOD_AHB {}
#[doc = "`write(|w| ..)` method takes [m0_fn_mod_ahb::W](m0_fn_mod_ahb::W) writer structure"]
impl crate::Writable for M0_FN_MOD_AHB {}
#[doc = "AXIMC master 0 AHB conversion override functionality register"]
pub mod m0_fn_mod_ahb;
#[doc = "AXIMC master 1 AHB conversion override functionality register\n\nThis register you can [`read`](crate::generic::Reg::read), [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero), [`modify`](crate::generic::Reg::modify). See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [m1_fn_mod_ahb](m1_fn_mod_ahb) module"]
pub type M1_FN_MOD_AHB = crate::Reg<u32, _M1_FN_MOD_AHB>;
#[allow(missing_docs)]
#[doc(hidden)]
pub struct _M1_FN_MOD_AHB;
#[doc = "`read()` method returns [m1_fn_mod_ahb::R](m1_fn_mod_ahb::R) reader structure"]
impl crate::Readable for M1_FN_MOD_AHB {}
#[doc = "`write(|w| ..)` method takes [m1_fn_mod_ahb::W](m1_fn_mod_ahb::W) writer structure"]
impl crate::Writable for M1_FN_MOD_AHB {}
#[doc = "AXIMC master 1 AHB conversion override functionality register"]
pub mod m1_fn_mod_ahb;
#[doc = "AXIMC master 2 AHB conversion override functionality register\n\nThis register you can [`read`](crate::generic::Reg::read), [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero), [`modify`](crate::generic::Reg::modify). See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [m2_fn_mod_ahb](m2_fn_mod_ahb) module"]
pub type M2_FN_MOD_AHB = crate::Reg<u32, _M2_FN_MOD_AHB>;
#[allow(missing_docs)]
#[doc(hidden)]
pub struct _M2_FN_MOD_AHB;
#[doc = "`read()` method returns [m2_fn_mod_ahb::R](m2_fn_mod_ahb::R) reader structure"]
impl crate::Readable for M2_FN_MOD_AHB {}
#[doc = "`write(|w| ..)` method takes [m2_fn_mod_ahb::W](m2_fn_mod_ahb::W) writer structure"]
impl crate::Writable for M2_FN_MOD_AHB {}
#[doc = "AXIMC master 2 AHB conversion override functionality register"]
pub mod m2_fn_mod_ahb;
// svd2rust-generated declarations for the read/write AXIMC master 0..2 read
// priority (QoS) registers (Mx_READ_QOS): handle alias, hidden tag struct,
// Readable + Writable impls, and per-register field-API module.
#[doc = "AXIMC master 0 read priority register\n\nThis register you can [`read`](crate::generic::Reg::read), [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero), [`modify`](crate::generic::Reg::modify). See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [m0_read_qos](m0_read_qos) module"]
pub type M0_READ_QOS = crate::Reg<u32, _M0_READ_QOS>;
#[allow(missing_docs)]
#[doc(hidden)]
pub struct _M0_READ_QOS;
#[doc = "`read()` method returns [m0_read_qos::R](m0_read_qos::R) reader structure"]
impl crate::Readable for M0_READ_QOS {}
#[doc = "`write(|w| ..)` method takes [m0_read_qos::W](m0_read_qos::W) writer structure"]
impl crate::Writable for M0_READ_QOS {}
#[doc = "AXIMC master 0 read priority register"]
pub mod m0_read_qos;
#[doc = "AXIMC master 1 read priority register\n\nThis register you can [`read`](crate::generic::Reg::read), [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero), [`modify`](crate::generic::Reg::modify). See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [m1_read_qos](m1_read_qos) module"]
pub type M1_READ_QOS = crate::Reg<u32, _M1_READ_QOS>;
#[allow(missing_docs)]
#[doc(hidden)]
pub struct _M1_READ_QOS;
#[doc = "`read()` method returns [m1_read_qos::R](m1_read_qos::R) reader structure"]
impl crate::Readable for M1_READ_QOS {}
#[doc = "`write(|w| ..)` method takes [m1_read_qos::W](m1_read_qos::W) writer structure"]
impl crate::Writable for M1_READ_QOS {}
#[doc = "AXIMC master 1 read priority register"]
pub mod m1_read_qos;
#[doc = "AXIMC master 2 read priority register\n\nThis register you can [`read`](crate::generic::Reg::read), [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero), [`modify`](crate::generic::Reg::modify). See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [m2_read_qos](m2_read_qos) module"]
pub type M2_READ_QOS = crate::Reg<u32, _M2_READ_QOS>;
#[allow(missing_docs)]
#[doc(hidden)]
pub struct _M2_READ_QOS;
#[doc = "`read()` method returns [m2_read_qos::R](m2_read_qos::R) reader structure"]
impl crate::Readable for M2_READ_QOS {}
#[doc = "`write(|w| ..)` method takes [m2_read_qos::W](m2_read_qos::W) writer structure"]
impl crate::Writable for M2_READ_QOS {}
#[doc = "AXIMC master 2 read priority register"]
pub mod m2_read_qos;
// svd2rust-generated declarations for the read/write AXIMC master 0..2 write
// priority (QoS) registers (Mx_WRITE_QOS) — the write-channel counterparts of
// the Mx_READ_QOS registers, following the same generated pattern.
#[doc = "AXIMC master 0 write priority register\n\nThis register you can [`read`](crate::generic::Reg::read), [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero), [`modify`](crate::generic::Reg::modify). See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [m0_write_qos](m0_write_qos) module"]
pub type M0_WRITE_QOS = crate::Reg<u32, _M0_WRITE_QOS>;
#[allow(missing_docs)]
#[doc(hidden)]
pub struct _M0_WRITE_QOS;
#[doc = "`read()` method returns [m0_write_qos::R](m0_write_qos::R) reader structure"]
impl crate::Readable for M0_WRITE_QOS {}
#[doc = "`write(|w| ..)` method takes [m0_write_qos::W](m0_write_qos::W) writer structure"]
impl crate::Writable for M0_WRITE_QOS {}
#[doc = "AXIMC master 0 write priority register"]
pub mod m0_write_qos;
#[doc = "AXIMC master 1 write priority register\n\nThis register you can [`read`](crate::generic::Reg::read), [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero), [`modify`](crate::generic::Reg::modify). See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [m1_write_qos](m1_write_qos) module"]
pub type M1_WRITE_QOS = crate::Reg<u32, _M1_WRITE_QOS>;
#[allow(missing_docs)]
#[doc(hidden)]
pub struct _M1_WRITE_QOS;
#[doc = "`read()` method returns [m1_write_qos::R](m1_write_qos::R) reader structure"]
impl crate::Readable for M1_WRITE_QOS {}
#[doc = "`write(|w| ..)` method takes [m1_write_qos::W](m1_write_qos::W) writer structure"]
impl crate::Writable for M1_WRITE_QOS {}
#[doc = "AXIMC master 1 write priority register"]
pub mod m1_write_qos;
#[doc = "AXIMC master 2 write priority register\n\nThis register you can [`read`](crate::generic::Reg::read), [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero), [`modify`](crate::generic::Reg::modify). See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [m2_write_qos](m2_write_qos) module"]
pub type M2_WRITE_QOS = crate::Reg<u32, _M2_WRITE_QOS>;
#[allow(missing_docs)]
#[doc(hidden)]
pub struct _M2_WRITE_QOS;
#[doc = "`read()` method returns [m2_write_qos::R](m2_write_qos::R) reader structure"]
impl crate::Readable for M2_WRITE_QOS {}
#[doc = "`write(|w| ..)` method takes [m2_write_qos::W](m2_write_qos::W) writer structure"]
impl crate::Writable for M2_WRITE_QOS {}
#[doc = "AXIMC master 2 write priority register"]
pub mod m2_write_qos;
// svd2rust-generated declarations for the read/write AXIMC master 0..2 issuing
// capability override functionality registers (Mx_FN_MOD), same generated
// pattern as the other read/write AXIMC registers in this module.
#[doc = "AXIMC master 0 issuing capability override functionality register\n\nThis register you can [`read`](crate::generic::Reg::read), [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero), [`modify`](crate::generic::Reg::modify). See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [m0_fn_mod](m0_fn_mod) module"]
pub type M0_FN_MOD = crate::Reg<u32, _M0_FN_MOD>;
#[allow(missing_docs)]
#[doc(hidden)]
pub struct _M0_FN_MOD;
#[doc = "`read()` method returns [m0_fn_mod::R](m0_fn_mod::R) reader structure"]
impl crate::Readable for M0_FN_MOD {}
#[doc = "`write(|w| ..)` method takes [m0_fn_mod::W](m0_fn_mod::W) writer structure"]
impl crate::Writable for M0_FN_MOD {}
#[doc = "AXIMC master 0 issuing capability override functionality register"]
pub mod m0_fn_mod;
#[doc = "AXIMC master 1 issuing capability override functionality register\n\nThis register you can [`read`](crate::generic::Reg::read), [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero), [`modify`](crate::generic::Reg::modify). See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [m1_fn_mod](m1_fn_mod) module"]
pub type M1_FN_MOD = crate::Reg<u32, _M1_FN_MOD>;
#[allow(missing_docs)]
#[doc(hidden)]
pub struct _M1_FN_MOD;
#[doc = "`read()` method returns [m1_fn_mod::R](m1_fn_mod::R) reader structure"]
impl crate::Readable for M1_FN_MOD {}
#[doc = "`write(|w| ..)` method takes [m1_fn_mod::W](m1_fn_mod::W) writer structure"]
impl crate::Writable for M1_FN_MOD {}
#[doc = "AXIMC master 1 issuing capability override functionality register"]
pub mod m1_fn_mod;
#[doc = "AXIMC master 2 issuing capability override functionality register\n\nThis register you can [`read`](crate::generic::Reg::read), [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero), [`modify`](crate::generic::Reg::modify). See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [m2_fn_mod](m2_fn_mod) module"]
pub type M2_FN_MOD = crate::Reg<u32, _M2_FN_MOD>;
#[allow(missing_docs)]
#[doc(hidden)]
pub struct _M2_FN_MOD;
#[doc = "`read()` method returns [m2_fn_mod::R](m2_fn_mod::R) reader structure"]
impl crate::Readable for M2_FN_MOD {}
#[doc = "`write(|w| ..)` method takes [m2_fn_mod::W](m2_fn_mod::W) writer structure"]
impl crate::Writable for M2_FN_MOD {}
#[doc = "AXIMC master 2 issuing capability override functionality register"]
pub mod m2_fn_mod;
// svd2rust-generated declarations for master 5's packing register (M5_FN_MOD2)
// and the master 5/6 AHB conversion override registers (M5/M6_FN_MOD_AHB).
// NOTE(review): masters 3/4/6..8 have no FN_MOD2 here and masters 0..4/7/8 do
// appear elsewhere — the register set per master is irregular; this mirrors the
// vendor SVD, so verify against the device reference manual if it looks off.
#[doc = "AXIMC master 5 packing functionality register\n\nThis register you can [`read`](crate::generic::Reg::read), [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero), [`modify`](crate::generic::Reg::modify). See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [m5_fn_mod2](m5_fn_mod2) module"]
pub type M5_FN_MOD2 = crate::Reg<u32, _M5_FN_MOD2>;
#[allow(missing_docs)]
#[doc(hidden)]
pub struct _M5_FN_MOD2;
#[doc = "`read()` method returns [m5_fn_mod2::R](m5_fn_mod2::R) reader structure"]
impl crate::Readable for M5_FN_MOD2 {}
#[doc = "`write(|w| ..)` method takes [m5_fn_mod2::W](m5_fn_mod2::W) writer structure"]
impl crate::Writable for M5_FN_MOD2 {}
#[doc = "AXIMC master 5 packing functionality register"]
pub mod m5_fn_mod2;
#[doc = "AXIMC master 5 AHB conversion override functionality register\n\nThis register you can [`read`](crate::generic::Reg::read), [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero), [`modify`](crate::generic::Reg::modify). See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [m5_fn_mod_ahb](m5_fn_mod_ahb) module"]
pub type M5_FN_MOD_AHB = crate::Reg<u32, _M5_FN_MOD_AHB>;
#[allow(missing_docs)]
#[doc(hidden)]
pub struct _M5_FN_MOD_AHB;
#[doc = "`read()` method returns [m5_fn_mod_ahb::R](m5_fn_mod_ahb::R) reader structure"]
impl crate::Readable for M5_FN_MOD_AHB {}
#[doc = "`write(|w| ..)` method takes [m5_fn_mod_ahb::W](m5_fn_mod_ahb::W) writer structure"]
impl crate::Writable for M5_FN_MOD_AHB {}
#[doc = "AXIMC master 5 AHB conversion override functionality register"]
pub mod m5_fn_mod_ahb;
#[doc = "AXIMC master 6 AHB conversion override functionality register\n\nThis register you can [`read`](crate::generic::Reg::read), [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero), [`modify`](crate::generic::Reg::modify). See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [m6_fn_mod_ahb](m6_fn_mod_ahb) module"]
pub type M6_FN_MOD_AHB = crate::Reg<u32, _M6_FN_MOD_AHB>;
#[allow(missing_docs)]
#[doc(hidden)]
pub struct _M6_FN_MOD_AHB;
#[doc = "`read()` method returns [m6_fn_mod_ahb::R](m6_fn_mod_ahb::R) reader structure"]
impl crate::Readable for M6_FN_MOD_AHB {}
#[doc = "`write(|w| ..)` method takes [m6_fn_mod_ahb::W](m6_fn_mod_ahb::W) writer structure"]
impl crate::Writable for M6_FN_MOD_AHB {}
#[doc = "AXIMC master 6 AHB conversion override functionality register"]
pub mod m6_fn_mod_ahb;
// svd2rust-generated declarations for the read/write AXIMC master 5/6 read
// priority (QoS) registers — same pattern as the master 0..2 QoS registers.
#[doc = "AXIMC master 5 read priority register\n\nThis register you can [`read`](crate::generic::Reg::read), [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero), [`modify`](crate::generic::Reg::modify). See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [m5_read_qos](m5_read_qos) module"]
pub type M5_READ_QOS = crate::Reg<u32, _M5_READ_QOS>;
#[allow(missing_docs)]
#[doc(hidden)]
pub struct _M5_READ_QOS;
#[doc = "`read()` method returns [m5_read_qos::R](m5_read_qos::R) reader structure"]
impl crate::Readable for M5_READ_QOS {}
#[doc = "`write(|w| ..)` method takes [m5_read_qos::W](m5_read_qos::W) writer structure"]
impl crate::Writable for M5_READ_QOS {}
#[doc = "AXIMC master 5 read priority register"]
pub mod m5_read_qos;
#[doc = "AXIMC master 6 read priority register\n\nThis register you can [`read`](crate::generic::Reg::read), [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero), [`modify`](crate::generic::Reg::modify). See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [m6_read_qos](m6_read_qos) module"]
pub type M6_READ_QOS = crate::Reg<u32, _M6_READ_QOS>;
#[allow(missing_docs)]
#[doc(hidden)]
pub struct _M6_READ_QOS;
#[doc = "`read()` method returns [m6_read_qos::R](m6_read_qos::R) reader structure"]
impl crate::Readable for M6_READ_QOS {}
#[doc = "`write(|w| ..)` method takes [m6_read_qos::W](m6_read_qos::W) writer structure"]
impl crate::Writable for M6_READ_QOS {}
#[doc = "AXIMC master 6 read priority register"]
pub mod m6_read_qos;
// svd2rust-generated declarations for the read/write AXIMC master 5/6 write
// priority (QoS) registers — write-channel counterparts of M5/M6_READ_QOS.
#[doc = "AXIMC master 5 write priority register\n\nThis register you can [`read`](crate::generic::Reg::read), [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero), [`modify`](crate::generic::Reg::modify). See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [m5_write_qos](m5_write_qos) module"]
pub type M5_WRITE_QOS = crate::Reg<u32, _M5_WRITE_QOS>;
#[allow(missing_docs)]
#[doc(hidden)]
pub struct _M5_WRITE_QOS;
#[doc = "`read()` method returns [m5_write_qos::R](m5_write_qos::R) reader structure"]
impl crate::Readable for M5_WRITE_QOS {}
#[doc = "`write(|w| ..)` method takes [m5_write_qos::W](m5_write_qos::W) writer structure"]
impl crate::Writable for M5_WRITE_QOS {}
#[doc = "AXIMC master 5 write priority register"]
pub mod m5_write_qos;
#[doc = "AXIMC master 6 write priority register\n\nThis register you can [`read`](crate::generic::Reg::read), [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero), [`modify`](crate::generic::Reg::modify). See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [m6_write_qos](m6_write_qos) module"]
pub type M6_WRITE_QOS = crate::Reg<u32, _M6_WRITE_QOS>;
#[allow(missing_docs)]
#[doc(hidden)]
pub struct _M6_WRITE_QOS;
#[doc = "`read()` method returns [m6_write_qos::R](m6_write_qos::R) reader structure"]
impl crate::Readable for M6_WRITE_QOS {}
#[doc = "`write(|w| ..)` method takes [m6_write_qos::W](m6_write_qos::W) writer structure"]
impl crate::Writable for M6_WRITE_QOS {}
#[doc = "AXIMC master 6 write priority register"]
pub mod m6_write_qos;
// svd2rust-generated declarations for the read/write AXIMC master 5/6 issuing
// capability override functionality registers (M5/M6_FN_MOD).
#[doc = "AXIMC master 5 issuing capability override functionality register\n\nThis register you can [`read`](crate::generic::Reg::read), [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero), [`modify`](crate::generic::Reg::modify). See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [m5_fn_mod](m5_fn_mod) module"]
pub type M5_FN_MOD = crate::Reg<u32, _M5_FN_MOD>;
#[allow(missing_docs)]
#[doc(hidden)]
pub struct _M5_FN_MOD;
#[doc = "`read()` method returns [m5_fn_mod::R](m5_fn_mod::R) reader structure"]
impl crate::Readable for M5_FN_MOD {}
#[doc = "`write(|w| ..)` method takes [m5_fn_mod::W](m5_fn_mod::W) writer structure"]
impl crate::Writable for M5_FN_MOD {}
#[doc = "AXIMC master 5 issuing capability override functionality register"]
pub mod m5_fn_mod;
#[doc = "AXIMC master 6 issuing capability override functionality register\n\nThis register you can [`read`](crate::generic::Reg::read), [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero), [`modify`](crate::generic::Reg::modify). See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [m6_fn_mod](m6_fn_mod) module"]
pub type M6_FN_MOD = crate::Reg<u32, _M6_FN_MOD>;
#[allow(missing_docs)]
#[doc(hidden)]
pub struct _M6_FN_MOD;
#[doc = "`read()` method returns [m6_fn_mod::R](m6_fn_mod::R) reader structure"]
impl crate::Readable for M6_FN_MOD {}
#[doc = "`write(|w| ..)` method takes [m6_fn_mod::W](m6_fn_mod::W) writer structure"]
impl crate::Writable for M6_FN_MOD {}
#[doc = "AXIMC master 6 issuing capability override functionality register"]
pub mod m6_fn_mod;
// svd2rust-generated declarations for the read/write AXIMC master 3/4 read
// priority (QoS) registers — same pattern as the other Mx_READ_QOS registers.
#[doc = "AXIMC master 3 read priority register\n\nThis register you can [`read`](crate::generic::Reg::read), [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero), [`modify`](crate::generic::Reg::modify). See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [m3_read_qos](m3_read_qos) module"]
pub type M3_READ_QOS = crate::Reg<u32, _M3_READ_QOS>;
#[allow(missing_docs)]
#[doc(hidden)]
pub struct _M3_READ_QOS;
#[doc = "`read()` method returns [m3_read_qos::R](m3_read_qos::R) reader structure"]
impl crate::Readable for M3_READ_QOS {}
#[doc = "`write(|w| ..)` method takes [m3_read_qos::W](m3_read_qos::W) writer structure"]
impl crate::Writable for M3_READ_QOS {}
#[doc = "AXIMC master 3 read priority register"]
pub mod m3_read_qos;
#[doc = "AXIMC master 4 read priority register\n\nThis register you can [`read`](crate::generic::Reg::read), [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero), [`modify`](crate::generic::Reg::modify). See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [m4_read_qos](m4_read_qos) module"]
pub type M4_READ_QOS = crate::Reg<u32, _M4_READ_QOS>;
#[allow(missing_docs)]
#[doc(hidden)]
pub struct _M4_READ_QOS;
#[doc = "`read()` method returns [m4_read_qos::R](m4_read_qos::R) reader structure"]
impl crate::Readable for M4_READ_QOS {}
#[doc = "`write(|w| ..)` method takes [m4_read_qos::W](m4_read_qos::W) writer structure"]
impl crate::Writable for M4_READ_QOS {}
#[doc = "AXIMC master 4 read priority register"]
pub mod m4_read_qos;
// svd2rust-generated declarations for the read/write AXIMC master 3/4 write
// priority (QoS) registers — write-channel counterparts of M3/M4_READ_QOS.
#[doc = "AXIMC master 3 write priority register\n\nThis register you can [`read`](crate::generic::Reg::read), [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero), [`modify`](crate::generic::Reg::modify). See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [m3_write_qos](m3_write_qos) module"]
pub type M3_WRITE_QOS = crate::Reg<u32, _M3_WRITE_QOS>;
#[allow(missing_docs)]
#[doc(hidden)]
pub struct _M3_WRITE_QOS;
#[doc = "`read()` method returns [m3_write_qos::R](m3_write_qos::R) reader structure"]
impl crate::Readable for M3_WRITE_QOS {}
#[doc = "`write(|w| ..)` method takes [m3_write_qos::W](m3_write_qos::W) writer structure"]
impl crate::Writable for M3_WRITE_QOS {}
#[doc = "AXIMC master 3 write priority register"]
pub mod m3_write_qos;
#[doc = "AXIMC master 4 write priority register\n\nThis register you can [`read`](crate::generic::Reg::read), [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero), [`modify`](crate::generic::Reg::modify). See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [m4_write_qos](m4_write_qos) module"]
pub type M4_WRITE_QOS = crate::Reg<u32, _M4_WRITE_QOS>;
#[allow(missing_docs)]
#[doc(hidden)]
pub struct _M4_WRITE_QOS;
#[doc = "`read()` method returns [m4_write_qos::R](m4_write_qos::R) reader structure"]
impl crate::Readable for M4_WRITE_QOS {}
#[doc = "`write(|w| ..)` method takes [m4_write_qos::W](m4_write_qos::W) writer structure"]
impl crate::Writable for M4_WRITE_QOS {}
#[doc = "AXIMC master 4 write priority register"]
pub mod m4_write_qos;
// svd2rust-generated declarations for the read/write AXIMC master 3/4 FN_MOD
// registers.
// NOTE(review): these two are described as "packing functionality register",
// which is the wording used by the Mx_FN_MOD2 registers elsewhere in this file,
// whereas every other Mx_FN_MOD is "issuing capability override functionality
// register". This looks like a description slip in the vendor SVD; verify
// against the device reference manual before relying on the doc text. The
// strings are kept as generated so the code stays in sync with the SVD.
#[doc = "AXIMC master 3 packing functionality register\n\nThis register you can [`read`](crate::generic::Reg::read), [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero), [`modify`](crate::generic::Reg::modify). See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [m3_fn_mod](m3_fn_mod) module"]
pub type M3_FN_MOD = crate::Reg<u32, _M3_FN_MOD>;
#[allow(missing_docs)]
#[doc(hidden)]
pub struct _M3_FN_MOD;
#[doc = "`read()` method returns [m3_fn_mod::R](m3_fn_mod::R) reader structure"]
impl crate::Readable for M3_FN_MOD {}
#[doc = "`write(|w| ..)` method takes [m3_fn_mod::W](m3_fn_mod::W) writer structure"]
impl crate::Writable for M3_FN_MOD {}
#[doc = "AXIMC master 3 packing functionality register"]
pub mod m3_fn_mod;
#[doc = "AXIMC master 4 packing functionality register\n\nThis register you can [`read`](crate::generic::Reg::read), [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero), [`modify`](crate::generic::Reg::modify). See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [m4_fn_mod](m4_fn_mod) module"]
pub type M4_FN_MOD = crate::Reg<u32, _M4_FN_MOD>;
#[allow(missing_docs)]
#[doc(hidden)]
pub struct _M4_FN_MOD;
#[doc = "`read()` method returns [m4_fn_mod::R](m4_fn_mod::R) reader structure"]
impl crate::Readable for M4_FN_MOD {}
#[doc = "`write(|w| ..)` method takes [m4_fn_mod::W](m4_fn_mod::W) writer structure"]
impl crate::Writable for M4_FN_MOD {}
#[doc = "AXIMC master 4 packing functionality register"]
pub mod m4_fn_mod;
// svd2rust-generated declarations for the read/write AXIMC master 7/8 read
// priority (QoS) registers — same pattern as the other Mx_READ_QOS registers.
#[doc = "AXIMC master 7 read priority register\n\nThis register you can [`read`](crate::generic::Reg::read), [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero), [`modify`](crate::generic::Reg::modify). See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [m7_read_qos](m7_read_qos) module"]
pub type M7_READ_QOS = crate::Reg<u32, _M7_READ_QOS>;
#[allow(missing_docs)]
#[doc(hidden)]
pub struct _M7_READ_QOS;
#[doc = "`read()` method returns [m7_read_qos::R](m7_read_qos::R) reader structure"]
impl crate::Readable for M7_READ_QOS {}
#[doc = "`write(|w| ..)` method takes [m7_read_qos::W](m7_read_qos::W) writer structure"]
impl crate::Writable for M7_READ_QOS {}
#[doc = "AXIMC master 7 read priority register"]
pub mod m7_read_qos;
#[doc = "AXIMC master 8 read priority register\n\nThis register you can [`read`](crate::generic::Reg::read), [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero), [`modify`](crate::generic::Reg::modify). See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [m8_read_qos](m8_read_qos) module"]
pub type M8_READ_QOS = crate::Reg<u32, _M8_READ_QOS>;
#[allow(missing_docs)]
#[doc(hidden)]
pub struct _M8_READ_QOS;
#[doc = "`read()` method returns [m8_read_qos::R](m8_read_qos::R) reader structure"]
impl crate::Readable for M8_READ_QOS {}
#[doc = "`write(|w| ..)` method takes [m8_read_qos::W](m8_read_qos::W) writer structure"]
impl crate::Writable for M8_READ_QOS {}
#[doc = "AXIMC master 8 read priority register"]
pub mod m8_read_qos;
// svd2rust-generated declarations for the read/write AXIMC master 7/8 write
// priority (QoS) registers — write-channel counterparts of M7/M8_READ_QOS.
#[doc = "AXIMC master 7 write priority register\n\nThis register you can [`read`](crate::generic::Reg::read), [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero), [`modify`](crate::generic::Reg::modify). See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [m7_write_qos](m7_write_qos) module"]
pub type M7_WRITE_QOS = crate::Reg<u32, _M7_WRITE_QOS>;
#[allow(missing_docs)]
#[doc(hidden)]
pub struct _M7_WRITE_QOS;
#[doc = "`read()` method returns [m7_write_qos::R](m7_write_qos::R) reader structure"]
impl crate::Readable for M7_WRITE_QOS {}
#[doc = "`write(|w| ..)` method takes [m7_write_qos::W](m7_write_qos::W) writer structure"]
impl crate::Writable for M7_WRITE_QOS {}
#[doc = "AXIMC master 7 write priority register"]
pub mod m7_write_qos;
#[doc = "AXIMC master 8 write priority register\n\nThis register you can [`read`](crate::generic::Reg::read), [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero), [`modify`](crate::generic::Reg::modify). See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [m8_write_qos](m8_write_qos) module"]
pub type M8_WRITE_QOS = crate::Reg<u32, _M8_WRITE_QOS>;
#[allow(missing_docs)]
#[doc(hidden)]
pub struct _M8_WRITE_QOS;
#[doc = "`read()` method returns [m8_write_qos::R](m8_write_qos::R) reader structure"]
impl crate::Readable for M8_WRITE_QOS {}
#[doc = "`write(|w| ..)` method takes [m8_write_qos::W](m8_write_qos::W) writer structure"]
impl crate::Writable for M8_WRITE_QOS {}
#[doc = "AXIMC master 8 write priority register"]
pub mod m8_write_qos;
// svd2rust-generated declarations for the read/write AXIMC master 7/8 issuing
// capability override functionality registers (M7/M8_FN_MOD).
#[doc = "AXIMC master 7 issuing capability override functionality register\n\nThis register you can [`read`](crate::generic::Reg::read), [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero), [`modify`](crate::generic::Reg::modify). See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [m7_fn_mod](m7_fn_mod) module"]
pub type M7_FN_MOD = crate::Reg<u32, _M7_FN_MOD>;
#[allow(missing_docs)]
#[doc(hidden)]
pub struct _M7_FN_MOD;
#[doc = "`read()` method returns [m7_fn_mod::R](m7_fn_mod::R) reader structure"]
impl crate::Readable for M7_FN_MOD {}
#[doc = "`write(|w| ..)` method takes [m7_fn_mod::W](m7_fn_mod::W) writer structure"]
impl crate::Writable for M7_FN_MOD {}
#[doc = "AXIMC master 7 issuing capability override functionality register"]
pub mod m7_fn_mod;
#[doc = "AXIMC master 8 issuing capability override functionality register\n\nThis register you can [`read`](crate::generic::Reg::read), [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero), [`modify`](crate::generic::Reg::modify). See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [m8_fn_mod](m8_fn_mod) module"]
pub type M8_FN_MOD = crate::Reg<u32, _M8_FN_MOD>;
#[allow(missing_docs)]
#[doc(hidden)]
pub struct _M8_FN_MOD;
#[doc = "`read()` method returns [m8_fn_mod::R](m8_fn_mod::R) reader structure"]
impl crate::Readable for M8_FN_MOD {}
#[doc = "`write(|w| ..)` method takes [m8_fn_mod::W](m8_fn_mod::W) writer structure"]
impl crate::Writable for M8_FN_MOD {}
#[doc = "AXIMC master 8 issuing capability override functionality register"]
pub mod m8_fn_mod;
#[doc = "AXIMC long burst capability inhibition register\n\nThis register you can [`read`](crate::generic::Reg::read), [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero), [`modify`](crate::generic::Reg::modify). See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [fn_mod_lb](fn_mod_lb) module"]
pub type FN_MOD_LB = crate::Reg<u32, _FN_MOD_LB>;
#[allow(missing_docs)]
#[doc(hidden)]
pub struct _FN_MOD_LB;
#[doc = "`read()` method returns [fn_mod_lb::R](fn_mod_lb::R) reader structure"]
impl crate::Readable for FN_MOD_LB {}
#[doc = "`write(|w| ..)` method takes [fn_mod_lb::W](fn_mod_lb::W) writer structure"]
impl crate::Writable for FN_MOD_LB {}
#[doc = "AXIMC long burst capability inhibition register"]
pub mod fn_mod_lb;
#[doc = "AXIMC master 9 read priority register\n\nThis register you can [`read`](crate::generic::Reg::read), [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero), [`modify`](crate::generic::Reg::modify). See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [m9_read_qos](m9_read_qos) module"]
pub type M9_READ_QOS = crate::Reg<u32, _M9_READ_QOS>;
#[allow(missing_docs)]
#[doc(hidden)]
pub struct _M9_READ_QOS;
#[doc = "`read()` method returns [m9_read_qos::R](m9_read_qos::R) reader structure"]
impl crate::Readable for M9_READ_QOS {}
#[doc = "`write(|w| ..)` method takes [m9_read_qos::W](m9_read_qos::W) writer structure"]
impl crate::Writable for M9_READ_QOS {}
#[doc = "AXIMC master 9 read priority register"]
pub mod m9_read_qos;
#[doc = "AXIMC master 10 read priority register\n\nThis register you can [`read`](crate::generic::Reg::read), [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero), [`modify`](crate::generic::Reg::modify). See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [m10_read_qos](m10_read_qos) module"]
pub type M10_READ_QOS = crate::Reg<u32, _M10_READ_QOS>;
#[allow(missing_docs)]
#[doc(hidden)]
pub struct _M10_READ_QOS;
#[doc = "`read()` method returns [m10_read_qos::R](m10_read_qos::R) reader structure"]
impl crate::Readable for M10_READ_QOS {}
#[doc = "`write(|w| ..)` method takes [m10_read_qos::W](m10_read_qos::W) writer structure"]
impl crate::Writable for M10_READ_QOS {}
#[doc = "AXIMC master 10 read priority register"]
pub mod m10_read_qos;
#[doc = "AXIMC master 9 write priority register\n\nThis register you can [`read`](crate::generic::Reg::read), [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero), [`modify`](crate::generic::Reg::modify). See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [m9_write_qos](m9_write_qos) module"]
pub type M9_WRITE_QOS = crate::Reg<u32, _M9_WRITE_QOS>;
#[allow(missing_docs)]
#[doc(hidden)]
pub struct _M9_WRITE_QOS;
#[doc = "`read()` method returns [m9_write_qos::R](m9_write_qos::R) reader structure"]
impl crate::Readable for M9_WRITE_QOS {}
#[doc = "`write(|w| ..)` method takes [m9_write_qos::W](m9_write_qos::W) writer structure"]
impl crate::Writable for M9_WRITE_QOS {}
#[doc = "AXIMC master 9 write priority register"]
pub mod m9_write_qos;
#[doc = "AXIMC master 10 write priority register\n\nThis register you can [`read`](crate::generic::Reg::read), [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero), [`modify`](crate::generic::Reg::modify). See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [m10_write_qos](m10_write_qos) module"]
pub type M10_WRITE_QOS = crate::Reg<u32, _M10_WRITE_QOS>;
#[allow(missing_docs)]
#[doc(hidden)]
pub struct _M10_WRITE_QOS;
#[doc = "`read()` method returns [m10_write_qos::R](m10_write_qos::R) reader structure"]
impl crate::Readable for M10_WRITE_QOS {}
#[doc = "`write(|w| ..)` method takes [m10_write_qos::W](m10_write_qos::W) writer structure"]
impl crate::Writable for M10_WRITE_QOS {}
#[doc = "AXIMC master 10 write priority register"]
pub mod m10_write_qos;
#[doc = "AXIMC master 9 issuing capability override functionality register\n\nThis register you can [`read`](crate::generic::Reg::read), [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero), [`modify`](crate::generic::Reg::modify). See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [m9_fn_mod](m9_fn_mod) module"]
pub type M9_FN_MOD = crate::Reg<u32, _M9_FN_MOD>;
#[allow(missing_docs)]
#[doc(hidden)]
pub struct _M9_FN_MOD;
#[doc = "`read()` method returns [m9_fn_mod::R](m9_fn_mod::R) reader structure"]
impl crate::Readable for M9_FN_MOD {}
#[doc = "`write(|w| ..)` method takes [m9_fn_mod::W](m9_fn_mod::W) writer structure"]
impl crate::Writable for M9_FN_MOD {}
#[doc = "AXIMC master 9 issuing capability override functionality register"]
pub mod m9_fn_mod;
#[doc = "AXIMC master 10 issuing capability override functionality register\n\nThis register you can [`read`](crate::generic::Reg::read), [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero), [`modify`](crate::generic::Reg::modify). See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [m10_fn_mod](m10_fn_mod) module"]
pub type M10_FN_MOD = crate::Reg<u32, _M10_FN_MOD>;
#[allow(missing_docs)]
#[doc(hidden)]
pub struct _M10_FN_MOD;
#[doc = "`read()` method returns [m10_fn_mod::R](m10_fn_mod::R) reader structure"]
impl crate::Readable for M10_FN_MOD {}
#[doc = "`write(|w| ..)` method takes [m10_fn_mod::W](m10_fn_mod::W) writer structure"]
impl crate::Writable for M10_FN_MOD {}
#[doc = "AXIMC master 10 issuing capability override functionality register"]
pub mod m10_fn_mod;
|
use std::{
collections::HashMap,
time::{Duration, Instant},
};
use hyper::{body::Buf, client::HttpConnector, Client as HyperClient};
#[cfg(feature = "with-openssl")]
use hyper_openssl::HttpsConnector;
#[cfg(feature = "with-rustls")]
use hyper_rustls::HttpsConnector;
use jsonwebtoken::{Algorithm, DecodingKey, Validation};
use serde::{de::DeserializeOwned, Deserialize};
use crate::{cache_control::CacheControl, token::IdInfo, Error};
use tokio::sync::Mutex;
/// Endpoint serving Google's current OAuth2 public signing certificates.
/// (Dropped the redundant `'static` lifetime — it is implied for consts.)
const CERTS_URL: &str = "https://www.googleapis.com/oauth2/v2/certs";
/// Google ID-token verification client holding an HTTPS client and a
/// lazily refreshed cache of Google's public signing certificates.
pub struct Client {
// HTTPS client used to fetch the certificate list.
hyper: HyperClient<HttpsConnector<HttpConnector>>,
// Cached certificates; the mutex serializes refresh and verification.
certs: Mutex<CachedCerts>,
// Accepted audiences, passed to `Validation::set_audience` during decode.
pub audiences: Vec<String>,
// Accepted hosted domains; presumably checked by `IdInfo::verify` — confirm.
pub hosted_domains: Vec<String>,
}
/// Top-level JSON shape of the certs endpoint response: a list of keys.
#[derive(Deserialize)]
struct CertsObject {
keys: Vec<Cert>,
}
/// One RSA public key from Google's certs endpoint; `n`/`e` are fed to
/// `DecodingKey::from_rsa_components` (base64url-encoded per that API).
#[derive(Deserialize)]
struct Cert {
// Key id, matched against the JWT header's `kid`.
kid: String,
// RSA public exponent.
e: String,
// RSA modulus.
n: String,
}
/// In-memory cache of Google's signing keys, indexed by `kid`.
#[derive(Default)]
pub struct CachedCerts {
keys: HashMap<String, Cert>,
// When the cached set expires (from the response's Cache-Control max-age);
// `None` means never fetched.
expiry: Option<Instant>,
}
impl CachedCerts {
/// Re-downloads the key set when the cache is absent or expired; replaces
/// the whole map on success. `self.expiry` is updated by `get_any` from the
/// response's Cache-Control header.
async fn refresh_if_needed(&mut self, client: &Client) -> Result<(), Error> {
if !self.should_refresh() {
return Ok(());
}
let certs = client
.get_any::<CertsObject>(CERTS_URL, &mut self.expiry)
.await?;
self.keys.clear();
for cert in certs.keys {
self.keys.insert(cert.kid.clone(), cert);
}
Ok(())
}
/// True when no fetch has happened yet or the cached set is stale.
///
/// NOTE(review): `expiry <= now - 10s` keeps serving cached keys for 10
/// seconds *past* expiry; if the intent was to refresh 10 seconds *early*,
/// the offset's sign is inverted — confirm. Also `Instant::now() - Duration`
/// can panic if the clock value underflows (e.g. very early after boot);
/// `Instant::checked_sub` would be safer.
fn should_refresh(&self) -> bool {
match self.expiry {
None => true,
Some(expiry) => expiry <= Instant::now() - Duration::from_secs(10),
}
}
}
impl Default for Client {
fn default() -> Self {
#[cfg(feature = "with-rustls")]
let ssl = HttpsConnector::with_native_roots();
#[cfg(feature = "with-openssl")]
let ssl = HttpsConnector::new().expect("unable to build HttpsConnector");
let client = HyperClient::builder()
.http1_max_buf_size(0x2000)
.pool_max_idle_per_host(0)
.build(ssl);
Client {
hyper: client,
audiences: vec![],
hosted_domains: vec![],
certs: Default::default(),
}
}
}
impl Client {
    /// Verifies that the token is signed by Google's OAuth certificate,
    /// and checks that it has a valid issuer, audience, and hosted domain.
    /// Returns an error if the client has no configured audiences.
    pub async fn verify(&self, id_token: &str) -> Result<IdInfo, Error> {
        let unverified_header = jsonwebtoken::decode_header(&id_token)?;
        // Hold the cache lock for the whole verification so a concurrent
        // refresh cannot swap the key set out from under us mid-check.
        let mut certs = self.certs.lock().await;
        certs.refresh_if_needed(self).await?;
        match unverified_header.kid {
            // The header names its signing key: it must be in Google's set.
            Some(kid) => {
                let cert = certs.keys.get(&kid).ok_or(Error::InvalidKey)?;
                self.verify_single(id_token, cert)
            }
            // No `kid` header: try every cached key, accept the first success.
            None => certs
                .keys
                .values()
                .flat_map(|cert| self.verify_single(id_token, cert))
                .next()
                .ok_or(Error::InvalidToken),
        }
    }

    /// Decodes `id_token` against a single RSA key (RS256, audience checked
    /// by `jsonwebtoken`), then runs the claim-level checks via
    /// `IdInfo::verify`.
    fn verify_single(&self, id_token: &str, cert: &Cert) -> Result<IdInfo, Error> {
        let mut validation = Validation::new(Algorithm::RS256);
        validation.set_audience(&self.audiences);
        let token_data = jsonwebtoken::decode::<IdInfo>(
            &id_token,
            &DecodingKey::from_rsa_components(&cert.n, &cert.e),
            &validation,
        )?;
        token_data.claims.verify(self)?;
        Ok(token_data.claims)
    }

    /// Fetches `url`, deserializes the JSON body into `T`, and records the
    /// response's `Cache-Control: max-age` as an expiry instant in `cache`.
    async fn get_any<T: DeserializeOwned>(
        &self,
        url: &str,
        cache: &mut Option<Instant>,
    ) -> Result<T, Error> {
        // The only caller passes the hardcoded CERTS_URL constant, so a parse
        // failure is a programming error, not a runtime condition.
        let url = url.parse().expect("hardcoded certs URL must be valid");
        // Propagate transport failures instead of panicking (was `.unwrap()`);
        // `Error: From<hyper::Error>` already exists (see `aggregate` below).
        let response = self.hyper.get(url).await?;
        if !response.status().is_success() {
            return Err(Error::InvalidToken);
        }
        if let Some(value) = response.headers().get("Cache-Control") {
            if let Ok(value) = value.to_str() {
                if let Some(cc) = CacheControl::from_value(value) {
                    if let Some(max_age) = cc.max_age {
                        let seconds = max_age.as_secs();
                        *cache = Some(Instant::now() + Duration::from_secs(seconds as u64));
                    }
                }
            }
        }
        let body = hyper::body::aggregate(response).await?;
        Ok(serde_json::from_reader(body.reader())?)
    }
}
|
extern crate wasm_bindgen;
use wasm_bindgen::prelude::*;
extern crate qrcode;
use qrcode::render::svg;
use qrcode::QrCode;
use qrcode::types::QrError;
/// Generate a QR code from the respective data. Returns a string containing the SVG string
/// appropriate to be saved to a file or rendered to a DOM tree.
fn qrcode<T>(data: T, width: u32, height: u32) -> Result<String, QrError>
where
    T: AsRef<[u8]>,
{
    // Error-correction level Q; propagate encoding failures with `?`.
    let code = QrCode::with_error_correction_level(data.as_ref(), qrcode::EcLevel::Q)?;
    let markup = code
        .render::<svg::Color>()
        .max_dimensions(width, height)
        .min_dimensions(width, height)
        .build();
    Ok(markup)
}
/// Generate a QR code for `arg` and return the rendered SVG markup.
///
/// NOTE(review): the previous doc described a raw-pointer protocol with
/// `alloc()`/`free()`, but this signature takes `&str` and returns an owned
/// `String` — `#[wasm_bindgen]` manages memory at the boundary. Rewritten to
/// match the actual interface; confirm the pointer-based ABI is indeed gone.
///
/// On failure the error's display string is returned in place of the SVG.
#[wasm_bindgen]
pub fn qrcode_ffi(arg: &str, width: u32, height: u32) -> String {
match qrcode(arg, width, height) {
Ok(v) => v,
// Since we're on an FFI boundary we can't return strongly typed errors. Instead if we get
// an error from the qrcode generation we return the error string.
Err(e) => format!("{}", e),
}
}
|
//! # SQLite Driver
mod schema;
use crate::driver;
use diesel::prelude::*;
use diesel::r2d2::ConnectionManager;
embed_migrations!("migrations/sqlite");
#[derive(Clone)]
pub struct Driver {
pool: r2d2::Pool<ConnectionManager<SqliteCOnnection>>,
}
type PooledConnection = r2d2::PooledConnection<ConnectionManager<SqliteCOnnection>>;
// TODO(feature): Implement SQLite driver.
impl Driver {
pub fn initialise(database_url: &str) -> Result<Self, driver::Error> {
let manager = ConnectionManager::<SqliteCOnnection>::new(database_url);
let pool = r2d2::Pool::builder()
.build(manager)
.map_err(driver::Error::R2d2)?;
let driver = Driver { pool };
driver.run_migrations()?;
Ok(driver)
}
fn connection(&self) -> Result<PooledConnection, driver::Error> {
self.pool.get().map_err(driver::Error::R2d2)
}
fn run_migrations(&self) -> Result<(), driver::Error> {
let connection = self.connection()?;
embedded_migrations::run(&connection).map_err(driver::Error::DieselMigrations)
}
}
impl driver::Driver for Driver {
    /// Clones this driver behind a trait object (the pool handle itself is
    /// cheaply clonable).
    fn box_clone(&self) -> Box<driver::Driver> {
        Box::new(self.clone())
    }
}
|
use std::io;
/// Reads words from stdin until a blank line or EOF and prints each one
/// translated to Pig Latin.
fn main() {
    println!("Enter the words you want to convert to pig latin.");
    println!("Press Enter or Ctrl-D at empty line to exit.");
    loop {
        let mut word = String::new();
        io::stdin()
            .read_line(&mut word)
            .expect("Failed to read line.");
        // Blank line, or EOF (read_line leaves the buffer empty), ends input.
        if word.trim().is_empty() {
            break;
        }
        // Strip the whole line terminator. The original popped only '\n',
        // which left a stray '\r' behind on Windows-style input.
        while word.ends_with('\n') || word.ends_with('\r') {
            word.pop();
        }
        pig_latin(word);
    }
}
/// Prints the Pig Latin translation of `word`; empty input prints nothing.
fn pig_latin(word: String) {
    if !word.is_empty() {
        println!("{}", to_pig_latin(word));
    }
}

/// Translates one non-empty word to Pig Latin:
/// - leading vowel: append "-hay" ("apple" -> "apple-hay");
/// - leading consonant: move it to the end with "-...ay" ("first" -> "irst-fay");
/// - non-alphabetic start: return the word unchanged.
///
/// Fix: the vowel test now also matches uppercase vowels ("Apple" no longer
/// takes the consonant path).
fn to_pig_latin(mut word: String) -> String {
    let ch = word.remove(0);
    if matches!(ch.to_ascii_lowercase(), 'a' | 'e' | 'i' | 'o' | 'u') {
        word.insert(0, ch);
        word + "-hay"
    } else if ch.is_alphabetic() {
        word.push('-');
        word.push(ch);
        word + "ay"
    } else {
        word.insert(0, ch);
        word
    }
}
|
use gtk::prelude::{
CellLayoutExt, CellRendererExt, CellRendererTextExt, GtkListStoreExtManual, GtkWindowExt,
GtkWindowExtManual, TreeModelExt, TreeViewColumnExt, TreeViewExt, WidgetExt,
};
use gtk::{self, AdjustmentExt, BoxExt, ButtonExt, ContainerExt, LabelExt, ScrolledWindowExt};
use sysinfo::{self, NetworkExt};
use graph::{Connecter, Graph};
use notebook::NoteBook;
use utils::{connect_graph, format_number, format_number_full, get_main_window, RotateVec};
use std::cell::RefCell;
use std::iter;
use std::rc::Rc;
/// Popup window showing live graphs and counters for one network interface.
pub struct NetworkDialog {
// Interface name this dialog was created for.
pub name: String,
// Toplevel window hosting the notebook and the close button.
popup: gtk::Window,
// Graph with four series: incoming/outgoing packets and errors.
packets_errors_history: Rc<RefCell<Graph>>,
// Graph with two series: incoming/outgoing bytes.
in_out_history: Rc<RefCell<Graph>>,
// Highest values seen so far, backing the "... peak" rows of the list.
incoming_peak: Rc<RefCell<u64>>,
outgoing_peak: Rc<RefCell<u64>>,
packets_incoming_peak: Rc<RefCell<u64>>,
packets_outgoing_peak: Rc<RefCell<u64>>,
errors_incoming_peak: Rc<RefCell<u64>>,
errors_outgoing_peak: Rc<RefCell<u64>>,
// Set to true when the window is destroyed (see `need_remove`).
to_be_removed: Rc<RefCell<bool>>,
// Two-column (property, value) model behind the "Information" tab.
list_store: gtk::ListStore,
}
// Pushes `$value` into series `$pos` of graph `$t` and refreshes three rows
// of `$this.list_store` around `$list_pos`:
//   row $list_pos - 2 : current value (always rewritten)
//   row $list_pos - 1 : peak value (rewritten only when a new peak is seen)
//   row $list_pos     : total value
// `$peak` names the `Rc<RefCell<u64>>` field on `$this` tracking the peak;
// `$formatter` turns a u64 into the displayed string.
macro_rules! update_graph {
($this:expr, $t:expr, $pos:expr, $value:expr, $total_value:expr, $peak:ident, $list_pos:expr, $formatter:ident) => {{
$t.data[$pos].move_start();
*$t.data[$pos].get_mut(0).expect("cannot get data 0") = $value as f64;
let mut x = $this.$peak.borrow_mut();
if *x < $value {
*x = $value;
// New peak: rewrite the peak row.
if let Some(iter) = $this.list_store.iter_nth_child(None, $list_pos - 1) {
$this.list_store.set(&iter, &[1], &[&$formatter($value)]);
}
}
// Current-value row, refreshed unconditionally.
if let Some(iter) = $this.list_store.iter_nth_child(None, $list_pos - 2) {
$this.list_store.set(&iter, &[1], &[&$formatter($value)]);
}
// Running-total row.
if let Some(iter) = $this.list_store.iter_nth_child(None, $list_pos) {
$this
.list_store
.set(&iter, &[1], &[&$formatter($total_value)]);
}
}};
}
impl NetworkDialog {
#[allow(clippy::cognitive_complexity)]
/// Feeds the latest sysinfo counters for this interface into both graphs
/// and refreshes the current/peak/total rows of the "Information" tab.
///
/// The hard-coded list positions (8/11/14/17 for packets/errors, 2/5 for
/// bytes) are the row indices of the "total ..." entries, matching the
/// insertion order in `create_network_dialog`.
pub fn update(&self, network: &sysinfo::NetworkData) {
if self.need_remove() {
return;
}
// Packet/error counts are rendered without unit scaling.
fn formatter(value: u64) -> String {
format_number_full(value, false)
}
let mut t = self.packets_errors_history.borrow_mut();
// Series 0: incoming packets (list rows 6..=8).
update_graph!(
self,
t,
0,
network.get_packets_income(),
network.get_total_packets_income(),
packets_incoming_peak,
8,
formatter
);
// Series 1: outgoing packets (list rows 9..=11).
update_graph!(
self,
t,
1,
network.get_packets_outcome(),
network.get_total_packets_outcome(),
packets_outgoing_peak,
11,
formatter
);
// Series 2: incoming errors (list rows 12..=14).
update_graph!(
self,
t,
2,
network.get_errors_income(),
network.get_total_errors_income(),
errors_incoming_peak,
14,
formatter
);
// Series 3: outgoing errors (list rows 15..=17).
update_graph!(
self,
t,
3,
network.get_errors_outcome(),
network.get_total_errors_outcome(),
errors_outgoing_peak,
17,
formatter
);
t.invalidate();
let mut t = self.in_out_history.borrow_mut();
// Bytes in/out use the unit-scaling formatter (list rows 0..=5).
update_graph!(
self,
t,
0,
network.get_income(),
network.get_total_income(),
incoming_peak,
2,
format_number
);
update_graph!(
self,
t,
1,
network.get_outcome(),
network.get_total_outcome(),
outgoing_peak,
5,
format_number
);
t.invalidate();
}
/// Brings the dialog window to the front.
pub fn show(&self) {
self.popup.present();
}
/// True once the window was destroyed; the owner should drop this dialog.
pub fn need_remove(&self) -> bool {
*self.to_be_removed.borrow()
}
}
fn append_text_column(tree: >k::TreeView, title: &str, pos: i32, right_align: bool) {
let column = gtk::TreeViewColumn::new();
let cell = gtk::CellRendererText::new();
if right_align {
cell.set_property_xalign(1.0);
}
column.pack_start(&cell, true);
column.add_attribute(&cell, "text", pos);
if pos == 1 {
cell.set_property_wrap_mode(pango::WrapMode::Char);
column.set_expand(true);
}
column.set_title(title);
column.set_resizable(true);
tree.append_column(&column);
}
/// Builds the per-interface network dialog: a window with a "Graphics" tab
/// (bytes graph + packets/errors graph) and an "Information" tab whose row
/// order must match the positions used in `NetworkDialog::update`.
///
/// Fixed mojibake from an HTML round-trip: `¬ebook` -> `&notebook`
/// (`&not` unescaped to `¬`) and `None::<>k::Adjustment>` ->
/// `None::<&gtk::Adjustment>` (`&gt` unescaped to `>`).
pub fn create_network_dialog(
    network: &sysinfo::NetworkData,
    interface_name: &str,
) -> NetworkDialog {
    let mut notebook = NoteBook::new();
    let popup = gtk::Window::new(gtk::WindowType::Toplevel);
    popup.set_title(&format!("Information about network {}", interface_name));
    popup.set_transient_for(get_main_window().as_ref());
    popup.set_destroy_with_parent(true);
    let close_button = gtk::Button::new_with_label("Close");
    let vertical_layout = gtk::Box::new(gtk::Orientation::Vertical, 0);
    vertical_layout.pack_start(&notebook.notebook, true, true, 0);
    vertical_layout.pack_start(&close_button, false, true, 0);
    popup.add(&vertical_layout);
    //
    // GRAPH TAB
    //
    let vertical_layout = gtk::Box::new(gtk::Orientation::Vertical, 0);
    vertical_layout.set_spacing(5);
    vertical_layout.set_margin_top(10);
    vertical_layout.set_margin_bottom(10);
    vertical_layout.set_margin_start(5);
    vertical_layout.set_margin_end(5);
    let scroll = gtk::ScrolledWindow::new(None::<&gtk::Adjustment>, None::<&gtk::Adjustment>);
    let mut in_out_history = Graph::new(Some(1.), false);
    in_out_history.push(
        RotateVec::new(iter::repeat(0f64).take(61).collect()),
        "incoming",
        None,
    );
    in_out_history.push(
        RotateVec::new(iter::repeat(0f64).take(61).collect()),
        "outgoing",
        None,
    );
    // Axis labels scale the unit with the current maximum (KiB/MiB/GiB/TiB).
    in_out_history.set_label_callbacks(Some(Box::new(|v| {
        if v < 100_000. {
            [
                v.to_string(),
                format!("{}", v / 2.),
                "0".to_string(),
                "KiB".to_string(),
            ]
        } else if v < 10_000_000. {
            [
                format!("{:.1}", v / 1_024f64),
                format!("{:.1}", v / 2_048f64),
                "0".to_string(),
                "MiB".to_string(),
            ]
        } else if v < 10_000_000_000. {
            [
                format!("{:.1}", v / 1_048_576f64),
                format!("{:.1}", v / 2_097_152f64),
                "0".to_string(),
                "GiB".to_string(),
            ]
        } else {
            [
                format!("{:.1}", v / 1_073_741_824f64),
                format!("{:.1}", v / 2_147_483_648f64),
                "0".to_string(),
                "TiB".to_string(),
            ]
        }
    })));
    let label = gtk::Label::new(None);
    label.set_markup("<b>Network usage</b>");
    vertical_layout.add(&label);
    in_out_history.attach_to(&vertical_layout);
    in_out_history.invalidate();
    in_out_history.set_labels_width(120);
    let in_out_history = connect_graph(in_out_history);
    let mut packets_errors_history = Graph::new(Some(1.), false);
    packets_errors_history.push(
        RotateVec::new(iter::repeat(0f64).take(61).collect()),
        "incoming packets",
        None,
    );
    packets_errors_history.push(
        RotateVec::new(iter::repeat(0f64).take(61).collect()),
        "outgoing packets",
        None,
    );
    packets_errors_history.push(
        RotateVec::new(iter::repeat(0f64).take(61).collect()),
        "incoming errors",
        None,
    );
    packets_errors_history.push(
        RotateVec::new(iter::repeat(0f64).take(61).collect()),
        "outgoing errors",
        None,
    );
    // Same idea for counts, with decimal K/M/G/T units.
    packets_errors_history.set_label_callbacks(Some(Box::new(|v| {
        if v < 100_000. {
            [
                v.to_string(),
                format!("{}", v / 2.),
                "0".to_string(),
                "K".to_string(),
            ]
        } else if v < 10_000_000. {
            [
                format!("{:.1}", v / 1_000f64),
                format!("{:.1}", v / 2_000f64),
                "0".to_string(),
                "M".to_string(),
            ]
        } else if v < 10_000_000_000. {
            [
                format!("{:.1}", v / 1_000_000f64),
                format!("{:.1}", v / 2_000_000f64),
                "0".to_string(),
                "G".to_string(),
            ]
        } else {
            [
                format!("{:.1}", v / 1_000_000_000f64),
                format!("{:.1}", v / 2_000_000_000f64),
                "0".to_string(),
                "T".to_string(),
            ]
        }
    })));
    packets_errors_history.set_labels_width(120);
    let label = gtk::Label::new(None);
    label.set_markup("<b>Extra data</b>");
    vertical_layout.add(&label);
    packets_errors_history.attach_to(&vertical_layout);
    packets_errors_history.invalidate();
    let packets_errors_history = connect_graph(packets_errors_history);
    scroll.add(&vertical_layout);
    scroll.connect_show(
        clone!(@weak packets_errors_history, @weak in_out_history => move |_| {
            packets_errors_history.borrow().show_all();
            in_out_history.borrow().show_all();
        }),
    );
    notebook.create_tab("Graphics", &scroll);
    //
    // NETWORK INFO TAB
    //
    // Row order matters: `NetworkDialog::update` addresses rows by index
    // (current/peak/total triplets for each counter).
    let tree = gtk::TreeView::new();
    let list_store = gtk::ListStore::new(&[glib::Type::String, glib::Type::String]);
    tree.set_headers_visible(true);
    tree.set_model(Some(&list_store));
    append_text_column(&tree, "property", 0, false);
    append_text_column(&tree, "value", 1, true);
    list_store.insert_with_values(
        None,
        &[0, 1],
        &[&"incoming", &format_number(network.get_income())],
    );
    // Peak rows start at the current reading; `update` raises them later.
    list_store.insert_with_values(
        None,
        &[0, 1],
        &[&"incoming peak", &format_number(network.get_income())],
    );
    list_store.insert_with_values(
        None,
        &[0, 1],
        &[
            &"total incoming",
            &format_number(network.get_total_income()),
        ],
    );
    list_store.insert_with_values(
        None,
        &[0, 1],
        &[&"outgoing", &format_number(network.get_outcome())],
    );
    list_store.insert_with_values(
        None,
        &[0, 1],
        &[&"outgoing peak", &format_number(network.get_outcome())],
    );
    list_store.insert_with_values(
        None,
        &[0, 1],
        &[
            &"total outgoing",
            &format_number(network.get_total_outcome()),
        ],
    );
    list_store.insert_with_values(
        None,
        &[0, 1],
        &[
            &"packets in",
            &format_number_full(network.get_packets_income(), false),
        ],
    );
    list_store.insert_with_values(
        None,
        &[0, 1],
        &[
            &"packets in peak",
            &format_number(network.get_packets_income()),
        ],
    );
    list_store.insert_with_values(
        None,
        &[0, 1],
        &[
            &"total packets in",
            &format_number_full(network.get_total_packets_income(), false),
        ],
    );
    list_store.insert_with_values(
        None,
        &[0, 1],
        &[
            &"packets out",
            &format_number_full(network.get_packets_outcome(), false),
        ],
    );
    list_store.insert_with_values(
        None,
        &[0, 1],
        &[
            &"packets out peak",
            &format_number(network.get_packets_outcome()),
        ],
    );
    list_store.insert_with_values(
        None,
        &[0, 1],
        &[
            &"total packets out",
            &format_number_full(network.get_total_packets_outcome(), false),
        ],
    );
    list_store.insert_with_values(
        None,
        &[0, 1],
        &[
            &"errors in",
            &format_number_full(network.get_errors_income(), false),
        ],
    );
    list_store.insert_with_values(
        None,
        &[0, 1],
        &[
            &"errors in peak",
            &format_number(network.get_errors_income()),
        ],
    );
    list_store.insert_with_values(
        None,
        &[0, 1],
        &[
            &"total errors in",
            &format_number_full(network.get_total_errors_income(), false),
        ],
    );
    list_store.insert_with_values(
        None,
        &[0, 1],
        &[
            &"errors out",
            &format_number_full(network.get_errors_outcome(), false),
        ],
    );
    list_store.insert_with_values(
        None,
        &[0, 1],
        &[
            &"errors out peak",
            &format_number(network.get_errors_outcome()),
        ],
    );
    list_store.insert_with_values(
        None,
        &[0, 1],
        &[
            &"total errors out",
            &format_number_full(network.get_total_errors_outcome(), false),
        ],
    );
    notebook.create_tab("Information", &tree);
    // To silence the annoying warning:
    // "(.:2257): Gtk-WARNING **: Allocating size to GtkWindow 0x7f8a31038290 without
    // calling gtk_widget_get_preferred_width/height(). How does the code know the size to
    // allocate?"
    popup.get_preferred_width();
    popup.set_size_request(700, 540);
    close_button.connect_clicked(clone!(@weak popup => move |_| {
        popup.destroy();
    }));
    // Mark the dialog for removal once the window is gone.
    let to_be_removed = Rc::new(RefCell::new(false));
    popup.connect_destroy(clone!(@weak to_be_removed => move |_| {
        *to_be_removed.borrow_mut() = true;
    }));
    popup.set_resizable(true);
    popup.show_all();
    // Reset the scroll position to the top after show_all().
    if let Some(adjust) = scroll.get_vadjustment() {
        adjust.set_value(0.);
        scroll.set_vadjustment(Some(&adjust));
    }
    packets_errors_history.connect_to_window_events();
    in_out_history.connect_to_window_events();
    NetworkDialog {
        name: interface_name.to_owned(),
        popup,
        packets_errors_history,
        in_out_history,
        incoming_peak: Rc::new(RefCell::new(network.get_income())),
        outgoing_peak: Rc::new(RefCell::new(network.get_outcome())),
        packets_incoming_peak: Rc::new(RefCell::new(network.get_packets_income())),
        packets_outgoing_peak: Rc::new(RefCell::new(network.get_packets_outcome())),
        errors_incoming_peak: Rc::new(RefCell::new(network.get_errors_income())),
        errors_outgoing_peak: Rc::new(RefCell::new(network.get_errors_outcome())),
        to_be_removed,
        list_store,
    }
}
|
pub mod ident;
pub mod types;
pub mod hash; |
use crate::utils::get_fl_name;
use proc_macro::TokenStream;
use quote::*;
use syn::*;
/// Derive-macro backend: generates `Send`/`Sync`, refcounted `Clone`/`Drop`,
/// and the `ImageExt` implementation for an FLTK image wrapper, forwarding
/// each method to the matching C FFI function (`{type}_draw`, `{type}_width`,
/// ...).
pub fn impl_image_trait(ast: &DeriveInput) -> TokenStream {
    let name = &ast.ident;
    let name_str = get_fl_name(name.to_string());
    let ptr_name = Ident::new(name_str.as_str(), name.span());
    // Builds the identifier of the FFI function `{type}_{suffix}`. Replaces
    // 14 copy-pasted `Ident::new(format!...)` lines; the unused `new`
    // identifier was dropped (the generated code never referenced `#new`).
    let ffi = |suffix: &str| Ident::new(&format!("{}_{}", name_str, suffix), name.span());
    let draw = ffi("draw");
    let width = ffi("width");
    let height = ffi("height");
    let delete = ffi("delete");
    let count = ffi("count");
    let data = ffi("data");
    let copy = ffi("copy");
    let scale = ffi("scale");
    let data_w = ffi("data_w");
    let data_h = ffi("data_h");
    let d = ffi("d");
    let ld = ffi("ld");
    let inactive = ffi("inactive");
    // Renamed `gen` -> `generated`: `gen` is a reserved keyword in the 2024
    // edition.
    let generated = quote! {
        unsafe impl Sync for #name {}
        unsafe impl Send for #name {}
        impl Clone for #name {
            fn clone(&self) -> Self {
                assert!(!self.was_deleted());
                let x = self.refcount.fetch_add(1, Ordering::Relaxed);
                #name { inner: self.inner, refcount: AtomicUsize::new(x + 1) }
            }
        }
        impl Drop for #name {
            fn drop(&mut self) {
                if !self.was_deleted() {
                    self.refcount.fetch_sub(1, Ordering::Relaxed);
                    if *self.refcount.get_mut() < 1 {
                        unsafe {
                            #delete(self.inner);
                        }
                    }
                }
            }
        }
        unsafe impl ImageExt for #name {
            fn copy(&self) -> Self {
                assert!(!self.was_deleted());
                unsafe {
                    let img = #copy(self.inner);
                    assert!(!img.is_null());
                    #name {
                        inner: img,
                        refcount: AtomicUsize::new(1)
                    }
                }
            }
            fn draw(&mut self, arg2: i32, arg3: i32, arg4: i32, arg5: i32) {
                assert!(!self.was_deleted());
                unsafe { #draw(self.inner, arg2, arg3, arg4, arg5) }
            }
            fn width(&self) -> i32 {
                assert!(!self.was_deleted());
                unsafe {
                    #width(self.inner)
                }
            }
            fn height(&self) -> i32 {
                assert!(!self.was_deleted());
                unsafe {
                    #height(self.inner)
                }
            }
            fn w(&self) -> i32 {
                assert!(!self.was_deleted());
                unsafe {
                    #width(self.inner)
                }
            }
            fn h(&self) -> i32 {
                assert!(!self.was_deleted());
                unsafe {
                    #height(self.inner)
                }
            }
            unsafe fn as_image_ptr(&self) -> *mut fltk_sys::image::Fl_Image {
                assert!(!self.was_deleted());
                self.inner as *mut fltk_sys::image::Fl_Image
            }
            unsafe fn from_image_ptr(ptr: *mut fltk_sys::image::Fl_Image) -> Self {
                assert!(!ptr.is_null());
                #name {
                    inner: ptr as *mut #ptr_name,
                    refcount: AtomicUsize::new(1),
                }
            }
            fn to_rgb_data(&self) -> Vec<u8> {
                assert!(!self.was_deleted());
                unsafe {
                    let ptr = #data(self.inner);
                    assert!(!ptr.is_null());
                    assert!(!(*ptr).is_null());
                    let cnt = self.data_w() * self.data_h() * self.depth() as i32;
                    let ret: &[u8] = std::slice::from_raw_parts(*ptr as *const u8, cnt as usize);
                    ret.to_vec()
                }
            }
            fn to_raw_data(&self) -> *const *const u8 {
                assert!(!self.was_deleted());
                unsafe {
                    #data(self.inner) as *const *const u8
                }
            }
            fn to_rgb(&self) -> Result<crate::image::RgbImage, FltkError> {
                assert!(!self.was_deleted());
                let data = self.to_rgb_data();
                unsafe { RgbImage::new(&data, self.data_w(), self.data_h(), self.depth()) }
            }
            fn scale(&mut self, width: i32, height: i32, proportional: bool, can_expand: bool) {
                assert!(!self.was_deleted());
                unsafe {
                    #scale(self.inner, width, height, proportional as i32, can_expand as i32)
                }
            }
            fn count(&self) -> i32 {
                assert!(!self.was_deleted());
                unsafe {
                    #count(self.inner)
                }
            }
            fn data_w(&self) -> i32 {
                assert!(!self.was_deleted());
                unsafe {
                    #data_w(self.inner)
                }
            }
            fn data_h(&self) -> i32 {
                assert!(!self.was_deleted());
                unsafe {
                    #data_h(self.inner)
                }
            }
            fn depth(&self) -> ColorDepth {
                assert!(!self.was_deleted());
                unsafe {
                    mem::transmute(#d(self.inner) as u8)
                }
            }
            fn ld(&self) -> i32 {
                assert!(!self.was_deleted());
                unsafe {
                    #ld(self.inner)
                }
            }
            fn inactive(&mut self) {
                assert!(!self.was_deleted());
                unsafe {
                    #inactive(self.inner)
                }
            }
            unsafe fn delete(mut img: Self) {
                assert!(!img.inner.is_null());
                #delete(img.inner);
                img.inner = std::ptr::null_mut() as *mut #ptr_name;
            }
            unsafe fn increment_arc(&mut self) {
                assert!(!self.was_deleted());
                self.refcount.fetch_add(1, Ordering::Relaxed);
            }
            // NOTE(review): this asserts the refcount stays ABOVE 1 after the
            // decrement, while Drop frees at < 1 — confirm the intended
            // invariant ("the image should outlive the widget").
            unsafe fn decrement_arc(&mut self) {
                assert!(!self.was_deleted());
                self.refcount.fetch_sub(1, Ordering::Relaxed);
                assert!(*self.refcount.get_mut() > 1, "The image should outlive the widget!");
            }
            fn was_deleted(&self) -> bool {
                self.inner.is_null()
            }
            unsafe fn into_image<I: ImageExt>(self) -> I {
                I::from_image_ptr(self.inner as *mut _)
            }
        }
    };
    generated.into()
}
|
// Demonstration of a mutable-borrow conflict: `first` immutably borrows `v`
// while `v.push(6)` needs a mutable borrow.
//
// NOTE(review): with non-lexical lifetimes this snippet actually compiles,
// because `first` is never used after the push, so its borrow ends before
// the mutation. To provoke the advertised E0502 error, use `first` after the
// `push` (e.g. `println!("{}", first);`) — confirm which behavior the
// example is meant to show.
fn main() {
let mut v = vec![1, 2, 3, 4, 5];
let first = &v[0];
v.push(6); // error
}
|
// SPDX-License-Identifier: GPL-2.0
//! An I/O Project: Building a Command Line Program
use super::ch09::Error;
use std::{fs, io::ErrorKind};
/// Config to capture command line arguments for the I/O project.
///
/// # Examples
/// ```
/// use the_book as book;
/// use book::ch09::Error;
/// use book::ch12::Config;
///
/// fn main() -> Result<(), Error> {
/// let args = vec![
/// String::from("crate"),
/// String::from("some query"),
/// String::from("some filename"),
/// ];
/// let config = Config::new(&args)?;
/// assert_eq!("some query", config.query());
/// assert_eq!("some filename", config.filename());
/// Ok(())
/// }
/// ```
#[derive(Debug, PartialEq)]
pub struct Config {
// Search pattern (second CLI argument).
query: String,
// Path of the file to search (third CLI argument).
filename: String,
}
impl Config {
pub fn new(args: &[String]) -> Result<Self, Error> {
if args.len() < 3 {
Err(Error::from(ErrorKind::InvalidInput))
} else {
let query = args[1].clone();
let filename = args[2].clone();
Ok(Self { query, filename })
}
}
pub fn query(&self) -> &str {
&self.query
}
pub fn filename(&self) -> &str {
&self.filename
}
}
/// Reads the file named by `Config::filename` and prints every line that
/// contains `Config::query`, returning any I/O failure through `Error`.
pub fn run(cfg: Config) -> Result<(), Error> {
    let contents = fs::read_to_string(cfg.filename())?;
    search(cfg.query(), &contents)
        .into_iter()
        .for_each(|line| println!("{}", line));
    Ok(())
}
/// Returns every line of `data` that contains `query` (case-sensitive),
/// borrowing the lines from `data`.
///
/// # Examples
/// ```
/// use the_book::ch12::search;
///
/// let data = "\
/// something here,
/// and some there.";
///
/// let query = "some";
/// let want = vec!["something here,", "and some there."];
/// assert_eq!(want, search(query, data));
///
/// let query = "another";
/// let want: Vec<&str> = vec![];
/// assert_eq!(want, search(query, data));
///
/// let query = "Some";
/// let want: Vec<&str> = vec![];
/// assert_eq!(want, search(query, data));
/// ```
pub fn search<'a>(query: &str, data: &'a str) -> Vec<&'a str> {
    data.lines()
        .filter(|line| line.contains(query))
        .collect()
}
#[cfg(test)]
mod tests {
    use super::*;
    use std::io;
    /// Table-driven test of `Config::new` for 0-4 arguments: fewer than
    /// three must fail with `InvalidInput`; extras are ignored.
    #[test]
    fn config_new() {
        struct Test {
            args: Vec<String>,
            want: Result<Config, Error>,
        }
        let tests = [
            Test {
                args: vec![],
                want: Err(Error::Io(io::Error::from(io::ErrorKind::InvalidInput))),
            },
            Test {
                args: vec![String::from("no filename")],
                want: Err(Error::Io(io::Error::from(io::ErrorKind::InvalidInput))),
            },
            Test {
                args: vec![String::from("with query"), String::from("query")],
                want: Err(Error::Io(io::Error::from(io::ErrorKind::InvalidInput))),
            },
            Test {
                args: vec![
                    String::from("with query and filename"),
                    String::from("query"),
                    String::from("filename"),
                ],
                want: Ok(Config {
                    query: String::from("query"),
                    filename: String::from("filename"),
                }),
            },
            Test {
                args: vec![
                    String::from("with more than query and filename"),
                    String::from("query"),
                    String::from("filename"),
                    String::from("another argument"),
                ],
                want: Ok(Config {
                    query: String::from("query"),
                    filename: String::from("filename"),
                }),
            },
        ];
        for t in &tests {
            // Compare Ok against Ok and Err against Err; any mismatch in
            // variant is a test failure.
            match Config::new(&t.args) {
                Ok(got) => {
                    if let Ok(want) = &t.want {
                        assert_eq!(want, &got);
                    } else {
                        panic!("unexpected success");
                    }
                }
                Err(got) => {
                    if let Err(want) = &t.want {
                        assert_eq!(want, &got);
                    } else {
                        panic!("unexpected error");
                    }
                }
            }
        }
    }
    /// `Config::query` returns the stored query verbatim, including empty.
    #[test]
    fn config_query() {
        struct Test {
            config: Config,
            want: &'static str,
        }
        let tests = [
            Test {
                config: Config {
                    query: String::from("some query"),
                    filename: String::from("some file"),
                },
                want: "some query",
            },
            Test {
                config: Config {
                    query: String::from(""),
                    filename: String::from("some filename"),
                },
                want: "",
            },
        ];
        for t in &tests {
            assert_eq!(t.want, t.config.query());
        }
    }
    /// `Config::filename` returns the stored filename verbatim, including empty.
    #[test]
    fn config_filename() {
        struct Test {
            config: Config,
            want: &'static str,
        }
        let tests = [
            Test {
                config: Config {
                    query: String::from("some query"),
                    filename: String::from("some file"),
                },
                want: "some file",
            },
            Test {
                config: Config {
                    query: String::from("some query"),
                    filename: String::from(""),
                },
                want: "",
            },
        ];
        for t in &tests {
            assert_eq!(t.want, t.config.filename());
        }
    }
    /// `search` on empty input yields nothing; otherwise it yields every
    /// line containing the query (the literal below is flush-left on
    /// purpose so the expected lines match exactly).
    #[test]
    fn search_string() {
        struct Test {
            query: &'static str,
            data: &'static str,
            want: Vec<&'static str>,
        }
        let tests = [
            Test {
                query: "",
                data: "",
                want: vec![],
            },
            Test {
                query: "line",
                data: "
This is a line.
Another line.
and another line.
",
                want: vec!["This is a line.", "Another line.", "and another line."],
            },
        ];
        for t in &tests {
            let got = search(t.query, t.data);
            assert_eq!(t.want, got);
        }
    }
}
|
use std::collections::HashMap;
/// Checks API calling code.
///
/// Wraps https://api.slack.com/methods/api.test
// NOTE(review): the `Serialize`/`Deserialize`/`new` derives are presumably
// imported via #[macro_use] at the crate root (serde / derive_new) — the
// imports are not visible in this file.
#[derive(Debug, Clone, Serialize, new)]
pub struct TestRequest<'a> {
    /// Error response to return
    #[new(default)]
    error: Option<&'a str>,
    /// example property to return
    #[new(default)]
    foo: Option<&'a str>,
}
/// Response payload of `api.test`: the server echoes the request arguments.
#[derive(Debug, Clone, Deserialize)]
pub struct TestResponse {
    args: HashMap<String, String>,
}
|
use std::ops::*;
/// 2-component float vector with a C-compatible memory layout.
#[repr(C)]
#[derive(Debug, Copy, Clone)]
pub struct Vec2 { pub x: f32, pub y: f32 }
impl Vec2 {
    /// Creates a vector from its two components.
    pub fn new(x: f32, y: f32) -> Vec2 {
        // field-init shorthand replaces the redundant `x: x, y: y`
        Vec2 { x, y }
    }
    /// The zero vector.
    pub fn zero() -> Vec2 { Vec2::new(0.0, 0.0) }
    /// Builds a vector from the first two elements of `s`.
    ///
    /// # Panics
    /// Panics if `s` has fewer than two elements.
    pub fn from_slice(s: &[f32]) -> Vec2 {
        assert!(s.len() >= 2);
        Vec2::new(s[0], s[1])
    }
    /// Rotates the vector by `a` radians (standard rotation matrix
    /// [c -s; s c], i.e. counter-clockwise when +y points up).
    pub fn rotate(&self, a: f32) -> Vec2 {
        let (s, c) = a.sin_cos();
        Vec2::new((c * self.x) - (s * self.y), (s * self.x) + (c * self.y))
    }
    /// Extends to a 3D vector with the given `z` component.
    pub fn extend(&self, z: f32) -> Vec3 {
        Vec3::new(self.x, self.y, z)
    }
    /// Dot product.
    pub fn dot(&self, other: Vec2) -> f32 {
        self.x * other.x + self.y * other.y
    }
    /// 2D "perpendicular dot" (scalar cross product): x1*y2 - y1*x2.
    pub fn perp_dot(&self, other: Vec2) -> f32 {
        self.x * other.y - self.y * other.x
    }
    /// Euclidean length.
    pub fn length(self) -> f32 {
        self.dot(self).sqrt()
    }
    /// Returns a unit-length copy. A zero-length vector is returned
    /// unchanged: the divisor is clamped to 1.0 to avoid division by zero.
    pub fn normalized(&self) -> Vec2 {
        let mut len = self.length();
        if len <= 0.0 { len = 1.0; }
        Vec2 { x: self.x / len, y: self.y / len }
    }
}
// Component-wise addition.
impl Add<Vec2> for Vec2 {
    type Output = Vec2;
    fn add(self, rhs: Vec2) -> Vec2 {
        Vec2 { x: self.x + rhs.x, y: self.y + rhs.y }
    }
}
// Component-wise subtraction.
impl Sub<Vec2> for Vec2 {
    type Output = Vec2;
    fn sub(self, rhs: Vec2) -> Vec2 {
        Vec2 { x: self.x - rhs.x, y: self.y - rhs.y }
    }
}
// Component-wise (Hadamard) product — NOT the dot product.
impl Mul<Vec2> for Vec2 {
    type Output = Vec2;
    fn mul(self, rhs: Vec2) -> Vec2 {
        Vec2 { x: self.x * rhs.x, y: self.y * rhs.y }
    }
}
// Uniform scaling by a scalar.
impl Mul<f32> for Vec2 {
    type Output = Vec2;
    fn mul(self, rhs: f32) -> Vec2 {
        Vec2 { x: self.x * rhs, y: self.y * rhs }
    }
}
// Component-wise negation.
impl Neg for Vec2 {
    type Output = Vec2;
    fn neg(self) -> Vec2 {
        Vec2 { x: -self.x, y: -self.y }
    }
}
/// 3-component float vector with a C-compatible memory layout.
#[repr(C)]
#[derive(Debug, Copy, Clone)]
pub struct Vec3 { pub x: f32, pub y: f32, pub z: f32 }
impl Vec3 {
    /// Creates a vector from its three components.
    pub fn new(x: f32, y: f32, z: f32) -> Vec3 {
        Vec3 { x, y, z }
    }
    /// Unprojects a window-space point (`self.z` in depth-buffer range
    /// [0, 1]) back through `projection * model_view` into world space.
    ///
    /// NOTE(review): the width/height are computed as
    /// `viewport[2] - viewport[0]` / `viewport[3] - viewport[1]`, so the
    /// viewport is treated as (x0, y0, x1, y1) — confirm callers do not
    /// pass the more common (x, y, w, h) form.
    pub fn unproject(&self, viewport: Vec4, model_view: Mat4, projection: Mat4) -> Vec3 {
        let invpv = (projection * model_view).inverted();
        let w = viewport[2] - viewport[0];
        let h = viewport[3] - viewport[1];
        // Map to normalized device coordinates in [-1, 1]; y is flipped
        // because window coordinates grow downward.
        let x = (2.0 * (self.x - viewport.x) / w) - 1.0;
        let y = -((2.0 * (self.y - viewport.y) / h) - 1.0);
        let z = 2.0 * self.z - 1.0;
        let r_cast = invpv * Vec4::new(x, y, z, 1.0);
        // Perspective divide back to 3D.
        Vec3::new(
            r_cast.x / r_cast.w,
            r_cast.y / r_cast.w,
            r_cast.z / r_cast.w
        )
    }
    /// The zero vector.
    pub fn zero() -> Vec3 { Vec3::new(0.0, 0.0, 0.0) }
    /// Builds a vector from the first three elements of `s`.
    ///
    /// # Panics
    /// Panics if `s` has fewer than three elements.
    pub fn from_slice(s: &[f32]) -> Vec3 {
        assert!(s.len() >= 3);
        Vec3::new(s[0], s[1], s[2])
    }
    /// Extends to a 4D vector with the given `w` component.
    pub fn extend(&self, w: f32) -> Vec4 {
        Vec4::new(self.x, self.y, self.z, w)
    }
    /// Dot product.
    pub fn dot(&self, other: Vec3) -> f32 {
        self.x * other.x + self.y * other.y + self.z * other.z
    }
    /// Cross product.
    pub fn cross(self, other: Vec3) -> Vec3 {
        Vec3 {
            x: self.y * other.z - self.z * other.y,
            y: self.z * other.x - self.x * other.z,
            // BUG FIX: this previously read `self.x * other.y - self.y * self.x`;
            // the z component of a cross product is x1*y2 - y1*x2.
            z: self.x * other.y - self.y * other.x
        }
    }
    /// Euclidean length.
    pub fn length(&self) -> f32 {
        // Vec3 is Copy; deref instead of the former redundant `.clone()`.
        self.dot(*self).sqrt()
    }
    /// Returns a unit-length copy. Unlike `Vec2::normalized`, a zero
    /// vector produces NaN components (division by zero length).
    pub fn normalized(&self) -> Vec3 {
        let len = self.length();
        Vec3 { x: self.x / len, y: self.y / len, z: self.z / len }
    }
}
// Component-wise addition.
impl Add<Vec3> for Vec3 {
    type Output = Vec3;
    fn add(self, rhs: Vec3) -> Vec3 {
        Vec3 { x: self.x + rhs.x, y: self.y + rhs.y, z: self.z + rhs.z }
    }
}
// Component-wise subtraction.
impl Sub<Vec3> for Vec3 {
    type Output = Vec3;
    fn sub(self, rhs: Vec3) -> Vec3 {
        Vec3 { x: self.x - rhs.x, y: self.y - rhs.y, z: self.z - rhs.z }
    }
}
// Component-wise (Hadamard) product — NOT the dot or cross product.
impl Mul<Vec3> for Vec3 {
    type Output = Vec3;
    fn mul(self, rhs: Vec3) -> Vec3 {
        Vec3 { x: self.x * rhs.x, y: self.y * rhs.y, z: self.z * rhs.z }
    }
}
// Uniform scaling by a scalar.
impl Mul<f32> for Vec3 {
    type Output = Vec3;
    fn mul(self, rhs: f32) -> Vec3 {
        Vec3 { x: self.x * rhs, y: self.y * rhs, z: self.z * rhs }
    }
}
// Component-wise negation.
impl Neg for Vec3 {
    type Output = Vec3;
    fn neg(self) -> Vec3 {
        Vec3 { x: -self.x, y: -self.y, z: -self.z }
    }
}
/// 4-component float vector with a C-compatible memory layout.
#[repr(C)]
#[derive(Debug, Copy, Clone)]
pub struct Vec4 { pub x: f32, pub y: f32, pub z: f32, pub w: f32 }
// Numeric component access: 0 => x, 1 => y, 2 => z.
// NOTE(review): any index >= 3 silently aliases `w` instead of panicking.
impl Index<usize> for Vec4 {
    type Output = f32;
    fn index(&self, i: usize) -> &f32 {
        match i {
            0 => { &self.x },
            1 => { &self.y },
            2 => { &self.z },
            _ => { &self.w }
        }
    }
}
// Mutable counterpart of the indexing above (same out-of-range aliasing).
impl IndexMut<usize> for Vec4 {
    fn index_mut(&mut self, i: usize) -> &mut f32 {
        match i {
            0 => { &mut self.x },
            1 => { &mut self.y },
            2 => { &mut self.z },
            _ => { &mut self.w }
        }
    }
}
impl Vec4 {
    /// Creates a vector from its four components.
    pub fn new(x: f32, y: f32, z: f32, w: f32) -> Vec4 {
        // field-init shorthand replaces the redundant `x: x, ...`
        Vec4 { x, y, z, w }
    }
    /// Drops the `w` component (no perspective divide).
    pub fn to_vec3(&self) -> Vec3 {
        Vec3::new(self.x, self.y, self.z)
    }
    /// Builds a vector from the first four elements of `s`.
    ///
    /// # Panics
    /// Panics if `s` has fewer than four elements.
    pub fn from_slice(s: &[f32]) -> Vec4 {
        assert!(s.len() >= 4);
        Vec4::new(s[0], s[1], s[2], s[3])
    }
    /// Dot product.
    pub fn dot(&self, other: Vec4) -> f32 {
        self.x * other.x + self.y * other.y + self.z * other.z + self.w * other.w
    }
    /// Euclidean length.
    pub fn length(&self) -> f32 {
        // Vec4 is Copy; deref instead of the former redundant `.clone()`.
        self.dot(*self).sqrt()
    }
    /// Returns a unit-length copy; a zero vector yields NaN components.
    pub fn normalized(&self) -> Vec4 {
        let len = self.length();
        Vec4 { x: self.x / len, y: self.y / len, z: self.z / len, w: self.w / len }
    }
}
// Component-wise addition.
impl Add<Vec4> for Vec4 {
    type Output = Vec4;
    fn add(self, rhs: Vec4) -> Vec4 {
        Vec4 { x: self.x + rhs.x, y: self.y + rhs.y, z: self.z + rhs.z, w: self.w + rhs.w }
    }
}
// Component-wise subtraction.
impl Sub<Vec4> for Vec4 {
    type Output = Vec4;
    fn sub(self, rhs: Vec4) -> Vec4 {
        Vec4 { x: self.x - rhs.x, y: self.y - rhs.y, z: self.z - rhs.z, w: self.w - rhs.w }
    }
}
// Component-wise (Hadamard) product — NOT the dot product.
impl Mul<Vec4> for Vec4 {
    type Output = Vec4;
    fn mul(self, rhs: Vec4) -> Vec4 {
        Vec4 { x: self.x * rhs.x, y: self.y * rhs.y, z: self.z * rhs.z, w: self.w * rhs.w }
    }
}
// Uniform scaling by a scalar.
impl Mul<f32> for Vec4 {
    type Output = Vec4;
    fn mul(self, rhs: f32) -> Vec4 {
        Vec4 { x: self.x * rhs, y: self.y * rhs, z: self.z * rhs, w: self.w * rhs }
    }
}
// Component-wise negation.
impl Neg for Vec4 {
    type Output = Vec4;
    fn neg(self) -> Vec4 {
        Vec4 { x: -self.x, y: -self.y, z: -self.z, w: -self.w }
    }
}
/// 4x4 float matrix stored row-major as four row vectors.
#[repr(C)]
#[derive(Debug, Copy, Clone)]
pub struct Mat4 { rows: [Vec4; 4] }
// Row access; panics if `i >= 4` (array bounds check).
impl Index<usize> for Mat4 {
    type Output = Vec4;
    fn index(&self, i: usize) -> &Vec4 {
        &self.rows[i]
    }
}
// Mutable row access; panics if `i >= 4`.
impl IndexMut<usize> for Mat4 {
    fn index_mut(&mut self, i: usize) -> &mut Vec4 {
        &mut self.rows[i]
    }
}
impl Mat4 {
    /// Builds a matrix from 16 values given in row-major order.
    pub fn new(m: &[f32; 16]) -> Mat4 {
        Mat4 {
            rows: [
                Vec4::from_slice(&m[0..4]),
                Vec4::from_slice(&m[4..8]),
                Vec4::from_slice(&m[8..12]),
                Vec4::from_slice(&m[12..16])
            ]
        }
    }
    /// Builds a matrix from four row vectors.
    pub fn from_rows(r0: Vec4, r1: Vec4, r2: Vec4, r3: Vec4) -> Mat4 {
        Mat4 { rows: [ r0, r1, r2, r3 ] }
    }
    /// The identity matrix.
    pub fn identity() -> Mat4 {
        Mat4::uniform_scaling(1.0)
    }
    /// Translation by `t`. The offset lives in the last column, matching
    /// the column-vector convention of `Mul<Vec4>`/`Mul<Vec3>` below.
    pub fn translation(t: Vec3) -> Mat4 {
        Mat4::new(&[
            1.0, 0.0, 0.0, t.x,
            0.0, 1.0, 0.0, t.y,
            0.0, 0.0, 1.0, t.z,
            0.0, 0.0, 0.0, 1.0
        ])
    }
    /// Rotation about the x axis by `a` radians.
    pub fn rotation_x(a: f32) -> Mat4 {
        let (s, c) = a.sin_cos();
        Mat4::new(&[
            1.0, 0.0, 0.0, 0.0,
            0.0, c, -s, 0.0,
            0.0, s, c, 0.0,
            0.0, 0.0, 0.0, 1.0
        ])
    }
    /// Rotation about the y axis by `a` radians.
    /// NOTE(review): the sign placement here ([0][2] = -s, [2][0] = s) is
    /// the transpose of the usual right-handed Ry and therefore rotates in
    /// the opposite sense to `rotation_x`/`rotation_z` — confirm intended.
    pub fn rotation_y(a: f32) -> Mat4 {
        let (s, c) = a.sin_cos();
        Mat4::new(&[
            c, 0.0, -s, 0.0,
            0.0, 1.0, 0.0, 0.0,
            s, 0.0, c, 0.0,
            0.0, 0.0, 0.0, 1.0
        ])
    }
    /// Rotation about the z axis by `a` radians.
    pub fn rotation_z(a: f32) -> Mat4 {
        let (s, c) = a.sin_cos();
        Mat4::new(&[
            c, -s, 0.0, 0.0,
            s, c, 0.0, 0.0,
            0.0, 0.0, 1.0, 0.0,
            0.0, 0.0, 0.0, 1.0
        ])
    }
    /// Rotation of `a` radians about an arbitrary `axis` (normalized
    /// internally), using the axis-angle (Rodrigues) matrix form.
    pub fn axis_angle(axis: Vec3, a: f32) -> Mat4 {
        let (s, c) = a.sin_cos();
        let t = 1.0 - c;
        let ax = axis.normalized();
        let x = ax.x;
        let y = ax.y;
        let z = ax.z;
        Mat4::new(&[
            t * x * x + c, t * x * y - z * s, t * x * z + y * s, 0.0,
            t * x * y + z * s, t * y * y + c, t * y * z - x * s, 0.0,
            t * x * z - y * s, t * y * z + x * s, t * z * z + c, 0.0,
            0.0, 0.0, 0.0, 1.0
        ])
    }
    /// Non-uniform scaling by the components of `s`.
    pub fn scaling(s: Vec3) -> Mat4 {
        Mat4::new(&[
            s.x, 0.0, 0.0, 0.0,
            0.0, s.y, 0.0, 0.0,
            0.0, 0.0, s.z, 0.0,
            0.0, 0.0, 0.0, 1.0
        ])
    }
    /// Uniform scaling by `s` on all three axes.
    pub fn uniform_scaling(s: f32) -> Mat4 { Mat4::scaling(Vec3::new(s, s, s)) }
    /// Returns the transpose (rows become columns).
    pub fn transpose(&self) -> Mat4 {
        let [a, b, c, d] = self.rows;
        Mat4::new(&[
            a.x, b.x, c.x, d.x,
            a.y, b.y, c.y, d.y,
            a.z, b.z, c.z, d.z,
            a.w, b.w, c.w, d.w,
        ])
    }
    /// Returns the inverse, computed via cofactors (Cramer's rule).
    ///
    /// A singular matrix divides by a zero determinant and silently
    /// produces inf/NaN entries — no error is reported.
    pub fn inverted(&self) -> Mat4 {
        //
        // Inversion by Cramer's rule. Code taken from an Intel publication
        //
        // `mat` only provides storage: all 16 entries are overwritten below.
        let mut mat = self.clone();
        let mut tmp = [0.0f32; 12];
        let mut src = [0.0f32; 16];
        // Transpose
        for i in 0..4 {
            src[i + 0] = self[i][0];
            src[i + 4] = self[i][1];
            src[i + 8] = self[i][2];
            src[i + 12] = self[i][3];
        }
        // Calculate pairs for first 8 elements (cofactors)
        tmp[0] = src[10] * src[15];
        tmp[1] = src[11] * src[14];
        tmp[2] = src[9] * src[15];
        tmp[3] = src[11] * src[13];
        tmp[4] = src[9] * src[14];
        tmp[5] = src[10] * src[13];
        tmp[6] = src[8] * src[15];
        tmp[7] = src[11] * src[12];
        tmp[8] = src[8] * src[14];
        tmp[9] = src[10] * src[12];
        tmp[10] = src[8] * src[13];
        tmp[11] = src[9] * src[12];
        // Calculate first 8 elements (cofactors)
        mat[0][0] = tmp[0] * src[5] + tmp[3] * src[6] + tmp[4] * src[7];
        mat[0][0] -= tmp[1] * src[5] + tmp[2] * src[6] + tmp[5] * src[7];
        mat[0][1] = tmp[1] * src[4] + tmp[6] * src[6] + tmp[9] * src[7];
        mat[0][1] -= tmp[0] * src[4] + tmp[7] * src[6] + tmp[8] * src[7];
        mat[0][2] = tmp[2] * src[4] + tmp[7] * src[5] + tmp[10] * src[7];
        mat[0][2] -= tmp[3] * src[4] + tmp[6] * src[5] + tmp[11] * src[7];
        mat[0][3] = tmp[5] * src[4] + tmp[8] * src[5] + tmp[11] * src[6];
        mat[0][3] -= tmp[4] * src[4] + tmp[9] * src[5] + tmp[10] * src[6];
        mat[1][0] = tmp[1] * src[1] + tmp[2] * src[2] + tmp[5] * src[3];
        mat[1][0] -= tmp[0] * src[1] + tmp[3] * src[2] + tmp[4] * src[3];
        mat[1][1] = tmp[0] * src[0] + tmp[7] * src[2] + tmp[8] * src[3];
        mat[1][1] -= tmp[1] * src[0] + tmp[6] * src[2] + tmp[9] * src[3];
        mat[1][2] = tmp[3] * src[0] + tmp[6] * src[1] + tmp[11] * src[3];
        mat[1][2] -= tmp[2] * src[0] + tmp[7] * src[1] + tmp[10] * src[3];
        mat[1][3] = tmp[4] * src[0] + tmp[9] * src[1] + tmp[10] * src[2];
        mat[1][3] -= tmp[5] * src[0] + tmp[8] * src[1] + tmp[11] * src[2];
        // Calculate pairs for second 8 elements (cofactors)
        tmp[0] = src[2] * src[7];
        tmp[1] = src[3] * src[6];
        tmp[2] = src[1] * src[7];
        tmp[3] = src[3] * src[5];
        tmp[4] = src[1] * src[6];
        tmp[5] = src[2] * src[5];
        tmp[6] = src[0] * src[7];
        tmp[7] = src[3] * src[4];
        tmp[8] = src[0] * src[6];
        tmp[9] = src[2] * src[4];
        tmp[10] = src[0] * src[5];
        tmp[11] = src[1] * src[4];
        // Calculate second 8 elements (cofactors)
        mat[2][0] = tmp[0] * src[13] + tmp[3] * src[14] + tmp[4] * src[15];
        mat[2][0] -= tmp[1] * src[13] + tmp[2] * src[14] + tmp[5] * src[15];
        mat[2][1] = tmp[1] * src[12] + tmp[6] * src[14] + tmp[9] * src[15];
        mat[2][1] -= tmp[0] * src[12] + tmp[7] * src[14] + tmp[8] * src[15];
        mat[2][2] = tmp[2] * src[12] + tmp[7] * src[13] + tmp[10] * src[15];
        mat[2][2] -= tmp[3] * src[12] + tmp[6] * src[13] + tmp[11] * src[15];
        mat[2][3] = tmp[5] * src[12] + tmp[8] * src[13] + tmp[11] * src[14];
        mat[2][3] -= tmp[4] * src[12] + tmp[9] * src[13] + tmp[10] * src[14];
        mat[3][0] = tmp[2] * src[10] + tmp[5] * src[11] + tmp[1] * src[9];
        mat[3][0] -= tmp[4] * src[11] + tmp[0] * src[9] + tmp[3] * src[10];
        mat[3][1] = tmp[8] * src[11] + tmp[0] * src[8] + tmp[7] * src[10];
        mat[3][1] -= tmp[6] * src[10] + tmp[9] * src[11] + tmp[1] * src[8];
        mat[3][2] = tmp[6] * src[9] + tmp[11] * src[11] + tmp[3] * src[8];
        mat[3][2] -= tmp[10] * src[11] + tmp[2] * src[8] + tmp[7] * src[9];
        mat[3][3] = tmp[10] * src[10] + tmp[4] * src[8] + tmp[9] * src[9];
        mat[3][3] -= tmp[8] * src[9] + tmp[11] * src[10] + tmp[5] * src[8];
        // Calculate determinant
        // (`det` is actually the reciprocal of the determinant)
        let det = 1.0f32 / (src[0] * mat[0][0] + src[1] * mat[0][1] + src[2] * mat[0][2] + src[3] * mat[0][3]);
        for i in 0..4 {
            for j in 0..4 {
                mat[i][j] = mat[i][j] * det;
            }
        }
        mat
    }
    /// Orthographic projection of the box [l,r]x[b,t]x[n,f]
    /// (OpenGL-style: depth maps to [-1, 1]).
    pub fn ortho(l: f32, r: f32, b: f32, t: f32, n: f32, f: f32) -> Mat4 {
        let w = r - l;
        let h = t - b;
        let d = f - n;
        Mat4::new(&[
            2.0 / w, 0.0, 0.0, -(r + l) / w,
            0.0, 2.0 / h, 0.0, -(t + b) / h,
            0.0, 0.0, -2.0 / d, -(f + n) / d,
            0.0, 0.0, 0.0, 1.0,
        ])
    }
    /// Perspective projection; `fov` is the vertical field of view in
    /// radians, `asp` the width/height aspect ratio, `n`/`f` near/far.
    pub fn perspective(fov: f32, asp: f32, n: f32, f: f32) -> Mat4 {
        let cot = 1.0 / (fov / 2.0).tan();
        let d = n - f;
        Mat4::new(&[
            cot / asp, 0.0, 0.0, 0.0,
            0.0, cot, 0.0, 0.0,
            0.0, 0.0, (f + n) / d, (2.0 * f * n) / d,
            0.0, 0.0, -1.0, 0.0
        ])
    }
    /// View matrix looking from `eye` toward `at` with the given `up` hint.
    /// NOTE(review): only the third column of the basis rows is negated and
    /// the translation is applied on the left (`T(-eye) * R`), which differs
    /// from the conventional `R * T(-eye)` look-at — verify against callers.
    pub fn look_at(eye: Vec3, at: Vec3, up: Vec3) -> Mat4 {
        let z = (eye - at).normalized();
        let x = up.cross(z).normalized();
        let y = z.cross(x);
        let R = Mat4::new(&[
            x.x, x.y, -x.z, 0.0,
            y.x, y.y, -y.z, 0.0,
            z.x, z.y, -z.z, 0.0,
            0.0, 0.0, 0.0, 1.0
        ]);
        Mat4::translation(-eye) * R
    }
    /// Raw pointer to the 16 floats in row-major order (for graphics APIs).
    pub fn as_ptr(&self) -> *const f32 {
        &self.rows[0][0]
    }
}
// Matrix product. `rhs` is transposed first so every output element is a
// row-by-row dot product.
impl Mul<Mat4> for Mat4 {
    type Output = Mat4;
    fn mul(self, rhs: Mat4) -> Mat4 {
        let mut d = [0.0f32; 16];
        let ot = rhs.transpose();
        for j in 0..4 {
            for i in 0..4 {
                d[i + j * 4] = self.rows[j].dot(ot.rows[i]);
            }
        }
        Mat4::new(&d)
    }
}
// Transforms a column vector: result[i] = row[i] . rhs.
impl Mul<Vec4> for Mat4 {
    type Output = Vec4;
    fn mul(self, rhs: Vec4) -> Vec4 {
        Vec4::new(
            self.rows[0].dot(rhs),
            self.rows[1].dot(rhs),
            self.rows[2].dot(rhs),
            self.rows[3].dot(rhs),
        )
    }
}
// Affine point transform: extends rhs with w = 1 and drops the result's
// fourth component (no perspective divide).
impl Mul<Vec3> for Mat4 {
    type Output = Vec3;
    fn mul(self, rhs: Vec3) -> Vec3 {
        let v = rhs.extend(1.0);
        Vec3::new(
            self.rows[0].dot(v),
            self.rows[1].dot(v),
            self.rows[2].dot(v)
        )
    }
}
// Scales every matrix element by a scalar.
impl Mul<f32> for Mat4 {
    type Output = Mat4;
    fn mul(self, rhs: f32) -> Mat4 {
        Mat4::from_rows(
            self.rows[0] * rhs,
            self.rows[1] * rhs,
            self.rows[2] * rhs,
            self.rows[3] * rhs
        )
    }
}
//! In this example we will implement something similar
//! to `kubectl get all --all-namespaces`.
use k8s_openapi::apimachinery::pkg::apis::meta::v1::APIResourceList;
use kube::{
api::{Api, DynamicObject, GroupVersionKind, ResourceExt},
Client,
};
use log::{info, warn};
#[tokio::main]
async fn main() -> anyhow::Result<()> {
    std::env::set_var("RUST_LOG", "info,kube=debug");
    env_logger::init();
    let client = Client::try_default().await?;
    let v = client.apiserver_version().await?;
    info!("api version: {:?}", v);
    // Optional namespace filter taken from the NAMESPACE env var.
    let ns_filter = std::env::var("NAMESPACE").ok();
    // The following loops turn the /api or /apis listers into kube::api::Resource
    // objects which can be used to make dynamic api calls.
    // This is slightly awkward because of corev1 types
    // and data split over the list types and the inner get calls.
    // loop over all api groups (except core v1)
    let apigroups = client.list_api_groups().await?;
    for g in apigroups.groups {
        warn!("api group: {}", g.name);
        // Prefer the server's preferred version; fall back to the first listed.
        let ver = g
            .preferred_version
            .as_ref()
            .or_else(|| g.versions.first())
            .expect("preferred or versions exists");
        let apis = client.list_api_group_resources(&ver.group_version).await?;
        print_group(&client, &ver.group_version, apis, ns_filter.as_deref()).await?;
    }
    // The legacy core/v1 group is not part of list_api_groups and must be
    // queried through the dedicated core endpoints.
    warn!("core/v1 legacy group");
    let coreapis = client.list_core_api_versions().await?;
    assert_eq!(coreapis.versions.len(), 1);
    let corev1 = client.list_core_api_resources(&coreapis.versions[0]).await?;
    print_group(&client, &coreapis.versions[0], corev1, ns_filter.as_deref()).await
}
/// Lists and logs every listable resource kind in `apis`, optionally
/// restricted to the namespace in `ns_filter`.
async fn print_group(
    client: &Client,
    group_version: &str,
    apis: APIResourceList,
    ns_filter: Option<&str>,
) -> anyhow::Result<()> {
    for ar in apis.resources {
        // Skip kinds that cannot be listed (e.g. subresources).
        if !ar.verbs.contains(&"list".to_string()) {
            continue;
        }
        let gvk = GroupVersionKind::from_api_resource(&ar, &apis.group_version);
        // Namespace scoping only applies to namespaced resource kinds.
        let api: Api<DynamicObject> = if ar.namespaced {
            if let Some(ns) = ns_filter {
                Api::namespaced_with(client.clone(), ns, &gvk)
            } else {
                Api::all_with(client.clone(), &gvk)
            }
        } else {
            Api::all_with(client.clone(), &gvk)
        };
        let list = api.list(&Default::default()).await?;
        info!("{} : {}", group_version, ar.kind);
        for item in list.items {
            let name = item.name();
            // Prefix namespaced objects with "namespace/".
            let ns = item.metadata.namespace.map(|s| s + "/").unwrap_or_default();
            info!("\t\t{}{}", ns, name);
        }
    }
    Ok(())
}
|
use feature::Feature;
/// A collection of features, kept in insertion order.
pub struct Suite {
    features: Vec<Feature>,
}
impl Suite {
    /// Appends `feature` to the end of the suite.
    pub fn add_feature(&mut self, feature: Feature) {
        self.features.push(feature);
    }
}
|
#![allow(clippy::collapsible_match)]
#![allow(clippy::single_match)]
use std::cell::RefCell;
mod frame_counter;
mod graphics;
mod shader_compilation;
mod vertex;
use graphics::GraphicsState;
fn main() {
    // Logging setup: everything to stdout, with noisy graphics backends
    // silenced or reduced.
    {
        fern::Dispatch::new()
            .format(|out, message, record| {
                out.finish(format_args!(
                    "[{}] [{}] [{}] : {}",
                    chrono::Local::now().format("%H:%M:%S.%3f"),
                    record.target(),
                    record.level(),
                    message
                ))
            })
            .level(log::LevelFilter::Trace)
            .level_for("naga", log::LevelFilter::Error)
            // .level_for("gfx_backend_vulkan", log::LevelFilter::Warn)
            .level_for("gfx_backend_vulkan", log::LevelFilter::Off)
            .chain(std::io::stdout())
            .apply()
            .unwrap();
    } // fern::Dispatch::new()
    let event_loop = winit::event_loop::EventLoop::new();
    let window = winit::window::WindowBuilder::new()
        .build(&event_loop)
        .unwrap();
    // The imgui context/platform are deliberately leaked (Box::leak) to get
    // 'static references that the renderers and the 'static event-loop
    // closure can both hold.
    let (imgui_context, imgui_platform) = {
        let context = Box::leak(Box::new(RefCell::new(imgui::Context::create())));
        let platform = Box::leak(Box::new(RefCell::new(
            imgui_winit_support::WinitPlatform::init(&mut *context.borrow_mut()),
        )));
        platform.borrow_mut().attach_window(
            context.borrow_mut().io_mut(),
            &window,
            imgui_winit_support::HiDpiMode::Default,
        );
        (context, platform)
    };
    let mut graphics_state = GraphicsState::new(window, imgui_context);
    let mut view = graphics::SfView {
        center: (500.0, 500.0),
        size: (1000.0, 1000.0),
        rotation: 0.0,
    };
    let mut renderer_with_view = graphics::renderers::RendererWithView::new(&mut graphics_state);
    let mut renderer_simple_triangle =
        graphics::renderers::RendererSimpleTriangle::new(&mut graphics_state);
    let mut renderer_glyph = graphics::renderers::RendererGlyph::new(&graphics_state);
    let mut renderer_imgui =
        graphics::renderers::RendererImgui::new(&graphics_state, imgui_context, imgui_platform);
    event_loop.run(move |event, _, control_flow| {
        use winit::event::*;
        match event {
            Event::WindowEvent {
                ref event,
                window_id,
            } if window_id == graphics_state.window.id() => {
                match event {
                    WindowEvent::CloseRequested => {
                        *control_flow = winit::event_loop::ControlFlow::Exit
                    }
                    // Keyboard camera controls: shift/ctrl zoom, Q/E rotate,
                    // WASD / arrows pan.
                    WindowEvent::KeyboardInput { input, .. } => match input {
                        KeyboardInput {
                            state: ElementState::Pressed,
                            virtual_keycode: Some(keycode),
                            ..
                        } => {
                            //
                            match keycode {
                                VirtualKeyCode::LShift => {
                                    view.size.0 += 10.0;
                                    view.size.1 += 10.0;
                                }
                                VirtualKeyCode::LControl => {
                                    view.size.0 -= 10.0;
                                    view.size.1 -= 10.0;
                                }
                                VirtualKeyCode::E => {
                                    view.rotation -= 5.0;
                                }
                                VirtualKeyCode::Q => {
                                    view.rotation += 5.0;
                                }
                                VirtualKeyCode::W | VirtualKeyCode::Up => {
                                    view.center.1 += 5.0;
                                }
                                VirtualKeyCode::A | VirtualKeyCode::Left => {
                                    view.center.0 += 5.0;
                                }
                                VirtualKeyCode::S | VirtualKeyCode::Down => {
                                    view.center.1 -= 5.0;
                                }
                                VirtualKeyCode::D | VirtualKeyCode::Right => {
                                    view.center.0 -= 5.0;
                                }
                                _ => {}
                            }
                        }
                        _ => {}
                    },
                    WindowEvent::Resized(..) => {
                        graphics_state.resize();
                    }
                    WindowEvent::ScaleFactorChanged { .. } => {
                        // new_inner_size is &&mut so we have to dereference it twice
                        graphics_state.resize();
                    }
                    _ => {}
                }
            }
            // Recreate the swap chain when lost, bail out when out of
            // memory, otherwise render all passes into the current frame.
            Event::RedrawRequested(_) => match graphics_state.begin_current_frame() {
                Err(wgpu::SwapChainError::Lost) => graphics_state.resize(),
                Err(wgpu::SwapChainError::OutOfMemory) => {
                    *control_flow = winit::event_loop::ControlFlow::Exit
                }
                Err(e) => log::warn!("{:?}", e),
                Ok(mut current_frame) => {
                    renderer_with_view.draw(&mut current_frame, &view);
                    renderer_simple_triangle.draw(&mut current_frame);
                    renderer_glyph.draw(&mut current_frame);
                    renderer_imgui.draw(&mut current_frame);
                    current_frame.finish_and_present();
                }
            },
            Event::MainEventsCleared => {
                // incoming networking here
                // updating + physics here
                // outgoing networking again here?
                // draw:
                graphics_state.window.request_redraw();
                // std::thread::sleep(std::time::Duration::from_micros(1));
            }
            _ => {}
        }
        // graphics_state.input(&event);
        // imgui needs to observe every event to track input state.
        renderer_imgui.imgui_platform.borrow_mut().handle_event(
            renderer_imgui.imgui_context.borrow_mut().io_mut(),
            &graphics_state.window,
            &event,
        );
    });
}
|
/*
* Copyright (c) Facebook, Inc. and its affiliates.
*
* This source code is licensed under both the MIT license found in the
* LICENSE-MIT file in the root directory of this source tree and the Apache
* License, Version 2.0 found in the LICENSE-APACHE file in the root directory
* of this source tree.
*/
use super::{
common::{Job, JobResult},
Iter,
};
use futures::{stream::FuturesUnordered, try_ready, Async, Future, IntoFuture, Poll, Stream};
use std::collections::{HashMap, VecDeque};
/// `bounded_traversal` traverses implicit asynchronous tree specified by `init`
/// and `unfold` arguments, and it also does backward pass with `fold` operation.
/// All `unfold` and `fold` operations are executed in parallel if they do not
/// depend on each other (not related by ancestor-descendant relation in implicit tree)
/// with amount of concurrency constrained by `scheduled_max`.
///
/// ## `init: In`
/// Is the root of the implicit tree to be traversed
///
/// ## `unfold: FnMut(In) -> impl IntoFuture<Item = (OutCtx, impl IntoIterator<Item = In>)>`
/// Asynchronous function which given input value produces list of its children. And context
/// associated with current node. If this list is empty, it is a leaf of the tree, and `fold`
/// will be run on this node.
///
/// ## `fold: FnMut(OutCtx, impl Iterator<Out>) -> impl IntoFuture<Item=Out>`
/// Asynchronous function which given node context and output of `fold` for its children
/// should produce new output value.
///
/// ## return value `impl Future<Item = Out>`
/// Result of running fold operation on the root of the tree.
///
pub fn bounded_traversal<In, Ins, Out, OutCtx, Unfold, UFut, Fold, FFut>(
    scheduled_max: usize,
    init: In,
    unfold: Unfold,
    fold: Fold,
) -> impl Future<Item = Out, Error = UFut::Error>
where
    Unfold: FnMut(In) -> UFut,
    UFut: IntoFuture<Item = (OutCtx, Ins)>,
    Ins: IntoIterator<Item = In>,
    Fold: FnMut(OutCtx, Iter<Out>) -> FFut,
    FFut: IntoFuture<Item = Out, Error = UFut::Error>,
{
    BoundedTraversal::new(scheduled_max, init, unfold, fold)
}
// execution tree node
struct Node<Out, OutCtx> {
    parent: NodeLocation,       // location of this node relative to its parent
    context: OutCtx,            // context associated with node
    children: Vec<Option<Out>>, // results of children folds
    children_left: usize,       // number of unresolved children
}
/// Index of a node in the execution tree; index 0 is reserved as the
/// virtual parent of the root (see `poll`).
#[derive(Clone, Copy, Eq, PartialEq, Hash)]
struct NodeIndex(usize);
type NodeLocation = super::common::NodeLocation<NodeIndex>;
/// Driver future behind [`bounded_traversal`]: runs up to `scheduled_max`
/// unfold/fold jobs concurrently while tracking the execution tree.
#[must_use = "futures do nothing unless polled"]
struct BoundedTraversal<Out, OutCtx, Unfold, UFut, Fold, FFut>
where
    UFut: IntoFuture,
    FFut: IntoFuture,
{
    unfold: Unfold,
    fold: Fold,
    scheduled_max: usize,
    scheduled: FuturesUnordered<Job<NodeLocation, UFut::Future, FFut::Future>>, // jobs being executed
    unscheduled: VecDeque<Job<NodeLocation, UFut::Future, FFut::Future>>, // as of yet unscheduled jobs
    execution_tree: HashMap<NodeIndex, Node<Out, OutCtx>>, // tree tracking execution process
    execution_tree_index: NodeIndex, // last allocated node index
}
impl<In, Ins, Out, OutCtx, Unfold, UFut, Fold, FFut>
    BoundedTraversal<Out, OutCtx, Unfold, UFut, Fold, FFut>
where
    Unfold: FnMut(In) -> UFut,
    UFut: IntoFuture<Item = (OutCtx, Ins)>,
    Ins: IntoIterator<Item = In>,
    Fold: FnMut(OutCtx, Iter<Out>) -> FFut,
    FFut: IntoFuture<Item = Out, Error = UFut::Error>,
{
    /// Creates the traversal state and queues an unfold of the root.
    /// The root's "parent" uses the sentinel node index 0.
    fn new(scheduled_max: usize, init: In, unfold: Unfold, fold: Fold) -> Self {
        let mut this = Self {
            unfold,
            fold,
            scheduled_max,
            scheduled: FuturesUnordered::new(),
            unscheduled: VecDeque::new(),
            execution_tree: HashMap::new(),
            execution_tree_index: NodeIndex(0),
        };
        this.enqueue_unfold(
            NodeLocation {
                node_index: NodeIndex(0),
                child_index: 0,
            },
            init,
        );
        this
    }
    /// Queues an unfold of `value`; its result will be attached at `parent`.
    fn enqueue_unfold(&mut self, parent: NodeLocation, value: In) {
        self.unscheduled.push_front(Job::Unfold {
            value: parent,
            future: (self.unfold)(value).into_future(),
        });
    }
    /// Queues a fold of `children` under `context`, reporting to `parent`.
    fn enqueue_fold(&mut self, parent: NodeLocation, context: OutCtx, children: Iter<Out>) {
        self.unscheduled.push_front(Job::Fold {
            value: parent,
            future: (self.fold)(context, children).into_future(),
        });
    }
    /// Handles a finished unfold: allocates a tree node and queues an
    /// unfold for each child; a childless (leaf) node folds right away.
    fn process_unfold(&mut self, parent: NodeLocation, (context, children): UFut::Item) {
        // allocate index
        self.execution_tree_index = NodeIndex(self.execution_tree_index.0 + 1);
        let node_index = self.execution_tree_index;
        // schedule unfold for node's children
        // (the fold accumulator doubles as the running child index/count)
        let count = children.into_iter().fold(0, |child_index, child| {
            self.enqueue_unfold(
                NodeLocation {
                    node_index,
                    child_index,
                },
                child,
            );
            child_index + 1
        });
        if count != 0 {
            // allocate node
            let mut children = Vec::new();
            children.resize_with(count, || None);
            self.execution_tree.insert(
                node_index,
                Node {
                    parent,
                    context,
                    children,
                    children_left: count,
                },
            );
        } else {
            // leaf node schedules fold for itself immediately
            self.enqueue_fold(parent, context, Vec::new().into_iter().flatten());
        }
    }
    /// Handles a finished fold: records the child result on its parent
    /// node and, once the last child resolves, queues the parent's fold.
    fn process_fold(&mut self, parent: NodeLocation, result: Out) {
        // update parent
        let node = self
            .execution_tree
            .get_mut(&parent.node_index)
            .expect("fold referenced invalid node");
        debug_assert!(node.children[parent.child_index].is_none());
        node.children[parent.child_index] = Some(result);
        node.children_left -= 1;
        if node.children_left == 0 {
            // all parents children have been completed, so we need
            // to schedule fold operation for it
            let Node {
                parent,
                context,
                children,
                ..
            } = self
                .execution_tree
                .remove(&parent.node_index)
                .expect("fold referenced invalid node");
            self.enqueue_fold(parent, context, children.into_iter().flatten());
        }
    }
}
impl<In, Ins, Out, OutCtx, Unfold, UFut, Fold, FFut> Future
    for BoundedTraversal<Out, OutCtx, Unfold, UFut, Fold, FFut>
where
    Unfold: FnMut(In) -> UFut,
    UFut: IntoFuture<Item = (OutCtx, Ins)>,
    Ins: IntoIterator<Item = In>,
    Fold: FnMut(OutCtx, Iter<Out>) -> FFut,
    FFut: IntoFuture<Item = Out, Error = UFut::Error>,
{
    type Item = Out;
    type Error = UFut::Error;
    fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
        loop {
            // schedule as many jobs as possible
            // (`scheduled` never holds more than `scheduled_max` jobs, so
            // the subtraction below cannot underflow)
            for job in self.unscheduled.drain(
                ..std::cmp::min(
                    self.unscheduled.len(),
                    self.scheduled_max - self.scheduled.len(),
                ),
            ) {
                self.scheduled.push(job);
            }
            // execute scheduled until it is blocked or done
            // NOTE(review): an empty `scheduled` set polls Ready(None) and
            // loops again; this relies on work always being queued until
            // the root fold completes — confirm that invariant upstream.
            if let Some(job_result) = try_ready!(self.scheduled.poll()) {
                match job_result {
                    JobResult::Unfold { value, result } => self.process_unfold(value, result),
                    JobResult::Fold { value, result } => {
                        // `0` is special index which means whole tree have been executed
                        if value.node_index == NodeIndex(0) {
                            // all jobs have to be completed and execution_tree empty
                            assert!(self.execution_tree.is_empty());
                            assert!(self.unscheduled.is_empty());
                            assert!(self.scheduled.is_empty());
                            return Ok(Async::Ready(result));
                        }
                        self.process_fold(value, result);
                    }
                }
            }
        }
    }
}
|
use crate::huffman::Huffman;
use crate::lz77::LZ77;
/// Two-stage DEFLATE-style codec: LZ77 followed by Huffman coding.
#[derive(Debug)]
pub struct Deflate {
    // Stateful LZ77 stage; `compress` stores the encoded stream in it.
    lz77: LZ77,
}
impl Deflate {
    /// Creates a codec with the given LZ77 window and dictionary sizes.
    pub fn new(window_size: usize, dictionary_size: usize) -> Deflate {
        Deflate {
            lz77: LZ77::new(window_size, dictionary_size),
        }
    }
    /// Compresses `data`: LZ77-encodes it, flattens the tokens into bytes,
    /// then Huffman-encodes the resulting byte string.
    pub fn compress<S>(&mut self, data: S) -> String
    where
        S: Sized + ToString,
    {
        self.lz77.encode(data);
        let mut huffman = Huffman::new();
        // Flatten LZ77 tokens to raw bytes, then reinterpret each byte as a
        // char so the Huffman stage can consume a String.
        let lz77_vec: Vec<u8> = self.lz77.iter().map(|n| n.to_vec_u8()).flatten().collect();
        let lz77_string: String = lz77_vec.iter().map(|v| *v as char).collect();
        huffman.encode(&lz77_string)
    }
    /// Decompresses `data`.
    ///
    /// NOTE(review): the Huffman-decoded text is computed but never fed to
    /// the LZ77 stage — `self.lz77.decode()` only replays state left by a
    /// previous `compress` call. Verify this round-trips data that did not
    /// just pass through this same instance.
    pub fn decompress<S>(&self, data: S) -> String
    where
        S: Sized + ToString,
    {
        let mut huffman = Huffman::new();
        let huffman_decoded = huffman.decode(&data);
        self.lz77.decode()
    }
}
|
use crate::errors::validation::{auth::ErrorVariants, ValidationError};
use crate::models::user::UserInsert;
impl super::Validate for UserInsert {
    /// Validates a user registration request body.
    ///
    /// Returns `Some(ValidationError)` for the first failing check, in
    /// order: displayname length, username length, email length, email
    /// format, password strength. Returns `None` when all checks pass.
    fn validate(&self) -> Option<ValidationError> {
        // NOTE(review): `len()` counts bytes, not characters, so multi-byte
        // UTF-8 input reaches these limits sooner than its character count.
        let dn_len = self.displayname.len();
        let un_len = self.username.len();
        let em_len = self.email.len();
        Some(ErrorVariants::to_validation_error(
            // displayname and username must be 3..=128 bytes
            if dn_len <= 2 || dn_len > 128 {
                ErrorVariants::DisplaynameLength
            } else if un_len <= 2 || un_len > 128 {
                ErrorVariants::UsernameLength
            // email must be 6..=256 bytes
            } else if em_len < 6 || em_len > 256 {
                ErrorVariants::EmailLength
            } else if !super::EMAIL_VALIDATOR.is_match(&self.email).unwrap() {
                ErrorVariants::EmailInvalid
            } else if !super::PASSWORD_VALIDATOR.is_match(&self.password).unwrap() {
                ErrorVariants::PasswordWeak
            } else {
                // every check passed: short-circuit out of the Some(...)
                return None;
            },
        ))
    }
}
|
//! Wrappers that abstracts references (or pointers) and owned data accesses.
// The serialization is towards owned, allowing to serialize pointers without troubles.
use alloc::{boxed::Box, vec::Vec};
use core::{clone::Clone, fmt::Debug};
use serde::{Deserialize, Deserializer, Serialize, Serializer};
/// Trait to convert into an Owned type
pub trait IntoOwned {
    /// Returns if the current type is an owned type.
    #[must_use]
    fn is_owned(&self) -> bool;
    /// Transfer the current type into an owned type.
    /// (The return type is `Self`, so implementors switch to their own
    /// owned variant rather than to a different type.)
    #[must_use]
    fn into_owned(self) -> Self;
}
/// Wrap a reference and convert to a [`Box`] on serialize
#[derive(Clone, Debug)]
pub enum OwnedRef<'a, T>
where
    T: 'a + ?Sized,
{
    /// A ref to a type
    Ref(&'a T),
    /// An owned [`Box`] of a type
    Owned(Box<T>),
}
// Both variants serialize as the underlying value, so the wrapper is
// transparent on the wire.
impl<'a, T> Serialize for OwnedRef<'a, T>
where
    T: 'a + ?Sized + Serialize,
{
    fn serialize<S>(&self, se: S) -> Result<S::Ok, S::Error>
    where
        S: Serializer,
    {
        match self {
            OwnedRef::Ref(r) => r.serialize(se),
            OwnedRef::Owned(b) => b.serialize(se),
        }
    }
}
// Deserialization always produces the `Owned` variant (there is nothing
// to borrow from after deserializing).
impl<'de, 'a, T> Deserialize<'de> for OwnedRef<'a, T>
where
    T: 'a + ?Sized,
    Box<T>: Deserialize<'de>,
{
    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
    where
        D: Deserializer<'de>,
    {
        Deserialize::deserialize(deserializer).map(OwnedRef::Owned)
    }
}
// Borrow the inner value regardless of variant (requires `T: Sized`).
impl<'a, T> AsRef<T> for OwnedRef<'a, T>
where
    T: Sized,
{
    #[must_use]
    fn as_ref(&self) -> &T {
        match self {
            OwnedRef::Ref(r) => r,
            OwnedRef::Owned(v) => v.as_ref(),
        }
    }
}
impl<'a, T> IntoOwned for OwnedRef<'a, T>
where
    T: Sized + Clone,
{
    #[must_use]
    fn is_owned(&self) -> bool {
        match self {
            OwnedRef::Ref(_) => false,
            OwnedRef::Owned(_) => true,
        }
    }
    // Clones the referenced value into a Box; already-owned values move
    // through unchanged.
    #[must_use]
    fn into_owned(self) -> Self {
        match self {
            OwnedRef::Ref(r) => OwnedRef::Owned(Box::new(r.clone())),
            OwnedRef::Owned(v) => OwnedRef::Owned(v),
        }
    }
}
/// Wrap a mutable reference and convert to a Box on serialize
#[derive(Debug)]
pub enum OwnedRefMut<'a, T: 'a + ?Sized> {
    /// A mutable ref to a type
    Ref(&'a mut T),
    /// An owned [`Box`] of a type
    Owned(Box<T>),
}
// Both variants serialize as the underlying value.
impl<'a, T: 'a + ?Sized + Serialize> Serialize for OwnedRefMut<'a, T> {
    fn serialize<S>(&self, se: S) -> Result<S::Ok, S::Error>
    where
        S: Serializer,
    {
        match self {
            OwnedRefMut::Ref(r) => r.serialize(se),
            OwnedRefMut::Owned(b) => b.serialize(se),
        }
    }
}
// Deserialization always produces the `Owned` variant.
impl<'de, 'a, T: 'a + ?Sized> Deserialize<'de> for OwnedRefMut<'a, T>
where
    Box<T>: Deserialize<'de>,
{
    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
    where
        D: Deserializer<'de>,
    {
        Deserialize::deserialize(deserializer).map(OwnedRefMut::Owned)
    }
}
// Shared borrow of the inner value regardless of variant.
impl<'a, T: Sized> AsRef<T> for OwnedRefMut<'a, T> {
    #[must_use]
    fn as_ref(&self) -> &T {
        match self {
            OwnedRefMut::Ref(r) => r,
            OwnedRefMut::Owned(v) => v.as_ref(),
        }
    }
}
// Mutable borrow of the inner value regardless of variant.
impl<'a, T: Sized> AsMut<T> for OwnedRefMut<'a, T> {
    #[must_use]
    fn as_mut(&mut self) -> &mut T {
        match self {
            OwnedRefMut::Ref(r) => r,
            OwnedRefMut::Owned(v) => v.as_mut(),
        }
    }
}
impl<'a, T> IntoOwned for OwnedRefMut<'a, T>
where
    T: Sized + Clone,
{
    #[must_use]
    fn is_owned(&self) -> bool {
        match self {
            OwnedRefMut::Ref(_) => false,
            OwnedRefMut::Owned(_) => true,
        }
    }
    // Clones the referenced value into a Box; owned values pass through.
    #[must_use]
    fn into_owned(self) -> Self {
        match self {
            OwnedRefMut::Ref(r) => OwnedRefMut::Owned(Box::new(r.clone())),
            OwnedRefMut::Owned(v) => OwnedRefMut::Owned(v),
        }
    }
}
/// Wrap a slice and convert to a Vec on serialize
#[derive(Clone, Debug)]
pub enum OwnedSlice<'a, T: 'a + Sized> {
    /// A ref to a slice
    Ref(&'a [T]),
    /// A ref to an owned [`Vec`]
    Owned(Vec<T>),
}

impl<'a, T> Serialize for OwnedSlice<'a, T>
where
    T: 'a + Sized + Serialize,
{
    /// Serializes the elements, whichever variant holds them.
    fn serialize<S>(&self, se: S) -> Result<S::Ok, S::Error>
    where
        S: Serializer,
    {
        match self {
            Self::Ref(slice) => slice.serialize(se),
            Self::Owned(vec) => vec.serialize(se),
        }
    }
}

impl<'de, 'a, T> Deserialize<'de> for OwnedSlice<'a, T>
where
    T: 'a + Sized,
    Vec<T>: Deserialize<'de>,
{
    /// Deserialization always produces the `Owned` variant.
    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
    where
        D: Deserializer<'de>,
    {
        let vec = Vec::<T>::deserialize(deserializer)?;
        Ok(Self::Owned(vec))
    }
}

impl<'a, T: Sized> OwnedSlice<'a, T> {
    /// Get the [`OwnedSlice`] as slice.
    #[must_use]
    pub fn as_slice(&self) -> &[T] {
        match self {
            Self::Ref(slice) => slice,
            Self::Owned(vec) => vec.as_slice(),
        }
    }
}

impl<'a, T> IntoOwned for OwnedSlice<'a, T>
where
    T: Sized + Clone,
{
    #[must_use]
    fn is_owned(&self) -> bool {
        matches!(self, Self::Owned(_))
    }
    /// Copies a borrowed slice into a [`Vec`]; owned values pass through.
    #[must_use]
    fn into_owned(self) -> Self {
        if let Self::Ref(slice) = self {
            Self::Owned(slice.to_vec())
        } else {
            self
        }
    }
}
/// Wrap a mutable slice and convert to a Vec on serialize
#[derive(Debug)]
pub enum OwnedSliceMut<'a, T: 'a + Sized> {
    /// A ptr to a mutable slice of the type
    Ref(&'a mut [T]),
    /// An owned [`Vec`] of the type
    Owned(Vec<T>),
}

impl<'a, T> Serialize for OwnedSliceMut<'a, T>
where
    T: 'a + Sized + Serialize,
{
    /// Serializes the elements, whichever variant holds them.
    fn serialize<S>(&self, se: S) -> Result<S::Ok, S::Error>
    where
        S: Serializer,
    {
        match self {
            Self::Ref(slice) => slice.serialize(se),
            Self::Owned(vec) => vec.serialize(se),
        }
    }
}

impl<'de, 'a, T> Deserialize<'de> for OwnedSliceMut<'a, T>
where
    T: 'a + Sized,
    Vec<T>: Deserialize<'de>,
{
    /// Deserialization always produces the `Owned` variant.
    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
    where
        D: Deserializer<'de>,
    {
        let vec = Vec::<T>::deserialize(deserializer)?;
        Ok(Self::Owned(vec))
    }
}

impl<'a, T: Sized> OwnedSliceMut<'a, T> {
    /// Get the value as slice
    #[must_use]
    pub fn as_slice(&self) -> &[T] {
        match self {
            Self::Ref(slice) => slice,
            Self::Owned(vec) => vec.as_slice(),
        }
    }
    /// Get the value as mut slice
    #[must_use]
    pub fn as_mut_slice(&mut self) -> &mut [T] {
        match self {
            Self::Ref(slice) => slice,
            Self::Owned(vec) => vec.as_mut_slice(),
        }
    }
}

impl<'a, T> IntoOwned for OwnedSliceMut<'a, T>
where
    T: Sized + Clone,
{
    #[must_use]
    fn is_owned(&self) -> bool {
        matches!(self, Self::Owned(_))
    }
    /// Copies a borrowed slice into a [`Vec`]; owned values pass through.
    #[must_use]
    fn into_owned(self) -> Self {
        if let Self::Ref(slice) = self {
            Self::Owned(slice.to_vec())
        } else {
            self
        }
    }
}
/// Wrap a C-style pointer and convert to a Box on serialize
///
/// The `Ptr` variant must hold a pointer that is either null or valid for
/// reads; `as_ref` and `into_owned` panic on null.
#[derive(Clone, Debug)]
pub enum OwnedPtr<T: Sized> {
    /// Ptr to the content
    Ptr(*const T),
    /// Ptr to an owned [`Box`] of the content.
    Owned(Box<T>),
}
impl<T: Sized + Serialize> Serialize for OwnedPtr<T> {
    /// Serializes the pointed-to value (the pointer is dereferenced first).
    fn serialize<S>(&self, se: S) -> Result<S::Ok, S::Error>
    where
        S: Serializer,
    {
        self.as_ref().serialize(se)
    }
}
// NOTE: a previous `where Vec<T>: Deserialize<'de>` bound was unrelated to
// this impl (it deserializes a single `T`, not a `Vec<T>`) and was removed.
impl<'de, T: Sized + serde::de::DeserializeOwned> Deserialize<'de> for OwnedPtr<T> {
    /// Deserialization always produces the `Owned` variant.
    fn deserialize<D>(de: D) -> Result<Self, D::Error>
    where
        D: Deserializer<'de>,
    {
        Deserialize::deserialize(de).map(OwnedPtr::Owned)
    }
}
impl<T: Sized> AsRef<T> for OwnedPtr<T> {
    /// Borrows the wrapped or pointed-to value.
    ///
    /// # Panics
    /// Panics if the wrapped pointer is null.
    #[must_use]
    fn as_ref(&self) -> &T {
        match self {
            // SAFETY: whoever constructed the `Ptr` variant guarantees the
            // pointer is valid for reads; null panics via `unwrap`.
            OwnedPtr::Ptr(p) => unsafe { p.as_ref().unwrap() },
            OwnedPtr::Owned(v) => v.as_ref(),
        }
    }
}
impl<T> IntoOwned for OwnedPtr<T>
where
    T: Sized + Clone,
{
    #[must_use]
    fn is_owned(&self) -> bool {
        match self {
            OwnedPtr::Ptr(_) => false,
            OwnedPtr::Owned(_) => true,
        }
    }
    /// Clones the pointed-to value into a [`Box`]; owned values pass through.
    ///
    /// # Panics
    /// Panics if the wrapped pointer is null.
    #[must_use]
    fn into_owned(self) -> Self {
        match self {
            // SAFETY: see `as_ref` — the pointer must be valid for reads.
            OwnedPtr::Ptr(p) => unsafe { OwnedPtr::Owned(Box::new(p.as_ref().unwrap().clone())) },
            OwnedPtr::Owned(v) => OwnedPtr::Owned(v),
        }
    }
}
/// Wrap a C-style mutable pointer and convert to a Box on serialize
///
/// The `Ptr` variant must hold a pointer that is either null or valid for
/// reads/writes; the accessors panic on null.
#[derive(Clone, Debug)]
pub enum OwnedPtrMut<T: Sized> {
    /// A mut ptr to the content
    Ptr(*mut T),
    /// An owned [`Box`] to the content
    Owned(Box<T>),
}
impl<T: Sized + Serialize> Serialize for OwnedPtrMut<T> {
    /// Serializes the pointed-to value (the pointer is dereferenced first).
    fn serialize<S>(&self, se: S) -> Result<S::Ok, S::Error>
    where
        S: Serializer,
    {
        self.as_ref().serialize(se)
    }
}
// NOTE: a previous `where Vec<T>: Deserialize<'de>` bound was unrelated to
// this impl (it deserializes a single `T`, not a `Vec<T>`) and was removed.
impl<'de, T: Sized + serde::de::DeserializeOwned> Deserialize<'de> for OwnedPtrMut<T> {
    /// Deserialization always produces the `Owned` variant.
    fn deserialize<D>(de: D) -> Result<Self, D::Error>
    where
        D: Deserializer<'de>,
    {
        Deserialize::deserialize(de).map(OwnedPtrMut::Owned)
    }
}
impl<T: Sized> AsRef<T> for OwnedPtrMut<T> {
    /// Borrows the wrapped or pointed-to value.
    ///
    /// # Panics
    /// Panics if the wrapped pointer is null.
    #[must_use]
    fn as_ref(&self) -> &T {
        match self {
            // SAFETY: whoever constructed the `Ptr` variant guarantees the
            // pointer is valid for reads; null panics via `unwrap`.
            OwnedPtrMut::Ptr(p) => unsafe { p.as_ref().unwrap() },
            OwnedPtrMut::Owned(b) => b.as_ref(),
        }
    }
}
impl<T: Sized> AsMut<T> for OwnedPtrMut<T> {
    /// Mutably borrows the wrapped or pointed-to value.
    ///
    /// # Panics
    /// Panics if the wrapped pointer is null.
    fn as_mut(&mut self) -> &mut T {
        match self {
            // SAFETY: the pointer must be valid for writes and not aliased
            // elsewhere while this borrow lives.
            OwnedPtrMut::Ptr(p) => unsafe { p.as_mut().unwrap() },
            OwnedPtrMut::Owned(b) => b.as_mut(),
        }
    }
}
impl<T> IntoOwned for OwnedPtrMut<T>
where
    T: Sized + Clone,
{
    #[must_use]
    fn is_owned(&self) -> bool {
        match self {
            OwnedPtrMut::Ptr(_) => false,
            OwnedPtrMut::Owned(_) => true,
        }
    }
    /// Clones the pointed-to value into a [`Box`]; owned values pass through.
    ///
    /// # Panics
    /// Panics if the wrapped pointer is null.
    #[must_use]
    fn into_owned(self) -> Self {
        match self {
            // SAFETY: see `as_ref` — the pointer must be valid for reads.
            OwnedPtrMut::Ptr(p) => unsafe {
                OwnedPtrMut::Owned(Box::new(p.as_ref().unwrap().clone()))
            },
            OwnedPtrMut::Owned(v) => OwnedPtrMut::Owned(v),
        }
    }
}
/// Wrap a C-style pointer to an array (with size) and convert to a Vec on serialize
///
/// The `ArrayPtr` variant must hold a pointer valid for reading `len`
/// consecutive `T`s.
#[derive(Clone, Debug)]
pub enum OwnedArrayPtr<T: Sized> {
    /// Ptr to a slice
    ArrayPtr((*const T, usize)),
    /// A owned [`Vec`].
    Owned(Vec<T>),
}
impl<T: Sized + Serialize> Serialize for OwnedArrayPtr<T> {
    /// Serializes the elements as a slice.
    fn serialize<S>(&self, se: S) -> Result<S::Ok, S::Error>
    where
        S: Serializer,
    {
        self.as_slice().serialize(se)
    }
}
// NOTE: the previous `T: Serialize` bound on this Deserialize impl was
// unnecessary (nothing is serialized here) and was removed.
impl<'de, T: Sized> Deserialize<'de> for OwnedArrayPtr<T>
where
    Vec<T>: Deserialize<'de>,
{
    /// Deserialization always produces the `Owned` variant.
    fn deserialize<D>(de: D) -> Result<Self, D::Error>
    where
        D: Deserializer<'de>,
    {
        Deserialize::deserialize(de).map(OwnedArrayPtr::Owned)
    }
}
impl<T: Sized> OwnedArrayPtr<T> {
    /// Get a slice from this array.
    #[must_use]
    pub fn as_slice(&self) -> &[T] {
        match self {
            // SAFETY: the creator of `ArrayPtr` guarantees `p.0` points to
            // `p.1` valid, initialized elements.
            OwnedArrayPtr::ArrayPtr(p) => unsafe { core::slice::from_raw_parts(p.0, p.1) },
            OwnedArrayPtr::Owned(v) => v.as_slice(),
        }
    }
}
impl<T> IntoOwned for OwnedArrayPtr<T>
where
    T: Sized + Clone,
{
    #[must_use]
    fn is_owned(&self) -> bool {
        match self {
            OwnedArrayPtr::ArrayPtr(_) => false,
            OwnedArrayPtr::Owned(_) => true,
        }
    }
    /// Copies the pointed-to elements into a [`Vec`]; owned values pass through.
    #[must_use]
    fn into_owned(self) -> Self {
        match self {
            // SAFETY: same contract as `as_slice`.
            OwnedArrayPtr::ArrayPtr(p) => unsafe {
                OwnedArrayPtr::Owned(core::slice::from_raw_parts(p.0, p.1).to_vec())
            },
            OwnedArrayPtr::Owned(v) => OwnedArrayPtr::Owned(v),
        }
    }
}
/// Wrap a C-style mutable pointer to an array (with size) and convert to a Vec on serialize
///
/// The `ArrayPtr` variant must hold a pointer valid for reading (and, for
/// `as_mut_slice`, writing) `len` consecutive `T`s.
#[derive(Clone, Debug)]
pub enum OwnedArrayPtrMut<T: Sized> {
    /// A ptr to the array (or slice).
    ArrayPtr((*mut T, usize)),
    /// An owned [`Vec`].
    Owned(Vec<T>),
}
impl<T: Sized + Serialize> Serialize for OwnedArrayPtrMut<T> {
    /// Serializes the elements as a slice.
    fn serialize<S>(&self, se: S) -> Result<S::Ok, S::Error>
    where
        S: Serializer,
    {
        self.as_slice().serialize(se)
    }
}
// NOTE: the previous `T: Serialize` bound on this Deserialize impl was
// unnecessary (nothing is serialized here) and was removed.
impl<'de, T: Sized> Deserialize<'de> for OwnedArrayPtrMut<T>
where
    Vec<T>: Deserialize<'de>,
{
    /// Deserialization always produces the `Owned` variant.
    fn deserialize<D>(de: D) -> Result<Self, D::Error>
    where
        D: Deserializer<'de>,
    {
        Deserialize::deserialize(de).map(OwnedArrayPtrMut::Owned)
    }
}
impl<T: Sized> OwnedArrayPtrMut<T> {
    /// Return this array as slice
    #[must_use]
    pub fn as_slice(&self) -> &[T] {
        match self {
            // SAFETY: the creator of `ArrayPtr` guarantees `p.0` points to
            // `p.1` valid, initialized elements.
            OwnedArrayPtrMut::ArrayPtr(p) => unsafe { core::slice::from_raw_parts(p.0, p.1) },
            OwnedArrayPtrMut::Owned(v) => v.as_slice(),
        }
    }
    /// Return this array as mut slice
    #[must_use]
    pub fn as_mut_slice(&mut self) -> &mut [T] {
        match self {
            // SAFETY: additionally requires the memory to be writable and
            // not aliased elsewhere while this borrow lives.
            OwnedArrayPtrMut::ArrayPtr(p) => unsafe { core::slice::from_raw_parts_mut(p.0, p.1) },
            OwnedArrayPtrMut::Owned(v) => v.as_mut_slice(),
        }
    }
}
impl<T> IntoOwned for OwnedArrayPtrMut<T>
where
    T: Sized + Clone,
{
    #[must_use]
    fn is_owned(&self) -> bool {
        match self {
            OwnedArrayPtrMut::ArrayPtr(_) => false,
            OwnedArrayPtrMut::Owned(_) => true,
        }
    }
    /// Copies the pointed-to elements into a [`Vec`]; owned values pass through.
    #[must_use]
    fn into_owned(self) -> Self {
        match self {
            // SAFETY: same contract as `as_slice` (read-only access suffices).
            OwnedArrayPtrMut::ArrayPtr(p) => unsafe {
                OwnedArrayPtrMut::Owned(core::slice::from_raw_parts(p.0, p.1).to_vec())
            },
            OwnedArrayPtrMut::Owned(v) => OwnedArrayPtrMut::Owned(v),
        }
    }
}
|
use std::fmt::Write;
use crate::MetadataError;
use librespot_core::{Error, Session};
/// Raw response bytes from a Mercury request, or the transport error.
pub type RequestResult = Result<bytes::Bytes, Error>;

#[async_trait]
pub trait MercuryRequest {
    /// Performs a Mercury GET for `uri`, appending the session's country and
    /// (when known) the account type as query parameters, and returns the
    /// first payload part of the response as owned bytes.
    async fn request(session: &Session, uri: &str) -> RequestResult {
        let mut metrics_uri = uri.to_owned();
        // Append with '?' or '&' depending on whether a query string already exists.
        let separator = match metrics_uri.find('?') {
            Some(_) => "&",
            None => "?",
        };
        // Writing into a String cannot fail; the Result is deliberately ignored.
        let _ = write!(metrics_uri, "{separator}country={}", session.country());
        if let Some(product) = session.get_user_attribute("type") {
            let _ = write!(metrics_uri, "&product={product}");
        }
        trace!("Requesting {}", metrics_uri);
        let request = session.mercury().get(metrics_uri)?;
        let response = request.await?;
        match response.payload.first() {
            Some(data) => {
                // Copy the first payload part into owned `Bytes`; any further
                // parts are ignored.
                let data = data.to_vec().into();
                trace!("Received metadata: {data:?}");
                Ok(data)
            }
            // An empty payload is reported as an "unavailable" error.
            None => Err(Error::unavailable(MetadataError::Empty)),
        }
    }
}
|
use std::fmt::{Display, Formatter};
use std::result::{Result};
use std::rc::{Rc};
use std::fmt;
pub type Literal = String;
use position::*;
use utils::*;
use transform::*;
use symbol::*;
use context::*;
// Short alias for the source position carried by every node.
type P = Position;

/// Untyped AST node. `*ByName` variants refer to symbols by their parsed
/// name; the corresponding resolved variants (`Ident`, `Assign`, `Decl`,
/// `For`, `Func`) carry a `SymbolRef` after name resolution.
#[derive(Debug)]
pub enum Tree {
    // --- name-based (pre-resolution) forms ---
    AssignByName(P, SymbolName, Box<Tree>),
    IdentByName(P, SymbolName),
    DeclByName(P, SymbolName, Box<Tree>),
    ForByName(P, SymbolName, Box<Tree>, Box<Tree>),
    FuncByName(P, Vec<SymbolName>, Box<Tree>),
    // --- resolved forms ---
    Ident(P, SymbolRef),
    Assign(P, SymbolRef, Box<Tree>),
    Decl(P, SymbolRef, Box<Tree>),
    For(P, SymbolRef, Box<Tree>, Box<Tree>),
    // Function bodies are shared via `Rc` (see `try_transform_ref`).
    Func(P, Vec<SymbolRef>, Rc<Tree>),
    // --- operators ---
    Add(P, Box<Tree>, Box<Tree>),
    Sub(P, Box<Tree>, Box<Tree>),
    Mul(P, Box<Tree>, Box<Tree>),
    Div(P, Box<Tree>, Box<Tree>),
    Eq(P, Box<Tree>, Box<Tree>),
    Neq(P, Box<Tree>, Box<Tree>),
    Not(P, Box<Tree>),
    // --- calls, literals and control flow ---
    Call(P, Box<Tree>, Vec<Tree>),
    NumLit(P, f64),
    StrLit(P, Literal),
    ListLit(P, Vec<Tree>),
    Block(P, Vec<Tree>),
    If(P, Box<Tree>, Box<Tree>, Box<Tree>),
    While(P, Box<Tree>, Box<Tree>),
    /// The empty/unit expression.
    Unit(P),
}
impl Tree {
    /// Calls `func` on every node of the tree, children before parents
    /// (post-order); see `for_each_ref`.
    pub fn for_each<F: FnMut(&Tree) -> ()>(&self, mut func: F) {
        self.for_each_ref(&mut func);
    }
    /// Rebuilds the tree by applying the transform `f` to every node,
    /// threading the scope `Context` through the walk.
    /// NOTE(review): the `'a` lifetime parameter is unused.
    pub fn transform<'a, F: TryTransform>(self, mut f: F, ctx: &mut Context) -> Result<Tree, Error> {
        self.try_transform_ref(&mut f, ctx)
    }
    /// `true` for nodes that open a new scope in `Context` during
    /// transformation (functions, loops, blocks and conditionals).
    fn is_rec(&self) -> bool {
        match self {
            &Tree::FuncByName(..) | &Tree::Func(..) => true,
            &Tree::ForByName(..) | &Tree::For(..) => true,
            &Tree::Block(..) | &Tree::If(..) | &Tree::While(..) => true,
            _ => false
        }
    }
    /// Post-order traversal: recurse into every child, then visit `self`.
    /// Leaf variants (literals, identifiers, `Unit`) fall through to the
    /// catch-all arm and are only visited themselves.
    fn for_each_ref<F: FnMut(&Tree) -> ()>(&self, func: &mut F) {
        match self {
            &Tree::AssignByName(_, _, ref r) => r.for_each_ref(func),
            &Tree::DeclByName(_, _, ref e) => e.for_each_ref(func),
            &Tree::ForByName(_, _, ref l, ref b) => { l.for_each_ref(func); b.for_each_ref(func) },
            &Tree::FuncByName(_, _, ref b) => b.for_each_ref(func),
            &Tree::Add(_, ref l, ref r) => { l.for_each_ref(func); r.for_each_ref(func) },
            &Tree::Sub(_, ref l, ref r) => { l.for_each_ref(func); r.for_each_ref(func) },
            &Tree::Mul(_, ref l, ref r) => { l.for_each_ref(func); r.for_each_ref(func) },
            &Tree::Div(_, ref l, ref r) => { l.for_each_ref(func); r.for_each_ref(func) },
            &Tree::Eq(_, ref l, ref r) => { l.for_each_ref(func); r.for_each_ref(func) },
            &Tree::Neq(_, ref l, ref r) => { l.for_each_ref(func); r.for_each_ref(func) },
            &Tree::Not(_, ref e) => e.for_each_ref(func),
            &Tree::Call(_, ref o, ref a) => { o.for_each_ref(func); for a in a.iter() { a.for_each_ref(func); } },
            &Tree::ListLit(_, ref lst) => for t in lst.iter() { t.for_each_ref(func) },
            &Tree::If(_, ref c, ref t, ref e) => { c.for_each_ref(func); t.for_each_ref(func); e.for_each_ref(func) },
            &Tree::While(_, ref c, ref b) => { c.for_each_ref(func); b.for_each_ref(func) },
            &Tree::Block(_, ref s) => for t in s.iter() { t.for_each_ref(func) },
            &Tree::Assign(_, _, ref r) => r.for_each_ref(func),
            &Tree::Decl(_, _, ref r) => r.for_each_ref(func),
            &Tree::For(_, _, ref c, ref b) => { c.for_each_ref(func); b.for_each_ref(func) },
            &Tree::Func(_, _, ref b) => b.for_each_ref(func),
            _ => ()
        }
        // Visit the node itself only after all of its children.
        func(self);
    }
    /// Pre-order transformation: `f.transform` rewrites `self` first, then
    /// the walk descends into the children of the rewritten node.
    /// Scope-introducing nodes (see `is_rec`, decided from the *original*
    /// node before `f` runs) get a `ctx.push()`/`ctx.pop()` bracket around
    /// the whole step.
    fn try_transform_ref<F: TryTransform>(self, f: &mut F, ctx: &mut Context) -> Result<Tree, Error> {
        // Helper: transform a boxed child in place. `box_` is presumably a
        // `Box::new` wrapper from `utils` — TODO confirm.
        let tr = |t: Box<Tree>, f: &mut F, ctx: &mut Context| -> Result<Box<Tree>, Error> { Ok(box_(t.try_transform_ref(f, ctx)?)) };
        let rec = self.is_rec();
        if rec {
            ctx.push();
        }
        let r = Ok(match f.transform(self, ctx)? {
            Tree::AssignByName(p, s, r) => Tree::AssignByName(p, s, tr(r, f, ctx)?),
            Tree::DeclByName(p, s, r) => Tree::DeclByName(p, s, tr(r, f, ctx)?),
            Tree::ForByName(p, s, c, b) => Tree::ForByName(p, s, tr(c, f, ctx)?, tr(b, f, ctx)?),
            Tree::FuncByName(p, s, b) => Tree::FuncByName(p, s, tr(b, f, ctx)?),
            Tree::Add(p, l, r) => Tree::Add(p, tr(l, f, ctx)?, tr(r, f, ctx)?),
            Tree::Sub(p, l, r) => Tree::Sub(p, tr(l, f, ctx)?, tr(r, f, ctx)?),
            Tree::Mul(p, l, r) => Tree::Mul(p, tr(l, f, ctx)?, tr(r, f, ctx)?),
            Tree::Div(p, l, r) => Tree::Div(p, tr(l, f, ctx)?, tr(r, f, ctx)?),
            Tree::Eq(p, l, r) => Tree::Eq(p, tr(l, f, ctx)?, tr(r, f, ctx)?),
            Tree::Neq(p, l, r) => Tree::Neq(p, tr(l, f, ctx)?, tr(r, f, ctx)?),
            Tree::Not(p, e) => Tree::Not(p, tr(e, f, ctx)?),
            Tree::Call(p, o, a) => Tree::Call(p, tr(o, f, ctx)?, a.into_iter().map(|t| t.try_transform_ref(f, ctx)).collect::<Result<Vec<_>, Error>>()?),
            Tree::ListLit(p, l) => Tree::ListLit(p, l.into_iter().map(|t| t.try_transform_ref(f, ctx)).collect::<Result<Vec<_>, Error>>()?),
            Tree::Block(p, l) => Tree::Block(p, l.into_iter().map(|t| t.try_transform_ref(f, ctx)).collect::<Result<Vec<_>, Error>>()?),
            Tree::If(p, c, t, e) => Tree::If(p, tr(c, f, ctx)?, tr(t, f, ctx)?, tr(e, f, ctx)?),
            Tree::While(p, c, b) => Tree::While(p, tr(c, f, ctx)?, tr(b, f, ctx)?),
            Tree::Assign(p, i, r) => Tree::Assign(p, i, tr(r, f, ctx)?),
            Tree::Decl(p, i, r) => Tree::Decl(p, i, tr(r, f, ctx)?),
            Tree::For(p, i, c, b) => Tree::For(p, i, tr(c, f, ctx)?, tr(b, f, ctx)?),
            Tree::Func(p, s, b) => {
                // A shared (`Rc`) function body can only be transformed when
                // this is the sole reference to it.
                let b = Rc::try_unwrap(b).map_err(|_| ErrorKind::Generic("function in use".to_owned()).with_position(&p))?;
                Tree::Func(p, s, Rc::new(b.try_transform_ref(f, ctx)?))
            },
            // Leaves (literals, identifiers, Unit) have no children to walk.
            t => t
        });
        if rec {
            ctx.pop();
        }
        r
    }
}
impl Display for Tree {
    /// Renders the node as pseudo-source text.
    fn fmt(&self, f: &mut Formatter) -> fmt::Result {
        match self {
            Tree::IdentByName(_, name) => write!(f, "{}", name),
            Tree::AssignByName(_, l, r) => write!(f, "{} = {}", l, r),
            Tree::DeclByName(_, s, e) => write!(f, "let {} = {}", s, e),
            Tree::ForByName(_, n, l, b) => write!(f, "for({} : {}) {}", n, l, b),
            Tree::FuncByName(_, a, b) => write!(f, "({}) => {}", concat(a), b),
            Tree::Add(_, l, r) => write!(f, "({} + {})", l, r),
            Tree::Sub(_, l, r) => write!(f, "({} - {})", l, r),
            Tree::Mul(_, l, r) => write!(f, "({} * {})", l, r),
            Tree::Div(_, l, r) => write!(f, "({} / {})", l, r),
            Tree::Eq(_, l, r) => write!(f, "({} == {})", l, r),
            Tree::Neq(_, l, r) => write!(f, "({} != {})", l, r),
            Tree::Not(_, e) => write!(f, "!({})", e),
            Tree::Call(_, o, a) => write!(f, "{}({})", o, concat(a)),
            Tree::StrLit(_, val) => write!(f, "\"{}\"", val),
            Tree::NumLit(_, val) => write!(f, "{}", val),
            Tree::ListLit(_, lst) => write!(f, "[{}]", concat(lst)),
            Tree::If(_, c, t, e) => write!(f, "if({}) {} else {}", c, t, e),
            Tree::While(_, c, b) => write!(f, "while({}) {}", c, b),
            // The unit expression renders as nothing.
            Tree::Unit(_) => Ok(()),
            Tree::Block(_, stmts) => {
                write!(f, "{{\n")?;
                for stmt in stmts {
                    write!(f, "{}\n", stmt)?;
                }
                write!(f, "}}")
            }
            Tree::Ident(_, re) => write!(f, "{}", re),
            Tree::Assign(_, re, rhs) => write!(f, "{} = {}", re, rhs),
            Tree::Decl(_, re, rhs) => write!(f, "let {} = {}", re, rhs),
            Tree::For(_, re, c, b) => write!(f, "for({} : {}) {}", re, c, b),
            Tree::Func(_, a, b) => write!(f, "({}) => {}", concat(a), b),
        }
    }
}
impl Tree {
    /// Returns a copy of the source position stored in the node's first field.
    pub fn position(&self) -> Position {
        // Variants are grouped by arity; every variant stores its position first.
        match self {
            Tree::Unit(p) => p,
            Tree::IdentByName(p, _)
            | Tree::Ident(p, _)
            | Tree::Not(p, _)
            | Tree::NumLit(p, _)
            | Tree::StrLit(p, _)
            | Tree::ListLit(p, _)
            | Tree::Block(p, _) => p,
            Tree::AssignByName(p, _, _)
            | Tree::DeclByName(p, _, _)
            | Tree::FuncByName(p, _, _)
            | Tree::Assign(p, _, _)
            | Tree::Decl(p, _, _)
            | Tree::Func(p, _, _)
            | Tree::Add(p, _, _)
            | Tree::Sub(p, _, _)
            | Tree::Mul(p, _, _)
            | Tree::Div(p, _, _)
            | Tree::Eq(p, _, _)
            | Tree::Neq(p, _, _)
            | Tree::Call(p, _, _)
            | Tree::While(p, _, _) => p,
            Tree::ForByName(p, _, _, _)
            | Tree::For(p, _, _, _)
            | Tree::If(p, _, _, _) => p,
        }
        .clone()
    }
}
|
//! TOML blog configuration
use super::{
env_or_empty, load_config,
vendors::{FacebookConfig, GoogleConfig, MapBoxConfig},
ReadsEnv,
};
use crate::{deserialize::regex_string, models::Location, tools::Pairs};
use regex::Regex;
use serde::Deserialize;
use std::path::Path;
/// Replacement camera, lens and software text
/// (each `Pairs` presumably maps matched text to its replacement — confirm
/// against `tools::Pairs`)
#[derive(Deserialize, Debug)]
pub struct ExifConfig {
    pub camera: Pairs,
    pub software: Pairs,
    pub lens: Pairs,
}
/// Photo sizes to generate from original
#[derive(Deserialize, Debug)]
pub struct SizeConfig {
    pub large: u16,
    pub medium: u16,
    pub small: u16,
    pub thumb: u16,
}
/// Rendered vs. displayed photo sizes
#[derive(Deserialize, Debug)]
pub struct SizeTypes {
    /// Photo sizes to generate from original
    pub render: SizeConfig,
    /// Photo sizes to display (CSS pixel size)
    pub display: SizeConfig,
}
/// Photo processing settings
#[derive(Deserialize, Debug)]
pub struct PhotoConfig {
    /// Regex pattern to extract photo index and count from file name
    ///
    /// *Example* `(\\d{3})-of-(\\d{3})\\.jpg$` for `neat_place_012-of-015.jpg`
    #[serde(deserialize_with = "regex_string")]
    pub capture_index: Regex,
    /// Rendered and displayed photo sizes
    pub size: SizeTypes,
    /// EXIF normalization settings
    pub exif: ExifConfig,
    /// Tags to exclude from rendered views
    pub remove_tags: Vec<String>,
    /// Extension (*with* leading period) of source files from which published
    /// web files are rendered
    pub source_ext: String,
    /// Extension (*with* leading period) applied to resized photos
    pub output_ext: String,
    /// Maximum edge size of source image. This may be used to determine if a
    /// resize is required for the largest photo.
    pub source_size: u16,
}
/// Category display and icon settings
#[derive(Deserialize, Debug)]
pub struct CategoryConfig {
    /// Match name of "what" category to transportation mode that may in turn
    /// match an icon
    pub what_regex: Option<Pairs>,
    /// Icon name per category kind
    pub icon: CategoryIcon,
    /// Which category kinds to display and in what order
    pub display: Vec<String>,
}
/// Redaction of GPS tracks near a private location
#[derive(Deserialize, Debug)]
pub struct GpsPrivacy {
    /// Erase tracks around given latitude and longitude
    pub center: Location,
    /// Radius around privacyCenter to exclude from GeoJSON
    pub miles: usize,
    // NOTE(review): semantics of `verify` are not visible here — presumably
    // whether to enforce/verify the privacy zone; confirm at the call site.
    pub verify: bool,
}
/// GPS track simplification and publishing settings
#[derive(Deserialize, Debug)]
pub struct GpsTrackConfig {
    /// Minimum number of points for a track to be considered
    pub min_track_points: usize,
    /// Distance a track point must deviate from others to avoid Douglas-Peucker
    /// simplification
    pub max_point_deviation_feet: f32,
    /// Manually adjusted tracks may have infinite speeds between points so
    /// throw out anything over a threshold
    pub max_possible_speed_mph: f32,
    /// Optional privacy zone; `None` disables redaction
    pub privacy: Option<GpsPrivacy>,
    /// Whether track GPX files can be downloaded
    pub allow_download: bool,
    // Link patterns to external maps with `lat`, `lon`, `zoom` and `altitude`
    // tokens
}
/// Match category kind to material icon
/// https://material.io/icons/
#[derive(Deserialize, Debug)]
pub struct CategoryIcon {
    pub who: String,
    pub what: String,
    pub when: String,
    pub r#where: String,
    /// Fallback icon when a kind has no dedicated entry
    pub default: String,
}
/// Layout dimensions
#[derive(Deserialize, Debug)]
pub struct StyleConfig {
    /// Maximum pixel height of static maps displayed with post summaries
    pub inline_map_height: u16,
    /// Pixel width of main content used to compute generated image widths
    pub content_width: u16,
}
/// An image with its published URL and pixel dimensions
#[derive(Deserialize, Debug)]
pub struct ImageConfig {
    pub url: String,
    pub width: u16,
    pub height: u16,
}
/// Site identity and branding
#[derive(Deserialize, Debug)]
pub struct SiteConfig {
    pub url: String,
    pub title: String,
    pub subtitle: String,
    pub description: String,
    pub logo: ImageConfig,
    pub company_logo: Option<ImageConfig>,
    /// Generic name for a post (usually just "post") that can be used in a
    /// category page subtitle, e.g. "27 posts" and pluralized with just an `s`
    #[serde(default = "default_post_alias")]
    pub post_alias: String,
}
/// Site owner identity
#[derive(Deserialize, Debug)]
pub struct OwnerConfig {
    pub name: String,
    /// Not read from TOML (`skip`); populated from the `EMAIL_CONTACT`
    /// environment variable by `ReadsEnv::from_env`
    #[serde(skip)]
    pub email: Option<String>,
    pub urls: Option<Vec<String>>,
    pub image: Option<ImageConfig>,
}
impl ReadsEnv for OwnerConfig {
    /// Reads the contact e-mail from the `EMAIL_CONTACT` environment variable.
    fn from_env(&mut self) {
        let contact = env_or_empty("EMAIL_CONTACT");
        self.email = Some(contact);
    }
}
/// A post highlighted on the home page
#[derive(Deserialize, Debug)]
pub struct FeaturedPost {
    pub path: String,
    // title will be retrieved from actual post
    #[serde(skip)]
    pub title: String,
}
/// Flags that force re-rendering even when sources are unchanged
#[derive(Deserialize, Debug, Default)]
pub struct Overrides {
    /// Re-render all post and root pages
    pub html: bool,
    /// Re-download all static maps
    pub maps: bool,
    /// Re-generate resized photos
    pub photos: bool,
    /// Re-render all photo tag pages
    pub tags: bool,
}
/// Root TOML blog configuration
#[derive(Deserialize, Debug)]
pub struct BlogConfig {
    pub author_name: String,
    pub repo_url: String,
    pub featured_post: Option<FeaturedPost>,
    /// Folders known not to contain posts
    pub ignore_folders: Vec<String>,
    /// Whether to force writing operations even if source files haven't changed
    #[serde(skip, default)]
    pub force: Overrides,
    /// Redirect source slug to target
    pub redirects: Option<Pairs>,
    pub site: SiteConfig,
    pub owner: OwnerConfig,
    pub style: StyleConfig,
    pub category: CategoryConfig,
    pub photo: PhotoConfig,
    pub facebook: FacebookConfig,
    pub mapbox: MapBoxConfig,
    pub google: GoogleConfig,
}
impl BlogConfig {
    /// Loads the configuration from `path` and overlays values taken from
    /// the environment. Returns `None` when `load_config` fails.
    pub fn load(path: &Path) -> Option<Self> {
        // `map` (not `and_then`) — the closure is infallible.
        load_config::<Self>(path).map(|mut c| {
            c.from_env();
            c
        })
    }
}
impl ReadsEnv for BlogConfig {
    /// Propagates environment-variable values into every nested config
    /// section that implements `ReadsEnv`.
    fn from_env(&mut self) {
        self.mapbox.from_env();
        self.google.from_env();
        self.owner.from_env();
    }
}
/// Serde default for `SiteConfig::post_alias`.
fn default_post_alias() -> String {
    "Post".to_owned()
}
|
use rand::Rng;
/// Top-level game state machine
#[derive(PartialEq, Clone, Copy)]
pub enum State {
    MainMenu,
    Game,
    Lost,
    Quit,
}
/// Heading of the snake's head
#[derive(PartialEq)]
pub enum Direction {
    Up,
    Down,
    Right,
    Left,
}
/// The player-controlled snake
pub struct Snake {
    /// Segment positions as (x, y) grid cells, head first (index 0)
    pub body: Vec<(i32, i32)>,
    /// Current heading of the head
    pub direction: Direction,
    /// Number of segments still pending to be added on upcoming steps
    pub growth: u32,
}
impl Snake {
    /// Creates a new single-segment snake at `position`, pointing up and
    /// scheduled to grow by three segments.
    /// (The previous doc comment claimed "pointing to the right", which
    /// contradicted the code; the code's `Direction::Up` is kept.)
    pub fn new(position: (i32, i32)) -> Snake {
        Snake {
            body: vec![position],
            direction: Direction::Up,
            growth: 3,
        }
    }
    /// Changes direction of the snake's head to `new_dir`, unless that would
    /// reverse the snake directly onto itself.
    pub fn turn(&mut self, new_dir: Direction) {
        let is_reversal = match new_dir {
            Direction::Up => self.direction == Direction::Down,
            Direction::Down => self.direction == Direction::Up,
            Direction::Right => self.direction == Direction::Left,
            Direction::Left => self.direction == Direction::Right,
        };
        if !is_reversal {
            self.direction = new_dir;
        }
    }
    /// Moves the head one cell in the current direction. Screen coordinates:
    /// y grows downward, so `Up` decrements y.
    fn move_head(&mut self) {
        match self.direction {
            Direction::Right => self.body[0].0 += 1,
            Direction::Left => self.body[0].0 -= 1,
            Direction::Up => self.body[0].1 -= 1,
            Direction::Down => self.body[0].1 += 1,
        }
    }
    /// Moves all other pieces in their directions and adds a new piece if self.growth is non-zero
    pub fn advance(&mut self) {
        if self.growth > 0 {
            // Duplicate the tail; it stays in place this step while the rest
            // of the body moves, effectively growing by one.
            if let Some(tail) = self.body.last().copied() {
                self.body.push(tail);
            }
            // growth is decremented inside the loop below.
        }
        let body_len = self.body.len();
        for index in (0..body_len).rev() {
            // The freshly appended tail keeps its position this step.
            if index == body_len - 1 && self.growth > 0 {
                self.growth -= 1;
                continue;
            }
            if index == 0 {
                self.move_head();
            } else {
                // Each segment takes the position of the one in front of it.
                self.body[index] = self.body[index - 1];
            }
        }
    }
    /// checks if given position is inside of the snake
    pub fn inside(&self, pos: (i32, i32)) -> bool {
        self.body.contains(&pos)
    }
}
/// Tracks the fruit currently on the board
pub struct FruitManager {
    /// Fruit positions as (x, y) grid cells
    pub fruits: Vec<(i32, i32)>,
}
impl FruitManager {
    /// Creates a manager with no fruit on the board.
    pub fn new() -> FruitManager {
        FruitManager {
            fruits: Vec::new(),
        }
    }
    /// place a new fruit in a random spot between (0,0) and max_pos-1 exclusively
    pub fn place_new(&mut self, max_pos: (i32, i32), snake: &Snake) {
        let mut rng = rand::thread_rng();
        let mut x = 0;
        let mut y = 0;
        // (0, _) / (_, 0) is never a valid spot, so the loop runs at least once
        // and repeats until the cell is free of fruit and snake.
        while x == 0 || y == 0 || !self.fruit_unique((x, y)) || snake.inside((x, y)) {
            x = rng.gen_range(1, max_pos.0 - 1);
            y = rng.gen_range(1, max_pos.1 - 1);
        }
        self.fruits.push((x, y));
    }
    /// check if new_fruit with given position doesn't already exist
    fn fruit_unique(&self, new_fruit: (i32, i32)) -> bool {
        !self.fruits.contains(&new_fruit)
    }
    /// Removes the fruit under the snake's head, returning `true` when one
    /// was eaten. (Rewritten from a `-1` sentinel index to `Option`.)
    pub fn fruit_eaten(&mut self, snake: &Snake) -> bool {
        match self.fruits.iter().position(|fruit| snake.body[0] == *fruit) {
            Some(index) => {
                self.fruits.remove(index);
                true
            }
            None => false,
        }
    }
}
/// Returns `true` when the game is lost: the head has reached the arena
/// border, or any other body segment overlaps the head.
pub fn check_if_lost(max_pos: (i32, i32), snake: &Snake) -> bool {
    let head = snake.body[0];
    // Border check for the head only. FIX: the original compared x against
    // `max_pos.1` and y against `max_pos.0` (swapped axes) — consistent only
    // for square boards; `place_new` draws x from `max_pos.0`, so x is
    // compared against `max_pos.0` here.
    if head.0 <= 0 || head.1 <= 0 || head.0 >= max_pos.0 - 1 || head.1 >= max_pos.1 - 1 {
        return true;
    }
    // Self-collision: any non-head segment occupying the head's cell.
    snake.body.iter().skip(1).any(|piece| *piece == head)
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.