text stringlengths 8 4.13M |
|---|
#![feature(drop_types_in_const)]
#![feature(plugin)]
#![plugin(maud_macros)]
#![plugin(rocket_codegen)]
#![feature(field_init_shorthand)]
extern crate rocket;
extern crate redis;
extern crate maud;
extern crate rocket_contrib;
extern crate uuid;
mod templates;
use redis::Commands;
use redis::Connection;
use rocket::Outcome;
use rocket::http::{Status, Cookies, Cookie};
use rocket::request::{Request, FromRequest};
use rocket::response::{Redirect, NamedFile};
use rocket::config;
use uuid::Uuid;
use std::path::{PathBuf, Path};
// Returns the caller's "session_id" cookie value, minting and persisting a
// fresh hyphen-free UUID when no such cookie exists yet.
// NOTE(review): this relies on a pre-0.3 Rocket API where `Cookies` permits
// `add` through a shared reference and `Cookie.value` is a public field —
// confirm against the pinned rocket version.
fn get_session_id(cookies: &Cookies) -> String {
    match cookies.find("session_id") {
        Some(cookie) => cookie.value,
        None => {
            // No session yet: generate an id and send it back to the client.
            let uuid = Uuid::new_v4().simple().to_string();
            cookies.add(Cookie::new("session_id".into(), uuid.clone()));
            uuid
        }
    }
}
/// Derives the Redis key under which a single session's click count is stored.
fn counter_id(session_id: String) -> String {
    let mut key = String::from("counter:");
    key.push_str(&session_id);
    key
}
// GET /: renders the index page with the global click count and the
// requester's per-session count.
// NOTE(review): `templates::index` presumably renders to `String` (maud) —
// confirm; the global "counter" GET is unwrapped and will panic if the key
// was never initialized (see `initialize_redis`).
#[get("/")]
fn index(conn: RedisConnection, cookies: &Cookies) -> String {
    let session_id = get_session_id(cookies);
    let val: i32 = conn.conn.get("counter").unwrap();
    // Missing per-session key simply means "never clicked" → 0.
    let user_val: i32 = conn.conn.get(counter_id(session_id)).unwrap_or(0);
    templates::index(val, user_val)
}
// POST /press: increments both the requester's per-session counter and the
// global counter, then redirects back to the index page.
// NOTE(review): both INCRs are unwrapped — a Redis failure panics the handler
// rather than using the declared `Err(String)` branch.
#[post("/press")]
fn press(conn: RedisConnection, cookies: &Cookies) -> Result<Redirect, String> {
    let session_id = get_session_id(cookies);
    let _: i32 = conn.conn.incr(counter_id(session_id), 1).unwrap();
    let _: i32 = conn.conn.incr("counter", 1).unwrap();
    Ok(Redirect::to("/"))
}
// GET /static/<file..>: serves static assets from the local `static/` dir.
// NOTE(review): Rocket's `PathBuf` segment guard is documented to reject
// `..` traversal segments — verify for the pinned version before relying on
// this for security.
#[get("/static/<file..>")]
fn files(file: PathBuf) -> Option<NamedFile> {
    NamedFile::open(Path::new("static/").join(file)).ok()
}
// Request guard wrapper around a live Redis connection (see the
// `FromRequest` impl below: a fresh connection is opened per request).
struct RedisConnection {
    conn: Connection
}
/// Opens a Redis connection to localhost, reading the port from the Rocket
/// config key `redis_port` (falling back to the default 6379).
/// Returns `None` if either the client handle or the connection fails.
fn new_redis_connection() -> Option<RedisConnection> {
    let redis_port = config::active()
        .expect("No config found")
        .get_int("redis_port")
        .unwrap_or(6379);
    let url = format!("redis://localhost:{}", redis_port);
    redis::Client::open(url.as_str())
        .ok()
        .and_then(|client| client.get_connection().ok())
        .map(|conn| RedisConnection { conn })
}
// Ensures the global "counter" key exists (SETNX leaves an existing value
// untouched). Panics if Redis is unreachable, which aborts startup on purpose.
fn initialize_redis() {
    let conn = new_redis_connection().expect("Could not connect to redis");
    let _ : i32 = conn.conn.set_nx("counter", 0).expect("Could not initialize counter");
}
// Request guard: opens a brand-new Redis connection for every request.
// NOTE(review): per-request connections are costly; a pool would likely be
// preferable, but none is visible in this file.
impl<'a, 'r> FromRequest<'a, 'r> for RedisConnection {
    type Error = ();
    fn from_request(_: &'a Request<'r>) -> Outcome<Self, (Status, Self::Error), ()> {
        match new_redis_connection() {
            Some(conn) => Outcome::Success(conn),
            // Connection failure surfaces as a 500 to the client.
            _ => Outcome::Failure((Status::InternalServerError, ()))
        }
    }
}
// Entry point: mounts the routes, seeds the Redis counter, then launches.
// NOTE(review): `initialize_redis` reads `config::active()`, so it must run
// after `rocket::ignite()` has loaded the config — the current ordering
// appears deliberate; confirm before reordering.
fn main() {
    let app = rocket::ignite().mount("/", routes![index, press, files]);
    initialize_redis();
    app.launch();
}
|
use testutil::*;
use seven_client::sms::{Sms, SmsTextParams, SmsJsonParams};
mod testutil;
// Builds an SMS API client on top of the shared test-helper client
// (credentials/config come from `testutil::get_client`).
fn init_client() -> Sms {
    Sms::new(get_client())
}
// Smoke test: sending a plain-text SMS with all optional params unset
// succeeds. NOTE(review): presumably hits a live/sandboxed API via the
// configured client — confirm it is safe to run in CI.
#[test]
fn text() {
    assert!(init_client().text(SmsTextParams {
        debug: None,
        delay: None,
        details: None,
        flash: None,
        foreign_id: None,
        from: None,
        label: None,
        no_reload: None,
        performance_tracking: None,
        return_msg_id: None,
        text: "HI2U!".to_string(),
        to: "+491716992343".to_string(),
        ttl: None,
        udh: None,
        unicode: None,
        utf8: None,
    }).is_ok());
}
// Smoke test: the JSON variant of the send endpoint succeeds with the same
// minimal payload as the text test above.
#[test]
fn json() {
    assert!(init_client().json(SmsJsonParams {
        debug: None,
        delay: None,
        flash: None,
        foreign_id: None,
        from: None,
        label: None,
        no_reload: None,
        performance_tracking: None,
        text: "HI2U!".to_string(),
        to: "+491716992343".to_string(),
        ttl: None,
        udh: None,
        unicode: None,
        utf8: None,
    }).is_ok());
}
|
use rand::{thread_rng, Rng};
/// Prints `prompt` (without a newline), reads one line from stdin, and
/// returns it with a single trailing `\n` / `\r\n` stripped.
fn raw_input(prompt: &str) -> String {
    use std::io::{stdin, stdout, Write};
    print!("{}", prompt);
    let _ = stdout().flush();
    let mut line = String::new();
    stdin()
        .read_line(&mut line)
        .expect("Did not enter a correct string");
    // Strip at most one line terminator (handles both "\n" and "\r\n").
    if line.ends_with('\n') {
        line.pop();
    }
    if line.ends_with('\r') {
        line.pop();
    }
    line
}
/// Withdraws up to `amt` from `*from`, returning how much was actually
/// taken. If `*from` holds less than `amt`, everything available is taken
/// and `*from` is left at zero.
fn take(from: &mut i32, amt: i32) -> i32 {
    let taken = amt.min(*from);
    *from -= taken;
    taken
}
/// Moves up to `amt` from `*from` into `*to`; when `*from` holds less than
/// `amt`, whatever it has is transferred instead.
fn transfer(from: &mut i32, to: &mut i32, amt: i32) {
    // Inlined withdrawal: drain everything when short, otherwise exactly `amt`.
    let moved = if *from < amt {
        std::mem::replace(from, 0)
    } else {
        *from -= amt;
        amt
    };
    *to += moved;
}
const FARM_COST: i32 = 25;
const FOOD_PRICE: i32 = 1;
const FOOD_PER_PERSON: i32 = 1;
const FOOD_PER_FARM: i32 = 4;
const BABIES_PER_POP: f32 = 0.1;
const HOSPITAL_BABY_SURVIVE_BONUS: f32 = BABIES_PER_POP/3.0;
const ARMY_COST: i32 = 19;
const BUILDING_COST: i32 = 50;
#[derive(Debug)]
struct Village{
name: String,
pop: i32,
money: i32,
farms: i32,
food_per_farm: i32,
hospitals: i32,
granarys: i32,
walls: i32,
}
impl Village {
    /// Creates a starting village: two villagers, 50 gold, and one farm
    /// producing at the baseline rate.
    fn new(name: String) -> Village {
        Village {
            name,
            pop: 2,
            money: 50,
            farms: 1,
            food_per_farm: FOOD_PER_FARM,
            hospitals: 0,
            granarys: 0,
            walls: 0,
        }
    }
}
struct Gov{
money: i32,
tax_rate: i32,
}
fn update(gov: &mut Gov, vil: &mut Village){
let req_food = FOOD_PER_PERSON * vil.pop;
let made_food = vil.farms * FOOD_PER_FARM;
let rem_food = made_food - req_food;
if rem_food >= 0{ // we have leftovers
vil.money += rem_food * FOOD_PRICE;
if vil.money > 20{
vil.pop += (vil.pop as f32 * BABIES_PER_POP).ceil() as i32;
}
} else { //not enough food
let starving = (rem_food.abs() - 1)/FOOD_PER_PERSON + 1;
take(&mut vil.pop, starving);// send pepole to village with extra food?
}
println!("Rem food {:?}", rem_food);
let tax = vil.pop * gov.tax_rate;
println!("tax {:?}", tax);
println!("vill money {:?}", vil.money);
transfer(&mut vil.money, &mut gov.money, tax);
if vil.money > FOOD_PRICE{
vil.farms += 1;
}
println!("You have {:?} gold coins.", gov.money);
println!("Village state:\n {:?}", vil);
}
// Interactive turn for the player (the "governor"): set the tax rate,
// optionally buy an army, and optionally construct one building.
// Returns whether the game loop should exit (currently always false).
// NOTE(review): purchases are not gated on affordability — gov.money can go
// negative; confirm whether debt is intended.
fn governer(gov: &mut Gov, vil: &mut Village) -> bool {
    let gove_ds = raw_input("What is the tax rate: ");
    let asnum: Result<i32, _> = gove_ds.parse();
    match asnum {
        Ok(n) => {
            gov.tax_rate = n;
        },
        // Invalid input leaves the previous tax rate unchanged.
        Err(_) => println!("Thats not a number")
    }
    let army = raw_input("do you want to build an army: ");
    if army == ("yes") { // not sure how an army will help yet
        gov.money -= ARMY_COST
    } else {
        println!("you said {:?} the consequenses are unkown", army);
    }
    let cat = raw_input ("would you like to build something to assist in production or saftey: ");
    if cat == "yes" {
        let build = raw_input("do you want to build a granary wall or hospital: ");
        if build == ("granary") {
            vil.granarys += 1;
            gov.money -= BUILDING_COST;
            vil.food_per_farm += 2;
            // famines will go down
        }
        if build == ("hospital") {
            vil.hospitals += 1;
            gov.money -= BUILDING_COST;
            // Population boost only once the village is viable.
            if vil.hospitals >= 1 && (vil.pop >= 2) {
                vil.pop += vil.hospitals * 4; // may change
                // sickness will go down
            }
        }
        if build == ("wall") {
            vil.walls += 1;
            gov.money -= BUILDING_COST;
            // raids go down
        }
    } else {
        println!("you said {:?} the consequenses are unkown", cat)
    }
    return false;
}
// Random events: sickness, famine, and raids — only famine is implemented.
// NOTE(review): this function is never called from `main`, and `gov` is only
// used by the sketched-out events below; requires the `rand::Rng` trait in
// scope for `gen_range`.
fn promblems(vil: &mut Village, gov: &mut Gov) {
    let mut rng = thread_rng();
    // Roughly a 1-in-94 chance of famine per call.
    let famine: u32 = rng.gen_range(1, 95);
    if famine == 36 {
        // Slash per-farm yield (clamped at zero) and destroy one farm.
        vil.food_per_farm -= rng.gen_range(1, 20);
        if vil.food_per_farm < 0 {
            vil.food_per_farm = 0;
        }
        vil.farms -= 1;
        // TODO: famine should persist for a random 1-7 turns.
        println!("A famine has hit")
    }
    // TODO(sketch): sickness — ~1-in-70 chance; kill a random 1-30 villagers
    // for a random 1-6 turns, printing "your people are experiencing sickness".
    // TODO(sketch): raid — ~1-in-150 chance; drain random amounts from
    // vil.money (1-40) and gov.money (1-60), possibly destroying 1-3
    // buildings, printing "you have been raided".
}
fn main() {
println!("Hello, world!");
let mut g: Gov = Gov{money: 0, tax_rate: 1};
let mut v: Village = Village::new(String::from("Valanthar"));
let mut exit = false;
while !exit {
update(&mut g, &mut v);
exit = governer(&mut g, &mut v);
}
}
|
pub mod author_builder;
pub mod subscriber_builder;
|
use fibers_timeout_queue::TimeoutQueue;
use fibers_transport::{PollRecv, PollSend, Result, Transport, UdpTransport};
use std::collections::{HashMap, HashSet, VecDeque};
use std::net::SocketAddr;
use std::time::{Duration, SystemTime, UNIX_EPOCH};
use stun_codec::{Attribute, DecodedMessage, Message, MessageClass, TransactionId};
use super::StunTransport;
/// [`StunUdpTransporter`] builder.
///
/// [`StunUdpTransporter`]: ./struct.StunUdpTransporter.html
#[derive(Debug, Clone)]
pub struct StunUdpTransporterBuilder {
    rto: Duration,                       // initial retransmission timeout
    rto_cache_duration: Duration,        // how long a learned RTO stays cached per peer
    min_transaction_interval: Duration,  // pacing between new transactions to one peer
    max_outstanding_transactions: usize, // cap on concurrent transactions per peer
}
impl StunUdpTransporterBuilder {
    /// The default value of RTO (Retransmission TimeOut).
    ///
    /// > A client SHOULD retransmit a STUN request message starting with an
    /// > interval of RTO ("Retransmission TimeOut"), doubling after each
    /// > retransmission. The RTO is an estimate of the round-trip time (RTT),
    /// > and is computed as described in RFC 2988 [RFC2988], with two
    /// > exceptions. First, the initial value for RTO SHOULD be configurable
    /// > (rather than the 3 s recommended in RFC 2988) and SHOULD be greater
    /// > than **500 ms**.
    /// >
    /// > [RFC 5389 -- 7.2.1. Sending over UDP]
    ///
    /// [RFC 5389 -- 7.2.1. Sending over UDP]: https://tools.ietf.org/html/rfc5389#section-7.2.1
    pub const DEFAULT_RTO_MS: u64 = 500;

    /// The default duration preserving a cached RTO (Retransmission TimeOut).
    ///
    /// > The value for RTO SHOULD be cached by a client after the completion
    /// > of the transaction, and used as the starting value for RTO for the
    /// > next transaction to the same server (based on equality of IP
    /// > address). The value SHOULD be considered stale and discarded after
    /// > **10 minutes**.
    /// >
    /// > [RFC 5389 -- 7.2.1. Sending over UDP]
    ///
    /// [RFC 5389 -- 7.2.1. Sending over UDP]: https://tools.ietf.org/html/rfc5389#section-7.2.1
    pub const DEFAULT_RTO_CACHE_DURATION_MS: u64 = 10 * 60 * 1000;

    /// The default max concurrent transactions by a client to a server.
    ///
    /// > At any time, a client MAY have multiple outstanding STUN requests
    /// > with the same STUN server (that is, multiple transactions in
    /// > progress, with different transaction IDs). Absent other limits to
    /// > the rate of new transactions (such as those specified by ICE for
    /// > connectivity checks or when STUN is run over TCP), a client SHOULD
    /// > space new transactions to a server by RTO and SHOULD limit itself to
    /// > **ten outstanding transactions** to the same server.
    /// >
    /// > [RFC 5389 -- 7.2. Sending the Request or Indication]
    ///
    /// [RFC 5389 -- 7.2. Sending the Request or Indication]: https://tools.ietf.org/html/rfc5389#section-7.2
    pub const DEFAULT_MAX_OUTSTANDING_TRANSACTIONS: usize = 10;

    /// The default interval between transactions issued by a client to a serve.
    ///
    /// > At any time, a client MAY have multiple outstanding STUN requests
    /// > with the same STUN server (that is, multiple transactions in
    /// > progress, with different transaction IDs). Absent other limits to
    /// > the rate of new transactions (such as those specified by ICE for
    /// > connectivity checks or when STUN is run over TCP), **a client SHOULD
    /// > space new transactions to a server by RTO** and SHOULD limit itself to
    /// > ten outstanding transactions to the same server.
    /// >
    /// > [RFC 5389 -- 7.2. Sending the Request or Indication]
    ///
    /// [RFC 5389 -- 7.2. Sending the Request or Indication]: https://tools.ietf.org/html/rfc5389#section-7.2
    pub const DEFAULT_MIN_TRANSACTION_INTERVAL_MS: u64 = Self::DEFAULT_RTO_MS;

    /// Makes a new `StunUdpTransporterBuilder` instance with the default settings.
    pub fn new() -> Self {
        Self::default()
    }

    /// Sets the RTO of the resulting instance.
    ///
    /// The default value is `Duration::from_millis(DEFAULT_RTO_MS)`.
    pub fn rto(&mut self, rto: Duration) -> &mut Self {
        self.rto = rto;
        self
    }

    /// Sets the RTO cache duration of the resulting instance.
    ///
    /// The default value is `Duration::from_millis(DEFAULT_RTO_CACHE_DURATION_MS)`.
    pub fn rto_cache_duration(&mut self, duration: Duration) -> &mut Self {
        self.rto_cache_duration = duration;
        self
    }

    /// Sets the minimum interval of the consecutive request/response transactions of
    /// the resulting instance.
    ///
    /// The default value is `Duration::from_millis(DEFAULT_MIN_TRANSACTION_INTERVAL_MS)`.
    pub fn min_transaction_interval(&mut self, interval: Duration) -> &mut Self {
        self.min_transaction_interval = interval;
        self
    }

    /// Sets the number of the maximum outstanding transactions of the resulting instance.
    ///
    /// The default value is `DEFAULT_MAX_OUTSTANDING_TRANSACTIONS`.
    pub fn max_outstanding_transactions(&mut self, max: usize) -> &mut Self {
        self.max_outstanding_transactions = max;
        self
    }

    /// Makes a new `StunUdpTransporter` instance with the given settings.
    pub fn finish<A, T>(&self, inner: T) -> StunUdpTransporter<A, T>
    where
        A: Attribute,
        T: UdpTransport<SendItem = Message<A>, RecvItem = DecodedMessage<A>>,
    {
        // The retransmission machinery wraps the raw UDP transport; per-peer
        // state starts empty and is created lazily on first send.
        let inner = RetransmitTransporter {
            inner,
            timeout_queue: TimeoutQueue::new(),
            peers: HashMap::new(),
            rto: self.rto,
            rto_cache_duration: self.rto_cache_duration,
            min_transaction_interval: self.min_transaction_interval,
            max_outstanding_transactions: self.max_outstanding_transactions,
        };
        StunUdpTransporter { inner }
    }
}
impl Default for StunUdpTransporterBuilder {
    /// Returns a builder populated with the RFC 5389 recommended defaults.
    fn default() -> Self {
        let rto = Duration::from_millis(Self::DEFAULT_RTO_MS);
        let rto_cache_duration = Duration::from_millis(Self::DEFAULT_RTO_CACHE_DURATION_MS);
        let min_transaction_interval =
            Duration::from_millis(Self::DEFAULT_MIN_TRANSACTION_INTERVAL_MS);
        StunUdpTransporterBuilder {
            rto,
            rto_cache_duration,
            min_transaction_interval,
            max_outstanding_transactions: Self::DEFAULT_MAX_OUTSTANDING_TRANSACTIONS,
        }
    }
}
/// UDP transport layer that can be used for STUN.
#[derive(Debug)]
pub struct StunUdpTransporter<A, T> {
    // All behavior is delegated to the retransmitting wrapper.
    inner: RetransmitTransporter<A, T>,
}
impl<A, T> StunUdpTransporter<A, T>
where
    A: Attribute,
    T: UdpTransport<SendItem = Message<A>, RecvItem = DecodedMessage<A>>,
{
    /// Makes a new `StunUdpTransporter` instance.
    ///
    /// This is equivalent to `StunUdpTransporterBuilder::new().finish(inner)`.
    pub fn new(inner: T) -> Self {
        StunUdpTransporterBuilder::new().finish(inner)
    }

    /// Returns a reference to the inner transporter.
    pub fn inner_ref(&self) -> &T {
        &self.inner.inner
    }

    /// Returns a mutable reference to the inner transporter.
    pub fn inner_mut(&mut self) -> &mut T {
        &mut self.inner.inner
    }
}
// `Transport` is implemented by pure delegation to the retransmitting inner
// transporter; retransmission logic lives in `RetransmitTransporter`.
impl<A, T> Transport for StunUdpTransporter<A, T>
where
    A: Attribute,
    T: UdpTransport<SendItem = Message<A>, RecvItem = DecodedMessage<A>>,
{
    type PeerAddr = SocketAddr;
    type SendItem = Message<A>;
    type RecvItem = DecodedMessage<A>;

    fn start_send(&mut self, peer: Self::PeerAddr, item: Self::SendItem) -> Result<()> {
        track!(self.inner.start_send(peer, item))
    }

    fn poll_send(&mut self) -> PollSend {
        track!(self.inner.poll_send())
    }

    fn poll_recv(&mut self) -> PollRecv<(Self::PeerAddr, Self::RecvItem)> {
        track!(self.inner.poll_recv())
    }
}
// Delegates transaction completion to the retransmitting inner transporter,
// which stops further retransmissions for the given transaction.
impl<A, T> StunTransport<A> for StunUdpTransporter<A, T>
where
    A: Attribute,
    T: UdpTransport<SendItem = Message<A>, RecvItem = DecodedMessage<A>>,
{
    fn finish_transaction(
        &mut self,
        peer: &SocketAddr,
        transaction_id: TransactionId,
    ) -> Result<()> {
        track!(self.inner.finish_transaction(peer, transaction_id))
    }
}
/// An implementation of [`StunTransport`] that retransmits request messages for improving reliability.
///
/// [`StunTransport`]: ./trait.StunTransport.html
#[derive(Debug)]
struct RetransmitTransporter<A, T> {
    inner: T,                                  // underlying UDP transport
    timeout_queue: TimeoutQueue<TimeoutEntry<A>>, // drives retransmits/pacing/RTO-cache expiry
    peers: HashMap<SocketAddr, PeerState<A>>,  // per-peer transaction bookkeeping
    rto: Duration,                             // initial RTO for new peers
    rto_cache_duration: Duration,              // lifetime of a peer's cached RTO
    min_transaction_interval: Duration,        // pacing between transactions per peer
    max_outstanding_transactions: usize,       // per-peer concurrency cap
}
impl<A, T> RetransmitTransporter<A, T>
where
    A: Attribute,
    T: UdpTransport<SendItem = Message<A>, RecvItem = DecodedMessage<A>>,
{
    /// Remaining time before a new transaction to `peer` is allowed, or
    /// `None` once the minimum inter-transaction interval has elapsed.
    /// Panics if `peer` has no state (callers ensure it exists).
    fn waiting_time(&self, peer: SocketAddr) -> Option<Duration> {
        self.peers[&peer]
            .last_transaction_start_time
            .elapsed()
            .ok()
            .and_then(|d| self.min_transaction_interval.checked_sub(d))
    }

    /// Mutable access to an existing peer's state (caller guarantees presence).
    fn peer_mut(&mut self, peer: SocketAddr) -> &mut PeerState<A> {
        self.peers.get_mut(&peer).expect("never fails")
    }

    /// Starts (or queues) a request transaction to `peer`.
    /// `first` indicates this is the request's first submission (queued at the
    /// back) rather than a re-queue (front).
    #[allow(clippy::map_entry)]
    fn start_transaction(
        &mut self,
        peer: SocketAddr,
        request: Message<A>,
        first: bool,
    ) -> Result<()> {
        if !self.peers.contains_key(&peer) {
            self.peers.insert(peer, PeerState::new(peer, self.rto));
        }
        if self.peers[&peer].waiting {
            // Already pacing: just queue.
            self.peer_mut(peer).pending(request, first);
        } else if let Some(duration) = self.waiting_time(peer) {
            // Too soon after the last transaction: queue and schedule release.
            self.peer_mut(peer).waiting = true;
            self.timeout_queue
                .push(TimeoutEntry::AllowNextRequest { peer }, duration);
            self.peer_mut(peer).pending(request, first);
        } else if self.peers[&peer].transactions.len() >= self.max_outstanding_transactions {
            // At the concurrency cap: queue until a transaction finishes.
            self.peer_mut(peer).pending(request, first);
        } else {
            // Send now and schedule the first retransmission.
            track!(self.inner.start_send(peer, request.clone()))?;
            let timeout = self.peer_mut(peer).start_transaction(request);
            self.timeout_queue.push(timeout.0, timeout.1);
        }
        Ok(())
    }

    /// Pops the next due timeout, discarding retransmit entries whose
    /// transaction has already completed.
    fn poll_timeout(&mut self) -> Option<TimeoutEntry<A>> {
        let peers = &self.peers;
        self.timeout_queue.filter_pop(|entry| {
            if let TimeoutEntry::Retransmit { peer, request, .. } = entry {
                peers.get(peer).map_or(false, |p| {
                    p.transactions.contains(&request.transaction_id())
                })
            } else {
                true
            }
        })
    }

    /// Dequeues and starts one pending request for `peer`; drops the peer's
    /// state entirely once it is idle.
    fn handle_pending_request(&mut self, peer: SocketAddr) -> Result<()> {
        if !self.peers.contains_key(&peer) {
            return Ok(());
        }
        if let Some(request) = self.peer_mut(peer).pop_pending_request() {
            track!(self.start_transaction(peer, request, false))?;
        }
        if self.peers[&peer].is_idle() {
            self.peers.remove(&peer);
        }
        Ok(())
    }

    /// Resends `request` to `peer` if its transaction is still outstanding
    /// (the peer state schedules the next, doubled, retransmission).
    fn handle_retransmit(
        &mut self,
        peer: SocketAddr,
        request: Message<A>,
        rto: Duration,
    ) -> Result<()> {
        if let Some(p) = self.peers.get_mut(&peer) {
            if let Some(request) = p.retransmit(
                request,
                rto,
                self.rto_cache_duration,
                &mut self.timeout_queue,
            ) {
                track!(self.inner.start_send(peer, request))?;
            }
        }
        Ok(())
    }
}
impl<A, T> Transport for RetransmitTransporter<A, T>
where
    A: Attribute,
    T: UdpTransport<SendItem = Message<A>, RecvItem = DecodedMessage<A>>,
{
    type PeerAddr = SocketAddr;
    type SendItem = Message<A>;
    type RecvItem = DecodedMessage<A>;

    fn start_send(&mut self, peer: SocketAddr, item: Self::SendItem) -> Result<()> {
        // Only requests get transaction/retransmission handling; indications
        // and responses pass straight through.
        if item.class() == MessageClass::Request {
            track!(self.start_transaction(peer, item, true))
        } else {
            track!(self.inner.start_send(peer, item))
        }
    }

    fn poll_send(&mut self) -> PollSend {
        // Drain all due timeouts before polling the inner transport.
        while let Some(entry) = self.poll_timeout() {
            match entry {
                TimeoutEntry::Retransmit {
                    peer,
                    request,
                    next_rto,
                } => {
                    track!(self.handle_retransmit(peer, request, next_rto))?;
                }
                TimeoutEntry::ExpireRtoCache { peer, cached_rto } => {
                    // Only reset if the cache still holds the value this entry
                    // was created for (a newer cache value stays).
                    if let Some(p) = self.peers.get_mut(&peer) {
                        if p.cached_rto == cached_rto {
                            p.cached_rto = self.rto;
                        }
                    }
                }
                TimeoutEntry::AllowNextRequest { peer } => {
                    // Pacing interval elapsed: release one queued request.
                    self.peer_mut(peer).waiting = false;
                    track!(self.handle_pending_request(peer))?;
                }
            }
        }
        track!(self.inner.poll_send())
    }

    fn poll_recv(&mut self) -> PollRecv<(Self::PeerAddr, Self::RecvItem)> {
        track!(self.inner.poll_recv())
    }
}
impl<A, T> StunTransport<A> for RetransmitTransporter<A, T>
where
    A: Attribute,
    T: UdpTransport<SendItem = Message<A>, RecvItem = DecodedMessage<A>>,
{
    // Marks the transaction complete (stopping retransmits via the
    // poll_timeout filter) and gives a queued request a chance to start.
    fn finish_transaction(
        &mut self,
        peer: &SocketAddr,
        transaction_id: TransactionId,
    ) -> Result<()> {
        if let Some(p) = self.peers.get_mut(peer) {
            p.finish_transaction(transaction_id);
        }
        track!(self.handle_pending_request(*peer))
    }
}
// Events scheduled on the shared timeout queue.
#[derive(Debug)]
enum TimeoutEntry<A> {
    // Resend `request` to `peer`, then double the RTO to `next_rto`.
    Retransmit {
        peer: SocketAddr,
        request: Message<A>,
        next_rto: Duration,
    },
    // Drop `peer`'s cached RTO if it still equals `cached_rto`.
    ExpireRtoCache {
        peer: SocketAddr,
        cached_rto: Duration,
    },
    // Pacing interval elapsed; `peer` may start its next queued request.
    AllowNextRequest {
        peer: SocketAddr,
    },
}
// Per-peer bookkeeping for the retransmitting transport.
#[derive(Debug)]
struct PeerState<A> {
    peer: SocketAddr,
    transactions: HashSet<TransactionId>,      // transactions currently in flight
    pending_requests: VecDeque<Message<A>>,    // requests waiting to start
    waiting: bool,                             // true while pacing delays the next request
    last_transaction_start_time: SystemTime,   // UNIX_EPOCH until the first send
    cached_rto: Duration,                      // starting RTO (RFC 5389 RTO caching)
}
impl<A: Attribute> PeerState<A> {
    /// Fresh state for `peer`, seeding the RTO cache with the configured RTO.
    fn new(peer: SocketAddr, rto: Duration) -> Self {
        PeerState {
            peer,
            transactions: HashSet::new(),
            pending_requests: VecDeque::new(),
            waiting: false,
            // UNIX_EPOCH guarantees the first waiting_time check passes.
            last_transaction_start_time: UNIX_EPOCH,
            cached_rto: rto,
        }
    }

    /// Queues a request: first submissions go to the back, re-queued
    /// requests to the front (so they keep their place in line).
    fn pending(&mut self, request: Message<A>, first: bool) {
        if first {
            self.pending_requests.push_back(request);
        } else {
            self.pending_requests.push_front(request);
        }
    }

    /// True when this peer has no in-flight transactions and no pacing delay
    /// (pending requests are not consulted — see pop_pending_request).
    fn is_idle(&self) -> bool {
        self.transactions.is_empty() && !self.waiting
    }

    /// Pops the next pending request whose transaction id is registered.
    /// NOTE(review): requests queued before their transaction was ever
    /// started will never be in `transactions`, so they appear to be silently
    /// discarded here — confirm whether that is intended.
    fn pop_pending_request(&mut self) -> Option<Message<A>> {
        while let Some(request) = self.pending_requests.pop_front() {
            if self.transactions.contains(&request.transaction_id()) {
                return Some(request);
            }
        }
        None
    }

    /// If the transaction is still outstanding, schedules the next (doubled)
    /// retransmission, updates the RTO cache upward, and returns the request
    /// to be resent; otherwise returns None.
    fn retransmit(
        &mut self,
        request: Message<A>,
        rto: Duration,
        rto_cache_duration: Duration,
        queue: &mut TimeoutQueue<TimeoutEntry<A>>,
    ) -> Option<Message<A>> {
        if self.transactions.contains(&request.transaction_id()) {
            queue.push(
                TimeoutEntry::Retransmit {
                    peer: self.peer,
                    request: request.clone(),
                    next_rto: rto * 2,
                },
                rto,
            );
            if self.cached_rto < rto {
                // Remember the larger RTO and schedule its expiry.
                self.cached_rto = rto;
                queue.push(
                    TimeoutEntry::ExpireRtoCache {
                        peer: self.peer,
                        cached_rto: rto,
                    },
                    rto_cache_duration,
                );
            }
            Some(request)
        } else {
            None
        }
    }

    /// Registers the transaction, stamps the pacing clock, and returns the
    /// first retransmit entry (due after the cached RTO).
    fn start_transaction(&mut self, request: Message<A>) -> (TimeoutEntry<A>, Duration) {
        self.transactions.insert(request.transaction_id());
        self.last_transaction_start_time = SystemTime::now();
        let entry = TimeoutEntry::Retransmit {
            peer: self.peer,
            request,
            next_rto: self.cached_rto * 2,
        };
        (entry, self.cached_rto)
    }

    /// Removes the transaction; pending retransmit entries for it are
    /// filtered out when the timeout queue is polled.
    fn finish_transaction(&mut self, transaction_id: TransactionId) {
        self.transactions.remove(&transaction_id);
    }
}
|
/*
* Copyright (c) 2013, David Renshaw (dwrenshaw@gmail.com)
*
* See the LICENSE file in the capnproto-rust root directory.
*/
#[link(name = "test", vers = "alpha", author = "dwrensha")];
#[crate_type = "bin"];
extern mod capnprust;
//use capnprust::*;
pub mod test_capnp;
// Exercises primitive-typed lists (u8, u64, bool, void) through a builder,
// then re-reads them via a reader. Written in a pre-1.0 Rust dialect
// (`do`-closures, `range`) — kept verbatim.
#[test]
fn testPrimList () {
    use capnprust::message::*;
    use test_capnp::*;

    // Make the first segment small to force allocation of a second segment.
    let message = MessageBuilder::new(50,
                                      SUGGESTED_ALLOCATION_STRATEGY);
    let testPrimList = message.initRoot::<TestPrimList::Builder>();
    let uint8List = testPrimList.initUint8List(100);
    for i in range(0, uint8List.size()) {
        uint8List.set(i, i as u8);
    }
    let uint64List = testPrimList.initUint64List(20);
    for i in range(0, uint64List.size()) {
        uint64List.set(i, i as u64);
    }
    // 65 bools crosses a 64-bit word boundary on purpose.
    let boolList = testPrimList.initBoolList(65);
    boolList.set(0, true);
    boolList.set(1, true);
    boolList.set(2, true);
    boolList.set(3, true);
    boolList.set(5, true);
    boolList.set(8, true);
    boolList.set(13, true);
    boolList.set(64, true);
    assert!(boolList.get(0));
    assert!(!boolList.get(4));
    assert!(!boolList.get(63));
    assert!(boolList.get(64));
    let voidList = testPrimList.initVoidList(1025);
    voidList.set(257, ());
    // Re-read everything through the reader view.
    do testPrimList.asReader |testPrimListReader| {
        let uint8List = testPrimListReader.getUint8List();
        for i in range(0, uint8List.size()) {
            assert!(uint8List.get(i) == i as u8);
        }
        let uint64List = testPrimListReader.getUint64List();
        for i in range(0, uint64List.size()) {
            assert!(uint64List.get(i) == i as u64);
        }
        let boolList = testPrimListReader.getBoolList();
        assert!(boolList.get(0));
        assert!(boolList.get(1));
        assert!(boolList.get(2));
        assert!(boolList.get(3));
        assert!(!boolList.get(4));
        assert!(boolList.get(5));
        assert!(!boolList.get(6));
        assert!(!boolList.get(7));
        assert!(boolList.get(8));
        assert!(!boolList.get(9));
        assert!(!boolList.get(10));
        assert!(!boolList.get(11));
        assert!(!boolList.get(12));
        assert!(boolList.get(13));
        assert!(!boolList.get(63));
        assert!(boolList.get(64));
        assert!(testPrimListReader.getVoidList().size() == 1025);
    }
}
// Round-trips scalar fields of a large struct (including a nested struct)
// through builder and reader. Pre-1.0 Rust dialect — kept verbatim.
#[test]
fn testBigStruct() {
    use capnprust::message::*;
    use test_capnp::*;

    // Make the first segment small to force allocation of a second segment.
    let message = MessageBuilder::new(5,
                                      SUGGESTED_ALLOCATION_STRATEGY);
    let bigStruct = message.initRoot::<BigStruct::Builder>();
    bigStruct.setBoolField(false);
    bigStruct.setInt8Field(-128);
    bigStruct.setInt16Field(0);
    bigStruct.setInt32Field(1009);
    let inner = bigStruct.initStructField();
    inner.setFloat64Field(0.1234567);
    inner.setBoolFieldB(true);
    // Overwrite a previously-set field before reading back.
    bigStruct.setBoolField(true);
    do bigStruct.asReader |bigStructReader| {
        assert!(bigStructReader.getInt8Field() == -128);
        assert!(bigStructReader.getInt32Field() == 1009);
        let innerReader = bigStructReader.getStructField();
        assert!(!innerReader.getBoolFieldA());
        assert!(innerReader.getBoolFieldB());
        assert!(innerReader.getFloat64Field() == 0.1234567);
    }
}
// Round-trips an enum list through builder and reader. Pre-1.0 Rust dialect
// (`do`, `range::<uint>`, `fail!`) — kept verbatim.
#[test]
fn testComplexList () {
    use capnprust::message::*;
    use test_capnp::*;

    let message = MessageBuilder::new_default();
    let testComplexList = message.initRoot::<TestComplexList::Builder>();
    let enumList = testComplexList.initEnumList(100);
    for i in range::<uint>(0, 10) {
        enumList.set(i, AnEnum::qux);
    }
    for i in range::<uint>(10, 20) {
        enumList.set(i, AnEnum::bar);
    }
    do testComplexList.asReader |complexListReader| {
        let enumListReader = complexListReader.getEnumList();
        for i in range::<uint>(0,10) {
            match enumListReader.get(i) {
                Some(AnEnum::qux) => {}
                _ => fail!()
            }
        }
        for i in range::<uint>(10,20) {
            match enumListReader.get(i) {
                Some(AnEnum::bar) => {}
                _ => fail!()
            }
        }
    }
}
// Empty entry point: this crate builds as a bin but all logic lives in the
// #[test] functions above.
fn main () {
}
|
use std::sync::Arc;
use apllodb_shared_components::{ApllodbSessionResult, SessionWithTx};
use apllodb_storage_engine_interface::{
RowSelectionQuery, StorageEngine, TableName, WithTxMethods,
};
use crate::{
attribute::attribute_name::AttributeName,
condition::Condition,
sql_processor::{
query::{
query_executor::QueryExecutor,
query_plan::{query_plan_tree::QueryPlanTree, QueryPlan},
},
sql_processor_context::SqlProcessorContext,
},
};
use super::modification_plan::{
modification_plan_tree::modification_plan_node::{
InsertNode, ModificationPlanNode, UpdateNode,
},
ModificationPlan,
};
/// Modification (INSERT, UPDATE, and DELETE) executor which inputs a ModificationPlan requests to storage engine.
#[derive(Clone, Debug, new)]
pub(crate) struct ModificationExecutor<Engine: StorageEngine> {
    // Shared processor context; grants access to the storage engine.
    context: Arc<SqlProcessorContext<Engine>>,
}
impl<Engine: StorageEngine> ModificationExecutor<Engine> {
    /// Dispatches the plan's root node to the matching modification routine.
    /// NOTE(review): only Insert and Update nodes exist in the plan enum here;
    /// DELETE presumably has its own path — confirm elsewhere in the crate.
    pub(crate) async fn run(
        &self,
        session: SessionWithTx,
        plan: ModificationPlan,
    ) -> ApllodbSessionResult<SessionWithTx> {
        let plan_tree = plan.plan_tree;
        match plan_tree.root {
            ModificationPlanNode::Insert(insert_node) => {
                self.run_insert(session, insert_node).await
            }
            ModificationPlanNode::Update(update_node) => {
                self.run_update(session, update_node).await
            }
        }
    }

    /// Executes the child query plan to produce the rows to insert, then
    /// hands table name, column names, and rows to the storage engine.
    async fn run_insert(
        &self,
        session: SessionWithTx,
        insert_node: InsertNode,
    ) -> ApllodbSessionResult<SessionWithTx> {
        let query_executor = QueryExecutor::new(self.context.clone());
        let input_query_plan_root_id = insert_node.child;
        // Run the input query; its output schema/rows feed the INSERT.
        let (input, session) = query_executor
            .run(
                session,
                QueryPlan::new(QueryPlanTree::new(input_query_plan_root_id)),
            )
            .await?;
        let session = self
            .context
            .engine
            .with_tx()
            .insert(
                session,
                insert_node.table_name,
                // Map each schema field to its underlying column name.
                input
                    .as_schema()
                    .to_aliased_field_names()
                    .iter()
                    .map(|afn| match afn.as_attribute_name() {
                        AttributeName::ColumnNameVariant(cn) => cn,
                    })
                    .cloned()
                    .collect(),
                input.into_rows(),
            )
            .await?;
        Ok(session)
    }

    /// Translates the optional WHERE condition into a row selection and asks
    /// the storage engine to update the matching rows.
    async fn run_update(
        &self,
        session: SessionWithTx,
        update_node: UpdateNode,
    ) -> ApllodbSessionResult<SessionWithTx> {
        let selection =
            Self::condition_into_selection(&update_node.table_name, update_node.where_condition);
        let session = self
            .context
            .engine
            .with_tx()
            .update(
                session,
                update_node.table_name,
                update_node.column_values,
                selection,
            )
            .await?;
        Ok(session)
    }

    /// No condition means a full scan; otherwise delegate to the condition's
    /// own conversion into a row selection query.
    fn condition_into_selection(
        table_name: &TableName,
        condition: Option<Condition>,
    ) -> RowSelectionQuery {
        match condition {
            None => RowSelectionQuery::FullScan,
            Some(cond) => cond.into_row_selection_query(table_name.clone()),
        }
    }
}
|
// fn main() {
// // 声明变量类型
// let logical: bool = true;
// let a_float: f64 = 1.0; //常规声明
// let an_integer = 5i32; //后缀声明
// // 自动推断类型
// let default_float = 3.0;
// let default_integer = 8;
// let mut mutable = 12;
// // mutable = true;//变量的类型不可改变
// }
fn main() {
    // `u32` is a 32-bit unsigned integer; `i32` is a 32-bit signed integer.
    println!("1 + 2 = {}", 1u32 + 2);
    println!("1 - 2 = {}", 1i32 - 2);
    // Underscores may be inserted in numeric literals to improve readability.
    println!("One million is written as {}", 1_000_000u32);
}
|
use core::ops::Range;
use serde::{Deserialize, Serialize};
use std::fmt;
// A half-open byte/character range `start..end` within some source text.
#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize)]
pub struct Span {
    start: usize,
    end: usize, // exclusive
}
impl Span {
    /// Creates a span covering `start..end`.
    #[inline]
    pub const fn new(start: usize, end: usize) -> Self {
        Self { start, end }
    }

    /// Creates an empty span positioned at `span` (start == end).
    #[inline]
    pub const fn double(span: usize) -> Self {
        Self {
            start: span,
            end: span,
        }
    }

    /// Returns the (inclusive) start offset.
    #[inline]
    pub const fn start(&self) -> usize {
        self.start
    }

    /// Returns the (exclusive) end offset.
    #[inline]
    pub const fn end(&self) -> usize {
        self.end
    }

    /// Returns this span as a `Range<usize>`.
    #[inline]
    pub const fn range(&self) -> Range<usize> {
        self.start..self.end
    }

    /// Merges two spans, taking `start`'s start and `end`'s end.
    #[inline]
    pub const fn merge(start: Self, end: Self) -> Span {
        Self::new(start.start, end.end)
    }

    /// Returns the width (`end - start`) of the span.
    #[inline]
    pub const fn width(&self) -> usize {
        self.end - self.start
    }

    /// Returns the smallest span covering every span in `spans`,
    /// or `0..0` when `spans` is empty.
    pub fn coalesce_span(spans: &[Span]) -> Span {
        let span = spans.get(0);
        if let Some(span) = span {
            let mut span = *span;
            for s in spans {
                if s.start() < span.start() {
                    span = Span::new(s.start(), span.end());
                }
                if s.end() > span.end() {
                    // Bug fix: this previously built Span::new(s.start(), s.end()),
                    // discarding a smaller start already accumulated (e.g.
                    // [(0,10),(5,20)] coalesced to 5..20 instead of 0..20).
                    span = Span::new(span.start(), s.end());
                }
            }
            span
        } else {
            Span::new(0, 0)
        }
    }
}
// Compact debug form matching range syntax, e.g. `3..7`.
impl fmt::Debug for Span {
    #[inline]
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "{}..{}", self.start, self.end)
    }
}
// Allows `Span::from(a..b)` / `(a..b).into()`.
impl From<Range<usize>> for Span {
    #[inline]
    fn from(range: Range<usize>) -> Self {
        Self {
            start: range.start,
            end: range.end,
        }
    }
}
impl Into<Range<usize>> for Span {
#[inline]
fn into(self) -> Range<usize> {
self.start..self.end
}
}
// Allows building a span from a `(start, end)` pair.
impl From<(usize, usize)> for Span {
    #[inline]
    fn from(range: (usize, usize)) -> Self {
        Self {
            start: range.0,
            end: range.1,
        }
    }
}
impl Into<(usize, usize)> for Span {
#[inline]
fn into(self) -> (usize, usize) {
(self.start, self.end)
}
}
// Allows building a span from a `[start, end]` array.
impl From<[usize; 2]> for Span {
    #[inline]
    fn from(range: [usize; 2]) -> Self {
        Self {
            start: range[0],
            end: range[1],
        }
    }
}
impl Into<[usize; 2]> for Span {
#[inline]
fn into(self) -> [usize; 2] {
[self.start, self.end]
}
}
|
// thread 'rustc' panicked at 'called `Option::unwrap()` on a `None` value'
// prusti-viper/src/encoder/procedure_encoder.rs:2546:57
fn main() {
    // Minimized reproduction: an unannotated two-argument closure triggered
    // the Prusti encoder panic quoted in the comment above this fn.
    let x = |_, _| 0;
    let _ = x(0, 0);
} |
use stretch::geometry::Size;
use stretch::node::Node;
use stretch::style::*;
// Demo of the `stretch` flexbox engine: a 100x100pt parent centering a child
// that is 50% of the parent's width, then printing the computed layout.
fn main() {
    let node = Node::new(
        Style {
            size: Size { width: Dimension::Points(100.0), height: Dimension::Points(100.0) },
            justify_content: JustifyContent::Center,
            ..Default::default()
        },
        vec![&Node::new(
            Style { size: Size { width: Dimension::Percent(0.5), height: Dimension::Auto }, ..Default::default() },
            vec![],
        )],
    );
    // No available-space constraint; unwrap is fine for a fixed demo tree.
    let layout = node.compute_layout(Size::undefined()).unwrap();
    println!("{:#?}", layout);
}
|
use crate::vec2::Vec2;
use crate::cast_slice::cast_boxed_slice;
use std::fmt;
use std::borrow::Cow;
/// Pixel layouts supported by `ImageData`.
#[derive(Debug, Copy, Clone, PartialEq)]
pub enum ImageFormat {
    /// Single 8-bit channel.
    R8,
    /// Three 8-bit channels (red, green, blue).
    RGB888,
    /// Four 8-bit channels (red, green, blue, alpha).
    RGBA8888,
}

impl ImageFormat {
    /// Number of bytes one pixel occupies in this format.
    pub fn bytes_per_pixel(&self) -> usize {
        match self {
            ImageFormat::R8 => 1,
            ImageFormat::RGB888 => 3,
            ImageFormat::RGBA8888 => 4,
        }
    }
}
// Raw pixel bytes plus dimensions and format. `Cow` lets the same type hold
// either owned or borrowed pixel data.
#[derive(Clone, PartialEq)]
pub struct ImageData<'data> {
    data: Cow<'data, [u8]>,
    size: Vec2<usize>,   // width (x) and height (y) in pixels
    format: ImageFormat,
}
// Debug intentionally omits the pixel bytes (they can be megabytes).
impl<'data> fmt::Debug for ImageData<'data> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "ImageData({:?}, {:?})", self.size, self.format)
    }
}
impl<'data> ImageData<'data> {
    /// Create a new ImageData. Returns None if the data has the wrong size for the given `size` and
    /// `format`.
    pub fn new(data: Vec<u8>, size: Vec2<usize>, format: ImageFormat) -> Option<ImageData<'static>> {
        let expected_data_size = size.x * size.y * format.bytes_per_pixel();
        if data.len() != expected_data_size {
            return None;
        }
        Some(ImageData {
            data: Cow::Owned(data), size, format
        })
    }

    /// Borrowing variant of `new`: same length validation, no copy.
    pub fn new_borrowed(data: &'data [u8], size: Vec2<usize>, format: ImageFormat) -> Option<ImageData<'data>> {
        let expected_data_size = size.x * size.y * format.bytes_per_pixel();
        if data.len() != expected_data_size {
            return None;
        }
        Some(ImageData {
            data: Cow::Borrowed(data), size, format
        })
    }

    /// All-zero image of the given size/format (unwrap cannot fail: the
    /// buffer is constructed with exactly the expected length).
    pub fn new_zero(size: Vec2<usize>, format: ImageFormat) -> ImageData<'static> {
        let expected_data_size = size.x * size.y * format.bytes_per_pixel();
        ImageData::new(vec![0; expected_data_size], size, format).unwrap()
    }

    /// Builds an RGB888 image from per-pixel tuples.
    // NOTE(review): the unsafe cast reinterprets `(u8, u8, u8)` as raw bytes;
    // soundness presumably depends on that tuple being size-3 with no
    // padding — confirm `cast_boxed_slice`'s documented contract.
    pub fn new_rgb888(data: Vec<(u8, u8, u8)>, size: Vec2<usize>) -> Option<ImageData<'static>> {
        let data_raw = unsafe { cast_boxed_slice(data.into_boxed_slice()) }.into_vec();
        ImageData::new(data_raw, size, ImageFormat::RGB888)
    }

    /// Builds an RGBA8888 image from per-pixel tuples.
    // NOTE(review): same unchecked-layout caveat as `new_rgb888`.
    pub fn new_rgba8888(data: Vec<(u8, u8, u8, u8)>, size: Vec2<usize>) -> Option<ImageData<'static>> {
        let data_raw = unsafe { cast_boxed_slice(data.into_boxed_slice()) }.into_vec();
        ImageData::new(data_raw, size, ImageFormat::RGBA8888)
    }

    /// Raw pixel bytes, row-major.
    pub fn data(&self) -> &[u8] {
        &self.data
    }

    /// Image dimensions in pixels.
    pub fn size(&self) -> Vec2<usize> {
        self.size
    }

    /// Pixel format of `data`.
    pub fn format(&self) -> ImageFormat {
        self.format
    }
}
|
use std::fmt;
use std::io;
use cbm::disk::directory::FileType;
use cbm::disk::file::{FileOps, Scheme};
use cbm::disk::{D64, D71, D81, Disk, DiskError, DiskType, Id};
use cbm::Petscii;
use rand::{Rng, XorShiftRng};
// Number of full soak-test iterations; each one exercises every disk type.
const ITERATIONS: usize = 100;
// Random file sizes are drawn from this range (upper bound exclusive where
// used with gen_range).
const MIN_FILE_SIZE: usize = 0;
const MAX_FILE_SIZE: usize = 64 * 1024;
// Safety cap on add/delete rounds per disk image.
const MAX_ITERATIONS_PER_IMAGE: usize = 10_000;
// Per-round probabilities, compared against a uniform f32 sample.
const DELETE_CHANCE: f32 = 0.33;
const ADD_CHANCE: f32 = 0.66;
// Fixed seed so test failures reproduce across runs.
const RNG_SEED: [u8; 16] = [
    0x04, 0xC1, 0x1D, 0xB7, 0x1E, 0xDC, 0x6F, 0x41, 0x74, 0x1B, 0x8C, 0xD7, 0x32, 0x58, 0x34, 0x99,
];
// Usable content bytes per disk block; presumably the remaining 2 bytes of a
// 256-byte block chain to the next block — TODO confirm in the cbm docs.
const CONTENT_BYTES_PER_BLOCK: usize = 254;
static DISK_TYPES: &[DiskType] = &[DiskType::D64, DiskType::D71, DiskType::D81];
// Seeded RNG so every run exercises the same sequence of disk operations
// (reproducible failures).
fn deterministic_rng() -> XorShiftRng {
    rand::SeedableRng::from_seed(RNG_SEED)
}
/// Generates a random PETSCII filename of 1..=16 random bytes.
fn random_name(rng: &mut impl Rng) -> Petscii {
    const MIN_NAME_SIZE: usize = 1;
    const MAX_NAME_SIZE: usize = 16;
    let mut bytes = vec![0u8; rng.gen_range(MIN_NAME_SIZE, MAX_NAME_SIZE + 1)];
    rng.fill(&mut bytes[..]);
    Petscii::from_bytes(&bytes)
}
// Keeps generating random names until one is usable on `disk`: not already
// present, and not ending in the 0xA0 pad byte.
fn random_available_name(rng: &mut impl Rng, disk: &Box<dyn Disk>) -> Petscii {
    loop {
        let name = random_name(rng);
        // Filenames can't end with 0xA0 since the field is padded with 0xA0 bytes.
        if name.as_bytes()[name.len() - 1] == 0xA0 {
            continue;
        }
        match disk.check_filename_availability(&name) {
            Ok(_) => return name,
            // FileExists just means "try another name"; any other error is a
            // real I/O problem and aborts the test.
            Err(ref e) => match DiskError::from_io_error(e) {
                Some(ref e) if *e == DiskError::FileExists => {}
                Some(_) | None => panic!("cannot check filename availability: {}", e),
            },
        }
    }
}
/// Generates a random two-byte disk ID.
fn random_id(rng: &mut impl Rng) -> Id {
    let mut id_bytes = [0u8; 2];
    rng.fill(&mut id_bytes);
    Id::from_bytes(&id_bytes)
}
/// Picks one of the linear (non-relative) file types at random.
fn random_file_type(rng: &mut impl Rng) -> FileType {
    const LINEAR_FILE_TYPES: [FileType; 3] = [FileType::PRG, FileType::SEQ, FileType::USR];
    LINEAR_FILE_TYPES[rng.gen_range(0, LINEAR_FILE_TYPES.len())]
}
/// Creates a freshly formatted in-memory disk image of the requested type,
/// with a random name and ID.
fn new_disk(rng: &mut impl Rng, disk_type: &DiskType) -> Box<dyn Disk> {
    // Reborrow with `&mut *rng` instead of the previous `mut rng` binding
    // plus `&mut rng`, which created a needless `&mut &mut impl Rng`.
    let name = random_name(&mut *rng);
    let id = random_id(&mut *rng);
    match disk_type {
        DiskType::D64 => {
            let mut d64 = D64::open_memory(D64::geometry(false)).unwrap();
            d64.write_format(&name, &id).unwrap();
            Box::new(d64)
        }
        DiskType::D71 => {
            let mut d71 = D71::open_memory(D71::geometry(false)).unwrap();
            d71.write_format(&name, &id).unwrap();
            Box::new(d71)
        }
        DiskType::D81 => {
            let mut d81 = D81::open_memory(D81::geometry(false)).unwrap();
            d81.write_format(&name, &id).unwrap();
            Box::new(d81)
        }
    }
}
// A randomly generated file plus everything needed to verify it later.
struct RandomFile {
    // PETSCII filename, unique on the disk at creation time.
    name: Petscii,
    // Content length in bytes (== contents.len()).
    size: usize,
    // PRG/SEQ/USR, chosen at random.
    file_type: FileType,
    // The exact bytes written, kept for round-trip comparison.
    contents: Vec<u8>,
}
impl RandomFile {
    // Generates a file with a random available name, random linear type, and
    // random contents of MIN_FILE_SIZE..MAX_FILE_SIZE bytes.
    fn new(mut rng: &mut XorShiftRng, disk: &Box<dyn Disk>) -> RandomFile {
        let name = random_available_name(&mut rng, disk);
        let size: usize = rng.gen_range(MIN_FILE_SIZE, MAX_FILE_SIZE);
        let file_type = random_file_type(&mut rng);
        let mut contents = vec![0u8; size];
        rng.fill(&mut contents[..]);
        RandomFile {
            name,
            size,
            file_type,
            contents,
        }
    }
    // Number of data blocks this file occupies (ceiling division by the
    // per-block content capacity).
    fn blocks(&self) -> usize {
        (self.size + CONTENT_BYTES_PER_BLOCK - 1) / CONTENT_BYTES_PER_BLOCK
    }
    // Writes the file to `disk` using the linear allocation scheme.
    fn write(&self, disk: &mut Box<dyn Disk>) -> io::Result<()> {
        let file = disk.create_file(&self.name, self.file_type, Scheme::Linear)?;
        let mut writer = file.writer()?;
        writer.write_all(&self.contents)?;
        writer.flush()?;
        Ok(())
    }
    // Reads the file back and asserts contents and directory metadata match
    // what was originally written.
    fn verify(&self, disk: &Box<dyn Disk>) -> io::Result<()> {
        // Read file.
        let file = disk.open_file(&self.name)?;
        let mut reader = file.reader()?;
        let mut read_contents = Vec::new();
        reader.read_to_end(&mut read_contents)?;
        assert_eq!(self.contents, read_contents);
        // Check directory entry.
        let entry = disk.find_directory_entry(&self.name)?;
        assert_eq!(entry.filename, self.name);
        assert_eq!(entry.file_size, self.blocks() as u16);
        assert_eq!(entry.file_attributes.file_type, self.file_type);
        assert_eq!(entry.file_attributes.locked_flag, false);
        assert_eq!(entry.file_attributes.closed_flag, true);
        Ok(())
    }
}
impl fmt::Debug for RandomFile {
    // Manual impl so debug output stays readable: the raw contents are
    // omitted, leaving just name, size, and type.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(
            f,
            "RandomFile {{ name: {:?}, size: {}, file_type: {} }}",
            self.name, self.size, self.file_type
        )
    }
}
/// Validates the disk image and checks that the free-block count matches the
/// total capacity minus the blocks used by `files`.
///
/// Takes `&[RandomFile]` rather than `&Vec<RandomFile>` (clippy: `ptr_arg`);
/// existing `&Vec` call sites still work via deref coercion.
fn verify_disk_state(disk: &Box<dyn Disk>, files: &[RandomFile]) -> io::Result<()> {
    // Validate disk image
    disk.validate().unwrap();
    // Confirm blocks free
    let total_data_blocks = disk.disk_format()?.total_data_blocks();
    let blocks_written: usize = files.iter().map(|f| f.blocks()).sum();
    let expected_blocks_free = total_data_blocks - blocks_written;
    assert_eq!(disk.blocks_free().unwrap(), expected_blocks_free);
    Ok(())
}
#[test]
#[ignore]
// Randomized soak test: for each disk type, format an in-memory image, then
// randomly add and delete files — verifying contents, directory entries, and
// free-block accounting after every mutation — until the disk fills up.
fn integration_test() {
    let mut rng = deterministic_rng();
    for i in 0..ITERATIONS {
        for disk_type in DISK_TYPES {
            println!("Iteration: {} disk type: {:?}", i, disk_type);
            let mut disk = new_disk(&mut rng, disk_type);
            // A freshly formatted disk must be empty with all blocks free.
            assert!(disk.directory().unwrap().is_empty());
            assert_eq!(
                disk.blocks_free().unwrap(),
                disk.disk_format().unwrap().total_data_blocks()
            );
            let mut written_files = vec![];
            let mut disk_full = false;
            for _i in 0..MAX_ITERATIONS_PER_IMAGE {
                // Randomly add files
                if rng.gen::<f32>() < ADD_CHANCE {
                    let random_file = RandomFile::new(&mut rng, &disk);
                    // println!("Add: {:?}", random_file);
                    match random_file.write(&mut disk) {
                        Ok(_) => {}
                        Err(ref e) if e == &DiskError::DiskFull => {
                            // Confirm the legitimacy of this "disk full" error
                            let entries = disk.iter().count();
                            let free = disk.blocks_free().unwrap();
                            // NOTE(review): `free` appears to count blocks
                            // while `random_file.size` is in bytes — should
                            // this compare against `random_file.blocks()`?
                            // Confirm before tightening this check.
                            if entries != disk.disk_format().unwrap().max_directory_entries()
                                && free > random_file.size
                            {
                                panic!("Disk full unexpectedly.");
                            }
                            // Remove the underwritten file
                            match disk.open_file(&random_file.name) {
                                Ok(mut f) => f.delete().unwrap(),
                                Err(ref e) if e == &DiskError::NotFound => {},
                                Err(ref e) => panic!("error opening underwritten file: {}", e),
                            }
                            // Conclude the test of this disk image.
                            disk_full = true;
                            break;
                        }
                        Err(ref e) => panic!("error writing file: {}", e),
                    };
                    random_file.verify(&disk).unwrap();
                    written_files.push(random_file);
                    verify_disk_state(&disk, &written_files).unwrap();
                }
                // Randomly delete files
                if rng.gen::<f32>() < DELETE_CHANCE {
                    if !written_files.is_empty() {
                        let target_index = rng.gen_range(0, written_files.len());
                        let target = written_files.remove(target_index);
                        // println!("Delete: {:?}", target);
                        let mut file = disk.open_file(&target.name).unwrap();
                        file.delete().unwrap();
                    }
                    verify_disk_state(&disk, &written_files).unwrap();
                }
            }
            // Every image is expected to fill within the iteration cap.
            assert!(disk_full);
            // Re-verify all remaining files.
            written_files.iter().for_each(|f| {
                f.verify(&disk).unwrap();
            });
            // Delete all remaining files
            written_files.drain(..).for_each(|f| {
                let mut file = disk.open_file(&f.name).unwrap();
                file.delete().unwrap();
            });
            verify_disk_state(&disk, &written_files).unwrap();
        }
    }
}
|
use crate::function::Function;
use crate::runtime::Runtime;
use crate::variable::{FnResult, InnerVar, Variable};
use std::time::Instant;
/// Runs every argument (each must be a function) as a test case, reporting
/// per-test failures and a timed summary.
pub fn test_internal(args: Vec<Variable>, runtime: &mut Runtime) -> FnResult {
    let timer = Instant::now();
    let total = args.len();
    let mut failures = 0usize;
    for (index, value) in args.into_iter().enumerate() {
        let func = if let Variable::Normal(InnerVar::Function(f)) = value {
            f
        } else {
            panic!("Expected a function")
        };
        if func.call((vec![], runtime)).is_err() {
            // Recover a printable name for the failing test function.
            let name = match func {
                Function::Standard(file, fn_no) => runtime.get_fn_name(file, fn_no),
                Function::Native(_) => "[unknown native function]".into(),
            };
            // The error the call pushed is popped and rendered for the report.
            let error = runtime.pop_err().unwrap();
            let rendered = error.str(runtime).unwrap();
            println!("Test {} ({}) failed:\n{}", index, name, rendered);
            failures += 1;
        }
    }
    let elapsed = timer.elapsed();
    if failures == 0 {
        println!("All tests passed in {:?}", elapsed);
    } else {
        println!("{}/{} tests failed in {:?}", failures, total, elapsed);
    }
    FnResult::Ok(())
}
|
use std::fs::File;
use std::io;
use std::io::*;
use std::process;
use logging::*;
use string_tools::*;
/// Reads one line from standard input and rewrites it into the escaped token
/// form used by the interpreter (each special character becomes a
/// `\.`-delimited code).
///
/// Fix: the previous version sliced off the final byte unconditionally
/// (`input[..input.len() - 1]`), which panicked on EOF (empty read), chopped
/// a real character when the stream ended without a newline, and left a
/// stray `\r` behind on Windows line endings.
pub fn stdin() -> String {
    // Flush any pending prompt before blocking on input.
    io::stdout().flush().unwrap();
    let mut input = String::new();
    io::stdin()
        .read_line(&mut input)
        .expect("Couldn't read line");
    // Strip the trailing newline safely (handles "\n", "\r\n", and EOF).
    let line = input.trim_end_matches('\n').trim_end_matches('\r');
    let mut result = "\\.".to_string();
    for c in line.chars() {
        match c {
            ' ' => result += "\\. \\_ \\.",
            '=' => result += "\\.\\e\\.",
            '!' => result += "\\.\\x\\.",
            '(' => result += "\\.\\lp\\.",
            ')' => result += "\\.\\rp\\.",
            '[' => result += "\\.\\lb\\.",
            ']' => result += "\\.\\rb\\.",
            '\\' => result += "\\.\\\\\\.",
            '@' => result += "\\.@\\.",
            // Any other character passes through unchanged.
            other => result.push(other),
        }
    }
    result + "\\."
}
/// Reads the entire file into a `String`, panicking on any I/O error.
#[allow(dead_code)]
pub fn read(file_name: &str) -> String {
    let mut contents = String::new();
    BufReader::new(File::open(file_name).unwrap())
        .read_to_string(&mut contents)
        .unwrap();
    contents
}
/// Reads `file_name` and returns its lines with comments stripped and
/// whitespace-only lines removed.
///
/// Exits the process if the file cannot be opened. The exit status is now 1:
/// exiting with 0 after reporting an error made failures invisible to shells
/// and build scripts.
pub fn readlines(file_name: &str) -> Vec<String> {
    let file = match File::open(file_name) {
        Ok(f) => BufReader::new(f),
        Err(_) => {
            error("Could not open file.");
            process::exit(1);
        }
    };
    file.lines()
        .map(|line| remove_comments(&line.unwrap()))
        .filter(|line| remove_whitespace(line) != "")
        .collect()
}
/// Splits `contents` into lines, stripping comments and dropping lines that
/// are empty after whitespace removal.
pub fn readstring(contents: &str) -> Vec<String> {
    contents
        .lines()
        .map(|line| remove_comments(line))
        .filter(|line| remove_whitespace(line) != "")
        .collect()
}
/// Reads a module file and returns its comment-stripped lines.
///
/// Unlike `readlines`, blank lines are kept. Exits the process when the file
/// cannot be opened; the status is now 1 so callers can detect the failure
/// (it previously exited 0 after printing the error). Dead commented-out
/// code removed.
pub fn import(file_name: &str, module_name: &str) -> Vec<String> {
    let file = match File::open(file_name) {
        Ok(f) => BufReader::new(f),
        Err(_) => {
            error(format!("Could not import \"{}\"", module_name));
            process::exit(1);
        }
    };
    file.lines()
        .map(|line| remove_comments(&line.unwrap()))
        .collect()
}
|
use std::collections::HashSet;
// A finite integer domain backed by a hash set of the contained values.
pub struct SimpleDomain{
    // Values currently in the domain.
    set: HashSet<i16>,
}
impl SimpleDomain{
pub fn new(start: i16 , end: i16) -> SimpleDomain {
let mut s = HashSet::new();
for i in start..end{
s.insert(i);
}
SimpleDomain{set: s}
}
    // NOTE(review): stub — always returns 1 and consumes `self` by value;
    // presumably meant to return a value/position from the domain. The
    // camelCase name is kept because callers may rely on it.
    pub fn getPosition(self) -> i16{
        1
    }
} |
// Auto-generated (svd2rust-style) accessors for the RCC_APB2ENR register.
#[doc = "Register `APB2ENR` reader"]
pub type R = crate::R<APB2ENR_SPEC>;
#[doc = "Register `APB2ENR` writer"]
pub type W = crate::W<APB2ENR_SPEC>;
#[doc = "Field `AFIOEN` reader - Alternate function I/O clock enable"]
pub type AFIOEN_R = crate::BitReader<AFIOEN_A>;
#[doc = "Alternate function I/O clock enable\n\nValue on reset: 0"]
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum AFIOEN_A {
    // Discriminants mirror the hardware bit value.
    #[doc = "0: The selected clock is disabled"]
    Disabled = 0,
    #[doc = "1: The selected clock is enabled"]
    Enabled = 1,
}
impl From<AFIOEN_A> for bool {
#[inline(always)]
fn from(variant: AFIOEN_A) -> Self {
variant as u8 != 0
}
}
impl AFIOEN_R {
    #[doc = "Get enumerated values variant"]
    #[inline(always)]
    pub fn variant(&self) -> AFIOEN_A {
        // `self.bits` holds the single hardware bit read from the register.
        match self.bits {
            false => AFIOEN_A::Disabled,
            true => AFIOEN_A::Enabled,
        }
    }
    #[doc = "The selected clock is disabled"]
    #[inline(always)]
    pub fn is_disabled(&self) -> bool {
        *self == AFIOEN_A::Disabled
    }
    #[doc = "The selected clock is enabled"]
    #[inline(always)]
    pub fn is_enabled(&self) -> bool {
        *self == AFIOEN_A::Enabled
    }
}
#[doc = "Field `AFIOEN` writer - Alternate function I/O clock enable"]
pub type AFIOEN_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O, AFIOEN_A>;
// Writer proxy: each method sets the field to one enumerated value and
// returns the register writer for further chaining.
impl<'a, REG, const O: u8> AFIOEN_W<'a, REG, O>
where
    REG: crate::Writable + crate::RegisterSpec,
{
    #[doc = "The selected clock is disabled"]
    #[inline(always)]
    pub fn disabled(self) -> &'a mut crate::W<REG> {
        self.variant(AFIOEN_A::Disabled)
    }
    #[doc = "The selected clock is enabled"]
    #[inline(always)]
    pub fn enabled(self) -> &'a mut crate::W<REG> {
        self.variant(AFIOEN_A::Enabled)
    }
}
#[doc = "Field `IOPAEN` reader - I/O port A clock enable"]
pub use AFIOEN_R as IOPAEN_R;
#[doc = "Field `IOPBEN` reader - I/O port B clock enable"]
pub use AFIOEN_R as IOPBEN_R;
#[doc = "Field `IOPCEN` reader - I/O port C clock enable"]
pub use AFIOEN_R as IOPCEN_R;
#[doc = "Field `IOPDEN` reader - I/O port D clock enable"]
pub use AFIOEN_R as IOPDEN_R;
#[doc = "Field `IOPEEN` reader - I/O port E clock enable"]
pub use AFIOEN_R as IOPEEN_R;
#[doc = "Field `IOPFEN` reader - I/O port F clock enable"]
pub use AFIOEN_R as IOPFEN_R;
#[doc = "Field `IOPGEN` reader - I/O port G clock enable"]
pub use AFIOEN_R as IOPGEN_R;
#[doc = "Field `ADC1EN` reader - ADC 1 interface clock enable"]
pub use AFIOEN_R as ADC1EN_R;
#[doc = "Field `ADC2EN` reader - ADC 2 interface clock enable"]
pub use AFIOEN_R as ADC2EN_R;
#[doc = "Field `TIM1EN` reader - TIM1 Timer clock enable"]
pub use AFIOEN_R as TIM1EN_R;
#[doc = "Field `SPI1EN` reader - SPI 1 clock enable"]
pub use AFIOEN_R as SPI1EN_R;
#[doc = "Field `TIM8EN` reader - TIM8 Timer clock enable"]
pub use AFIOEN_R as TIM8EN_R;
#[doc = "Field `USART1EN` reader - USART1 clock enable"]
pub use AFIOEN_R as USART1EN_R;
#[doc = "Field `ADC3EN` reader - ADC3 interface clock enable"]
pub use AFIOEN_R as ADC3EN_R;
#[doc = "Field `TIM9EN` reader - TIM9 Timer clock enable"]
pub use AFIOEN_R as TIM9EN_R;
#[doc = "Field `TIM10EN` reader - TIM10 Timer clock enable"]
pub use AFIOEN_R as TIM10EN_R;
#[doc = "Field `TIM11EN` reader - TIM11 Timer clock enable"]
pub use AFIOEN_R as TIM11EN_R;
#[doc = "Field `IOPAEN` writer - I/O port A clock enable"]
pub use AFIOEN_W as IOPAEN_W;
#[doc = "Field `IOPBEN` writer - I/O port B clock enable"]
pub use AFIOEN_W as IOPBEN_W;
#[doc = "Field `IOPCEN` writer - I/O port C clock enable"]
pub use AFIOEN_W as IOPCEN_W;
#[doc = "Field `IOPDEN` writer - I/O port D clock enable"]
pub use AFIOEN_W as IOPDEN_W;
#[doc = "Field `IOPEEN` writer - I/O port E clock enable"]
pub use AFIOEN_W as IOPEEN_W;
#[doc = "Field `IOPFEN` writer - I/O port F clock enable"]
pub use AFIOEN_W as IOPFEN_W;
#[doc = "Field `IOPGEN` writer - I/O port G clock enable"]
pub use AFIOEN_W as IOPGEN_W;
#[doc = "Field `ADC1EN` writer - ADC 1 interface clock enable"]
pub use AFIOEN_W as ADC1EN_W;
#[doc = "Field `ADC2EN` writer - ADC 2 interface clock enable"]
pub use AFIOEN_W as ADC2EN_W;
#[doc = "Field `TIM1EN` writer - TIM1 Timer clock enable"]
pub use AFIOEN_W as TIM1EN_W;
#[doc = "Field `SPI1EN` writer - SPI 1 clock enable"]
pub use AFIOEN_W as SPI1EN_W;
#[doc = "Field `TIM8EN` writer - TIM8 Timer clock enable"]
pub use AFIOEN_W as TIM8EN_W;
#[doc = "Field `USART1EN` writer - USART1 clock enable"]
pub use AFIOEN_W as USART1EN_W;
#[doc = "Field `ADC3EN` writer - ADC3 interface clock enable"]
pub use AFIOEN_W as ADC3EN_W;
#[doc = "Field `TIM9EN` writer - TIM9 Timer clock enable"]
pub use AFIOEN_W as TIM9EN_W;
#[doc = "Field `TIM10EN` writer - TIM10 Timer clock enable"]
pub use AFIOEN_W as TIM10EN_W;
#[doc = "Field `TIM11EN` writer - TIM11 Timer clock enable"]
pub use AFIOEN_W as TIM11EN_W;
impl R {
    // Read-side accessors (auto-generated): each extracts one clock-enable
    // bit at its documented offset within the 32-bit register value.
    #[doc = "Bit 0 - Alternate function I/O clock enable"]
    #[inline(always)]
    pub fn afioen(&self) -> AFIOEN_R {
        AFIOEN_R::new((self.bits & 1) != 0)
    }
    #[doc = "Bit 2 - I/O port A clock enable"]
    #[inline(always)]
    pub fn iopaen(&self) -> IOPAEN_R {
        IOPAEN_R::new(((self.bits >> 2) & 1) != 0)
    }
    #[doc = "Bit 3 - I/O port B clock enable"]
    #[inline(always)]
    pub fn iopben(&self) -> IOPBEN_R {
        IOPBEN_R::new(((self.bits >> 3) & 1) != 0)
    }
    #[doc = "Bit 4 - I/O port C clock enable"]
    #[inline(always)]
    pub fn iopcen(&self) -> IOPCEN_R {
        IOPCEN_R::new(((self.bits >> 4) & 1) != 0)
    }
    #[doc = "Bit 5 - I/O port D clock enable"]
    #[inline(always)]
    pub fn iopden(&self) -> IOPDEN_R {
        IOPDEN_R::new(((self.bits >> 5) & 1) != 0)
    }
    #[doc = "Bit 6 - I/O port E clock enable"]
    #[inline(always)]
    pub fn iopeen(&self) -> IOPEEN_R {
        IOPEEN_R::new(((self.bits >> 6) & 1) != 0)
    }
    #[doc = "Bit 7 - I/O port F clock enable"]
    #[inline(always)]
    pub fn iopfen(&self) -> IOPFEN_R {
        IOPFEN_R::new(((self.bits >> 7) & 1) != 0)
    }
    #[doc = "Bit 8 - I/O port G clock enable"]
    #[inline(always)]
    pub fn iopgen(&self) -> IOPGEN_R {
        IOPGEN_R::new(((self.bits >> 8) & 1) != 0)
    }
    #[doc = "Bit 9 - ADC 1 interface clock enable"]
    #[inline(always)]
    pub fn adc1en(&self) -> ADC1EN_R {
        ADC1EN_R::new(((self.bits >> 9) & 1) != 0)
    }
    #[doc = "Bit 10 - ADC 2 interface clock enable"]
    #[inline(always)]
    pub fn adc2en(&self) -> ADC2EN_R {
        ADC2EN_R::new(((self.bits >> 10) & 1) != 0)
    }
    #[doc = "Bit 11 - TIM1 Timer clock enable"]
    #[inline(always)]
    pub fn tim1en(&self) -> TIM1EN_R {
        TIM1EN_R::new(((self.bits >> 11) & 1) != 0)
    }
    #[doc = "Bit 12 - SPI 1 clock enable"]
    #[inline(always)]
    pub fn spi1en(&self) -> SPI1EN_R {
        SPI1EN_R::new(((self.bits >> 12) & 1) != 0)
    }
    #[doc = "Bit 13 - TIM8 Timer clock enable"]
    #[inline(always)]
    pub fn tim8en(&self) -> TIM8EN_R {
        TIM8EN_R::new(((self.bits >> 13) & 1) != 0)
    }
    #[doc = "Bit 14 - USART1 clock enable"]
    #[inline(always)]
    pub fn usart1en(&self) -> USART1EN_R {
        USART1EN_R::new(((self.bits >> 14) & 1) != 0)
    }
    #[doc = "Bit 15 - ADC3 interface clock enable"]
    #[inline(always)]
    pub fn adc3en(&self) -> ADC3EN_R {
        ADC3EN_R::new(((self.bits >> 15) & 1) != 0)
    }
    #[doc = "Bit 19 - TIM9 Timer clock enable"]
    #[inline(always)]
    pub fn tim9en(&self) -> TIM9EN_R {
        TIM9EN_R::new(((self.bits >> 19) & 1) != 0)
    }
    #[doc = "Bit 20 - TIM10 Timer clock enable"]
    #[inline(always)]
    pub fn tim10en(&self) -> TIM10EN_R {
        TIM10EN_R::new(((self.bits >> 20) & 1) != 0)
    }
    #[doc = "Bit 21 - TIM11 Timer clock enable"]
    #[inline(always)]
    pub fn tim11en(&self) -> TIM11EN_R {
        TIM11EN_R::new(((self.bits >> 21) & 1) != 0)
    }
}
impl W {
    // Write-side accessors (auto-generated): each returns a field-writer
    // positioned at the field's bit offset within the register.
    #[doc = "Bit 0 - Alternate function I/O clock enable"]
    #[inline(always)]
    #[must_use]
    pub fn afioen(&mut self) -> AFIOEN_W<APB2ENR_SPEC, 0> {
        AFIOEN_W::new(self)
    }
    #[doc = "Bit 2 - I/O port A clock enable"]
    #[inline(always)]
    #[must_use]
    pub fn iopaen(&mut self) -> IOPAEN_W<APB2ENR_SPEC, 2> {
        IOPAEN_W::new(self)
    }
    #[doc = "Bit 3 - I/O port B clock enable"]
    #[inline(always)]
    #[must_use]
    pub fn iopben(&mut self) -> IOPBEN_W<APB2ENR_SPEC, 3> {
        IOPBEN_W::new(self)
    }
    #[doc = "Bit 4 - I/O port C clock enable"]
    #[inline(always)]
    #[must_use]
    pub fn iopcen(&mut self) -> IOPCEN_W<APB2ENR_SPEC, 4> {
        IOPCEN_W::new(self)
    }
    #[doc = "Bit 5 - I/O port D clock enable"]
    #[inline(always)]
    #[must_use]
    pub fn iopden(&mut self) -> IOPDEN_W<APB2ENR_SPEC, 5> {
        IOPDEN_W::new(self)
    }
    #[doc = "Bit 6 - I/O port E clock enable"]
    #[inline(always)]
    #[must_use]
    pub fn iopeen(&mut self) -> IOPEEN_W<APB2ENR_SPEC, 6> {
        IOPEEN_W::new(self)
    }
    #[doc = "Bit 7 - I/O port F clock enable"]
    #[inline(always)]
    #[must_use]
    pub fn iopfen(&mut self) -> IOPFEN_W<APB2ENR_SPEC, 7> {
        IOPFEN_W::new(self)
    }
    #[doc = "Bit 8 - I/O port G clock enable"]
    #[inline(always)]
    #[must_use]
    pub fn iopgen(&mut self) -> IOPGEN_W<APB2ENR_SPEC, 8> {
        IOPGEN_W::new(self)
    }
    #[doc = "Bit 9 - ADC 1 interface clock enable"]
    #[inline(always)]
    #[must_use]
    pub fn adc1en(&mut self) -> ADC1EN_W<APB2ENR_SPEC, 9> {
        ADC1EN_W::new(self)
    }
    #[doc = "Bit 10 - ADC 2 interface clock enable"]
    #[inline(always)]
    #[must_use]
    pub fn adc2en(&mut self) -> ADC2EN_W<APB2ENR_SPEC, 10> {
        ADC2EN_W::new(self)
    }
    #[doc = "Bit 11 - TIM1 Timer clock enable"]
    #[inline(always)]
    #[must_use]
    pub fn tim1en(&mut self) -> TIM1EN_W<APB2ENR_SPEC, 11> {
        TIM1EN_W::new(self)
    }
    #[doc = "Bit 12 - SPI 1 clock enable"]
    #[inline(always)]
    #[must_use]
    pub fn spi1en(&mut self) -> SPI1EN_W<APB2ENR_SPEC, 12> {
        SPI1EN_W::new(self)
    }
    #[doc = "Bit 13 - TIM8 Timer clock enable"]
    #[inline(always)]
    #[must_use]
    pub fn tim8en(&mut self) -> TIM8EN_W<APB2ENR_SPEC, 13> {
        TIM8EN_W::new(self)
    }
    #[doc = "Bit 14 - USART1 clock enable"]
    #[inline(always)]
    #[must_use]
    pub fn usart1en(&mut self) -> USART1EN_W<APB2ENR_SPEC, 14> {
        USART1EN_W::new(self)
    }
    #[doc = "Bit 15 - ADC3 interface clock enable"]
    #[inline(always)]
    #[must_use]
    pub fn adc3en(&mut self) -> ADC3EN_W<APB2ENR_SPEC, 15> {
        ADC3EN_W::new(self)
    }
    #[doc = "Bit 19 - TIM9 Timer clock enable"]
    #[inline(always)]
    #[must_use]
    pub fn tim9en(&mut self) -> TIM9EN_W<APB2ENR_SPEC, 19> {
        TIM9EN_W::new(self)
    }
    #[doc = "Bit 20 - TIM10 Timer clock enable"]
    #[inline(always)]
    #[must_use]
    pub fn tim10en(&mut self) -> TIM10EN_W<APB2ENR_SPEC, 20> {
        TIM10EN_W::new(self)
    }
    #[doc = "Bit 21 - TIM11 Timer clock enable"]
    #[inline(always)]
    #[must_use]
    pub fn tim11en(&mut self) -> TIM11EN_W<APB2ENR_SPEC, 21> {
        TIM11EN_W::new(self)
    }
    #[doc = "Writes raw bits to the register."]
    #[inline(always)]
    // Unsafe: bypasses the typed field writers; the caller must supply a
    // value that is valid for this register.
    pub unsafe fn bits(&mut self, bits: u32) -> &mut Self {
        self.bits = bits;
        self
    }
}
#[doc = "APB2 peripheral clock enable register (RCC_APB2ENR)\n\nYou can [`read`](crate::generic::Reg::read) this register and get [`apb2enr::R`](R). You can [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero) this register using [`apb2enr::W`](W). You can also [`modify`](crate::generic::Reg::modify) this register. See [API](https://docs.rs/svd2rust/#read--modify--write-api)."]
pub struct APB2ENR_SPEC;
// Marker type tying the generic register machinery to this register.
impl crate::RegisterSpec for APB2ENR_SPEC {
    // 32-bit register.
    type Ux = u32;
}
#[doc = "`read()` method returns [`apb2enr::R`](R) reader structure"]
impl crate::Readable for APB2ENR_SPEC {}
#[doc = "`write(|w| ..)` method takes [`apb2enr::W`](W) writer structure"]
impl crate::Writable for APB2ENR_SPEC {
    // No fields require a fixed 0/1 write to be left unmodified.
    const ZERO_TO_MODIFY_FIELDS_BITMAP: Self::Ux = 0;
    const ONE_TO_MODIFY_FIELDS_BITMAP: Self::Ux = 0;
}
#[doc = "`reset()` method sets APB2ENR to value 0"]
impl crate::Resettable for APB2ENR_SPEC {
    const RESET_VALUE: Self::Ux = 0;
}
|
//! # Configuration
//!
//! Included here are methods to setup and configure `libpasta`.
//! Currently, this refers to the choice of default hashing algorithm.
//!
//! Configuration can be specified in two ways: through configuration files,
//! or programmatically.
//!
//! Alternatively, the `set_primitive` function, and others, can be used
//! to configure the library. However, note that once the library is "in use",
//! i.e. a function like `hash_password` has been called, then attempting
//! to configure the library will cause a panic.
//!
//! There are a number of ways panics can happen through using the configuration
//! files. `libpasta` does not try to recover gracefully if it encounters
//! one of these errors, and will instead panic.
use lazy_static;
use ring::rand::SecureRandom;
use ring::{hkdf, rand};
use serde_mcf;
use serde_yaml;
use super::HashUpdate;
use errors::{ExpectReport, Result};
use hashing::{Algorithm, Output};
use key;
use primitives::{self, Primitive};
use std::default::Default;
use std::fmt;
use std::fs::File;
use std::io::BufReader;
use std::path::Path;
use std::sync::{Arc, Mutex};
lazy_static! {
    /// Global source of randomness for generating salts
    pub static ref RANDOMNESS_SOURCE: rand::SystemRandom = {
        // Force the backup PRNG to initialise first, so entropy-gathering
        // failures surface as early as possible.
        lazy_static::initialize(&RAND_BACKUP);
        rand::SystemRandom::new()
    };
    /// Backup PRNG source for when `SystemRandom` is unavailable
    static ref RAND_BACKUP: Arc<Mutex<BackupPrng>> = {
        let rng = rand::SystemRandom::new();
        let mut seed = [0_u8; 32];
        let mut salt_key_value = [0_u8; 32];
        rng.fill(&mut seed).expect("could not generate any randomness");
        rng.fill(&mut salt_key_value).expect("could not generate any randomness");
        Arc::new(Mutex::new(BackupPrng {
            salt: hkdf::Salt::new(hkdf::HKDF_SHA256, &salt_key_value[..]),
            seed,
        }))
    };
    /// Default primitive used for hash computations
    pub static ref DEFAULT_PRIM: Primitive = {
        primitives::Scrypt::default()
    };
    /// Default algorithm to use for new hash computations.
    pub static ref DEFAULT_ALG: Algorithm = {
        Algorithm::Single(DEFAULT_PRIM.clone())
    };
    /// Default configuration set.
    pub static ref DEFAULT_CONFIG: Config = {
        Config::default()
    };
}
/// Holds possible configuration options
/// See the [module level documentation](index.html) for more information.
/// Holds possible configuration options
/// See the [module level documentation](index.html) for more information.
#[derive(Debug, Deserialize, Serialize)]
pub struct Config {
    // Derived from `primitive` (and `keyed`, when set); rebuilt after
    // deserialization, never serialized itself.
    #[serde(skip)]
    algorithm: Algorithm,
    // Primary hashing primitive; defaults to scrypt when absent from a file.
    #[serde(default = "primitives::Scrypt::default")]
    primitive: Primitive,
    // Optional keyed primitive (e.g. HMAC) wrapped around the hash.
    keyed: Option<Primitive>,
    // Key store used to resolve key ids; not serialized.
    #[serde(skip, default = "key::get_global")]
    keys: &'static dyn key::Store,
}
impl Default for Config {
    // Library defaults: the default primitive/algorithm statics, no keyed
    // wrapper, keys resolved through the global store.
    fn default() -> Self {
        Self {
            algorithm: DEFAULT_ALG.clone(),
            primitive: DEFAULT_PRIM.clone(),
            keyed: None,
            keys: key::get_global(),
        }
    }
}
impl fmt::Display for Config {
    // Renders the config as YAML; serialization failures surface as
    // `fmt::Error`.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        f.write_str(&serde_yaml::to_string(&self).map_err(|_| fmt::Error)?)
    }
}
impl Config {
    /// Create a new empty `Config` for setting parameters.
    #[must_use]
    pub fn with_primitive(primitive: Primitive) -> Self {
        Self {
            algorithm: Algorithm::Single(primitive.clone()),
            primitive,
            keyed: None,
            keys: key::get_global(),
        }
    }
    /// Generates a `Config` from a .toml file.
    /// Config files can be generated using the `Config::to_string` method on
    /// an existing config.
    ///
    /// # Errors
    ///
    /// If the config file could not be opened
    pub fn from_file<P: AsRef<Path>>(path: P) -> Result<Self> {
        let file = File::open(path.as_ref());
        if let Ok(file) = file {
            let reader = BufReader::new(file);
            // NOTE(review): despite the ".toml" mention above, the file is
            // parsed as YAML, and a malformed file panics via `expect`.
            let mut config: Self = serde_yaml::from_reader(reader).expect("invalid config file");
            // `algorithm` is #[serde(skip)], so rebuild it from the parsed
            // primitive, wrapping with the keyed primitive when present.
            config.algorithm = Algorithm::Single(config.primitive.clone());
            if let Some(kh) = config.keyed.clone() {
                config.algorithm = config.algorithm.into_wrapped(kh);
            }
            trace!("imported config as: {:?}", config);
            Ok(config)
        } else {
            info!("could not open config file {:?}: {:?}", path.as_ref(), file);
            Err("could not open config file".into())
        }
    }
    /// Generates hash for a given password.
    ///
    /// Will automatically generate a random salt. In the extreme case that the
    /// default source of randomness is unavailable, this will fallback to a seed
    /// generated when the library is initialised. An error will be logged when this
    /// happens.
    ///
    /// ## Panics
    /// A panic indicates a problem with the serialization mechanisms, and should
    /// be reported.
    #[must_use]
    pub fn hash_password(&self, password: &str) -> String {
        self.hash_password_safe(password)
            .expect_report("failed to hash password")
    }
    /// Same as `hash_password` but returns `Result` to allow error handling.
    /// TODO: decide on which API is best to use.
    #[doc(hidden)]
    pub fn hash_password_safe(&self, password: &str) -> Result<String> {
        let pwd_hash = self.algorithm.hash(password);
        Ok(serde_mcf::to_string(&pwd_hash)?)
    }
    /// Verifies the provided password matches the inputted hash string.
    ///
    /// If there is any error in processing the hash or password, this
    /// will simply return `false`.
    #[must_use]
    pub fn verify_password(&self, hash: &str, password: &str) -> bool {
        self.verify_password_safe(hash, password).unwrap_or(false)
    }
    /// Same as `verify_password` but returns `Result` to allow error handling.
    /// TODO: decide on which API is best to use.
    #[doc(hidden)]
    pub fn verify_password_safe(&self, hash: &str, password: &str) -> Result<bool> {
        let mut pwd_hash: Output = serde_mcf::from_str(hash)?;
        // Resolve any key ids in the parsed hash against this config's store.
        pwd_hash.check_keys(self);
        Ok(pwd_hash.verify(password))
    }
    /// Verifies a supplied password against a previously computed password hash,
    /// and performs an in-place update of the hash value if the password verifies.
    /// Hence this needs to take a mutable `String` reference.
    pub fn verify_password_update_hash(&self, hash: &str, password: &str) -> HashUpdate {
        self.verify_password_update_hash_safe(hash, password)
            .unwrap_or(HashUpdate::Failed)
    }
    /// Same as `verify_password_update_hash`, but returns `Result` to allow error handling.
    #[doc(hidden)]
    pub fn verify_password_update_hash_safe(
        &self,
        hash: &str,
        password: &str,
    ) -> Result<HashUpdate> {
        let pwd_hash: Output = serde_mcf::from_str(hash)?;
        if pwd_hash.verify(password) {
            if pwd_hash.alg == self.algorithm {
                // Already hashed with the current algorithm; nothing to update.
                Ok(HashUpdate::Verified(None))
            } else {
                // Re-hash with the configured algorithm and return the new hash.
                let new_hash = serde_mcf::to_string(&self.algorithm.hash(password))?;
                Ok(HashUpdate::Verified(Some(new_hash)))
            }
        } else {
            Ok(HashUpdate::Failed)
        }
    }
    /// Migrate the input hash to the current recommended hash.
    ///
    /// Note that this does *not* require the password. This is for batch updating
    /// of hashes, where the password is not available. This performs an onion
    /// approach, returning `new_hash(old_hash)`.
    ///
    /// If the password is also available, the `verify_password_update_hash` should
    /// instead be used.
    #[must_use]
    pub fn migrate_hash(&self, hash: &str) -> Option<String> {
        self.migrate_hash_safe(hash)
            .expect("failed to migrate password")
    }
    /// Same as `migrate_hash` but returns `Result` to allow error handling.
    #[doc(hidden)]
    pub fn migrate_hash_safe(&self, hash: &str) -> Result<Option<String>> {
        let pwd_hash: Output = serde_mcf::from_str(hash)?;
        if !pwd_hash.alg.needs_migrating(&self.primitive) {
            // no need to migrate
            return Ok(None);
        }
        // Onion approach: hash the existing hash output with the current
        // primitive, reusing the old salt, and record the wrapped algorithm.
        let new_params = pwd_hash.alg.to_wrapped(self.primitive.clone());
        let new_salt = pwd_hash.salt;
        let new_hash = self.primitive.compute(&pwd_hash.hash, &new_salt);
        let new_hash = Output {
            alg: new_params,
            hash: new_hash,
            salt: new_salt,
        };
        Ok(Some(serde_mcf::to_string(&new_hash)?))
    }
    /// Add a new key into the list of configured keys
    #[must_use]
    pub fn add_key(&self, key: &[u8]) -> String {
        self.keys.insert(key)
    }
    // Look up a key by id in the configured key store.
    pub(crate) fn get_key(&self, key_id: &str) -> Option<Vec<u8>> {
        self.keys.get_key(key_id)
    }
    /// Set the default primitive
    pub fn set_primitive(&mut self, primitive: Primitive) {
        self.primitive = primitive.clone();
        // Keep `algorithm` consistent: preserve an existing keyed wrapper.
        self.algorithm = match self.algorithm {
            Algorithm::Single(_) => Algorithm::Single(primitive),
            Algorithm::Nested { ref outer, .. } => {
                Algorithm::Single(primitive).into_wrapped(outer.clone())
            }
        };
    }
    /// Set a keyed function to be applied after hashing.
    pub fn set_keyed_hash(&mut self, keyed: Primitive) {
        self.keyed = Some(keyed.clone());
        let mut newalg = match self.algorithm {
            // If just a single algorithm, wrap with the keyed primitive
            Algorithm::Single(_) => self.algorithm.to_wrapped(keyed),
            // Otherwise, replace the outer algorithm with the keyed primitive
            Algorithm::Nested {
                outer: ref _outer,
                ref inner,
            } => inner.to_wrapped(keyed),
        };
        newalg.update_key(self);
        self.algorithm = newalg;
    }
    /// Sets the location of keys for keyed functions.
    pub fn set_key_source(&mut self, store: &'static dyn key::Store) {
        self.keys = store;
    }
}
// Deterministic fallback salt generator, seeded once at startup; used only
// when the primary `SystemRandom` source fails at runtime.
struct BackupPrng {
    // HKDF salt derived from startup entropy.
    salt: hkdf::Salt,
    // Rolling seed; replaced on every `gen_salt` call.
    seed: [u8; 32],
}
impl BackupPrng {
    // Expands 48 bytes from the current seed via HKDF: the first 16 bytes
    // become the returned salt, the remaining 32 replace the seed so
    // successive calls never reuse PRNG state.
    fn gen_salt(&mut self) -> Vec<u8> {
        let mut buf = [0_u8; 48];
        let alg = self.salt.algorithm();
        self.salt
            .extract(&self.seed)
            .expand(&[b"libpasta backup PRNG"], alg)
            .expect("expand failure")
            .fill(&mut buf[..])
            .expect("fill failure");
        self.seed.copy_from_slice(&buf[16..]);
        let mut output = Vec::with_capacity(16);
        output.extend_from_slice(&buf[0..16]);
        output
    }
}
// Generates a 16-byte salt from the backup PRNG; fallback path for when the
// primary randomness source is unavailable.
pub(crate) fn backup_gen_salt() -> Vec<u8> {
    RAND_BACKUP
        .lock()
        .expect("could not acquire lock on RAND_BACKUP")
        .gen_salt()
}
#[cfg(test)]
mod test {
    #![allow(clippy::wildcard_imports)]
    use super::*;
    use crate::*;
    use ring;
    #[test]
    fn use_config() {
        let config = Config::with_primitive(primitives::Argon2::default());
        let hash = config.hash_password("hunter2");
        assert!(config.verify_password(&hash, "hunter2"));
        let mut config = Config::default();
        config.set_primitive(primitives::Bcrypt::default());
        let hash = config.hash_password("hunter2");
        // NOTE(review): this calls the crate-level `verify_password`, not
        // `config.verify_password` — presumably fine because the MCF hash
        // string is self-describing, but confirm it is intentional.
        assert!(verify_password(&hash, "hunter2"));
    }
    // Key store stub that always serves one fixed 16-byte key.
    #[derive(Debug)]
    struct StaticSource(&'static [u8; 16]);
    impl key::Store for StaticSource {
        /// Insert a new key into the `Store`.
        fn insert(&self, _key: &[u8]) -> String {
            "StaticKey".to_string()
        }
        /// Get a key from the `Store`.
        fn get_key(&self, _id: &str) -> Option<Vec<u8>> {
            Some(self.0.to_vec())
        }
    }
    static STATIC_SOURCE: StaticSource = StaticSource(b"ThisIsAStaticKey");
    #[test]
    fn alternate_key_source() {
        let mut config = Config::default();
        config.set_key_source(&STATIC_SOURCE);
        let id = config.add_key(&[]);
        assert_eq!(config.get_key(&id), Some(b"ThisIsAStaticKey".to_vec()));
        let hmac = primitives::Hmac::with_key_id(ring::hkdf::HKDF_SHA256, "dummy");
        config.set_keyed_hash(hmac);
        let hash = config.hash_password("hunter2");
        assert!(config.verify_password_safe(&hash, "hunter2").unwrap())
    }
}
|
use super::ChannelStream;
use crate::config::{ChannelConfig, DEFAULT_RELAY_BUF_SIZE};
use crate::rmux::{
create_stream, new_auth_event, process_rmux_session, read_rmux_event, write_encrypt_event,
AuthRequest, AuthResponse, CryptoContext, MuxContext, DEFAULT_RECV_BUF_SIZE,
};
use crate::utils::{
http_proxy_connect, make_io_error, AsyncTcpStream, AsyncTokioIO, WebsocketReader,
WebsocketWriter,
};
//use crate::utils::make_io_error;
use async_tls::TlsConnector;
use bytes::BytesMut;
use futures::StreamExt;
use std::error::Error;
use std::io::ErrorKind;
use tokio::io::{AsyncBufRead, AsyncRead, AsyncWrite, AsyncWriteExt};
use tokio::net::TcpStream;
use url::Url;
/// Performs the client side of the rmux handshake over `ri`/`wi`, then runs
/// the multiplexed session until it terminates.
///
/// The handshake sends an encrypted `AuthRequest` on stream id 0 using
/// nonce/counter 0, reads the server's `AuthResponse`, and re-keys both
/// directions with the server-provided `rand` before entering
/// `process_rmux_session`.
///
/// Errors: handshake read/decode failures are surfaced as I/O errors; a
/// rejected auth yields `ErrorKind::ConnectionRefused`.
async fn init_client<'a, R, W>(
    config: ChannelConfig,
    session_id: u32,
    ri: &'a mut R,
    wi: &'a mut W,
) -> Result<(), std::io::Error>
where
    R: AsyncBufRead + Unpin + Sized,
    W: AsyncWrite + Unpin + Sized,
{
    // Stream id 0 is reserved for the auth exchange.
    let sid = 0_u32;
    let auth = AuthRequest {
        method: String::from(config.cipher.method.as_str()),
    };
    let ev = new_auth_event(sid, &auth);
    let key = String::from(config.cipher.key.as_str());
    let method = String::from(config.cipher.method.as_str());
    // Initial crypto contexts use counter 0; they are replaced after auth.
    let mut rctx = CryptoContext::new(method.as_str(), key.as_str(), 0);
    let mut wctx = CryptoContext::new(method.as_str(), key.as_str(), 0);
    write_encrypt_event(&mut wctx, wi, ev).await?;
    let recv_ev = match read_rmux_event(&mut rctx, ri).await {
        Err(e) => return Err(make_io_error(&e.to_string())),
        Ok(ev) => ev,
    };
    // The response body comes from the network: treat a malformed payload as
    // an I/O error instead of panicking (was `.unwrap()`).
    let decoded: AuthResponse = match bincode::deserialize(&recv_ev.body[..]) {
        Ok(d) => d,
        Err(e) => return Err(make_io_error(&e.to_string())),
    };
    if !decoded.success {
        return Err(std::io::Error::from(ErrorKind::ConnectionRefused));
    }
    // Re-key both directions with the server-supplied nonce.
    let rctx = CryptoContext::new(method.as_str(), key.as_str(), decoded.rand);
    let wctx = CryptoContext::new(method.as_str(), key.as_str(), decoded.rand);
    let ctx = MuxContext::new(
        config.name.as_str(),
        session_id,
        rctx,
        wctx,
        config.max_alive_mins as u64 * 60,
    );
    process_rmux_session(ctx, ri, wi, config.relay_buf_size()).await?;
    Ok(())
}
pub async fn init_rmux_client(
config: ChannelConfig,
session_id: u32,
) -> Result<(), std::io::Error> {
let mut url = String::from(config.url.as_str());
if config.url.find("://").is_none() {
url = String::from("rmux://");
url.push_str(config.url.as_str());
}
let conn_url = match Url::parse(url.as_str()) {
Err(e) => {
error!("invalid connect url:{} with error:{}", url, e);
return Err(make_io_error("invalid connect url"));
}
Ok(u) => u,
};
let addr = if config.sni_proxy.is_some() {
let mut v = String::from(config.sni_proxy.as_ref().unwrap());
if v.find(':').is_none() {
v.push_str(":443");
}
v
} else {
format!(
"{}:{}",
conn_url.host().as_ref().unwrap(),
conn_url.port_or_known_default().unwrap()
)
};
info!("connect rmux:{} to addr:{}", url, addr);
let domain = if config.sni.is_some() {
config.sni.as_ref().unwrap().as_str()
} else {
conn_url.host_str().unwrap()
};
let mut conn = match config.proxy.as_ref() {
Some(p) => {
let proxy_url = match Url::parse(p.as_str()) {
Err(e) => {
error!("invalid connect url:{} with error:{}", url, e);
return Err(make_io_error("invalid connect url"));
}
Ok(u) => u,
};
http_proxy_connect(&proxy_url, addr.as_str()).await?
}
None => {
info!("TCP connect {}", addr);
let c = TcpStream::connect(&addr);
let dur = std::time::Duration::from_secs(5);
let s = tokio::time::timeout(dur, c).await?;
match s {
Err(e) => {
return Err(e);
}
Ok(c) => c,
}
}
};
match conn_url.scheme() {
"ws" | "wss" => {
if !url.ends_with('/') {
url.push_str("/relay")
} else {
url.push_str("relay")
}
info!("connect url:{}", url);
}
_ => {}
}
match conn_url.scheme() {
"rmux" => {
let (read, mut write) = conn.split();
let mut buf_reader = tokio::io::BufReader::with_capacity(DEFAULT_RECV_BUF_SIZE, read);
let rc = init_client(config, session_id, &mut buf_reader, &mut write).await;
let _ = conn.shutdown(std::net::Shutdown::Both);
if rc.is_err() {
return rc;
}
}
"ws" => {
let ws = match tokio_tungstenite::client_async(url, conn).await {
Err(e) => return Err(make_io_error(&e.to_string())),
Ok((s, _)) => s,
};
let (write, read) = ws.split();
let reader = WebsocketReader::new(read);
let mut writer = WebsocketWriter::new(write);
let mut buf_reader = tokio::io::BufReader::with_capacity(DEFAULT_RECV_BUF_SIZE, reader);
let rc = init_client(config, session_id, &mut buf_reader, &mut writer).await;
writer.shutdown().await?;
if rc.is_err() {
return rc;
}
}
"wss" => {
let connector = TlsConnector::default();
let conn = AsyncTcpStream::new(conn);
//let host = conn_url.host_str();
info!("TLS connect {:?}", domain);
let tls_stream = connector.connect(domain, conn)?.await?;
let conn = AsyncTokioIO::new(tls_stream);
let ws = match tokio_tungstenite::client_async(url, conn).await {
Err(e) => return Err(make_io_error(&e.to_string())),
Ok((s, _)) => s,
};
let (write, read) = ws.split();
let reader = WebsocketReader::new(read);
let mut writer = WebsocketWriter::new(write);
let mut buf_reader = tokio::io::BufReader::with_capacity(DEFAULT_RECV_BUF_SIZE, reader);
let rc = init_client(config, session_id, &mut buf_reader, &mut writer).await;
writer.shutdown().await?;
if rc.is_err() {
return rc;
}
}
_ => {
let _ = conn.shutdown(std::net::Shutdown::Both);
error!("unknown schema:{}", conn_url.scheme());
return Err(make_io_error("unknown url schema"));
}
}
Ok(())
}
/// Opens a new TCP relay stream to `addr` over the named mux channel and
/// returns it boxed as a generic `ChannelStream`.
pub async fn get_rmux_stream(
    channel: &str,
    addr: String,
) -> Result<Box<dyn ChannelStream + Send>, std::io::Error> {
    Ok(Box::new(
        create_stream(channel, "tcp", addr.as_str(), DEFAULT_RELAY_BUF_SIZE).await?,
    ))
}
|
use super::super::prelude::{
HBRUSH
};
/// Convenience alias for the Win32 brush handle type (`HBRUSH`).
pub type Brush = HBRUSH; |
pub mod moder;
/// Smoke-test helper: prints an identifying message for this module.
pub fn test() {
    let msg = "this is gpioa";
    println!("{}", msg);
}
|
use std::marker::PhantomData;
use necsim_core::{
cogs::{
CoalescenceSampler, DispersalSampler, EmigrationExit, Habitat, ImmigrationEntry,
LineageReference, LineageStore, MinSpeciationTrackingEventSampler, PrimeableRng,
SingularActiveLineageSampler, SpeciationProbability, TurnoverRate,
},
reporter::boolean::Boolean,
};
use rustcoalescence_algorithms_cuda_kernel_ptx_jit::host::compiler::PtxJITCompiler;
use rust_cuda::{
rustacuda::{function::Function, module::Module},
rustacuda_core::DeviceCopy,
};
use rust_cuda::common::RustToCuda;
mod r#impl;
mod launch;
mod specialiser;
/// Handle to one compiled CUDA simulation kernel: the PTX JIT compiler state,
/// the loaded CUDA module, and its entry-point function, for a single
/// monomorphisation of the simulation's cog types.
#[allow(clippy::module_name_repetitions)]
#[allow(clippy::type_complexity)]
pub struct SimulationKernel<
    'k,
    H: Habitat + RustToCuda,
    G: PrimeableRng + RustToCuda,
    R: LineageReference<H> + DeviceCopy,
    S: LineageStore<H, R> + RustToCuda,
    X: EmigrationExit<H, G, R, S> + RustToCuda,
    D: DispersalSampler<H, G> + RustToCuda,
    C: CoalescenceSampler<H, R, S> + RustToCuda,
    T: TurnoverRate<H> + RustToCuda,
    N: SpeciationProbability<H> + RustToCuda,
    E: MinSpeciationTrackingEventSampler<H, G, R, S, X, D, C, T, N> + RustToCuda,
    I: ImmigrationEntry + RustToCuda,
    A: SingularActiveLineageSampler<H, G, R, S, X, D, C, T, N, E, I> + RustToCuda,
    ReportSpeciation: Boolean,
    ReportDispersal: Boolean,
> {
    // PTX JIT compiler state backing this kernel.
    compiler: &'k mut PtxJITCompiler,
    // NOTE(review): presumably toggles PTX JIT recompilation/specialisation —
    // confirm against the `launch` module.
    ptx_jit: bool,
    // Loaded CUDA module that contains the kernel code.
    module: &'k mut Module,
    // Kernel entry-point function inside `module`.
    entry_point: &'k mut Function<'k>,
    // Zero-sized marker tying the struct to all of its generic parameters.
    marker: PhantomData<(
        H,
        G,
        R,
        S,
        X,
        D,
        C,
        T,
        N,
        E,
        I,
        A,
        ReportSpeciation,
        ReportDispersal,
    )>,
}
|
use crate::tests::{fail_test, run_test_contains, TestResult};
// cargo version prints a string of the form:
// cargo 1.60.0 (d1fd9fe2c 2022-03-01)
#[test]
fn known_external_runs() -> TestResult {
    let source = r#"extern "cargo version" []; cargo version"#;
    run_test_contains(source, "cargo")
}
#[test]
fn known_external_unknown_flag() -> TestResult {
    // An undeclared flag on a known external must be rejected by the parser.
    let source = r#"extern "cargo version" []; cargo version --no-such-flag"#;
    fail_test(source, "command doesn't have flag")
}
/// GitHub issues #5179, #4618
#[test]
fn known_external_alias() -> TestResult {
    // Aliasing the full known-external invocation must still run it.
    let source = r#"extern "cargo version" []; alias cv = cargo version; cv"#;
    run_test_contains(source, "cargo")
}
/// GitHub issues #5179, #4618
#[test]
fn known_external_subcommand_alias() -> TestResult {
    // Aliasing only the head command, then supplying the subcommand, must work.
    let source = r#"extern "cargo version" []; alias c = cargo; c version"#;
    run_test_contains(source, "cargo")
}
|
//! A client to interact with the Telegram bot API.
mod http_telegram_client;
mod response;
mod telegram_client;
pub use self::http_telegram_client::HttpTelegramClient;
pub use self::response::Response;
pub use self::telegram_client::TelegramClient;
|
use std::collections::HashMap;
use std::path::PathBuf;
use std::sync::Arc;
use std::time::Duration;
use futures::future;
use futures::stream::{Stream, StreamExt};
use heim::units::information::byte;
use heim::units::ratio;
use tokio::sync::Mutex;
use persist_core::daemon::{self, LOGS_DIR, PIDS_DIR};
use persist_core::error::{Error, PersistError};
use persist_core::protocol::{
ListResponse, LogEntry, LogStreamSource, ProcessInfo, ProcessSpec, ProcessStatus,
};
use crate::server::handle::ProcessHandle;
/// In-memory registry of every process managed by the daemon.
#[derive(Default)]
pub struct State {
    // Map from process name to its handle, guarded by an async mutex so the
    // lock can be held across awaits.
    processes: Mutex<HashMap<String, ProcessHandle>>,
}
impl State {
/// Constructs a new `State` instance, with no managed processes.
pub fn new() -> State {
    let processes = Mutex::new(HashMap::new());
    State { processes }
}
/// Gets the process specification associated with the given name.
pub async fn spec(&self, name: impl AsRef<str>) -> Result<ProcessSpec, Error> {
let processes = self.processes.lock().await;
processes
.get(name.as_ref())
.map(|handle| handle.spec().clone())
.ok_or_else(|| Error::from(PersistError::ProcessNotFound))
}
/// Executes a closure and provides it every process handle.
///
/// The closure runs while the internal lock is held, so avoid calling other
/// `State` methods from inside it.
pub async fn with_handles<F, T>(&self, func: F) -> T
where
    F: FnOnce(&HashMap<String, ProcessHandle>) -> T,
{
    let guard = self.processes.lock().await;
    func(&guard)
}
/// Executes a closure and provides it the process handle of the specified process.
///
/// This closure is executed while holding a lock, so avoid calling other methods on `State` inside that closure.
pub async fn with_handle<S, F, T>(&self, name: S, func: F) -> Result<T, Error>
where
S: AsRef<str>,
F: FnOnce(&ProcessHandle) -> T,
{
let processes = self.processes.lock().await;
let handle = processes
.get(name.as_ref())
.ok_or(PersistError::ProcessNotFound)?;
Ok(func(handle))
}
/// Collects a status/metrics snapshot for every managed process, sorted by
/// process name.
///
/// Running processes report pid, CPU% (estimated from two usage samples taken
/// 200ms apart) and RSS in bytes; processes without a live child report
/// `Stopped` with zeroed metrics. Fails on the first metric-collection error.
pub async fn list(&self) -> Result<Vec<ListResponse>, Error> {
    let processes = self.processes.lock().await;
    let futures = processes.iter().map(|(name, handle)| async move {
        let (pid, status, cpu_usage, mem_usage) = handle
            .with_process(|handle| async move {
                let pid = handle.pid();
                // CPU usage is the delta between two samples 200ms apart.
                let cpu_usage = {
                    let usage1 = handle.cpu_usage().await?;
                    tokio::time::delay_for(Duration::from_millis(200)).await;
                    let usage2 = handle.cpu_usage().await?;
                    (usage2 - usage1).get::<ratio::percent>()
                } as u32;
                let mem_usage = handle.memory().await?.rss().get::<byte>();
                Ok::<_, Error>((
                    Some(pid as usize),
                    ProcessStatus::Running,
                    cpu_usage,
                    mem_usage as u32,
                ))
            })
            .await
            // No live process behind the handle: report it as stopped.
            .unwrap_or_else(|| Ok((None, ProcessStatus::Stopped, 0u32, 0u32)))?;
        Ok::<ListResponse, Error>(ListResponse {
            pid,
            status,
            cpu_usage,
            mem_usage,
            name: name.clone(),
        })
    });
    // Sample every process concurrently, then fail on the first error.
    let metrics = future::join_all(futures).await;
    let mut metrics = metrics.into_iter().collect::<Result<Vec<_>, _>>()?;
    metrics.sort_by(|a, b| a.name.cmp(&b.name));
    Ok(metrics)
}
/// Starts a new process from `spec`, registers it, and returns its info.
///
/// Fails with `ProcessAlreadyExists` if a process of that name is already
/// managed. Creates the daemon's pid/log files for the process, spawns it,
/// and installs a background task that clears the handle's process slot once
/// the child exits (guarded by pid so a restarted child isn't clobbered).
pub async fn start(self: Arc<Self>, mut spec: ProcessSpec) -> Result<ProcessInfo, Error> {
    let mut processes = self.processes.lock().await;
    if processes.contains_key(spec.name.as_str()) {
        return Err(Error::from(PersistError::ProcessAlreadyExists));
    }
    //? get dirs paths
    let home_dir = daemon::home_dir()?;
    let pids_dir = home_dir.join(PIDS_DIR);
    let logs_dir = home_dir.join(LOGS_DIR);
    //? ensure they exists
    // Errors are ignored: the directories may already exist.
    let future = future::join(
        tokio::fs::create_dir(&pids_dir),
        tokio::fs::create_dir(&logs_dir),
    );
    let _ = future.await;
    //? get PID file path
    let pid_path = format!("{}.pid", spec.name);
    let pid_path = pids_dir.join(pid_path);
    //? get stdout file path
    let stdout_path = format!("{}-out.log", spec.name);
    let stdout_path = logs_dir.join(stdout_path);
    //? get stderr file path
    let stderr_path = format!("{}-err.log", spec.name);
    let stderr_path = logs_dir.join(stderr_path);
    //? ensure they exists
    // Errors are ignored here too; canonicalize() below will surface a
    // genuinely missing file.
    let future = future::join3(
        tokio::fs::File::create(pid_path.as_path()),
        tokio::fs::File::create(stdout_path.as_path()),
        tokio::fs::File::create(stderr_path.as_path()),
    );
    let _ = future.await;
    let now = chrono::Local::now().naive_local();
    spec.created_at = now;
    spec.pid_path = pid_path.canonicalize()?;
    spec.stdout_path = stdout_path.canonicalize()?;
    spec.stderr_path = stderr_path.canonicalize()?;
    processes.insert(spec.name.clone(), ProcessHandle::new(spec.clone()));
    let handle = processes.get_mut(&spec.name).unwrap();
    let future = handle.start().await?;
    let name = handle.name().to_string();
    let pid = handle.pid().unwrap();
    let cloned_self = self.clone();
    // Reap the child when it exits: clear the handle's process slot, but only
    // if the slot still holds the same pid we spawned.
    tokio::spawn(async move {
        let _ = future.await;
        let mut processes = cloned_self.processes.lock().await;
        if let Some(handle) = processes.get_mut(name.as_str()) {
            match &mut handle.process {
                Some(inner) if pid == (inner.pid() as usize) => {
                    let _ = handle.process.take();
                    // TODO: restart process ?
                }
                _ => {}
            }
        }
    });
    let info = ProcessInfo {
        name: spec.name,
        cmd: spec.cmd,
        cwd: spec.cwd,
        env: spec.env,
        pid: Some(pid),
        status: ProcessStatus::Running,
        created_at: spec.created_at,
        pid_path: spec.pid_path,
        stdout_path: spec.stdout_path,
        stderr_path: spec.stderr_path,
    };
    Ok(info)
}
pub async fn stop(&self, name: impl AsRef<str>) -> Result<(), Error> {
let mut processes = self.processes.lock().await;
let handle = processes
.get_mut(name.as_ref())
.ok_or(PersistError::ProcessNotFound)?;
handle.stop().await?;
Ok(())
}
/// Restarts the named process with a (possibly updated) spec and returns its
/// new info.
///
/// Fails with `ProcessNotFound` if the process isn't managed. Like `start`,
/// installs a background task that clears the handle's process slot once the
/// new child exits (guarded by pid).
pub async fn restart(self: Arc<Self>, spec: ProcessSpec) -> Result<ProcessInfo, Error> {
    let mut processes = self.processes.lock().await;
    let handle = processes
        .get_mut(spec.name.as_str())
        .ok_or(PersistError::ProcessNotFound)?;
    let future = handle.restart_with_spec(spec.clone()).await?;
    let name = handle.name().to_string();
    let pid = handle.pid().unwrap();
    let cloned_self = self.clone();
    // Reap the child when it exits, only if the slot still holds this pid.
    tokio::spawn(async move {
        let _ = future.await;
        let mut processes = cloned_self.processes.lock().await;
        if let Some(handle) = processes.get_mut(name.as_str()) {
            match &mut handle.process {
                Some(inner) if pid == (inner.pid() as usize) => {
                    let _ = handle.process.take();
                    // TODO: restart process ?
                }
                _ => {}
            }
        }
    });
    let info = ProcessInfo {
        name: spec.name.clone(),
        cmd: spec.cmd.clone(),
        cwd: spec.cwd.clone(),
        env: spec.env.clone(),
        pid: Some(pid),
        status: ProcessStatus::Running,
        created_at: spec.created_at,
        pid_path: spec.pid_path.clone(),
        stdout_path: spec.stdout_path.clone(),
        stderr_path: spec.stderr_path.clone(),
    };
    Ok(info)
}
pub async fn delete(&self, name: impl AsRef<str>) -> Result<(), Error> {
let mut processes = self.processes.lock().await;
let mut handle = processes
.remove(name.as_ref())
.ok_or(PersistError::ProcessNotFound)?;
handle.stop().await?;
Ok(())
}
/// Returns the specs of managed processes, optionally restricted to names in
/// `filters` (`None` dumps everything).
pub async fn dump(&self, filters: Option<Vec<String>>) -> Result<Vec<ProcessSpec>, Error> {
    let guard = self.processes.lock().await;
    let specs = guard
        .iter()
        .filter(|(name, _)| filters.as_ref().map_or(true, |f| f.contains(name)))
        .map(|(_, handle)| handle.spec().clone())
        .collect();
    Ok(specs)
}
/// Builds a merged stream of log entries for the selected processes.
///
/// For each process matching `filters` (or all, when `None`), yields the last
/// `lines` lines of its stdout and stderr files; when `stream` is true, the
/// live stdout/stderr streams are chained after the historical lines.
pub async fn logs(
    &self,
    filters: Option<Vec<String>>,
    lines: usize,
    stream: bool,
) -> Result<impl Stream<Item = LogEntry>, Error> {
    let processes = self.processes.lock().await;
    let streams = future::try_join_all(
        processes
            .iter()
            .filter(|(name, _)| filters.as_ref().map_or(true, |names| names.contains(name)))
            .map(|(_, handle)| async move {
                // Historical stdout: last `lines` lines of the file
                // (skip(1) drops the trailing empty split after the final \n).
                let stdout_init = match lines {
                    0 => futures::stream::empty().right_stream(),
                    lines => {
                        let contents = tokio::fs::read_to_string(handle.stdout_file()).await?;
                        let lines = contents
                            .split('\n')
                            .rev()
                            .skip(1)
                            .take(lines)
                            .map(String::from)
                            .collect::<Vec<_>>();
                        futures::stream::iter(lines.into_iter().rev()).left_stream()
                    }
                };
                // Historical stderr, same scheme.
                let stderr_init = match lines {
                    0 => futures::stream::empty().right_stream(),
                    lines => {
                        let contents = tokio::fs::read_to_string(handle.stderr_file()).await?;
                        let lines = contents
                            .split('\n')
                            .rev()
                            .skip(1)
                            .take(lines)
                            .map(String::from)
                            .collect::<Vec<_>>();
                        futures::stream::iter(lines.into_iter().rev()).left_stream()
                    }
                };
                if stream {
                    // Follow mode: append the live streams after the history.
                    let name = handle.name().to_string();
                    let stdout = stdout_init.chain(handle.stdout()).map(move |msg| LogEntry {
                        msg,
                        name: name.clone(),
                        source: LogStreamSource::Stdout,
                    });
                    let name = handle.name().to_string();
                    let stderr = stderr_init.chain(handle.stderr()).map(move |msg| LogEntry {
                        msg,
                        name: name.clone(),
                        source: LogStreamSource::Stderr,
                    });
                    Ok::<_, Error>(Box::pin(
                        futures::stream::select(stdout, stderr).right_stream(),
                    ))
                } else {
                    // History only.
                    let name = handle.name().to_string();
                    let stdout = stdout_init.map(move |msg| LogEntry {
                        msg,
                        name: name.clone(),
                        source: LogStreamSource::Stdout,
                    });
                    let name = handle.name().to_string();
                    let stderr = stderr_init.map(move |msg| LogEntry {
                        msg,
                        name: name.clone(),
                        source: LogStreamSource::Stderr,
                    });
                    Ok::<_, Error>(Box::pin(
                        futures::stream::select(stdout, stderr).left_stream(),
                    ))
                }
            }),
    )
    .await?;
    Ok(futures::stream::select_all(streams))
}
/// Deletes stray PID/log files that don't belong to any managed process and
/// returns the removed paths.
///
/// When `stopped` is true, files of stopped processes are also treated as
/// stray (only running processes' files are kept).
pub async fn prune(&self, stopped: bool) -> Result<Vec<String>, Error> {
    let processes = self.processes.lock().await;
    let mut pruned_files = Vec::new();
    // Files to keep: the pid/stdout/stderr files of every retained process.
    // (`!stopped || (stopped && cond)` simplified to `!stopped || cond`.)
    let expected_files: Vec<PathBuf> = processes
        .values()
        .filter(|handle| !stopped || handle.status() == ProcessStatus::Running)
        .flat_map(|handle| {
            let fst = std::iter::once(PathBuf::from(handle.pid_file()));
            let snd = std::iter::once(PathBuf::from(handle.stdout_file()));
            let trd = std::iter::once(PathBuf::from(handle.stderr_file()));
            fst.chain(snd).chain(trd)
        })
        .collect();
    let (logs, pids) =
        future::join(tokio::fs::read_dir(LOGS_DIR), tokio::fs::read_dir(PIDS_DIR)).await;
    // We kind-of ignore errors because the logs and pids directories are only created
    // upon the first process start-up, so it can legitimately not be there yet.
    let mut stream = match (logs, pids) {
        (Ok(logs), Ok(pids)) => logs.chain(pids).left_stream(),
        (Ok(logs), Err(_)) => logs.right_stream(),
        (Err(_), Ok(pids)) => pids.right_stream(),
        (Err(_), Err(_)) => return Ok(pruned_files),
    };
    while let Some(dirent) = stream.next().await.transpose()? {
        // Ignore non-regular files (like directories).
        let kind = dirent.file_type().await?;
        if !kind.is_file() {
            continue;
        }
        let path = dirent.path().canonicalize()?;
        // Remove anything unrecognized; best-effort, removal errors ignored.
        if !expected_files.contains(&path) && tokio::fs::remove_file(&path).await.is_ok() {
            pruned_files.push(path.display().to_string());
        }
    }
    Ok(pruned_files)
}
}
|
use std::{borrow::Borrow, fmt::Debug};
use serde::{de::DeserializeOwned, Serialize};
use super::{ChangeStream, ClientSession, Cursor, SessionChangeStream, SessionCursor};
use crate::{
bson::{Bson, Document},
change_stream::{event::ChangeStreamEvent, options::ChangeStreamOptions},
error::Result,
index::IndexModel,
options::{
AggregateOptions,
CountOptions,
CreateIndexOptions,
DeleteOptions,
DistinctOptions,
DropCollectionOptions,
DropIndexOptions,
EstimatedDocumentCountOptions,
FindOneAndDeleteOptions,
FindOneAndReplaceOptions,
FindOneAndUpdateOptions,
FindOneOptions,
FindOptions,
InsertManyOptions,
InsertOneOptions,
ListIndexesOptions,
ReadConcern,
ReplaceOptions,
SelectionCriteria,
UpdateModifications,
UpdateOptions,
WriteConcern,
},
results::{
CreateIndexResult,
CreateIndexesResult,
DeleteResult,
InsertManyResult,
InsertOneResult,
UpdateResult,
},
runtime,
Collection as AsyncCollection,
Namespace,
};
/// `Collection` is the client-side abstraction of a MongoDB Collection. It can be used to
/// perform collection-level operations such as CRUD operations. A `Collection` can be obtained
/// through a [`Database`](struct.Database.html) by calling either
/// [`Database::collection`](struct.Database.html#method.collection) or
/// [`Database::collection_with_options`](struct.Database.html#method.collection_with_options).
///
/// `Collection` uses [`std::sync::Arc`](https://doc.rust-lang.org/std/sync/struct.Arc.html) internally,
/// so it can safely be shared across threads. For example:
///
/// ```rust
/// # use mongodb::{
/// # bson::doc,
/// # error::Result,
/// # sync::Client,
/// # };
/// #
/// # fn start_workers() -> Result<()> {
/// # let client = Client::with_uri_str("mongodb://example.com")?;
/// let coll = client.database("items").collection("in_stock");
///
/// for i in 0..5 {
/// let coll_ref = coll.clone();
///
/// std::thread::spawn(move || {
/// // Perform operations with `coll_ref`. For example:
/// coll_ref.insert_one(doc! { "x": i }, None);
/// });
/// }
/// #
/// # // Technically we should join the threads here, but for the purpose of the example, we'll just
/// # // sleep for a bit.
/// # std::thread::sleep(std::time::Duration::from_secs(3));
/// # Ok(())
/// # }
/// ```
#[derive(Clone, Debug)]
pub struct Collection<T> {
    // The underlying asynchronous collection; every sync method delegates to
    // it via `runtime::block_on`.
    async_collection: AsyncCollection<T>,
}
impl<T> Collection<T> {
pub(crate) fn new(async_collection: AsyncCollection<T>) -> Self {
Self { async_collection }
}
/// Gets a clone of the `Collection` with a different type `U`.
pub fn clone_with_type<U>(&self) -> Collection<U> {
    let inner = self.async_collection.clone_with_type();
    Collection::new(inner)
}
/// Gets the name of the `Collection`.
///
/// Delegates directly to the underlying async collection (no blocking).
pub fn name(&self) -> &str {
    self.async_collection.name()
}
/// Gets the namespace of the `Collection`.
///
/// The namespace of a MongoDB collection is `<database>.<collection>`; for
/// example, a collection named "bar" created in a database named "foo" has
/// the namespace "foo.bar". Delegates directly to the async collection.
pub fn namespace(&self) -> Namespace {
    self.async_collection.namespace()
}
/// Gets the selection criteria of the `Collection`, if any was configured.
pub fn selection_criteria(&self) -> Option<&SelectionCriteria> {
    self.async_collection.selection_criteria()
}
/// Gets the read concern of the `Collection`, if any was configured.
pub fn read_concern(&self) -> Option<&ReadConcern> {
    self.async_collection.read_concern()
}
/// Gets the write concern of the `Collection`, if any was configured.
pub fn write_concern(&self) -> Option<&WriteConcern> {
    self.async_collection.write_concern()
}
/// Drops the collection, deleting all data, users, and indexes stored in it.
pub fn drop(&self, options: impl Into<Option<DropCollectionOptions>>) -> Result<()> {
    let fut = self.async_collection.drop(options.into());
    runtime::block_on(fut)
}
/// Drops the collection, deleting all data, users, and indexes stored in it,
/// using the provided `ClientSession`.
pub fn drop_with_session(
    &self,
    options: impl Into<Option<DropCollectionOptions>>,
    session: &mut ClientSession,
) -> Result<()> {
    let fut = self
        .async_collection
        .drop_with_session(options.into(), &mut session.async_client_session);
    runtime::block_on(fut)
}
/// Runs an aggregation operation.
///
/// See the documentation [here](https://www.mongodb.com/docs/manual/aggregation/) for more
/// information on aggregations.
pub fn aggregate(
    &self,
    pipeline: impl IntoIterator<Item = Document>,
    options: impl Into<Option<AggregateOptions>>,
) -> Result<Cursor<Document>> {
    let stages: Vec<Document> = pipeline.into_iter().collect();
    let fut = self.async_collection.aggregate(stages, options.into());
    runtime::block_on(fut).map(Cursor::new)
}
/// Runs an aggregation operation using the provided `ClientSession`.
///
/// See the documentation [here](https://www.mongodb.com/docs/manual/aggregation/) for more
/// information on aggregations.
pub fn aggregate_with_session(
    &self,
    pipeline: impl IntoIterator<Item = Document>,
    options: impl Into<Option<AggregateOptions>>,
    session: &mut ClientSession,
) -> Result<SessionCursor<Document>> {
    let stages: Vec<Document> = pipeline.into_iter().collect();
    let fut = self.async_collection.aggregate_with_session(
        stages,
        options.into(),
        &mut session.async_client_session,
    );
    runtime::block_on(fut).map(SessionCursor::new)
}
/// Estimates the number of documents in the collection using collection
/// metadata.
///
/// Note: in MongoDB 5.0.0 - 5.0.7 the `count` server command backing
/// `estimatedDocumentCount` was accidentally omitted from v1 of the Stable
/// API. Stable API users should upgrade their cluster to 5.0.8+ or set
/// [`ServerApi::strict`](crate::options::ServerApi::strict) to false to avoid
/// encountering errors.
///
/// For more information on the behavior of the `count` server command, see
/// [Count: Behavior](https://www.mongodb.com/docs/manual/reference/command/count/#behavior).
pub fn estimated_document_count(
    &self,
    options: impl Into<Option<EstimatedDocumentCountOptions>>,
) -> Result<u64> {
    let fut = self.async_collection.estimated_document_count(options.into());
    runtime::block_on(fut)
}
/// Gets the number of documents matching `filter`.
///
/// Note that using [`Collection::estimated_document_count`](#method.estimated_document_count)
/// is recommended instead of this method in most cases.
pub fn count_documents(
    &self,
    filter: impl Into<Option<Document>>,
    options: impl Into<Option<CountOptions>>,
) -> Result<u64> {
    let fut = self
        .async_collection
        .count_documents(filter.into(), options.into());
    runtime::block_on(fut)
}
/// Gets the number of documents matching `filter` using the provided
/// `ClientSession`.
///
/// Note that using [`Collection::estimated_document_count`](#method.estimated_document_count)
/// is recommended instead of this method in most cases.
pub fn count_documents_with_session(
    &self,
    filter: impl Into<Option<Document>>,
    options: impl Into<Option<CountOptions>>,
    session: &mut ClientSession,
) -> Result<u64> {
    let fut = self.async_collection.count_documents_with_session(
        filter.into(),
        options.into(),
        &mut session.async_client_session,
    );
    runtime::block_on(fut)
}
/// Creates the given index on this collection.
pub fn create_index(
    &self,
    index: IndexModel,
    options: impl Into<Option<CreateIndexOptions>>,
) -> Result<CreateIndexResult> {
    let fut = self.async_collection.create_index(index, options);
    runtime::block_on(fut)
}
/// Creates the given index on this collection using the provided
/// `ClientSession`.
pub fn create_index_with_session(
    &self,
    index: IndexModel,
    options: impl Into<Option<CreateIndexOptions>>,
    session: &mut ClientSession,
) -> Result<CreateIndexResult> {
    let fut = self.async_collection.create_index_with_session(
        index,
        options,
        &mut session.async_client_session,
    );
    runtime::block_on(fut)
}
/// Creates the given indexes on this collection.
pub fn create_indexes(
    &self,
    indexes: impl IntoIterator<Item = IndexModel>,
    options: impl Into<Option<CreateIndexOptions>>,
) -> Result<CreateIndexesResult> {
    let fut = self.async_collection.create_indexes(indexes, options);
    runtime::block_on(fut)
}
/// Creates the given indexes on this collection using the provided
/// `ClientSession`.
pub fn create_indexes_with_session(
    &self,
    indexes: impl IntoIterator<Item = IndexModel>,
    options: impl Into<Option<CreateIndexOptions>>,
    session: &mut ClientSession,
) -> Result<CreateIndexesResult> {
    let fut = self.async_collection.create_indexes_with_session(
        indexes,
        options,
        &mut session.async_client_session,
    );
    runtime::block_on(fut)
}
/// Deletes all documents stored in the collection matching `query`.
pub fn delete_many(
    &self,
    query: Document,
    options: impl Into<Option<DeleteOptions>>,
) -> Result<DeleteResult> {
    let fut = self.async_collection.delete_many(query, options.into());
    runtime::block_on(fut)
}
/// Deletes all documents stored in the collection matching `query`, using the
/// provided `ClientSession`.
pub fn delete_many_with_session(
    &self,
    query: Document,
    options: impl Into<Option<DeleteOptions>>,
    session: &mut ClientSession,
) -> Result<DeleteResult> {
    let fut = self.async_collection.delete_many_with_session(
        query,
        options.into(),
        &mut session.async_client_session,
    );
    runtime::block_on(fut)
}
/// Deletes up to one document found matching `query`.
///
/// This operation will retry once upon failure if the connection and
/// encountered error support retryability. See
/// [retryable writes](https://www.mongodb.com/docs/manual/core/retryable-writes/)
/// for more information.
pub fn delete_one(
    &self,
    query: Document,
    options: impl Into<Option<DeleteOptions>>,
) -> Result<DeleteResult> {
    let fut = self.async_collection.delete_one(query, options.into());
    runtime::block_on(fut)
}
/// Deletes up to one document found matching `query`, using the provided
/// `ClientSession`.
///
/// This operation will retry once upon failure if the connection and
/// encountered error support retryability. See
/// [retryable writes](https://www.mongodb.com/docs/manual/core/retryable-writes/)
/// for more information.
pub fn delete_one_with_session(
    &self,
    query: Document,
    options: impl Into<Option<DeleteOptions>>,
    session: &mut ClientSession,
) -> Result<DeleteResult> {
    let fut = self.async_collection.delete_one_with_session(
        query,
        options.into(),
        &mut session.async_client_session,
    );
    runtime::block_on(fut)
}
/// Finds the distinct values of the field specified by `field_name` across
/// the collection.
pub fn distinct(
    &self,
    field_name: impl AsRef<str>,
    filter: impl Into<Option<Document>>,
    options: impl Into<Option<DistinctOptions>>,
) -> Result<Vec<Bson>> {
    let fut = self.async_collection.distinct(
        field_name.as_ref(),
        filter.into(),
        options.into(),
    );
    runtime::block_on(fut)
}
/// Finds the distinct values of the field specified by `field_name` across
/// the collection, using the provided `ClientSession`.
pub fn distinct_with_session(
    &self,
    field_name: impl AsRef<str>,
    filter: impl Into<Option<Document>>,
    options: impl Into<Option<DistinctOptions>>,
    session: &mut ClientSession,
) -> Result<Vec<Bson>> {
    let fut = self.async_collection.distinct_with_session(
        field_name.as_ref(),
        filter.into(),
        options.into(),
        &mut session.async_client_session,
    );
    runtime::block_on(fut)
}
/// Updates all documents matching `query` in the collection.
///
/// Both `Document` and `Vec<Document>` implement `Into<UpdateModifications>`,
/// so either can be passed in place of constructing the enum case. Note:
/// pipeline updates are only supported in MongoDB 4.2+. See the official
/// MongoDB [documentation](https://www.mongodb.com/docs/manual/reference/command/update/#behavior) for more information on specifying updates.
pub fn update_many(
    &self,
    query: Document,
    update: impl Into<UpdateModifications>,
    options: impl Into<Option<UpdateOptions>>,
) -> Result<UpdateResult> {
    let fut = self
        .async_collection
        .update_many(query, update.into(), options.into());
    runtime::block_on(fut)
}
/// Drops the index specified by `name` from this collection.
pub fn drop_index(
    &self,
    name: impl AsRef<str>,
    options: impl Into<Option<DropIndexOptions>>,
) -> Result<()> {
    let fut = self.async_collection.drop_index(name, options);
    runtime::block_on(fut)
}
/// Drops the index specified by `name` from this collection using the
/// provided `ClientSession`.
pub fn drop_index_with_session(
    &self,
    name: impl AsRef<str>,
    options: impl Into<Option<DropIndexOptions>>,
    session: &mut ClientSession,
) -> Result<()> {
    let fut = self.async_collection.drop_index_with_session(
        name,
        options,
        &mut session.async_client_session,
    );
    runtime::block_on(fut)
}
/// Drops all indexes associated with this collection.
pub fn drop_indexes(&self, options: impl Into<Option<DropIndexOptions>>) -> Result<()> {
    let fut = self.async_collection.drop_indexes(options);
    runtime::block_on(fut)
}
/// Drops all indexes associated with this collection using the provided
/// `ClientSession`.
pub fn drop_indexes_with_session(
    &self,
    options: impl Into<Option<DropIndexOptions>>,
    session: &mut ClientSession,
) -> Result<()> {
    let fut = self
        .async_collection
        .drop_indexes_with_session(options, &mut session.async_client_session);
    runtime::block_on(fut)
}
/// Lists all indexes on this collection.
pub fn list_indexes(
    &self,
    options: impl Into<Option<ListIndexesOptions>>,
) -> Result<Cursor<IndexModel>> {
    let fut = self.async_collection.list_indexes(options);
    runtime::block_on(fut).map(Cursor::new)
}
/// Returns a session-bound cursor over all indexes defined on this
/// collection, running the operation under the given `ClientSession`.
pub fn list_indexes_with_session(
    &self,
    options: impl Into<Option<ListIndexesOptions>>,
    session: &mut ClientSession,
) -> Result<SessionCursor<IndexModel>> {
    let fut = self
        .async_collection
        .list_indexes_with_session(options, &mut session.async_client_session);
    runtime::block_on(fut).map(SessionCursor::new)
}
/// Returns the name of every index on this collection.
pub fn list_index_names(&self) -> Result<Vec<String>> {
    let fut = self.async_collection.list_index_names();
    runtime::block_on(fut)
}
/// Returns the name of every index on this collection, running the
/// operation under the given `ClientSession`.
pub fn list_index_names_with_session(
    &self,
    session: &mut ClientSession,
) -> Result<Vec<String>> {
    let fut = self
        .async_collection
        .list_index_names_with_session(&mut session.async_client_session);
    runtime::block_on(fut)
}
/// Updates every document matching `query`, running the operation under
/// the given `ClientSession`.
///
/// Both `Document` and `Vec<Document>` convert into `UpdateModifications`,
/// so either may be passed directly. Pipeline updates require MongoDB
/// 4.2+. See the official MongoDB
/// [documentation](https://www.mongodb.com/docs/manual/reference/command/update/#behavior) for more information on specifying updates.
pub fn update_many_with_session(
    &self,
    query: Document,
    update: impl Into<UpdateModifications>,
    options: impl Into<Option<UpdateOptions>>,
    session: &mut ClientSession,
) -> Result<UpdateResult> {
    let fut = self.async_collection.update_many_with_session(
        query,
        update.into(),
        options.into(),
        &mut session.async_client_session,
    );
    runtime::block_on(fut)
}
/// Updates at most one document matching `query`.
///
/// Both `Document` and `Vec<Document>` convert into `UpdateModifications`,
/// so either may be passed directly. Pipeline updates require MongoDB
/// 4.2+. See the official MongoDB
/// [documentation](https://www.mongodb.com/docs/manual/reference/command/update/#behavior) for more information on specifying updates.
///
/// On transient failure the operation is retried once, provided the
/// connection and error are retryable; see the MongoDB documentation
/// [here](https://www.mongodb.com/docs/manual/core/retryable-writes/) for
/// details on retryable writes.
pub fn update_one(
    &self,
    query: Document,
    update: impl Into<UpdateModifications>,
    options: impl Into<Option<UpdateOptions>>,
) -> Result<UpdateResult> {
    let fut = self
        .async_collection
        .update_one(query, update.into(), options.into());
    runtime::block_on(fut)
}
/// Updates at most one document matching `query`, running the operation
/// under the given `ClientSession`.
///
/// Both `Document` and `Vec<Document>` convert into `UpdateModifications`,
/// so either may be passed directly. Pipeline updates require MongoDB
/// 4.2+. See the official MongoDB
/// [documentation](https://www.mongodb.com/docs/manual/reference/command/update/#behavior) for more information on specifying updates.
///
/// On transient failure the operation is retried once, provided the
/// connection and error are retryable; see the MongoDB documentation
/// [here](https://www.mongodb.com/docs/manual/core/retryable-writes/) for
/// details on retryable writes.
pub fn update_one_with_session(
    &self,
    query: Document,
    update: impl Into<UpdateModifications>,
    options: impl Into<Option<UpdateOptions>>,
    session: &mut ClientSession,
) -> Result<UpdateResult> {
    let fut = self.async_collection.update_one_with_session(
        query,
        update.into(),
        options.into(),
        &mut session.async_client_session,
    );
    runtime::block_on(fut)
}
/// Opens a [`ChangeStream`](change_stream/struct.ChangeStream.html) that
/// yields an event for every change made to this collection. Change
/// streams cannot be opened on system collections.
///
/// See the MongoDB documentation on
/// [change streams](https://www.mongodb.com/docs/manual/changeStreams/).
///
/// Change streams require either a "majority" read concern or no read
/// concern at all; anything else produces a server error. Likewise, using
/// a `$project` stage to remove the `_id`, `operationType`, or `ns`
/// fields causes an error, because the driver needs those fields for
/// resumability — see
/// [`ChangeStream`](change_stream/struct.ChangeStream.html) for more on
/// resumability.
///
/// When the pipeline reshapes the emitted events, switch the parsed event
/// type with [`ChangeStream::with_type`].
pub fn watch(
    &self,
    pipeline: impl IntoIterator<Item = Document>,
    options: impl Into<Option<ChangeStreamOptions>>,
) -> Result<ChangeStream<ChangeStreamEvent<T>>>
where
    T: DeserializeOwned + Unpin + Send + Sync,
{
    let fut = self.async_collection.watch(pipeline, options);
    runtime::block_on(fut).map(ChangeStream::new)
}
/// Opens a [`SessionChangeStream`] that yields an event for every change
/// made to this collection, running under the provided [`ClientSession`].
/// See [`Client::watch`](crate::sync::Client::watch) for more information.
pub fn watch_with_session(
    &self,
    pipeline: impl IntoIterator<Item = Document>,
    options: impl Into<Option<ChangeStreamOptions>>,
    session: &mut ClientSession,
) -> Result<SessionChangeStream<ChangeStreamEvent<T>>>
where
    T: DeserializeOwned + Unpin + Send + Sync,
{
    let fut = self.async_collection.watch_with_session(
        pipeline,
        options,
        &mut session.async_client_session,
    );
    runtime::block_on(fut).map(SessionChangeStream::new)
}
/// Runs a query and returns a cursor over the matching documents.
pub fn find(
    &self,
    filter: impl Into<Option<Document>>,
    options: impl Into<Option<FindOptions>>,
) -> Result<Cursor<T>> {
    let fut = self.async_collection.find(filter.into(), options.into());
    runtime::block_on(fut).map(Cursor::new)
}
/// Runs a query and returns a session-bound cursor over the matching
/// documents, executing under the given `ClientSession`.
pub fn find_with_session(
    &self,
    filter: impl Into<Option<Document>>,
    options: impl Into<Option<FindOptions>>,
    session: &mut ClientSession,
) -> Result<SessionCursor<T>> {
    let fut = self.async_collection.find_with_session(
        filter.into(),
        options.into(),
        &mut session.async_client_session,
    );
    runtime::block_on(fut).map(SessionCursor::new)
}
}
impl<T> Collection<T>
where
    T: DeserializeOwned + Unpin + Send + Sync,
{
    /// Returns at most one document matching `filter`.
    pub fn find_one(
        &self,
        filter: impl Into<Option<Document>>,
        options: impl Into<Option<FindOneOptions>>,
    ) -> Result<Option<T>> {
        let fut = self.async_collection.find_one(filter.into(), options.into());
        runtime::block_on(fut)
    }

    /// Returns at most one document matching `filter`, executing under the
    /// given `ClientSession`.
    pub fn find_one_with_session(
        &self,
        filter: impl Into<Option<Document>>,
        options: impl Into<Option<FindOneOptions>>,
        session: &mut ClientSession,
    ) -> Result<Option<T>> {
        let fut = self.async_collection.find_one_with_session(
            filter.into(),
            options.into(),
            &mut session.async_client_session,
        );
        runtime::block_on(fut)
    }
}
impl<T> Collection<T>
where
    T: DeserializeOwned,
{
    /// Atomically finds and deletes at most one document matching `filter`.
    ///
    /// On transient failure the operation is retried once, provided the
    /// connection and error are retryable; see the MongoDB documentation
    /// [here](https://www.mongodb.com/docs/manual/core/retryable-writes/)
    /// for details on retryable writes.
    pub fn find_one_and_delete(
        &self,
        filter: Document,
        options: impl Into<Option<FindOneAndDeleteOptions>>,
    ) -> Result<Option<T>> {
        let fut = self
            .async_collection
            .find_one_and_delete(filter, options.into());
        runtime::block_on(fut)
    }

    /// Atomically finds and deletes at most one document matching `filter`,
    /// executing under the given `ClientSession`.
    ///
    /// On transient failure the operation is retried once, provided the
    /// connection and error are retryable; see the MongoDB documentation
    /// [here](https://www.mongodb.com/docs/manual/core/retryable-writes/)
    /// for details on retryable writes.
    pub fn find_one_and_delete_with_session(
        &self,
        filter: Document,
        options: impl Into<Option<FindOneAndDeleteOptions>>,
        session: &mut ClientSession,
    ) -> Result<Option<T>> {
        let fut = self.async_collection.find_one_and_delete_with_session(
            filter,
            options.into(),
            &mut session.async_client_session,
        );
        runtime::block_on(fut)
    }

    /// Atomically finds and updates at most one document matching `filter`.
    /// Both `Document` and `Vec<Document>` convert into
    /// `UpdateModifications`, so either may be passed directly. Pipeline
    /// updates require MongoDB 4.2+.
    ///
    /// On transient failure the operation is retried once, provided the
    /// connection and error are retryable; see the MongoDB documentation
    /// [here](https://www.mongodb.com/docs/manual/core/retryable-writes/)
    /// for details on retryable writes.
    pub fn find_one_and_update(
        &self,
        filter: Document,
        update: impl Into<UpdateModifications>,
        options: impl Into<Option<FindOneAndUpdateOptions>>,
    ) -> Result<Option<T>> {
        let fut = self
            .async_collection
            .find_one_and_update(filter, update.into(), options.into());
        runtime::block_on(fut)
    }

    /// Atomically finds and updates at most one document matching `filter`,
    /// executing under the given `ClientSession`. Both `Document` and
    /// `Vec<Document>` convert into `UpdateModifications`, so either may be
    /// passed directly. Pipeline updates require MongoDB 4.2+.
    ///
    /// On transient failure the operation is retried once, provided the
    /// connection and error are retryable; see the MongoDB documentation
    /// [here](https://www.mongodb.com/docs/manual/core/retryable-writes/)
    /// for details on retryable writes.
    pub fn find_one_and_update_with_session(
        &self,
        filter: Document,
        update: impl Into<UpdateModifications>,
        options: impl Into<Option<FindOneAndUpdateOptions>>,
        session: &mut ClientSession,
    ) -> Result<Option<T>> {
        let fut = self.async_collection.find_one_and_update_with_session(
            filter,
            update.into(),
            options.into(),
            &mut session.async_client_session,
        );
        runtime::block_on(fut)
    }
}
impl<T> Collection<T>
where
    T: Serialize + DeserializeOwned,
{
    /// Atomically finds at most one document matching `filter` and swaps it
    /// for `replacement`.
    ///
    /// On transient failure the operation is retried once, provided the
    /// connection and error are retryable; see the MongoDB documentation
    /// [here](https://www.mongodb.com/docs/manual/core/retryable-writes/)
    /// for details on retryable writes.
    pub fn find_one_and_replace(
        &self,
        filter: Document,
        replacement: T,
        options: impl Into<Option<FindOneAndReplaceOptions>>,
    ) -> Result<Option<T>> {
        let fut = self
            .async_collection
            .find_one_and_replace(filter, replacement, options.into());
        runtime::block_on(fut)
    }

    /// Atomically finds at most one document matching `filter` and swaps it
    /// for `replacement`, executing under the given `ClientSession`.
    ///
    /// On transient failure the operation is retried once, provided the
    /// connection and error are retryable; see the MongoDB documentation
    /// [here](https://www.mongodb.com/docs/manual/core/retryable-writes/)
    /// for details on retryable writes.
    pub fn find_one_and_replace_with_session(
        &self,
        filter: Document,
        replacement: T,
        options: impl Into<Option<FindOneAndReplaceOptions>>,
        session: &mut ClientSession,
    ) -> Result<Option<T>> {
        let fut = self.async_collection.find_one_and_replace_with_session(
            filter,
            replacement,
            options.into(),
            &mut session.async_client_session,
        );
        runtime::block_on(fut)
    }
}
impl<T> Collection<T>
where
    T: Serialize,
{
    /// Inserts every document yielded by `docs` into the collection.
    ///
    /// On transient failure the operation is retried once, provided the
    /// connection and error are retryable; see the MongoDB documentation
    /// [here](https://www.mongodb.com/docs/manual/core/retryable-writes/)
    /// for details on retryable writes.
    pub fn insert_many(
        &self,
        docs: impl IntoIterator<Item = impl Borrow<T>>,
        options: impl Into<Option<InsertManyOptions>>,
    ) -> Result<InsertManyResult> {
        let fut = self.async_collection.insert_many(docs, options.into());
        runtime::block_on(fut)
    }

    /// Inserts every document yielded by `docs` into the collection,
    /// executing under the given `ClientSession`.
    ///
    /// On transient failure the operation is retried once, provided the
    /// connection and error are retryable; see the MongoDB documentation
    /// [here](https://www.mongodb.com/docs/manual/core/retryable-writes/)
    /// for details on retryable writes.
    pub fn insert_many_with_session(
        &self,
        docs: impl IntoIterator<Item = impl Borrow<T>>,
        options: impl Into<Option<InsertManyOptions>>,
        session: &mut ClientSession,
    ) -> Result<InsertManyResult> {
        let fut = self.async_collection.insert_many_with_session(
            docs,
            options.into(),
            &mut session.async_client_session,
        );
        runtime::block_on(fut)
    }

    /// Inserts a single document `doc` into the collection.
    ///
    /// On transient failure the operation is retried once, provided the
    /// connection and error are retryable; see the MongoDB documentation
    /// [here](https://www.mongodb.com/docs/manual/core/retryable-writes/)
    /// for details on retryable writes.
    pub fn insert_one(
        &self,
        doc: impl Borrow<T>,
        options: impl Into<Option<InsertOneOptions>>,
    ) -> Result<InsertOneResult> {
        let fut = self
            .async_collection
            .insert_one(doc.borrow(), options.into());
        runtime::block_on(fut)
    }

    /// Inserts a single document `doc` into the collection, executing under
    /// the given `ClientSession`.
    ///
    /// On transient failure the operation is retried once, provided the
    /// connection and error are retryable; see the MongoDB documentation
    /// [here](https://www.mongodb.com/docs/manual/core/retryable-writes/)
    /// for details on retryable writes.
    pub fn insert_one_with_session(
        &self,
        doc: impl Borrow<T>,
        options: impl Into<Option<InsertOneOptions>>,
        session: &mut ClientSession,
    ) -> Result<InsertOneResult> {
        let fut = self.async_collection.insert_one_with_session(
            doc.borrow(),
            options.into(),
            &mut session.async_client_session,
        );
        runtime::block_on(fut)
    }

    /// Replaces at most one document matching `query` with `replacement`.
    ///
    /// On transient failure the operation is retried once, provided the
    /// connection and error are retryable; see the MongoDB documentation
    /// [here](https://www.mongodb.com/docs/manual/core/retryable-writes/)
    /// for details on retryable writes.
    pub fn replace_one(
        &self,
        query: Document,
        replacement: impl Borrow<T>,
        options: impl Into<Option<ReplaceOptions>>,
    ) -> Result<UpdateResult> {
        let fut = self
            .async_collection
            .replace_one(query, replacement.borrow(), options.into());
        runtime::block_on(fut)
    }

    /// Replaces at most one document matching `query` with `replacement`,
    /// executing under the given `ClientSession`.
    ///
    /// On transient failure the operation is retried once, provided the
    /// connection and error are retryable; see the MongoDB documentation
    /// [here](https://www.mongodb.com/docs/manual/core/retryable-writes/)
    /// for details on retryable writes.
    pub fn replace_one_with_session(
        &self,
        query: Document,
        replacement: impl Borrow<T>,
        options: impl Into<Option<ReplaceOptions>>,
        session: &mut ClientSession,
    ) -> Result<UpdateResult> {
        let fut = self.async_collection.replace_one_with_session(
            query,
            replacement.borrow(),
            options.into(),
            &mut session.async_client_session,
        );
        runtime::block_on(fut)
    }
}
|
//! A crate to hack the STM32F411E-DISCO board!
#![feature(asm)]
#![feature(core_intrinsics)]
#![feature(lang_items)]
#![feature(macro_reexport)]
#![feature(naked_functions)]
#![no_std]
extern crate compiler_builtins_snapshot;
#[macro_reexport(bkpt)]
#[macro_use]
extern crate cortex_m;
extern crate r0;
extern crate volatile_register;
pub extern crate stm32f411xx_memory_map as peripheral;
mod lang_items;
pub mod exception;
pub mod interrupt;
pub mod led;
pub mod button;
// "Pre `main`" initialization routine
fn init() {}
|
extern crate byteorder;
use std::io::prelude::*;
use std::error::Error;
use std::fs::File;
use byteorder::{ByteOrder,NativeEndian};
static ENTRY_POINT_LINUX: &'static str = "/sys/firmware/dmi/tables/smbios_entry_point";
static DMI_TABLE: &'static str = "/sys/firmware/dmi/tables/DMI";
pub mod smbios2;
pub mod smbios3;
use smbios2::{SM21EntryPoint};
use smbios3::{SM3EntryPoint, SM30BiosInfo, SM31BiosInfo};
#[derive(Debug)]
pub struct SMBIOSHeader {
    // SMBIOS structure "Type" field.
    stype: u8,
    // Length in bytes of the formatted area (the string-set follows it).
    length: u8,
    // Handle identifying this structure within the table.
    handle: u16
}

impl SMBIOSHeader {
    /// Builds a header from its individual fields.
    pub fn new(stype: u8, length: u8, handle: u16) -> SMBIOSHeader {
        SMBIOSHeader {
            stype,
            length,
            handle
        }
    }

    /// Decodes a header from the first four bytes of `data`.
    ///
    /// # Panics
    /// Panics if `data` is shorter than 4 bytes.
    pub fn from_bytes(data: &[u8]) -> SMBIOSHeader {
        // Fail loudly with a clear message instead of an opaque index panic.
        assert!(data.len() >= 4, "SMBIOS header requires at least 4 bytes");
        SMBIOSHeader {
            stype: data[0],
            length: data[1],
            // Decode in native byte order via the standard library instead
            // of byteorder::NativeEndian — same bytes, same result, one
            // fewer external-crate call site.
            handle: u16::from_ne_bytes([data[2], data[3]])
        }
    }
}
/// An SMBIOS specification version (e.g. 2.8, 3.0).
pub struct SmbiosVersion {
    pub major: u8,
    pub minor: u8
}

impl SmbiosVersion {
    /// Creates a version from its major/minor components.
    pub fn new(major: u8, minor: u8) -> SmbiosVersion {
        SmbiosVersion { major, minor }
    }

    /// Returns an integer that orders the same way as the version itself:
    /// major in the high byte, minor in the low byte.
    ///
    /// Bug fix: the previous encoding placed `minor` in the high byte, so
    /// e.g. 2.8 compared greater than 3.0.
    pub fn comparable(&self) -> u16 {
        ((self.major as u16) << 8) | self.minor as u16
    }
}
/// Version-agnostic summary of an SMBIOS entry point: which spec version
/// it advertises and where the structure table lives.
pub struct SmbiosGenericEntry {
    // Spec version advertised by the entry point.
    pub version: SmbiosVersion,
    // Size of the structure table, in bytes.
    pub table_size: u32,
    // Address of the structure table — presumably a physical address as
    // reported by firmware; TODO confirm against the entry-point parsers.
    pub table_address: u64
}
/// Raw SMBIOS table bytes.
///
/// NOTE(review): `data: [u8]` is an unsized field, which makes this a
/// dynamically sized type — values can only live behind a pointer
/// (e.g. `Box<SmbiosRaw>` or `&SmbiosRaw`); confirm this is intentional.
pub struct SmbiosRaw {
    pub data: [u8]
}
/// A single SMBIOS structure: its header plus the table version it was
/// read under.
pub struct SmbiosStructure {
    pub smbios_version: SmbiosVersion,
    pub smbios_header: SMBIOSHeader,
}

impl SmbiosStructure {
    /// Offset of the string-set that follows the formatted area.
    /// This is exactly the header's `length` field.
    pub fn get_strings_offset(&self) -> u8 {
        self.smbios_header.length
    }
}
/// Reads the raw DMI structure table from sysfs into `buf`, returning the
/// number of bytes read.
///
/// Fix: `Read::read` may legally return fewer bytes than are available, so
/// a single call could silently truncate the table. Loop until `buf` is
/// full or EOF is reached.
pub fn sysfs_read_dmi_table(buf: &mut [u8]) -> Result<usize, Box<Error>> {
    let mut fp = File::open(DMI_TABLE)?;
    let mut total = 0;
    while total < buf.len() {
        let n = fp.read(&mut buf[total..])?;
        if n == 0 {
            break; // EOF
        }
        total += n;
    }
    Ok(total)
}
/// Reads the raw SMBIOS entry point from sysfs into `buf`, returning the
/// number of bytes read.
///
/// Fix: `Read::read` may legally return fewer bytes than are available;
/// loop until `buf` is full or EOF so the entry point is never truncated.
pub fn sysfs_read_smbios_entry_point(buf: &mut[u8]) -> Result<usize, Box<Error>> {
    let mut fp = File::open(ENTRY_POINT_LINUX)?;
    let mut total = 0;
    while total < buf.len() {
        let n = fp.read(&mut buf[total..])?;
        if n == 0 {
            break; // EOF
        }
        total += n;
    }
    Ok(total)
}
// Returning references.. life times .. etc
/// Reads the raw SMBIOS entry point from sysfs into an owned 31-byte
/// buffer and returns it.
///
/// 31 bytes is the size of an SMBIOS 2.1 (`_SM_`) entry-point structure;
/// NOTE(review): confirm that larger or differently sized entry points
/// (e.g. SMBIOS 3.x `_SM3_`) are handled correctly by the callers.
pub fn get_entry_point() -> Result<Vec<u8>, Box<Error>> {
    let mut entry_point_data: Vec<u8> = vec![0;31];
    sysfs_read_smbios_entry_point(&mut entry_point_data)?;
    Ok(entry_point_data)
}
#[cfg(test)]
mod tests {
    use super::*;
    /// Decodes a captured SMBIOS 2.1 entry point and checks the advertised
    /// major version.
    #[test]
    fn test_entry_point_sm2() {
        // 31-byte dump of a real entry point. Bytes 0..4 are the `_SM_`
        // anchor (0x5f 0x53 0x4d 0x5f); bytes 16..21 contain the `_DMI_`
        // intermediate anchor.
        let entry_data: [u8;31] =
        [0x5f, 0x53, 0x4d, 0x5f, 0xd3, 0x1f, 0x02,
        0x08, 0xa6, 0x00, 0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x5f, 0x44, 0x4d, 0x49, 0x5f,
        0x9f, 0xe3, 0x04, 0xb0, 0xe8, 0x0e, 0x00,
        0x15, 0x00, 0x27];
        let entry_point = SM21EntryPoint::new(&entry_data);
        assert_eq!(entry_point.major_version, 2);
    }
}
use super::{
Parser,
};
use ir::{
Chunk,
hir::HIRInstruction,
};
use ir_traits::WriteInstruction;
use notices::{ DiagnosticSourceBuilder, DiagnosticSource, DiagnosticLevel };
use lexer::tokens::{
TokenType,
TokenData,
};
pub struct TypeParser;
impl TypeParser{
pub fn get_type(parser: &mut Parser) -> Result<Chunk, DiagnosticSource>{
let mut chunk = Chunk::new();
if let Err(notice) = parser.advance(){
return Err(notice)
}
let current_token = parser.current_token();
let ret = match (¤t_token.type_, ¤t_token.data) {
(TokenType::Identifier, TokenData::String(s)) => {
chunk.write_pos(current_token.pos);
let str = s.as_str();
match str{
"Int" => chunk.write_instruction(HIRInstruction::Integer),
"Float" => chunk.write_instruction(HIRInstruction::Float),
"String" => chunk.write_instruction(HIRInstruction::String),
"Bool" => chunk.write_instruction(HIRInstruction::Bool),
_ => {
chunk.write_instruction(HIRInstruction::Custom);
chunk.write_string(s.clone());
}
}
chunk
}
_ => {
let source = match parser.request_source_snippet(current_token.pos){
Ok(source) => source,
Err(diag) => {
return Err(diag)
}
};
let diag_source = DiagnosticSourceBuilder::new(parser.name.clone(), current_token.pos.start.0)
.level(DiagnosticLevel::Error)
.message(format!("Expected a type identifier but instead got {:?}", current_token.type_))
.source(source)
.build();
return Err(diag_source)
},
};
Ok(ret)
}
} |
use parenchyma;
use parenchyma::open_cl::OpenCLContext;
use parenchyma::{Context, SharedTensor};
use parenchyma::error::Result;
use extension::Vector;
impl Vector for OpenCLContext {
fn asum(
&self,
x: &SharedTensor<f32>,
result: &mut SharedTensor<f32>) -> Result {
unimplemented!()
}
fn axpy(
&self,
a: &SharedTensor<f32>,
x: &SharedTensor<f32>,
y: &mut SharedTensor<f32>) -> Result {
let kernel: ::ocl::Kernel = unimplemented!();
let n = x.shape().capacity;
let alpha = parenchyma::tensor(self, alpha)?;
let x = parenchyma::tensor(self, x)?;
let y = parenchyma::tensor_mut(self, y)?;
let offset = 0;
let inc = 1;
kernel
.arg_scl(n)
.arg_buf(alpha)
.arg_buf(x).arg_scl(offset).arg_scl(inc)
.arg_buf(y).arg_scl(offset).arg_scl(inc)
// //.gwo(..)
// .gws([WGS, 1, 1])
// .lws([WGS, 1, 1])
// // todo The queue must be associated with a device associated with the kernel's program.
.queue(self.active_direct().queue().clone())
.enq()?;
Ok(())
}
fn copy(
&self,
from: &SharedTensor<f32>,
to: &mut SharedTensor<f32>) -> Result {
unimplemented!()
}
fn dot(
&self,
x: &SharedTensor<f32>,
y: &SharedTensor<f32>,
result: &mut SharedTensor<f32>) -> Result {
unimplemented!()
}
fn nrm2(
&self,
x: &SharedTensor<f32>,
result: &mut SharedTensor<f32>) -> Result {
unimplemented!()
}
fn scal(
&self,
a: &SharedTensor<f32>,
x: &mut SharedTensor<f32>) -> Result {
unimplemented!()
}
fn swap(
&self,
x: &mut SharedTensor<f32>,
y: &mut SharedTensor<f32>) -> Result {
unimplemented!()
}
} |
use std::thread;
use std::sync::{Arc,Mutex};
/// Spawns two worker threads that each write to slot 1 of a shared,
/// mutex-guarded vector, then waits for both to finish.
fn main() {
    let a = Arc::new(Mutex::new(vec![1, 2, 3]));
    let mut handles = Vec::new();
    {
        let a = Arc::clone(&a);
        handles.push(thread::spawn(move || {
            let mut a = a.lock().unwrap();
            (*a)[1] = 2;
        }));
    }
    {
        let a = Arc::clone(&a);
        handles.push(thread::spawn(move || {
            let mut a = a.lock().unwrap();
            (*a)[1] = 3;
        }));
    }
    // Fix: the original dropped the JoinHandles immediately, detaching the
    // threads — `main` could exit before either write happened. Joining
    // guarantees both workers complete (and surfaces their panics).
    for h in handles {
        h.join().expect("worker thread panicked");
    }
}
|
use std::cell::Ref;
// Internal list node (exercise skeleton). `_bogus` only keeps the type
// parameter in use until a real node layout is implemented.
struct Node<T>
where
    T: PartialEq + Eq,
{
    _bogus: T,
}
impl<T> Node<T>
where
    T: PartialEq + Eq,
{
    // Exercise: implement a `new()` constructor that creates a node in
    // whatever way you think is appropriate.
}
/// A linked list of `T` (exercise skeleton — storage not yet designed).
/// `_bogus` only keeps the type parameter in use until real fields exist.
pub struct LinkedList<T>
where
    T: PartialEq + Eq,
{
    _bogus: T
}
impl<T> LinkedList<T>
where
    T: PartialEq + Eq,
{
    /// Creates a new, empty list.
    pub fn new() -> LinkedList<T> {
        unimplemented!()
    }
    /// Returns the number of elements in the list.
    /// This function runs in `O(1)` time.
    pub fn size(&self) -> usize {
        unimplemented!()
    }
    /// Add `value` to the start of the list.
    /// This function runs in `O(1)` time.
    pub fn push_front(&mut self, value: T) {
        unimplemented!()
    }
    /// Add `value` to the end of the list.
    /// This function runs in `O(1)` time.
    pub fn push_back(&mut self, value: T) {
        unimplemented!()
    }
    /// Returns a reference to the first value of the list.
    /// This function runs in `O(1)` time.
    pub fn peek_front(&self) -> Option<Ref<T>> {
        unimplemented!()
    }
    /// Returns a reference to the last value of the list.
    /// This function runs in `O(1)` time.
    pub fn peek_back(&self) -> Option<Ref<T>> {
        unimplemented!()
    }
    /// Removes the first element from the list and return it
    /// This function runs in `O(1)` time.
    pub fn pop_front(&mut self) -> Option<T> {
        unimplemented!()
    }
    /// Removes the last element from the list and return it
    /// This function runs in `O(1)` time.
    pub fn pop_back(&mut self) -> Option<T> {
        unimplemented!()
    }
    /// Finds if a value is present in the list.
    /// If the value is not found, return false
    /// If the value is found, return true.
    ///
    /// Challenge: Implement a version of this method that, instead of only returning `bool`,
    /// returns a mutable reference to the value.
    /// To accept this challenge, change the function return type to either:
    /// 1. Option<&mut T>
    /// 2. Option<RefMut<T>>
    ///
    pub fn find(&mut self, value: &T) -> bool {
        unimplemented!()
    }
    /// Removes the value from the linkedlist.
    /// If the value was present, return that value, else return None.
    pub fn remove(&mut self, value: &T) -> Option<T> {
        unimplemented!()
    }
}
// Drops the list iteratively, one node at a time, to avoid the deep
// recursive drop a naive linked-list layout would otherwise cause.
// NOTE(review): `pop_front` is currently `unimplemented!()` above, so this
// drop will panic until the list methods are filled in.
impl<T> Drop for LinkedList<T>
where
    T: PartialEq + Eq,
{
    fn drop(&mut self) {
        while self.pop_front().is_some() {}
    }
}
mod tests;
|
/// An enum to represent all characters in the IndicSiyaqNumbers block.
///
/// One variant per code point, from `U+1EC71` (number one) through
/// `U+1ECB4` (alternate lakh mark), as documented on each variant below.
#[derive(Debug, Clone, Copy, Hash, PartialEq, Eq)]
pub enum IndicSiyaqNumbers {
    /// \u{1ec71}: '𞱱'
    IndicSiyaqNumberOne,
    /// \u{1ec72}: '𞱲'
    IndicSiyaqNumberTwo,
    /// \u{1ec73}: '𞱳'
    IndicSiyaqNumberThree,
    /// \u{1ec74}: '𞱴'
    IndicSiyaqNumberFour,
    /// \u{1ec75}: '𞱵'
    IndicSiyaqNumberFive,
    /// \u{1ec76}: '𞱶'
    IndicSiyaqNumberSix,
    /// \u{1ec77}: '𞱷'
    IndicSiyaqNumberSeven,
    /// \u{1ec78}: '𞱸'
    IndicSiyaqNumberEight,
    /// \u{1ec79}: '𞱹'
    IndicSiyaqNumberNine,
    /// \u{1ec7a}: '𞱺'
    IndicSiyaqNumberTen,
    /// \u{1ec7b}: '𞱻'
    IndicSiyaqNumberTwenty,
    /// \u{1ec7c}: '𞱼'
    IndicSiyaqNumberThirty,
    /// \u{1ec7d}: '𞱽'
    IndicSiyaqNumberForty,
    /// \u{1ec7e}: '𞱾'
    IndicSiyaqNumberFifty,
    /// \u{1ec7f}: '𞱿'
    IndicSiyaqNumberSixty,
    /// \u{1ec80}: '𞲀'
    IndicSiyaqNumberSeventy,
    /// \u{1ec81}: '𞲁'
    IndicSiyaqNumberEighty,
    /// \u{1ec82}: '𞲂'
    IndicSiyaqNumberNinety,
    /// \u{1ec83}: '𞲃'
    IndicSiyaqNumberOneHundred,
    /// \u{1ec84}: '𞲄'
    IndicSiyaqNumberTwoHundred,
    /// \u{1ec85}: '𞲅'
    IndicSiyaqNumberThreeHundred,
    /// \u{1ec86}: '𞲆'
    IndicSiyaqNumberFourHundred,
    /// \u{1ec87}: '𞲇'
    IndicSiyaqNumberFiveHundred,
    /// \u{1ec88}: '𞲈'
    IndicSiyaqNumberSixHundred,
    /// \u{1ec89}: '𞲉'
    IndicSiyaqNumberSevenHundred,
    /// \u{1ec8a}: '𞲊'
    IndicSiyaqNumberEightHundred,
    /// \u{1ec8b}: '𞲋'
    IndicSiyaqNumberNineHundred,
    /// \u{1ec8c}: '𞲌'
    IndicSiyaqNumberOneThousand,
    /// \u{1ec8d}: '𞲍'
    IndicSiyaqNumberTwoThousand,
    /// \u{1ec8e}: '𞲎'
    IndicSiyaqNumberThreeThousand,
    /// \u{1ec8f}: '𞲏'
    IndicSiyaqNumberFourThousand,
    /// \u{1ec90}: '𞲐'
    IndicSiyaqNumberFiveThousand,
    /// \u{1ec91}: '𞲑'
    IndicSiyaqNumberSixThousand,
    /// \u{1ec92}: '𞲒'
    IndicSiyaqNumberSevenThousand,
    /// \u{1ec93}: '𞲓'
    IndicSiyaqNumberEightThousand,
    /// \u{1ec94}: '𞲔'
    IndicSiyaqNumberNineThousand,
    /// \u{1ec95}: '𞲕'
    IndicSiyaqNumberTenThousand,
    /// \u{1ec96}: '𞲖'
    IndicSiyaqNumberTwentyThousand,
    /// \u{1ec97}: '𞲗'
    IndicSiyaqNumberThirtyThousand,
    /// \u{1ec98}: '𞲘'
    IndicSiyaqNumberFortyThousand,
    /// \u{1ec99}: '𞲙'
    IndicSiyaqNumberFiftyThousand,
    /// \u{1ec9a}: '𞲚'
    IndicSiyaqNumberSixtyThousand,
    /// \u{1ec9b}: '𞲛'
    IndicSiyaqNumberSeventyThousand,
    /// \u{1ec9c}: '𞲜'
    IndicSiyaqNumberEightyThousand,
    /// \u{1ec9d}: '𞲝'
    IndicSiyaqNumberNinetyThousand,
    /// \u{1ec9e}: '𞲞'
    IndicSiyaqNumberLakh,
    /// \u{1ec9f}: '𞲟'
    IndicSiyaqNumberLakhan,
    /// \u{1eca0}: '𞲠'
    IndicSiyaqLakhMark,
    /// \u{1eca1}: '𞲡'
    IndicSiyaqNumberKaror,
    /// \u{1eca2}: '𞲢'
    IndicSiyaqNumberKaroran,
    /// \u{1eca3}: '𞲣'
    IndicSiyaqNumberPrefixedOne,
    /// \u{1eca4}: '𞲤'
    IndicSiyaqNumberPrefixedTwo,
    /// \u{1eca5}: '𞲥'
    IndicSiyaqNumberPrefixedThree,
    /// \u{1eca6}: '𞲦'
    IndicSiyaqNumberPrefixedFour,
    /// \u{1eca7}: '𞲧'
    IndicSiyaqNumberPrefixedFive,
    /// \u{1eca8}: '𞲨'
    IndicSiyaqNumberPrefixedSix,
    /// \u{1eca9}: '𞲩'
    IndicSiyaqNumberPrefixedSeven,
    /// \u{1ecaa}: '𞲪'
    IndicSiyaqNumberPrefixedEight,
    /// \u{1ecab}: '𞲫'
    IndicSiyaqNumberPrefixedNine,
    /// \u{1ecac}: '𞲬'
    IndicSiyaqPlaceholder,
    /// \u{1ecad}: '𞲭'
    IndicSiyaqFractionOneQuarter,
    /// \u{1ecae}: '𞲮'
    IndicSiyaqFractionOneHalf,
    /// \u{1ecaf}: '𞲯'
    IndicSiyaqFractionThreeQuarters,
    /// \u{1ecb0}: '𞲰'
    IndicSiyaqRupeeMark,
    /// \u{1ecb1}: '𞲱'
    IndicSiyaqNumberAlternateOne,
    /// \u{1ecb2}: '𞲲'
    IndicSiyaqNumberAlternateTwo,
    /// \u{1ecb3}: '𞲳'
    IndicSiyaqNumberAlternateTenThousand,
    /// \u{1ecb4}: '𞲴'
    IndicSiyaqAlternateLakhMark,
}
// Idiom fix: implement `From` rather than a manual `Into` — the standard
// library's blanket impl derives `Into<char>` from this automatically, so
// existing `.into()` call sites keep working, and callers additionally
// gain `char::from(value)`.
impl From<IndicSiyaqNumbers> for char {
    /// Returns the Unicode character this variant represents.
    fn from(value: IndicSiyaqNumbers) -> char {
        match value {
            IndicSiyaqNumbers::IndicSiyaqNumberOne => '𞱱',
            IndicSiyaqNumbers::IndicSiyaqNumberTwo => '𞱲',
            IndicSiyaqNumbers::IndicSiyaqNumberThree => '𞱳',
            IndicSiyaqNumbers::IndicSiyaqNumberFour => '𞱴',
            IndicSiyaqNumbers::IndicSiyaqNumberFive => '𞱵',
            IndicSiyaqNumbers::IndicSiyaqNumberSix => '𞱶',
            IndicSiyaqNumbers::IndicSiyaqNumberSeven => '𞱷',
            IndicSiyaqNumbers::IndicSiyaqNumberEight => '𞱸',
            IndicSiyaqNumbers::IndicSiyaqNumberNine => '𞱹',
            IndicSiyaqNumbers::IndicSiyaqNumberTen => '𞱺',
            IndicSiyaqNumbers::IndicSiyaqNumberTwenty => '𞱻',
            IndicSiyaqNumbers::IndicSiyaqNumberThirty => '𞱼',
            IndicSiyaqNumbers::IndicSiyaqNumberForty => '𞱽',
            IndicSiyaqNumbers::IndicSiyaqNumberFifty => '𞱾',
            IndicSiyaqNumbers::IndicSiyaqNumberSixty => '𞱿',
            IndicSiyaqNumbers::IndicSiyaqNumberSeventy => '𞲀',
            IndicSiyaqNumbers::IndicSiyaqNumberEighty => '𞲁',
            IndicSiyaqNumbers::IndicSiyaqNumberNinety => '𞲂',
            IndicSiyaqNumbers::IndicSiyaqNumberOneHundred => '𞲃',
            IndicSiyaqNumbers::IndicSiyaqNumberTwoHundred => '𞲄',
            IndicSiyaqNumbers::IndicSiyaqNumberThreeHundred => '𞲅',
            IndicSiyaqNumbers::IndicSiyaqNumberFourHundred => '𞲆',
            IndicSiyaqNumbers::IndicSiyaqNumberFiveHundred => '𞲇',
            IndicSiyaqNumbers::IndicSiyaqNumberSixHundred => '𞲈',
            IndicSiyaqNumbers::IndicSiyaqNumberSevenHundred => '𞲉',
            IndicSiyaqNumbers::IndicSiyaqNumberEightHundred => '𞲊',
            IndicSiyaqNumbers::IndicSiyaqNumberNineHundred => '𞲋',
            IndicSiyaqNumbers::IndicSiyaqNumberOneThousand => '𞲌',
            IndicSiyaqNumbers::IndicSiyaqNumberTwoThousand => '𞲍',
            IndicSiyaqNumbers::IndicSiyaqNumberThreeThousand => '𞲎',
            IndicSiyaqNumbers::IndicSiyaqNumberFourThousand => '𞲏',
            IndicSiyaqNumbers::IndicSiyaqNumberFiveThousand => '𞲐',
            IndicSiyaqNumbers::IndicSiyaqNumberSixThousand => '𞲑',
            IndicSiyaqNumbers::IndicSiyaqNumberSevenThousand => '𞲒',
            IndicSiyaqNumbers::IndicSiyaqNumberEightThousand => '𞲓',
            IndicSiyaqNumbers::IndicSiyaqNumberNineThousand => '𞲔',
            IndicSiyaqNumbers::IndicSiyaqNumberTenThousand => '𞲕',
            IndicSiyaqNumbers::IndicSiyaqNumberTwentyThousand => '𞲖',
            IndicSiyaqNumbers::IndicSiyaqNumberThirtyThousand => '𞲗',
            IndicSiyaqNumbers::IndicSiyaqNumberFortyThousand => '𞲘',
            IndicSiyaqNumbers::IndicSiyaqNumberFiftyThousand => '𞲙',
            IndicSiyaqNumbers::IndicSiyaqNumberSixtyThousand => '𞲚',
            IndicSiyaqNumbers::IndicSiyaqNumberSeventyThousand => '𞲛',
            IndicSiyaqNumbers::IndicSiyaqNumberEightyThousand => '𞲜',
            IndicSiyaqNumbers::IndicSiyaqNumberNinetyThousand => '𞲝',
            IndicSiyaqNumbers::IndicSiyaqNumberLakh => '𞲞',
            IndicSiyaqNumbers::IndicSiyaqNumberLakhan => '𞲟',
            IndicSiyaqNumbers::IndicSiyaqLakhMark => '𞲠',
            IndicSiyaqNumbers::IndicSiyaqNumberKaror => '𞲡',
            IndicSiyaqNumbers::IndicSiyaqNumberKaroran => '𞲢',
            IndicSiyaqNumbers::IndicSiyaqNumberPrefixedOne => '𞲣',
            IndicSiyaqNumbers::IndicSiyaqNumberPrefixedTwo => '𞲤',
            IndicSiyaqNumbers::IndicSiyaqNumberPrefixedThree => '𞲥',
            IndicSiyaqNumbers::IndicSiyaqNumberPrefixedFour => '𞲦',
            IndicSiyaqNumbers::IndicSiyaqNumberPrefixedFive => '𞲧',
            IndicSiyaqNumbers::IndicSiyaqNumberPrefixedSix => '𞲨',
            IndicSiyaqNumbers::IndicSiyaqNumberPrefixedSeven => '𞲩',
            IndicSiyaqNumbers::IndicSiyaqNumberPrefixedEight => '𞲪',
            IndicSiyaqNumbers::IndicSiyaqNumberPrefixedNine => '𞲫',
            IndicSiyaqNumbers::IndicSiyaqPlaceholder => '𞲬',
            IndicSiyaqNumbers::IndicSiyaqFractionOneQuarter => '𞲭',
            IndicSiyaqNumbers::IndicSiyaqFractionOneHalf => '𞲮',
            IndicSiyaqNumbers::IndicSiyaqFractionThreeQuarters => '𞲯',
            IndicSiyaqNumbers::IndicSiyaqRupeeMark => '𞲰',
            IndicSiyaqNumbers::IndicSiyaqNumberAlternateOne => '𞲱',
            IndicSiyaqNumbers::IndicSiyaqNumberAlternateTwo => '𞲲',
            IndicSiyaqNumbers::IndicSiyaqNumberAlternateTenThousand => '𞲳',
            IndicSiyaqNumbers::IndicSiyaqAlternateLakhMark => '𞲴',
        }
    }
}
impl std::convert::TryFrom<char> for IndicSiyaqNumbers {
    type Error = ();
    /// Maps a `char` to its `IndicSiyaqNumbers` variant.
    ///
    /// Returns `Err(())` for any character that is not listed in this
    /// block's table (i.e. not an Indic Siyaq Numbers character).
    fn try_from(c: char) -> Result<Self, Self::Error> {
        // Exhaustive table, one arm per character in the block; kept in
        // code-point order (generated code — do not reorder by hand).
        match c {
            '𞱱' => Ok(IndicSiyaqNumbers::IndicSiyaqNumberOne),
            '𞱲' => Ok(IndicSiyaqNumbers::IndicSiyaqNumberTwo),
            '𞱳' => Ok(IndicSiyaqNumbers::IndicSiyaqNumberThree),
            '𞱴' => Ok(IndicSiyaqNumbers::IndicSiyaqNumberFour),
            '𞱵' => Ok(IndicSiyaqNumbers::IndicSiyaqNumberFive),
            '𞱶' => Ok(IndicSiyaqNumbers::IndicSiyaqNumberSix),
            '𞱷' => Ok(IndicSiyaqNumbers::IndicSiyaqNumberSeven),
            '𞱸' => Ok(IndicSiyaqNumbers::IndicSiyaqNumberEight),
            '𞱹' => Ok(IndicSiyaqNumbers::IndicSiyaqNumberNine),
            '𞱺' => Ok(IndicSiyaqNumbers::IndicSiyaqNumberTen),
            '𞱻' => Ok(IndicSiyaqNumbers::IndicSiyaqNumberTwenty),
            '𞱼' => Ok(IndicSiyaqNumbers::IndicSiyaqNumberThirty),
            '𞱽' => Ok(IndicSiyaqNumbers::IndicSiyaqNumberForty),
            '𞱾' => Ok(IndicSiyaqNumbers::IndicSiyaqNumberFifty),
            '𞱿' => Ok(IndicSiyaqNumbers::IndicSiyaqNumberSixty),
            '𞲀' => Ok(IndicSiyaqNumbers::IndicSiyaqNumberSeventy),
            '𞲁' => Ok(IndicSiyaqNumbers::IndicSiyaqNumberEighty),
            '𞲂' => Ok(IndicSiyaqNumbers::IndicSiyaqNumberNinety),
            '𞲃' => Ok(IndicSiyaqNumbers::IndicSiyaqNumberOneHundred),
            '𞲄' => Ok(IndicSiyaqNumbers::IndicSiyaqNumberTwoHundred),
            '𞲅' => Ok(IndicSiyaqNumbers::IndicSiyaqNumberThreeHundred),
            '𞲆' => Ok(IndicSiyaqNumbers::IndicSiyaqNumberFourHundred),
            '𞲇' => Ok(IndicSiyaqNumbers::IndicSiyaqNumberFiveHundred),
            '𞲈' => Ok(IndicSiyaqNumbers::IndicSiyaqNumberSixHundred),
            '𞲉' => Ok(IndicSiyaqNumbers::IndicSiyaqNumberSevenHundred),
            '𞲊' => Ok(IndicSiyaqNumbers::IndicSiyaqNumberEightHundred),
            '𞲋' => Ok(IndicSiyaqNumbers::IndicSiyaqNumberNineHundred),
            '𞲌' => Ok(IndicSiyaqNumbers::IndicSiyaqNumberOneThousand),
            '𞲍' => Ok(IndicSiyaqNumbers::IndicSiyaqNumberTwoThousand),
            '𞲎' => Ok(IndicSiyaqNumbers::IndicSiyaqNumberThreeThousand),
            '𞲏' => Ok(IndicSiyaqNumbers::IndicSiyaqNumberFourThousand),
            '𞲐' => Ok(IndicSiyaqNumbers::IndicSiyaqNumberFiveThousand),
            '𞲑' => Ok(IndicSiyaqNumbers::IndicSiyaqNumberSixThousand),
            '𞲒' => Ok(IndicSiyaqNumbers::IndicSiyaqNumberSevenThousand),
            '𞲓' => Ok(IndicSiyaqNumbers::IndicSiyaqNumberEightThousand),
            '𞲔' => Ok(IndicSiyaqNumbers::IndicSiyaqNumberNineThousand),
            '𞲕' => Ok(IndicSiyaqNumbers::IndicSiyaqNumberTenThousand),
            '𞲖' => Ok(IndicSiyaqNumbers::IndicSiyaqNumberTwentyThousand),
            '𞲗' => Ok(IndicSiyaqNumbers::IndicSiyaqNumberThirtyThousand),
            '𞲘' => Ok(IndicSiyaqNumbers::IndicSiyaqNumberFortyThousand),
            '𞲙' => Ok(IndicSiyaqNumbers::IndicSiyaqNumberFiftyThousand),
            '𞲚' => Ok(IndicSiyaqNumbers::IndicSiyaqNumberSixtyThousand),
            '𞲛' => Ok(IndicSiyaqNumbers::IndicSiyaqNumberSeventyThousand),
            '𞲜' => Ok(IndicSiyaqNumbers::IndicSiyaqNumberEightyThousand),
            '𞲝' => Ok(IndicSiyaqNumbers::IndicSiyaqNumberNinetyThousand),
            '𞲞' => Ok(IndicSiyaqNumbers::IndicSiyaqNumberLakh),
            '𞲟' => Ok(IndicSiyaqNumbers::IndicSiyaqNumberLakhan),
            '𞲠' => Ok(IndicSiyaqNumbers::IndicSiyaqLakhMark),
            '𞲡' => Ok(IndicSiyaqNumbers::IndicSiyaqNumberKaror),
            '𞲢' => Ok(IndicSiyaqNumbers::IndicSiyaqNumberKaroran),
            '𞲣' => Ok(IndicSiyaqNumbers::IndicSiyaqNumberPrefixedOne),
            '𞲤' => Ok(IndicSiyaqNumbers::IndicSiyaqNumberPrefixedTwo),
            '𞲥' => Ok(IndicSiyaqNumbers::IndicSiyaqNumberPrefixedThree),
            '𞲦' => Ok(IndicSiyaqNumbers::IndicSiyaqNumberPrefixedFour),
            '𞲧' => Ok(IndicSiyaqNumbers::IndicSiyaqNumberPrefixedFive),
            '𞲨' => Ok(IndicSiyaqNumbers::IndicSiyaqNumberPrefixedSix),
            '𞲩' => Ok(IndicSiyaqNumbers::IndicSiyaqNumberPrefixedSeven),
            '𞲪' => Ok(IndicSiyaqNumbers::IndicSiyaqNumberPrefixedEight),
            '𞲫' => Ok(IndicSiyaqNumbers::IndicSiyaqNumberPrefixedNine),
            '𞲬' => Ok(IndicSiyaqNumbers::IndicSiyaqPlaceholder),
            '𞲭' => Ok(IndicSiyaqNumbers::IndicSiyaqFractionOneQuarter),
            '𞲮' => Ok(IndicSiyaqNumbers::IndicSiyaqFractionOneHalf),
            '𞲯' => Ok(IndicSiyaqNumbers::IndicSiyaqFractionThreeQuarters),
            '𞲰' => Ok(IndicSiyaqNumbers::IndicSiyaqRupeeMark),
            '𞲱' => Ok(IndicSiyaqNumbers::IndicSiyaqNumberAlternateOne),
            '𞲲' => Ok(IndicSiyaqNumbers::IndicSiyaqNumberAlternateTwo),
            '𞲳' => Ok(IndicSiyaqNumbers::IndicSiyaqNumberAlternateTenThousand),
            '𞲴' => Ok(IndicSiyaqNumbers::IndicSiyaqAlternateLakhMark),
            _ => Err(()),
        }
    }
}
impl Into<u32> for IndicSiyaqNumbers {
    /// Returns the character's Unicode code point (scalar value).
    fn into(self) -> u32 {
        // Fix: the previous implementation formatted the char as its
        // "\u{...}" escape string, stripped the wrapper with two
        // `replace` calls, and re-parsed the hex with `unwrap()`.
        // `u32::from(char)` yields the same scalar value directly,
        // without allocation and without a panic path.
        let c: char = self.into();
        u32::from(c)
    }
}
impl std::convert::TryFrom<u32> for IndicSiyaqNumbers {
    type Error = ();
    /// Attempts to map a `u32` code point to a character of this block.
    ///
    /// Fails with `Err(())` when `u` is not a valid Unicode scalar value,
    /// or when the resulting `char` is not part of this block.
    fn try_from(u: u32) -> Result<Self, Self::Error> {
        // First reject invalid scalar values, then defer to the
        // `TryFrom<char>` implementation for the block lookup.
        char::try_from(u).map_err(|_| ()).and_then(Self::try_from)
    }
}
impl Iterator for IndicSiyaqNumbers {
    type Item = Self;
    /// Yields the characters that follow `self` in code-point order,
    /// ending with `None` after the last character of the block.
    fn next(&mut self) -> Option<Self> {
        use std::convert::TryFrom;
        // Current position as a code point.
        let index: u32 = (*self).into();
        // Successor is the character at the next code point, if any.
        let succ = Self::try_from(index + 1).ok()?;
        // Fix: the previous implementation never advanced `*self`, so the
        // iterator returned the same successor on every call — an infinite
        // loop in any `for` loop. Record progress before yielding.
        *self = succ;
        Some(succ)
    }
}
impl IndicSiyaqNumbers {
    /// The character with the lowest index in this unicode block.
    pub fn new() -> Self {
        IndicSiyaqNumbers::IndicSiyaqNumberOne
    }
    /// The character's name, in sentence case.
    pub fn name(&self) -> String {
        // The Debug name of the variant, prefixed with the type name,
        // carries the full "Indic Siyaq ..." wording; sentence-casing it
        // produces the human-readable character name.
        let debug_name = format!("IndicSiyaqNumbers{:#?}", self);
        string_morph::to_sentence_case(&debug_name)
    }
}
|
#[macro_use]
extern crate quote;
extern crate syn;
extern crate proc_macro;
extern crate serde_json as json;
#[macro_use]
extern crate failure;
extern crate itertools;
mod attr;
mod tests;
mod util;
use attr::extract_attrs;
use tests::read_tests_from_dir;
use util::*;
use failure::Error;
use itertools::Itertools;
use syn::Ident;
use proc_macro::TokenStream;
#[proc_macro_derive(JsonTests, attributes(directory, test_with, bench_with, skip, should_panic))]
pub fn json_tests(input: TokenStream) -> TokenStream {
    // Stringify the annotated item, then re-parse it into an AST the
    // generator can inspect (syn 0.x API).
    let source = input.to_string();
    let ast = syn::parse_derive_input(&source).unwrap();
    // Generate the test modules; any configuration or I/O error aborts
    // compilation with its message.
    let generated = impl_json_tests(&ast).unwrap_or_else(|err| panic!("{}", err));
    // Re-parse the generated tokens back into a TokenStream for rustc.
    generated.parse().unwrap()
}
/// Builds the token stream of generated `#[test]` (and optional `#[bench]`)
/// functions from the derive attributes on `ast`.
///
/// Reads the configuration (`directory`, `test_with`, `bench_with`,
/// `should_panic`) via `extract_attrs`, loads the JSON test cases from the
/// configured directory, and emits one test fn per case. Cases are grouped
/// by source file; a file gets its own submodule only when it contains more
/// than one case.
fn impl_json_tests(ast: &syn::DeriveInput) -> Result<quote::Tokens, Error> {
    let config = extract_attrs(&ast)?;
    let tests = read_tests_from_dir(&config.directory)?;
    let mut tokens = quote::Tokens::new();
    // split tests into groups by filepath
    let tests = tests.group_by(|test| test.path.clone());
    open_directory_module(&config, &mut tokens);
    for (filepath, tests) in &tests {
        // If tests count in this file is 1, we don't need submodule
        let tests = tests.collect::<Vec<_>>();
        let need_file_submodule = tests.len() > 1;
        if need_file_submodule {
            open_file_module(&filepath, &mut tokens);
        }
        // Generate test function
        for test in tests {
            let test_func_path = &config.test_with.path;
            let test_func_name = &config.test_with.name;
            // Test names come from JSON and may not be valid Rust idents.
            let name = sanitize_ident(&test.name);
            let name_ident = Ident::from(name.as_ref());
            // Embed the case data as a JSON string literal in the test body.
            let data = json::to_string(&test.data)?;
            // generate test attrs
            tokens.append(quote!{#[test]});
            if config.should_panic {
                tokens.append(quote!{#[should_panic]});
            }
            // generate test body
            tokens.append(quote! {
                fn #name_ident() {
                    use #test_func_path;
                    let data = #data;
                    #test_func_name(#name, data);
                }
            });
            // generate optional benchmark body
            if let Some(ref bench) = config.bench_with {
                let bench_func_path = &bench.path;
                let bench_func_name = &bench.name;
                // Shadow `name` with a "bench_"-prefixed variant.
                let name = format!("bench_{}", name);
                let name_ident = Ident::from(name.as_ref());
                tokens.append(quote! {
                    #[bench]
                    fn #name_ident(b: &mut test::Bencher) {
                        use #bench_func_path;
                        let data = #data;
                        #bench_func_name(b, #name, data);
                    }
                })
            }
        }
        if need_file_submodule {
            // Close file module
            close_brace(&mut tokens)
        }
    }
    // Close directory module
    close_brace(&mut tokens);
    Ok(tokens)
}
|
#![doc = "generated by AutoRust 0.1.0"]
#![allow(unused_mut)]
#![allow(unused_variables)]
#![allow(unused_imports)]
use super::{models, API_VERSION};
/// Aggregate error type for every operation exposed by this generated
/// client module.
///
/// Each variant wraps (via `#[from]`) the error type of exactly one
/// operation submodule, so `?` can lift any operation error into this one.
/// `non_camel_case_types` is allowed because variant names mirror the
/// `OperationGroup_Operation` ids of the service specification.
#[non_exhaustive]
#[derive(Debug, thiserror :: Error)]
#[allow(non_camel_case_types)]
pub enum Error {
    #[error(transparent)]
    KeyVaultConnections_Get(#[from] key_vault_connections::get::Error),
    #[error(transparent)]
    KeyVaultConnections_Create(#[from] key_vault_connections::create::Error),
    #[error(transparent)]
    KeyVaultConnections_Delete(#[from] key_vault_connections::delete::Error),
    #[error(transparent)]
    KeyVaultConnections_ListAll(#[from] key_vault_connections::list_all::Error),
    #[error(transparent)]
    ClassificationRules_Get(#[from] classification_rules::get::Error),
    #[error(transparent)]
    ClassificationRules_CreateOrUpdate(#[from] classification_rules::create_or_update::Error),
    #[error(transparent)]
    ClassificationRules_Delete(#[from] classification_rules::delete::Error),
    #[error(transparent)]
    ClassificationRules_ListAll(#[from] classification_rules::list_all::Error),
    #[error(transparent)]
    ClassificationRules_ListVersionsByClassificationRuleName(
        #[from] classification_rules::list_versions_by_classification_rule_name::Error,
    ),
    #[error(transparent)]
    ClassificationRules_TagClassificationVersion(#[from] classification_rules::tag_classification_version::Error),
    #[error(transparent)]
    DataSources_Get(#[from] data_sources::get::Error),
    #[error(transparent)]
    DataSources_CreateOrUpdate(#[from] data_sources::create_or_update::Error),
    #[error(transparent)]
    DataSources_Delete(#[from] data_sources::delete::Error),
    #[error(transparent)]
    DataSources_ListAll(#[from] data_sources::list_all::Error),
    #[error(transparent)]
    Filters_Get(#[from] filters::get::Error),
    #[error(transparent)]
    Filters_CreateOrUpdate(#[from] filters::create_or_update::Error),
    #[error(transparent)]
    Scans_Get(#[from] scans::get::Error),
    #[error(transparent)]
    Scans_CreateOrUpdate(#[from] scans::create_or_update::Error),
    #[error(transparent)]
    Scans_Delete(#[from] scans::delete::Error),
    #[error(transparent)]
    Scans_ListByDataSource(#[from] scans::list_by_data_source::Error),
    #[error(transparent)]
    ScanResult_RunScan(#[from] scan_result::run_scan::Error),
    #[error(transparent)]
    ScanResult_CancelScan(#[from] scan_result::cancel_scan::Error),
    #[error(transparent)]
    ScanResult_ListScanHistory(#[from] scan_result::list_scan_history::Error),
    #[error(transparent)]
    ScanRulesets_Get(#[from] scan_rulesets::get::Error),
    #[error(transparent)]
    ScanRulesets_CreateOrUpdate(#[from] scan_rulesets::create_or_update::Error),
    #[error(transparent)]
    ScanRulesets_Delete(#[from] scan_rulesets::delete::Error),
    #[error(transparent)]
    ScanRulesets_ListAll(#[from] scan_rulesets::list_all::Error),
    #[error(transparent)]
    SystemScanRulesets_ListAll(#[from] system_scan_rulesets::list_all::Error),
    #[error(transparent)]
    SystemScanRulesets_Get(#[from] system_scan_rulesets::get::Error),
    #[error(transparent)]
    SystemScanRulesets_GetByVersion(#[from] system_scan_rulesets::get_by_version::Error),
    #[error(transparent)]
    SystemScanRulesets_GetLatest(#[from] system_scan_rulesets::get_latest::Error),
    #[error(transparent)]
    SystemScanRulesets_ListVersionsByDataSource(#[from] system_scan_rulesets::list_versions_by_data_source::Error),
    #[error(transparent)]
    Triggers_GetTrigger(#[from] triggers::get_trigger::Error),
    #[error(transparent)]
    Triggers_CreateTrigger(#[from] triggers::create_trigger::Error),
    #[error(transparent)]
    Triggers_DeleteTrigger(#[from] triggers::delete_trigger::Error),
}
/// Operations on the `azureKeyVaults` collection of the service.
pub mod key_vault_connections {
    use super::{models, API_VERSION};
    /// Fetches one key-vault connection by name.
    ///
    /// Issues `GET {base_path}/azureKeyVaults/{key_vault_name}`. A 200
    /// response is deserialized into `models::AzureKeyVault`; every other
    /// status becomes `get::Error::DefaultResponse`.
    pub async fn get(
        operation_config: &crate::OperationConfig,
        key_vault_name: &str,
    ) -> std::result::Result<models::AzureKeyVault, get::Error> {
        let http_client = operation_config.http_client();
        let url_str = &format!("{}/azureKeyVaults/{}", operation_config.base_path(), key_vault_name);
        let mut url = url::Url::parse(url_str).map_err(get::Error::ParseUrlError)?;
        let mut req_builder = http::request::Builder::new();
        req_builder = req_builder.method(http::Method::GET);
        // Bearer auth is optional: attached only when the config holds a
        // token credential.
        if let Some(token_credential) = operation_config.token_credential() {
            let token_response = token_credential
                .get_token(operation_config.token_credential_resource())
                .await
                .map_err(get::Error::GetTokenError)?;
            req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
        }
        url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
        let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
        req_builder = req_builder.uri(url.as_str());
        let req = req_builder.body(req_body).map_err(get::Error::BuildRequestError)?;
        let rsp = http_client.execute_request(req).await.map_err(get::Error::ExecuteRequestError)?;
        match rsp.status() {
            http::StatusCode::OK => {
                let rsp_body = rsp.body();
                let rsp_value: models::AzureKeyVault =
                    serde_json::from_slice(rsp_body).map_err(|source| get::Error::DeserializeError(source, rsp_body.clone()))?;
                Ok(rsp_value)
            }
            status_code => {
                // Non-200: decode the service error payload and surface it.
                let rsp_body = rsp.body();
                let rsp_value: models::ErrorResponseModel =
                    serde_json::from_slice(rsp_body).map_err(|source| get::Error::DeserializeError(source, rsp_body.clone()))?;
                Err(get::Error::DefaultResponse {
                    status_code,
                    value: rsp_value,
                })
            }
        }
    }
    /// Error type for [`get`].
    pub mod get {
        use super::{models, API_VERSION};
        #[derive(Debug, thiserror :: Error)]
        pub enum Error {
            #[error("HTTP status code {}", status_code)]
            DefaultResponse {
                status_code: http::StatusCode,
                value: models::ErrorResponseModel,
            },
            #[error("Failed to parse request URL: {0}")]
            ParseUrlError(url::ParseError),
            #[error("Failed to build request: {0}")]
            BuildRequestError(http::Error),
            #[error("Failed to execute request: {0}")]
            ExecuteRequestError(azure_core::HttpError),
            #[error("Failed to serialize request body: {0}")]
            SerializeError(serde_json::Error),
            #[error("Failed to deserialize response: {0}, body: {1:?}")]
            DeserializeError(serde_json::Error, bytes::Bytes),
            #[error("Failed to get access token: {0}")]
            GetTokenError(azure_core::Error),
        }
    }
    /// Creates (or replaces) a key-vault connection.
    ///
    /// Issues `PUT {base_path}/azureKeyVaults/{key_vault_name}` with `body`
    /// serialized as JSON; a 200 response returns the stored resource.
    pub async fn create(
        operation_config: &crate::OperationConfig,
        key_vault_name: &str,
        body: &models::AzureKeyVault,
    ) -> std::result::Result<models::AzureKeyVault, create::Error> {
        let http_client = operation_config.http_client();
        let url_str = &format!("{}/azureKeyVaults/{}", operation_config.base_path(), key_vault_name);
        let mut url = url::Url::parse(url_str).map_err(create::Error::ParseUrlError)?;
        let mut req_builder = http::request::Builder::new();
        req_builder = req_builder.method(http::Method::PUT);
        if let Some(token_credential) = operation_config.token_credential() {
            let token_response = token_credential
                .get_token(operation_config.token_credential_resource())
                .await
                .map_err(create::Error::GetTokenError)?;
            req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
        }
        url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
        req_builder = req_builder.header("content-type", "application/json");
        let req_body = azure_core::to_json(body).map_err(create::Error::SerializeError)?;
        req_builder = req_builder.uri(url.as_str());
        let req = req_builder.body(req_body).map_err(create::Error::BuildRequestError)?;
        let rsp = http_client.execute_request(req).await.map_err(create::Error::ExecuteRequestError)?;
        match rsp.status() {
            http::StatusCode::OK => {
                let rsp_body = rsp.body();
                let rsp_value: models::AzureKeyVault =
                    serde_json::from_slice(rsp_body).map_err(|source| create::Error::DeserializeError(source, rsp_body.clone()))?;
                Ok(rsp_value)
            }
            status_code => {
                let rsp_body = rsp.body();
                let rsp_value: models::ErrorResponseModel =
                    serde_json::from_slice(rsp_body).map_err(|source| create::Error::DeserializeError(source, rsp_body.clone()))?;
                Err(create::Error::DefaultResponse {
                    status_code,
                    value: rsp_value,
                })
            }
        }
    }
    /// Error type for [`create`].
    pub mod create {
        use super::{models, API_VERSION};
        #[derive(Debug, thiserror :: Error)]
        pub enum Error {
            #[error("HTTP status code {}", status_code)]
            DefaultResponse {
                status_code: http::StatusCode,
                value: models::ErrorResponseModel,
            },
            #[error("Failed to parse request URL: {0}")]
            ParseUrlError(url::ParseError),
            #[error("Failed to build request: {0}")]
            BuildRequestError(http::Error),
            #[error("Failed to execute request: {0}")]
            ExecuteRequestError(azure_core::HttpError),
            #[error("Failed to serialize request body: {0}")]
            SerializeError(serde_json::Error),
            #[error("Failed to deserialize response: {0}, body: {1:?}")]
            DeserializeError(serde_json::Error, bytes::Bytes),
            #[error("Failed to get access token: {0}")]
            GetTokenError(azure_core::Error),
        }
    }
    /// Deletes a key-vault connection.
    ///
    /// `DELETE {base_path}/azureKeyVaults/{key_vault_name}`. Both 200
    /// (resource returned) and 204 (nothing to return) count as success and
    /// are distinguished by [`delete::Response`].
    pub async fn delete(
        operation_config: &crate::OperationConfig,
        key_vault_name: &str,
    ) -> std::result::Result<delete::Response, delete::Error> {
        let http_client = operation_config.http_client();
        let url_str = &format!("{}/azureKeyVaults/{}", operation_config.base_path(), key_vault_name);
        let mut url = url::Url::parse(url_str).map_err(delete::Error::ParseUrlError)?;
        let mut req_builder = http::request::Builder::new();
        req_builder = req_builder.method(http::Method::DELETE);
        if let Some(token_credential) = operation_config.token_credential() {
            let token_response = token_credential
                .get_token(operation_config.token_credential_resource())
                .await
                .map_err(delete::Error::GetTokenError)?;
            req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
        }
        url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
        let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
        req_builder = req_builder.uri(url.as_str());
        let req = req_builder.body(req_body).map_err(delete::Error::BuildRequestError)?;
        let rsp = http_client.execute_request(req).await.map_err(delete::Error::ExecuteRequestError)?;
        match rsp.status() {
            http::StatusCode::OK => {
                let rsp_body = rsp.body();
                let rsp_value: models::AzureKeyVault =
                    serde_json::from_slice(rsp_body).map_err(|source| delete::Error::DeserializeError(source, rsp_body.clone()))?;
                Ok(delete::Response::Ok200(rsp_value))
            }
            http::StatusCode::NO_CONTENT => Ok(delete::Response::NoContent204),
            status_code => {
                let rsp_body = rsp.body();
                let rsp_value: models::ErrorResponseModel =
                    serde_json::from_slice(rsp_body).map_err(|source| delete::Error::DeserializeError(source, rsp_body.clone()))?;
                Err(delete::Error::DefaultResponse {
                    status_code,
                    value: rsp_value,
                })
            }
        }
    }
    /// Response and error types for [`delete`].
    pub mod delete {
        use super::{models, API_VERSION};
        #[derive(Debug)]
        pub enum Response {
            Ok200(models::AzureKeyVault),
            NoContent204,
        }
        #[derive(Debug, thiserror :: Error)]
        pub enum Error {
            #[error("HTTP status code {}", status_code)]
            DefaultResponse {
                status_code: http::StatusCode,
                value: models::ErrorResponseModel,
            },
            #[error("Failed to parse request URL: {0}")]
            ParseUrlError(url::ParseError),
            #[error("Failed to build request: {0}")]
            BuildRequestError(http::Error),
            #[error("Failed to execute request: {0}")]
            ExecuteRequestError(azure_core::HttpError),
            #[error("Failed to serialize request body: {0}")]
            SerializeError(serde_json::Error),
            #[error("Failed to deserialize response: {0}, body: {1:?}")]
            DeserializeError(serde_json::Error, bytes::Bytes),
            #[error("Failed to get access token: {0}")]
            GetTokenError(azure_core::Error),
        }
    }
    /// Lists all key-vault connections.
    ///
    /// `GET {base_path}/azureKeyVaults`; a 200 response is deserialized
    /// into `models::AzureKeyVaultList`.
    pub async fn list_all(operation_config: &crate::OperationConfig) -> std::result::Result<models::AzureKeyVaultList, list_all::Error> {
        let http_client = operation_config.http_client();
        let url_str = &format!("{}/azureKeyVaults", operation_config.base_path(),);
        let mut url = url::Url::parse(url_str).map_err(list_all::Error::ParseUrlError)?;
        let mut req_builder = http::request::Builder::new();
        req_builder = req_builder.method(http::Method::GET);
        if let Some(token_credential) = operation_config.token_credential() {
            let token_response = token_credential
                .get_token(operation_config.token_credential_resource())
                .await
                .map_err(list_all::Error::GetTokenError)?;
            req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
        }
        url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
        let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
        req_builder = req_builder.uri(url.as_str());
        let req = req_builder.body(req_body).map_err(list_all::Error::BuildRequestError)?;
        let rsp = http_client
            .execute_request(req)
            .await
            .map_err(list_all::Error::ExecuteRequestError)?;
        match rsp.status() {
            http::StatusCode::OK => {
                let rsp_body = rsp.body();
                let rsp_value: models::AzureKeyVaultList =
                    serde_json::from_slice(rsp_body).map_err(|source| list_all::Error::DeserializeError(source, rsp_body.clone()))?;
                Ok(rsp_value)
            }
            status_code => {
                let rsp_body = rsp.body();
                let rsp_value: models::ErrorResponseModel =
                    serde_json::from_slice(rsp_body).map_err(|source| list_all::Error::DeserializeError(source, rsp_body.clone()))?;
                Err(list_all::Error::DefaultResponse {
                    status_code,
                    value: rsp_value,
                })
            }
        }
    }
    /// Error type for [`list_all`].
    pub mod list_all {
        use super::{models, API_VERSION};
        #[derive(Debug, thiserror :: Error)]
        pub enum Error {
            #[error("HTTP status code {}", status_code)]
            DefaultResponse {
                status_code: http::StatusCode,
                value: models::ErrorResponseModel,
            },
            #[error("Failed to parse request URL: {0}")]
            ParseUrlError(url::ParseError),
            #[error("Failed to build request: {0}")]
            BuildRequestError(http::Error),
            #[error("Failed to execute request: {0}")]
            ExecuteRequestError(azure_core::HttpError),
            #[error("Failed to serialize request body: {0}")]
            SerializeError(serde_json::Error),
            #[error("Failed to deserialize response: {0}, body: {1:?}")]
            DeserializeError(serde_json::Error, bytes::Bytes),
            #[error("Failed to get access token: {0}")]
            GetTokenError(azure_core::Error),
        }
    }
}
pub mod classification_rules {
use super::{models, API_VERSION};
/// Fetches one classification rule by name.
///
/// Issues `GET {base_path}/classificationrules/{classification_rule_name}`.
/// A 200 response is deserialized into `models::ClassificationRule`; any
/// other status becomes `get::Error::DefaultResponse`.
pub async fn get(
    operation_config: &crate::OperationConfig,
    classification_rule_name: &str,
) -> std::result::Result<models::ClassificationRule, get::Error> {
    let http_client = operation_config.http_client();
    let url_str = &format!("{}/classificationrules/{}", operation_config.base_path(), classification_rule_name);
    let mut url = url::Url::parse(url_str).map_err(get::Error::ParseUrlError)?;
    let mut req_builder = http::request::Builder::new();
    req_builder = req_builder.method(http::Method::GET);
    // Bearer auth is optional: attached only when a credential is present.
    if let Some(token_credential) = operation_config.token_credential() {
        let token_response = token_credential
            .get_token(operation_config.token_credential_resource())
            .await
            .map_err(get::Error::GetTokenError)?;
        req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
    }
    url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
    let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
    req_builder = req_builder.uri(url.as_str());
    let req = req_builder.body(req_body).map_err(get::Error::BuildRequestError)?;
    let rsp = http_client.execute_request(req).await.map_err(get::Error::ExecuteRequestError)?;
    match rsp.status() {
        http::StatusCode::OK => {
            let rsp_body = rsp.body();
            let rsp_value: models::ClassificationRule =
                serde_json::from_slice(rsp_body).map_err(|source| get::Error::DeserializeError(source, rsp_body.clone()))?;
            Ok(rsp_value)
        }
        status_code => {
            // Non-200: decode the service error payload and surface it.
            let rsp_body = rsp.body();
            let rsp_value: models::ErrorResponseModel =
                serde_json::from_slice(rsp_body).map_err(|source| get::Error::DeserializeError(source, rsp_body.clone()))?;
            Err(get::Error::DefaultResponse {
                status_code,
                value: rsp_value,
            })
        }
    }
}
/// Error type for [`get`].
pub mod get {
    use super::{models, API_VERSION};
    #[derive(Debug, thiserror :: Error)]
    pub enum Error {
        #[error("HTTP status code {}", status_code)]
        DefaultResponse {
            status_code: http::StatusCode,
            value: models::ErrorResponseModel,
        },
        #[error("Failed to parse request URL: {0}")]
        ParseUrlError(url::ParseError),
        #[error("Failed to build request: {0}")]
        BuildRequestError(http::Error),
        #[error("Failed to execute request: {0}")]
        ExecuteRequestError(azure_core::HttpError),
        #[error("Failed to serialize request body: {0}")]
        SerializeError(serde_json::Error),
        #[error("Failed to deserialize response: {0}, body: {1:?}")]
        DeserializeError(serde_json::Error, bytes::Bytes),
        #[error("Failed to get access token: {0}")]
        GetTokenError(azure_core::Error),
    }
}
/// Creates or replaces a classification rule.
///
/// Issues `PUT {base_path}/classificationrules/{classification_rule_name}`.
/// `body` is optional: when `Some`, it is sent as JSON with a
/// `content-type` header; when `None`, an empty body is sent. A 200
/// response maps to `Response::Ok200`, a 201 to `Response::Created201`.
pub async fn create_or_update(
    operation_config: &crate::OperationConfig,
    classification_rule_name: &str,
    body: Option<&models::ClassificationRule>,
) -> std::result::Result<create_or_update::Response, create_or_update::Error> {
    let http_client = operation_config.http_client();
    let url_str = &format!("{}/classificationrules/{}", operation_config.base_path(), classification_rule_name);
    let mut url = url::Url::parse(url_str).map_err(create_or_update::Error::ParseUrlError)?;
    let mut req_builder = http::request::Builder::new();
    req_builder = req_builder.method(http::Method::PUT);
    if let Some(token_credential) = operation_config.token_credential() {
        let token_response = token_credential
            .get_token(operation_config.token_credential_resource())
            .await
            .map_err(create_or_update::Error::GetTokenError)?;
        req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
    }
    url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
    // The content-type header is only set when a JSON body is present.
    let req_body = if let Some(body) = body {
        req_builder = req_builder.header("content-type", "application/json");
        azure_core::to_json(body).map_err(create_or_update::Error::SerializeError)?
    } else {
        bytes::Bytes::from_static(azure_core::EMPTY_BODY)
    };
    req_builder = req_builder.uri(url.as_str());
    let req = req_builder.body(req_body).map_err(create_or_update::Error::BuildRequestError)?;
    let rsp = http_client
        .execute_request(req)
        .await
        .map_err(create_or_update::Error::ExecuteRequestError)?;
    match rsp.status() {
        http::StatusCode::OK => {
            let rsp_body = rsp.body();
            let rsp_value: models::ClassificationRule = serde_json::from_slice(rsp_body)
                .map_err(|source| create_or_update::Error::DeserializeError(source, rsp_body.clone()))?;
            Ok(create_or_update::Response::Ok200(rsp_value))
        }
        http::StatusCode::CREATED => {
            let rsp_body = rsp.body();
            let rsp_value: models::ClassificationRule = serde_json::from_slice(rsp_body)
                .map_err(|source| create_or_update::Error::DeserializeError(source, rsp_body.clone()))?;
            Ok(create_or_update::Response::Created201(rsp_value))
        }
        status_code => {
            let rsp_body = rsp.body();
            let rsp_value: models::ErrorResponseModel = serde_json::from_slice(rsp_body)
                .map_err(|source| create_or_update::Error::DeserializeError(source, rsp_body.clone()))?;
            Err(create_or_update::Error::DefaultResponse {
                status_code,
                value: rsp_value,
            })
        }
    }
}
/// Response and error types for [`create_or_update`].
pub mod create_or_update {
    use super::{models, API_VERSION};
    #[derive(Debug)]
    pub enum Response {
        Ok200(models::ClassificationRule),
        Created201(models::ClassificationRule),
    }
    #[derive(Debug, thiserror :: Error)]
    pub enum Error {
        #[error("HTTP status code {}", status_code)]
        DefaultResponse {
            status_code: http::StatusCode,
            value: models::ErrorResponseModel,
        },
        #[error("Failed to parse request URL: {0}")]
        ParseUrlError(url::ParseError),
        #[error("Failed to build request: {0}")]
        BuildRequestError(http::Error),
        #[error("Failed to execute request: {0}")]
        ExecuteRequestError(azure_core::HttpError),
        #[error("Failed to serialize request body: {0}")]
        SerializeError(serde_json::Error),
        #[error("Failed to deserialize response: {0}, body: {1:?}")]
        DeserializeError(serde_json::Error, bytes::Bytes),
        #[error("Failed to get access token: {0}")]
        GetTokenError(azure_core::Error),
    }
}
/// Deletes a classification rule.
///
/// `DELETE {base_path}/classificationrules/{classification_rule_name}`.
/// Both 200 (deleted resource returned) and 204 (no content) are success
/// and are distinguished by [`delete::Response`].
pub async fn delete(
    operation_config: &crate::OperationConfig,
    classification_rule_name: &str,
) -> std::result::Result<delete::Response, delete::Error> {
    let http_client = operation_config.http_client();
    let url_str = &format!("{}/classificationrules/{}", operation_config.base_path(), classification_rule_name);
    let mut url = url::Url::parse(url_str).map_err(delete::Error::ParseUrlError)?;
    let mut req_builder = http::request::Builder::new();
    req_builder = req_builder.method(http::Method::DELETE);
    if let Some(token_credential) = operation_config.token_credential() {
        let token_response = token_credential
            .get_token(operation_config.token_credential_resource())
            .await
            .map_err(delete::Error::GetTokenError)?;
        req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
    }
    url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
    let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
    req_builder = req_builder.uri(url.as_str());
    let req = req_builder.body(req_body).map_err(delete::Error::BuildRequestError)?;
    let rsp = http_client.execute_request(req).await.map_err(delete::Error::ExecuteRequestError)?;
    match rsp.status() {
        http::StatusCode::OK => {
            let rsp_body = rsp.body();
            let rsp_value: models::ClassificationRule =
                serde_json::from_slice(rsp_body).map_err(|source| delete::Error::DeserializeError(source, rsp_body.clone()))?;
            Ok(delete::Response::Ok200(rsp_value))
        }
        http::StatusCode::NO_CONTENT => Ok(delete::Response::NoContent204),
        status_code => {
            let rsp_body = rsp.body();
            let rsp_value: models::ErrorResponseModel =
                serde_json::from_slice(rsp_body).map_err(|source| delete::Error::DeserializeError(source, rsp_body.clone()))?;
            Err(delete::Error::DefaultResponse {
                status_code,
                value: rsp_value,
            })
        }
    }
}
/// Response and error types for [`delete`].
pub mod delete {
    use super::{models, API_VERSION};
    #[derive(Debug)]
    pub enum Response {
        Ok200(models::ClassificationRule),
        NoContent204,
    }
    #[derive(Debug, thiserror :: Error)]
    pub enum Error {
        #[error("HTTP status code {}", status_code)]
        DefaultResponse {
            status_code: http::StatusCode,
            value: models::ErrorResponseModel,
        },
        #[error("Failed to parse request URL: {0}")]
        ParseUrlError(url::ParseError),
        #[error("Failed to build request: {0}")]
        BuildRequestError(http::Error),
        #[error("Failed to execute request: {0}")]
        ExecuteRequestError(azure_core::HttpError),
        #[error("Failed to serialize request body: {0}")]
        SerializeError(serde_json::Error),
        #[error("Failed to deserialize response: {0}, body: {1:?}")]
        DeserializeError(serde_json::Error, bytes::Bytes),
        #[error("Failed to get access token: {0}")]
        GetTokenError(azure_core::Error),
    }
}
/// Lists all classification rules.
///
/// `GET {base_path}/classificationrules`; a 200 response is deserialized
/// into `models::ClassificationRuleList`.
pub async fn list_all(
    operation_config: &crate::OperationConfig,
) -> std::result::Result<models::ClassificationRuleList, list_all::Error> {
    let http_client = operation_config.http_client();
    let url_str = &format!("{}/classificationrules", operation_config.base_path(),);
    let mut url = url::Url::parse(url_str).map_err(list_all::Error::ParseUrlError)?;
    let mut req_builder = http::request::Builder::new();
    req_builder = req_builder.method(http::Method::GET);
    if let Some(token_credential) = operation_config.token_credential() {
        let token_response = token_credential
            .get_token(operation_config.token_credential_resource())
            .await
            .map_err(list_all::Error::GetTokenError)?;
        req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
    }
    url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
    let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
    req_builder = req_builder.uri(url.as_str());
    let req = req_builder.body(req_body).map_err(list_all::Error::BuildRequestError)?;
    let rsp = http_client
        .execute_request(req)
        .await
        .map_err(list_all::Error::ExecuteRequestError)?;
    match rsp.status() {
        http::StatusCode::OK => {
            let rsp_body = rsp.body();
            let rsp_value: models::ClassificationRuleList =
                serde_json::from_slice(rsp_body).map_err(|source| list_all::Error::DeserializeError(source, rsp_body.clone()))?;
            Ok(rsp_value)
        }
        status_code => {
            let rsp_body = rsp.body();
            let rsp_value: models::ErrorResponseModel =
                serde_json::from_slice(rsp_body).map_err(|source| list_all::Error::DeserializeError(source, rsp_body.clone()))?;
            Err(list_all::Error::DefaultResponse {
                status_code,
                value: rsp_value,
            })
        }
    }
}
/// Error type for [`list_all`].
pub mod list_all {
    use super::{models, API_VERSION};
    #[derive(Debug, thiserror :: Error)]
    pub enum Error {
        #[error("HTTP status code {}", status_code)]
        DefaultResponse {
            status_code: http::StatusCode,
            value: models::ErrorResponseModel,
        },
        #[error("Failed to parse request URL: {0}")]
        ParseUrlError(url::ParseError),
        #[error("Failed to build request: {0}")]
        BuildRequestError(http::Error),
        #[error("Failed to execute request: {0}")]
        ExecuteRequestError(azure_core::HttpError),
        #[error("Failed to serialize request body: {0}")]
        SerializeError(serde_json::Error),
        #[error("Failed to deserialize response: {0}, body: {1:?}")]
        DeserializeError(serde_json::Error, bytes::Bytes),
        #[error("Failed to get access token: {0}")]
        GetTokenError(azure_core::Error),
    }
}
/// Lists all stored versions of one classification rule.
///
/// `GET {base_path}/classificationrules/{classification_rule_name}/versions`;
/// a 200 response is deserialized into `models::ClassificationRuleList`.
pub async fn list_versions_by_classification_rule_name(
    operation_config: &crate::OperationConfig,
    classification_rule_name: &str,
) -> std::result::Result<models::ClassificationRuleList, list_versions_by_classification_rule_name::Error> {
    let http_client = operation_config.http_client();
    let url_str = &format!(
        "{}/classificationrules/{}/versions",
        operation_config.base_path(),
        classification_rule_name
    );
    let mut url = url::Url::parse(url_str).map_err(list_versions_by_classification_rule_name::Error::ParseUrlError)?;
    let mut req_builder = http::request::Builder::new();
    req_builder = req_builder.method(http::Method::GET);
    if let Some(token_credential) = operation_config.token_credential() {
        let token_response = token_credential
            .get_token(operation_config.token_credential_resource())
            .await
            .map_err(list_versions_by_classification_rule_name::Error::GetTokenError)?;
        req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
    }
    url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
    let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
    req_builder = req_builder.uri(url.as_str());
    let req = req_builder
        .body(req_body)
        .map_err(list_versions_by_classification_rule_name::Error::BuildRequestError)?;
    let rsp = http_client
        .execute_request(req)
        .await
        .map_err(list_versions_by_classification_rule_name::Error::ExecuteRequestError)?;
    match rsp.status() {
        http::StatusCode::OK => {
            let rsp_body = rsp.body();
            let rsp_value: models::ClassificationRuleList = serde_json::from_slice(rsp_body)
                .map_err(|source| list_versions_by_classification_rule_name::Error::DeserializeError(source, rsp_body.clone()))?;
            Ok(rsp_value)
        }
        status_code => {
            let rsp_body = rsp.body();
            let rsp_value: models::ErrorResponseModel = serde_json::from_slice(rsp_body)
                .map_err(|source| list_versions_by_classification_rule_name::Error::DeserializeError(source, rsp_body.clone()))?;
            Err(list_versions_by_classification_rule_name::Error::DefaultResponse {
                status_code,
                value: rsp_value,
            })
        }
    }
}
pub mod list_versions_by_classification_rule_name {
use super::{models, API_VERSION};
// Failure modes of `list_versions_by_classification_rule_name`, one variant
// per stage of the request pipeline (URL parse -> auth -> build -> send -> decode).
#[derive(Debug, thiserror :: Error)]
pub enum Error {
// Service replied with a non-success status; carries the decoded error body.
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::ErrorResponseModel,
},
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
// NOTE(review): unused by this body-less GET; kept for generator uniformity.
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
pub async fn tag_classification_version(
operation_config: &crate::OperationConfig,
classification_rule_name: &str,
classification_rule_version: i32,
action: &str,
) -> std::result::Result<models::OperationResponse, tag_classification_version::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/classificationrules/{}/versions/{}/:tag",
operation_config.base_path(),
classification_rule_name,
classification_rule_version
);
let mut url = url::Url::parse(url_str).map_err(tag_classification_version::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::POST);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(tag_classification_version::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
url.query_pairs_mut().append_pair("action", action);
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.header(http::header::CONTENT_LENGTH, 0);
req_builder = req_builder.uri(url.as_str());
let req = req_builder
.body(req_body)
.map_err(tag_classification_version::Error::BuildRequestError)?;
let rsp = http_client
.execute_request(req)
.await
.map_err(tag_classification_version::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::ACCEPTED => {
let rsp_body = rsp.body();
let rsp_value: models::OperationResponse = serde_json::from_slice(rsp_body)
.map_err(|source| tag_classification_version::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
let rsp_value: models::ErrorResponseModel = serde_json::from_slice(rsp_body)
.map_err(|source| tag_classification_version::Error::DeserializeError(source, rsp_body.clone()))?;
Err(tag_classification_version::Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
}
pub mod tag_classification_version {
use super::{models, API_VERSION};
// Failure modes of `tag_classification_version`, one variant per stage of
// the request pipeline (URL parse -> auth -> build -> send -> decode).
#[derive(Debug, thiserror :: Error)]
pub enum Error {
// Service replied with a non-202 status; carries the decoded error body.
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::ErrorResponseModel,
},
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
// NOTE(review): unused by this body-less POST; kept for generator uniformity.
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
}
pub mod data_sources {
use super::{models, API_VERSION};
pub async fn get(
operation_config: &crate::OperationConfig,
data_source_name: &str,
) -> std::result::Result<models::DataSource, get::Error> {
let http_client = operation_config.http_client();
let url_str = &format!("{}/datasources/{}", operation_config.base_path(), data_source_name);
let mut url = url::Url::parse(url_str).map_err(get::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(get::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(get::Error::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.map_err(get::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: models::DataSource =
serde_json::from_slice(rsp_body).map_err(|source| get::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
let rsp_value: models::ErrorResponseModel =
serde_json::from_slice(rsp_body).map_err(|source| get::Error::DeserializeError(source, rsp_body.clone()))?;
Err(get::Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
}
pub mod get {
use super::{models, API_VERSION};
// Failure modes of `get`, one variant per stage of the request pipeline
// (URL parse -> auth -> build -> send -> decode).
#[derive(Debug, thiserror :: Error)]
pub enum Error {
// Service replied with a non-success status; carries the decoded error body.
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::ErrorResponseModel,
},
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
// NOTE(review): unused by this body-less GET; kept for generator uniformity.
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
pub async fn create_or_update(
operation_config: &crate::OperationConfig,
data_source_name: &str,
body: Option<&models::DataSource>,
) -> std::result::Result<create_or_update::Response, create_or_update::Error> {
let http_client = operation_config.http_client();
let url_str = &format!("{}/datasources/{}", operation_config.base_path(), data_source_name);
let mut url = url::Url::parse(url_str).map_err(create_or_update::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::PUT);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(create_or_update::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
let req_body = if let Some(body) = body {
req_builder = req_builder.header("content-type", "application/json");
azure_core::to_json(body).map_err(create_or_update::Error::SerializeError)?
} else {
bytes::Bytes::from_static(azure_core::EMPTY_BODY)
};
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(create_or_update::Error::BuildRequestError)?;
let rsp = http_client
.execute_request(req)
.await
.map_err(create_or_update::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: models::DataSource = serde_json::from_slice(rsp_body)
.map_err(|source| create_or_update::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(create_or_update::Response::Ok200(rsp_value))
}
http::StatusCode::CREATED => {
let rsp_body = rsp.body();
let rsp_value: models::DataSource = serde_json::from_slice(rsp_body)
.map_err(|source| create_or_update::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(create_or_update::Response::Created201(rsp_value))
}
status_code => {
let rsp_body = rsp.body();
let rsp_value: models::ErrorResponseModel = serde_json::from_slice(rsp_body)
.map_err(|source| create_or_update::Error::DeserializeError(source, rsp_body.clone()))?;
Err(create_or_update::Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
}
pub mod create_or_update {
use super::{models, API_VERSION};
// Success payloads: 200 (resource replaced) vs 201 (resource created).
#[derive(Debug)]
pub enum Response {
Ok200(models::DataSource),
Created201(models::DataSource),
}
// Failure modes of `create_or_update`, one variant per stage of the
// request pipeline (URL parse -> auth -> serialize -> build -> send -> decode).
#[derive(Debug, thiserror :: Error)]
pub enum Error {
// Service replied with a non-success status; carries the decoded error body.
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::ErrorResponseModel,
},
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
pub async fn delete(
operation_config: &crate::OperationConfig,
data_source_name: &str,
) -> std::result::Result<delete::Response, delete::Error> {
let http_client = operation_config.http_client();
let url_str = &format!("{}/datasources/{}", operation_config.base_path(), data_source_name);
let mut url = url::Url::parse(url_str).map_err(delete::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::DELETE);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(delete::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(delete::Error::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.map_err(delete::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: models::DataSource =
serde_json::from_slice(rsp_body).map_err(|source| delete::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(delete::Response::Ok200(rsp_value))
}
http::StatusCode::NO_CONTENT => Ok(delete::Response::NoContent204),
status_code => {
let rsp_body = rsp.body();
let rsp_value: models::ErrorResponseModel =
serde_json::from_slice(rsp_body).map_err(|source| delete::Error::DeserializeError(source, rsp_body.clone()))?;
Err(delete::Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
}
pub mod delete {
use super::{models, API_VERSION};
// Success payloads: 200 returns the deleted resource, 204 carries no body.
#[derive(Debug)]
pub enum Response {
Ok200(models::DataSource),
NoContent204,
}
// Failure modes of `delete`, one variant per stage of the request pipeline
// (URL parse -> auth -> build -> send -> decode).
#[derive(Debug, thiserror :: Error)]
pub enum Error {
// Service replied with a non-success status; carries the decoded error body.
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::ErrorResponseModel,
},
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
// NOTE(review): unused by this body-less DELETE; kept for generator uniformity.
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
pub async fn list_all(operation_config: &crate::OperationConfig) -> std::result::Result<models::DataSourceList, list_all::Error> {
let http_client = operation_config.http_client();
let url_str = &format!("{}/datasources", operation_config.base_path(),);
let mut url = url::Url::parse(url_str).map_err(list_all::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(list_all::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(list_all::Error::BuildRequestError)?;
let rsp = http_client
.execute_request(req)
.await
.map_err(list_all::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: models::DataSourceList =
serde_json::from_slice(rsp_body).map_err(|source| list_all::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
let rsp_value: models::ErrorResponseModel =
serde_json::from_slice(rsp_body).map_err(|source| list_all::Error::DeserializeError(source, rsp_body.clone()))?;
Err(list_all::Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
}
pub mod list_all {
use super::{models, API_VERSION};
// Failure modes of `list_all`, one variant per stage of the request
// pipeline (URL parse -> auth -> build -> send -> decode).
#[derive(Debug, thiserror :: Error)]
pub enum Error {
// Service replied with a non-success status; carries the decoded error body.
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::ErrorResponseModel,
},
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
// NOTE(review): unused by this body-less GET; kept for generator uniformity.
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
}
pub mod filters {
use super::{models, API_VERSION};
pub async fn get(
operation_config: &crate::OperationConfig,
data_source_name: &str,
scan_name: &str,
) -> std::result::Result<models::Filter, get::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/datasources/{}/scans/{}/filters/custom",
operation_config.base_path(),
data_source_name,
scan_name
);
let mut url = url::Url::parse(url_str).map_err(get::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(get::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(get::Error::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.map_err(get::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: models::Filter =
serde_json::from_slice(rsp_body).map_err(|source| get::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
let rsp_value: models::ErrorResponseModel =
serde_json::from_slice(rsp_body).map_err(|source| get::Error::DeserializeError(source, rsp_body.clone()))?;
Err(get::Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
}
pub mod get {
use super::{models, API_VERSION};
// Failure modes of `get`, one variant per stage of the request pipeline
// (URL parse -> auth -> build -> send -> decode).
#[derive(Debug, thiserror :: Error)]
pub enum Error {
// Service replied with a non-success status; carries the decoded error body.
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::ErrorResponseModel,
},
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
// NOTE(review): unused by this body-less GET; kept for generator uniformity.
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
pub async fn create_or_update(
operation_config: &crate::OperationConfig,
data_source_name: &str,
scan_name: &str,
body: Option<&models::Filter>,
) -> std::result::Result<create_or_update::Response, create_or_update::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/datasources/{}/scans/{}/filters/custom",
operation_config.base_path(),
data_source_name,
scan_name
);
let mut url = url::Url::parse(url_str).map_err(create_or_update::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::PUT);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(create_or_update::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
let req_body = if let Some(body) = body {
req_builder = req_builder.header("content-type", "application/json");
azure_core::to_json(body).map_err(create_or_update::Error::SerializeError)?
} else {
bytes::Bytes::from_static(azure_core::EMPTY_BODY)
};
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(create_or_update::Error::BuildRequestError)?;
let rsp = http_client
.execute_request(req)
.await
.map_err(create_or_update::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: models::Filter = serde_json::from_slice(rsp_body)
.map_err(|source| create_or_update::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(create_or_update::Response::Ok200(rsp_value))
}
http::StatusCode::CREATED => {
let rsp_body = rsp.body();
let rsp_value: models::Filter = serde_json::from_slice(rsp_body)
.map_err(|source| create_or_update::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(create_or_update::Response::Created201(rsp_value))
}
status_code => {
let rsp_body = rsp.body();
let rsp_value: models::ErrorResponseModel = serde_json::from_slice(rsp_body)
.map_err(|source| create_or_update::Error::DeserializeError(source, rsp_body.clone()))?;
Err(create_or_update::Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
}
pub mod create_or_update {
use super::{models, API_VERSION};
// Success payloads: 200 (filter replaced) vs 201 (filter created).
#[derive(Debug)]
pub enum Response {
Ok200(models::Filter),
Created201(models::Filter),
}
// Failure modes of `create_or_update`, one variant per stage of the
// request pipeline (URL parse -> auth -> serialize -> build -> send -> decode).
#[derive(Debug, thiserror :: Error)]
pub enum Error {
// Service replied with a non-success status; carries the decoded error body.
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::ErrorResponseModel,
},
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
}
pub mod scans {
use super::{models, API_VERSION};
pub async fn get(
operation_config: &crate::OperationConfig,
data_source_name: &str,
scan_name: &str,
) -> std::result::Result<models::Scan, get::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/datasources/{}/scans/{}",
operation_config.base_path(),
data_source_name,
scan_name
);
let mut url = url::Url::parse(url_str).map_err(get::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(get::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(get::Error::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.map_err(get::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: models::Scan =
serde_json::from_slice(rsp_body).map_err(|source| get::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
let rsp_value: models::ErrorResponseModel =
serde_json::from_slice(rsp_body).map_err(|source| get::Error::DeserializeError(source, rsp_body.clone()))?;
Err(get::Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
}
pub mod get {
use super::{models, API_VERSION};
// Failure modes of `get`, one variant per stage of the request pipeline
// (URL parse -> auth -> build -> send -> decode).
#[derive(Debug, thiserror :: Error)]
pub enum Error {
// Service replied with a non-success status; carries the decoded error body.
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::ErrorResponseModel,
},
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
// NOTE(review): unused by this body-less GET; kept for generator uniformity.
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
pub async fn create_or_update(
operation_config: &crate::OperationConfig,
data_source_name: &str,
scan_name: &str,
body: &models::Scan,
) -> std::result::Result<create_or_update::Response, create_or_update::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/datasources/{}/scans/{}",
operation_config.base_path(),
data_source_name,
scan_name
);
let mut url = url::Url::parse(url_str).map_err(create_or_update::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::PUT);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(create_or_update::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
req_builder = req_builder.header("content-type", "application/json");
let req_body = azure_core::to_json(body).map_err(create_or_update::Error::SerializeError)?;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(create_or_update::Error::BuildRequestError)?;
let rsp = http_client
.execute_request(req)
.await
.map_err(create_or_update::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: models::Scan = serde_json::from_slice(rsp_body)
.map_err(|source| create_or_update::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(create_or_update::Response::Ok200(rsp_value))
}
http::StatusCode::CREATED => {
let rsp_body = rsp.body();
let rsp_value: models::Scan = serde_json::from_slice(rsp_body)
.map_err(|source| create_or_update::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(create_or_update::Response::Created201(rsp_value))
}
status_code => {
let rsp_body = rsp.body();
let rsp_value: models::ErrorResponseModel = serde_json::from_slice(rsp_body)
.map_err(|source| create_or_update::Error::DeserializeError(source, rsp_body.clone()))?;
Err(create_or_update::Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
}
pub mod create_or_update {
use super::{models, API_VERSION};
// Success payloads: 200 (scan replaced) vs 201 (scan created).
#[derive(Debug)]
pub enum Response {
Ok200(models::Scan),
Created201(models::Scan),
}
// Failure modes of `create_or_update`, one variant per stage of the
// request pipeline (URL parse -> auth -> serialize -> build -> send -> decode).
#[derive(Debug, thiserror :: Error)]
pub enum Error {
// Service replied with a non-success status; carries the decoded error body.
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::ErrorResponseModel,
},
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
pub async fn delete(
operation_config: &crate::OperationConfig,
data_source_name: &str,
scan_name: &str,
) -> std::result::Result<delete::Response, delete::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/datasources/{}/scans/{}",
operation_config.base_path(),
data_source_name,
scan_name
);
let mut url = url::Url::parse(url_str).map_err(delete::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::DELETE);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(delete::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(delete::Error::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.map_err(delete::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: models::Scan =
serde_json::from_slice(rsp_body).map_err(|source| delete::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(delete::Response::Ok200(rsp_value))
}
http::StatusCode::NO_CONTENT => Ok(delete::Response::NoContent204),
status_code => {
let rsp_body = rsp.body();
let rsp_value: models::ErrorResponseModel =
serde_json::from_slice(rsp_body).map_err(|source| delete::Error::DeserializeError(source, rsp_body.clone()))?;
Err(delete::Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
}
pub mod delete {
use super::{models, API_VERSION};
// Success payloads: 200 returns the deleted scan, 204 carries no body.
#[derive(Debug)]
pub enum Response {
Ok200(models::Scan),
NoContent204,
}
// Failure modes of `delete`, one variant per stage of the request pipeline
// (URL parse -> auth -> build -> send -> decode).
#[derive(Debug, thiserror :: Error)]
pub enum Error {
// Service replied with a non-success status; carries the decoded error body.
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::ErrorResponseModel,
},
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
// NOTE(review): unused by this body-less DELETE; kept for generator uniformity.
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
pub async fn list_by_data_source(
operation_config: &crate::OperationConfig,
data_source_name: &str,
) -> std::result::Result<models::ScanList, list_by_data_source::Error> {
let http_client = operation_config.http_client();
let url_str = &format!("{}/datasources/{}/scans", operation_config.base_path(), data_source_name);
let mut url = url::Url::parse(url_str).map_err(list_by_data_source::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(list_by_data_source::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(list_by_data_source::Error::BuildRequestError)?;
let rsp = http_client
.execute_request(req)
.await
.map_err(list_by_data_source::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: models::ScanList = serde_json::from_slice(rsp_body)
.map_err(|source| list_by_data_source::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
let rsp_value: models::ErrorResponseModel = serde_json::from_slice(rsp_body)
.map_err(|source| list_by_data_source::Error::DeserializeError(source, rsp_body.clone()))?;
Err(list_by_data_source::Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
}
pub mod list_by_data_source {
use super::{models, API_VERSION};
// Failure modes of `list_by_data_source`, one variant per stage of the
// request pipeline (URL parse -> auth -> build -> send -> decode).
#[derive(Debug, thiserror :: Error)]
pub enum Error {
// Service replied with a non-success status; carries the decoded error body.
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::ErrorResponseModel,
},
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
// NOTE(review): unused by this body-less GET; kept for generator uniformity.
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
}
pub mod scan_result {
use super::{models, API_VERSION};
pub async fn run_scan(
operation_config: &crate::OperationConfig,
data_source_name: &str,
scan_name: &str,
run_id: &str,
scan_level: Option<&str>,
) -> std::result::Result<models::OperationResponse, run_scan::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/datasources/{}/scans/{}/runs/{}",
operation_config.base_path(),
data_source_name,
scan_name,
run_id
);
let mut url = url::Url::parse(url_str).map_err(run_scan::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::PUT);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(run_scan::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
if let Some(scan_level) = scan_level {
url.query_pairs_mut().append_pair("scanLevel", scan_level);
}
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(run_scan::Error::BuildRequestError)?;
let rsp = http_client
.execute_request(req)
.await
.map_err(run_scan::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::ACCEPTED => {
let rsp_body = rsp.body();
let rsp_value: models::OperationResponse =
serde_json::from_slice(rsp_body).map_err(|source| run_scan::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
let rsp_value: models::ErrorResponseModel =
serde_json::from_slice(rsp_body).map_err(|source| run_scan::Error::DeserializeError(source, rsp_body.clone()))?;
Err(run_scan::Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
}
pub mod run_scan {
use super::{models, API_VERSION};
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::ErrorResponseModel,
},
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
pub async fn cancel_scan(
operation_config: &crate::OperationConfig,
data_source_name: &str,
scan_name: &str,
run_id: &str,
) -> std::result::Result<models::OperationResponse, cancel_scan::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/datasources/{}/scans/{}/runs/{}/:cancel",
operation_config.base_path(),
data_source_name,
scan_name,
run_id
);
let mut url = url::Url::parse(url_str).map_err(cancel_scan::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::POST);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(cancel_scan::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.header(http::header::CONTENT_LENGTH, 0);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(cancel_scan::Error::BuildRequestError)?;
let rsp = http_client
.execute_request(req)
.await
.map_err(cancel_scan::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::ACCEPTED => {
let rsp_body = rsp.body();
let rsp_value: models::OperationResponse =
serde_json::from_slice(rsp_body).map_err(|source| cancel_scan::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
let rsp_value: models::ErrorResponseModel =
serde_json::from_slice(rsp_body).map_err(|source| cancel_scan::Error::DeserializeError(source, rsp_body.clone()))?;
Err(cancel_scan::Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
}
pub mod cancel_scan {
use super::{models, API_VERSION};
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::ErrorResponseModel,
},
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
pub async fn list_scan_history(
operation_config: &crate::OperationConfig,
data_source_name: &str,
scan_name: &str,
) -> std::result::Result<models::ScanHistoryList, list_scan_history::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/datasources/{}/scans/{}/runs",
operation_config.base_path(),
data_source_name,
scan_name
);
let mut url = url::Url::parse(url_str).map_err(list_scan_history::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(list_scan_history::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(list_scan_history::Error::BuildRequestError)?;
let rsp = http_client
.execute_request(req)
.await
.map_err(list_scan_history::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: models::ScanHistoryList = serde_json::from_slice(rsp_body)
.map_err(|source| list_scan_history::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
let rsp_value: models::ErrorResponseModel = serde_json::from_slice(rsp_body)
.map_err(|source| list_scan_history::Error::DeserializeError(source, rsp_body.clone()))?;
Err(list_scan_history::Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
}
pub mod list_scan_history {
use super::{models, API_VERSION};
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::ErrorResponseModel,
},
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
}
pub mod scan_rulesets {
use super::{models, API_VERSION};
pub async fn get(
operation_config: &crate::OperationConfig,
scan_ruleset_name: &str,
) -> std::result::Result<models::ScanRuleset, get::Error> {
let http_client = operation_config.http_client();
let url_str = &format!("{}/scanrulesets/{}", operation_config.base_path(), scan_ruleset_name);
let mut url = url::Url::parse(url_str).map_err(get::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(get::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(get::Error::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.map_err(get::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: models::ScanRuleset =
serde_json::from_slice(rsp_body).map_err(|source| get::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
let rsp_value: models::ErrorResponseModel =
serde_json::from_slice(rsp_body).map_err(|source| get::Error::DeserializeError(source, rsp_body.clone()))?;
Err(get::Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
}
pub mod get {
use super::{models, API_VERSION};
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::ErrorResponseModel,
},
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
pub async fn create_or_update(
operation_config: &crate::OperationConfig,
scan_ruleset_name: &str,
body: Option<&models::ScanRuleset>,
) -> std::result::Result<create_or_update::Response, create_or_update::Error> {
let http_client = operation_config.http_client();
let url_str = &format!("{}/scanrulesets/{}", operation_config.base_path(), scan_ruleset_name);
let mut url = url::Url::parse(url_str).map_err(create_or_update::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::PUT);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(create_or_update::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
let req_body = if let Some(body) = body {
req_builder = req_builder.header("content-type", "application/json");
azure_core::to_json(body).map_err(create_or_update::Error::SerializeError)?
} else {
bytes::Bytes::from_static(azure_core::EMPTY_BODY)
};
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(create_or_update::Error::BuildRequestError)?;
let rsp = http_client
.execute_request(req)
.await
.map_err(create_or_update::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: models::ScanRuleset = serde_json::from_slice(rsp_body)
.map_err(|source| create_or_update::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(create_or_update::Response::Ok200(rsp_value))
}
http::StatusCode::CREATED => {
let rsp_body = rsp.body();
let rsp_value: models::ScanRuleset = serde_json::from_slice(rsp_body)
.map_err(|source| create_or_update::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(create_or_update::Response::Created201(rsp_value))
}
status_code => {
let rsp_body = rsp.body();
let rsp_value: models::ErrorResponseModel = serde_json::from_slice(rsp_body)
.map_err(|source| create_or_update::Error::DeserializeError(source, rsp_body.clone()))?;
Err(create_or_update::Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
}
pub mod create_or_update {
use super::{models, API_VERSION};
#[derive(Debug)]
pub enum Response {
Ok200(models::ScanRuleset),
Created201(models::ScanRuleset),
}
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::ErrorResponseModel,
},
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
pub async fn delete(
operation_config: &crate::OperationConfig,
scan_ruleset_name: &str,
) -> std::result::Result<delete::Response, delete::Error> {
let http_client = operation_config.http_client();
let url_str = &format!("{}/scanrulesets/{}", operation_config.base_path(), scan_ruleset_name);
let mut url = url::Url::parse(url_str).map_err(delete::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::DELETE);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(delete::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(delete::Error::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.map_err(delete::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: models::ScanRuleset =
serde_json::from_slice(rsp_body).map_err(|source| delete::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(delete::Response::Ok200(rsp_value))
}
http::StatusCode::NO_CONTENT => Ok(delete::Response::NoContent204),
status_code => {
let rsp_body = rsp.body();
let rsp_value: models::ErrorResponseModel =
serde_json::from_slice(rsp_body).map_err(|source| delete::Error::DeserializeError(source, rsp_body.clone()))?;
Err(delete::Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
}
pub mod delete {
use super::{models, API_VERSION};
#[derive(Debug)]
pub enum Response {
Ok200(models::ScanRuleset),
NoContent204,
}
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::ErrorResponseModel,
},
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
pub async fn list_all(operation_config: &crate::OperationConfig) -> std::result::Result<models::ScanRulesetList, list_all::Error> {
let http_client = operation_config.http_client();
let url_str = &format!("{}/scanrulesets", operation_config.base_path(),);
let mut url = url::Url::parse(url_str).map_err(list_all::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(list_all::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(list_all::Error::BuildRequestError)?;
let rsp = http_client
.execute_request(req)
.await
.map_err(list_all::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: models::ScanRulesetList =
serde_json::from_slice(rsp_body).map_err(|source| list_all::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
let rsp_value: models::ErrorResponseModel =
serde_json::from_slice(rsp_body).map_err(|source| list_all::Error::DeserializeError(source, rsp_body.clone()))?;
Err(list_all::Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
}
pub mod list_all {
use super::{models, API_VERSION};
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::ErrorResponseModel,
},
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
}
pub mod system_scan_rulesets {
use super::{models, API_VERSION};
pub async fn list_all(
operation_config: &crate::OperationConfig,
) -> std::result::Result<models::SystemScanRulesetList, list_all::Error> {
let http_client = operation_config.http_client();
let url_str = &format!("{}/systemScanRulesets", operation_config.base_path(),);
let mut url = url::Url::parse(url_str).map_err(list_all::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(list_all::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(list_all::Error::BuildRequestError)?;
let rsp = http_client
.execute_request(req)
.await
.map_err(list_all::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: models::SystemScanRulesetList =
serde_json::from_slice(rsp_body).map_err(|source| list_all::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
let rsp_value: models::ErrorResponseModel =
serde_json::from_slice(rsp_body).map_err(|source| list_all::Error::DeserializeError(source, rsp_body.clone()))?;
Err(list_all::Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
}
pub mod list_all {
use super::{models, API_VERSION};
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::ErrorResponseModel,
},
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
pub async fn get(
operation_config: &crate::OperationConfig,
data_source_type: &str,
) -> std::result::Result<models::SystemScanRuleset, get::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/systemScanRulesets/datasources/{}",
operation_config.base_path(),
data_source_type
);
let mut url = url::Url::parse(url_str).map_err(get::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(get::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(get::Error::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.map_err(get::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: models::SystemScanRuleset =
serde_json::from_slice(rsp_body).map_err(|source| get::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
let rsp_value: models::ErrorResponseModel =
serde_json::from_slice(rsp_body).map_err(|source| get::Error::DeserializeError(source, rsp_body.clone()))?;
Err(get::Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
}
pub mod get {
use super::{models, API_VERSION};
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::ErrorResponseModel,
},
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
pub async fn get_by_version(
operation_config: &crate::OperationConfig,
data_source_type: Option<&str>,
version: i32,
) -> std::result::Result<models::SystemScanRuleset, get_by_version::Error> {
let http_client = operation_config.http_client();
let url_str = &format!("{}/systemScanRulesets/versions/{}", operation_config.base_path(), version);
let mut url = url::Url::parse(url_str).map_err(get_by_version::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(get_by_version::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
if let Some(data_source_type) = data_source_type {
url.query_pairs_mut().append_pair("dataSourceType", data_source_type);
}
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(get_by_version::Error::BuildRequestError)?;
let rsp = http_client
.execute_request(req)
.await
.map_err(get_by_version::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: models::SystemScanRuleset =
serde_json::from_slice(rsp_body).map_err(|source| get_by_version::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
let rsp_value: models::ErrorResponseModel =
serde_json::from_slice(rsp_body).map_err(|source| get_by_version::Error::DeserializeError(source, rsp_body.clone()))?;
Err(get_by_version::Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
}
pub mod get_by_version {
use super::{models, API_VERSION};
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::ErrorResponseModel,
},
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
pub async fn get_latest(
operation_config: &crate::OperationConfig,
data_source_type: Option<&str>,
) -> std::result::Result<models::SystemScanRuleset, get_latest::Error> {
let http_client = operation_config.http_client();
let url_str = &format!("{}/systemScanRulesets/versions/latest", operation_config.base_path(),);
let mut url = url::Url::parse(url_str).map_err(get_latest::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(get_latest::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
if let Some(data_source_type) = data_source_type {
url.query_pairs_mut().append_pair("dataSourceType", data_source_type);
}
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(get_latest::Error::BuildRequestError)?;
let rsp = http_client
.execute_request(req)
.await
.map_err(get_latest::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: models::SystemScanRuleset =
serde_json::from_slice(rsp_body).map_err(|source| get_latest::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
let rsp_value: models::ErrorResponseModel =
serde_json::from_slice(rsp_body).map_err(|source| get_latest::Error::DeserializeError(source, rsp_body.clone()))?;
Err(get_latest::Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
}
pub mod get_latest {
use super::{models, API_VERSION};
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::ErrorResponseModel,
},
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
pub async fn list_versions_by_data_source(
operation_config: &crate::OperationConfig,
data_source_type: Option<&str>,
) -> std::result::Result<models::SystemScanRulesetList, list_versions_by_data_source::Error> {
let http_client = operation_config.http_client();
let url_str = &format!("{}/systemScanRulesets/versions", operation_config.base_path(),);
let mut url = url::Url::parse(url_str).map_err(list_versions_by_data_source::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(list_versions_by_data_source::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
if let Some(data_source_type) = data_source_type {
url.query_pairs_mut().append_pair("dataSourceType", data_source_type);
}
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder
.body(req_body)
.map_err(list_versions_by_data_source::Error::BuildRequestError)?;
let rsp = http_client
.execute_request(req)
.await
.map_err(list_versions_by_data_source::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: models::SystemScanRulesetList = serde_json::from_slice(rsp_body)
.map_err(|source| list_versions_by_data_source::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
let rsp_value: models::ErrorResponseModel = serde_json::from_slice(rsp_body)
.map_err(|source| list_versions_by_data_source::Error::DeserializeError(source, rsp_body.clone()))?;
Err(list_versions_by_data_source::Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
}
pub mod list_versions_by_data_source {
use super::{models, API_VERSION};
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::ErrorResponseModel,
},
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
}
/// Client operations for the default trigger attached to a scan.
pub mod triggers {
use super::{models, API_VERSION};
/// Fetch the default trigger of `scan_name` under `data_source_name`
/// (GET {base}/datasources/{ds}/scans/{scan}/triggers/default).
pub async fn get_trigger(
operation_config: &crate::OperationConfig,
data_source_name: &str,
scan_name: &str,
) -> std::result::Result<models::Trigger, get_trigger::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/datasources/{}/scans/{}/triggers/default",
operation_config.base_path(),
data_source_name,
scan_name
);
let mut url = url::Url::parse(url_str).map_err(get_trigger::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
// Attach a bearer token only when the config carries a credential.
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(get_trigger::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
// GET carries no payload.
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(get_trigger::Error::BuildRequestError)?;
let rsp = http_client
.execute_request(req)
.await
.map_err(get_trigger::Error::ExecuteRequestError)?;
// 200 -> Trigger; any other status -> service error body.
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: models::Trigger =
serde_json::from_slice(rsp_body).map_err(|source| get_trigger::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
let rsp_value: models::ErrorResponseModel =
serde_json::from_slice(rsp_body).map_err(|source| get_trigger::Error::DeserializeError(source, rsp_body.clone()))?;
Err(get_trigger::Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
}
/// Error type for [`get_trigger`].
pub mod get_trigger {
use super::{models, API_VERSION};
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::ErrorResponseModel,
},
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
/// Create or replace the default trigger (PUT). Returns `Ok200` when an
/// existing trigger was updated, `Created201` when a new one was created.
pub async fn create_trigger(
operation_config: &crate::OperationConfig,
data_source_name: &str,
scan_name: &str,
body: &models::Trigger,
) -> std::result::Result<create_trigger::Response, create_trigger::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/datasources/{}/scans/{}/triggers/default",
operation_config.base_path(),
data_source_name,
scan_name
);
let mut url = url::Url::parse(url_str).map_err(create_trigger::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::PUT);
// Attach a bearer token only when the config carries a credential.
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(create_trigger::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
// PUT sends the trigger definition as JSON.
req_builder = req_builder.header("content-type", "application/json");
let req_body = azure_core::to_json(body).map_err(create_trigger::Error::SerializeError)?;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(create_trigger::Error::BuildRequestError)?;
let rsp = http_client
.execute_request(req)
.await
.map_err(create_trigger::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: models::Trigger =
serde_json::from_slice(rsp_body).map_err(|source| create_trigger::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(create_trigger::Response::Ok200(rsp_value))
}
http::StatusCode::CREATED => {
let rsp_body = rsp.body();
let rsp_value: models::Trigger =
serde_json::from_slice(rsp_body).map_err(|source| create_trigger::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(create_trigger::Response::Created201(rsp_value))
}
status_code => {
let rsp_body = rsp.body();
let rsp_value: models::ErrorResponseModel =
serde_json::from_slice(rsp_body).map_err(|source| create_trigger::Error::DeserializeError(source, rsp_body.clone()))?;
Err(create_trigger::Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
}
/// Response and error types for [`create_trigger`].
pub mod create_trigger {
use super::{models, API_VERSION};
#[derive(Debug)]
pub enum Response {
Ok200(models::Trigger),
Created201(models::Trigger),
}
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::ErrorResponseModel,
},
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
/// Delete the default trigger (DELETE). Returns `Ok200` with the deleted
/// trigger, or `NoContent204` when the service sends an empty response.
pub async fn delete_trigger(
operation_config: &crate::OperationConfig,
data_source_name: &str,
scan_name: &str,
) -> std::result::Result<delete_trigger::Response, delete_trigger::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/datasources/{}/scans/{}/triggers/default",
operation_config.base_path(),
data_source_name,
scan_name
);
let mut url = url::Url::parse(url_str).map_err(delete_trigger::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::DELETE);
// Attach a bearer token only when the config carries a credential.
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(delete_trigger::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
// DELETE carries no payload.
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(delete_trigger::Error::BuildRequestError)?;
let rsp = http_client
.execute_request(req)
.await
.map_err(delete_trigger::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: models::Trigger =
serde_json::from_slice(rsp_body).map_err(|source| delete_trigger::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(delete_trigger::Response::Ok200(rsp_value))
}
http::StatusCode::NO_CONTENT => Ok(delete_trigger::Response::NoContent204),
status_code => {
let rsp_body = rsp.body();
let rsp_value: models::ErrorResponseModel =
serde_json::from_slice(rsp_body).map_err(|source| delete_trigger::Error::DeserializeError(source, rsp_body.clone()))?;
Err(delete_trigger::Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
}
/// Response and error types for [`delete_trigger`].
pub mod delete_trigger {
use super::{models, API_VERSION};
#[derive(Debug)]
pub enum Response {
Ok200(models::Trigger),
NoContent204,
}
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::ErrorResponseModel,
},
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
}
|
use crate::context::Context;
use crate::errors::{ArgumentError, ShellError};
use crate::parser::registry::{NamedType, PositionalType, Signature};
use crate::parser::{baseline_parse_tokens, CallNode};
use crate::parser::{
hir::{self, NamedArguments},
Flag, RawToken, TokenNode,
};
use crate::traits::ToDebug;
use crate::{Tag, Tagged, TaggedItem, Text};
use log::trace;
/// Turn a raw `CallNode` into a typed `hir::Call` according to the
/// command's `Signature`.
pub fn parse_command(
    config: &Signature,
    context: &Context,
    call: &Tagged<CallNode>,
    source: &Text,
) -> Result<hir::Call, ShellError> {
    trace!("Processing {:?}", config);

    let Tagged { item: raw_call, .. } = call;
    let head = parse_command_head(call.head())?;

    // Drop whitespace tokens from the argument list, if one exists.
    let children: Option<Vec<TokenNode>> = raw_call.children().as_ref().map(|nodes| {
        let mut kept = Vec::new();
        for node in nodes.iter() {
            let is_whitespace = if let TokenNode::Whitespace(_) = node {
                true
            } else {
                false
            };
            if !is_whitespace {
                kept.push(node.clone());
            }
        }
        kept
    });

    let (positional, named) =
        match parse_command_tail(&config, context, children, source, call.tag())? {
            None => (None, None),
            Some(tail) => tail,
        };
    Ok(hir::Call::new(Box::new(head), positional, named))
}
/// Parse the head (command name) token into an HIR expression.
fn parse_command_head(head: &TokenNode) -> Result<hir::Expression, ShellError> {
    match head {
        // A bare word becomes a bare literal carrying the token's tag.
        TokenNode::Token(
            token @ Tagged {
                item: RawToken::Bare,
                ..
            },
        ) => Ok(token.map(|_| hir::RawExpression::Literal(hir::Literal::Bare))),
        // A string token becomes a string literal pointing at the inner span.
        TokenNode::Token(Tagged {
            item: RawToken::String(inner_tag),
            tag,
        }) => Ok(hir::RawExpression::Literal(hir::Literal::String(*inner_tag)).tagged(*tag)),
        // Anything else cannot start a command.
        other => {
            let message = format!("command head -> {:?}", other);
            Err(ShellError::unexpected(&message))
        }
    }
}
/// Parse the arguments of a command (everything after the head) against the
/// signature in `config`, producing positional expressions and named arguments.
///
/// NOTE(review): despite the `Option` in the return type, this function
/// currently always returns `Ok(Some(..))`.
fn parse_command_tail(
config: &Signature,
context: &Context,
tail: Option<Vec<TokenNode>>,
source: &Text,
command_tag: Tag,
) -> Result<Option<(Option<Vec<hir::Expression>>, Option<NamedArguments>)>, ShellError> {
// Wrap the (possibly absent) argument tokens in a restartable iterator.
let tail = &mut match &tail {
None => hir::TokensIterator::new(&[]),
Some(tail) => hir::TokensIterator::new(tail),
};
let mut named = NamedArguments::new();
trace_remaining("nodes", tail.clone(), source);
// Pass 1: extract named flags (switches, mandatory and optional flags),
// removing each from the token stream as it is found.
for (name, kind) in &config.named {
trace!(target: "nu::parse", "looking for {} : {:?}", name, kind);
match kind {
NamedType::Switch => {
// A switch is present-or-absent; no value follows it.
let flag = extract_switch(name, tail, source);
named.insert_switch(name, flag);
}
NamedType::Mandatory(syntax_type) => {
match extract_mandatory(config, name, tail, source, command_tag) {
Err(err) => return Err(err), // produce a correct diagnostic
Ok((pos, flag)) => {
// Jump to where the flag was and parse its value there.
tail.move_to(pos);
if tail.at_end() {
return Err(ShellError::argument_error(
config.name.clone(),
ArgumentError::MissingValueForName(name.to_string()),
flag.tag(),
));
}
let expr =
hir::baseline_parse_next_expr(tail, context, source, *syntax_type)?;
// Rewind so later passes scan from the beginning again.
tail.restart();
named.insert_mandatory(name, expr);
}
}
}
NamedType::Optional(syntax_type) => match extract_optional(name, tail, source) {
Err(err) => return Err(err), // produce a correct diagnostic
Ok(Some((pos, flag))) => {
tail.move_to(pos);
if tail.at_end() {
return Err(ShellError::argument_error(
config.name.clone(),
ArgumentError::MissingValueForName(name.to_string()),
flag.tag(),
));
}
let expr = hir::baseline_parse_next_expr(tail, context, source, *syntax_type)?;
tail.restart();
named.insert_optional(name, Some(expr));
}
Ok(None) => {
// Flag absent: record an explicit "no value" entry.
tail.restart();
named.insert_optional(name, None);
}
},
};
}
trace_remaining("after named", tail.clone(), source);
let mut positional = vec![];
// Pass 2: parse positional arguments in signature order.
for arg in &config.positional {
trace!("Processing positional {:?}", arg);
match arg {
PositionalType::Mandatory(..) => {
// A missing mandatory positional is an error at the command's tag.
if tail.len() == 0 {
return Err(ShellError::argument_error(
config.name.clone(),
ArgumentError::MissingMandatoryPositional(arg.name().to_string()),
command_tag,
));
}
}
PositionalType::Optional(..) => {
// Optional positionals simply stop when input runs out.
if tail.len() == 0 {
break;
}
}
}
let result = hir::baseline_parse_next_expr(tail, context, source, arg.syntax_type())?;
positional.push(result);
}
trace_remaining("after positional", tail.clone(), source);
// Pass 3: a rest-positional swallows every remaining token.
if let Some(syntax_type) = config.rest_positional {
let remainder = baseline_parse_tokens(tail, context, source, syntax_type)?;
positional.extend(remainder);
}
trace_remaining("after rest", tail.clone(), source);
trace!("Constructed positional={:?} named={:?}", positional, named);
// Normalize empty collections to `None`.
let positional = if positional.len() == 0 {
None
} else {
Some(positional)
};
// TODO: Error if extra unconsumed positional arguments
let named = if named.named.is_empty() {
None
} else {
Some(named)
};
trace!("Normalized positional={:?} named={:?}", positional, named);
Ok(Some((positional, named)))
}
/// Find a `--name` switch among the remaining tokens, returning the bare flag
/// (its position is not needed since a switch carries no value).
fn extract_switch(name: &str, tokens: &mut hir::TokensIterator<'_>, source: &Text) -> Option<Flag> {
    match tokens.extract(|t| t.as_flag(name, source)) {
        Some((_position, flag)) => Some(flag.item),
        None => None,
    }
}
/// Locate a mandatory `--name` flag; report an argument error at `tag` when
/// the flag is absent.
fn extract_mandatory(
    config: &Signature,
    name: &str,
    tokens: &mut hir::TokensIterator<'_>,
    source: &Text,
    tag: Tag,
) -> Result<(usize, Tagged<Flag>), ShellError> {
    match tokens.extract(|t| t.as_flag(name, source)) {
        Some((position, flag)) => {
            // Consume the flag token so later passes do not see it again.
            tokens.remove(position);
            Ok((position, flag))
        }
        None => Err(ShellError::argument_error(
            config.name.clone(),
            ArgumentError::MissingMandatoryFlag(name.to_string()),
            tag,
        )),
    }
}
fn extract_optional(
name: &str,
tokens: &mut hir::TokensIterator<'_>,
source: &Text,
) -> Result<(Option<(usize, Tagged<Flag>)>), ShellError> {
let flag = tokens.extract(|t| t.as_flag(name, source));
match flag {
None => Ok(None),
Some((pos, flag)) => {
tokens.remove(pos);
Ok(Some((pos, flag)))
}
}
}
pub fn trace_remaining(desc: &'static str, tail: hir::TokensIterator<'_>, source: &Text) {
trace!(
"{} = {:?}",
desc,
itertools::join(
tail.debug_remaining()
.iter()
.map(|i| format!("%{}%", i.debug(&source))),
" "
)
);
}
|
/*
 * Copyright Stalwart Labs Ltd. See the COPYING
 * file at the top-level directory of this distribution.
 *
 * Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
 * https://www.apache.org/licenses/LICENSE-2.0> or the MIT license
 * <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your
 * option. This file may not be copied, modified, or distributed
 * except according to those terms.
 */
#[cfg(feature = "async")]
use jmap_client::{client::Client, core::query, email, mailbox};
/// Demonstrates JMAP "result references": the output of one method call in a
/// request is fed as the input of a later call in the same request.
#[cfg(feature = "async")]
async fn result_reference() {
// Connect to the JMAP server using Basic authentication
let client = Client::new()
.credentials(("john@example.org", "secret"))
.connect("https://jmap.example.org")
.await
.unwrap();
// Delete e-mails matching a filter
let mut request = client.build();
// Query draft e-mails from "bill" and keep a reference to the result set.
let result_ref = request
.query_email()
.filter(query::Filter::and([
email::query::Filter::has_keyword("$draft"),
email::query::Filter::from("bill"),
]))
.result_reference();
// Destroy exactly the e-mails matched by the query above, in one request.
request.set_email().destroy_ref(result_ref);
let _destroyed_ids = request
.send()
.await
.unwrap()
.unwrap_method_responses()
.pop()
.unwrap()
.unwrap_set_email()
.unwrap()
.take_destroyed_ids();
// Fetch mailboxes matching a filter
let mut request = client.build();
let query_result = request
.query_mailbox()
.filter(query::Filter::and([
mailbox::query::Filter::has_any_role(false),
mailbox::query::Filter::is_subscribed(true),
]))
.result_reference();
// Fetch only selected properties of the mailboxes the query matched.
request.get_mailbox().ids_ref(query_result).properties([
mailbox::Property::Id,
mailbox::Property::Name,
mailbox::Property::ParentId,
mailbox::Property::TotalEmails,
mailbox::Property::UnreadEmails,
]);
let _mailboxes = request
.send()
.await
.unwrap()
.unwrap_method_responses()
.pop()
.unwrap()
.unwrap_get_mailbox()
.unwrap()
.take_list();
// Fetch only the updated properties of all mailboxes that changed
// since a state.
let mut request = client.build();
let changes_request = request.changes_mailbox("n").max_changes(0);
// Both the changed ids and the changed-property list come from the
// /changes response via references.
let properties_ref = changes_request.updated_properties_reference();
let updated_ref = changes_request.updated_reference();
request
.get_mailbox()
.ids_ref(updated_ref)
.properties_ref(properties_ref);
for mailbox in request
.send()
.await
.unwrap()
.unwrap_method_responses()
.pop()
.unwrap()
.unwrap_get_mailbox()
.unwrap()
.take_list()
{
println!("Changed mailbox: {:#?}", mailbox);
}
}
fn main() {
#[cfg(feature = "async")]
// NOTE(review): this only constructs the future; it is never awaited or
// driven by an executor, so `result_reference` does not actually run.
// Presumably intentional as a compile-only example — confirm, or run it
// under an async runtime.
let _c = result_reference();
}
|
// Reverse Polish notation
/// Apply the binary operator `op` to `left` and `right`.
///
/// Panics on an unknown operator (and, via `/`, on division by zero).
fn eval_operation(left: &i32, right: &i32, op: &str) -> i32 {
    if op == "+" {
        left + right
    } else if op == "-" {
        left - right
    } else if op == "*" {
        left * right
    } else if op == "/" {
        left / right
    } else {
        panic!("Invalid operator")
    }
}
/// Evaluates an expression in Reverse Polish notation
///
/// # Errors
/// If an invalid expression is given (too few operands, an unknown token,
/// leftover operands, or division by zero), a message explaining the
/// problem is returned.
///
/// # Examples
/// ```
/// use linguist::expr::rpn;
/// let tokens = vec!["1", "2", "+"];
/// assert_eq!(rpn::eval(&tokens).unwrap(), 3);
/// ```
pub fn eval(ops: &[&str]) -> Result<i32, &'static str> {
    // An empty expression evaluates to 0 by convention (see tests).
    if ops.is_empty() {
        return Ok(0);
    }
    let mut results: Vec<i32> = vec![];
    for op in ops {
        if let Ok(val) = op.parse::<i32>() {
            results.push(val);
        } else if results.len() < 2 {
            return Err("Insufficient operands");
        } else {
            let right = results.pop().unwrap();
            let left = results.pop().unwrap();
            // Fixed: previously an unknown token or division by zero panicked
            // (inside a helper) even though this function's documented
            // contract is to return an error message.
            let value = match *op {
                "+" => left + right,
                "-" => left - right,
                "*" => left * right,
                "/" => {
                    if right == 0 {
                        return Err("Division by zero");
                    }
                    left / right
                }
                _ => return Err("Invalid operator"),
            };
            results.push(value);
        }
    }
    // A valid expression leaves exactly one value on the stack.
    if results.len() == 1 {
        Ok(results.pop().unwrap())
    } else {
        // TODO: Better error messages
        Err("Invalid expression")
    }
}
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn empty_list_eval_produces_0() {
        assert_eq!(0, eval(&[]).unwrap());
    }

    #[test]
    fn single_digit_produces_same_number() {
        for &(expected, token) in &[(0, "0"), (12, "12"), (345, "345"), (6789, "6789")] {
            assert_eq!(expected, eval(&[token]).unwrap());
        }
    }

    #[test]
    fn addition() {
        assert_eq!(3, eval(&["1", "2", "+"]).unwrap());
        assert_eq!(23, eval(&["12", "11", "+"]).unwrap());
    }

    #[test]
    fn subtraction() {
        assert_eq!(-1, eval(&["1", "2", "-"]).unwrap());
        assert_eq!(1, eval(&["12", "11", "-"]).unwrap());
    }

    #[test]
    fn multiplication() {
        assert_eq!(27, eval(&["3", "9", "*"]).unwrap());
        assert_eq!(55, eval(&["5", "11", "*"]).unwrap());
    }

    #[test]
    fn division() {
        assert_eq!(3, eval(&["27", "9", "/"]).unwrap());
        assert_eq!(5, eval(&["55", "11", "/"]).unwrap());
    }

    #[test]
    fn all_operators_work_together() {
        // (5 + (1 + 2) * 4) - 3 == 14
        let tokens = ["5", "1", "2", "+", "4", "*", "+", "3", "-"];
        assert_eq!(14, eval(&tokens).unwrap());
    }
}
|
/// Returns `true` when `s` contains every letter `a`-`z` at least once,
/// case-insensitively; non-letter characters are ignored.
///
/// Fixed: removed a leftover debug `println!` and the deprecated `...`
/// (inclusive range) pattern syntax.
pub fn is_pangram(s: &str) -> bool {
    let mut letters: Vec<char> = s
        .to_lowercase()
        .chars()
        // After lowercasing, pangram letters are exactly the ASCII a-z range.
        .filter(|c| c.is_ascii_lowercase())
        .collect();
    // Sort + dedup leaves one entry per distinct letter.
    letters.sort_unstable();
    letters.dedup();
    letters.len() == 26
}
|
// Note: either the trait or the type implementing it must be local to this
// crate (the orphan rule).
/// Summarization behavior with one required and two provided methods.
pub trait Summary {
/// Required: produce a short summary of the value.
fn summarize(&self) -> String;
/// Provided: returns a fixed placeholder string ("默认实现" = "default implementation").
fn summarize2(&self) -> String {
String::from("默认实现")
}
/// Provided: a default implementation built on top of `summarize`.
fn summarize3(&self) -> String {
format!("牛逼呀{}", self.summarize())
}
}
/// Empty marker trait (no required or provided methods).
pub trait Summary2 {
}
/// A news article with full text and byline metadata.
pub struct NewsArticle {
// Article title.
pub headline: String,
// Where the story was reported from.
pub location: String,
// Byline author.
pub author: String,
// Full article body.
pub content: String
}
impl Summary for NewsArticle {
fn summarize(&self) -> String {
format!("{}-{}-{}", self.headline,self.author,self.location)
}
}
/// A tweet with its author and threading flags.
pub struct Tweet {
// Posting user's handle.
pub username: String,
// Tweet text.
pub content: String,
// Whether this tweet is a reply.
pub reply: bool,
// Whether this tweet is a retweet.
pub retweet: bool,
}
impl Summary for Tweet {
fn summarize(&self) -> String {
format!("{}-{}", self.username, self.content)
}
} |
use std::{collections::{HashSet, VecDeque}, io, num};
use problem::{Problem, ProblemInput, solve};
/// The two starting decks parsed from the puzzle input, in input order
/// (cards are drawn from the front).
struct Input {
// Player 1's starting deck.
player_1: Vec<u32>,
// Player 2's starting deck.
player_2: Vec<u32>,
}
/// Everything that can go wrong while parsing the deck input.
#[derive(Debug)]
enum ParseInputError {
// The underlying reader failed.
IoError(io::Error),
// A card line was not a valid number.
ParseIntError(num::ParseIntError),
// The input ended before both decks were read.
UnexpectedEndOfInput,
// The first line was not the "Player 1:" header.
ExpectedPlayer1,
// The line after the first deck was not the "Player 2:" header.
ExpectedPlayer2,
}
impl From<io::Error> for ParseInputError {
fn from(e: io::Error) -> Self {
Self::IoError(e)
}
}
impl From<num::ParseIntError> for ParseInputError {
fn from(e: num::ParseIntError) -> Self {
Self::ParseIntError(e)
}
}
impl ProblemInput for Input {
    type Error = ParseInputError;

    /// Parse the two decks. Expected shape: a "Player 1:" header, one card
    /// per line, a blank separator line, a "Player 2:" header, then one card
    /// per line until end of input.
    ///
    /// Idiom fixes: `is_empty()` instead of `!= ""`, and a `for` loop over
    /// the remaining lines instead of a manual `loop`/`break`.
    fn parse<R: io::BufRead>(reader: R) -> Result<Self, Self::Error> {
        let mut lines = reader.lines();

        // Player 1 header.
        if lines.next().ok_or(ParseInputError::UnexpectedEndOfInput)?? != "Player 1:" {
            return Err(ParseInputError::ExpectedPlayer1);
        }
        // Player 1's cards run until the blank separator line.
        let mut player_1 = Vec::new();
        loop {
            let next = lines.next().ok_or(ParseInputError::UnexpectedEndOfInput)??;
            if next.is_empty() {
                break;
            }
            player_1.push(next.parse()?);
        }

        // Player 2 header.
        if lines.next().ok_or(ParseInputError::UnexpectedEndOfInput)?? != "Player 2:" {
            return Err(ParseInputError::ExpectedPlayer2);
        }
        // Player 2's cards run to end of input.
        let mut player_2 = Vec::new();
        for line in lines {
            player_2.push(line?.parse()?);
        }

        Ok(Self { player_1, player_2 })
    }
}
/// Outcome of a Recursive Combat game: which player won, carrying that
/// player's final deck so the caller can score it.
enum RecursiveCombatWinner {
Player1(VecDeque<u32>),
Player2(VecDeque<u32>),
}
/// Play a full game of Recursive Combat and return the winner together with
/// the winning deck. (The previous comment claimed a boolean "did player 1
/// win" return; the enum also carries the deck so it can be scored.)
fn recursive_combat(mut player_1: VecDeque<u32>, mut player_2: VecDeque<u32>) -> RecursiveCombatWinner {
// Deck states already seen in *this* game; a repeat ends the game in
// player 1's favor (the infinite-loop prevention rule).
let mut previous_states = HashSet::new();
while player_1.len() > 0 && player_2.len() > 0 {
if !previous_states.insert((player_1.clone(), player_2.clone())) {
return RecursiveCombatWinner::Player1(player_1);
} else {
let card_1 = player_1.pop_front().unwrap();
let card_2 = player_2.pop_front().unwrap();
// If both players still hold at least as many cards as the value they
// drew, the round is decided by a recursive sub-game on deck prefixes.
if card_1 as usize <= player_1.len() && card_2 as usize <= player_2.len() {
let player_1_deck = player_1.iter().take(card_1 as usize).cloned().collect();
let player_2_deck = player_2.iter().take(card_2 as usize).cloned().collect();
match recursive_combat(player_1_deck, player_2_deck) {
RecursiveCombatWinner::Player1(_) => {
player_1.push_back(card_1);
player_1.push_back(card_2);
},
RecursiveCombatWinner::Player2(_) => {
player_2.push_back(card_2);
player_2.push_back(card_1);
},
}
} else if card_1 > card_2 {
// Otherwise the higher card wins; the winner's own card goes first.
player_1.push_back(card_1);
player_1.push_back(card_2);
} else {
player_2.push_back(card_2);
player_2.push_back(card_1);
}
}
}
// Whoever still holds cards wins the game.
if player_1.len() > 0 {
RecursiveCombatWinner::Player1(player_1)
} else {
RecursiveCombatWinner::Player2(player_2)
}
}
/// Marker type whose `Problem` impl solves the day-22 card-game puzzle.
struct Day22;
impl Problem for Day22 {
    type Input = Input;
    type Part1Output = u32;
    type Part2Output = u32;
    type Error = ();

    /// Part 1: plain Combat — higher card wins each round; score the winning
    /// deck bottom-up (bottom card counts 1, next 2, ...).
    fn part_1(input: &Self::Input) -> Result<Self::Part1Output, Self::Error> {
        let mut player_1 = input.player_1.iter().cloned().collect::<VecDeque<_>>();
        let mut player_2 = input.player_2.iter().cloned().collect::<VecDeque<_>>();
        while !player_1.is_empty() && !player_2.is_empty() {
            let card_1 = player_1.pop_front().unwrap();
            let card_2 = player_2.pop_front().unwrap();
            if card_1 > card_2 {
                player_1.push_back(card_1);
                player_1.push_back(card_2);
            } else {
                player_2.push_back(card_2);
                player_2.push_back(card_1);
            }
        }
        let winner = if !player_1.is_empty() { player_1 } else { player_2 };
        Ok(winner.iter().rev().enumerate().map(|(i, v)| (i as u32 + 1) * v).sum())
    }

    /// Part 2: Recursive Combat; same scoring as part 1.
    ///
    /// Fixed: the success type was declared as `Self::Part1Output`; it now
    /// correctly uses `Self::Part2Output` (both alias `u32`, so this is
    /// source-compatible for callers).
    fn part_2(input: &Self::Input) -> Result<Self::Part2Output, Self::Error> {
        let winner = match recursive_combat(
            input.player_1.iter().cloned().collect(),
            input.player_2.iter().cloned().collect(),
        ) {
            RecursiveCombatWinner::Player1(deck) => deck,
            RecursiveCombatWinner::Player2(deck) => deck,
        };
        Ok(winner.iter().rev().enumerate().map(|(i, v)| (i as u32 + 1) * v).sum())
    }
}
fn main() {
// Run the day-22 solver against the file named "input" (driver behavior
// defined by the `problem` crate's `solve`).
solve::<Day22>("input").unwrap();
}
|
use phase::*;
/// AI strategy hooks, one per game phase; each receives the mutable state
/// for that phase to act on.
pub trait AiTrait {
/// Act during the morning phase.
fn phase_morning(&self, state: &mut MorningState);
/// Act during the special phase.
fn phase_special(&self, state: &mut SpecialState);
/// Act during the mafia phase.
fn phase_mafia(&self, state: &mut MafiaState);
}
|
use winit::event::{ElementState, KeyboardInput, VirtualKeyCode, WindowEvent};
/// A perspective camera: world-space position plus projection parameters.
pub struct Camera {
/// Camera position in world space.
pub eye: cgmath::Point3<f32>,
/// World-space "up" direction used to orient the view.
pub up: cgmath::Vector3<f32>,
/// Viewport aspect ratio (width / height).
pub aspect: f32,
/// Vertical field of view in degrees (wrapped in `cgmath::Deg`).
pub fovy: f32,
/// Near clipping plane distance.
pub znear: f32,
/// Far clipping plane distance.
pub zfar: f32,
}
impl Camera {
    /// Compose view and perspective-projection transforms, remapped from
    /// OpenGL to wgpu clip space.
    pub fn build_view_projection_matrix(&self) -> cgmath::Matrix4<f32> {
        // Look one unit down -z from the eye position.
        let view = cgmath::Matrix4::look_at(
            self.eye,
            (self.eye.x, self.eye.y, self.eye.z - 1.0).into(),
            self.up,
        );
        let projection =
            cgmath::perspective(cgmath::Deg(self.fovy), self.aspect, self.znear, self.zfar);
        OPENGL_TO_WGPU_MATRIX * projection * view
    }
}
/// Remaps clip-space depth: z' = 0.5 * z + 0.5 * w, i.e. OpenGL's [-1, 1]
/// depth range to wgpu's [0, 1]; x and y are unchanged.
/// (`cgmath::Matrix4::new` takes its arguments in column-major order.)
#[rustfmt::skip]
pub const OPENGL_TO_WGPU_MATRIX: cgmath::Matrix4<f32> = cgmath::Matrix4::new(
1.0, 0.0, 0.0, 0.0,
0.0, 1.0, 0.0, 0.0,
0.0, 0.0, 0.5, 0.0,
0.0, 0.0, 0.5, 1.0,
);
/// Keyboard-driven camera mover: per-axis input values plus a speed
/// multiplier toggled by Shift (2x) / Alt (0.25x).
pub struct CameraController {
/// Base movement speed per update.
speed: f32,
/// A/D (Left/Right) input, applied to `eye.x`; -1.0, 0.0 or 1.0.
x_axis: f32,
/// W/S (Up/Down) input, applied to `eye.y`.
y_axis: f32,
/// F/Q vs R/E input, applied to `eye.z`.
z_axis: f32,
/// Current speed scale: 2.0 (Shift), 0.25 (Alt) or 1.0.
speed_multiplier: f32,
}
impl CameraController {
    /// Create a controller with the given base speed and all axes idle.
    pub fn new(speed: f32) -> Self {
        Self {
            speed,
            x_axis: 0.0,
            y_axis: 0.0,
            z_axis: 0.0,
            speed_multiplier: 1.0,
        }
    }

    /// Update controller state from a window event.
    /// Returns `true` when the event was handled.
    pub fn process_events(&mut self, event: &WindowEvent) -> bool {
        // Only keyboard events with a resolved keycode are of interest.
        let (state, keycode) = match event {
            WindowEvent::KeyboardInput {
                input:
                    KeyboardInput {
                        state,
                        virtual_keycode: Some(keycode),
                        ..
                    },
                ..
            } => (state, keycode),
            _ => return false,
        };
        let is_pressed = *state == ElementState::Pressed;
        // Pressing drives an axis to full deflection; releasing recenters it.
        let axis_value = if is_pressed { 1.0 } else { 0.0 };
        match keycode {
            VirtualKeyCode::A | VirtualKeyCode::Left => {
                self.x_axis = -axis_value;
                true
            }
            VirtualKeyCode::D | VirtualKeyCode::Right => {
                self.x_axis = axis_value;
                true
            }
            VirtualKeyCode::W | VirtualKeyCode::Up => {
                self.y_axis = axis_value;
                true
            }
            VirtualKeyCode::S | VirtualKeyCode::Down => {
                self.y_axis = -axis_value;
                true
            }
            VirtualKeyCode::R | VirtualKeyCode::E => {
                self.z_axis = -axis_value;
                true
            }
            VirtualKeyCode::F | VirtualKeyCode::Q => {
                self.z_axis = axis_value;
                true
            }
            VirtualKeyCode::LShift | VirtualKeyCode::RShift => {
                // Shift held: move faster.
                self.speed_multiplier = if is_pressed { 2.0 } else { 1.0 };
                true
            }
            VirtualKeyCode::LAlt | VirtualKeyCode::RAlt => {
                // Alt held: move slower.
                self.speed_multiplier = if is_pressed { 0.25 } else { 1.0 };
                true
            }
            _ => false,
        }
    }

    /// Apply the current axis state to the camera's position.
    pub fn update_camera(&self, camera: &mut Camera) {
        let step = self.speed * self.speed_multiplier;
        camera.eye.x += self.x_axis * step;
        camera.eye.y += self.y_axis * step;
        camera.eye.z += self.z_axis * step;
    }
}
|
//! This module contains the following indicators :
//! - SMA (Simple Moving Average)
//! - EMA (Exponential Moving Average)
//! - PSAR (Parabolic SAR)
pub mod sma;
pub mod ema;
pub mod psar;
|
use std::collections::VecDeque;
/// Returns the index of the first element in the sorted deque that is
/// strictly greater than `n` (`v.len()` when no such element exists).
/// Implemented here because superslice's `upper_bound` cannot be used on a
/// `VecDeque`.
pub fn upper_bound<T>(v: &VecDeque<T>, n: T) -> usize
where
    T: PartialEq + PartialOrd,
{
    let mut lo = 0usize;
    let mut hi = v.len();
    // Invariant: every element before `lo` is <= n; every element at or
    // after `hi` is > n.
    while lo < hi {
        let mid = lo + (hi - lo) / 2;
        if v[mid] <= n {
            lo = mid + 1;
        } else {
            hi = mid;
        }
    }
    lo
}
/// Returns the index i pointing to the first element in the ordered slice
/// that is not less than `n` (`v.len()` when no such element exists).
pub fn lower_bound<T>(v: &VecDeque<T>, n: T) -> usize
where
    T: PartialEq + PartialOrd,
{
    let mut lo = 0usize;
    let mut hi = v.len();
    // Invariant: every element before `lo` is < n; every element at or
    // after `hi` is >= n.
    while lo < hi {
        let mid = lo + (hi - lo) / 2;
        if v[mid] < n {
            lo = mid + 1;
        } else {
            hi = mid;
        }
    }
    lo
}
|
// Copyright (c) Facebook, Inc. and its affiliates.
//
// This source code is licensed under the MIT license found in the
// LICENSE file in the root directory of this source tree.
use crate::errors::{utils::check_slice_size, InternalPakeError, PakeError};
use aead::{Aead, NewAead};
use generic_array::{typenum::Unsigned, GenericArray};
use hmac::{Hmac, Mac};
use rand_core::{CryptoRng, RngCore};
use sha2::{Digest, Sha256};
/// This trait encapsulates an encryption scheme that satisfies random-key robustness (RKR), which is implemented
/// through encrypt-then-HMAC -- see Section 3.1.1 of
/// https://www.ietf.org/id/draft-krawczyk-cfrg-opaque-03.txt
/// We require an Aead implementation with a 32-byte key size, since we
/// will derive the symmetric key from pw using Sha256
pub trait RKRCipher: Sized {
/// The requirement of KeySize = U32 is so that we can use a 32-byte hash
/// for key derivation from the user's password
type AEAD: NewAead<KeySize = <Sha256 as Digest>::OutputSize> + Aead;
// Required members
/// Assemble a ciphertext value from its three components.
fn new(
aead_output: Vec<u8>,
hmac: &GenericArray<u8, <Sha256 as Digest>::OutputSize>,
nonce: &GenericArray<u8, <Self::AEAD as Aead>::NonceSize>,
) -> Self;
/// Raw AEAD output (ciphertext plus tag).
fn aead_output(&self) -> &Vec<u8>;
/// HMAC-SHA256 computed over the AEAD output.
fn hmac(&self) -> &GenericArray<u8, <Sha256 as Digest>::OutputSize>;
/// Nonce used for the AEAD encryption.
fn nonce(&self) -> &GenericArray<u8, <Self::AEAD as Aead>::NonceSize>;
/// Serialize as aead_output | hmac | nonce (the inverse of `from_bytes`).
fn to_bytes(&self) -> Vec<u8>;
// Provided members for enc / dec
fn key_len() -> usize {
<Self::AEAD as NewAead>::KeySize::to_usize()
}
fn nonce_size() -> usize {
<Self::AEAD as Aead>::NonceSize::to_usize()
}
fn hmac_size() -> usize {
<Sha256 as Digest>::OutputSize::to_usize()
}
/// This estimates the size of the ciphertext once we encode —very specifically—
/// the payload we have planned for the protocol's env_u
// NOTE(review): "ciphertest" is a typo for "ciphertext"; renaming would
// break existing implementors/callers, so it is kept as-is.
fn ciphertest_size() -> usize {
Self::key_len() + <Self::AEAD as Aead>::TagSize::to_usize() + Self::hmac_size()
}
fn rkr_with_nonce_size() -> usize {
Self::ciphertest_size() + Self::nonce_size()
}
/// The format of the output ciphertext here is:
/// encryption_output | tag | hmac | nonce
/// variable length | AEAD_TAG_SIZE bytes | HMAC_SIZE bytes | NONCE_SIZE bytes
fn from_bytes(bytes: &[u8]) -> Result<Self, InternalPakeError> {
// NOTE(review): the first slice below reads from `bytes` while the other
// two read from `checked_bytes`; both refer to the same data once the
// size check passes, but using `checked_bytes` throughout would be
// more consistent.
let checked_bytes = check_slice_size(&bytes[..], Self::rkr_with_nonce_size(), "bytes")?;
let nonce_start = bytes.len() - Self::nonce_size();
let hmac_start = nonce_start - Self::hmac_size();
Ok(<Self as RKRCipher>::new(
bytes[..hmac_start].to_vec(),
GenericArray::from_slice(&checked_bytes[hmac_start..nonce_start]),
GenericArray::from_slice(&checked_bytes[nonce_start..]),
))
}
/// Encrypt with AEAD. Note that this encryption scheme needs to satisfy "random-key robustness" (RKR).
fn encrypt<R: RngCore + CryptoRng>(
encryption_key: &[u8],
hmac_key: &[u8],
plaintext: &[u8],
aad: &[u8],
rng: &mut R,
) -> Result<Self, PakeError> {
// Fresh random nonce per encryption.
let mut nonce = vec![0u8; Self::nonce_size()];
rng.fill_bytes(&mut nonce);
let gen_nonce = GenericArray::from_slice(&nonce[..]);
let ciphertext = <Self::AEAD as NewAead>::new(*GenericArray::from_slice(&encryption_key))
.encrypt(
GenericArray::from_slice(&nonce),
aead::Payload {
msg: &plaintext,
aad: &aad,
},
)
.map_err(|_| PakeError::EncryptionError)?;
// Encrypt-then-MAC: HMAC is computed over the AEAD output.
let mut mac =
Hmac::<Sha256>::new_varkey(&hmac_key).map_err(|_| InternalPakeError::HmacError)?;
mac.input(&ciphertext);
Ok(<Self as RKRCipher>::new(
ciphertext,
&mac.result().code(),
gen_nonce,
))
}
/// Verify the HMAC first, then decrypt; HMAC failure is reported
/// distinctly from AEAD decryption failure.
fn decrypt(
&self,
encryption_key: &[u8],
hmac_key: &[u8],
aad: &[u8],
) -> Result<Vec<u8>, PakeError> {
let mut mac =
Hmac::<Sha256>::new_varkey(&hmac_key).map_err(|_| InternalPakeError::HmacError)?;
mac.input(self.aead_output());
if mac.verify(self.hmac()).is_err() {
return Err(PakeError::DecryptionHmacError);
}
Aead::decrypt(
&<Self::AEAD as NewAead>::new(*GenericArray::from_slice(&encryption_key)),
self.nonce(),
aead::Payload {
msg: self.aead_output(),
aad: &aad,
},
)
.map_err(|_| PakeError::DecryptionError)
}
}
/// This struct is a straightforward instantiation of the trait separating the
/// three components in Vecs
pub struct RKRCiphertext<T> {
// Ties the value to its AEAD algorithm without storing an instance.
aead_choice: std::marker::PhantomData<T>,
// Raw AEAD output (ciphertext plus tag).
aead_output: Vec<u8>,
// HMAC-SHA256 over `aead_output`.
hmac: Vec<u8>,
// Nonce used for the AEAD encryption.
nonce: Vec<u8>,
}
impl<T: NewAead<KeySize = <Sha256 as Digest>::OutputSize> + Aead> RKRCipher for RKRCiphertext<T> {
    type AEAD = T;

    /// Assemble a ciphertext from its three parts, copying the fixed-size
    /// hmac and nonce arrays into owned Vecs.
    fn new(
        aead_output: Vec<u8>,
        hmac: &GenericArray<u8, <Sha256 as Digest>::OutputSize>,
        nonce: &GenericArray<u8, <Self::AEAD as Aead>::NonceSize>,
    ) -> Self {
        Self {
            aead_choice: std::marker::PhantomData,
            aead_output,
            hmac: hmac.to_vec(),
            nonce: nonce.to_vec(),
        }
    }

    fn aead_output(&self) -> &Vec<u8> {
        &self.aead_output
    }

    /// Wire format: aead_output || hmac || nonce (matches the order the
    /// parsing code slices a byte buffer back apart).
    fn to_bytes(&self) -> Vec<u8> {
        [&self.aead_output[..], &self.hmac[..], &self.nonce[..]].concat()
    }

    // NOTE(review): `from_slice` panics on a length mismatch; safe here
    // because `hmac`/`nonce` were copied from arrays of exactly these sizes
    // in `new`.
    fn hmac(&self) -> &GenericArray<u8, <Sha256 as Digest>::OutputSize> {
        GenericArray::from_slice(&self.hmac[..])
    }

    fn nonce(&self) -> &GenericArray<u8, <T as Aead>::NonceSize> {
        GenericArray::from_slice(&self.nonce[..])
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use chacha20poly1305::ChaCha20Poly1305;
    use rand_core::OsRng;

    /// Round-trip: random keys and a random 100-byte message, encrypt then
    /// decrypt with empty AAD, and check the plaintext survives unchanged.
    #[test]
    fn encrypt_and_decrypt() {
        let mut rng = OsRng;
        let mut encryption_key = [0u8; 32];
        rng.fill_bytes(&mut encryption_key);
        let mut hmac_key = [0u8; 32];
        rng.fill_bytes(&mut hmac_key);
        let mut msg = [0u8; 100];
        rng.fill_bytes(&mut msg);
        let ciphertext = RKRCiphertext::<ChaCha20Poly1305>::encrypt(
            &encryption_key,
            &hmac_key,
            &msg,
            b"",
            &mut rng,
        )
        .unwrap();
        let decrypted = ciphertext.decrypt(&encryption_key, &hmac_key, b"").unwrap();
        assert_eq!(&msg.to_vec(), &decrypted);
    }
}
|
use futures::io::{AsyncRead, AsyncSeek, SeekFrom};
use futures::stream::Stream;
use futures::stream::StreamExt;
use std::future::Future;
use std::io::Result;
use std::pin::Pin;
use std::task::{Context, Poll};
use crate::{HypercoreEvent, RemoteHypercore};
// Result of a single block read; `Ok(None)` means the block was unavailable.
type GetOutput = Result<Option<Vec<u8>>>;
// Boxed future that hands the core back alongside the read result.
type GetFuture = Pin<Box<dyn Future<Output = (RemoteHypercore, GetOutput)> + Send>>;
// Result of a seek: (block sequence number, byte offset within that block).
type SeekOutput = Result<(u64, usize)>;
// Boxed future that hands the core back alongside the seek result.
type SeekFuture = Pin<Box<dyn Future<Output = (RemoteHypercore, SeekOutput)> + Send>>;
/// Fetch block `seq` from the core, returning the core together with the
/// result so the caller can reuse it for the next request.
async fn get(mut core: RemoteHypercore, seq: u64) -> (RemoteHypercore, GetOutput) {
    let output = core.get(seq).await;
    (core, output)
}
/// Resolve `byte_offset` to a (block seq, in-block offset) pair, returning
/// the core alongside the result.
async fn seek(mut core: RemoteHypercore, byte_offset: u64) -> (RemoteHypercore, SeekOutput) {
    let outcome = core.seek(byte_offset).await;
    (core, outcome)
}
/// Wait until block `seq` becomes available, then fetch it.
///
/// Subscribes to the core's event stream and performs the read when either
/// the feed is appended to or exactly this block is reported downloaded.
/// If the event stream ends without either event, resolves with `Ok(None)`.
///
/// NOTE(review): an `Append` event triggers a read of `seq` regardless of
/// whether the append actually reached that sequence number — presumably
/// `core.get` copes with a still-missing block; confirm upstream.
async fn onappend_get(mut core: RemoteHypercore, seq: u64) -> (RemoteHypercore, GetOutput) {
    let mut events = core.subscribe();
    while let Some(event) = events.next().await {
        match event {
            HypercoreEvent::Append => {
                return get(core, seq).await;
            }
            HypercoreEvent::Download(downloaded_seq) if downloaded_seq == seq => {
                return get(core, seq).await;
            }
            _ => {}
        }
    }
    (core, Ok(None))
}
/// Box `get` so it can be stored as a `GetFuture`.
fn get_future(core: RemoteHypercore, seq: u64) -> GetFuture {
    Box::pin(get(core, seq))
}

/// Box `onappend_get` so it can be stored as a `GetFuture`.
fn onappend_future(core: RemoteHypercore, seq: u64) -> GetFuture {
    Box::pin(onappend_get(core, seq))
}

/// Box `seek` so it can be stored as a `SeekFuture`.
fn seek_future(core: RemoteHypercore, byte_offset: usize) -> SeekFuture {
    Box::pin(seek(core, byte_offset as u64))
}
/// A `Stream` of blocks read sequentially from a hypercore, starting at
/// `start` and ending at `end` (or at the feed's current length when `end`
/// is `None`). With `live == true` the stream keeps waiting for appended
/// blocks instead of finishing.
pub struct ReadStream {
    // In-flight read for `index`; also owns the core between polls.
    future: GetFuture,
    // Sequence number of the block currently being fetched.
    index: u64,
    // Exclusive upper bound, if fixed up-front.
    end: Option<u64>,
    // Keep streaming past the current feed length?
    live: bool,
    // Set once the final block has been yielded.
    finished: bool,
}

impl ReadStream {
    /// Create a stream that immediately starts fetching block `start`.
    pub fn new(core: RemoteHypercore, start: u64, end: Option<u64>, live: bool) -> Self {
        Self {
            future: get_future(core, start),
            index: start,
            end,
            live,
            finished: false,
        }
    }
}
impl Stream for ReadStream {
    type Item = Result<Vec<u8>>;

    fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
        if self.finished {
            return Poll::Ready(None);
        }
        // Drive the in-flight read; `ready!` returns Pending until complete.
        let poll_result = Pin::as_mut(&mut self.future).poll(cx);
        let (core, result) = futures::ready!(poll_result);
        // Result<Option<_>> -> Option<Result<_>>: a missing block (Ok(None))
        // becomes None, which terminates the stream.
        let result = result.transpose();
        let len = self.end.unwrap_or_else(|| core.len());
        self.index += 1;
        if len == self.index && !self.live {
            // Reached the end and not live: yield this item, then stop.
            self.finished = true;
        } else if len == self.index {
            // Live mode at the tip of the feed: wait for append/download.
            self.future = onappend_future(core, self.index);
        } else {
            self.future = get_future(core, self.index)
        }
        Poll::Ready(result)
    }
}
/// Byte-oriented reader over a hypercore: `AsyncSeek` to a byte position,
/// then `AsyncRead` across block boundaries.
pub struct ByteStream {
    core: RemoteHypercore,
    // Block sequence number the next read will fetch.
    seq: u64,
    // Offset into the current block.
    block_offset: usize,
    // Absolute byte position (maintained by seeks).
    byte_offset: usize,
    state: State,
}

impl ByteStream {
    /// Start at the beginning of the feed.
    pub fn new(core: RemoteHypercore) -> Self {
        Self {
            core,
            seq: 0,
            block_offset: 0,
            byte_offset: 0,
            state: State::Idle,
        }
    }
}
/// Read-side state machine for `ByteStream`.
enum State {
    /// No operation in flight; the next read starts a block fetch.
    Idle,
    /// A byte-offset-to-block seek is in flight.
    Seeking(SeekFuture),
    /// A block fetch is in flight.
    Reading(GetFuture),
    /// A block is buffered and being copied out to readers.
    Ready { block: Vec<u8> },
    /// No more data will be produced.
    Finished,
}
impl std::fmt::Display for State {
    fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
        // Human-readable state name; `Ready` also shows the buffered
        // block's length. (Futures themselves are opaque and not printed.)
        match self {
            Self::Idle => write!(f, "Idle"),
            Self::Seeking(_) => write!(f, "Seeking"),
            Self::Reading(_) => write!(f, "Reading"),
            Self::Ready { block } => write!(f, "Ready {}", block.len()),
            Self::Finished => write!(f, "Finished"),
        }
    }
}
impl AsyncSeek for ByteStream {
    /// Resolve `pos` to a (block seq, in-block offset) pair via the core's
    /// seek, updating `seq`/`block_offset`/`byte_offset` accordingly.
    ///
    /// NOTE(review): `byte_offset` is updated as soon as the seek future is
    /// created, before it completes — observable if the poll is interleaved;
    /// confirm this is acceptable.
    fn poll_seek(mut self: Pin<&mut Self>, cx: &mut Context, pos: SeekFrom) -> Poll<Result<u64>> {
        let mut seq = self.seq;
        let mut byte_offset = self.byte_offset;
        let mut block_offset = self.block_offset;
        let mut finished = false;
        while !finished {
            self.state = match &mut self.state {
                State::Seeking(future) => {
                    let poll_result = Pin::as_mut(future).poll(cx);
                    // The core handed back by the future is discarded; the
                    // future was built from a clone of `self.core`.
                    let (_, block) = futures::ready!(poll_result);
                    // TODO: Handle error.
                    let (res_seq, res_offset) = block.unwrap();
                    seq = res_seq;
                    block_offset = res_offset;
                    finished = true;
                    State::Idle
                }
                // Any other state: compute the target byte offset and start
                // an actual seek on the core.
                // TODO: Don't drop buffer if in range..
                _ => {
                    let next_byte_offset = match pos {
                        SeekFrom::Start(start) => start,
                        SeekFrom::End(_end) => unimplemented!(),
                        SeekFrom::Current(pos) => (self.byte_offset as i64 + pos) as u64,
                    };
                    let seek_future = seek_future(self.core.clone(), next_byte_offset as usize);
                    byte_offset = next_byte_offset as usize;
                    State::Seeking(seek_future)
                }
            };
            self.byte_offset = byte_offset;
            self.seq = seq;
            self.block_offset = block_offset;
        }
        Poll::Ready(Ok(self.byte_offset as u64))
    }
}
impl AsyncRead for ByteStream {
    /// Copy bytes into `buf`, fetching blocks on demand and crossing block
    /// boundaries. Returns `Ok(0)` once `State::Finished` is reached.
    fn poll_read(
        mut self: Pin<&mut Self>,
        cx: &mut Context<'_>,
        buf: &mut [u8],
    ) -> Poll<Result<usize>> {
        let mut finished = false;
        let mut pos = 0;
        while !finished {
            let mut block_offset = self.block_offset;
            let mut seq = self.seq;
            self.state = match &mut self.state {
                // TODO: Check if there is a situation where this would happen.
                State::Seeking(_) => unreachable!(),
                State::Finished => return Poll::Ready(Ok(0)),
                // Kick off a fetch of the current block.
                State::Idle => State::Reading(get_future(self.core.clone(), self.seq)),
                State::Reading(ref mut fut) => {
                    let poll_result = Pin::as_mut(fut).poll(cx);
                    let (_, block) = futures::ready!(poll_result);
                    match block {
                        // TODO: Treat only "Block not available from peers" as Finished, otherwise
                        // as error.
                        Err(_err) => State::Finished,
                        Ok(block) => State::Ready {
                            // NOTE(review): unwrap assumes `Ok(None)` cannot
                            // occur on this path — confirm against `core.get`.
                            block: block.unwrap(),
                        },
                    }
                }
                State::Ready { block } => {
                    // Copy as much of the buffered block as fits into `buf`.
                    let block_slice = &block[block_offset..];
                    let buf_slice = &buf[pos..];
                    let len = std::cmp::min(buf_slice.len(), block_slice.len());
                    buf[pos..pos + len].copy_from_slice(&block_slice[..len]);
                    pos += len;
                    block_offset += len;
                    // We've read something, so return that.
                    finished = true;
                    if block_offset == block.len() {
                        // Block exhausted: advance to the next sequence number.
                        block_offset = 0;
                        seq += 1;
                        State::Idle
                    } else {
                        // TODO: Don't clone....
                        State::Ready {
                            block: block.clone(),
                        }
                    }
                }
            };
            self.seq = seq;
            self.block_offset = block_offset;
        }
        Poll::Ready(Ok(pos))
    }
}
// async fn read_from(mut core: RemoteHypercore, start: FeedPos, buf: &mut [u8]) -> Result<usize> {
// let (mut seq, mut offset) = match start {
// FeedPos::Bytes(byte_offset) => core.seek(byte_offset as u64).await?,
// FeedPos::Block(seq, offset) => (seq, offset),
// };
// let mut pos = 0;
// let (n, seq, offset) = loop {
// let slice = &mut buf[pos..];
// let block = core.get(seq).await?.unwrap();
// let block_slice = &block[offset..];
// let len = std::cmp::min(block_slice.len(), slice.len());
// slice[..len].copy_from_slice(&block_slice[..len]);
// pos += len;
// offset += len;
// if offset >= block.len() {
// offset = 0;
// seq += 1;
// }
// if pos == buf.len() {
// break (pos, seq, offset);
// }
// };
// Ok(n)
// }
// enum FeedPos {
// Bytes(usize),
// Block(u64, usize),
// }
|
use crate::actors::shutdown_controller::signals::outbound::Shutdown;
use crate::actors::shutdown_controller::signals::ShutdownSubscribe;
use crate::actors::shutdown_controller::ShutdownController;
use crate::lib::error::DfxResult;
use crate::lib::network::network_descriptor::NetworkDescriptor;
use crate::lib::webserver::run_webserver;
use actix::clock::{delay_for, Duration};
use actix::fut::wrap_future;
use actix::{Actor, Addr, AsyncContext, Context, Handler, Recipient, ResponseFuture};
use actix_server::Server;
use futures::future;
use futures::future::FutureExt;
use slog::{debug, error, info, Logger};
use std::net::SocketAddr;
use std::path::PathBuf;
/// Actix messages exchanged with the coordinator.
pub mod signals {
    use actix::prelude::*;

    /// Emitted when the replica is listening on `port`.
    #[derive(Message)]
    #[rtype(result = "()")]
    pub struct PortReadySignal {
        pub port: u16,
    }

    /// Subscription request: the wrapped recipient wants `PortReadySignal`s.
    #[derive(Message)]
    #[rtype(result = "()")]
    pub struct PortReadySubscribe(pub Recipient<PortReadySignal>);
}
/// Construction parameters for `ReplicaWebserverCoordinator`.
pub struct Config {
    // Optional logger; a discard logger is substituted when `None`.
    pub logger: Option<Logger>,
    // Where to register for replica port-ready notifications.
    pub port_ready_subscribe: Recipient<signals::PortReadySubscribe>,
    pub shutdown_controller: Addr<ShutdownController>,
    // Address the webserver binds to.
    pub bind: SocketAddr,
    // Upstream providers passed to the webserver (the local replica URL is
    // appended at start time).
    pub providers: Vec<url::Url>,
    pub build_output_root: PathBuf,
    pub network_descriptor: NetworkDescriptor,
}
///
/// The ReplicaWebserverCoordinator runs a webserver for the replica.
///
/// If the replica restarts, it will start a new webserver for the new replica.
pub struct ReplicaWebserverCoordinator {
    logger: Logger,
    config: Config,
    // Handle to the currently running webserver, if any.
    server: Option<Server>,
}
impl ReplicaWebserverCoordinator {
    /// Build a coordinator from `config`, substituting a discard logger
    /// when none was provided.
    pub fn new(config: Config) -> Self {
        let logger =
            (config.logger.clone()).unwrap_or_else(|| Logger::root(slog::Discard, slog::o!()));
        ReplicaWebserverCoordinator {
            config,
            logger,
            server: None,
        }
    }

    /// Start the webserver, pointing it at the replica now listening on
    /// `port` (appended to the providers from the config).
    ///
    /// # Panics
    /// Panics if the constructed replica URL fails to parse (should be
    /// impossible for a `u16` port).
    fn start_server(&self, port: u16) -> DfxResult<Server> {
        let mut providers = self.config.providers.clone();
        // `format!` replaces the previous manual `to_owned()` + `+`
        // concatenation; the resulting URL text is identical.
        let ic_replica_bind_addr = format!("http://localhost:{}", port);
        let replica_api_uri = url::Url::parse(&ic_replica_bind_addr)
            .expect("Failed to parse replica ingress url.");
        providers.push(replica_api_uri);
        info!(
            self.logger,
            "Starting webserver for replica at {:?}", ic_replica_bind_addr
        );
        run_webserver(
            self.logger.clone(),
            self.config.build_output_root.clone(),
            self.config.network_descriptor.clone(),
            self.config.bind,
            providers,
        )
    }
}
impl Actor for ReplicaWebserverCoordinator {
    type Context = Context<Self>;

    /// On startup, subscribe to replica port-ready notifications and to
    /// shutdown signals so the webserver can be stopped cleanly.
    fn started(&mut self, ctx: &mut Self::Context) {
        let _ = self
            .config
            .port_ready_subscribe
            .do_send(signals::PortReadySubscribe(ctx.address().recipient()));
        self.config
            .shutdown_controller
            .do_send(ShutdownSubscribe(ctx.address().recipient::<Shutdown>()));
    }
}
impl Handler<signals::PortReadySignal> for ReplicaWebserverCoordinator {
    type Result = ();

    /// The replica (re)started on `msg.port`: stop any previous webserver,
    /// then start a fresh one against the new port. On failure, retry after
    /// a 2-second pause by re-sending the same message to ourselves.
    fn handle(&mut self, msg: signals::PortReadySignal, ctx: &mut Self::Context) {
        debug!(self.logger, "replica ready on {}", msg.port);
        if let Some(server) = &self.server {
            // An old server is still running: stop it gracefully, then
            // re-deliver this message so the start path runs next time.
            ctx.wait(wrap_future(server.stop(true)));
            self.server = None;
            ctx.address().do_send(msg);
        } else {
            match self.start_server(msg.port) {
                Ok(server) => {
                    self.server = Some(server);
                }
                Err(e) => {
                    error!(
                        self.logger,
                        "Unable to start webserver on port {}: {}", msg.port, e
                    );
                    // Back off, then retry via self-send.
                    ctx.wait(wrap_future(delay_for(Duration::from_secs(2))));
                    ctx.address().do_send(msg);
                }
            }
        }
    }
}
impl Handler<Shutdown> for ReplicaWebserverCoordinator {
    type Result = ResponseFuture<Result<(), ()>>;

    /// Gracefully stop the webserver (if any) as part of shutdown.
    fn handle(&mut self, _msg: Shutdown, _ctx: &mut Self::Context) -> Self::Result {
        if let Some(server) = self.server.take() {
            // We stop the webserver before shutting down because
            // if we don't, the process will segfault
            // while dropping actix stuff after main() returns.
            Box::pin(server.stop(true).map(Ok))
        } else {
            Box::pin(future::ok(()))
        }
    }
}
|
use crate::parse::{ParseError, ParseStatus};
use data::Row;
use regex::Regex;
use std::io::{BufReader, Error as IoError, Read};
use std::{env, fs::File};
use xml::reader::{Error as XmlError, EventReader, XmlEvent};
use zip::result::ZipError;
use zip::ZipArchive;
mod data;
mod parse;
/// Everything that can go wrong (or be reported) while extracting tables.
#[derive(Debug)]
enum CallError {
    /// No input path was given on the command line.
    NoArgument,
    Io(IoError),
    Xml(XmlError),
    Parse(ParseError),
    Zip(ZipError),
    /// `next()` was called after the parser already reached end-of-document.
    AlreadyFinished,
    /// Not a failure: text found outside a table, surfaced to the caller.
    Paragraph(String),
}
impl From<IoError> for CallError {
fn from(e: IoError) -> Self {
CallError::Io(e)
}
}
impl From<XmlError> for CallError {
fn from(e: XmlError) -> Self {
CallError::Xml(e)
}
}
impl From<ParseError> for CallError {
fn from(e: ParseError) -> Self {
CallError::Parse(e)
}
}
impl From<ZipError> for CallError {
fn from(e: ZipError) -> Self {
CallError::Zip(e)
}
}
/// Pull-based parser that walks the XML event stream and yields table rows.
struct ParseXml<R: Read> {
    parser: EventReader<R>,
    // Tracks where we are relative to table/row/cell boundaries.
    status: ParseStatus,
    // A partially filled row carried over to the next `next()` call.
    row: Option<Row>,
}
impl<R: Read> ParseXml<R> {
    fn new(r: R) -> Self {
        Self {
            parser: EventReader::new(r),
            status: ParseStatus::SearchingTable,
            row: None,
        }
    }

    /// Advance the XML stream until a complete table row is assembled.
    ///
    /// Returns `Err(CallError::Paragraph(_))` for text outside a table and
    /// `Err(CallError::AlreadyFinished)` once the document is exhausted.
    fn next(&mut self) -> Result<Row, CallError> {
        let status = &mut self.status;
        if status.is_finished() {
            // Flush the row left over from the previous call, if any.
            return self.row.take().ok_or(CallError::AlreadyFinished);
        }
        let mut row = self.row.take().unwrap_or_else(Row::new);
        loop {
            match self.parser.next() {
                Ok(XmlEvent::StartElement {
                    name, attributes, ..
                }) => match name.local_name.as_str() {
                    "tbl" => status.start_table()?,
                    "tr" => status.start_field()?,
                    "tc" => status.start_col()?,
                    "p" => {
                        // Failure here is deliberately ignored (paragraph
                        // outside a tracked context).
                        status.append_new_line().ok();
                    }
                    "t" => {
                        // xml:space="preserve" means surrounding spaces matter;
                        // inject one explicitly.
                        if let Some(att) =
                            attributes.iter().find(|att| att.name.local_name == "space")
                        {
                            if att.value == "preserve" {
                                status
                                    .append_text(" ")
                                    .map_err(|_| CallError::Paragraph(" ".to_owned()))?;
                            }
                        }
                    }
                    _ => {}
                },
                Ok(XmlEvent::EndElement { name, .. }) => match name.local_name.as_str() {
                    "tc" => status.end_col()?,
                    "tr" => {
                        let f = status.end_field()?;
                        // If this field doesn't fit the current row, stash it
                        // in a fresh row for the next call and emit this one.
                        if let Err(f) = row.set_field(f) {
                            let mut new_row = Row::new();
                            new_row.set_field(f).ok();
                            self.row = Some(new_row);
                            return Ok(row);
                        }
                    }
                    "tbl" => {
                        status.end_table()?;
                        return Ok(row);
                    }
                    _ => {}
                },
                Ok(XmlEvent::Characters(text)) => {
                    // Text outside a table cell is reported as a Paragraph.
                    status
                        .append_text(&text)
                        .map_err(|_| CallError::Paragraph(text))?;
                }
                Ok(XmlEvent::EndDocument) => {
                    return match status.finish() {
                        Ok(_) => Err(CallError::AlreadyFinished),
                        Err(f) => {
                            // Pending field at EOF: attach it, or carry it
                            // over if the current row is already full.
                            row.set_field(f).unwrap_or_else(|f| {
                                let mut new_row = Row::new();
                                new_row.set_field(f).ok();
                                self.row = Some(new_row);
                            });
                            Ok(row)
                        }
                    };
                }
                Err(e) => {
                    // Mark finished so the next call terminates cleanly.
                    status.finish().ok();
                    return Err(e.into());
                }
                _ => {}
            }
        }
    }
}
/// Item yielded by `Rows`: either a parsed table row or loose document text.
enum RowOrParagraph {
    Row(Row),
    Paragraph(String),
}
impl<R: Read> IntoIterator for ParseXml<R> {
    type Item = RowOrParagraph;
    type IntoIter = Rows<R>;

    /// Wrap the parser in an iterator over rows and paragraphs.
    fn into_iter(self) -> Self::IntoIter {
        Rows { parser: self }
    }
}
/// Iterator adapter over `ParseXml` that surfaces rows and paragraphs and
/// silently drops all other errors.
struct Rows<R: Read> {
    parser: ParseXml<R>,
}

impl<R: Read> Iterator for Rows<R> {
    type Item = RowOrParagraph;

    fn next(&mut self) -> Option<Self::Item> {
        loop {
            match self.parser.next() {
                Ok(row) => break Some(RowOrParagraph::Row(row)),
                Err(CallError::Paragraph(p)) => break Some(RowOrParagraph::Paragraph(p)),
                Err(CallError::AlreadyFinished) => break None,
                // Other errors (I/O, XML, parse) are skipped; the underlying
                // parser marks itself finished after a hard error, so this
                // cannot loop forever.
                _ => {}
            }
        }
    }
}
/// Entry point: read the `word/document.xml` entry of the .docx given as the
/// first CLI argument and print its tables as LaTeX to stdout (diagnostics
/// go to stderr).
fn main() -> Result<(), CallError> {
    // `nth(1)` replaces the clippy-flagged `skip(1).next()`.
    let path = env::args().nth(1).ok_or(CallError::NoArgument)?;
    let mut zip = ZipArchive::new(File::open(path)?)?;
    let entry = BufReader::new(zip.by_name("word/document.xml")?);
    println!(r"\chapter{{Catálogo}}");
    println!();
    println!(r"\section{{Revistas publicadas em 1903}}");
    println!();
    // Tracks whether the previous emitted item was a table row or free text,
    // to decide the LaTeX spacing between items.
    #[derive(PartialEq, Eq)]
    enum LastItem {
        Text,
        Row,
    }
    let mut last_item = None;
    let number_regex = Regex::new(r"^\d+$").unwrap();
    let tabela_regex = Regex::new(r"^Tabela \d+$").unwrap();
    for row_or_p in ParseXml::new(entry).into_iter() {
        match row_or_p {
            RowOrParagraph::Row(row) => {
                if last_item.is_some() {
                    println!();
                    println!(r"\bigskip");
                    println!();
                }
                println!(r"\noindent{}", row);
                last_item = Some(LastItem::Row);
            }
            RowOrParagraph::Paragraph(p) => {
                let trimmed = p.trim();
                // Skip caption artifacts produced by Word's table numbering.
                match trimmed {
                    "Tabela" | r"SEQ Tabela \* ARABIC" => continue,
                    _ => (),
                };
                if number_regex.is_match(trimmed) || tabela_regex.is_match(trimmed) {
                    continue;
                }
                if Some(LastItem::Row) == last_item {
                    println!();
                    eprintln!();
                }
                eprintln!("paragraph: {}", p);
                // `$` must be escaped for LaTeX.
                println!("{}", p.replace("$", r"\$"));
                last_item = Some(LastItem::Text);
            }
        }
    }
    Ok(())
}
|
pub use crate::ui::filesystem::directory::*;
pub use crate::ui::filesystem::file::*;
pub mod directory;
pub mod file;
pub const NAME_MARGIN: i32 = 20;
|
// Copyright 2015-2017 Brian Smith.
//
// Permission to use, copy, modify, and/or distribute this software for any
// purpose with or without fee is hereby granted, provided that the above
// copyright notice and this permission notice appear in all copies.
//
// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHORS DISCLAIM ALL WARRANTIES
// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY
// SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
// OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
// CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
#![forbid(
anonymous_parameters,
box_pointers,
legacy_directory_ownership,
missing_copy_implementations,
missing_debug_implementations,
missing_docs,
trivial_casts,
trivial_numeric_casts,
unsafe_code,
unstable_features,
unused_extern_crates,
unused_import_braces,
unused_qualifications,
unused_results,
variant_size_differences,
warnings
)]
use ring::{agreement::*, error, rand, test};
/// Exercises key agreement through both the ephemeral-only entry point and
/// the combined static + ephemeral entry point.
#[test]
fn agreement_agree() {
    agreement_agree_(
        |private_key: PrivateKey<Ephemeral>, peer_alg, peer_public_key| {
            (None, private_key.agree(peer_alg, peer_public_key))
        },
    );
    agreement_agree_(|private_key, peer_alg, peer_public_key| {
        (
            Some(private_key.agree_static(peer_alg, peer_public_key)),
            private_key.agree(peer_alg, peer_public_key),
        )
    });
}
/// Drives the data-driven agreement tests in `tests/agreement_tests.txt`
/// through the caller-supplied `agree` closure, which returns an optional
/// static-agreement result alongside the ephemeral one.
fn agreement_agree_<U: Lifetime, F>(agree: F)
where
    F: Fn(
        PrivateKey<U>,
        &Algorithm,
        untrusted::Input,
    ) -> (
        Option<Result<InputKeyMaterial, error::Unspecified>>,
        Result<InputKeyMaterial, error::Unspecified>,
    ),
{
    let rng = rand::SystemRandom::new();
    test::from_file("tests/agreement_tests.txt", |section, test_case| {
        assert_eq!(section, "");
        let curve_name = test_case.consume_string("Curve");
        let alg = alg_from_curve_name(&curve_name);
        let peer_public = test_case.consume_bytes("PeerQ");
        let peer_public = untrusted::Input::from(&peer_public);
        match test_case.consume_optional_string("Error") {
            None => {
                // Positive case: "generate" deterministically from the test
                // vector's private scalar via a fixed-output RNG.
                let my_private = test_case.consume_bytes("D");
                let rng = test::rand::FixedSliceRandom { bytes: &my_private };
                let my_private = PrivateKey::<U>::generate(alg, &rng)?;
                let my_public = test_case.consume_bytes("MyQ");
                let output = test_case.consume_bytes("Output");
                let computed_public = my_private.compute_public_key().unwrap();
                assert_eq!(computed_public.as_ref(), &my_public[..]);
                let (static_ikm, ephemeral_ikm) = agree(my_private, alg, peer_public);
                // Both agreement flavors must derive the expected secret.
                if let Some(ikm) = static_ikm {
                    ikm.unwrap().derive(|ikm| assert_eq!(ikm, &output[..]));
                };
                ephemeral_ikm
                    .unwrap()
                    .derive(|key_material| assert_eq!(key_material, &output[..]));
            },
            Some(_) => {
                // Negative case: agreement against the bad peer key must fail.
                let dummy_private_key = PrivateKey::<U>::generate(alg, &rng)?;
                let (static_ikm, ephemeral_ikm) = agree(dummy_private_key, alg, peer_public);
                if let Some(ikm) = static_ikm {
                    assert!(ikm.is_err());
                };
                assert!(ephemeral_ikm.is_err());
            },
        }
        return Ok(());
    });
}
/// Iterated X25519 test vectors from the spec: repeatedly feed the previous
/// output back in as the scalar, checking checkpoints along the way.
#[test]
fn test_agreement_ecdh_x25519_rfc_iterated() {
    let mut k = h("0900000000000000000000000000000000000000000000000000000000000000");
    let mut u = k.clone();

    /// Run `x25519` for the given iteration range, threading (k, u) through,
    /// and check the accumulated scalar against `expected_result`.
    fn expect_iterated_x25519(
        expected_result: &str, range: std::ops::Range<usize>, k: &mut Vec<u8>, u: &mut Vec<u8>,
    ) {
        for _ in range {
            let new_k = x25519(k, u);
            *u = k.clone();
            *k = new_k;
        }
        assert_eq!(&h(expected_result), k);
    }
    expect_iterated_x25519(
        "422c8e7a6227d7bca1350b3e2bb7279f7897b87bb6854b783c60e80311ae3079",
        0..1,
        &mut k,
        &mut u,
    );
    expect_iterated_x25519(
        "684cf59ba83309552800ef566f2f4d3c1c3887c49360e3875f2eb94d99532c51",
        1..1_000,
        &mut k,
        &mut u,
    );
    // The spec gives a test vector for 1,000,000 iterations but it takes
    // too long to do 1,000,000 iterations by default right now. This
    // 10,000 iteration vector is self-computed.
    expect_iterated_x25519(
        "2c125a20f639d504a7703d2e223c79a79de48c4ee8c23379aa19a62ecd211815",
        1_000..10_000,
        &mut k,
        &mut u,
    );
    if cfg!(feature = "slow_tests") {
        expect_iterated_x25519(
            "7c3911e0ab2586fd864497297e575e6f3bc601c0883c30df5f4dd2d24f665424",
            10_000..1_000_000,
            &mut k,
            &mut u,
        );
    }
}
/// Panicking convenience wrapper around `x25519_` for test code.
fn x25519(private_key: &[u8], public_key: &[u8]) -> Vec<u8> {
    x25519_(private_key, public_key).unwrap()
}

/// One X25519 agreement using `private_key` verbatim (injected through a
/// fixed-output RNG) against `public_key`, returning the shared secret.
fn x25519_(private_key: &[u8], public_key: &[u8]) -> Result<Vec<u8>, error::Unspecified> {
    let rng = test::rand::FixedSliceRandom { bytes: private_key };
    let private_key = PrivateKey::<Ephemeral>::generate(&X25519, &rng)?;
    let public_key = untrusted::Input::from(public_key);
    let ikm = private_key.agree(&X25519, public_key)?;
    ikm.derive(|agreed_value| Ok(Vec::from(agreed_value)))
}
/// Decode a hex string to bytes, panicking (with the offending input) on
/// malformed hex — convenient for inline test vectors.
fn h(s: &str) -> Vec<u8> {
    test::from_hex(s).unwrap_or_else(|msg| panic!("{} in {}", msg, s))
}
/// Map a test-vector curve name to the corresponding agreement algorithm.
///
/// # Panics
/// Panics on any curve name other than P-256, P-384, or X25519.
fn alg_from_curve_name(curve_name: &str) -> &'static Algorithm {
    match curve_name {
        "P-256" => &ECDH_P256,
        "P-384" => &ECDH_P384,
        "X25519" => &X25519,
        _ => panic!("Unsupported curve: {}", curve_name),
    }
}
|
use crate::pi_camera::{setup_camera, take_photo};
use crate::structs::CameraSoftwareSettings;
use crate::structs::DisplayMessage;
use std::sync::mpsc::Sender;
/// Gets camera settings from a QR and saves it to the conf file
///
/// Loops: photograph -> decode image -> scan for QR codes -> parse codes into
/// `CameraSoftwareSettings` -> persist the first valid one with confy, then
/// return. Any failing step is logged to stdout and the loop retries.
pub fn initial_setup(display_tx: &Sender<DisplayMessage>) {
    let mut camera = setup_camera(4).expect("Failed to setup camera!");
    let mut is_done = false;
    display_tx
        .send(DisplayMessage {
            status_message: Some("Scan first QR code".to_string()),
            next_image_time: None,
            next_conf_update: None,
        })
        .expect("Failed to send message to display thread!");
    // `!is_done` replaces the non-idiomatic `is_done == false`.
    while !is_done {
        // This println! is just here so that it's easier to see each iteration
        println!();
        println!("Taking QR image");
        // Take an image using the provided SimpleCamera
        let image_vec = match take_photo(&mut camera) {
            Ok(image_vec_ok) => image_vec_ok,
            Err(image_vec_err) => {
                println!("Failed to take photo! The error was {}", image_vec_err);
                continue;
            }
        };
        println!("Decoding QR image");
        // Decode the image vec into a DynamicImage (needed by bardecoder)
        let decoded_image = match image::load_from_memory(&image_vec) {
            Ok(decoded_image_ok) => decoded_image_ok,
            Err(decoded_image_err) => {
                println!(
                    "Failed to decode photo! The error was {}",
                    decoded_image_err
                );
                continue;
            }
        };
        println!("Looking for QR codes");
        // Create a QR decoder and search for QR codes
        let qr_decoder = bardecoder::default_decoder();
        let qr_results = qr_decoder.decode(&decoded_image);
        // This vec will hold all successfully decoded QRs for future use
        let mut decoded_qr_codes: Vec<String> = Vec::new();
        // Iterate through all the found QR codes, and append any successful codes to the decoded_qr_codes vec
        for qr_result in qr_results {
            match qr_result {
                Ok(qr_result_ok) => decoded_qr_codes.push(qr_result_ok),
                Err(qr_result_err) => {
                    println!("Failed to decode QR code! The error was {}", qr_result_err);
                }
            }
        }
        // Iterate through all the decoded QR codes and parse them into CameraSoftwareSettings.
        // The first successful decode will be saved and this function will end.
        for decoded_qr_code in decoded_qr_codes {
            println!("{}", decoded_qr_code);
            let parsed_camera_software_settings =
                match CameraSoftwareSettings::from_qr_string(decoded_qr_code) {
                    Ok(parsed_camera_software_settings_ok) => parsed_camera_software_settings_ok,
                    Err(parsed_camera_software_settings_err) => {
                        println!(
                            "Failed to parse QR code to CameraSoftwareSettings! The error was {}",
                            parsed_camera_software_settings_err
                        );
                        continue;
                    }
                };
            match confy::store("camera-software-new", parsed_camera_software_settings) {
                Ok(_) => {
                    // Bug fix: previously the loop kept parsing (and storing)
                    // the remaining QR codes after the first success, so the
                    // LAST valid code won rather than the documented first.
                    is_done = true;
                    break;
                }
                Err(confy_err) => {
                    println!(
                        "Failed to store CameraSoftwareSettings with confy! The error was {}",
                        confy_err
                    );
                    continue;
                }
            }
        }
    }
}
|
use super::gaddag::gaddag;
use std::fs::File;
use std::io::{prelude::*, BufReader};
use std::cell::RefCell;
use std::collections::HashMap;
use std::rc::{Rc, Weak};
use std::str::Chars;
/// A Node/Branch/Graph used by `Dico`.
struct Node {
    /// Reference to its parent.
    /// NOTE(review): only written, never read, anywhere in this module —
    /// possibly kept for future upward traversal; confirm before removing.
    parent : Option<Weak<RefCell<Node>>>,
    /// HashMap of its children
    children : HashMap<char, NodeRef>,
    /// Its letter
    letter : Option<char>,
    /// Whether its terminal
    terminal : bool
}

/// A shortcut.
struct NodeRef (Rc<RefCell<Node>>);

/// An interface to the graph of `Node`.
pub struct Dico {
    /// The only `Node` with no parent and no letter.
    first_node : NodeRef,
}
impl Dico {
    /// Generate a new dictionnary from `filename`
    ///
    /// Panic:
    /// * The file cannot be opened (Maybe it does not exist)
    /// * A line cannot be read from the file
    pub fn new(filename : &str) -> Dico {
        let reader = BufReader::new(File::open(filename).unwrap());
        let dico = Dico {
            first_node : NodeRef::new(None, None, false),
        };
        for line in reader.lines() {
            dico.add_word(line.unwrap().as_str());
        }
        dico
    }

    /// Tells if the word is present in the dico.
    pub fn exists(&self, word : &str) -> bool {
        self.first_node.exists(word.chars())
    }

    /// Add `word` to the dico
    fn add_word(&self, word : &str) {
        self.first_node.add_nexts(word.chars());
    }
}
impl Node {
/// Create a new node with an optional reference to its parent.
///
/// # Arguments
/// * `letter` - The letter of the node.
/// * `parent` - A reference to its parent NodeRef
/// * `terminal` - whether this node is terminal or not
fn new(letter : Option<char>, parent : Option<NodeRef>, terminal: bool) -> Node {
let weak_parent : Option<Weak<RefCell<Node>>>;
if let Some(x) = parent {
weak_parent = Some(Rc::downgrade(&x.0));
}
else {
weak_parent = None;
}
Node {
parent : weak_parent,
children : HashMap::new(),
letter,
terminal,
}
}
}
impl NodeRef {
/// Create a new NodeRef
///
/// # Arguments
/// Same as Node's
fn new(letter : Option<char>, parent : Option<NodeRef>,
terminal : bool) -> NodeRef {
return NodeRef {
0 : Rc::new(RefCell::new(Node::new(letter, parent, terminal))),
}
}
/// Get copy
///
/// This is a special copy, NodeRef uses Rc<RefCell<Node>>, so the data
/// in the copy points to the same address and is mutable. Use with care.
fn copy(&self) -> Self {
let cloned = Rc::clone(&self.0);
return Self {
0 : cloned,
};
}
/// Add a child to the children list
///
/// It will add a child with the first element of `chars` if needed and
/// pass the remaining of `chars` of the appropriate child.
///
/// # Argument
/// * `chars` - A char iterator
fn add_nexts(&self, mut chars: Chars) {
let first_letter = chars.next();
if let None = first_letter {
self.0.borrow_mut().terminal = true;
return;
}
let first_letter = first_letter.unwrap();
let mut current_node = self.0.borrow_mut();
let next_node = current_node
.children.entry(first_letter)
.or_insert(NodeRef::new(Some(first_letter), Some(self.copy()), false));
next_node.add_nexts(chars);
}
/// Whether there is a terminal node for this word
///
/// # Argument
/// * `chars` - A char iterator
fn exists(&self, mut word : Chars) -> bool {
let next = word.next();
if let None = next {
return self.0.borrow().terminal;
}
let next = next.unwrap();
let current_node = self.0.borrow();
let child : Option<&NodeRef> = current_node.children.get(&next);
match child {
None => false,
Some(x) => return x.exists(word)
}
}
}
|
//! Transactions and the transaction manager trait, and a simple implementation.
mod simple_transaction_manager;
mod transaction_manager;
mod xa_error;
mod xa_transaction_id;
pub use self::simple_transaction_manager::SimpleTransactionManager;
pub use self::transaction_manager::Status;
pub use self::transaction_manager::TransactionManager;
pub use self::xa_error::XaError;
pub use self::xa_transaction_id::XaTransactionId;
|
use super::slice::Slice;
use std::cmp::Ordering;
/// A Comparator object provides a total order across T that are
/// used as keys in an sstable or a database. A Comparator implementation
/// must be thread-safe since we may invoke its methods concurrently
/// from multiple threads.
pub trait Comparator<T> {
    /// Three-way comparison. Returns value:
    /// `Ordering::Less` iff `a` < `b`
    /// `Ordering::Equal` iff `a` = `b`
    /// `Ordering::Greater` iff `a` > `b`
    fn compare(&self, a: &T, b: &T) -> Ordering;
    /// The name of the comparator. Used to check for comparator
    /// mismatches (i.e., a DB created with one comparator is
    /// accessed using a different comparator.
    ///
    /// The client of this package should switch to a new name whenever
    /// the comparator implementation changes in a way that will cause
    /// the relative ordering of any two keys to change.
    ///
    /// Names starting with "wickdb." are reserved and should not be used
    /// by any clients of this package.
    fn name(&self) -> &str;
}
/// A stateless comparator ordering keys by their raw byte contents.
pub struct BytewiseComparator {}

impl BytewiseComparator {
    /// Create a new `BytewiseComparator`.
    pub fn new() -> BytewiseComparator {
        Self {}
    }
}
impl Comparator<Slice> for BytewiseComparator{
#[inline]
fn compare(&self, a: &Slice, b: &Slice) -> Ordering {
a.compare(b)
}
#[inline]
fn name(&self) -> &str {
"tinydb.BytewiseComparator"
}
} |
#![doc = "generated by AutoRust 0.1.0"]
#![allow(non_camel_case_types)]
#![allow(unused_imports)]
use serde::{Deserialize, Serialize};
/// Hybrid-connection access details for a relay namespace.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct RelayNamespaceAccessProperties {
    #[serde(rename = "namespaceName")]
    pub namespace_name: String,
    #[serde(rename = "namespaceNameSuffix")]
    pub namespace_name_suffix: String,
    #[serde(rename = "hybridConnectionName")]
    pub hybrid_connection_name: String,
    #[serde(rename = "accessKey", default, skip_serializing_if = "Option::is_none")]
    pub access_key: Option<String>,
    #[serde(rename = "expiresOn", default, skip_serializing_if = "Option::is_none")]
    pub expires_on: Option<i64>,
}
/// Wrapper carrying optional relay access properties for an endpoint.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct EndpointAccessResource {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub relay: Option<RelayNamespaceAccessProperties>,
}
/// Properties of a connectivity endpoint.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct EndpointProperties {
    #[serde(rename = "type")]
    pub type_: endpoint_properties::Type,
    #[serde(rename = "resourceId", default, skip_serializing_if = "Option::is_none")]
    pub resource_id: Option<String>,
    #[serde(rename = "provisioningState", default, skip_serializing_if = "Option::is_none")]
    pub provisioning_state: Option<String>,
}
/// Nested types for `EndpointProperties`.
pub mod endpoint_properties {
    use super::*;
    /// Kind of endpoint: the service default or a custom one.
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum Type {
        #[serde(rename = "default")]
        Default,
        #[serde(rename = "custom")]
        Custom,
    }
}
/// An endpoint resource; flattens the common proxy-resource fields.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct EndpointResource {
    #[serde(flatten)]
    pub proxy_resource: ProxyResource,
    #[serde(rename = "systemData", default, skip_serializing_if = "Option::is_none")]
    pub system_data: Option<SystemData>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub properties: Option<EndpointProperties>,
}
/// Paged list of endpoint resources.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct EndpointsList {
    #[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")]
    pub next_link: Option<String>,
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub value: Vec<EndpointResource>,
}
/// Paged list of REST API operations.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct OperationListResult {
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub value: Vec<Operation>,
    #[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")]
    pub next_link: Option<String>,
}
/// A single REST API operation exposed by the provider.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct Operation {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub name: Option<String>,
    #[serde(rename = "isDataAction", default, skip_serializing_if = "Option::is_none")]
    pub is_data_action: Option<bool>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub display: Option<operation::Display>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub origin: Option<operation::Origin>,
    #[serde(rename = "actionType", default, skip_serializing_if = "Option::is_none")]
    pub action_type: Option<operation::ActionType>,
}
/// Nested types for `Operation`.
pub mod operation {
    use super::*;
    /// Display strings for an operation.
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub struct Display {
        #[serde(default, skip_serializing_if = "Option::is_none")]
        pub provider: Option<String>,
        #[serde(default, skip_serializing_if = "Option::is_none")]
        pub resource: Option<String>,
        #[serde(default, skip_serializing_if = "Option::is_none")]
        pub operation: Option<String>,
        #[serde(default, skip_serializing_if = "Option::is_none")]
        pub description: Option<String>,
    }
    /// Who invokes the operation.
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum Origin {
        #[serde(rename = "user")]
        User,
        #[serde(rename = "system")]
        System,
        #[serde(rename = "user,system")]
        UserSystem,
    }
    /// Additional classification (internal-only actions).
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum ActionType {
        Internal,
    }
}
/// Top-level error envelope wrapping an optional `ErrorDetail`.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ErrorResponse {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub error: Option<ErrorDetail>,
}
/// Error details; `details` is recursive, allowing nested error trees.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ErrorDetail {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub code: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub message: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub target: Option<String>,
    // Self-referential: each detail may carry further nested details.
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub details: Vec<ErrorDetail>,
    #[serde(rename = "additionalInfo", default, skip_serializing_if = "Vec::is_empty")]
    pub additional_info: Vec<ErrorAdditionalInfo>,
}
/// Free-form supplementary error info: a type tag plus arbitrary JSON.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ErrorAdditionalInfo {
    // Wire name is "type"; `type_` avoids the Rust keyword.
    #[serde(rename = "type", default, skip_serializing_if = "Option::is_none")]
    pub type_: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub info: Option<serde_json::Value>,
}
/// Creation/modification metadata for a resource.
/// Timestamps are kept as plain strings — no date parsing is done here.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SystemData {
    #[serde(rename = "createdBy", default, skip_serializing_if = "Option::is_none")]
    pub created_by: Option<String>,
    #[serde(rename = "createdByType", default, skip_serializing_if = "Option::is_none")]
    pub created_by_type: Option<system_data::CreatedByType>,
    #[serde(rename = "createdAt", default, skip_serializing_if = "Option::is_none")]
    pub created_at: Option<String>,
    #[serde(rename = "lastModifiedBy", default, skip_serializing_if = "Option::is_none")]
    pub last_modified_by: Option<String>,
    #[serde(rename = "lastModifiedByType", default, skip_serializing_if = "Option::is_none")]
    pub last_modified_by_type: Option<system_data::LastModifiedByType>,
    #[serde(rename = "lastModifiedAt", default, skip_serializing_if = "Option::is_none")]
    pub last_modified_at: Option<String>,
}
/// Nested types for `SystemData`. Both enums serialize by variant name
/// (no serde renames), e.g. "ManagedIdentity".
pub mod system_data {
    use super::*;
    /// Kind of principal that created the resource.
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum CreatedByType {
        User,
        Application,
        ManagedIdentity,
        Key,
    }
    /// Kind of principal that last modified the resource.
    /// Mirrors `CreatedByType`; kept separate to match the wire schema.
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum LastModifiedByType {
        User,
        Application,
        ManagedIdentity,
        Key,
    }
}
/// A proxy resource: just the base `Resource` fields, flattened inline.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ProxyResource {
    #[serde(flatten)]
    pub resource: Resource,
}
/// Common resource identity fields shared by all resource models.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct Resource {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub id: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub name: Option<String>,
    // Wire name is "type"; `type_` avoids the Rust keyword.
    #[serde(rename = "type", default, skip_serializing_if = "Option::is_none")]
    pub type_: Option<String>,
}
|
mod benchmark;
mod clean;
mod effect_size;
mod fingerprint;
mod suite;
mod summarize;
mod upload;
mod validate;
use anyhow::Result;
use benchmark::BenchmarkCommand;
use clean::CleanCommand;
use effect_size::EffectSizeCommand;
use fingerprint::FingerprintCommand;
use log::trace;
use structopt::{clap::AppSettings, StructOpt};
use summarize::SummarizeCommand;
use upload::UploadCommand;
use validate::ValidateCommand;
/// Main entry point for CLI.
///
/// Initializes env-based logging, parses the process arguments into a
/// `SightglassCommand`, and dispatches to the chosen subcommand. Any error
/// from the subcommand propagates out via `?` and becomes the process's
/// failure report.
fn main() -> Result<()> {
    pretty_env_logger::init();
    let command = SightglassCommand::from_args();
    command.execute()?;
    Ok(())
}
/// The sightglass benchmark runner.
// NOTE: comments below are deliberately `//`, not `///` — structopt turns
// doc comments into CLI help text, so `///` here would change the CLI output.
#[derive(StructOpt, Debug)]
#[structopt(
    version = env!("CARGO_PKG_VERSION"),
    global_settings = &[
        AppSettings::VersionlessSubcommands,
        AppSettings::ColoredHelp
    ],
)]
enum SightglassCommand {
    // One variant per subcommand; each wraps that subcommand's options struct.
    Benchmark(BenchmarkCommand),
    Clean(CleanCommand),
    EffectSize(EffectSizeCommand),
    Fingerprint(FingerprintCommand),
    Summarize(SummarizeCommand),
    // Subcommand name on the CLI is derived from the variant ("upload-elastic").
    UploadElastic(UploadCommand),
    Validate(ValidateCommand),
}
impl SightglassCommand {
    /// Dispatch to the selected subcommand's `execute`, logging the parsed
    /// command at trace level first. Errors bubble up unchanged.
    fn execute(&self) -> Result<()> {
        trace!("Executing command: {:?}", &self);
        match self {
            SightglassCommand::Benchmark(benchmark) => benchmark.execute(),
            SightglassCommand::Clean(clean) => clean.execute(),
            SightglassCommand::EffectSize(effect_size) => effect_size.execute(),
            SightglassCommand::Fingerprint(fingerprint) => fingerprint.execute(),
            SightglassCommand::Summarize(summarize) => summarize.execute(),
            SightglassCommand::UploadElastic(upload) => upload.execute(),
            SightglassCommand::Validate(validate) => validate.execute(),
        }
    }
}
|
//
// Simple Matrix Multiplication Program
//
// My first project in Rust to get a feel for the language.
//
// Eventually, I intend to paralleize the multiplication.
//
extern crate rand;
use rand::Rng;
use std::thread;
use std::time::Duration;
// Matrix type is defined to be a vector of vectors. The
// outer vector defines the rows, which are represented
// as each respective inner vector, running top-down.
// NOTE: Most functions below depend on this representation
// of a matrix so changing this code will likely break
// all subsequent code.
// TODO add in some error checking somewhere to take care
// of cases where the rows/cols don't match data
// A dense matrix stored row-major: `data` holds one inner Vec per row
// (outer len == rows, inner len == cols). Only Clone is derived, so there
// is no `Copy` — duplicating a Matrix requires an explicit `.clone()`.
#[derive(Clone)]
struct Matrix {
    rows: usize,
    cols: usize,
    data: Vec<Vec<f64>>
}
// Generates a `rows` x `cols` matrix loaded with all zeros.
// Serves as the allocator for every other constructor in this file.
fn genZeroMat(rows: usize, cols: usize) -> Matrix {
    // One inner Vec per row, each `cols` wide.
    let data = vec![vec![0.0f64; cols]; rows];
    // Field-init shorthand: identifiers already match the field names.
    Matrix { rows, cols, data }
}
// Generates a `rows` x `cols` matrix of random f64 values.
// TODO: generate ints and allow a caller-specified range instead of the
// default (0,1).
fn genRandMat(rows: usize, cols: usize) -> Matrix {
    let mut m = genZeroMat(rows, cols);
    // Fill row-major so the draw order matches a nested index loop.
    for row in m.data.iter_mut() {
        for cell in row.iter_mut() {
            *cell = rand::random::<f64>();
        }
    }
    m
}
// Returns a new matrix equal to A with every entry scaled by the constant k.
fn const_mult(A: Matrix, k: f64) -> Matrix {
    let mut scaled = genZeroMat(A.rows, A.cols);
    for (r, row) in A.data.iter().enumerate() {
        for (c, &val) in row.iter().enumerate() {
            scaled.data[r][c] = k * val;
        }
    }
    scaled
}
// Element-wise sum of two matrices.
// Panics when the operands' dimensions differ.
fn add(A: Matrix, B: Matrix) -> Matrix {
    if A.rows != B.rows || A.cols != B.cols {
        panic!("Matrix dimensions don't match.");
    }
    let mut sum = genZeroMat(A.rows, A.cols);
    for r in 0..A.rows {
        for c in 0..A.cols {
            sum.data[r][c] = A.data[r][c] + B.data[r][c];
        }
    }
    sum
}
// Splits a matrix into a 2x2 grid of equal blocks, laid out as:
//   0,0 | 0,1
//   ----------
//   1,0 | 1,1
// Block size is A.rows / 2 on each side.
// TODO handle the case where n is odd
fn quarter(A:Matrix) -> Vec<Vec<Matrix>> {
    let half = A.rows / 2;
    let mut blocks = vec![vec![genZeroMat(half, half); 2]; 2];
    for r in 0..half {
        for c in 0..half {
            blocks[0][0].data[r][c] = A.data[r][c];
            blocks[0][1].data[r][c] = A.data[r][half + c];
            blocks[1][0].data[r][c] = A.data[half + r][c];
            blocks[1][1].data[r][c] = A.data[half + r][half + c];
        }
    }
    blocks
}
// Implementation of Strassen's algorithm.
// NOTE(review): still a stub — both operands are decomposed into quarter
// blocks, but the seven Strassen products are not implemented yet.
fn strassen(A: Matrix, B: Matrix) -> Matrix {
    // Underscore-prefixed so the stub compiles without unused-variable
    // warnings (the originals also violated snake_case).
    let _a_blocks = quarter(A);
    let _b_blocks = quarter(B);
    unimplemented!();
}
// Multiplies matrix A by B and returns Some(A*B) using the standard
// O(n^3) triple loop. Returns None when A.cols != B.rows (no product
// is defined).
fn mult(A: Matrix, B: Matrix)
    -> Option<Matrix> {
    if A.cols != B.rows {
        // BUG FIX: removed leftover debug `println!`s that dumped both
        // operands to stdout on every dimension mismatch.
        None
    } else {
        // Accumulate into a zeroed result; each C[i][k] sums A[i][j]*B[j][k].
        let mut C = genZeroMat(A.rows, B.cols);
        for Arow in 0..A.rows {
            for Acol in 0..A.cols {
                for Bcol in 0..B.cols {
                    C.data[Arow][Bcol] += A.data[Arow][Acol]
                        * B.data[Acol][Bcol]
                }
            }
        }
        Some(C)
    }
}
// Intended parallel version of `mult`.
// NOTE(review): still a stub — it only spawns a worker running `test1()`
// and always returns None; A and B are never multiplied.
fn pmult(A: Matrix, B: Matrix) -> Option<Matrix> {
    let worker = thread::spawn(move || {
        test1();
    });
    // BUG FIX: removed the stray `test1;` statement — it referenced the
    // function without calling it (a no-op the compiler warns about).
    // Join so the spawned worker finishes before we return.
    worker.join().unwrap();
    None
}
// Smoke test: multiply the 2x2 identity by a known matrix and print the
// product (which should equal the second operand).
fn test1() {
    let identity = Matrix { rows: 2, cols: 2,
        data: vec![vec![1.0f64, 0.0f64], vec![0.0f64, 1.0f64]] };
    let operand = Matrix { rows: 2, cols: 2,
        data: vec![vec![1.0f64, 2.0f64], vec![3.0f64, 4.0f64]] };
    match mult(identity, operand) {
        Some(result) => println!("{:?}", result.data),
        None => println!("Function failed. Sad!"),
    }
}
// Smoke test for `quarter`: split a 4x4 matrix with sequential entries
// and print each of the four blocks.
fn test2() {
    let sample = Matrix { rows: 4, cols: 4,
        data: vec![
            vec![1.0f64, 2.0f64, 3.0f64, 4.0f64],
            vec![5.0f64, 6.0f64, 7.0f64, 8.0f64],
            vec![9.0f64, 10.0f64, 11.0f64, 12.0f64],
            vec![13.0f64, 14.0f64, 15.0f64, 16.0f64],
        ] };
    let blocks = quarter(sample);
    println!("{:?},", blocks[0][0].data);
    println!("{:?},", blocks[1][0].data);
    println!("{:?},", blocks[0][1].data);
    println!("{:?},", blocks[1][1].data);
}
// Placeholder test for `strassen`; intentionally empty until the
// algorithm is implemented.
fn test_strassen() {
    // do stuff
}
// Demonstrates moving a captured local into a spawned thread, then sleeping
// briefly so the thread's output appears before the function returns.
fn test_parallel() {
    let captured = 1;
    let _ = thread::spawn(move || {
        println!("x is {}", captured);
    });
    thread::sleep(Duration::from_millis(50));
}
fn main() {
let TestA = Matrix{rows: 2, cols: 2
, data: vec![vec![1.0f64, 0.0f64],vec![0.0f64, 1.0f64]]};
let TestB = Matrix{rows: 2, cols: 2
, data: vec![vec![1.0f64, 2.0f64],vec![3.0f64, 4.0f64]]};
// let handle1 = thread::spawn(|| {
// quarter(TestA)
// });
// let handle2 = thread::spawn(|| {
// quarter(TestB)
// });
//
// let Aquarters = handle1.join().unwrap();
// let Bquarters = handle2.join().unwrap();
let Aquarters = quarter(TestA);
let Bquarters = quarter(TestB);
/// DEBUG CODE
println!("{:?},", Bquarters[0][0].data);
println!("{:?},", Bquarters[1][0].data);
println!("{:?},", Bquarters[0][1].data);
println!("{:?}", Bquarters[1][1].data);
// let mut result = genZeroMat(2,2);
let mut resultQuarters = quarter(genZeroMat(2,2));
for i in 0..1 {
for j in 0..1 {
for k in 0..1 {
let temp = resultQuarters[i][j].clone();
let A = Aquarters[i][j].clone();
let B = Bquarters[j][k].copy();
resultQuarters[i][k] = add(temp
, B);//mult(A, B).unwrap());
}
}
}
// final step is to print each of the quaters
// and/or join them all together again.
println!("{:?},",resultQuarters[0][0].data);
println!("{:?},",resultQuarters[1][0].data);
println!("{:?},",resultQuarters[0][1].data);
println!("{:?},",resultQuarters[1][1].data);
}
|
use crate::components::{Location, WantsToMoveTo};
use oorandom::Rand32;
use serde::{Deserialize, Serialize};
use std::cmp::min;
use std::ops::Range;
/// Straight-line (Euclidean) distance between two locations.
pub fn euclidean_distance(a: &Location, b: &Location) -> f64 {
    // BUG FIX: the deltas were squared with integer `.pow(2)`, which can
    // overflow (panicking in debug builds) for large coordinates. Convert
    // to f64 before any arithmetic instead.
    let dx = a.x as f64 - b.x as f64;
    let dy = a.y as f64 - b.y as f64;
    (dx * dx + dy * dy).sqrt()
}
/// Determine if two locations are close enough where we'd consider it reasonable for
/// the player to pick up an item (strictly less than 5 tiles apart).
pub fn within_pickup_distance(a: &Location, b: &Location) -> bool {
    // Idiom fix: `a` and `b` are already references — the extra `&a`/`&b`
    // created needless `&&Location` borrows (clippy::needless_borrow).
    euclidean_distance(a, b) < 5.0
}
// Tile kinds for the map grid.
// NOTE: the declaration order matters — `cellular_automata_map` compares
// discriminants (`as i32 <= TileType::Water as i32`) to treat Void and
// Water as "water-ish", so Void must stay before Water.
#[derive(PartialEq, Copy, Clone, Deserialize, Serialize)]
pub enum TileType {
    Void,
    Water,
    Sand,
    Grass,
}
// The game map. `tiles` is indexed as tiles[x][y]: the outer Vec has
// `width` entries and each inner Vec has `height` entries (see
// `blank_isometric_map`, which builds it that way).
#[derive(Default, Serialize, Deserialize, Clone)]
pub struct Map {
    pub width: i32,
    pub height: i32,
    pub tiles: Vec<Vec<TileType>>,
}
/// Generate a map containing void tiles where we don't want the isometric map rendering.
/// I chose this over some sort of tile mask because I don't want to send all that mask
/// data to the clients. The result is indexed tiles[x][y] (outer len `width`,
/// inner len `height`), all Void except a diagonal band of Water that later
/// passes grow into the island.
fn blank_isometric_map(width: usize, height: usize) -> Vec<Vec<TileType>> {
    // Idiom fix: `width`/`height` are already `usize`, so the redundant
    // `as usize` casts were removed (clippy::unnecessary_cast).
    let mut void_map = vec![vec![TileType::Void; height]; width];
    for index in 0..min(width - 1, height - 1) {
        void_map[index][index] = TileType::Water;
        // Widen the band on both sides of the diagonal.
        for span in 1..(min(width, height) as f64 / 2.1) as usize {
            if index + span < width - 1 {
                void_map[index + span][index] = TileType::Water;
            }
            if index + span < height - 1 {
                void_map[index][index + span] = TileType::Water;
            }
        }
    }
    void_map
}
impl Map {
    /// Generate new map by running 3 cellular automata simulations:
    /// one defines the sand island, and two define grass at different
    /// densities, stamped onto the island scaled toward its centre.
    pub fn new(mut rng: &mut Rand32, width: i32, height: i32) -> Map {
        let blank_tiles = blank_isometric_map(width as usize, height as usize);
        // The final map plus two scratch maps used only as automata input.
        let mut map = Map {
            width,
            height,
            tiles: blank_tiles.clone(),
        };
        let mut grass_map = Map {
            width,
            height,
            tiles: blank_tiles.clone(),
        };
        let mut grass_dense_map = Map {
            width,
            height,
            tiles: blank_tiles,
        };
        // Generate the island
        cellular_automata_map(&mut map, &mut rng, 9);
        // Generate less dense, outer grass map
        cellular_automata_map(&mut grass_map, &mut rng, 8);
        // Stamp the grass map's land tiles onto the island, scaled toward
        // the centre so grass stays inside the sand.
        let pct_grass = 0.80;
        let pct_sand = (1_f64 - pct_grass) / 2_f64;
        for x in 0usize..(grass_map.width as usize - 1usize) {
            // BUG FIX: this loop was bounded by `grass_map.width`; the inner
            // Vec of tiles[x] is height-sized, so iterate over the height
            // (skipped rows or panicked on non-square maps).
            for y in 0usize..(grass_map.height as usize - 1usize) {
                if grass_map.tiles[x][y] != TileType::Water
                    && grass_map.tiles[x][y] != TileType::Void
                {
                    let scaled_x =
                        (x as f64 * pct_grass + grass_map.width as f64 * pct_sand) as usize;
                    let scaled_y =
                        (y as f64 * pct_grass + grass_map.height as f64 * pct_sand) as usize;
                    if map.tiles[scaled_x][scaled_y] != TileType::Water {
                        map.tiles[scaled_x][scaled_y] = TileType::Grass;
                    }
                }
            }
        }
        // Generate a more dense, inner grass map
        cellular_automata_map(&mut grass_dense_map, &mut rng, 18);
        let pct_grass = 0.50;
        let pct_sand = (1_f64 - pct_grass) / 2_f64;
        for x in 0..width - 1 {
            for y in 0..height - 1 {
                if grass_dense_map.tiles[x as usize][y as usize] != TileType::Water
                    && grass_dense_map.tiles[x as usize][y as usize] != TileType::Void
                {
                    let scaled_x =
                        (x as f64 * pct_grass + grass_dense_map.width as f64 * pct_sand) as usize;
                    let scaled_y =
                        (y as f64 * pct_grass + grass_dense_map.height as f64 * pct_sand) as usize;
                    if map.tiles[scaled_x][scaled_y] != TileType::Water {
                        map.tiles[scaled_x][scaled_y] = TileType::Grass;
                    }
                }
            }
        }
        map
    }
}
/// Given x+y, return true if an entity can walk there. False if it's water or outside map.
pub fn valid_walking_location(map: &Map, wants_to_move: &WantsToMoveTo) -> bool {
    // Reject anything outside [0, width) x [0, height).
    let in_bounds = (0..map.width).contains(&wants_to_move.x)
        && (0..map.height).contains(&wants_to_move.y);
    if !in_bounds {
        return false;
    }
    // Water and void tiles are both impassable.
    !matches!(
        map.tiles[wants_to_move.x as usize][wants_to_move.y as usize],
        TileType::Water | TileType::Void
    )
}
/// Return a random location whose tile matches `tile_type`; with `None`,
/// any tile that is neither Void nor Water qualifies.
///
/// NOTE(review): this loops forever if no matching tile exists — callers
/// must ensure the map contains at least one.
pub fn get_random_location_of_tile(
    map: &Map,
    rng: &mut Rand32,
    tile_type: Option<TileType>,
) -> Location {
    let mut x;
    let mut y;
    // Right side of island is where we spawn, so limit x range search for Sand
    let x_range = match tile_type {
        Some(TileType::Sand) => Range {
            start: (map.width as f64 * 0.90) as u32,
            end: map.width as u32 - 1,
        },
        _ => Range {
            start: 1 as u32,
            end: map.width as u32 - 1,
        },
    };
    // BUG FIX: the y range was built from `map.width`; tiles[x][y] indexes a
    // height-sized inner Vec, so sample y from the map height instead
    // (mis-sampled or indexed out of bounds on non-square maps).
    let y_range = Range {
        start: 1 as u32,
        end: map.height as u32 - 1,
    };
    loop {
        x = rng.rand_range(x_range.clone());
        y = rng.rand_range(y_range.clone());
        let cur_tile = map.tiles[x as usize][y as usize];
        if tile_type.is_none() && cur_tile != TileType::Void && cur_tile != TileType::Water {
            break;
        } else if tile_type.is_some() && cur_tile == tile_type.unwrap() {
            break;
        }
    }
    Location {
        x: x as i32,
        y: y as i32,
    }
}
/// Modify the tiles structure to create something that looks like an island.
///
/// First randomly seeds interior non-void tiles with Sand, then applies
/// `iterations` rounds of a majority rule: a tile with more than 4
/// water-ish neighbours (Void or Water by discriminant) becomes Water,
/// otherwise Sand.
fn cellular_automata_map(map: &mut Map, rng: &mut Rand32, iterations: i32) {
    // Seeding pass.
    // BUG FIX (here and in the iteration pass below): the y loop was bounded
    // by `map.width`; tiles[x] is a height-sized Vec, so bound it by
    // `map.height` (skipped rows or panicked on non-square maps).
    for x in 0usize..(map.width as usize - 1usize) {
        for y in 0usize..(map.height as usize - 1usize) {
            if x < 1 || x as i32 > map.width - 1 || y < 1 || y as i32 > map.height - 1 {
                continue;
            }
            if map.tiles[x][y] == TileType::Void {
                continue; // We should never place anything on the void
            }
            if rng.rand_float() > 0.55 {
                map.tiles[x][y] = TileType::Sand;
            }
        }
    }
    // Iteratively apply cellular automata rules
    for _ in 0..iterations {
        // Double-buffer: read neighbours from map.tiles, write into a copy.
        let mut new_tiles = map.tiles.clone();
        for x in 0usize..(map.width as usize - 1usize) {
            for y in 0usize..(map.height as usize - 1usize) {
                if map.tiles[x][y] == TileType::Void {
                    continue; // We should never place anything on the void
                }
                if x < 1 || x as i32 > map.width - 1 || y < 1 || y as i32 > map.height - 1 {
                    continue;
                }
                // Count the 8 neighbours whose discriminant is <= Water
                // (i.e. Void or Water).
                let mut neighbors = 0;
                if map.tiles[x - 1][y] as i32 <= TileType::Water as i32 {
                    neighbors += 1
                }
                if map.tiles[x + 1][y] as i32 <= TileType::Water as i32 {
                    neighbors += 1
                }
                if map.tiles[x][y - 1] as i32 <= TileType::Water as i32 {
                    neighbors += 1
                }
                if map.tiles[x][y + 1] as i32 <= TileType::Water as i32 {
                    neighbors += 1
                }
                if map.tiles[x - 1][y - 1] as i32 <= TileType::Water as i32 {
                    neighbors += 1
                }
                if map.tiles[x + 1][y - 1] as i32 <= TileType::Water as i32 {
                    neighbors += 1
                }
                if map.tiles[x - 1][y + 1] as i32 <= TileType::Water as i32 {
                    neighbors += 1
                }
                if map.tiles[x + 1][y + 1] as i32 <= TileType::Water as i32 {
                    neighbors += 1
                }
                if neighbors > 4 {
                    new_tiles[x][y] = TileType::Water;
                } else {
                    new_tiles[x][y] = TileType::Sand;
                }
            }
        }
        // Idiom fix: move the buffer instead of cloning it a second time
        // (clippy::redundant_clone).
        map.tiles = new_tiles;
    }
}
|
#![no_std]
#![cfg_attr(docsrs, feature(doc_cfg))]
#![doc = include_str!("../README.md")]
#![doc(
html_logo_url = "https://raw.githubusercontent.com/RustCrypto/meta/master/logo.svg",
html_favicon_url = "https://raw.githubusercontent.com/RustCrypto/meta/master/logo.svg"
)]
#![deny(unsafe_code)]
#![warn(missing_docs, rust_2018_idioms)]
//! # Usage
//!
//! Simple usage (allocating, no associated data):
//!
#![cfg_attr(all(feature = "getrandom", feature = "std"), doc = "```")]
#![cfg_attr(not(all(feature = "getrandom", feature = "std")), doc = "```ignore")]
//! # fn main() -> Result<(), Box<dyn std::error::Error>> {
//! use aes::Aes256;
//! use eax::{
//! aead::{Aead, AeadCore, KeyInit, OsRng, generic_array::GenericArray},
//! Eax, Nonce
//! };
//!
//! pub type Aes256Eax = Eax<Aes256>;
//!
//! let key = Aes256Eax::generate_key(&mut OsRng);
//! let cipher = Aes256Eax::new(&key);
//! let nonce = Aes256Eax::generate_nonce(&mut OsRng); // 128-bits; unique per message
//! let ciphertext = cipher.encrypt(&nonce, b"plaintext message".as_ref())?;
//! let plaintext = cipher.decrypt(&nonce, ciphertext.as_ref())?;
//! assert_eq!(&plaintext, b"plaintext message");
//! # Ok(())
//! # }
//! ```
//!
//! ## In-place Usage (eliminates `alloc` requirement)
//!
//! This crate has an optional `alloc` feature which can be disabled in e.g.
//! microcontroller environments that don't have a heap.
//!
//! The [`AeadInPlace::encrypt_in_place`] and [`AeadInPlace::decrypt_in_place`]
//! methods accept any type that impls the [`aead::Buffer`] trait which
//! contains the plaintext for encryption or ciphertext for decryption.
//!
//! Note that if you enable the `heapless` feature of this crate,
//! you will receive an impl of [`aead::Buffer`] for `heapless::Vec`
//! (re-exported from the [`aead`] crate as [`aead::heapless::Vec`]),
//! which can then be passed as the `buffer` parameter to the in-place encrypt
//! and decrypt methods:
//!
//! ```
//! # #[cfg(feature = "heapless")]
//! # {
//! use aes::Aes256;
//! use eax::Eax;
//! use eax::aead::{
//! generic_array::GenericArray,
//! heapless::Vec,
//! AeadCore, AeadInPlace, KeyInit, OsRng
//! };
//!
//! pub type Aes256Eax = Eax<Aes256>;
//!
//! let key = Aes256Eax::generate_key(&mut OsRng);
//! let cipher = Aes256Eax::new(&key);
//!
//! let nonce = Aes256Eax::generate_nonce(&mut OsRng); // 128-bits; unique per message
//!
//! let mut buffer: Vec<u8, 128> = Vec::new();
//! buffer.extend_from_slice(b"plaintext message");
//!
//! // Encrypt `buffer` in-place, replacing the plaintext contents with ciphertext
//! cipher.encrypt_in_place(&nonce, b"", &mut buffer).expect("encryption failure!");
//!
//! // `buffer` now contains the message ciphertext
//! assert_ne!(&buffer, b"plaintext message");
//!
//! // Decrypt `buffer` in-place, replacing its ciphertext context with the original plaintext
//! cipher.decrypt_in_place(&nonce, b"", &mut buffer).expect("decryption failure!");
//! assert_eq!(&buffer, b"plaintext message");
//! # }
//! ```
//!
//! Similarly, enabling the `arrayvec` feature of this crate will provide an impl of
//! [`aead::Buffer`] for `arrayvec::ArrayVec` (re-exported from the [`aead`] crate as
//! [`aead::arrayvec::ArrayVec`]).
//!
//! ## Custom Tag Length
//!
//! The tag for eax is usually 16 bytes long but it can be shortened if needed.
//! The second generic argument of `Eax` can be set to the tag length:
//!
//! ```
//! # #[cfg(feature = "heapless")]
//! # {
//! use aes::Aes256;
//! use eax::Eax;
//! use eax::aead::{AeadInPlace, KeyInit, generic_array::GenericArray};
//! use eax::aead::heapless::Vec;
//! use eax::aead::consts::{U8, U128};
//!
//! let key = GenericArray::from_slice(b"an example very very secret key.");
//! let cipher = Eax::<Aes256, U8>::new(key);
//!
//! let nonce = GenericArray::from_slice(b"my unique nonces"); // 128-bits; unique per message
//!
//! let mut buffer: Vec<u8, 128> = Vec::new();
//! buffer.extend_from_slice(b"plaintext message");
//!
//! // Encrypt `buffer` in-place, replacing the plaintext contents with ciphertext
//! let tag = cipher.encrypt_in_place_detached(nonce, b"", &mut buffer).expect("encryption failure!");
//!
//! // The tag has only 8 bytes, compared to the usual 16 bytes
//! assert_eq!(tag.len(), 8);
//!
//! // `buffer` now contains the message ciphertext
//! assert_ne!(&buffer, b"plaintext message");
//!
//! // Decrypt `buffer` in-place, replacing its ciphertext context with the original plaintext
//! cipher.decrypt_in_place_detached(nonce, b"", &mut buffer, &tag).expect("decryption failure!");
//! assert_eq!(&buffer, b"plaintext message");
//! # }
//! ```
pub use aead::{self, AeadCore, AeadInPlace, Error, Key, KeyInit, KeySizeUser};
pub use cipher;
use cipher::{
consts::{U0, U16},
generic_array::{functional::FunctionalSequence, GenericArray},
BlockCipher, BlockEncrypt, InnerIvInit, StreamCipherCore,
};
use cmac::{digest::Output, Cmac, Mac};
use core::marker::PhantomData;
mod traits;
use traits::TagSize;
// TODO(review): confirm these 2^36 limits against the EAX specification —
// they are asserted by the encrypt/decrypt length checks below but not
// derived anywhere in this file.
/// Maximum length of associated data
pub const A_MAX: u64 = 1 << 36;
/// Maximum length of plaintext
pub const P_MAX: u64 = 1 << 36;
/// Maximum length of ciphertext
pub const C_MAX: u64 = (1 << 36) + 16;
/// EAX nonces
pub type Nonce<NonceSize> = GenericArray<u8, NonceSize>;
/// EAX tags
pub type Tag<TagSize> = GenericArray<u8, TagSize>;
pub mod online;
/// Counter mode with a 128-bit big endian counter.
/// Used as the keystream generator in both encrypt and decrypt paths.
type Ctr128BE<C> = ctr::CtrCore<C, ctr::flavors::Ctr128BE>;
/// EAX: generic over an underlying block cipher implementation.
///
/// This type is generic to support substituting alternative cipher
/// implementations.
///
/// ## Type parameters
/// - `Cipher`: block cipher.
/// - `M`: size of MAC tag, valid values: up to `U16`.
#[derive(Clone)]
pub struct Eax<Cipher, M = U16>
where
    Cipher: BlockCipher<BlockSize = U16> + BlockEncrypt + Clone + KeyInit,
    M: TagSize,
{
    /// Encryption key
    key: Key<Cipher>,
    // Zero-sized marker tying the tag-length parameter `M` to the struct
    // without storing a value of it.
    _tag_size: PhantomData<M>,
}
// The EAX key is exactly the underlying block cipher's key.
impl<Cipher, M> KeySizeUser for Eax<Cipher, M>
where
    Cipher: BlockCipher<BlockSize = U16> + BlockEncrypt + Clone + KeyInit,
    M: TagSize,
{
    type KeySize = Cipher::KeySize;
}
impl<Cipher, M> KeyInit for Eax<Cipher, M>
where
    Cipher: BlockCipher<BlockSize = U16> + BlockEncrypt + Clone + KeyInit,
    M: TagSize,
{
    // Stores a copy of the key; cipher instances are constructed lazily
    // per operation (see `encrypt_in_place_detached`).
    fn new(key: &Key<Cipher>) -> Self {
        Self {
            key: key.clone(),
            _tag_size: PhantomData,
        }
    }
}
impl<Cipher, M> AeadCore for Eax<Cipher, M>
where
    Cipher: BlockCipher<BlockSize = U16> + BlockEncrypt + Clone + KeyInit,
    M: TagSize,
{
    // Nonce length equals the cipher block size (16 bytes here, since
    // BlockSize is pinned to U16); tag length is the `M` parameter.
    type NonceSize = Cipher::BlockSize;
    type TagSize = M;
    type CiphertextOverhead = U0;
}
impl<Cipher, M> AeadInPlace for Eax<Cipher, M>
where
    Cipher: BlockCipher<BlockSize = U16> + BlockEncrypt + Clone + KeyInit,
    M: TagSize,
{
    // Encrypts `buffer` in place and returns the (possibly truncated) tag.
    fn encrypt_in_place_detached(
        &self,
        nonce: &Nonce<Self::NonceSize>,
        associated_data: &[u8],
        buffer: &mut [u8],
    ) -> Result<Tag<M>, Error> {
        // Enforce the EAX length limits before touching the buffer.
        if buffer.len() as u64 > P_MAX || associated_data.len() as u64 > A_MAX {
            return Err(Error);
        }
        // https://crypto.stackexchange.com/questions/26948/eax-cipher-mode-with-nonce-equal-header
        // has an explanation of eax.
        // l = block cipher size = 128 (for AES-128) = 16 byte
        // 1. n ← OMAC(0 || Nonce)
        // (the 0 means the number zero in l bits)
        let n = Self::cmac_with_iv(&self.key, 0, nonce);
        // 2. h ← OMAC(1 || associated data)
        let h = Self::cmac_with_iv(&self.key, 1, associated_data);
        // 3. enc ← CTR(M) using n as iv
        Ctr128BE::<Cipher>::inner_iv_init(Cipher::new(&self.key), &n)
            .apply_keystream_partial(buffer.into());
        // 4. c ← OMAC(2 || enc)
        let c = Self::cmac_with_iv(&self.key, 2, buffer);
        // 5. tag ← n ^ h ^ c
        // (^ means xor)
        let full_tag = n.zip(h, |a, b| a ^ b).zip(c, |a, b| a ^ b);
        // Truncate the full 16-byte tag down to the configured M bytes.
        let tag = Tag::<M>::clone_from_slice(&full_tag[..M::to_usize()]);
        Ok(tag)
    }
    // Verifies the tag first, then decrypts `buffer` in place only on a
    // successful (constant-time) match.
    fn decrypt_in_place_detached(
        &self,
        nonce: &Nonce<Self::NonceSize>,
        associated_data: &[u8],
        buffer: &mut [u8],
        tag: &Tag<M>,
    ) -> Result<(), Error> {
        if buffer.len() as u64 > C_MAX || associated_data.len() as u64 > A_MAX {
            return Err(Error);
        }
        // 1. n ← OMAC(0 || Nonce)
        let n = Self::cmac_with_iv(&self.key, 0, nonce);
        // 2. h ← OMAC(1 || associated data)
        let h = Self::cmac_with_iv(&self.key, 1, associated_data);
        // 4. c ← OMAC(2 || enc)
        let c = Self::cmac_with_iv(&self.key, 2, buffer);
        // 5. tag ← n ^ h ^ c
        // (^ means xor)
        let expected_tag = n.zip(h, |a, b| a ^ b).zip(c, |a, b| a ^ b);
        // Compare only as many bytes as the caller's (possibly truncated) tag.
        let expected_tag = &expected_tag[..tag.len()];
        // Constant-time MAC comparison
        use subtle::ConstantTimeEq;
        if expected_tag.ct_eq(tag).into() {
            // Decrypt
            Ctr128BE::<Cipher>::inner_iv_init(Cipher::new(&self.key), &n)
                .apply_keystream_partial(buffer.into());
            Ok(())
        } else {
            // Tag mismatch: the buffer is left untouched (still ciphertext).
            Err(Error)
        }
    }
}
impl<Cipher, M> Eax<Cipher, M>
where
    Cipher: BlockCipher<BlockSize = U16> + BlockEncrypt + Clone + KeyInit,
    M: TagSize,
{
    /// CMAC/OMAC1
    ///
    /// To avoid constructing new buffers on the heap, an iv encoded into 16
    /// bytes is prepended inside this function.
    fn cmac_with_iv(
        key: &GenericArray<u8, Cipher::KeySize>,
        iv: u8,
        data: &[u8],
    ) -> Output<Cmac<Cipher>> {
        let mut mac = <Cmac<Cipher> as Mac>::new(key);
        // Feed 15 zero bytes then `iv`: the big-endian 16-byte encoding of
        // the tweak value, streamed instead of allocated.
        mac.update(&[0; 15]);
        mac.update(&[iv]);
        mac.update(data);
        mac.finalize().into_bytes()
    }
}
|
use crate::csv::csv_data::{reset_stream, CsvData, CsvStream, CsvType, CsvWrapper};
use csv::StringRecord;
use indexmap::map::IndexMap;
use log::debug;
use std::collections::{HashMap, HashSet};
use std::fmt::{Display, Formatter};
use std::io::{Read, Seek};
use std::num::{ParseFloatError, ParseIntError};
/// a record of the inferred types for columns in a CSV
#[derive(Debug)]
pub struct ColumnInference {
    // Header name -> inferred type, in header (insertion) order.
    pub columns_to_types: IndexMap<String, CsvType>,
    // Header name -> zero-based column position, in header order.
    pub columns_to_indexes: IndexMap<String, usize>,
}
impl Display for ColumnInference {
    /// Render one "column -> type" line per inferred column, in header order.
    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
        self.columns_to_types
            .iter()
            .try_for_each(|(column, inferred_type)| writeln!(f, "{} -> {}", column, inferred_type))
    }
}
/// Per-table collection of column inferences, keyed by table name.
#[derive(Debug)]
pub struct ColumnInferences {
    hashmap: HashMap<String, ColumnInference>,
}
impl ColumnInferences {
    /// Wrap a prebuilt map of table name -> inference.
    pub fn new(hashmap: HashMap<String, ColumnInference>) -> ColumnInferences {
        ColumnInferences { hashmap }
    }
}
impl Display for ColumnInferences {
    /// Render each table name followed by its indented column/type lines.
    /// Table order follows HashMap iteration order (unspecified).
    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
        for (table_name, inference) in &self.hashmap {
            writeln!(f, "{}:", table_name)?;
            for (column, inferred_type) in &inference.columns_to_types {
                writeln!(f, "\t{} -> {}", column, inferred_type)?;
            }
        }
        Ok(())
    }
}
impl ColumnInference {
    /// build inference from a CSV
    ///
    /// Scans all in-memory records once per header and records the widest
    /// type observed for each column.
    pub fn from_csv(csv: &CsvData) -> ColumnInference {
        let mut columns_to_types = IndexMap::with_capacity(8);
        let mut columns_to_indexes = IndexMap::with_capacity(8);
        for (i, header) in csv.headers.iter().enumerate() {
            let t = get_type_of_column(&mut csv.records.iter(), i);
            columns_to_types.insert(String::from(header), t);
            columns_to_indexes.insert(String::from(header), i);
        }
        debug!(
            "Inferred columns for file {}: {:?} ",
            csv.filename, columns_to_types
        );
        ColumnInference {
            columns_to_types,
            columns_to_indexes,
        }
    }
    /// build inference from a seekable CSV stream
    ///
    /// Rewinds the stream once per header, so the underlying file is
    /// re-read once for every column; reader errors are propagated via `?`.
    pub fn from_stream<A: Read + Seek>(
        csv: &mut CsvStream<A>,
    ) -> Result<ColumnInference, csv::Error> {
        let mut columns_to_types = IndexMap::with_capacity(8);
        let mut columns_to_indexes = IndexMap::with_capacity(8);
        let headers: Vec<String> = csv.headers.iter().map(String::from).collect();
        for (i, header) in headers.iter().enumerate() {
            // NOTE(review): a failed rewind panics here instead of returning
            // Err — confirm whether reset_stream errors should propagate.
            reset_stream(csv).unwrap();
            let mut records = csv.stream.records();
            let t = get_type_of_column_stream(&mut records, i)?;
            columns_to_types.insert(String::from(header), t);
            columns_to_indexes.insert(String::from(header), i);
        }
        debug!(
            "Inferred columns for file {}: {:?} ",
            csv.filename, columns_to_types
        );
        Ok(ColumnInference {
            columns_to_types,
            columns_to_indexes,
        })
    }
    /// build column 'inference' with every column artificially inferred as a String
    pub fn default_inference(headers: &StringRecord) -> ColumnInference {
        let mut columns_to_types = IndexMap::with_capacity(8);
        let mut columns_to_indexes = IndexMap::with_capacity(8);
        for (i, header) in headers.iter().enumerate() {
            columns_to_types.insert(String::from(header), CsvType::String);
            columns_to_indexes.insert(String::from(header), i);
        }
        ColumnInference {
            columns_to_types,
            columns_to_indexes,
        }
    }
    /// get the type of a column, referenced by its string name
    ///
    /// Returns `None` when the column name is unknown.
    pub fn get_type(&self, s: String) -> Option<&CsvType> {
        self.columns_to_types.get(s.as_str())
    }
}
/// Classify a single cell: try i64 first, then f64, else keep it a String.
/// Narrower numeric wins, so "1" is an Integer, not a Float.
fn parse(s: &str) -> CsvWrapper {
    if let Ok(i) = s.parse::<i64>() {
        CsvWrapper::Integer(i)
    } else if let Ok(f) = s.parse::<f64>() {
        CsvWrapper::Float(f)
    } else {
        CsvWrapper::String(String::from(s))
    }
}
/// Infer a column's type from all records: any String forces String;
/// a mix of Integer and Float widens to Float; exactly one observed type
/// wins; anything else (including an empty column) falls back to String.
fn get_type_of_column<'a, I: Iterator<Item = &'a StringRecord>>(
    csv: &mut I,
    index: usize,
) -> CsvType {
    let distinct_types: HashSet<_> = csv
        .map(|record| parse(record.get(index).unwrap()).get_type())
        .collect();
    if distinct_types.contains(&CsvType::String) {
        return CsvType::String;
    }
    if distinct_types.contains(&CsvType::Integer) && distinct_types.contains(&CsvType::Float) {
        return CsvType::Float;
    }
    match distinct_types.len() {
        1 => distinct_types.into_iter().next().unwrap(),
        _ => CsvType::String,
    }
}
/// Streaming variant of `get_type_of_column`: same widening rules, but
/// reads fallible records and short-circuits to String as soon as any
/// cell fails numeric parsing.
fn get_type_of_column_stream<I: Iterator<Item = csv::Result<StringRecord>>>(
    csv: &mut I,
    index: usize,
) -> csv::Result<CsvType> {
    let mut seen = HashSet::with_capacity(8);
    for record in csv {
        let row = record?;
        seen.insert(parse(row.get(index).unwrap()).get_type());
        // A String anywhere decides the column; stop scanning early.
        if seen.contains(&CsvType::String) {
            return Ok(CsvType::String);
        }
    }
    let found_type = if seen.contains(&CsvType::String) {
        debug!("Distinct types contains String");
        CsvType::String
    } else if seen.contains(&CsvType::Integer) && seen.contains(&CsvType::Float) {
        debug!("Distinct types contains Integer and Float");
        CsvType::Float
    } else if seen.len() == 1 {
        debug!("Distinct types contains single value");
        seen.iter().next().unwrap().to_owned()
    } else {
        debug!("all else");
        CsvType::String
    };
    debug!("distinct types {:?} for index {}", seen, index);
    Ok(found_type)
}
#[cfg(test)]
mod test {
    use super::*;
    // `parse` picks the narrowest numeric representation first.
    #[test]
    fn it_should_parse_integers() {
        assert_eq!(parse("1"), CsvWrapper::Integer(1));
        assert_eq!(parse("-1"), CsvWrapper::Integer(-1));
    }
    #[test]
    fn it_should_parse_strings() {
        assert_eq!(parse("foo"), CsvWrapper::String(String::from("foo")));
        assert_eq!(parse("bar"), CsvWrapper::String(String::from("bar")));
    }
    #[test]
    fn it_should_parse_floats() {
        assert_eq!(parse("1.00000009"), CsvWrapper::Float(1.00000009f64));
    }
    // All-integer column stays Integer.
    #[test]
    fn it_should_recognize_integer_column() {
        let filename: String = String::from("foo.csv");
        let headers = StringRecord::from(vec!["bar"]);
        let records = vec![StringRecord::from(vec!["1"]), StringRecord::from(vec!["2"])];
        let inference = ColumnInference::from_csv(&CsvData {
            records,
            headers,
            filename,
        });
        assert_eq!(
            inference.get_type(String::from("bar")),
            Some(&CsvType::Integer)
        );
    }
    // All-float column stays Float.
    #[test]
    fn it_should_recognize_float_column() {
        let filename: String = String::from("foo.csv");
        let headers = StringRecord::from(vec!["bar"]);
        let records = vec![
            StringRecord::from(vec!["1.0"]),
            StringRecord::from(vec!["2.0"]),
        ];
        let inference = ColumnInference::from_csv(&CsvData {
            records,
            headers,
            filename,
        });
        assert_eq!(
            inference.get_type(String::from("bar")),
            Some(&CsvType::Float)
        );
    }
    // Mixed Integer + Float widens to Float.
    #[test]
    fn it_should_classify_mixed_floats_as_float() {
        let filename: String = String::from("foo.csv");
        let headers = StringRecord::from(vec!["foo", "bar"]);
        let records = vec![
            StringRecord::from(vec!["entry1", "1"]),
            StringRecord::from(vec!["entry2", "2.0"]),
        ];
        let inference = ColumnInference::from_csv(&CsvData {
            records,
            headers,
            filename,
        });
        assert_eq!(
            inference.get_type(String::from("foo")),
            Some(&CsvType::String)
        );
        assert_eq!(
            inference.get_type(String::from("bar")),
            Some(&CsvType::Float)
        );
    }
    // Any String cell forces the whole column to String.
    #[test]
    fn it_should_classify_any_column_with_string_as_string() {
        let filename: String = String::from("foo.csv");
        let headers = StringRecord::from(vec!["foo", "bar"]);
        let records = vec![
            StringRecord::from(vec!["entry1", "1"]),
            StringRecord::from(vec!["entry2", "2.0"]),
            StringRecord::from(vec!["entry3", "foobar"]),
        ];
        let inference = ColumnInference::from_csv(&CsvData {
            records,
            headers,
            filename,
        });
        assert_eq!(
            inference.get_type(String::from("foo")),
            Some(&CsvType::String)
        );
        assert_eq!(
            inference.get_type(String::from("bar")),
            Some(&CsvType::String)
        );
    }
    // default_inference treats every column as String regardless of data.
    #[test]
    fn it_should_use_default_column_type_if_inference_disabled() {
        let headers = StringRecord::from(vec!["foo", "bar"]);
        let inference = ColumnInference::default_inference(&headers);
        assert_eq!(
            inference.get_type(String::from("foo")),
            Some(&CsvType::String)
        );
        assert_eq!(
            inference.get_type(String::from("bar")),
            Some(&CsvType::String)
        );
    }
}
|
extern crate easy_ll;
extern crate rustyline;
extern crate weld;
use rustyline::error::ReadlineError;
use rustyline::Editor;
use std::env;
use std::path::PathBuf;
use weld::ast::ExprKind::*;
use weld::llvm::LlvmGenerator;
use weld::macro_processor;
use weld::parser::*;
use weld::pretty_print::*;
use weld::transforms;
use weld::type_inference::*;
/// Interactive REPL for the Weld language: each input line is parsed, run
/// through macro substitution, apply-inlining, and type inference, then —
/// if it is a lambda — compiled to LLVM, with every stage printed.
fn main() {
    // NOTE(review): env::home_dir is deprecated (unreliable on Windows);
    // kept here since failure falls back to a relative history file.
    let home_path = env::home_dir().unwrap_or(PathBuf::new());
    let history_file_path = home_path.join(".weld_history");
    let history_file_path = history_file_path.to_str().unwrap_or(".weld_history");

    let mut rl = Editor::<()>::new();
    // Loading history fails on the very first run (the file doesn't exist
    // yet); that is expected, so the error is deliberately ignored instead
    // of the previous `.unwrap()`, which panicked on a fresh install.
    let _ = rl.load_history(&history_file_path);

    loop {
        let raw_readline = rl.readline(">> ");
        let readline;
        match raw_readline {
            Ok(raw_readline) => {
                rl.add_history_entry(&raw_readline);
                readline = raw_readline;
            },
            Err(ReadlineError::Interrupted) => {
                println!("Exiting!");
                break
            },
            Err(ReadlineError::Eof) => {
                println!("Exiting!");
                break
            },
            Err(err) => {
                println!("Error: {:?}", err);
                break
            }
        }
        let trimmed = readline.trim();
        if trimmed == "" {
            continue;
        }
        // Stage 1: parse the raw text into a program structure.
        let program = parse_program(trimmed);
        if let Err(ref e) = program {
            println!("Error during parsing: {:?}", e);
            continue;
        }
        let program = program.unwrap();
        println!("Raw structure:\n{:?}\n", program);
        // Stage 2: expand macros into a plain expression.
        let expr = macro_processor::process_program(&program);
        if let Err(ref e) = expr {
            println!("Error during macro substitution: {}", e);
            continue;
        }
        let mut expr = expr.unwrap();
        println!("After macro substitution:\n{}\n", print_expr(&expr));
        // Stage 3: inline applications (failure here is non-fatal).
        if let Err(ref e) = transforms::inline_apply(&mut expr) {
            println!("Error during inlining applies: {}\n", e);
        }
        println!("After inlining applies:\n{}\n", print_expr(&expr));
        // Stage 4: infer types in place.
        if let Err(ref e) = infer_types(&mut expr) {
            println!("Error during type inference: {}\n", e);
            println!("Partially inferred types:\n{}\n", print_typed_expr(&expr));
            continue;
        }
        println!("After type inference:\n{}\n", print_typed_expr(&expr));
        println!("Expression type: {}\n", print_type(&expr.ty));
        let expr = expr.to_typed().unwrap();
        // Stage 5: only lambdas can be compiled to an LLVM entry point.
        if let Lambda(ref args, ref body) = expr.kind {
            let mut generator = LlvmGenerator::new();
            if let Err(ref e) = generator.add_function_on_pointers("run", args, body) {
                println!("Error during LLVM code gen:\n{}\n", e);
                continue;
            }
            let llvm_code = generator.result();
            println!("LLVM code:\n{}\n", llvm_code);
            if let Err(ref e) = easy_ll::compile_module(&llvm_code) {
                println!("Error during LLVM compilation:\n{}\n", e);
                continue;
            }
            println!("LLVM module compiled successfully\n")
        } else {
            println!("Expression is not a function, so not compiling to LLVM.\n")
        }
    }
    // Best-effort save: report instead of panicking on an unwritable home dir.
    if let Err(e) = rl.save_history(&history_file_path) {
        println!("Error saving history: {:?}", e);
    }
}
|
#![warn(unused_crate_dependencies)]
#![warn(clippy::pedantic)]
#![warn(clippy::cargo)]
#![allow(clippy::module_name_repetitions)]
use crate::layers::{CliLayer, CliLayerError, DepsLayer, DepsLayerError};
use crate::yarn::Yarn;
use heroku_nodejs_utils::inv::Inventory;
use heroku_nodejs_utils::package_json::{PackageJson, PackageJsonError};
use heroku_nodejs_utils::vrs::{Requirement, VersionError};
use libcnb::build::{BuildContext, BuildResult, BuildResultBuilder};
use libcnb::data::build_plan::BuildPlanBuilder;
use libcnb::data::launch::{LaunchBuilder, ProcessBuilder};
use libcnb::data::{layer_name, process_type};
use libcnb::detect::{DetectContext, DetectResult, DetectResultBuilder};
use libcnb::generic::GenericMetadata;
use libcnb::generic::GenericPlatform;
use libcnb::layer_env::Scope;
use libcnb::{buildpack_main, Buildpack, Env};
use libherokubuildpack::log::{log_error, log_header, log_info};
use thiserror::Error;
#[cfg(test)]
use libcnb_test as _;
#[cfg(test)]
use test_support as _;
#[cfg(test)]
use ureq as _;
mod cfg;
mod cmd;
mod layers;
mod yarn;
/// Embedded inventory of known yarn CLI releases, parsed as TOML at build time.
const INVENTORY: &str = include_str!("../inventory.toml");
/// Yarn version range used when package.json declares no yarn engine range.
const DEFAULT_YARN_REQUIREMENT: &str = "1.22.x";
pub(crate) struct YarnBuildpack;
impl Buildpack for YarnBuildpack {
type Platform = GenericPlatform;
type Metadata = GenericMetadata;
type Error = YarnBuildpackError;
fn detect(&self, context: DetectContext<Self>) -> libcnb::Result<DetectResult, Self::Error> {
context
.app_dir
.join("yarn.lock")
.exists()
.then(|| {
DetectResultBuilder::pass()
.build_plan(
BuildPlanBuilder::new()
.provides("yarn")
.requires("yarn")
.provides("node_modules")
.requires("node_modules")
.requires("node")
.build(),
)
.build()
})
.unwrap_or_else(|| DetectResultBuilder::fail().build())
}
fn build(&self, context: BuildContext<Self>) -> libcnb::Result<BuildResult, Self::Error> {
let mut env = Env::from_current();
let pkg_json = PackageJson::read(context.app_dir.join("package.json"))
.map_err(YarnBuildpackError::PackageJson)?;
let yarn_version = match cmd::yarn_version(&env) {
// Install yarn if it's not present.
Err(cmd::Error::Spawn(_)) => {
log_header("Detecting yarn CLI version to install");
let inventory: Inventory =
toml::from_str(INVENTORY).map_err(YarnBuildpackError::InventoryParse)?;
let requested_yarn_cli_range = match cfg::requested_yarn_range(&pkg_json) {
None => {
log_info("No yarn engine range detected in package.json, using default ({DEFAULT_YARN_REQUIREMENT})");
Requirement::parse(DEFAULT_YARN_REQUIREMENT)
.map_err(YarnBuildpackError::YarnDefaultParse)?
}
Some(requirement) => {
log_info(format!(
"Detected yarn engine version range {requirement} in package.json"
));
requirement
}
};
let yarn_cli_release = inventory.resolve(&requested_yarn_cli_range).ok_or(
YarnBuildpackError::YarnVersionResolve(requested_yarn_cli_range),
)?;
log_info(format!(
"Resolved yarn CLI version: {}",
yarn_cli_release.version
));
log_header("Installing yarn CLI");
let dist_layer = context.handle_layer(
layer_name!("dist"),
CliLayer {
release: yarn_cli_release.clone(),
},
)?;
env = dist_layer.env.apply(Scope::Build, &env);
cmd::yarn_version(&env).map_err(YarnBuildpackError::YarnVersionDetect)?
}
// Use the existing yarn installation if it is present.
Ok(version) => version,
err => err.map_err(YarnBuildpackError::YarnVersionDetect)?,
};
let yarn = Yarn::from_major(yarn_version.major())
.ok_or_else(|| YarnBuildpackError::YarnVersionUnsupported(yarn_version.major()))?;
log_info(format!("Yarn CLI operating in yarn {yarn_version} mode."));
log_header("Setting up yarn dependency cache");
let zero_install = cfg::cache_populated(
&cmd::yarn_get_cache(&yarn, &env).map_err(YarnBuildpackError::YarnCacheGet)?,
);
if zero_install {
log_info("Yarn zero-install detected. Skipping dependency cache.");
} else {
let deps_layer =
context.handle_layer(layer_name!("deps"), DepsLayer { yarn: yarn.clone() })?;
cmd::yarn_set_cache(&yarn, &deps_layer.path.join("cache"), &env)
.map_err(YarnBuildpackError::YarnCacheSet)?;
}
log_header("Installing dependencies");
cmd::yarn_install(&yarn, zero_install, &env).map_err(YarnBuildpackError::YarnInstall)?;
log_header("Running scripts");
let scripts = cfg::get_build_scripts(&pkg_json);
if scripts.is_empty() {
log_info("No build scripts found");
} else {
for script in scripts {
log_info(format!("Running `{script}` script"));
cmd::yarn_run(&env, &script).map_err(YarnBuildpackError::BuildScript)?;
}
}
if cfg::has_start_script(&pkg_json) {
BuildResultBuilder::new()
.launch(
LaunchBuilder::new()
.process(
ProcessBuilder::new(process_type!("web"), "yarn")
.arg("start")
.default(true)
.build(),
)
.build(),
)
.build()
} else {
BuildResultBuilder::new().build()
}
}
fn on_error(&self, error: libcnb::Error<Self::Error>) {
match error {
libcnb::Error::BuildpackError(bp_err) => {
let err_string = bp_err.to_string();
match bp_err {
YarnBuildpackError::BuildScript(_) => {
log_error("Yarn build script error", err_string);
}
YarnBuildpackError::CliLayer(_) => {
log_error("Yarn distribution layer error", err_string);
}
YarnBuildpackError::DepsLayer(_) => {
log_error("Yarn dependency layer error", err_string);
}
YarnBuildpackError::InventoryParse(_) => {
log_error("Yarn inventory parse error", err_string);
}
YarnBuildpackError::PackageJson(_) => {
log_error("Yarn package.json error", err_string);
}
YarnBuildpackError::YarnCacheSet(_) | YarnBuildpackError::YarnCacheGet(_) => {
log_error("Yarn cache error", err_string);
}
YarnBuildpackError::YarnInstall(_) => {
log_error("Yarn install error", err_string);
}
YarnBuildpackError::YarnVersionDetect(_)
| YarnBuildpackError::YarnVersionResolve(_)
| YarnBuildpackError::YarnVersionUnsupported(_)
| YarnBuildpackError::YarnDefaultParse(_) => {
log_error("Yarn version error", err_string);
}
}
}
err => {
log_error("Yarn internal buildpack error", err.to_string());
}
}
}
}
/// All failure modes of the Yarn buildpack; each variant carries its cause
/// and maps to a distinct user-facing heading in `on_error`.
#[derive(Error, Debug)]
pub(crate) enum YarnBuildpackError {
    #[error("Couldn't run build script: {0}")]
    BuildScript(cmd::Error),
    #[error("{0}")]
    CliLayer(#[from] CliLayerError),
    #[error("{0}")]
    DepsLayer(#[from] DepsLayerError),
    #[error("Couldn't parse yarn inventory: {0}")]
    InventoryParse(toml::de::Error),
    #[error("Couldn't parse package.json: {0}")]
    PackageJson(PackageJsonError),
    #[error("Couldn't read yarn cache folder: {0}")]
    YarnCacheGet(cmd::Error),
    #[error("Couldn't set yarn cache folder: {0}")]
    YarnCacheSet(cmd::Error),
    #[error("Yarn install error: {0}")]
    YarnInstall(cmd::Error),
    #[error("Couldn't determine yarn version: {0}")]
    YarnVersionDetect(cmd::Error),
    #[error("Unsupported yarn version: {0}")]
    YarnVersionUnsupported(u64),
    #[error("Couldn't resolve yarn version requirement ({0}) to a known yarn version")]
    YarnVersionResolve(Requirement),
    #[error("Couldn't parse yarn default version range: {0}")]
    YarnDefaultParse(VersionError),
}
impl From<YarnBuildpackError> for libcnb::Error<YarnBuildpackError> {
fn from(e: YarnBuildpackError) -> Self {
libcnb::Error::BuildpackError(e)
}
}
buildpack_main!(YarnBuildpack);
|
use crate::{BoxFuture, Entity, Result};
/// Removal operations for a store of entities of type `E`.
pub trait Remove<E: Entity> {
    /// Removes the entity identified by `k`, passing `track` to the
    /// entity's tracking context.
    fn remove<'a>(&'a mut self, k: E::Key, track: &'a E::TrackCtx) -> BoxFuture<'a, Result<()>>;
    /// Removes every entity whose key is yielded by `keys`; resolves to a
    /// count (presumably the number of entities removed — confirm with impls).
    fn remove_all<'a, K>(
        &'a mut self,
        keys: K,
        track: &'a E::TrackCtx,
    ) -> BoxFuture<'a, Result<usize>>
    where
        K: 'a,
        K: IntoIterator<Item = E::Key> + Send,
        K::IntoIter: Send;
}
|
use super::*;
/// Packs `(src, dst)` into one `EdgeT`: `src` in the high bits, `dst` in
/// the low `node_bits` bits. Assumes both ids fit in `node_bits` bits.
#[inline(always)]
pub(crate) fn encode_edge(src: NodeT, dst: NodeT, node_bits: u8) -> EdgeT {
    ((src as EdgeT) << node_bits) | dst as EdgeT
}
/// Encodes the self-pair `(node, node)` into an edge word.
/// NOTE(review): despite the name, this packs `node` into BOTH halves
/// rather than filling the low bits with the maximum value — confirm that
/// callers rely on exactly this bound when scanning the edge space.
#[inline(always)]
pub(crate) fn encode_max_edge(node: NodeT, node_bits: u8) -> EdgeT {
    ((node as EdgeT) << node_bits) | node as EdgeT
}
/// Splits an encoded edge word back into `(src, dst)`. `node_bit_mask` is
/// expected to cover the low `node_bits` bits (see `encode_edge`).
#[inline(always)]
pub(crate) fn decode_edge(edge: u64, node_bits: u8, node_bit_mask: u64) -> (NodeT, NodeT) {
    (
        (edge >> node_bits) as NodeT,
        (edge & node_bit_mask) as NodeT,
    )
}
#[inline(always)]
pub(crate) fn get_node_bits(top_node: NodeT) -> u8 {
(1.0 + top_node as f64).log2().ceil() as u8
}
impl Graph {
    /// Packs `(src, dst)` into one edge word using this graph's bit width.
    #[inline(always)]
    pub(crate) fn encode_edge(&self, src: NodeT, dst: NodeT) -> u64 {
        encode_edge(src, dst, self.node_bits)
    }
    /// Inverse of `encode_edge`.
    #[inline(always)]
    pub(crate) fn decode_edge(&self, edge: u64) -> (NodeT, NodeT) {
        decode_edge(edge, self.node_bits, self.node_bit_mask)
    }
    /// Returns the `(src, dst)` endpoints of `edge_id`, preferring the
    /// explicit source/destination vectors when both are materialized and
    /// otherwise decoding from the compressed edge set.
    #[inline(always)]
    pub(crate) fn get_edge_from_edge_id(&self, edge_id: EdgeT) -> (NodeT, NodeT) {
        if let (Some(sources), Some(destinations)) = (&self.sources, &self.destinations) {
            return (sources[edge_id as usize], destinations[edge_id as usize]);
        }
        self.decode_edge(self.edges.unchecked_select(edge_id))
    }
    /// Looks up the edge id for `(src, dst)`, or `None` when absent.
    #[inline(always)]
    pub(crate) fn get_edge_id_from_tuple(&self, src: NodeT, dst: NodeT) -> Option<EdgeT> {
        self.edges
            .rank(self.encode_edge(src, dst))
            .map(|value| value as EdgeT)
    }
    /// Like `get_edge_id_from_tuple`, but via the unchecked rank — the
    /// caller must guarantee the edge exists.
    #[inline(always)]
    pub(crate) fn get_unchecked_edge_id_from_tuple(&self, src: NodeT, dst: NodeT) -> EdgeT {
        self.edges.unchecked_rank(self.encode_edge(src, dst)) as EdgeT
    }
    /// Maps `source_id` onto a unique source node, wrapping modulo the
    /// number of source nodes.
    #[inline(always)]
    pub(crate) fn get_unique_source(&self, source_id: NodeT) -> NodeT {
        self.unique_sources
            .unchecked_select((source_id % self.get_source_nodes_number()) as u64) as NodeT
    }
}
|
use std::{collections::HashMap, convert::TryInto, fmt::Debug, ops::Deref};
use futures::{future::BoxFuture, stream::TryStreamExt, FutureExt};
use serde::{de::Deserializer, Deserialize};
use crate::{
bson::{doc, to_bson, Bson, Deserializer as BsonDeserializer, Document},
client::session::TransactionState,
error::Result,
options::{
AggregateOptions,
CollectionOptions,
CountOptions,
CreateCollectionOptions,
DatabaseOptions,
DeleteOptions,
DistinctOptions,
DropCollectionOptions,
DropIndexOptions,
EstimatedDocumentCountOptions,
FindOneAndDeleteOptions,
FindOneAndReplaceOptions,
FindOneAndUpdateOptions,
FindOneOptions,
FindOptions,
IndexOptions,
InsertManyOptions,
InsertOneOptions,
ListCollectionsOptions,
ListDatabasesOptions,
ListIndexesOptions,
ReadConcern,
ReplaceOptions,
TransactionOptions,
UpdateModifications,
UpdateOptions,
},
selection_criteria::{ReadPreference, SelectionCriteria},
test::{assert_matches, log_uncaptured, FailPoint, TestClient},
ClientSession,
Collection,
Database,
IndexModel,
};
use super::{OpRunner, OpSessions};
/// One operation from a spec test file. Implementors override whichever
/// `execute_on_*` entry point matches the object the operation targets;
/// the `todo!()` defaults make unsupported combinations fail loudly.
pub(crate) trait TestOperation: Debug + Send + Sync {
    /// Runs the operation against a collection (optionally in a session).
    fn execute_on_collection<'a>(
        &'a self,
        _collection: &'a Collection<Document>,
        _session: Option<&'a mut ClientSession>,
    ) -> BoxFuture<'a, Result<Option<Bson>>> {
        todo!()
    }
    /// Runs the operation against a database (optionally in a session).
    fn execute_on_database<'a>(
        &'a self,
        _database: &'a Database,
        _session: Option<&'a mut ClientSession>,
    ) -> BoxFuture<'a, Result<Option<Bson>>> {
        todo!()
    }
    /// Runs the operation against the test client itself.
    fn execute_on_client<'a>(
        &'a self,
        _client: &'a TestClient,
    ) -> BoxFuture<'a, Result<Option<Bson>>> {
        todo!()
    }
    /// Runs the operation against an explicit session object.
    fn execute_on_session<'a>(
        &'a self,
        _session: &'a mut ClientSession,
    ) -> BoxFuture<'a, Result<Option<Bson>>> {
        todo!()
    }
    /// Runs the operation through the op runner so it can execute nested
    /// operations recursively.
    fn execute_recursive<'a>(
        &'a self,
        _runner: &'a mut OpRunner,
        _sessions: OpSessions<'a>,
    ) -> BoxFuture<'a, Result<Option<Bson>>> {
        todo!()
    }
}
/// A fully-deserialized test operation together with the expectations the
/// spec file declares for it.
#[derive(Debug)]
pub(crate) struct Operation {
    /// The concrete operation to execute.
    operation: Box<dyn TestOperation>,
    /// Operation name as written in the test file (e.g. "insertOne").
    pub(crate) name: String,
    /// Which entity the operation targets (collection, database, ...).
    pub(crate) object: Option<OperationObject>,
    pub(crate) collection_options: Option<CollectionOptions>,
    pub(crate) database_options: Option<DatabaseOptions>,
    /// Whether the test file expects this operation to error.
    pub(crate) error: Option<bool>,
    /// Expected result value or error details, if declared.
    pub(crate) result: Option<OperationResult>,
    /// Name of the session (e.g. "session0") extracted from the arguments.
    pub(crate) session: Option<String>,
}
impl Operation {
    /// Checks `result` against this operation's declared expectations
    /// (`error` flag and/or expected `result`), panicking with `description`
    /// on any mismatch.
    pub(crate) fn assert_result_matches(&self, result: &Result<Option<Bson>>, description: &str) {
        // With no expectations declared, an error is tolerated but logged.
        if self.error.is_none() && self.result.is_none() && result.is_err() {
            log_uncaptured(format!(
                "Ignoring operation error: {}",
                result.clone().unwrap_err()
            ));
        }
        if let Some(error) = self.error {
            assert_eq!(error, result.is_err(), "{}", description);
        }
        if let Some(expected_result) = &self.result {
            match expected_result {
                OperationResult::Success(expected) => {
                    let result = match result.as_ref() {
                        Ok(Some(r)) => r,
                        _ => panic!("{}: expected value, got {:?}", description, result),
                    };
                    assert_matches(result, expected, Some(description));
                }
                OperationResult::Error(operation_error) => {
                    assert!(
                        result.is_err(),
                        "{}: expected error\n{:#?} got value\n{:#?}",
                        description,
                        operation_error,
                        result,
                    );
                    let error = result.as_ref().unwrap_err();
                    // Message matching is case-insensitive substring search.
                    if let Some(error_contains) = &operation_error.error_contains {
                        let message = error.message().unwrap().to_lowercase();
                        assert!(
                            message.contains(&error_contains.to_lowercase()),
                            "{}: expected error message to contain \"{}\" but got \"{}\"",
                            description,
                            error_contains,
                            message
                        );
                    }
                    if let Some(error_code_name) = &operation_error.error_code_name {
                        let code_name = error.code_name().unwrap();
                        assert_eq!(
                            error_code_name, code_name,
                            "{}: expected error with codeName {:?}, instead got {:#?}",
                            description, error_code_name, error
                        );
                    }
                    if let Some(error_code) = operation_error.error_code {
                        let code = error.sdam_code().unwrap();
                        assert_eq!(error_code, code);
                    }
                    if let Some(error_labels_contain) = &operation_error.error_labels_contain {
                        let labels = error.labels();
                        error_labels_contain
                            .iter()
                            .for_each(|label| assert!(labels.contains(label)));
                    }
                    if let Some(error_labels_omit) = &operation_error.error_labels_omit {
                        let labels = error.labels();
                        error_labels_omit
                            .iter()
                            .for_each(|label| assert!(!labels.contains(label)));
                    }
                    #[cfg(feature = "in-use-encryption-unstable")]
                    if let Some(t) = &operation_error.is_timeout_error {
                        assert_eq!(
                            *t,
                            error.is_network_timeout() || error.is_non_timeout_network_error()
                        )
                    }
                }
            }
        }
    }
}
/// The entity a test operation is executed against, as named in the file.
#[derive(Debug, Deserialize)]
#[serde(rename_all = "camelCase", deny_unknown_fields)]
pub(crate) enum OperationObject {
    Database,
    Collection,
    Client,
    Session0,
    Session1,
    // Spec files spell this all-lowercase, unlike the camelCase default.
    #[serde(rename = "gridfsbucket")]
    GridfsBucket,
    TestRunner,
}
/// Expected outcome of an operation: either structured error expectations
/// or a success value. Untagged: error documents are tried first.
#[derive(Debug, Deserialize)]
#[serde(untagged)]
pub(crate) enum OperationResult {
    Error(OperationError),
    Success(Bson),
}
/// Declarative error expectations a test file can attach to an operation;
/// every populated field is asserted in `assert_result_matches`.
#[derive(Debug, Deserialize)]
#[serde(rename_all = "camelCase", deny_unknown_fields)]
pub(crate) struct OperationError {
    pub(crate) error_contains: Option<String>,
    pub(crate) error_code_name: Option<String>,
    pub(crate) error_code: Option<i32>,
    pub(crate) error_labels_contain: Option<Vec<String>>,
    pub(crate) error_labels_omit: Option<Vec<String>>,
    #[cfg(feature = "in-use-encryption-unstable")]
    pub(crate) is_timeout_error: Option<bool>,
}
impl<'de> Deserialize<'de> for Operation {
    /// Custom deserialization: reads the raw operation definition, pulls
    /// the optional `session` out of the arguments, then dispatches on the
    /// operation name to deserialize the remaining arguments into the
    /// matching `TestOperation` implementation.
    fn deserialize<D: Deserializer<'de>>(deserializer: D) -> std::result::Result<Self, D::Error> {
        // Intermediate shape matching the raw test-file document.
        #[derive(Debug, Deserialize)]
        #[serde(rename_all = "camelCase", deny_unknown_fields)]
        struct OperationDefinition {
            pub(crate) name: String,
            pub(crate) object: Option<OperationObject>,
            pub(crate) collection_options: Option<CollectionOptions>,
            pub(crate) database_options: Option<DatabaseOptions>,
            #[serde(default = "default_arguments")]
            pub(crate) arguments: Document,
            pub(crate) error: Option<bool>,
            pub(crate) result: Option<OperationResult>,
            // We don't need to use this field, but it needs to be included during deserialization
            // so that we can use the deny_unknown_fields tag.
            #[serde(rename = "command_name")]
            pub(crate) _command_name: Option<String>,
        }
        fn default_arguments() -> Document {
            doc! {}
        }
        let mut definition = OperationDefinition::deserialize(deserializer)?;
        // The session argument names a session entity; it is removed so the
        // per-operation argument structs can use deny_unknown_fields.
        let session = definition
            .arguments
            .remove("session")
            .map(|session| session.as_str().unwrap().to_string());
        let boxed_op = match definition.name.as_str() {
            "insertOne" => deserialize_op::<InsertOne>(definition.arguments),
            "insertMany" => deserialize_op::<InsertMany>(definition.arguments),
            "updateOne" => deserialize_op::<UpdateOne>(definition.arguments),
            "updateMany" => deserialize_op::<UpdateMany>(definition.arguments),
            "deleteMany" => deserialize_op::<DeleteMany>(definition.arguments),
            "deleteOne" => deserialize_op::<DeleteOne>(definition.arguments),
            "find" => deserialize_op::<Find>(definition.arguments),
            "aggregate" => deserialize_op::<Aggregate>(definition.arguments),
            "distinct" => deserialize_op::<Distinct>(definition.arguments),
            "countDocuments" => deserialize_op::<CountDocuments>(definition.arguments),
            "estimatedDocumentCount" => {
                deserialize_op::<EstimatedDocumentCount>(definition.arguments)
            }
            "findOne" => deserialize_op::<FindOne>(definition.arguments),
            "listCollections" => deserialize_op::<ListCollections>(definition.arguments),
            "listCollectionNames" => deserialize_op::<ListCollectionNames>(definition.arguments),
            "replaceOne" => deserialize_op::<ReplaceOne>(definition.arguments),
            "findOneAndUpdate" => deserialize_op::<FindOneAndUpdate>(definition.arguments),
            "findOneAndReplace" => deserialize_op::<FindOneAndReplace>(definition.arguments),
            "findOneAndDelete" => deserialize_op::<FindOneAndDelete>(definition.arguments),
            "listDatabases" => deserialize_op::<ListDatabases>(definition.arguments),
            "targetedFailPoint" => deserialize_op::<TargetedFailPoint>(definition.arguments),
            "assertSessionPinned" => deserialize_op::<AssertSessionPinned>(definition.arguments),
            "assertSessionUnpinned" => {
                deserialize_op::<AssertSessionUnpinned>(definition.arguments)
            }
            "listDatabaseNames" => deserialize_op::<ListDatabaseNames>(definition.arguments),
            "assertSessionTransactionState" => {
                deserialize_op::<AssertSessionTransactionState>(definition.arguments)
            }
            "startTransaction" => deserialize_op::<StartTransaction>(definition.arguments),
            "commitTransaction" => deserialize_op::<CommitTransaction>(definition.arguments),
            "abortTransaction" => deserialize_op::<AbortTransaction>(definition.arguments),
            "runCommand" => deserialize_op::<RunCommand>(definition.arguments),
            "dropCollection" => deserialize_op::<DropCollection>(definition.arguments),
            "createCollection" => deserialize_op::<CreateCollection>(definition.arguments),
            "assertCollectionExists" => {
                deserialize_op::<AssertCollectionExists>(definition.arguments)
            }
            "assertCollectionNotExists" => {
                deserialize_op::<AssertCollectionNotExists>(definition.arguments)
            }
            "createIndex" => deserialize_op::<CreateIndex>(definition.arguments),
            "dropIndex" => deserialize_op::<DropIndex>(definition.arguments),
            "listIndexes" => deserialize_op::<ListIndexes>(definition.arguments),
            "listIndexNames" => deserialize_op::<ListIndexNames>(definition.arguments),
            "assertIndexExists" => deserialize_op::<AssertIndexExists>(definition.arguments),
            "assertIndexNotExists" => deserialize_op::<AssertIndexNotExists>(definition.arguments),
            "watch" => deserialize_op::<Watch>(definition.arguments),
            "withTransaction" => deserialize_op::<WithTransaction>(definition.arguments),
            // Unknown names become a stub so the suite can still load.
            _ => Ok(Box::new(UnimplementedOperation) as Box<dyn TestOperation>),
        }
        .map_err(|e| serde::de::Error::custom(format!("{}", e)))?;
        Ok(Operation {
            operation: boxed_op,
            name: definition.name,
            object: definition.object,
            collection_options: definition.collection_options,
            database_options: definition.database_options,
            error: definition.error,
            result: definition.result,
            session,
        })
    }
}
/// Deserializes `arguments` into the concrete operation type `Op` and
/// returns it boxed as a `TestOperation` trait object.
fn deserialize_op<'de, 'a, Op: TestOperation + Deserialize<'de> + 'a>(
    arguments: Document,
) -> std::result::Result<Box<dyn TestOperation + 'a>, bson::de::Error> {
    let deserializer = BsonDeserializer::new(Bson::Document(arguments));
    let op = Op::deserialize(deserializer)?;
    Ok(Box::new(op))
}
/// Convenience: lets callers invoke `execute_on_*` directly on an
/// `Operation` by dereferencing to the boxed inner `TestOperation`.
impl Deref for Operation {
    type Target = Box<dyn TestOperation>;
    fn deref(&self) -> &Box<dyn TestOperation> {
        &self.operation
    }
}
/// `deleteMany` test operation.
#[derive(Debug, Deserialize)]
pub(super) struct DeleteMany {
    filter: Document,
    // Any remaining arguments deserialize into the delete options.
    #[serde(flatten)]
    options: Option<DeleteOptions>,
}
impl TestOperation for DeleteMany {
    fn execute_on_collection<'a>(
        &'a self,
        collection: &'a Collection<Document>,
        session: Option<&'a mut ClientSession>,
    ) -> BoxFuture<'a, Result<Option<Bson>>> {
        async move {
            // Use the session-bound variant when a session was supplied.
            let result = match session {
                Some(session) => {
                    collection
                        .delete_many_with_session(
                            self.filter.clone(),
                            self.options.clone(),
                            session,
                        )
                        .await?
                }
                None => {
                    collection
                        .delete_many(self.filter.clone(), self.options.clone())
                        .await?
                }
            };
            let result = bson::to_bson(&result)?;
            Ok(Some(result))
        }
        .boxed()
    }
}
/// `deleteOne` test operation.
#[derive(Debug, Deserialize)]
pub(super) struct DeleteOne {
    filter: Document,
    #[serde(flatten)]
    options: Option<DeleteOptions>,
}
impl TestOperation for DeleteOne {
    fn execute_on_collection<'a>(
        &'a self,
        collection: &'a Collection<Document>,
        session: Option<&'a mut ClientSession>,
    ) -> BoxFuture<'a, Result<Option<Bson>>> {
        async move {
            let result = match session {
                Some(session) => {
                    collection
                        .delete_one_with_session(self.filter.clone(), self.options.clone(), session)
                        .await?
                }
                None => {
                    collection
                        .delete_one(self.filter.clone(), self.options.clone())
                        .await?
                }
            };
            let result = bson::to_bson(&result)?;
            Ok(Some(result))
        }
        .boxed()
    }
}
/// `find` test operation: collects the whole cursor into an array.
#[derive(Debug, Default, Deserialize)]
pub(super) struct Find {
    filter: Option<Document>,
    #[serde(flatten)]
    options: Option<FindOptions>,
}
impl TestOperation for Find {
    fn execute_on_collection<'a>(
        &'a self,
        collection: &'a Collection<Document>,
        session: Option<&'a mut ClientSession>,
    ) -> BoxFuture<'a, Result<Option<Bson>>> {
        async move {
            let result = match session {
                Some(session) => {
                    // Session cursors must be driven through the same session.
                    let mut cursor = collection
                        .find_with_session(self.filter.clone(), self.options.clone(), session)
                        .await?;
                    cursor
                        .stream(session)
                        .try_collect::<Vec<Document>>()
                        .await?
                }
                None => {
                    let cursor = collection
                        .find(self.filter.clone(), self.options.clone())
                        .await?;
                    cursor.try_collect::<Vec<Document>>().await?
                }
            };
            Ok(Some(Bson::from(result)))
        }
        .boxed()
    }
}
/// `insertMany` test operation: returns `{ "insertedIds": { index: id } }`.
#[derive(Debug, Deserialize)]
pub(super) struct InsertMany {
    documents: Vec<Document>,
    #[serde(flatten)]
    options: Option<InsertManyOptions>,
}
impl TestOperation for InsertMany {
    fn execute_on_collection<'a>(
        &'a self,
        collection: &'a Collection<Document>,
        session: Option<&'a mut ClientSession>,
    ) -> BoxFuture<'a, Result<Option<Bson>>> {
        let documents = self.documents.clone();
        let options = self.options.clone();
        async move {
            let result = match session {
                Some(session) => {
                    collection
                        .insert_many_with_session(documents, options, session)
                        .await?
                }
                None => collection.insert_many(documents, options).await?,
            };
            // BSON document keys must be strings, so index keys are stringified.
            let ids: HashMap<String, Bson> = result
                .inserted_ids
                .into_iter()
                .map(|(k, v)| (k.to_string(), v))
                .collect();
            let ids = bson::to_bson(&ids)?;
            Ok(Some(Bson::from(doc! { "insertedIds": ids })))
        }
        .boxed()
    }
}
/// `insertOne` test operation.
#[derive(Debug, Deserialize)]
pub(super) struct InsertOne {
    document: Document,
    #[serde(flatten)]
    options: Option<InsertOneOptions>,
}
impl TestOperation for InsertOne {
    fn execute_on_collection<'a>(
        &'a self,
        collection: &'a Collection<Document>,
        session: Option<&'a mut ClientSession>,
    ) -> BoxFuture<'a, Result<Option<Bson>>> {
        let document = self.document.clone();
        let options = self.options.clone();
        async move {
            let result = match session {
                Some(session) => {
                    collection
                        .insert_one_with_session(document, options, session)
                        .await?
                }
                None => collection.insert_one(document, options).await?,
            };
            let result = bson::to_bson(&result)?;
            Ok(Some(result))
        }
        .boxed()
    }
}
/// `updateMany` test operation.
#[derive(Debug, Deserialize)]
pub(super) struct UpdateMany {
    filter: Document,
    update: UpdateModifications,
    #[serde(flatten)]
    options: Option<UpdateOptions>,
}
impl TestOperation for UpdateMany {
    fn execute_on_collection<'a>(
        &'a self,
        collection: &'a Collection<Document>,
        session: Option<&'a mut ClientSession>,
    ) -> BoxFuture<'a, Result<Option<Bson>>> {
        async move {
            let result = match session {
                Some(session) => {
                    collection
                        .update_many_with_session(
                            self.filter.clone(),
                            self.update.clone(),
                            self.options.clone(),
                            session,
                        )
                        .await?
                }
                None => {
                    collection
                        .update_many(
                            self.filter.clone(),
                            self.update.clone(),
                            self.options.clone(),
                        )
                        .await?
                }
            };
            let result = bson::to_bson(&result)?;
            Ok(Some(result))
        }
        .boxed()
    }
}
/// `updateOne` test operation.
#[derive(Debug, Deserialize)]
pub(super) struct UpdateOne {
    filter: Document,
    update: UpdateModifications,
    #[serde(flatten)]
    options: Option<UpdateOptions>,
}
impl TestOperation for UpdateOne {
    fn execute_on_collection<'a>(
        &'a self,
        collection: &'a Collection<Document>,
        session: Option<&'a mut ClientSession>,
    ) -> BoxFuture<'a, Result<Option<Bson>>> {
        async move {
            let result = match session {
                Some(session) => {
                    collection
                        .update_one_with_session(
                            self.filter.clone(),
                            self.update.clone(),
                            self.options.clone(),
                            session,
                        )
                        .await?
                }
                None => {
                    collection
                        .update_one(
                            self.filter.clone(),
                            self.update.clone(),
                            self.options.clone(),
                        )
                        .await?
                }
            };
            let result = bson::to_bson(&result)?;
            Ok(Some(result))
        }
        .boxed()
    }
}
/// `aggregate` test operation; valid on both collections and databases.
#[derive(Debug, Deserialize)]
#[serde(rename_all = "camelCase")]
pub(super) struct Aggregate {
    pipeline: Vec<Document>,
    #[serde(flatten)]
    options: Option<AggregateOptions>,
}
impl TestOperation for Aggregate {
    fn execute_on_collection<'a>(
        &'a self,
        collection: &'a Collection<Document>,
        session: Option<&'a mut ClientSession>,
    ) -> BoxFuture<'a, Result<Option<Bson>>> {
        async move {
            let result = match session {
                Some(session) => {
                    // Session cursors must be driven through the same session.
                    let mut cursor = collection
                        .aggregate_with_session(
                            self.pipeline.clone(),
                            self.options.clone(),
                            session,
                        )
                        .await?;
                    cursor
                        .stream(session)
                        .try_collect::<Vec<Document>>()
                        .await?
                }
                None => {
                    let cursor = collection
                        .aggregate(self.pipeline.clone(), self.options.clone())
                        .await?;
                    cursor.try_collect::<Vec<Document>>().await?
                }
            };
            Ok(Some(Bson::from(result)))
        }
        .boxed()
    }
    fn execute_on_database<'a>(
        &'a self,
        database: &'a Database,
        session: Option<&'a mut ClientSession>,
    ) -> BoxFuture<'a, Result<Option<Bson>>> {
        async move {
            let result = match session {
                Some(session) => {
                    let mut cursor = database
                        .aggregate_with_session(
                            self.pipeline.clone(),
                            self.options.clone(),
                            session,
                        )
                        .await?;
                    cursor
                        .stream(session)
                        .try_collect::<Vec<Document>>()
                        .await?
                }
                None => {
                    let cursor = database
                        .aggregate(self.pipeline.clone(), self.options.clone())
                        .await?;
                    cursor.try_collect::<Vec<Document>>().await?
                }
            };
            Ok(Some(Bson::from(result)))
        }
        .boxed()
    }
}
/// `distinct` test operation.
#[derive(Debug, Deserialize)]
#[serde(rename_all = "camelCase")]
pub(super) struct Distinct {
    field_name: String,
    filter: Option<Document>,
    #[serde(flatten)]
    options: Option<DistinctOptions>,
}
impl TestOperation for Distinct {
    fn execute_on_collection<'a>(
        &'a self,
        collection: &'a Collection<Document>,
        session: Option<&'a mut ClientSession>,
    ) -> BoxFuture<'a, Result<Option<Bson>>> {
        async move {
            let result = match session {
                Some(session) => {
                    collection
                        .distinct_with_session(
                            &self.field_name,
                            self.filter.clone(),
                            self.options.clone(),
                            session,
                        )
                        .await?
                }
                None => {
                    collection
                        .distinct(&self.field_name, self.filter.clone(), self.options.clone())
                        .await?
                }
            };
            Ok(Some(Bson::Array(result)))
        }
        .boxed()
    }
}
/// `countDocuments` test operation.
#[derive(Debug, Deserialize)]
pub(super) struct CountDocuments {
    filter: Document,
    #[serde(flatten)]
    options: Option<CountOptions>,
}
impl TestOperation for CountDocuments {
    fn execute_on_collection<'a>(
        &'a self,
        collection: &'a Collection<Document>,
        session: Option<&'a mut ClientSession>,
    ) -> BoxFuture<'a, Result<Option<Bson>>> {
        async move {
            let result = match session {
                Some(session) => {
                    collection
                        .count_documents_with_session(
                            self.filter.clone(),
                            self.options.clone(),
                            session,
                        )
                        .await?
                }
                None => {
                    collection
                        .count_documents(self.filter.clone(), self.options.clone())
                        .await?
                }
            };
            // The unsigned count is converted to Int64 for BSON comparison.
            Ok(Some(Bson::Int64(result.try_into().unwrap())))
        }
        .boxed()
    }
}
/// `estimatedDocumentCount` test operation (never session-bound).
#[derive(Debug, Deserialize)]
pub(super) struct EstimatedDocumentCount {
    #[serde(flatten)]
    options: Option<EstimatedDocumentCountOptions>,
}
impl TestOperation for EstimatedDocumentCount {
    fn execute_on_collection<'a>(
        &'a self,
        collection: &'a Collection<Document>,
        _session: Option<&'a mut ClientSession>,
    ) -> BoxFuture<'a, Result<Option<Bson>>> {
        async move {
            let result = collection
                .estimated_document_count(self.options.clone())
                .await?;
            Ok(Some(Bson::Int64(result.try_into().unwrap())))
        }
        .boxed()
    }
}
/// `findOne` test operation; yields `None` when no document matches.
#[derive(Debug, Default, Deserialize)]
pub(super) struct FindOne {
    filter: Option<Document>,
    #[serde(flatten)]
    options: Option<FindOneOptions>,
}
impl TestOperation for FindOne {
    fn execute_on_collection<'a>(
        &'a self,
        collection: &'a Collection<Document>,
        session: Option<&'a mut ClientSession>,
    ) -> BoxFuture<'a, Result<Option<Bson>>> {
        async move {
            let result = match session {
                Some(session) => {
                    collection
                        .find_one_with_session(self.filter.clone(), self.options.clone(), session)
                        .await?
                }
                None => {
                    collection
                        .find_one(self.filter.clone(), self.options.clone())
                        .await?
                }
            };
            match result {
                Some(result) => Ok(Some(Bson::from(result))),
                None => Ok(None),
            }
        }
        .boxed()
    }
}
// Spec-test wrapper for the `listCollections` operation.
#[derive(Debug, Deserialize)]
pub(super) struct ListCollections {
    filter: Option<Document>,
    #[serde(flatten)]
    options: Option<ListCollectionsOptions>,
}

impl TestOperation for ListCollections {
    fn execute_on_database<'a>(
        &'a self,
        database: &'a Database,
        session: Option<&'a mut ClientSession>,
    ) -> BoxFuture<'a, Result<Option<Bson>>> {
        async move {
            let result = match session {
                Some(session) => {
                    let mut cursor = database
                        .list_collections_with_session(
                            self.filter.clone(),
                            self.options.clone(),
                            session,
                        )
                        .await?;
                    // `stream` re-borrows the same session to drive the cursor;
                    // this is valid because the first borrow ends at the await above.
                    cursor.stream(session).try_collect::<Vec<_>>().await?
                }
                None => {
                    let cursor = database
                        .list_collections(self.filter.clone(), self.options.clone())
                        .await?;
                    cursor.try_collect::<Vec<_>>().await?
                }
            };
            Ok(Some(bson::to_bson(&result)?))
        }
        .boxed()
    }
}
// Spec-test wrapper for the `listCollectionNames` operation.
#[derive(Debug, Deserialize)]
pub(super) struct ListCollectionNames {
    filter: Option<Document>,
}

impl TestOperation for ListCollectionNames {
    fn execute_on_database<'a>(
        &'a self,
        database: &'a Database,
        session: Option<&'a mut ClientSession>,
    ) -> BoxFuture<'a, Result<Option<Bson>>> {
        async move {
            let result = match session {
                Some(session) => {
                    database
                        .list_collection_names_with_session(self.filter.clone(), session)
                        .await?
                }
                None => database.list_collection_names(self.filter.clone()).await?,
            };
            // Convert the Vec<String> of names into a BSON array for matching.
            let result: Vec<Bson> = result.into_iter().map(|s| s.into()).collect();
            Ok(Some(result.into()))
        }
        .boxed()
    }
}
// Spec-test wrapper for the `replaceOne` operation.
#[derive(Debug, Deserialize)]
pub(super) struct ReplaceOne {
    filter: Document,
    replacement: Document,
    #[serde(flatten)]
    options: Option<ReplaceOptions>,
}

impl TestOperation for ReplaceOne {
    fn execute_on_collection<'a>(
        &'a self,
        collection: &'a Collection<Document>,
        session: Option<&'a mut ClientSession>,
    ) -> BoxFuture<'a, Result<Option<Bson>>> {
        async move {
            let result = match session {
                Some(session) => {
                    collection
                        .replace_one_with_session(
                            self.filter.clone(),
                            self.replacement.clone(),
                            self.options.clone(),
                            session,
                        )
                        .await?
                }
                None => {
                    collection
                        .replace_one(
                            self.filter.clone(),
                            self.replacement.clone(),
                            self.options.clone(),
                        )
                        .await?
                }
            };
            // Serialize the UpdateResult so the runner can compare its fields.
            let result = bson::to_bson(&result)?;
            Ok(Some(result))
        }
        .boxed()
    }
}
// Spec-test wrapper for the `findOneAndUpdate` operation.
#[derive(Debug, Deserialize)]
pub(super) struct FindOneAndUpdate {
    filter: Document,
    // Either an update document or an aggregation pipeline.
    update: UpdateModifications,
    #[serde(flatten)]
    options: Option<FindOneAndUpdateOptions>,
}

impl TestOperation for FindOneAndUpdate {
    fn execute_on_collection<'a>(
        &'a self,
        collection: &'a Collection<Document>,
        session: Option<&'a mut ClientSession>,
    ) -> BoxFuture<'a, Result<Option<Bson>>> {
        async move {
            let result = match session {
                Some(session) => {
                    collection
                        .find_one_and_update_with_session(
                            self.filter.clone(),
                            self.update.clone(),
                            self.options.clone(),
                            session,
                        )
                        .await?
                }
                None => {
                    collection
                        .find_one_and_update(
                            self.filter.clone(),
                            self.update.clone(),
                            self.options.clone(),
                        )
                        .await?
                }
            };
            let result = bson::to_bson(&result)?;
            Ok(Some(result))
        }
        .boxed()
    }
}
// Spec-test wrapper for the `findOneAndReplace` operation.
#[derive(Debug, Deserialize)]
pub(super) struct FindOneAndReplace {
    filter: Document,
    replacement: Document,
    #[serde(flatten)]
    options: Option<FindOneAndReplaceOptions>,
}

impl TestOperation for FindOneAndReplace {
    fn execute_on_collection<'a>(
        &'a self,
        collection: &'a Collection<Document>,
        session: Option<&'a mut ClientSession>,
    ) -> BoxFuture<'a, Result<Option<Bson>>> {
        async move {
            let result = match session {
                Some(session) => {
                    collection
                        .find_one_and_replace_with_session(
                            self.filter.clone(),
                            self.replacement.clone(),
                            self.options.clone(),
                            session,
                        )
                        .await?
                }
                None => {
                    collection
                        .find_one_and_replace(
                            self.filter.clone(),
                            self.replacement.clone(),
                            self.options.clone(),
                        )
                        .await?
                }
            };
            let result = bson::to_bson(&result)?;
            Ok(Some(result))
        }
        .boxed()
    }
}
// Spec-test wrapper for the `findOneAndDelete` operation.
#[derive(Debug, Deserialize)]
pub(super) struct FindOneAndDelete {
    filter: Document,
    #[serde(flatten)]
    options: Option<FindOneAndDeleteOptions>,
}

impl TestOperation for FindOneAndDelete {
    fn execute_on_collection<'a>(
        &'a self,
        collection: &'a Collection<Document>,
        session: Option<&'a mut ClientSession>,
    ) -> BoxFuture<'a, Result<Option<Bson>>> {
        async move {
            let result = match session {
                Some(session) => {
                    collection
                        .find_one_and_delete_with_session(
                            self.filter.clone(),
                            self.options.clone(),
                            session,
                        )
                        .await?
                }
                None => {
                    collection
                        .find_one_and_delete(self.filter.clone(), self.options.clone())
                        .await?
                }
            };
            // `None` (no document matched) serializes to Bson::Null here.
            let result = bson::to_bson(&result)?;
            Ok(Some(result))
        }
        .boxed()
    }
}
// Spec-test operation carrying a fail point to be set on a targeted server;
// executing it simply returns the fail point document for the runner to apply.
#[derive(Debug, Deserialize)]
#[serde(rename_all = "camelCase", deny_unknown_fields)]
pub(super) struct TargetedFailPoint {
    fail_point: FailPoint,
}

impl TestOperation for TargetedFailPoint {
    fn execute_on_client<'a>(&'a self, _client: &'a TestClient) -> BoxFuture<Result<Option<Bson>>> {
        // No command is issued here; the serialized fail point is handed back.
        async move { Ok(Some(to_bson(&self.fail_point)?)) }.boxed()
    }
}
// Test assertion: the session's transaction must currently be pinned to a mongos.
#[derive(Debug, Deserialize)]
pub(super) struct AssertSessionPinned {}

impl TestOperation for AssertSessionPinned {
    fn execute_on_session<'a>(
        &'a self,
        session: &'a mut ClientSession,
    ) -> BoxFuture<'a, Result<Option<Bson>>> {
        async move {
            assert!(session.transaction.pinned_mongos().is_some());
            Ok(None)
        }
        .boxed()
    }
}
// Test assertion: the session's transaction must NOT be pinned to a mongos.
#[derive(Debug, Deserialize)]
pub(super) struct AssertSessionUnpinned {}

impl TestOperation for AssertSessionUnpinned {
    fn execute_on_session<'a>(
        &'a self,
        session: &'a mut ClientSession,
    ) -> BoxFuture<'a, Result<Option<Bson>>> {
        async move {
            assert!(session.transaction.pinned_mongos().is_none());
            Ok(None)
        }
        .boxed()
    }
}
// Spec-test wrapper for the `listDatabases` operation (client-level).
#[derive(Debug, Deserialize)]
pub(super) struct ListDatabases {
    filter: Option<Document>,
    #[serde(flatten)]
    options: Option<ListDatabasesOptions>,
}

impl TestOperation for ListDatabases {
    fn execute_on_client<'a>(
        &'a self,
        client: &'a TestClient,
    ) -> BoxFuture<'a, Result<Option<Bson>>> {
        async move {
            let result = client
                .list_databases(self.filter.clone(), self.options.clone())
                .await?;
            Ok(Some(bson::to_bson(&result)?))
        }
        .boxed()
    }
}
// Spec-test wrapper for the `listDatabaseNames` operation (client-level).
#[derive(Debug, Deserialize)]
pub(super) struct ListDatabaseNames {
    filter: Option<Document>,
    #[serde(flatten)]
    options: Option<ListDatabasesOptions>,
}

impl TestOperation for ListDatabaseNames {
    fn execute_on_client<'a>(
        &'a self,
        client: &'a TestClient,
    ) -> BoxFuture<'a, Result<Option<Bson>>> {
        async move {
            let result = client
                .list_database_names(self.filter.clone(), self.options.clone())
                .await?;
            // Convert the Vec<String> of names into a BSON array for matching.
            let result: Vec<Bson> = result.into_iter().map(|s| s.into()).collect();
            Ok(Some(result.into()))
        }
        .boxed()
    }
}
// Test assertion: the session's transaction must be in the named state.
// `state` uses the spec's snake_case state names.
#[derive(Debug, Deserialize)]
pub(super) struct AssertSessionTransactionState {
    state: String,
}

impl TestOperation for AssertSessionTransactionState {
    fn execute_on_session<'a>(
        &'a self,
        session: &'a mut ClientSession,
    ) -> BoxFuture<'a, Result<Option<Bson>>> {
        async move {
            match self.state.as_str() {
                "none" => assert!(matches!(session.transaction.state, TransactionState::None)),
                "starting" => assert!(matches!(
                    session.transaction.state,
                    TransactionState::Starting
                )),
                "in_progress" => assert!(matches!(
                    session.transaction.state,
                    TransactionState::InProgress
                )),
                // Committed carries data (e.g. retry info); match any payload.
                "committed" => assert!(matches!(
                    session.transaction.state,
                    TransactionState::Committed { .. }
                )),
                "aborted" => assert!(matches!(
                    session.transaction.state,
                    TransactionState::Aborted
                )),
                // A state string outside the spec set indicates a bad test file.
                other => panic!("Unknown transaction state: {}", other),
            }
            Ok(None)
        }
        .boxed()
    }
}
// Spec-test wrapper that begins a transaction on the provided session.
#[derive(Debug, Deserialize)]
pub(super) struct StartTransaction {
    options: Option<TransactionOptions>,
}

impl TestOperation for StartTransaction {
    fn execute_on_session<'a>(
        &'a self,
        session: &'a mut ClientSession,
    ) -> BoxFuture<'a, Result<Option<Bson>>> {
        async move {
            // A successful start produces no result value for the runner.
            session.start_transaction(self.options.clone()).await?;
            Ok(None)
        }
        .boxed()
    }
}
// Spec-test wrapper that commits the session's active transaction.
#[derive(Debug, Deserialize)]
pub(super) struct CommitTransaction {}

impl TestOperation for CommitTransaction {
    fn execute_on_session<'a>(
        &'a self,
        session: &'a mut ClientSession,
    ) -> BoxFuture<'a, Result<Option<Bson>>> {
        async move {
            // A successful commit produces no result value for the runner.
            session.commit_transaction().await?;
            Ok(None)
        }
        .boxed()
    }
}
// Spec-test wrapper that aborts the session's active transaction.
#[derive(Debug, Deserialize)]
pub(super) struct AbortTransaction {}

impl TestOperation for AbortTransaction {
    fn execute_on_session<'a>(
        &'a self,
        session: &'a mut ClientSession,
    ) -> BoxFuture<'a, Result<Option<Bson>>> {
        async move {
            // A successful abort produces no result value for the runner.
            session.abort_transaction().await?;
            Ok(None)
        }
        .boxed()
    }
}
// Spec-test wrapper for a raw database command, with an optional read preference.
#[derive(Debug, Deserialize)]
#[serde(rename_all = "camelCase")]
pub(super) struct RunCommand {
    command: Document,
    read_preference: Option<ReadPreference>,
}

impl TestOperation for RunCommand {
    fn execute_on_database<'a>(
        &'a self,
        database: &'a Database,
        session: Option<&'a mut ClientSession>,
    ) -> BoxFuture<'a, Result<Option<Bson>>> {
        async move {
            // Translate the spec-level read preference into driver selection criteria.
            let selection_criteria = self
                .read_preference
                .as_ref()
                .map(|read_preference| SelectionCriteria::ReadPreference(read_preference.clone()));
            let result = match session {
                Some(session) => {
                    database
                        .run_command_with_session(self.command.clone(), selection_criteria, session)
                        .await
                }
                // Fix: honor the configured read preference on the sessionless path
                // too; previously `None` was passed here, silently discarding any
                // `readPreference` specified by the test.
                None => {
                    database
                        .run_command(self.command.clone(), selection_criteria)
                        .await
                }
            };
            result.map(|doc| Some(Bson::Document(doc)))
        }
        .boxed()
    }
}
// Spec-test wrapper for the `drop` (collection) operation.
#[derive(Debug, Deserialize)]
pub(super) struct DropCollection {
    // Name of the collection to drop within the target database.
    collection: String,
    #[serde(flatten)]
    options: Option<DropCollectionOptions>,
}

impl TestOperation for DropCollection {
    fn execute_on_database<'a>(
        &'a self,
        database: &'a Database,
        session: Option<&'a mut ClientSession>,
    ) -> BoxFuture<'a, Result<Option<Bson>>> {
        async move {
            let result = match session {
                Some(session) => {
                    database
                        .collection::<Document>(&self.collection)
                        .drop_with_session(self.options.clone(), session)
                        .await
                }
                None => {
                    database
                        .collection::<Document>(&self.collection)
                        .drop(self.options.clone())
                        .await
                }
            };
            // Dropping yields no value; only propagate success/failure.
            result.map(|_| None)
        }
        .boxed()
    }
}
// Spec-test wrapper for the `createCollection` operation.
#[derive(Debug, Deserialize)]
pub(super) struct CreateCollection {
    collection: String,
    #[serde(flatten)]
    options: Option<CreateCollectionOptions>,
}

impl TestOperation for CreateCollection {
    fn execute_on_database<'a>(
        &'a self,
        database: &'a Database,
        session: Option<&'a mut ClientSession>,
    ) -> BoxFuture<'a, Result<Option<Bson>>> {
        async move {
            let result = match session {
                Some(session) => {
                    database
                        .create_collection_with_session(
                            &self.collection,
                            self.options.clone(),
                            session,
                        )
                        .await
                }
                None => {
                    database
                        .create_collection(&self.collection, self.options.clone())
                        .await
                }
            };
            // Creation yields no value; only propagate success/failure.
            result.map(|_| None)
        }
        .boxed()
    }
}
// Test assertion: the named collection must exist in the named database.
#[derive(Debug, Deserialize)]
pub(super) struct AssertCollectionExists {
    database: String,
    collection: String,
}

impl TestOperation for AssertCollectionExists {
    fn execute_on_client<'a>(
        &'a self,
        client: &'a TestClient,
    ) -> BoxFuture<'a, Result<Option<Bson>>> {
        async move {
            // List with majority read concern so a just-committed create is visible.
            let collections = client
                .database_with_options(
                    &self.database,
                    DatabaseOptions::builder()
                        .read_concern(ReadConcern::MAJORITY)
                        .build(),
                )
                .list_collection_names(None)
                .await
                .unwrap();
            assert!(
                collections.contains(&self.collection),
                "Collection {}.{} should exist, but does not (collections: {:?}).",
                self.database,
                self.collection,
                collections
            );
            Ok(None)
        }
        .boxed()
    }
}
// Test assertion: the named collection must NOT exist in the named database.
#[derive(Debug, Deserialize)]
pub(super) struct AssertCollectionNotExists {
    database: String,
    collection: String,
}

impl TestOperation for AssertCollectionNotExists {
    fn execute_on_client<'a>(
        &'a self,
        client: &'a TestClient,
    ) -> BoxFuture<'a, Result<Option<Bson>>> {
        async move {
            let collections = client
                .database(&self.database)
                .list_collection_names(None)
                .await
                .unwrap();
            assert!(!collections.contains(&self.collection));
            Ok(None)
        }
        .boxed()
    }
}
// Spec-test wrapper for the `createIndex` operation.
#[derive(Debug, Deserialize)]
#[serde(deny_unknown_fields)]
pub(super) struct CreateIndex {
    // Index key specification document.
    keys: Document,
    // Optional explicit index name; the server derives one if absent.
    name: Option<String>,
}

impl TestOperation for CreateIndex {
    fn execute_on_collection<'a>(
        &'a self,
        collection: &'a Collection<Document>,
        session: Option<&'a mut ClientSession>,
    ) -> BoxFuture<'a, Result<Option<Bson>>> {
        async move {
            let options = IndexOptions::builder().name(self.name.clone()).build();
            let index = IndexModel::builder()
                .keys(self.keys.clone())
                .options(options)
                .build();
            // Return the created index's name for result matching.
            let name = match session {
                Some(session) => {
                    collection
                        .create_index_with_session(index, None, session)
                        .await?
                        .index_name
                }
                None => collection.create_index(index, None).await?.index_name,
            };
            Ok(Some(name.into()))
        }
        .boxed()
    }
}
// Spec-test wrapper for the `dropIndex` operation.
#[derive(Debug, Deserialize)]
pub(super) struct DropIndex {
    name: String,
    // Note: options is non-optional here; flatten fills it from remaining keys.
    #[serde(flatten)]
    options: DropIndexOptions,
}

impl TestOperation for DropIndex {
    fn execute_on_collection<'a>(
        &'a self,
        collection: &'a Collection<Document>,
        session: Option<&'a mut ClientSession>,
    ) -> BoxFuture<'a, Result<Option<Bson>>> {
        async move {
            match session {
                Some(session) => {
                    collection
                        .drop_index_with_session(self.name.clone(), self.options.clone(), session)
                        .await?
                }
                None => {
                    collection
                        .drop_index(self.name.clone(), self.options.clone())
                        .await?
                }
            }
            Ok(None)
        }
        .boxed()
    }
}
// Spec-test wrapper for the `listIndexes` operation.
#[derive(Debug, Deserialize)]
pub(super) struct ListIndexes {
    #[serde(flatten)]
    options: ListIndexesOptions,
}

impl TestOperation for ListIndexes {
    fn execute_on_collection<'a>(
        &'a self,
        collection: &'a Collection<Document>,
        session: Option<&'a mut ClientSession>,
    ) -> BoxFuture<'a, Result<Option<Bson>>> {
        async move {
            let indexes: Vec<IndexModel> = match session {
                Some(session) => {
                    // The session is re-borrowed by `stream` to drain the cursor.
                    collection
                        .list_indexes_with_session(self.options.clone(), session)
                        .await?
                        .stream(session)
                        .try_collect()
                        .await?
                }
                None => {
                    collection
                        .list_indexes(self.options.clone())
                        .await?
                        .try_collect()
                        .await?
                }
            };
            // Serialize each IndexModel to a Document for result matching.
            let indexes: Vec<Document> = indexes
                .iter()
                .map(|index| bson::to_document(index).unwrap())
                .collect();
            Ok(Some(indexes.into()))
        }
        .boxed()
    }
}
// Spec-test wrapper for the `listIndexNames` operation.
#[derive(Debug, Deserialize)]
pub(super) struct ListIndexNames {}

impl TestOperation for ListIndexNames {
    fn execute_on_collection<'a>(
        &'a self,
        collection: &'a Collection<Document>,
        session: Option<&'a mut ClientSession>,
    ) -> BoxFuture<'a, Result<Option<Bson>>> {
        async move {
            // Prefer the session-aware variant whenever a session is available.
            let index_names = if let Some(session) = session {
                collection.list_index_names_with_session(session).await?
            } else {
                collection.list_index_names().await?
            };
            Ok(Some(index_names.into()))
        }
        .boxed()
    }
}
// Spec-test wrapper for the `watch` operation; usable at collection, database,
// and client level. Only opening the change stream is exercised — it is
// dropped immediately and no events are consumed.
#[derive(Debug, Deserialize)]
pub(super) struct Watch {}

impl TestOperation for Watch {
    fn execute_on_collection<'a>(
        &'a self,
        collection: &'a Collection<Document>,
        session: Option<&'a mut ClientSession>,
    ) -> BoxFuture<'a, Result<Option<Bson>>> {
        async move {
            match session {
                None => {
                    collection.watch(None, None).await?;
                }
                Some(s) => {
                    collection.watch_with_session(None, None, s).await?;
                }
            }
            Ok(None)
        }
        .boxed()
    }

    fn execute_on_database<'a>(
        &'a self,
        database: &'a Database,
        session: Option<&'a mut ClientSession>,
    ) -> BoxFuture<'a, Result<Option<Bson>>> {
        async move {
            match session {
                None => {
                    database.watch(None, None).await?;
                }
                Some(s) => {
                    database.watch_with_session(None, None, s).await?;
                }
            }
            Ok(None)
        }
        .boxed()
    }

    // Client-level watch has no session variant in this runner.
    fn execute_on_client<'a>(
        &'a self,
        client: &'a TestClient,
    ) -> BoxFuture<'a, Result<Option<Bson>>> {
        async move {
            client.watch(None, None).await?;
            Ok(None)
        }
        .boxed()
    }
}
// Test assertion: the named index must exist on database.collection.
#[derive(Debug, Deserialize)]
pub(super) struct AssertIndexExists {
    database: String,
    collection: String,
    index: String,
}

impl TestOperation for AssertIndexExists {
    fn execute_on_client<'a>(
        &'a self,
        client: &'a TestClient,
    ) -> BoxFuture<'a, Result<Option<Bson>>> {
        async move {
            let coll = client
                .database(&self.database)
                .collection::<Document>(&self.collection);
            let indexes = coll.list_index_names().await?;
            assert!(indexes.contains(&self.index));
            Ok(None)
        }
        .boxed()
    }
}
// Test assertion: the named index must NOT exist on database.collection.
#[derive(Debug, Deserialize)]
pub(super) struct AssertIndexNotExists {
    database: String,
    collection: String,
    index: String,
}

impl TestOperation for AssertIndexNotExists {
    fn execute_on_client<'a>(
        &'a self,
        client: &'a TestClient,
    ) -> BoxFuture<'a, Result<Option<Bson>>> {
        async move {
            let coll = client
                .database(&self.database)
                .collection::<Document>(&self.collection);
            match coll.list_index_names().await {
                Ok(indexes) => assert!(!indexes.contains(&self.index)),
                // a namespace not found error indicates that the index does not exist
                // (26 is the server's NamespaceNotFound code; NOTE(review): confirm
                // `sdam_code` is the intended accessor for server error codes here)
                Err(err) => assert_eq!(err.sdam_code(), Some(26)),
            }
            Ok(None)
        }
        .boxed()
    }
}
// Spec-test wrapper for `withTransaction`: runs a list of nested operations
// inside a driver-managed transaction (which may retry the callback).
#[derive(Debug, Deserialize)]
pub(super) struct WithTransaction {
    callback: WithTransactionCallback,
    options: Option<TransactionOptions>,
}

// The callback payload: the nested operations to execute inside the transaction.
#[derive(Debug, Deserialize)]
struct WithTransactionCallback {
    operations: Vec<Operation>,
}

impl TestOperation for WithTransaction {
    fn execute_recursive<'a>(
        &'a self,
        runner: &'a mut OpRunner,
        sessions: OpSessions<'a>,
    ) -> BoxFuture<'a, Result<Option<Bson>>> {
        async move {
            // withTransaction always drives session0; its absence is a test bug.
            let session = sessions.session0.unwrap();
            session
                .with_transaction(
                    // State is passed by value so the (possibly retried) callback
                    // can re-borrow it on every invocation.
                    (runner, &self.callback.operations, sessions.session1),
                    |session, (runner, operations, session1)| {
                        async move {
                            for op in operations.iter() {
                                // Rebuild the session pair for each nested op;
                                // session0 is the transaction's own session.
                                let sessions = OpSessions {
                                    session0: Some(session),
                                    session1: session1.as_deref_mut(),
                                };
                                let result = match runner.run_operation(op, sessions).await {
                                    Some(r) => r,
                                    // Ops that produce no result are simply skipped.
                                    None => continue,
                                };
                                op.assert_result_matches(
                                    &result,
                                    "withTransaction nested operation",
                                );
                                // Propagate sub-operation errors after validating the result.
                                let _ = result?;
                            }
                            Ok(())
                        }
                        .boxed()
                    },
                    self.options.clone(),
                )
                .await?;
            Ok(None)
        }
        .boxed()
    }
}
// Catch-all for spec operations this runner does not implement; it relies
// entirely on TestOperation's default method implementations.
#[derive(Debug, Deserialize)]
pub(super) struct UnimplementedOperation;

impl TestOperation for UnimplementedOperation {}
|
// importing print file
// mod print;
// mod variables;
// mod datatypes;
// mod strings;
// mod tuples;
// mod arrays;
// mod vectors;
// mod conditional;
// mod loops;
// mod functions;
// mod pointer_ref;
// mod structures;
// mod enums;
// mod cli;
// mod option;
// mod yaml_reader;
mod read_file;
// Entry point for the tutorial crate: each commented call corresponds to a
// lesson module; only the currently active lesson is invoked.
fn main() {
    // Calling run function from print file
    // print::run();
    // Running run method in variables file
    // variables::run();
    // Running run method in datatypes file
    // datatypes::run();
    // strings::run();
    // tuples::run();
    // arrays::start();
    // vectors::start();
    // conditional::run();
    // loops::run();
    // functions::run();
    // pointer_ref::run();
    // structures::run();
    // enums::run();
    // cli::run();
    // option::run();
    // yaml_reader::run();
    // Active lesson: file reading.
    read_file::run();
}
|
/*
* Datadog API V1 Collection
*
* Collection of all Datadog Public endpoints.
*
* The version of the OpenAPI document: 1.0
* Contact: support@datadoghq.com
* Generated by: https://openapi-generator.tech
*/
/// SloCorrectionCategory : Category the SLO correction belongs to.
/// Serialized with the human-readable labels expected by the Datadog API.
#[derive(Clone, Copy, Debug, Eq, PartialEq, Ord, PartialOrd, Hash, Serialize, Deserialize)]
pub enum SloCorrectionCategory {
    #[serde(rename = "Scheduled Maintenance")]
    SCHEDULED_MAINTENANCE,
    #[serde(rename = "Outside Business Hours")]
    OUTSIDE_BUSINESS_HOURS,
    #[serde(rename = "Deployment")]
    DEPLOYMENT,
    #[serde(rename = "Other")]
    OTHER,
}
// Implement `Display` instead of `ToString` directly: `to_string()` is still
// available via the blanket `impl<T: Display> ToString for T`, so callers are
// unaffected, and the type now also works with `format!`/`{}` formatting.
// The labels match the serde renames above.
impl std::fmt::Display for SloCorrectionCategory {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        let label = match self {
            Self::SCHEDULED_MAINTENANCE => "Scheduled Maintenance",
            Self::OUTSIDE_BUSINESS_HOURS => "Outside Business Hours",
            Self::DEPLOYMENT => "Deployment",
            Self::OTHER => "Other",
        };
        f.write_str(label)
    }
}
|
mod data;
mod proxy;
mod servers;
use crate::proxy::Proxy;
use crate::servers::ServerList;
use anyhow::ensure;
use anyhow::Context;
use env_logger::Env;
use log::{debug, info};
use rotmg_packets::Parameters;
use std::net::IpAddr;
use std::path::PathBuf;
use structopt::StructOpt;
use tokio::try_join;
// Command-line options for the proxy, parsed by structopt.
#[derive(StructOpt)]
pub struct Opts {
    /// Use the given port for listening instead of using the one extracted from
    /// the ROTMG client.
    #[structopt(short, long)]
    port: Option<u16>,

    /// Listen on the given IP address.
    #[structopt(short, long, default_value = "127.0.0.1")]
    ip: IpAddr,

    /// The default server to connect to.
    #[structopt(short = "s", long, default_value = "USEast")]
    default_server: String,

    /// Respond to flash policy file requests with the given policy file instead
    /// of the default "allow-all" policy.
    #[structopt(long)]
    policy_file: Option<PathBuf>,
}
/// Initialize env_logger, defaulting to INFO-level output for this crate
/// unless overridden via the environment.
fn init_logging() {
    let default_filter = concat!(env!("CARGO_PKG_NAME"), "=INFO");
    env_logger::init_from_env(Env::new().default_filter_or(default_filter))
}
/// Create this crate's data directory (under the platform data dir) if
/// needed and return its path.
async fn init_data_dir() -> anyhow::Result<PathBuf> {
    // `data_dir` is None on unsupported platforms; surface that as an error
    // before appending the crate-specific subdirectory.
    let dir = dirs::data_dir()
        .context("getting system data dir")?
        .join(env!("CARGO_PKG_NAME"));
    tokio::fs::create_dir_all(&dir)
        .await
        .context("creating data dir")?;
    Ok(dir)
}
// Entry point: set up logging, parse CLI options, prepare the data
// directory, then run the proxy until it exits or errors.
#[tokio::main]
async fn main() -> anyhow::Result<()> {
    init_logging();
    let opts: Opts = StructOpt::from_args();
    let dir = init_data_dir().await?;
    Proxy::init(dir, opts).await?.start().await?;
    Ok(())
}
|
use super::hlist::*;
use std::ops::Add;
// Error-accumulating counterpart to Result: `Ok` carries an HList of all
// successes so far, `Err` carries every error encountered (not just the first).
#[derive(PartialEq, Eq, Debug)]
pub enum Validated<T, E>
    where T: HList
{
    Ok(T),
    Err(Vec<E>),
}
impl<T, E> Validated<T, E>
    where T: HList
{
    /// Returns true if this validation is Ok, false otherwise
    ///
    /// ```
    /// # use frunk::validated::*;
    ///
    /// let r1: Result<String, String> = Result::Ok(String::from("hello"));
    /// let v = r1.into_validated();
    /// assert!(v.is_ok());
    /// ```
    pub fn is_ok(&self) -> bool {
        if let Validated::Ok(_) = *self { true } else { false }
    }

    /// Returns true if this validation is Err, false otherwise
    ///
    /// ```
    /// # use frunk::validated::*;
    ///
    /// let r1: Result<String, i32> = Result::Err(32);
    /// let v = r1.into_validated();
    /// assert!(v.is_err());
    /// ```
    pub fn is_err(&self) -> bool {
        !self.is_ok()
    }

    /// Turns this Validated into a Result.
    ///
    /// An Ok becomes Result::Ok carrying the accumulated HList of successes;
    /// an Err becomes Result::Err carrying every accumulated error.
    ///
    /// ```
    /// # #[macro_use] extern crate frunk; use frunk::hlist::*; use frunk::validated::*; fn main() {
    ///
    /// #[derive(PartialEq, Eq, Debug)]
    /// struct Person {
    ///     age: i32,
    ///     name: String,
    /// }
    ///
    /// fn get_name() -> Result<String, String> {
    ///     Result::Ok("James".to_owned())
    /// }
    ///
    /// fn get_age() -> Result<i32, String> {
    ///     Result::Ok(32)
    /// }
    ///
    /// let v = get_name().into_validated() + get_age();
    /// let person = v.into_result()
    ///     .map(|hlist| {
    ///         let (name, age) = hlist.into_tuple2();
    ///         Person {
    ///             name: name,
    ///             age: age,
    ///         }
    ///     });
    ///
    /// assert_eq!(person,
    ///            Result::Ok(Person {
    ///                name: "James".to_owned(),
    ///                age: 32,
    ///            }));
    /// # }
    pub fn into_result(self) -> Result<T, Vec<E>> {
        match self {
            Validated::Ok(successes) => Result::Ok(successes),
            Validated::Err(failures) => Result::Err(failures),
        }
    }
}
/// Trait for "lifting" a given type into a Validated
/// Trait for "lifting" a given type into a Validated
pub trait IntoValidated<T, E> {
    // Produces a single-element Validated that further results can be added to.
    fn into_validated(self) -> Validated<HCons<T, HNil>, E>;
}
impl<T, E> IntoValidated<T, E> for Result<T, E> {
    /// Consumes the current Result into a Validated so that we can begin chaining
    ///
    /// An Ok becomes a one-element HList; an Err becomes a one-element error vec.
    ///
    /// ```
    /// # use frunk::validated::*;
    ///
    /// let r1: Result<String, i32> = Result::Err(32);
    /// let v = r1.into_validated();
    /// assert!(v.is_err());
    /// ```
    fn into_validated(self) -> Validated<HCons<T, HNil>, E> {
        match self {
            Result::Ok(value) => Validated::Ok(HCons {
                head: value,
                tail: HNil,
            }),
            Result::Err(e) => Validated::Err(vec![e]),
        }
    }
}
/// Implements Add for the current Validated with a Result, returning a new Validated.
///
/// ```
/// # #[macro_use] extern crate frunk; use frunk::hlist::*; use frunk::validated::*; fn main() {
///
/// let r1: Result<String, String> = Result::Ok(String::from("hello"));
/// let r2: Result<i32, String> = Result::Ok(1);
/// let v = r1.into_validated() + r2;
/// assert_eq!(v, Validated::Ok(hlist!(String::from("hello"), 1)))
/// # }
/// ```
///
impl<T, E, T2> Add<Result<T2, E>> for Validated<T, E>
    where T: HList + Add<HCons<T2, HNil>>,
          <T as Add<HCons<T2, HNil>>>::Output: HList
{
    type Output = Validated<<T as Add<HCons<T2, HNil>>>::Output, E>;

    fn add(self, other: Result<T2, E>) -> Self::Output {
        // Lift the Result, then delegate to Validated + Validated.
        self + other.into_validated()
    }
}
/// Implements Add for the current Validated with another Validated, returning a new Validated.
///
/// ```
/// # #[macro_use] extern crate frunk; use frunk::hlist::*; use frunk::validated::*; fn main() {
/// let r1: Result<String, String> = Result::Ok(String::from("hello"));
/// let r2: Result<i32, String> = Result::Ok(1);
/// let v1 = r1.into_validated();
/// let v2 = r2.into_validated();
/// let v3 = v1 + v2;
/// assert_eq!(v3, Validated::Ok(hlist!(String::from("hello"), 1)))
/// # }
/// ```
impl<T, E, T2> Add<Validated<T2, E>> for Validated<T, E>
    where T: HList + Add<T2>,
          T2: HList,
          <T as Add<T2>>::Output: HList
{
    type Output = Validated<<T as Add<T2>>::Output, E>;

    // Combines two validations: errors accumulate (left errors first), and only
    // two Oks produce an Ok. Arm order matters — the Err/Err case must be
    // matched before the single-Err cases so both error lists are kept.
    fn add(self, other: Validated<T2, E>) -> Self::Output {
        match (self, other) {
            (Validated::Err(mut errs), Validated::Err(errs2)) => {
                errs.extend(errs2);
                Validated::Err(errs)
            }
            (Validated::Err(errs), _) => Validated::Err(errs),
            (_, Validated::Err(errs)) => Validated::Err(errs),
            // Appending the HLists preserves left-to-right accumulation order.
            (Validated::Ok(h1), Validated::Ok(h2)) => Validated::Ok(h1 + h2),
        }
    }
}
#[cfg(test)]
mod tests {
    use super::super::hlist::*;
    use super::*;

    // Adding a Result to a Validated accumulates both successes.
    #[test]
    fn test_adding_ok_results() {
        let r1: Result<String, String> = Result::Ok(String::from("hello"));
        let r2: Result<i32, String> = Result::Ok(1);
        let v = r1.into_validated() + r2;
        assert_eq!(v, Validated::Ok(hlist!(String::from("hello"), 1)))
    }

    // Chained Validated + Validated keeps accumulating in order.
    #[test]
    fn test_adding_validated_oks() {
        let r1: Result<String, String> = Result::Ok(String::from("hello"));
        let r2: Result<i32, String> = Result::Ok(1);
        let r3: Result<i32, String> = Result::Ok(3);
        let v1 = r1.into_validated();
        let v2 = r2.into_validated();
        let v3 = r3.into_validated();
        let comb = v1 + v2 + v3;
        assert_eq!(comb, Validated::Ok(hlist!(String::from("hello"), 1, 3)))
    }

    // A single Err poisons the whole accumulation.
    #[test]
    fn test_adding_err_results() {
        let r1: Result<i16, String> = Result::Ok(1);
        let r2: Result<i16, String> = Result::Err(String::from("NO!"));
        let v1 = r1.into_validated() + r2;
        assert!(v1.is_err());
        assert_eq!(v1, Validated::Err(vec!["NO!".to_owned()]))
    }

    #[derive(PartialEq, Eq, Debug)]
    struct Person {
        age: i32,
        name: String,
    }

    fn get_name() -> Result<String, String> {
        Result::Ok("James".to_owned())
    }

    fn get_age() -> Result<i32, String> {
        Result::Ok(32)
    }

    fn get_name_faulty() -> Result<String, String> {
        Result::Err("crap name".to_owned())
    }

    fn get_age_faulty() -> Result<i32, String> {
        Result::Err("crap age".to_owned())
    }

    // into_result on an all-Ok accumulation yields the combined HList.
    #[test]
    fn test_to_result_ok() {
        let v = get_name().into_validated() + get_age();
        let person = v.into_result()
            .map(|hlist| {
                let (name, age) = hlist.into_tuple2();
                Person {
                    name: name,
                    age: age,
                }
            });
        assert_eq!(person,
                   Result::Ok(Person {
                       name: "James".to_owned(),
                       age: 32,
                   }));
    }

    // into_result on a failed accumulation yields ALL errors, not just the first.
    #[test]
    fn test_to_result_faulty() {
        let v = get_name_faulty().into_validated() + get_age_faulty();
        let person = v.into_result()
            .map(|_| unimplemented!());
        assert_eq!(person,
                   Result::Err(vec!["crap name".to_owned(), "crap age".to_owned()]));
    }
}
|
use std::fs::File;
use crate::{Vec3};
// An RGBA texture decoded from a PNG file.
pub struct Texture {
    // Width in pixels.
    pub dim_x: usize,
    // Height in pixels.
    pub dim_y: usize,
    // Always png::ColorType::RGBA (enforced by `from_png`).
    color_type: png::ColorType,
    // Raw pixel bytes, 4 bytes (RGBA) per pixel, row-major.
    // NOTE(review): `Box<Vec<u8>>` double-indirects; a plain `Vec<u8>` would do,
    // but changing it requires touching the constructor as well.
    data: Box<Vec<u8>>,
}
impl Texture {
pub fn from_png(path: &str) -> Texture {
let decoder = png::Decoder::new(File::open(path).unwrap());
let (info, mut reader) = decoder.read_info().unwrap();
let mut buf = vec![0; info.buffer_size()];
reader.next_frame(&mut buf).unwrap();
if info.color_type != png::ColorType::RGBA {
panic!("Only RBGA color format is supported.");
}
Texture{
dim_x: info.width as usize,
dim_y: info.height as usize,
color_type: info.color_type,
data: Box::new(buf),
}
}
fn get_pixel_discrete(&self, mut x: usize, mut y: usize) -> &[u8] {
// Clamp to valid range
x = if x >= self.dim_x {self.dim_x - 1} else {x};
y = if y >= self.dim_y {self.dim_y - 1} else {y};
x = (x + 500) % self.dim_x;
// Get pixel slice
let pixel_start_idx = y * (self.dim_x) * 4 + x * 4;
&self.data[pixel_start_idx..pixel_start_idx+4]
}
fn get_pixel_bilinear(&self, x: f32, y: f32) -> [u8; 4] {
let mut color: [u8; 4] = [0; 4];
// closest discrete indicies
let fx = f32::floor(x);
let fy = f32::floor(y);
let cx = f32::ceil(x);
let cy = f32::ceil(y);
// pixel weights
let d1 = f32::sqrt( (fx-x)*(fx-x) + (fy-y)*(fy-y) );
let d2 = f32::sqrt( (fx-x)*(fx-x) + (cy-y)*(cy-y) );
let d3 = f32::sqrt( (cx-x)*(cx-x) + (fy-y)*(fy-y) );
let d4 = f32::sqrt( (cx-x)*(cx-x) + (cy-y)*(cy-y) );
let dall = 1.0/(d1+d2+d3+d4);
let w1 = d1*dall;
let w2 = d2*dall;
let w3 = d3*dall;
let w4 = d4*dall;
// samples:
let c1 = self.get_pixel_discrete(fx as usize, fy as usize);
let c2 = self.get_pixel_discrete(fx as usize, cy as usize);
let c3 = self.get_pixel_discrete(cx as usize, fy as usize);
let c4 = self.get_pixel_discrete(cx as usize, cy as usize);
color[0] = ((c1[0] as f32 * w1) as u8) + ((c2[0] as f32 * w2) as u8) + ((c3[0] as f32 * w3) as u8) + ((c4[0] as f32 * w4) as u8);
color[1] = ((c1[1] as f32 * w1) as u8) + ((c2[1] as f32 * w2) as u8) + ((c3[1] as f32 * w3) as u8) + ((c4[1] as f32 * w4) as u8);
color[2] = ((c1[2] as f32 * w1) as u8) + ((c2[2] as f32 * w2) as u8) + ((c3[2] as f32 * w3) as u8) + ((c4[2] as f32 * w4) as u8);
color[3] = ((c1[3] as f32 * w1) as u8) + ((c2[3] as f32 * w2) as u8) + ((c3[3] as f32 * w3) as u8) + ((c4[3] as f32 * w4) as u8);
color
}
fn xy_from_opengl(&self, s: f32, t: f32) -> (f32, f32) {
let x = s*(self.dim_x as f32);
let y = (1.0 - t)*(self.dim_y as f32);
(x,y)
}
/// Sphere Map lookup according to OpenGL documentation
/// https://www.opengl.org/archives/resources/code/samples/advanced/advanced97/notes/node94.html
pub fn lookup_sphere(texture: &Texture, dir: &Vec3) -> [u8; 4] {
let m = 2.0 * f32::sqrt((dir.x * dir.x) + (dir.y * dir.y) + ((dir.z + 1.0)*(dir.z + 1.0)));
let s = dir.x / m + 0.5;
let t = dir.y / m + 0.5;
let pixel = texture.xy_from_opengl(s, t);
texture.get_pixel_bilinear(pixel.0, pixel.1)
}
} |
use std::collections::{HashMap, HashSet};
use std::io::{self, BufRead};
// One parsed input line: (ingredients, declared allergens).
type Food = (HashSet<String>, HashSet<String>);
// allergen -> set of candidate ingredients that could contain it.
type Mapping = HashMap<String, HashSet<String>>;
// allergen -> the single ingredient proven to contain it.
type Reduced = HashMap<String, String>;
/// Parse a line of the form "ing1 ing2 ... (contains a1, a2, ...)" into
/// its ingredient and allergen sets; returns None for malformed lines.
fn parse_line(line: &str) -> Option<Food> {
    let sep = " (contains ";
    let index = line.find(sep)?;
    // Everything before the separator is the space-separated ingredient list.
    let ingredients: HashSet<String> = line
        .get(..index)?
        .split(' ')
        .map(str::to_string)
        .collect();
    // Everything after it (minus the trailing ')') is the allergen list.
    let allergens: HashSet<String> = line
        .get(index + sep.len()..line.len() - 1)?
        .split(", ")
        .map(str::to_string)
        .collect();
    Some((ingredients, allergens))
}
fn build_mapping(xs: &[Food]) -> Mapping {
let mut mapping: Mapping = HashMap::new();
for (ys, zs) in xs {
for z in zs {
let current = mapping.entry(z.clone()).or_insert_with(|| ys.clone());
*current = ys.intersection(current).cloned().collect();
}
}
mapping
}
/// Count ingredient occurrences (with multiplicity across foods) that cannot
/// contain any allergen, i.e. appear in no allergen's candidate set.
fn part1(xs: &[Food], mapping: &Mapping) -> usize {
    let suspects: HashSet<&String> = mapping.values().flatten().collect();
    xs.iter()
        .flat_map(|(ingredients, _)| ingredients)
        .filter(|ingredient| !suspects.contains(ingredient))
        .count()
}
// Resolve the candidate mapping to a unique allergen -> ingredient assignment
// by repeatedly fixing any allergen with exactly one candidate and removing
// that ingredient from all other candidate sets (constraint propagation).
fn reduce_mapping(mapping: &Mapping) -> Reduced {
    let mut result: Reduced = HashMap::new();
    // Work on a private copy so the caller's mapping is untouched.
    let mut mapping = mapping.clone();
    while let Some((key, values)) = mapping.iter().find(|(_, values)| values.len() == 1) {
        // Clone out of the borrow before mutating the map.
        let key = key.to_string();
        let value = values.iter().next().unwrap().to_string();
        mapping.remove(&key);
        for values in mapping.values_mut() {
            values.remove(&value);
        }
        result.insert(key, value);
    }
    result
}
/// Build the canonical dangerous-ingredient list: ingredients joined by
/// commas, ordered by their allergen's name.
fn part2(reduced: &Reduced) -> String {
    let mut pairs: Vec<_> = reduced.iter().collect();
    pairs.sort_by(|(a, _), (b, _)| a.cmp(b));
    pairs
        .into_iter()
        .map(|(_, ingredient)| ingredient.as_str())
        .collect::<Vec<_>>()
        .join(",")
}
/// Read food descriptions from stdin, then print both puzzle answers.
fn main() {
    // Parse every readable stdin line into a food entry.
    let foods: Vec<_> = io::stdin()
        .lock()
        .lines()
        .filter_map(|line| {
            let line = line.ok()?;
            parse_line(&line)
        })
        .collect();
    let mapping = build_mapping(&foods);
    println!("Part 1: {}", part1(&foods, &mapping));
    println!("Part 2: {}", part2(&reduce_mapping(&mapping)));
}
|
/// Sum a slice of u32 values, returning `None` on overflow.
///
/// Fix: the original accumulated with unchecked `+` and then tested
/// `r > u32::MAX`, which is always false — the overflow had already panicked
/// (debug) or wrapped (release) before the check could run. `checked_add`
/// detects overflow at the exact addition that would exceed `u32::MAX`.
fn sum_number(v: &[u32]) -> Option<u32> {
    let mut r: u32 = 0;
    for &n in v {
        // `?` short-circuits to None as soon as an addition would overflow.
        r = r.checked_add(n)?;
    }
    Some(r)
}
// Demo entry point: the two large values push the sum past u32::MAX,
// exercising sum_number's overflow handling.
fn main() {
    let num = &[3, 5, 8, 4143636300, 4143646666];
    let res = sum_number(num);
    println!("计算结果是:{:?}", res);
}
|
use super::ObjectRef;
use crate::watcher;
use ahash::AHashMap;
use derivative::Derivative;
use kube_client::Resource;
use parking_lot::RwLock;
use std::{fmt::Debug, hash::Hash, sync::Arc};
/// Shared, thread-safe map from object reference to the cached object.
type Cache<K> = Arc<RwLock<AHashMap<ObjectRef<K>, Arc<K>>>>;

/// A writable Store handle
///
/// This is exclusive since it's not safe to share a single `Store` between multiple reflectors.
/// In particular, `Restarted` events will clobber the state of other connected reflectors.
#[derive(Debug, Derivative)]
#[derivative(Default(bound = "K::DynamicType: Default"))]
pub struct Writer<K: 'static + Resource>
where
    K::DynamicType: Eq + Hash,
{
    // Backing cache, shared with every reader produced by `as_reader`.
    store: Cache<K>,
    // Dynamic type info needed to build `ObjectRef`s for incoming objects.
    dyntype: K::DynamicType,
}
impl<K: 'static + Resource + Clone> Writer<K>
where
K::DynamicType: Eq + Hash + Clone,
{
/// Creates a new Writer with the specified dynamic type.
///
/// If the dynamic type is default-able (for example when writer is used with
/// `k8s_openapi` types) you can use `Default` instead.
pub fn new(dyntype: K::DynamicType) -> Self {
Writer {
store: Default::default(),
dyntype,
}
}
/// Return a read handle to the store
///
/// Multiple read handles may be obtained, by either calling `as_reader` multiple times,
/// or by calling `Store::clone()` afterwards.
#[must_use]
pub fn as_reader(&self) -> Store<K> {
Store {
store: self.store.clone(),
}
}
/// Applies a single watcher event to the store
pub fn apply_watcher_event(&mut self, event: &watcher::Event<K>) {
match event {
watcher::Event::Applied(obj) => {
let key = ObjectRef::from_obj_with(obj, self.dyntype.clone());
let obj = Arc::new(obj.clone());
self.store.write().insert(key, obj);
}
watcher::Event::Deleted(obj) => {
let key = ObjectRef::from_obj_with(obj, self.dyntype.clone());
self.store.write().remove(&key);
}
watcher::Event::Restarted(new_objs) => {
let new_objs = new_objs
.iter()
.map(|obj| {
(
ObjectRef::from_obj_with(obj, self.dyntype.clone()),
Arc::new(obj.clone()),
)
})
.collect::<AHashMap<_, _>>();
*self.store.write() = new_objs;
}
}
}
}
/// A readable cache of Kubernetes objects of kind `K`
///
/// Cloning will produce a new reference to the same backing store.
///
/// Cannot be constructed directly since one writer handle is required,
/// use `Writer::as_reader()` instead.
#[derive(Derivative)]
#[derivative(Debug(bound = "K: Debug, K::DynamicType: Debug"), Clone)]
pub struct Store<K: 'static + Resource>
where
    K::DynamicType: Hash + Eq,
{
    // Shared with the owning `Writer`; reads see the writer's latest state.
    store: Cache<K>,
}
impl<K: 'static + Clone + Resource> Store<K>
where
    K::DynamicType: Eq + Hash + Clone,
{
    /// Retrieve a `clone()` of the entry referred to by `key`, if it is in the cache.
    ///
    /// `key.namespace` is ignored for cluster-scoped resources.
    ///
    /// Note that this is a cache and may be stale. Deleted objects may still exist in the cache
    /// despite having been deleted in the cluster, and new objects may not yet exist in the cache.
    /// If any of these are a problem for you then you should abort your reconciler and retry later.
    /// If you use `kube_rt::controller` then you can do this by returning an error and specifying a
    /// reasonable `error_policy`.
    #[must_use]
    pub fn get(&self, key: &ObjectRef<K>) -> Option<Arc<K>> {
        let guard = self.store.read();
        if let Some(entry) = guard.get(key) {
            // Clone the Arc so the read lock can be released promptly.
            return Some(Arc::clone(entry));
        }
        // Retry with the namespace erased, in case the object is cluster-scoped.
        let mut cluster_key = key.clone();
        cluster_key.namespace = None;
        guard.get(&cluster_key).map(Arc::clone)
    }

    /// Return a full snapshot of the current values
    #[must_use]
    pub fn state(&self) -> Vec<Arc<K>> {
        self.store.read().values().map(Arc::clone).collect()
    }

    /// Retrieve a `clone()` of the entry found by the given predicate
    #[must_use]
    pub fn find<P>(&self, predicate: P) -> Option<Arc<K>>
    where
        P: Fn(&K) -> bool,
    {
        self.store
            .read()
            .values()
            .find(|candidate| predicate(candidate.as_ref()))
            .cloned()
    }

    /// Return the number of elements in the store
    #[must_use]
    pub fn len(&self) -> usize {
        self.store.read().len()
    }

    /// Return whether the store is empty
    #[must_use]
    pub fn is_empty(&self) -> bool {
        self.store.read().is_empty()
    }
}
/// Create a (Reader, Writer) for a `Store<K>` for a typed resource `K`
///
/// The `Writer` should be passed to a [`reflector`](crate::reflector()),
/// and the [`Store`] is a read-only handle.
#[must_use]
pub fn store<K>() -> (Store<K>, Writer<K>)
where
    K: Resource + Clone + 'static,
    K::DynamicType: Eq + Hash + Clone + Default,
{
    let writer = Writer::<K>::default();
    let reader = writer.as_reader();
    (reader, writer)
}
#[cfg(test)]
mod tests {
    use super::{store, Writer};
    use crate::{reflector::ObjectRef, watcher};
    use k8s_openapi::api::core::v1::ConfigMap;
    use kube_client::api::ObjectMeta;

    // A namespaced object must be retrievable via a ref carrying its namespace.
    #[test]
    fn should_allow_getting_namespaced_object_by_namespaced_ref() {
        let cm = ConfigMap {
            metadata: ObjectMeta {
                name: Some("obj".to_string()),
                namespace: Some("ns".to_string()),
                ..ObjectMeta::default()
            },
            ..ConfigMap::default()
        };
        let mut store_w = Writer::default();
        store_w.apply_watcher_event(&watcher::Event::Applied(cm.clone()));
        let store = store_w.as_reader();
        assert_eq!(store.get(&ObjectRef::from_obj(&cm)).as_deref(), Some(&cm));
    }

    // The cluster-scoped fallback in `Store::get` must not leak namespaced
    // objects to refs without a namespace.
    #[test]
    fn should_not_allow_getting_namespaced_object_by_clusterscoped_ref() {
        let cm = ConfigMap {
            metadata: ObjectMeta {
                name: Some("obj".to_string()),
                namespace: Some("ns".to_string()),
                ..ObjectMeta::default()
            },
            ..ConfigMap::default()
        };
        let mut cluster_cm = cm.clone();
        cluster_cm.metadata.namespace = None;
        let mut store_w = Writer::default();
        store_w.apply_watcher_event(&watcher::Event::Applied(cm));
        let store = store_w.as_reader();
        assert_eq!(store.get(&ObjectRef::from_obj(&cluster_cm)), None);
    }

    // Basic lookup of a cluster-scoped object via the `store()` helper.
    #[test]
    fn should_allow_getting_clusterscoped_object_by_clusterscoped_ref() {
        let cm = ConfigMap {
            metadata: ObjectMeta {
                name: Some("obj".to_string()),
                namespace: None,
                ..ObjectMeta::default()
            },
            ..ConfigMap::default()
        };
        let (store, mut writer) = store();
        writer.apply_watcher_event(&watcher::Event::Applied(cm.clone()));
        assert_eq!(store.get(&ObjectRef::from_obj(&cm)).as_deref(), Some(&cm));
    }

    // A namespaced ref must still find a cluster-scoped object, because
    // `Store::get` erases the namespace on its second lookup.
    #[test]
    fn should_allow_getting_clusterscoped_object_by_namespaced_ref() {
        let cm = ConfigMap {
            metadata: ObjectMeta {
                name: Some("obj".to_string()),
                namespace: None,
                ..ObjectMeta::default()
            },
            ..ConfigMap::default()
        };
        let mut nsed_cm = cm.clone();
        nsed_cm.metadata.namespace = Some("ns".to_string());
        let mut store_w = Writer::default();
        store_w.apply_watcher_event(&watcher::Event::Applied(cm.clone()));
        let store = store_w.as_reader();
        assert_eq!(store.get(&ObjectRef::from_obj(&nsed_cm)).as_deref(), Some(&cm));
    }

    // `find` + `len`/`is_empty` behavior across successive applied events.
    #[test]
    fn find_element_in_store() {
        let cm = ConfigMap {
            metadata: ObjectMeta {
                name: Some("obj".to_string()),
                namespace: None,
                ..ObjectMeta::default()
            },
            ..ConfigMap::default()
        };
        let mut target_cm = cm.clone();
        let (reader, mut writer) = store::<ConfigMap>();
        assert!(reader.is_empty());
        writer.apply_watcher_event(&watcher::Event::Applied(cm));
        assert_eq!(reader.len(), 1);
        assert!(reader.find(|k| k.metadata.generation == Some(1234)).is_none());
        target_cm.metadata.name = Some("obj1".to_string());
        target_cm.metadata.generation = Some(1234);
        writer.apply_watcher_event(&watcher::Event::Applied(target_cm.clone()));
        assert!(!reader.is_empty());
        assert_eq!(reader.len(), 2);
        let found = reader.find(|k| k.metadata.generation == Some(1234));
        assert_eq!(found.as_deref(), Some(&target_cm));
    }
}
|
#![forbid(unsafe_code)]
#![cfg_attr(not(debug_assertions), deny(warnings))] // Forbid warnings in release builds
#![warn(clippy::all, rust_2018_idioms)]
//mod fractal_clock;
//use fractal_clock::FractalClock;
mod noise;
use noise::noise_graph::NoiseGraph;
// When compiling natively:
#[cfg(not(target_arch = "wasm32"))]
fn main() {
    // Build the window options first, then hand the app to eframe.
    let native_options = eframe::NativeOptions::default();
    let app = Box::new(NoiseGraph::default());
    eframe::run_native(app, native_options);
}
|
use serde::{Deserialize, Serialize};
/// Errors in this crate are plain strings.
pub type MyError = String;

/// Convenience alias for results carrying a [`MyError`].
pub type MyResult<A> = Result<A, MyError>;

/// Filesystem paths are passed around as plain strings.
pub type Path = String;

/// Build an `Err` variant from anything convertible into a `String`.
pub fn error<A, S: Into<String>>(s: S) -> MyResult<A> {
    Err(s.into())
}
/// A watched entry: a path plus its creation timestamp (string-formatted).
#[derive(Deserialize, Serialize, Debug)]
pub struct Watched {
    pub path: Path,
    pub created: String,
}

/// Request to set the watched flag for a path.
#[derive(Deserialize, Serialize, Debug)]
pub struct Update {
    pub path: Path,
    pub watched: bool,
}

/// Request to open a path.
#[derive(Deserialize, Serialize, Debug)]
pub struct Open {
    pub path: Path,
}
|
// Select the chip-specific implementation at compile time: `pwr_h7` builds
// pull in `h7.rs` as the `_version` module.
#[cfg_attr(pwr_h7, path = "h7.rs")]
mod _version;
// Re-export everything so callers stay version-agnostic.
pub use _version::*;
|
// Plugin wiring: the command glue lives in `nu_plugin_str`, the
// string-manipulation core in `strutils`.
mod nu_plugin_str;
mod strutils;
// Re-export the plugin type as the crate's public entry point.
pub use strutils::Str;
#[cfg(test)]
mod tests {
    use super::Str;
    use crate::strutils::Action;
    use nu_errors::ShellError;
    use nu_protocol::{Primitive, ReturnSuccess, TaggedDictBuilder, UntaggedValue, Value};
    use nu_source::Tag;
    use nu_value_ext::ValueExt;
    use num_bigint::BigInt;

    impl Str {
        /// Assert that the plugin recorded exactly `action`.
        pub fn expect_action(&self, action: Action) {
            match &self.action {
                Some(set) if set == &action => {}
                // `panic!(format!(..))` is deprecated and a hard error in the
                // 2021 edition; pass the format arguments to `panic!` directly.
                Some(other) => panic!("\nExpected {:#?}\n\ngot {:#?}", action, other),
                None => panic!("\nAction {:#?} not found.", action),
            }
        }

        /// Assert that the plugin recorded `field` as its target column path.
        pub fn expect_field(&self, field: Value) {
            let field = match field.as_column_path() {
                Ok(column_path) => column_path,
                Err(reason) => panic!(
                    "\nExpected {:#?} to be a ColumnPath, \n\ngot {:#?}",
                    field, reason
                ),
            };
            match &self.field {
                Some(column_path) if column_path == &field => {}
                Some(other) => panic!("\nExpected {:#?} \n\ngot {:#?}", field, other),
                None => panic!("\nField {:#?} not found.", field),
            }
        }
    }

    /// Clone the value stored under `key` in `for_value`.
    pub fn get_data(for_value: Value, key: &str) -> Value {
        for_value.get_data(&key.to_string()).borrow().clone()
    }

    /// Pull the `at`-th successful return value out of a plugin result stream,
    /// panicking on any error along the way.
    pub fn expect_return_value_at(
        for_results: Result<Vec<Result<ReturnSuccess, ShellError>>, ShellError>,
        at: usize,
    ) -> Value {
        let return_values = for_results
            .expect("Failed! This seems to be an error getting back the results from the plugin.");
        for (idx, item) in return_values.iter().enumerate() {
            let item = match item {
                Ok(return_value) => return_value,
                Err(reason) => panic!("{}", reason),
            };
            if idx == at {
                return item.raw_value().unwrap();
            }
        }
        panic!(
            "Couldn't get return value from stream at {}. (There are {} items)",
            at,
            return_values.len() - 1
        )
    }

    /// Wrap an integer in an untagged `Value`.
    pub fn int(i: impl Into<BigInt>) -> Value {
        UntaggedValue::Primitive(Primitive::Int(i.into())).into_untagged_value()
    }

    /// Wrap a string in an untagged `Value`.
    pub fn string(input: impl Into<String>) -> Value {
        UntaggedValue::string(input.into()).into_untagged_value()
    }

    /// A one-column record `{ key: value }` for structured-input tests.
    pub fn structured_sample_record(key: &str, value: &str) -> Value {
        let mut record = TaggedDictBuilder::new(Tag::unknown());
        // `key` is already `&str`; the original's `key.clone()` was a no-op
        // copy of the reference.
        record.insert_untagged(key, UntaggedValue::string(value));
        record.into_value()
    }

    /// A bare string value for unstructured-input tests.
    pub fn unstructured_sample_record(value: &str) -> Value {
        UntaggedValue::string(value).into_value(Tag::unknown())
    }
}
|
use super::underlying::{Link, Underlying};
use super::{Barrier, Ptr};
use std::ops::Deref;
use std::ptr::NonNull;
/// [`Arc`] is a reference-counted handle to an instance.
#[derive(Debug)]
pub struct Arc<T: 'static> {
    // Non-null pointer to the heap allocation that holds the value together
    // with its reference count (see `Underlying`).
    instance_ptr: NonNull<Underlying<T>>,
}
impl<T: 'static> Arc<T> {
    /// Creates a new instance of [`Arc`].
    ///
    /// # Examples
    ///
    /// ```
    /// use scc::ebr::Arc;
    ///
    /// let arc: Arc<usize> = Arc::new(31);
    /// ```
    #[inline]
    pub fn new(t: T) -> Arc<T> {
        let boxed = Box::new(Underlying::new(t));
        Arc {
            // SAFETY: `Box::into_raw` never returns null.
            instance_ptr: unsafe { NonNull::new_unchecked(Box::into_raw(boxed)) },
        }
    }

    /// Generates a [`Ptr`] out of the [`Arc`].
    ///
    /// # Examples
    ///
    /// ```
    /// use scc::ebr::{Arc, Barrier};
    ///
    /// let arc: Arc<usize> = Arc::new(37);
    /// let barrier = Barrier::new();
    /// let ptr = arc.ptr(&barrier);
    /// assert_eq!(*ptr.as_ref().unwrap(), 37);
    /// ```
    #[must_use]
    #[inline]
    pub fn ptr<'b>(&self, _barrier: &'b Barrier) -> Ptr<'b, T> {
        Ptr::from(self.instance_ptr.as_ptr())
    }

    /// Returns a mutable reference to the underlying instance if the instance is exclusively
    /// owned.
    ///
    /// # Examples
    ///
    /// ```
    /// use scc::ebr::Arc;
    ///
    /// let mut arc: Arc<usize> = Arc::new(38);
    /// *arc.get_mut().unwrap() += 1;
    /// assert_eq!(*arc, 39);
    /// ```
    #[inline]
    pub fn get_mut(&mut self) -> Option<&mut T> {
        unsafe { self.instance_ptr.as_mut().get_mut() }
    }

    /// Drops the underlying instance if the last reference is dropped.
    ///
    /// The instance is not passed to the garbage collector when the last reference is dropped,
    /// instead the method drops the instance immediately. The semantics is the same as that of
    /// [`std::sync::Arc`].
    ///
    /// # Safety
    ///
    /// The caller must ensure that there is no [`Ptr`] pointing to the instance.
    ///
    /// # Examples
    ///
    /// ```
    /// use scc::ebr::Arc;
    /// use std::sync::atomic::AtomicBool;
    /// use std::sync::atomic::Ordering::Relaxed;
    ///
    /// static DROPPED: AtomicBool = AtomicBool::new(false);
    /// struct T(&'static AtomicBool);
    /// impl Drop for T {
    ///     fn drop(&mut self) {
    ///         self.0.store(true, Relaxed);
    ///     }
    /// }
    ///
    /// let arc: Arc<T> = Arc::new(T(&DROPPED));
    ///
    /// unsafe {
    ///     arc.drop_in_place();
    /// }
    /// assert!(DROPPED.load(Relaxed));
    /// ```
    pub unsafe fn drop_in_place(mut self) {
        if self.underlying().drop_ref() {
            self.instance_ptr.as_mut().free();
        }
        // `forget` must run unconditionally: this handle's reference was
        // already released by `drop_ref` above, so letting `Drop` run as well
        // would decrement the counter a second time whenever other references
        // remain. (Previously `forget` only ran on the last-reference path,
        // double-counting every other call.)
        std::mem::forget(self);
    }

    /// Creates a new [`Arc`] from the given pointer.
    pub(super) fn from(ptr: NonNull<Underlying<T>>) -> Arc<T> {
        // A handle may only be fabricated from a pointer that is still live.
        debug_assert_ne!(
            unsafe {
                ptr.as_ref()
                    .ref_cnt()
                    .load(std::sync::atomic::Ordering::Relaxed)
            },
            0
        );
        Arc { instance_ptr: ptr }
    }

    /// Returns its underlying pointer.
    pub(super) fn raw_ptr(&self) -> *mut Underlying<T> {
        self.instance_ptr.as_ptr()
    }

    /// Drops the reference, and returns the underlying pointer if the last reference was
    /// dropped.
    pub(super) fn drop_ref(&self) -> Option<*mut Underlying<T>> {
        if self.underlying().drop_ref() {
            Some(self.instance_ptr.as_ptr())
        } else {
            None
        }
    }

    /// Returns a reference to the underlying instance.
    fn underlying(&self) -> &Underlying<T> {
        // SAFETY: `instance_ptr` stays valid for the lifetime of this handle.
        unsafe { self.instance_ptr.as_ref() }
    }
}
impl<T: 'static> Clone for Arc<T> {
    #[inline]
    fn clone(&self) -> Self {
        // Cloning from a handle whose count has already reached zero would
        // resurrect a dying instance; that is a caller bug, hence the assert.
        debug_assert_ne!(
            self.underlying()
                .ref_cnt()
                .load(std::sync::atomic::Ordering::Relaxed),
            0
        );
        // Bump the reference count, then copy the pointer.
        self.underlying().add_ref();
        Self {
            instance_ptr: self.instance_ptr,
        }
    }
}
impl<T: 'static> Deref for Arc<T> {
    type Target = T;

    #[inline]
    fn deref(&self) -> &Self::Target {
        // Two derefs: `&Underlying<T>` -> `Underlying<T>` -> `T`.
        &**self.underlying()
    }
}
impl<T: 'static> Drop for Arc<T> {
    #[inline]
    fn drop(&mut self) {
        if self.underlying().drop_ref() {
            // Last reference: hand the instance to the epoch-based reclaimer
            // instead of freeing immediately, since `Ptr`s may still exist.
            let barrier = Barrier::new();
            barrier.reclaim_underlying(self.instance_ptr.as_ptr());
        }
    }
}
// SAFETY(review): the handle is a pointer to a heap allocation with an atomic
// reference count, so moving/sharing it across threads appears sound when `T`
// itself is `Send`/`Sync`. Note `std::sync::Arc` additionally requires
// `T: Sync` for `Send` — confirm that relaxation is intentional here.
unsafe impl<T: 'static + Send> Send for Arc<T> {}
unsafe impl<T: 'static + Sync> Sync for Arc<T> {}
#[cfg(test)]
mod test {
    use super::*;
    use std::sync::atomic::Ordering::Relaxed;
    use std::sync::atomic::{AtomicBool, AtomicUsize};

    // Drop-observable payload: flips its flag when destroyed.
    struct A(AtomicUsize, usize, &'static AtomicBool);
    impl Drop for A {
        fn drop(&mut self) {
            self.2.swap(true, Relaxed);
        }
    }

    // `get_mut` must only succeed while the handle is exclusively owned,
    // and the payload must survive until the final handle is dropped.
    #[test]
    fn arc() {
        static DESTROYED: AtomicBool = AtomicBool::new(false);
        let mut arc = Arc::new(A(AtomicUsize::new(10), 10, &DESTROYED));
        if let Some(mut_ref) = arc.get_mut() {
            mut_ref.1 += 1;
        }
        arc.0.fetch_add(1, Relaxed);
        assert_eq!(arc.deref().0.load(Relaxed), 11);
        assert_eq!(arc.deref().1, 11);
        let mut arc_cloned = arc.clone();
        assert!(arc_cloned.get_mut().is_none());
        arc_cloned.0.fetch_add(1, Relaxed);
        assert_eq!(arc_cloned.deref().0.load(Relaxed), 12);
        assert_eq!(arc_cloned.deref().1, 11);
        let mut arc_cloned_again = arc_cloned.clone();
        assert!(arc_cloned_again.get_mut().is_none());
        assert_eq!(arc_cloned_again.deref().0.load(Relaxed), 12);
        assert_eq!(arc_cloned_again.deref().1, 11);
        drop(arc);
        assert!(!DESTROYED.load(Relaxed));
        assert!(arc_cloned_again.get_mut().is_none());
        drop(arc_cloned);
        assert!(!DESTROYED.load(Relaxed));
        assert!(arc_cloned_again.get_mut().is_some());
        drop(arc_cloned_again);
        // Reclamation is deferred to the EBR collector; churn barriers until
        // the payload is actually destroyed.
        while !DESTROYED.load(Relaxed) {
            drop(Barrier::new());
        }
    }

    // A clone moved into another thread must remain readable there.
    #[test]
    fn arc_send() {
        static DESTROYED: AtomicBool = AtomicBool::new(false);
        let arc = Arc::new(A(AtomicUsize::new(14), 14, &DESTROYED));
        let arc_cloned = arc.clone();
        let thread = std::thread::spawn(move || {
            assert_eq!(arc_cloned.0.load(Relaxed), arc_cloned.1);
        });
        assert!(thread.join().is_ok());
        assert_eq!(arc.0.load(Relaxed), arc.1);
    }

    // `drop_in_place` on the final handle destroys the payload immediately,
    // without waiting for the garbage collector.
    #[test]
    fn arc_arc_send() {
        static DESTROYED: AtomicBool = AtomicBool::new(false);
        let arc_arc = Arc::new(A(AtomicUsize::new(14), 14, &DESTROYED));
        let arc_arc_cloned = arc_arc.clone();
        let thread = std::thread::spawn(move || {
            assert_eq!(arc_arc_cloned.0.load(Relaxed), 14);
        });
        assert!(thread.join().is_ok());
        assert_eq!(arc_arc.0.load(Relaxed), 14);
        unsafe {
            arc_arc.drop_in_place();
        }
        assert!(DESTROYED.load(Relaxed));
    }

    // Dropping an outer Arc must eventually reclaim the Arc it contains.
    #[test]
    fn arc_nested() {
        static DESTROYED: AtomicBool = AtomicBool::new(false);
        struct Nest(Arc<A>);
        let nested_arc = Arc::new(Nest(Arc::new(A(AtomicUsize::new(10), 10, &DESTROYED))));
        assert!(!DESTROYED.load(Relaxed));
        drop(nested_arc);
        while !DESTROYED.load(Relaxed) {
            drop(Barrier::new());
        }
    }
}
|
use aoc_runner_derive::{aoc, aoc_generator};
use parse_display::{Display, FromStr};
use std::error::Error;
/// One password-policy line, e.g. `1-3 a: abcde`.
///
/// `min`/`max` are occurrence bounds in part 1 and 1-based character
/// positions in part 2.
#[derive(Display, FromStr)]
#[display("{min}-{max} {value}: {password}")]
struct Policy {
    min: usize,
    max: usize,
    value: char,
    password: String,
}
/// Parse one `Policy` per input line, failing on the first malformed line.
#[aoc_generator(day2)]
fn parse_input_day2(input: &str) -> Result<Vec<Policy>, impl Error> {
    input.lines().map(|line| line.parse()).collect()
}
/// Count policies where the letter's occurrence count lies in `min..=max`.
#[aoc(day2, part1)]
fn part1(policies: &[Policy]) -> usize {
    policies
        .iter()
        .filter(|policy| {
            let occurrences = policy
                .password
                .chars()
                .filter(|&c| c == policy.value)
                .count();
            (policy.min..=policy.max).contains(&occurrences)
        })
        .count()
}
/// Count policies where exactly one of positions `min`/`max` (1-based)
/// holds the policy letter.
#[aoc(day2, part2)]
fn part2(policies: &[Policy]) -> usize {
    policies
        .iter()
        .filter(|policy| {
            let letters: Vec<char> = policy.password.chars().collect();
            let first = letters[policy.min - 1] == policy.value;
            let second = letters[policy.max - 1] == policy.value;
            first != second
        })
        .count()
}
|
use coffee::{
graphics::{Color, Frame, Mesh, Point, Rectangle, Shape, Window, WindowSettings},
input::{mouse, ButtonState, Event, Input},
load::Task,
Game, Result, Timer,
};
use nalgebra;
use rand::{self, Rng};
// Window dimensions in logical pixels.
const WIDTH: f32 = 800.0;
const HEIGHT: f32 = 600.0;
// Drawing palette. GRAY and RED intentionally carry a low alpha (0.1)
// because they are used as translucent fills; the rest are opaque.
const YELLOW: Color = Color {
    r: 1.0,
    g: 1.0,
    b: 0.0,
    a: 1.0,
};
const GRAY: Color = Color {
    r: 0.7,
    g: 0.7,
    b: 0.7,
    a: 0.1,
};
const RED: Color = Color {
    r: 1.0,
    g: 0.0,
    b: 0.0,
    a: 0.1,
};
const BLUE: Color = Color {
    r: 0.0,
    g: 0.0,
    b: 1.0,
    a: 1.0,
};
const GREEN: Color = Color {
    r: 0.0,
    g: 1.0,
    b: 0.0,
    a: 1.0,
};
const PURPLE: Color = Color {
    r: 1.0,
    g: 0.0,
    b: 1.0,
    a: 1.0,
};
// Copy of KeyboardAndMouse in order to get access to mouse_pressed
struct VennInput {
    // Last known cursor position in window coordinates.
    cursor_position: Point,
    // True while a UI element has claimed the cursor.
    is_cursor_taken: bool,
    // True while the left button is held (and the cursor was free on press).
    is_mouse_pressed: bool,
}
impl Input for VennInput {
    fn new() -> VennInput {
        VennInput {
            cursor_position: Point::new(0.0, 0.0),
            is_cursor_taken: false,
            is_mouse_pressed: false,
        }
    }

    /// Track cursor position, cursor ownership, and left-button state.
    fn update(&mut self, event: Event) {
        if let Event::Mouse(mouse_event) = event {
            match mouse_event {
                mouse::Event::CursorMoved { x, y } => {
                    self.cursor_position = Point::new(x, y);
                }
                mouse::Event::CursorTaken => {
                    self.is_cursor_taken = true;
                }
                mouse::Event::CursorReturned => {
                    self.is_cursor_taken = false;
                }
                mouse::Event::Input {
                    button: mouse::Button::Left,
                    state: ButtonState::Pressed,
                } => {
                    // A press only counts when no widget owns the cursor.
                    self.is_mouse_pressed = !self.is_cursor_taken;
                }
                mouse::Event::Input {
                    button: mouse::Button::Left,
                    state: ButtonState::Released,
                } => {
                    self.is_mouse_pressed = false;
                }
                _ => {}
            }
        }
    }

    fn clear(&mut self) {}
}
/// The (color, shape, size) triple a guess token carries and an answer
/// box or circle is matched against.
struct VennTarget {
    color: VennColor,
    shape: VennShape,
    size: VennSize,
}
/// A rectangular drop target holding the hidden target a circle represents.
struct VennAnswer {
    width: f32,
    height: f32,
    center: Point,
    // True while a dragged token hovers over the box.
    hover: bool,
    target: VennTarget,
}
impl VennAnswer {
    /// Draw the answer box, with a translucent yellow fill while hovered.
    fn draw(&self, mesh: &mut Mesh) {
        // Closure so the bounds can be rebuilt for both fill and stroke
        // without requiring `Rectangle` to be `Copy`.
        let bounds = || Rectangle {
            x: self.center.x - self.width / 2.0,
            y: self.center.y - self.height / 2.0,
            width: self.width,
            height: self.height,
        };
        if self.hover {
            let mut highlight = YELLOW;
            highlight.a = 0.1;
            mesh.fill(Shape::Rectangle(bounds()), highlight);
        }
        mesh.stroke(Shape::Rectangle(bounds()), Color::BLACK, 2);
    }

    /// Whether `point` lies strictly inside the box.
    fn contains(&self, point: &Point) -> bool {
        let half_w = self.width / 2.0;
        let half_h = self.height / 2.0;
        point.x > self.center.x - half_w
            && point.x < self.center.x + half_w
            && point.y > self.center.y - half_h
            && point.y < self.center.y + half_h
    }

    /// A guess matches this answer when both shape and color agree.
    fn matches(&self, target: &VennTarget) -> bool {
        // Size is deliberately left out, mirroring the disabled size check.
        self.target.shape == target.shape && self.target.color == target.color
    }
}
/// Logical color of a guess token or answer target.
#[derive(PartialEq, Copy, Clone)]
enum VennColor {
    Yellow,
    Blue,
    Purple,
}
impl VennColor {
    /// Map this logical color to its drawable [`Color`] value.
    fn to_color(&self) -> Color {
        match *self {
            VennColor::Yellow => YELLOW,
            VennColor::Blue => BLUE,
            VennColor::Purple => PURPLE,
        }
    }
}
impl VennColor {
    /// Every color, in declaration order.
    fn all() -> Vec<VennColor> {
        vec![VennColor::Yellow, VennColor::Blue, VennColor::Purple]
    }

    /// Pick one of the three colors uniformly at random.
    fn random(rng: &mut rand::rngs::ThreadRng) -> VennColor {
        // `gen_range(low, high)` is half-open, so the upper bound must be 3;
        // with `(0, 2)` the `Purple` variant could never be produced and the
        // panic arm was unreachable.
        match rng.gen_range(0, 3) {
            0 => VennColor::Yellow,
            1 => VennColor::Blue,
            2 => VennColor::Purple,
            _ => unreachable!("gen_range(0, 3) yields 0..3"),
        }
    }
}
/// Logical size of a guess token or answer target.
#[derive(PartialEq, Copy, Clone)]
enum VennSize {
    Small,
    Medium,
    Large,
}
impl VennSize {
    /// Every size, in declaration order.
    fn all() -> Vec<VennSize> {
        vec![VennSize::Small, VennSize::Medium, VennSize::Large]
    }

    /// Pick one of the three sizes uniformly at random.
    fn random(rng: &mut rand::rngs::ThreadRng) -> VennSize {
        // `gen_range(low, high)` is half-open; `(0, 2)` made `Large`
        // unreachable, so the bound must be 3.
        match rng.gen_range(0, 3) {
            0 => VennSize::Small,
            1 => VennSize::Medium,
            2 => VennSize::Large,
            _ => unreachable!("gen_range(0, 3) yields 0..3"),
        }
    }
}
/// Logical shape of a guess token or answer target.
#[derive(PartialEq, Copy, Clone)]
enum VennShape {
    Circle,
    Triangle,
    Square,
}
impl VennShape {
    /// Every shape (iteration order intentionally differs from declaration).
    fn all() -> Vec<VennShape> {
        vec![VennShape::Circle, VennShape::Square, VennShape::Triangle]
    }

    /// Pick one of the three shapes uniformly at random.
    fn random(rng: &mut rand::rngs::ThreadRng) -> VennShape {
        // `gen_range(low, high)` is half-open; `(0, 2)` made `Triangle`
        // unreachable, so the bound must be 3.
        match rng.gen_range(0, 3) {
            0 => VennShape::Circle,
            1 => VennShape::Square,
            2 => VennShape::Triangle,
            _ => unreachable!("gen_range(0, 3) yields 0..3"),
        }
    }
}
/// A draggable token the player sorts into circles or answer boxes.
struct VennGuess {
    center: Point,
    radius: f32,
    // True while the token is being dragged (drawn slightly faded).
    dragged: bool,
    target: VennTarget,
    // None = not yet judged; Some(true/false) = last drop was right/wrong.
    matches: Option<bool>,
}
impl VennGuess {
    /// Create the i-th guess token, stacked vertically along the left edge.
    fn new(i: usize, shape: VennShape, color: VennColor, size: VennSize) -> VennGuess {
        VennGuess {
            center: Point::new(20.0, (i + 1) as f32 * 40.0),
            radius: 30.0,
            dragged: false,
            target: VennTarget { shape, size, color },
            matches: None,
        }
    }

    /// Move the token to `point`, marking it as being dragged.
    fn drag_to(&mut self, point: &Point) {
        self.dragged = true;
        self.center = point.clone();
    }

    /// Whether `point` falls inside the token's circle.
    fn contains(&self, point: &Point) -> bool {
        if nalgebra::distance(point, &self.center) < self.radius {
            return true;
        }
        false
    }

    /// Draw the token: a disc (gray = unjudged, green = correct, red = wrong)
    /// with the target glyph layered on top.
    fn draw(&self, mesh: &mut Mesh) {
        let mut color = match self.matches {
            None => GRAY,
            Some(true) => GREEN,
            Some(false) => RED,
        };
        // Palette constants may carry a low alpha; force the disc opaque,
        // then fade it slightly while it is being dragged.
        color.a = 1.0;
        if self.dragged {
            color.a -= 0.3;
        }
        mesh.fill(
            Shape::Circle {
                center: self.center,
                radius: self.radius,
            },
            color,
        );
        mesh.stroke(
            Shape::Circle {
                center: self.center,
                radius: self.radius,
            },
            Color::BLACK,
            1,
        );
        // The glyph representing the token's target, drawn after (on top of)
        // the disc.
        let shape = match self.target.shape {
            VennShape::Circle => Shape::Circle {
                center: self.center,
                radius: 10.0,
            },
            VennShape::Square => Shape::Rectangle(Rectangle {
                x: self.center.x - 10.0,
                y: self.center.y - 10.0,
                width: 10.0 * 2.0,
                height: 10.0 * 2.0,
            }),
            VennShape::Triangle => Shape::Polyline {
                // Closed by repeating the apex as the final point.
                points: vec![
                    Point::new(self.center.x, self.center.y - 10.0),
                    Point::new(self.center.x - 10.0, self.center.y + 10.0),
                    Point::new(self.center.x + 10.0, self.center.y + 10.0),
                    Point::new(self.center.x, self.center.y - 10.0),
                ],
            },
        };
        mesh.fill(shape.clone(), self.target.color.to_color());
        mesh.stroke(shape, Color::BLACK, 1);
    }
}
/// One of the two large set circles, together with its answer box.
struct VennCircle {
    center: Point,
    radius: f32,
    color: Color,
    // True while the cursor is inside the circle (drawn more opaque).
    selected: bool,
    answer: VennAnswer,
}
impl Default for VennCircle {
    /// Placeholder circle: unit radius at the origin with an arbitrary answer
    /// target; the real values are filled in by `Game::load`.
    fn default() -> VennCircle {
        VennCircle {
            center: Point::new(0.0, 0.0),
            radius: 1.0,
            color: Color::BLACK,
            selected: false,
            answer: VennAnswer {
                center: Point::new(0.0, 0.0),
                width: 40.0,
                height: 30.0,
                hover: false,
                target: VennTarget {
                    shape: VennShape::Circle,
                    size: VennSize::Large,
                    color: VennColor::Blue,
                },
            },
        }
    }
}
impl VennCircle {
    /// Draw the circle and its answer box; the fill brightens when selected.
    fn draw(&self, mesh: &mut Mesh) {
        self.answer.draw(mesh);
        let mut fill_color = self.color.clone();
        fill_color.a = if self.selected { 0.3 } else { 0.1 };
        mesh.fill(
            Shape::Circle {
                center: self.center,
                radius: self.radius,
            },
            fill_color,
        );
        mesh.stroke(
            Shape::Circle {
                center: self.center,
                radius: self.radius,
            },
            Color::BLACK,
            1,
        );
    }

    /// Whether `point` falls inside this circle.
    fn contains(&self, point: &Point) -> bool {
        nalgebra::distance(point, &self.center) < self.radius
    }

    /// Update the hover state from the current cursor position.
    fn interact(&mut self, input: &VennInput) {
        self.selected = self.contains(&input.cursor_position);
    }

    /// A guess matches this circle when either its shape or its color agrees
    /// with the circle's hidden target.
    fn matches(&self, target: &VennTarget) -> bool {
        // Size is deliberately left out, mirroring the disabled size check.
        self.answer.target.shape == target.shape || self.answer.target.color == target.color
    }
}
/// Top-level game state.
struct Venn {
    // The two overlapping set circles.
    left: VennCircle,
    right: VennCircle,
    // All draggable guess tokens.
    shapes: Vec<VennGuess>,
    // Index into `shapes` of the token currently being dragged, if any.
    drag_index: Option<usize>,
}
impl Game for Venn {
    type Input = VennInput;
    type LoadingScreen = ();
    const TICKS_PER_SECOND: u16 = 60;

    /// Build the initial board: one token per shape/color combination along
    /// the left edge, plus the two circles with randomized hidden targets.
    fn load(_window: &Window) -> Task<Venn> {
        let x_margin = 10.0;
        let y_margin = 10.0;
        let remaining_x = WIDTH - x_margin * 2.0;
        let remaining_y = HEIGHT - y_margin * 2.0;
        Task::new(move || {
            let mut rng = rand::thread_rng();
            let mut shapes = Vec::new();
            let mut i = 0;
            for shape in VennShape::all() {
                for color in VennColor::all() {
                    // Size variation is currently disabled; every token is Small.
                    // for size in VennSize::all() {
                    let size = VennSize::Small;
                    shapes.push(VennGuess::new(i, shape.clone(), color.clone(), size));
                    i += 1;
                    // }
                }
            }
            let left_center =
                Point::new(x_margin + remaining_x / 3.0, y_margin + remaining_y / 2.0);
            // Answer box sits above the circle: radius (200) + box offset.
            let mut left_answer_center = left_center.clone();
            left_answer_center.y = left_answer_center.y - 200.0 - 40.0 - 15.0;
            let right_center = Point::new(
                WIDTH - x_margin - remaining_x / 3.0,
                HEIGHT - y_margin - remaining_y / 2.0,
            );
            let mut right_answer_center = right_center.clone();
            right_answer_center.y = right_answer_center.y - 200.0 - 40.0 - 15.0;
            Venn {
                left: VennCircle {
                    center: left_center,
                    radius: 200.0,
                    color: BLUE,
                    answer: VennAnswer {
                        center: left_answer_center,
                        width: 100.0,
                        height: 80.0,
                        hover: false,
                        target: VennTarget {
                            shape: VennShape::random(&mut rng),
                            size: VennSize::random(&mut rng),
                            color: VennColor::random(&mut rng),
                        },
                    },
                    ..VennCircle::default()
                },
                right: VennCircle {
                    center: right_center,
                    radius: 200.0,
                    color: YELLOW,
                    answer: VennAnswer {
                        center: right_answer_center,
                        width: 100.0,
                        height: 80.0,
                        hover: false,
                        target: VennTarget {
                            shape: VennShape::random(&mut rng),
                            size: VennSize::random(&mut rng),
                            color: VennColor::random(&mut rng),
                        },
                    },
                    ..VennCircle::default()
                },
                shapes,
                drag_index: None,
            }
        })
    }

    /// Redraw everything into a single mesh: circles first, tokens on top.
    fn draw(&mut self, frame: &mut Frame<'_>, _timer: &Timer) {
        frame.clear(Color::WHITE);
        let mut mesh = Mesh::new();
        self.left.draw(&mut mesh);
        self.right.draw(&mut mesh);
        for shape in &self.shapes {
            shape.draw(&mut mesh);
        }
        mesh.draw(&mut frame.as_target());
    }

    /// Drag-and-drop state machine: while the button is held, pick up (or
    /// keep moving) a token; on release, judge the token by where it landed.
    fn interact(&mut self, input: &mut Self::Input, _window: &mut Window) {
        self.left.interact(input);
        self.right.interact(input);
        if input.is_mouse_pressed {
            match self.drag_index {
                None => {
                    // Iterate in reverse so the topmost-drawn token wins.
                    for (i, shape) in self.shapes.iter_mut().enumerate().rev() {
                        if shape.contains(&input.cursor_position) {
                            shape.matches = None;
                            shape.drag_to(&input.cursor_position);
                            self.drag_index = Some(i);
                            break;
                        }
                    }
                }
                Some(index) => {
                    self.shapes[index].drag_to(&input.cursor_position);
                }
            }
            if self.drag_index.is_some() {
                self.left.answer.hover = self.left.answer.contains(&input.cursor_position);
                self.right.answer.hover = self.right.answer.contains(&input.cursor_position);
            }
        } else {
            self.left.answer.hover = false;
            self.right.answer.hover = false;
            match self.drag_index {
                Some(index) => {
                    // Button released: judge the drop location.
                    let mut shape = &mut self.shapes[index];
                    match (
                        self.left.contains(&shape.center),
                        self.right.contains(&shape.center),
                        self.left.answer.contains(&shape.center),
                        self.right.answer.contains(&shape.center),
                    ) {
                        (true, true, _, _) => {
                            // Does left and right need to match the same property of shape?
                            // Or is it okay if it contains at least one property of each, independently?
                            shape.matches = Some(
                                self.left.matches(&shape.target)
                                    && self.right.matches(&shape.target),
                            );
                        }
                        (true, false, _, _) => {
                            shape.matches = Some(self.left.matches(&shape.target));
                        }
                        (false, true, _, _) => {
                            shape.matches = Some(self.right.matches(&shape.target));
                        }
                        (false, false, true, false) => {
                            // Dropped on an answer box: judge and snap to its center.
                            shape.matches = Some(self.left.answer.matches(&shape.target));
                            shape.center = self.left.answer.center;
                        }
                        (false, false, false, true) => {
                            shape.matches = Some(self.right.answer.matches(&shape.target));
                            shape.center = self.right.answer.center;
                        }
                        // NOTE(review): a drop overlapping BOTH answer boxes falls
                        // through to this arm and is treated as a miss — confirm
                        // the boxes can never overlap given the layout above.
                        (false, false, _, _) => {
                            shape.matches = None;
                        }
                    }
                    shape.dragged = false;
                    self.drag_index = None;
                }
                None => {}
            }
        }
    }

    fn update(&mut self, _window: &Window) {}
}
/// Launch the game in a fixed-size, non-fullscreen window.
fn main() -> Result<()> {
    let settings = WindowSettings {
        title: String::from("Venn Deduction"),
        size: (WIDTH as u32, HEIGHT as u32),
        resizable: false,
        fullscreen: false,
    };
    Venn::run(settings)
}
|
use std::path::PathBuf;
// NOTE(review): the `///` doc comments below double as the CLI help text
// rendered by structopt — edit them only when the help output should change.
#[derive(StructOpt, Debug)]
pub enum Opt {
    /// Builds the Rom Hack
    #[structopt(name = "build")]
    Build {
        /// Compiles the Rom Hack in Rust's debug mode
        #[structopt(short = "d", long = "debug")]
        debug: bool,
        /// Compiles the Rom Hack as a patch
        #[structopt(short = "p", long = "patch")]
        patch: bool,
    },
    /// Applies a patch file to a game to create a Rom Hack
    #[structopt(name = "apply")]
    Apply {
        /// Input path to patch file
        #[structopt(name = "PATCH", parse(from_os_str))]
        patch: PathBuf,
        /// Input path to original game (GCM or ISO format)
        #[structopt(name = "ORIGINAL", parse(from_os_str))]
        original_game: PathBuf,
        /// Output path for Rom Hack
        #[structopt(name = "OUT", parse(from_os_str))]
        output: PathBuf,
    },
    /// Creates a new Rom Hack with the given name
    #[structopt(name = "new")]
    New { name: String },
}
|
use crate::things::thing::Thing;
use crate::world::world::World;
/// A baron enemy; all spatial state lives in the wrapped `Thing`.
pub struct Baron {
    pub thing: Thing,
}
impl Baron {
    /// Spawn a baron in `world` at horizontal position (x, z).
    pub fn new(world: &World, x: f32, z: f32) -> Self {
        // 0.0 / 0.025 / 1.76 are the original tuning constants handed to
        // `Thing::new` — presumably facing, speed and height; confirm there.
        let thing = Thing::new(world, x, z, 0.0, 0.025, 1.76);
        Self { thing }
    }
}
|
/// Byte length of one sprite bitmap: 21 rows of 3 bytes (24 pixels) each.
pub const SPRITE_SIZE: usize = 63;
pub const SPRITE_WIDTH: usize = 24;
pub const SPRITE_HEIGHT: usize = 21;

/// Store the data associated with a sprite image. Multicolor sprites are not
/// supported at this time.
pub struct Sprite {
    data: Vec<u8>,
}

/// These codepoints are block element glyphs that can represent any combination of a 2x2 pixel
/// bitmap. The most significant two bits are the upper pixels, and the least significant bits are
/// the lower pixels.
#[rustfmt::skip]
static UNICODE_BLOCK_ELEMENTS: [char; 16] = [
    ' ', '\u{2597}', '\u{2596}', '\u{2584}',
    '\u{259D}', '\u{2590}', '\u{259E}', '\u{259F}',
    '\u{2598}', '\u{259A}', '\u{258C}', '\u{2599}',
    '\u{2580}', '\u{259C}', '\u{259B}', '\u{2588}',
];

impl Sprite {
    /// Build a sprite from exactly `SPRITE_SIZE` bytes of bitmap data.
    pub fn from_bytes(bytes: &[u8]) -> Sprite {
        assert_eq!(bytes.len(), SPRITE_SIZE);
        Sprite {
            data: bytes.to_vec(),
        }
    }

    /// Copy the sprite bitmap into `bytes`, which must be `SPRITE_SIZE` long.
    pub fn to_bytes(&self, bytes: &mut [u8]) {
        assert_eq!(bytes.len(), SPRITE_SIZE);
        bytes.copy_from_slice(&self.data);
    }

    /// Render the sprite as text, packing two pixel rows into each output
    /// line of Unicode block-element glyphs.
    pub fn to_unicode(&self) -> String {
        const ROWS: usize = (SPRITE_HEIGHT + 1) / 2;
        // Unicode block elements (but not the empty block, which is a space)
        // occupy three bytes each when encoded as UTF-8.
        const MAX_BYTES: usize = 3 * ROWS * ((SPRITE_WIDTH + 1) / 2);
        let mut rendered = String::with_capacity(MAX_BYTES);
        for row in 0..ROWS {
            let top_row = row * 2;
            for offset in 0..3 {
                let upper = self.data[top_row * 3 + offset];
                // The final output line has no bottom pixel row (21 is odd).
                let lower = if top_row + 1 < SPRITE_HEIGHT {
                    self.data[(top_row + 1) * 3 + offset]
                } else {
                    0
                };
                for block in 0..4 {
                    // Take two pixels from each byte, most significant first.
                    let shift = (3 - block) * 2;
                    let glyph = ((upper >> shift & 0x03) << 2) | (lower >> shift & 0x03);
                    rendered.push(UNICODE_BLOCK_ELEMENTS[glyph as usize]);
                }
            }
            rendered.push('\n');
        }
        rendered
    }
}
|
use crate::render::svg::*;
/// AxisLine represents the line of an axis.
pub struct AxisLine {
    // Endpoint coordinates in SVG user units.
    x1: f32,
    y1: f32,
    x2: f32,
    y2: f32,
    stroke_width: i32,
    stroke_color: String,
}
impl AxisLine {
    /// Construct a line from (x1, y1) to (x2, y2) with the default stroke.
    fn new(x1: f32, x2: f32, y1: f32, y2: f32) -> Self {
        Self {
            x1,
            y1,
            x2,
            y2,
            stroke_width: DEFAULT_STROKE_WIDTH,
            stroke_color: DEFAULT_STROKE_COLOR.to_string(),
        }
    }

    /// Horizontal axis line from the origin to `x2`.
    pub fn new_horizontal(x2: f32) -> Self {
        Self::new(START, x2, START, START)
    }

    /// Vertical axis line from the origin to `y2`.
    pub fn new_vertical(y2: f32) -> Self {
        Self::new(START, START, START, y2)
    }

    /// Render this line as an SVG `<line>` node with crisp-edge rendering.
    pub fn to_svg(&self) -> svg::node::element::Line {
        svg::node::element::Line::new()
            .set(X1_ATTR, self.x1)
            .set(X2_ATTR, self.x2)
            .set(Y1_ATTR, self.y1)
            .set(Y2_ATTR, self.y2)
            .set(SHAPE_RENDERING_ATTR, SHAPE_RENDERING_CRISP_EDGES)
            .set(STROKE_WIDTH_ATTR, self.stroke_width)
            .set(STROKE_ATTR, self.stroke_color.clone())
    }
}
#[cfg(test)]
mod tests {
    use super::*;

    // The serialized attribute order below is alphabetical, as emitted by
    // the `svg` crate's node formatter.
    #[test]
    fn axis_line_horizontal_basic() {
        let expected_svg_group = r##"<line shape-rendering="crispEdges" stroke="#bbbbbb" stroke-width="1" x1="0" x2="10" y1="0" y2="0"/>"##;
        let axis_line_svg = AxisLine::new_horizontal(10_f32).to_svg();
        assert_eq!(axis_line_svg.to_string(), expected_svg_group);
    }

    #[test]
    fn axis_line_vertical_basic() {
        let expected_svg_group = r##"<line shape-rendering="crispEdges" stroke="#bbbbbb" stroke-width="1" x1="0" x2="0" y1="0" y2="10"/>"##;
        let axis_line_svg = AxisLine::new_vertical(10_f32).to_svg();
        assert_eq!(axis_line_svg.to_string(), expected_svg_group);
    }
}
|
//! Node metrics
use futures::StreamExt;
use parity_scale_codec::Encode;
use sc_client_api::{BlockBackend, BlockImportNotification, ImportNotifications};
use sp_runtime::traits::Block as BlockT;
use std::sync::Arc;
use substrate_prometheus_endpoint::{register, Counter, PrometheusError, Registry, U64};
/// Prometheus metrics task that counts imported blocks and their extrinsics.
pub struct NodeMetrics<Block: BlockT, Client> {
    /// Client used to fetch the body of each imported block.
    client: Arc<Client>,
    /// Stream of block import notifications driving the metrics loop.
    block_import: ImportNotifications<Block>,
    /// Total number of imported blocks.
    blocks: Counter<U64>,
    /// Total number of extrinsics across imported blocks.
    extrinsics: Counter<U64>,
    /// Total encoded size (bytes) of extrinsics across imported blocks.
    extrinsics_size: Counter<U64>,
    _p: std::marker::PhantomData<Block>,
}
impl<Block, Client> NodeMetrics<Block, Client>
where
    Block: BlockT,
    Client: BlockBackend<Block> + 'static,
{
    /// Register the block/extrinsic counters in `registry` and build the
    /// metrics task.
    ///
    /// # Errors
    /// Returns a `PrometheusError` if any counter cannot be created or
    /// registered.
    pub fn new(
        client: Arc<Client>,
        block_import: ImportNotifications<Block>,
        registry: &Registry,
    ) -> Result<Self, PrometheusError> {
        Ok(Self {
            client,
            block_import,
            blocks: register(
                Counter::new("subspace_node_blocks", "Total number of imported blocks")?,
                registry,
            )?,
            extrinsics: register(
                Counter::new(
                    "subspace_node_extrinsics",
                    "Total number of extrinsics in the imported blocks",
                )?,
                registry,
            )?,
            extrinsics_size: register(
                Counter::new(
                    "subspace_node_extrinsics_size",
                    "Total extrinsic bytes in the imported blocks",
                )?,
                registry,
            )?,
            _p: Default::default(),
        })
    }

    /// Drive the metrics loop: update the counters for every imported block
    /// until the import notification stream ends.
    pub async fn run(mut self) {
        while let Some(incoming_block) = self.block_import.next().await {
            self.update_block_metrics(incoming_block);
        }
    }

    /// Update all counters for one imported block.
    fn update_block_metrics(&mut self, incoming_block: BlockImportNotification<Block>) {
        // A missing body (Err or None from the backend) counts as empty
        // rather than aborting the metrics task.
        let extrinsics = self
            .client
            .block_body(incoming_block.hash)
            .ok()
            .flatten()
            .unwrap_or_default();
        self.blocks.inc();
        self.extrinsics.inc_by(extrinsics.len() as u64);
        let total_size: usize = extrinsics
            .iter()
            .map(|extrinsic| extrinsic.encoded_size())
            .sum();
        self.extrinsics_size.inc_by(total_size as u64);
    }
}
|
use std::sync::{Arc, Mutex};
use super::sse::{self, Broadcaster};
use crate::{env, ApplicationContext};
use actix_rt::System;
use actix_web::client::Client;
use actix_web::http::header::HeaderValue;
use actix_web::{middleware, web, App, Error, HttpRequest, HttpResponse, HttpServer};
use std::thread;
/// Reverse-proxy `req` (with `body`) to the upstream `BASE_URL`, rewriting
/// `Set-Cookie` headers for the local domain on the way back.
async fn forward(
    req: HttpRequest,
    body: web::Bytes,
    client: web::Data<Client>,
) -> Result<HttpResponse, Error> {
    // Rebuild the target URL: upstream base + original path + query string.
    let mut new_url = env::BASE_URL.clone();
    new_url.push_str(req.uri().path());
    if let Some(query) = req.uri().query() {
        new_url.push('?');
        new_url.push_str(query);
    }
    // Copy method and headers from the incoming request; disable
    // decompression so the upstream payload is passed through untouched.
    let forwarded_req = client.request_from(new_url, req.head()).no_decompress();
    // Propagate the caller's address upstream.
    let forwarded_req = if let Some(addr) = req.head().peer_addr {
        forwarded_req.header("x-forwarded-for", format!("{}", addr.ip()))
    } else {
        forwarded_req
    };
    let mut res = forwarded_req.send_body(body).await.map_err(Error::from)?;
    let mut client_resp = HttpResponse::build(res.status());
    // Remove `Connection` as per
    // https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Connection#Directives
    for (header_name, header_value) in res.headers().iter().filter(|(h, _)| *h != "connection") {
        if header_name == "set-cookie" {
            // Rewrite set-cookie header for proxy: swap the upstream domain
            // for the local one and drop the Secure flag (presumably the
            // local side is plain HTTP -- confirm).
            let value = header_value.to_str().unwrap_or("");
            let local_domain = format!("Domain={}", env::LOCAL_DOMAIN.as_str());
            let server_domain = format!("Domain={}", env::SERVER_DOMAIN.as_str());
            let value = value.replace(&server_domain, &local_domain);
            let value = value.replace(" Secure;", "");
            // Fall back to the original header if the rewrite is not a
            // valid header value.
            let value = HeaderValue::from_str(&value).unwrap_or_else(|_| header_value.clone());
            client_resp.header(header_name.clone(), value);
        } else {
            client_resp.header(header_name.clone(), header_value.clone());
        }
    }
    Ok(client_resp.body(res.body().await?))
}
#[derive(Debug, Deserialize)]
/// JSON body accepted by the `/proxy/request-payment-token` endpoint.
// NOTE(review): `Deserialize` is not imported in this file's use list;
// presumably serde derives are brought in at crate level -- confirm.
struct RequestPaymentToken {
    // Payment amount; the unit is defined by ApplicationContext::request_payment.
    amount: i32,
}
/// Handler: forward a payment request for the given amount to the shared
/// application context.
async fn request_payment_token(
    context: web::Data<Arc<Mutex<ApplicationContext>>>,
    request: web::Json<RequestPaymentToken>,
) -> Result<HttpResponse, Error> {
    let mut ctx = context.lock().expect("Mutex deadlock!");
    ctx.request_payment(request.amount);
    Ok(HttpResponse::Ok().finish())
}
/// Handler: ask the application context to re-run NFC authentication.
async fn request_reauthentication(
    context: web::Data<Arc<Mutex<ApplicationContext>>>,
) -> Result<HttpResponse, Error> {
    let mut ctx = context.lock().expect("Mutex deadlock!");
    ctx.request_reauthentication();
    Ok(HttpResponse::Ok().finish())
}
/// Handler: ask the application context to cancel the current operation.
async fn request_cancel(
    context: web::Data<Arc<Mutex<ApplicationContext>>>,
) -> Result<HttpResponse, Error> {
    let mut ctx = context.lock().expect("Mutex deadlock!");
    ctx.request_cancel();
    Ok(HttpResponse::Ok().finish())
}
pub fn start(broadcaster: Arc<Mutex<Broadcaster>>, context: Arc<Mutex<ApplicationContext>>) {
thread::spawn(move || {
let mut sys = System::new("sse");
Broadcaster::spawn_ping(broadcaster.clone());
let address = format!("{}:{}", env::HOST.as_str(), *env::PORT);
let srv = HttpServer::new(move || {
App::new()
.data(Client::new())
.data(broadcaster.clone())
.data(context.clone())
.wrap(middleware::Logger::default())
.service(
web::scope("/proxy")
.service(web::resource("/events").to(sse::new_client))
.service(
web::resource("/request-payment-token")
.route(web::post().to(request_payment_token)),
)
.service(
web::resource("/reauthenticate-nfc")
.route(web::get().to(request_reauthentication)),
)
.service(
web::resource("/cancel").route(web::get().to(request_reauthentication)),
),
)
.default_service(web::route().to(forward))
})
.bind(address)
.expect("Cannot start proxy server!")
.system_exit()
.run();
sys.block_on(srv)
});
}
|
use super::CodeGenerator;
use crate::code_generator::intermediate::variable::Variable;
use crate::code_generator::intermediate::{Access, Constant, Instruction};
use parser::ast;
use parser::ast::visitor::Visitor;
use parser::ast::{ExprOp, RelOp};
/// Identifies which of the two bodies of an emitted if/else a callback is
/// being asked to produce: `First` is the condition-true (positive) body,
/// `Second` the condition-false (negative) one (see `visit_if_else_command`).
#[derive(Debug, Copy, Clone, Ord, PartialOrd, Eq, PartialEq)]
enum Order {
    First,
    Second,
}
impl CodeGenerator {
    /// Emit an if/else skeleton around `condition`, invoking `emit_body`
    /// once for each branch position.
    ///
    /// The condition is lowered as `left - right` (see `visit_condition`), so
    /// the conditional jump below is a sign/zero test on that difference.
    /// Depending on the operator, the fall-through position holds either the
    /// positive or the negative body; despite its name, `negative_label` just
    /// marks "the other" body. The `Order` passed to the callback says which
    /// body to emit at the current position, keeping the mapping correct for
    /// every operator.
    fn emit_if_else<F: FnMut(&mut Self, Order)>(
        &mut self,
        condition: &ast::Condition,
        mut emit_body: F,
    ) {
        let negative_label = self.new_label();
        let endif_label = self.new_label();
        // Emits code computing `left - right`.
        self.visit(condition);
        // Which body sits on the fall-through path vs. after the jump.
        let (first_order, second_order) = match condition.op {
            RelOp::NEQ | RelOp::LEQ | RelOp::GEQ => (Order::First, Order::Second),
            RelOp::EQ | RelOp::LT | RelOp::GT => (Order::Second, Order::First),
        };
        // Jump taken when the difference is zero/positive/negative,
        // matching the operator pairs above.
        let cond_jump = match condition.op {
            RelOp::EQ | RelOp::NEQ => Instruction::JZero {
                label: negative_label,
            },
            RelOp::GT | RelOp::LEQ => Instruction::JPositive {
                label: negative_label,
            },
            RelOp::LT | RelOp::GEQ => Instruction::JNegative {
                label: negative_label,
            },
        };
        self.emit(cond_jump);
        emit_body(self, first_order);
        self.emit(Instruction::Jump { label: endif_label });
        self.emit(Instruction::Label {
            label: negative_label,
        });
        emit_body(self, second_order);
        self.emit(Instruction::Label { label: endif_label });
    }

    /// Emit a do/while loop: body first, then a conditional jump back.
    fn emit_do<F: FnMut(&mut Self)>(&mut self, condition: &ast::Condition, mut emit_body: F) {
        // do { commands } while(condition)
        // is the same as:
        // start: { commands } if(condition) jump start;
        let start_label = self.new_label();
        self.emit(Instruction::Label { label: start_label });
        emit_body(self);
        // Only the condition-true (First) position gets the back-jump.
        let emit_jump = |gen: &mut Self, order| {
            if order == Order::First {
                gen.emit(Instruction::Jump { label: start_label });
            }
        };
        self.emit_if_else(condition, emit_jump);
    }

    /// Emit a while loop by guarding a do/while with the same condition.
    fn emit_while<F: FnMut(&mut Self)>(&mut self, condition: &ast::Condition, mut emit_body: F) {
        // while(condition) { commands }
        // is the same as:
        // if(condition) { do { commands } while(condition) }
        let emit_if = |gen: &mut Self, order| {
            if order == Order::First {
                gen.emit_do(condition, &mut emit_body);
            }
        };
        self.emit_if_else(condition, emit_if);
    }
}
/// AST-to-intermediate-code lowering. Expression results are communicated
/// through the generator's access stack (`push_access`/`pop_access`) and the
/// `emit_*_visited` helpers.
impl Visitor for CodeGenerator {
    type Result = ();

    /// Register a declared scalar or array as a global variable.
    fn visit_declaration(&mut self, declaration: &ast::Declaration) -> Self::Result {
        let var = match declaration {
            ast::Declaration::Var { name } => Variable::Unit { name: name.clone() },
            ast::Declaration::Array { name, start, end } => Variable::Array {
                name: name.clone(),
                start: *start,
                end: *end,
            },
        };
        self.add_global(var);
    }

    /// IF condition THEN positive ELSE negative.
    fn visit_if_else_command(
        &mut self,
        condition: &ast::Condition,
        positive: &ast::Commands,
        negative: &ast::Commands,
    ) -> Self::Result {
        // `Order::First` marks the condition-true body.
        let emit = |gen: &mut Self, order| match order {
            Order::First => gen.visit_commands(positive),
            Order::Second => gen.visit_commands(negative),
        };
        self.emit_if_else(condition, emit);
    }

    /// IF condition THEN positive (the negative branch emits nothing).
    fn visit_if_command(
        &mut self,
        condition: &ast::Condition,
        positive: &ast::Commands,
    ) -> Self::Result {
        let emit = |gen: &mut Self, order| {
            if order == Order::First {
                gen.visit_commands(positive);
            }
        };
        self.emit_if_else(condition, emit);
    }

    /// WHILE condition DO commands.
    fn visit_while_command(
        &mut self,
        condition: &ast::Condition,
        commands: &ast::Commands,
    ) -> Self::Result {
        self.emit_while(condition, |gen: &mut Self| gen.visit_commands(commands));
    }

    /// DO commands WHILE condition.
    fn visit_do_command(
        &mut self,
        commands: &ast::Commands,
        condition: &ast::Condition,
    ) -> Self::Result {
        self.emit_do(condition, |gen: &mut Self| gen.visit_commands(commands));
    }

    /// FOR counter FROM from TO/DOWNTO to DO commands.
    ///
    /// Lowered as: store `from` into a fresh local counter, store `to` into a
    /// hidden `<counter>$to` local, then a while loop `counter <= to` (or
    /// `>=` when descending) whose body runs `commands` and steps the counter
    /// by 1. Both locals are popped afterwards so the counter is scoped to
    /// the loop.
    fn visit_for_command(
        &mut self,
        counter: &str,
        ascending: bool,
        from: &ast::Value,
        to: &ast::Value,
        commands: &ast::Commands,
    ) -> Self::Result {
        let counter_var = self.add_local(Variable::Unit {
            name: counter.to_owned(),
        });
        let tmp = self.add_local(Variable::Unit {
            name: counter.to_owned() + "$to",
        });
        // PreStore/Store pairs bracket each initialisation; presumably
        // PreStore prepares the target address -- confirm against the
        // Instruction definitions.
        self.emit(Instruction::PreStore {
            access: Access::Variable(counter_var),
        });
        self.visit(from);
        self.emit_load_visited();
        self.emit(Instruction::Store {
            access: Access::Variable(counter_var),
        });
        self.emit(Instruction::PreStore {
            access: Access::Variable(tmp),
        });
        self.visit(to);
        self.emit_load_visited();
        self.emit(Instruction::Store {
            access: Access::Variable(tmp),
        });
        let counter_name = self
            .context
            .get_variable(&counter_var)
            .variable()
            .name()
            .to_owned();
        debug_assert_eq!(counter_name.as_str(), counter);
        let tmp_name = self.context.get_variable(&tmp).variable().name().to_owned();
        debug_assert_eq!(tmp_name.as_str(), (counter_name.clone() + "$to").as_str());
        self.emit_while(
            // counter <= to (ascending) / counter >= to (descending)
            &ast::Condition {
                left: ast::Value::Identifier(ast::Identifier::VarAccess {
                    name: counter_name.clone(),
                }),
                op: if ascending {
                    ast::RelOp::LEQ
                } else {
                    ast::RelOp::GEQ
                },
                right: ast::Value::Identifier(ast::Identifier::VarAccess {
                    name: tmp_name.clone(),
                }),
            },
            |gen| {
                gen.visit_commands(commands);
                // counter := counter + 1 (or - 1 when descending)
                gen.visit_assign_command(
                    &ast::Identifier::VarAccess {
                        name: counter_name.clone(),
                    },
                    &ast::Expression::Compound {
                        left: ast::Value::Identifier(ast::Identifier::VarAccess {
                            name: counter_name.clone(),
                        }),
                        op: if ascending {
                            ast::ExprOp::Plus
                        } else {
                            ast::ExprOp::Minus
                        },
                        right: ast::Value::Num(1),
                    },
                );
            },
        );
        self.pop_local(tmp);
        self.pop_local(counter_var);
    }

    /// READ target: get a value from input and store it.
    fn visit_read_command(&mut self, target: &ast::Identifier) -> Self::Result {
        self.visit(target);
        self.emit_pre_store_visited();
        self.emit(Instruction::Get);
        self.emit_store_visited();
    }

    /// WRITE value: load it and put it to output.
    fn visit_write_command(&mut self, value: &ast::Value) -> Self::Result {
        self.visit(value);
        self.emit_load_visited();
        self.emit(Instruction::Put);
    }

    /// target := expr.
    fn visit_assign_command(
        &mut self,
        target: &ast::Identifier,
        expr: &ast::Expression,
    ) -> Self::Result {
        self.visit(target);
        self.emit_pre_store_visited();
        self.visit(expr);
        self.emit_store_visited();
    }
    //
    // fn visit_commands(&mut self, commands: &ast::Commands) -> Self::Result {
    //     unimplemented!()
    // }
    //
    // fn visit_command(&mut self, command: &ast::Command) -> Self::Result {
    //     unimplemented!()
    // }

    /// A bare value used as an expression: just load it.
    fn visit_simple_expression(&mut self, value: &ast::Value) -> Self::Result {
        self.visit(value);
        self.emit_load_visited();
    }

    /// left op right: resolve both operand accesses, then emit the operation.
    fn visit_compound_expression(
        &mut self,
        left: &ast::Value,
        op: &ast::ExprOp,
        right: &ast::Value,
    ) -> Self::Result {
        self.visit(left);
        let left = self.pop_access();
        self.visit(right);
        let right = self.pop_access();
        self.emit(Instruction::Operation {
            left,
            op: (*op).into(),
            right,
        });
    }
    // fn visit_expression(&mut self, expr: &ast::Expression) -> Self::Result {
    //     unimplemented!()
    // }

    /// Conditions are lowered as `left - right`; the emitted jumps in
    /// `emit_if_else` test the sign of the difference.
    fn visit_condition(&mut self, condition: &ast::Condition) -> Self::Result {
        self.visit_compound_expression(&condition.left, &ExprOp::Minus, &condition.right);
    }

    /// A numeric literal: register it as a constant and push its access.
    fn visit_num_value(&mut self, num: i64) -> Self::Result {
        self.context.register_constant(Constant(num));
        self.push_access(Access::Constant(Constant(num)));
    }

    /// Resolve an identifier to an access (dynamic/static array element or
    /// plain variable) and push it for the surrounding expression.
    fn visit_identifier(&mut self, identifier: &ast::Identifier) -> Self::Result {
        use ast::Identifier::*;
        match identifier {
            // arr[idx] where idx is itself a variable
            ArrAccess { name, index } => {
                let name_index = self.find_variable_by_name(name).unwrap().id();
                let index_index = self.find_variable_by_name(index).unwrap().id();
                self.push_access(Access::ArrayDynamic(name_index, index_index))
            }
            // arr[const]
            ArrConstAccess { name, index } => {
                let name_index = self.find_variable_by_name(name).unwrap().id();
                self.context.register_constant(Constant(*index));
                self.push_access(Access::ArrayStatic(name_index, Constant(*index)));
            }
            // plain variable
            VarAccess { name } => {
                let name_index = self.find_variable_by_name(name).unwrap().id();
                self.push_access(Access::Variable(name_index));
            }
        }
    }
}
|
use std::io::{self, BufRead};
use regex::Regex;
/// One parsed `name(params)` call extracted from an input line.
struct FnCall {
    // Function name (text before the opening parenthesis).
    name: String,
    // Raw parameter text between the parentheses, not split further.
    params: String,
}
/// Read lines of the form `expected, actual` (each side a list of bracketed
/// `[name(params)]` calls) from stdin and pretty-print both sides.
fn main() {
    let stdin = io::stdin();
    for line in stdin
        .lock()
        .lines()
        .map(|l| l.unwrap())
        .filter(|l| !l.is_empty())
    {
        // split line on ", " to get expected and actual
        // (panics on malformed lines without a ", " separator -- input is
        // trusted here)
        let mut iter = line.split(", ");
        println!("Expected:");
        let expected = parse_fns(iter.next().unwrap());
        print_fns(expected.iter());
        println!("Actual:");
        let actual = parse_fns(iter.next().unwrap());
        print_fns(actual.iter());
        // Blank separator line between records (println!() over println!("")).
        println!();
    }
}
/// Print each call as an indented `name(params)` line.
fn print_fns<'a>(fns: impl Iterator<Item = &'a FnCall>) {
    fns.for_each(|call| println!("    {}({})", call.name, call.params));
}
fn parse_fns(inp: &str) -> Vec<FnCall> {
let re = Regex::new(r#"\[([^\(]+)\(([^\)]+)\)\]"#).unwrap();
let mut fns = vec![];
for cap in re.captures_iter(inp) {
fns.push(FnCall {
name: cap[1].to_string(),
params: cap[2].to_string(),
});
}
fns
}
|
use database::Engine;
use misc;
/// SQL for fetching a single entry by entry code; placeholder syntax
/// ($1 vs :1) differs between engines.
pub fn info(engine: &Engine) -> String {
    let query = match engine {
        Engine::Postgres => "SELECT * FROM MV_ENTRY where iptm_entry_code = $1",
        Engine::Oracle => "SELECT * FROM MV_ENTRY where iptm_entry_code = :1",
    };
    String::from(query)
}
/// SQL for a full search result page, built on the shared search clause.
pub fn search(term_type: &str, role: &str, ptm_types: &Vec<String>, organism_taxons: &Vec<i32>, paginate: bool, offset: i32, limit: i32, engine: &Engine) -> String {
    let search_clause = search_clause(term_type, role, ptm_types, organism_taxons, paginate, offset, limit, engine);
    format!("SELECT * FROM {search_clause}", search_clause = search_clause)
}
/// SQL counting the rows the corresponding `search` query would return
/// (never paginated).
pub fn search_count(term_type: &str, role: &str, ptm_types: &Vec<String>, organism_taxons: &Vec<i32>, engine: &Engine) -> String {
    let search_clause = search_clause(term_type, role, ptm_types, organism_taxons, false, 0, 0, engine);
    format!("SELECT COUNT(iptm_entry_id) AS search_count FROM {search_clause}", search_clause = search_clause)
}
/// Build the `MV_ENTRY where ...` fragment shared by `search` and
/// `search_count`.
///
/// Combines four independent filters -- search term (by `term_type`),
/// enzyme/substrate `role`, PTM types, organism taxons -- plus optional
/// pagination, using engine-specific placeholder ($n vs :n) and matching
/// (ILIKE vs regexp_like) syntax.
fn search_clause(term_type: &str, role: &str, ptm_types: &Vec<String>, organism_taxons: &Vec<i32>, paginate: bool, offset: i32, limit: i32, engine: &Engine) -> String {
    // build the search term matching clause
    let mut search_term_clause = String::new();
    if term_type == "All" {
        match engine {
            &Engine::Postgres => {
                search_term_clause = String::from("uniprot_id ILIKE $1 OR protein_name ILIKE $2 OR gene_name ILIKE $3");
            },
            &Engine::Oracle => {
                search_term_clause = String::from("regexp_like(uniprot_id,:1,'i') OR regexp_like(protein_name,:2,'i') OR regexp_like(gene_name,:3,'i')")
            }
        }
    } else if term_type == "UniprotID" {
        match engine {
            &Engine::Postgres => {
                search_term_clause = String::from("uniprot_id ILIKE $1")
            },
            &Engine::Oracle => {
                search_term_clause = String::from("regexp_like(uniprot_id,:1,'i')")
            }
        }
    } else if term_type == "Protein/Gene Name" {
        match engine {
            &Engine::Postgres => {
                // NOTE(review): matches uniprot_id and gene_name, but not
                // protein_name, despite the term-type label -- confirm intent.
                search_term_clause = String::from("uniprot_id ILIKE $1 OR gene_name ILIKE $2")
            },
            &Engine::Oracle => {
                if !paginate {
                    // BUG FIX: removed a stray single quote after the first
                    // regexp_like(...) that produced invalid SQL.
                    search_term_clause = String::from("regexp_like(uniprot_id,:1,'i') OR regexp_like(gene_name,:2,'i')")
                } else {
                    // NOTE(review): the paginated Oracle query gets an empty
                    // term clause, producing `WHERE ()` -- confirm this is
                    // intentional.
                    search_term_clause = String::from("")
                }
            }
        }
    }
    // build the enzyme matching clause
    let mut enzyme_clause = String::from("");
    if role == "Enzyme or Substrate" {
        enzyme_clause = String::from("AND (role_as_enzyme = 'T' OR role_as_substrate = 'T')")
    } else if role == "Enzyme" {
        enzyme_clause = String::from("AND (role_as_enzyme = 'T')")
    } else if role == "Substrate" {
        enzyme_clause = String::from("AND (role_as_substrate = 'T')")
    } else if role == "Enzyme and Substrate" {
        enzyme_clause = String::from("AND (role_as_enzyme = 'T' AND role_as_substrate = 'T')")
    }
    // ptm clause: Postgres uses array overlap, Oracle a regexp alternation.
    let ptm_clause;
    match engine {
        Engine::Postgres => {
            let ptm_array = misc::to_postgres_array_str(ptm_types);
            ptm_clause = format!("AND (string_to_array(list_as_substrate,',') && {array})", array = ptm_array);
        },
        Engine::Oracle => {
            //ptm_clause = format!("AND (taxon_code = ANY ({taxon_codes}))",taxon_codes=taxon_codes);
            let ptm_csv = misc::str_vec_to_str_with_sep(ptm_types, String::from("|"));
            ptm_clause = format!("AND (regexp_like(LIST_AS_SUBSTRATE,'{ptm_csv}','i'))", ptm_csv = ptm_csv);
        }
    }
    // taxon clause (only when taxons were supplied)
    let mut taxon_clause = String::new();
    if !organism_taxons.is_empty() {
        let taxon_codes = misc::taxons_to_tuple_str(organism_taxons);
        match engine {
            Engine::Postgres => {
                taxon_clause = format!("AND (taxon_code = ANY ('{{{taxon_codes}}}'))", taxon_codes = taxon_codes);
            },
            Engine::Oracle => {
                taxon_clause = format!("AND (taxon_code = ANY ({taxon_codes}))", taxon_codes = taxon_codes);
            }
        }
    }
    // pagination
    if paginate {
        // limit/offset clause, engine-specific syntax
        let limit_offset_clause;
        match engine {
            Engine::Postgres => {
                limit_offset_clause = format!("OFFSET {offset} LIMIT {limit}", limit = limit, offset = offset);
            },
            Engine::Oracle => {
                limit_offset_clause = format!("OFFSET {offset} rows FETCH NEXT {limit} rows only", limit = limit, offset = offset);
                //limit_offset_clause = String::from("");
            }
        }
        return format!("MV_ENTRY where ({search_term_clause}) {enzyme_clause} AND iptm_entry_type != 'pro_id' {ptm_clause} {taxon_clause} \
        ORDER BY iptm_entry_id {limit_offset_clause}",
            search_term_clause = search_term_clause,
            enzyme_clause = enzyme_clause,
            ptm_clause = ptm_clause,
            taxon_clause = taxon_clause,
            limit_offset_clause = limit_offset_clause
        );
    } else {
        return format!("MV_ENTRY where ({search_term_clause}) {enzyme_clause} AND iptm_entry_type != 'pro_id' {ptm_clause} {taxon_clause}",
            search_term_clause = search_term_clause,
            enzyme_clause = enzyme_clause,
            ptm_clause = ptm_clause,
            taxon_clause = taxon_clause
        );
    }
}
/// SQL for fetching a PRO entry by entry code, with engine-appropriate
/// placeholders.
pub fn pro_info(engine: &Engine) -> String {
    let query_str = String::from("SELECT * FROM MV_ENTRY where iptm_entry_code = $1");
    match engine {
        // Postgres uses the $n placeholders as written.
        &Engine::Postgres => query_str,
        // Oracle uses :n placeholders instead.
        &Engine::Oracle => query_str.replace("$", ":"),
    }
}
/// SQL listing the distinct substrate form codes for a substrate code.
pub fn sub_forms(engine: &Engine) -> String {
    let query_str = String::from("SELECT DISTINCT SUB_FORM_CODE from MV_EVENT where SUB_CODE = $1");
    match engine {
        // Postgres uses the $n placeholders as written.
        &Engine::Postgres => query_str,
        // Oracle uses :n placeholders instead.
        &Engine::Oracle => query_str.replace("$", ":"),
    }
}
/// SQL for non-interaction proteoform events matching a substrate xref.
pub fn proteoforms(engine: &Engine) -> String {
    let query = match engine {
        Engine::Postgres => "SELECT * FROM MV_PROTEO where SUB_XREF ILIKE $1 AND EVENT_NAME != 'Interaction'",
        Engine::Oracle => "SELECT * FROM MV_PROTEO where regexp_like(SUB_XREF,:1,'i') AND EVENT_NAME != 'Interaction'",
    };
    String::from(query)
}
/// SQL for interaction proteoform events matching a substrate xref.
pub fn proteoformppi(engine: &Engine) -> String {
    let query = match engine {
        Engine::Postgres => "SELECT * FROM MV_PROTEO where SUB_XREF ILIKE $1 AND EVENT_NAME = 'Interaction'",
        Engine::Oracle => "SELECT * FROM MV_PROTEO where regexp_like(SUB_XREF,:1,'i') AND EVENT_NAME = 'Interaction'",
    };
    String::from(query)
}
/// SQL for PTM-dependent PPI rows where the code appears on either side.
pub fn ptmppi(engine: &Engine) -> String {
    let query_str = String::from("SELECT * FROM MV_EFIP where PPI_SUB_CODE = $1 OR PPI_PR_CODE = $1");
    match engine {
        // Postgres uses the $n placeholders as written.
        &Engine::Postgres => query_str,
        // Oracle uses :n placeholders instead.
        &Engine::Oracle => query_str.replace("$", ":"),
    }
}
/// SQL for sequences by (case-insensitive) ID match.
pub fn get_sequences(engine: &Engine) -> String {
    let query = match engine {
        Engine::Postgres => "SELECT * FROM SEQUENCE where ID ILIKE $1",
        Engine::Oracle => "SELECT * FROM SEQUENCE where regexp_like(ID,:1,'i')",
    };
    String::from(query)
}
/// SQL aggregating event decorations (sources, PMIDs) for a
/// (sub_form_code, position, residue) triple.
pub fn get_decorations(engine: &Engine) -> String {
    match engine {
        Engine::Postgres => {
            return String::from("select event_name,string_agg(source_label, ', ') as source_labels ,string_agg(pmids, ', ') as pmids from mv_event where sub_form_code = $1 and position = $2 and residue = $3 AND position is not null group by event_name");
        },
        Engine::Oracle => {
            // NOTE(review): this returns the SEQUENCE lookup query, not an
            // Oracle translation of the decorations query above -- looks like
            // a copy-paste error. An Oracle equivalent would need LISTAGG;
            // left unchanged pending confirmation.
            return String::from("SELECT * FROM SEQUENCE where regexp_like(ID,:1,'i')");
        }
    }
}
/// SQL counting decoration events for a (sub_code, position, residue) triple.
pub fn get_decorations_count(engine: &Engine) -> String {
    match engine {
        Engine::Postgres => {
            return String::from(r#"select count(*) as "count" from mv_event where sub_code = $1 and position = $2 and residue = $3 AND position is not null"#);
        },
        Engine::Oracle => {
            // NOTE(review): this returns the SEQUENCE lookup query, not an
            // Oracle translation of the count query above -- looks like a
            // copy-paste error; left unchanged pending confirmation.
            return String::from("SELECT * FROM SEQUENCE where regexp_like(ID,:1,'i')");
        }
    }
}
use crate::wire;
use crate::MAX_BUFF_SIZE;
use crate::protocol::Protocol;
use crate::name_server::NameServer;
use crate::name_server::ROOT_V4_SERVERS;
use crate::name_server::ROOT_V6_SERVERS;

use wire::serialize_req;
use wire::serialize_res;
use wire::deserialize_req;
use wire::deserialize_res;

use tokio::net::UdpSocket;
use tokio::net::TcpListener;
use tokio::io::AsyncReadExt;
use tokio::io::AsyncWriteExt;

// `Pin` and `Future` appear in the signatures of `iquery`/`rquery`/`rquery2`
// below but were never imported.
// NOTE(review): several other names used below (Request, Response, Record,
// ResponseCode, Protocols, Cache, ResolvOptions) also have no visible import;
// their paths are project-specific so they are not guessed here -- confirm.
use std::future::Future;
use std::net::IpAddr;
use std::net::Ipv4Addr;
use std::net::Ipv6Addr;
use std::net::SocketAddr;
use std::net::SocketAddrV4;
use std::net::SocketAddrV6;
use std::pin::Pin;
use std::sync::Arc;
use std::sync::RwLock;
/// Placeholder for a future query implementation.
///
/// # Panics
/// Always panics via `todo!()`; not yet implemented.
pub async fn query2(req: &wire::Request, name_server: &NameServer) -> Result<wire::Response, wire::Error> {
    todo!()
}
pub async fn query(req: &wire::Request, name_server: &NameServer) -> Result<wire::Response, wire::Error> {
let mut buf = [0u8; MAX_BUFF_SIZE];
// if !self.is_tcp() && !self.is_udp() {
// return Err(wire::Error::new(wire::ErrorKind::ServerFailure, "DNS Protocol not supported."))
// }
if req.questions.is_empty() {
return Err(wire::Error::new(wire::ErrorKind::FormatError, "DNS Query must have questions."));
}
if req.questions.len() > 1 {
warn!("Only the first question record will be sent to the query, the rest will be ignored");
}
// debug!("Sent DNS Query to {:?}://{} {:?} ...", &self.protocols, &self.socket_addr, self.domain_name);
debug!("{:?}", req);
let amt = serialize_req(req, &mut buf[2..])?;
if amt > std::u16::MAX as usize {
return Err(wire::Error::from(wire::ErrorKind::ServerFailure));
}
&mut buf[..2].copy_from_slice(&(amt as u16).to_be_bytes());
let sa = name_server.socket_addr_by(Protocol::Tcp).unwrap();
let mut stream = tokio::time::timeout(
std::time::Duration::from_secs(5),
tokio::net::TcpStream::connect(&sa)
)
.await
.map_err(|e| wire::Error::new(wire::ErrorKind::ServerFailure, e))??;
stream.write_all(&buf[..amt+2])
.await
.map_err(|e| wire::Error::new(wire::ErrorKind::ServerFailure, e))?;
stream.read_exact(&mut buf[..2])
.await
.map_err(|e| wire::Error::new(wire::ErrorKind::ServerFailure, e))?;
let amt = u16::from_be_bytes([buf[0], buf[1]]) as usize;
if amt > std::cmp::min(buf.len(), std::u16::MAX as usize) {
return Err(wire::Error::from(wire::ErrorKind::ServerFailure));
}
stream.read_exact(&mut buf[..amt])
.await
.map_err(|e| wire::Error::new(wire::ErrorKind::ServerFailure, e))?;
let pkt = &buf[..amt];
let res = deserialize_res(pkt)
.map_err(|e| wire::Error::new(wire::ErrorKind::ServerFailure, e))?;
if res.id != req.id {
return Err(wire::Error::new(wire::ErrorKind::ServerFailure, "DNS Message ID not match."));
}
debug!("{:?}", &res);
Ok(res)
}
/// State threaded through the recursive resolver functions below.
#[derive(Debug, Clone)]
pub struct Query {
    // pub max_hop: usize,
    // pub use_ipv4: bool,
    // pub use_ipv6: bool,
    /// Shared resolver options: attempt budget plus IPv4/IPv6 toggles.
    state: Arc<RwLock<ResolvOptions>>,
    /// Optional shared DNS cache consulted and updated by `iquery`.
    cache: Option<Cache>,
    /// The request being resolved.
    request: Arc<Request>,
    /// Name-server stack; `iquery` always uses the last entry.
    name_servers: Arc<Vec<NameServer>>,
}
/// Run a single (non-recursive) query against the last name server in
/// `query.name_servers`, consulting and updating the cache and decrementing
/// the shared attempt budget.
pub fn iquery(query: Query) -> Pin<Box<dyn Future<Output = Result<Response, wire::Error> > + Send >> {
    Box::pin(async move {
        let name_servers = &query.name_servers;
        if name_servers.is_empty() {
            return Err(wire::Error::new(wire::ErrorKind::ServerFailure, "could not find next name server."));
        }
        // Always query the most recently pushed name server.
        let idx = name_servers.len() - 1;
        let name_server = &name_servers[idx];
        let req = &query.request;
        // `attempts` acts as a recursion budget shared across sub-queries.
        if query.state.read().unwrap().attempts == 0 {
            return Err(wire::Error::new(wire::ErrorKind::ServerFailure, "recursion limit."));
        }
        // Cache hit still consumes one attempt.
        if let Some(ref cache) = query.cache {
            if let Some(res) = cache.get(&req, name_server) {
                let mut state = query.state.write().unwrap();
                state.attempts -= 1;
                return Ok(res);
            }
        }
        let res = name_server.query(&req).await?;
        let mut state = query.state.write().unwrap();
        state.attempts -= 1;
        // Cache positive answers and referrals (NS without SOA); responses
        // with a SOA and no answers (negative responses) are not cached.
        let mut cacheable = false;
        if let Some(mut cache) = query.cache {
            if res.rcode == wire::ResponseCode::OK {
                // Update DNS Cache
                if res.answers.is_empty() {
                    let mut has_ns_rr = false;
                    let mut has_soa_rr = false;
                    for rr in res.authorities.iter() {
                        match rr {
                            Record::NS(_) => {
                                has_ns_rr = true;
                            },
                            Record::SOA(_) => {
                                has_soa_rr = true;
                            },
                            _ => { },
                        }
                    }
                    if has_ns_rr && !has_soa_rr {
                        cacheable = true;
                    }
                } else {
                    cacheable = true;
                }
            }
            if cacheable {
                cache.insert(&req, name_server, &res);
            }
        }
        Ok(res)
    })
}
/// Recursive resolution entry point: resolve `query.request`, following any
/// CNAME chain, and prepend the followed CNAME records to the final answers.
pub fn rquery(query: Query) -> Pin<Box<dyn Future<Output = Result<Response, wire::Error> > + Send >> {
    Box::pin(async move {
        let req = &query.request;
        if req.questions.is_empty() {
            trace!("DNS Query must have questions.");
            return Err(wire::Error::from(wire::ErrorKind::FormatError));
        }
        let mut res = rquery2(query.clone()).await?;
        // CNAME records followed so far, most recent last.
        let mut cnames = Vec::new();
        'LOOP1: loop {
            if !res.answers.is_empty() {
                for rr in res.answers.iter().rev() {
                    match rr {
                        Record::CNAME(ref cname) => {
                            // Re-issue the query with the CNAME target as
                            // the question name.
                            let mut req: Request = (*query.request).clone();
                            let question = &mut req.questions[0];
                            question.name = cname.value.clone();
                            let cname_rr_clone = rr.clone();
                            // NOTE: a CNAME chase query is not allowed to fail!
                            let mut query = query.clone();
                            query.request = Arc::new(req);
                            res = rquery2(query).await?;
                            cnames.push(cname_rr_clone);
                            continue 'LOOP1;
                        },
                        _ => { },
                    }
                }
            }
            break;
        }
        // Prepend in original order: the first CNAME followed ends up first.
        cnames.reverse();
        for cname in cnames {
            res.answers.insert(0, cname);
        }
        Ok(res)
    })
}
/// Walk the delegation chain: query once, and while the answer is a referral
/// (no answers, NS records in the authority section), resolve the referred
/// name servers and retry the question against them.
pub fn rquery2(query: Query) -> Pin<Box<dyn Future<Output = Result<Response, wire::Error> > + Send >> {
    Box::pin(async move {
        let query = query;
        let mut res = iquery(query.clone()).await?;
        'LOOP1: loop {
            // Errors and real answers both terminate the walk.
            if res.rcode != ResponseCode::OK {
                return Ok(res);
            }
            if !res.answers.is_empty() {
                return Ok(res);
            }
            // Referral: (NS hostname, glue addresses) pairs, or None if the
            // response is not a usable referral.
            let name_servers = get_name_servers(&res);
            if name_servers.is_none() {
                return Ok(res);
            }
            let mut name_servers = name_servers.unwrap();
            let mut ns_names = Vec::new();
            let (use_ipv4, use_ipv6) = {
                let state = query.state.read().unwrap();
                (state.use_ipv4, state.use_ipv6)
            };
            // NOTE(review): the 'LOOP2/'LOOP3 labels are never targeted by a
            // break/continue; the compiler will warn about unused labels.
            'LOOP2: for (ns_name, ns_name_servers) in name_servers.iter_mut() {
                // No glue address: resolve the NS hostname ourselves first.
                if ns_name_servers.is_empty() {
                    warn!("NSLOOKUP: {:?}", ns_name);
                    ns_names.push(ns_name.to_string());
                    let kind = if use_ipv6 { wire::Kind::AAAA } else { wire::Kind::A };
                    let req = Request {
                        id: rand::random(),
                        flags: wire::ReprFlags::default(),
                        opcode: wire::OpCode::QUERY,
                        client_subnet: None,
                        questions: vec![
                            wire::Question {
                                name: ns_name.clone(),
                                kind: kind,
                                class: wire::Class::IN,
                            }
                        ],
                    };
                    let mut query = query.clone();
                    query.request = Arc::new(req);
                    match rquery(query).await {
                        Ok(ns_res) => {
                            // Turn each A/AAAA answer into a name server on
                            // port 53.
                            for rr in ns_res.answers.iter() {
                                match rr {
                                    Record::A(ref a) => {
                                        let ns_addr = SocketAddr::from((a.value, 53u16));
                                        let name_server = NameServer::new(Some(ns_name.to_string()), ns_addr, Protocols::default())
                                            // .map_err(|e| wire::Error::new(wire::ErrorKind::ServerFailure, e))?;
                                            .unwrap();
                                        debug!("NS ADDR: {:?}", name_server);
                                        ns_name_servers.push(name_server);
                                    },
                                    Record::AAAA(ref aaaa) => {
                                        let ns_addr = SocketAddr::from((aaaa.value, 53u16));
                                        let name_server = NameServer::new(Some(ns_name.to_string()), ns_addr, Protocols::default())
                                            // .map_err(|e| wire::Error::new(wire::ErrorKind::ServerFailure, e))?;
                                            .unwrap();
                                        debug!("NS ADDR: {:?}", name_server);
                                        ns_name_servers.push(name_server);
                                    },
                                    _ => { },
                                }
                            }
                        },
                        Err(e) => {
                            // Lookup failures are logged and skipped.
                            error!("{:?}", e);
                        },
                    }
                }
                // Try each candidate server honouring the v4/v6 toggles;
                // the first successful query restarts the outer walk.
                'LOOP3: for ns_name_server in ns_name_servers.iter() {
                    if ns_name_server.is_ipv4() {
                        if !use_ipv4 {
                            continue;
                        }
                    }
                    if ns_name_server.is_ipv6() {
                        if !use_ipv6 {
                            continue;
                        }
                    }
                    let mut query = query.clone();
                    query.name_servers = Arc::new(vec![ns_name_server.clone()]);
                    match iquery(query).await {
                        Ok(res_) => {
                            res = res_;
                            continue 'LOOP1;
                        },
                        Err(e) => {
                            error!("{:?}", e);
                        }
                    }
                }
            }
            // No referred server answered; return the referral as-is.
            return Ok(res);
        }
    })
}
/// Extract (NS hostname, name servers) pairs from a referral response.
///
/// Addresses come from A/AAAA glue records in the additional section,
/// falling back to the well-known root server lists for root NS names.
/// Entries that already have at least one address are ordered before those
/// that still need a lookup. Returns `None` when the response is not a
/// referral (it has answers, or no NS records in the authority section).
fn get_name_servers(res: &Response) -> Option<Vec<(String, Vec<NameServer>)>> {
    if !res.answers.is_empty() {
        return None;
    }
    if res.authorities.is_empty() {
        warn!("Authority Section is empty.");
        return None;
    }
    // name_servers1: entries with known addresses; name_servers2: without.
    let mut name_servers1 = Vec::new();
    let mut name_servers2 = Vec::new();
    for rr in res.authorities.iter().rev() {
        match rr {
            Record::NS(ref ns) => {
                // Collect glue addresses for this NS name.
                let mut ns_name_servers = Vec::new();
                for rr in res.additionals.iter().rev() {
                    match rr {
                        Record::A(ref a) => {
                            let ns_addr = SocketAddr::from((a.value, 53u16));
                            let name_server = NameServer::new(Some(ns.value.clone()), ns_addr, Protocols::default())
                                // .map_err(|e| wire::Error::new(wire::ErrorKind::ServerFailure, e))?;
                                .unwrap();
                            ns_name_servers.push(name_server);
                        },
                        Record::AAAA(ref aaaa) => {
                            let ns_addr = SocketAddr::from((aaaa.value, 53u16));
                            let name_server = NameServer::new(Some(ns.value.clone()), ns_addr, Protocols::default())
                                // .map_err(|e| wire::Error::new(wire::ErrorKind::ServerFailure, e))?;
                                .unwrap();
                            ns_name_servers.push(name_server);
                        },
                        _ => { },
                    }
                }
                // No glue: use the hard-coded root addresses when the NS
                // name is one of the root servers.
                if ns_name_servers.is_empty() {
                    for root_servers in [ &ROOT_V4_SERVERS, &ROOT_V6_SERVERS ].iter() {
                        for (root_name, root_addr) in root_servers.iter() {
                            if root_name != &ns.value.as_str() {
                                continue;
                            }
                            let ns_addr = SocketAddr::new(*root_addr, 53u16);
                            let name_server = NameServer::new(Some(ns.value.clone()), ns_addr, Protocols::default())
                                // .map_err(|e| wire::Error::new(wire::ErrorKind::ServerFailure, e))?;
                                .unwrap();
                            ns_name_servers.push(name_server);
                        }
                    }
                }
                if !ns_name_servers.is_empty() {
                    name_servers1.push((ns.value.clone(), ns_name_servers));
                } else {
                    name_servers2.push((ns.value.clone(), ns_name_servers));
                }
            },
            _ => { },
        }
    }
    // Address-bearing entries first, then the ones needing NS lookups.
    name_servers1.extend(name_servers2);
    let name_servers = name_servers1;
    if name_servers.is_empty() {
        warn!("Authority Section has no NS records.");
        return None;
    } else {
        return Some(name_servers);
    }
}
use super::opcodes::*;
/// Represents one MIR instruction
pub struct MIRNode {
    /// The operation this instruction performs.
    pub(super) op: Opcode,
    /// Operands the instruction reads.
    pub(super) inputs: Vec<Operand>,
    /// Operands the instruction writes.
    pub(super) outputs: Vec<Operand>,
}
#[derive(Copy, Clone)]
/// An instruction operand: a signed or unsigned immediate of a given width,
/// or a reference to another value by numeric id.
pub enum Operand {
    Imm32(i32),
    Imm64(i64),
    Imm16(i16),
    Imm8(i8),
    UImm64(u64),
    UImm32(u32),
    UImm16(u16),
    UImm8(u8),
    // Reference to a value by id -- presumably an SSA-style value number;
    // confirm against the code generator that consumes these.
    Value(u32),
}
|
use std::fs;
use serde::{Serialize, Deserialize};
use crate::transport::plain;
use crate::transport::{AsyncConnect, AsyncAccept};
mod dns;
mod ep;
mod net;
mod tls;
mod trans;
// re-export
pub use dns::DnsMode;
pub use net::NetConfig;
pub use tls::{TLSConfig, TLSClientConfig, TLSServerConfig};
pub use trans::{TransportConfig, WebSocketConfig};
pub use ep::EndpointConfig;
#[derive(Debug, Serialize, Deserialize)]
/// Top-level application configuration: global DNS mode plus one entry per
/// endpoint.
pub struct GlobalConfig {
    /// DNS resolution mode; falls back to `DnsMode::default()` when absent
    /// from the config file.
    #[serde(default)]
    pub dns_mode: DnsMode,
    /// Endpoint definitions; required in the config file.
    pub endpoints: Vec<EndpointConfig>,
}
impl GlobalConfig {
    /// Load and parse a JSON config file.
    ///
    /// # Panics
    /// Panics, naming the file and the underlying error, when the file
    /// cannot be read or does not parse as a `GlobalConfig` (previously the
    /// messages carried neither).
    pub fn from_config_file(file: &str) -> Self {
        let config = fs::read_to_string(file)
            .unwrap_or_else(|err| panic!("invalid file path {}: {}", file, err));
        serde_json::from_str(&config)
            .unwrap_or_else(|err| panic!("failed to parse config file {}: {}", file, err))
    }
}
/// Applies a transport layer on top of an existing acceptor/connector pair
/// (defaults to the plain TCP implementations).
pub trait WithTransport<L = plain::Acceptor, C = plain::Connector>
where
    L: AsyncAccept,
    C: AsyncConnect,
{
    /// Acceptor type produced once the transport is applied.
    type Acceptor: AsyncAccept;
    /// Connector type produced once the transport is applied.
    type Connector: AsyncConnect;
    /// Wrap a listener with this transport.
    fn apply_to_lis(&self, lis: L) -> Self::Acceptor;
    /// Wrap a connector with this transport.
    fn apply_to_conn(&self, conn: C) -> Self::Connector;
}
|
use std::sync::Arc;
use http::{header::HeaderName, request::Request, HeaderValue};
use tower::{Layer, Service};
#[derive(Clone)]
/// Layer that adds a static set of extra headers to each request
pub struct ExtraHeadersLayer {
    /// Header pairs appended to every request; shared cheaply via `Arc`.
    pub(crate) headers: Arc<Vec<(HeaderName, HeaderValue)>>,
}
impl<S> Layer<S> for ExtraHeadersLayer {
type Service = ExtraHeaders<S>;
fn layer(&self, inner: S) -> Self::Service {
ExtraHeaders {
inner,
headers: self.headers.clone(),
}
}
}
#[derive(Clone)]
/// Service that adds a static set of extra headers to each request
///
/// Created by `ExtraHeadersLayer`; delegates everything else to `inner`.
pub struct ExtraHeaders<S> {
    inner: S,
    headers: Arc<Vec<(HeaderName, HeaderValue)>>,
}
impl<S, ReqBody> Service<Request<ReqBody>> for ExtraHeaders<S>
where
    S: Service<Request<ReqBody>>,
{
    type Response = S::Response;
    type Error = S::Error;
    type Future = S::Future;

    /// Readiness is delegated entirely to the wrapped service.
    fn poll_ready(&mut self, cx: &mut std::task::Context<'_>) -> std::task::Poll<Result<(), Self::Error>> {
        self.inner.poll_ready(cx)
    }

    /// Append every configured header to the request, then forward it.
    fn call(&mut self, mut req: Request<ReqBody>) -> Self::Future {
        let extra = self.headers.iter().cloned();
        req.headers_mut().extend(extra);
        self.inner.call(req)
    }
}
|
use std::mem;
// mark the List as public so that others can use it, but as a struct to hide implementation
// details.
// Structs with one element are the same size as that element: a zero-cost abstraction.
/// A singly linked stack of `i32`s.
///
/// A single-field struct is the same size as the field itself, so this
/// wrapper is a zero-cost abstraction hiding the `Link` representation.
pub struct List {
    head: Link,
}

/// One hop in the chain. The null-pointer optimization means the `Empty`
/// tag needs no extra storage: `Link` is pointer-sized.
enum Link {
    Empty,
    More(Box<Node>),
}

/// A heap-allocated element plus the rest of the list.
struct Node {
    elem: i32,
    next: Link,
}

impl List {
    /// Create an empty list.
    pub fn new() -> Self {
        List { head: Link::Empty }
    }

    /// Prepend `elem` to the front of the list.
    ///
    /// `mem::replace` parks `Link::Empty` in `self.head` so the old head
    /// can be moved into the new node without ever leaving the list in an
    /// invalid state (exception safety: we cannot simply move out of a
    /// field behind `&mut self`).
    pub fn push(&mut self, elem: i32) {
        let old_head = mem::replace(&mut self.head, Link::Empty);
        let node = Box::new(Node { elem, next: old_head });
        self.head = Link::More(node);
    }

    /// Remove and return the front element, or `None` when the list is
    /// empty.
    pub fn pop(&mut self) -> Option<i32> {
        match mem::replace(&mut self.head, Link::Empty) {
            Link::Empty => None,
            Link::More(node) => {
                // Deref coercion reaches through the Box to the Node's
                // fields: `next` is moved back into the list, `elem` out
                // to the caller.
                self.head = node.next;
                Some(node.elem)
            }
        }
    }
}

/// The derived destructor would recurse once per node and could overflow
/// the stack on long lists, so drop iteratively instead.
impl Drop for List {
    fn drop(&mut self) {
        let mut link = mem::replace(&mut self.head, Link::Empty);
        // Each iteration detaches one node: its `next` is swapped out and
        // becomes the new cursor, then the node itself goes out of scope
        // and is freed — its own `next` is already Empty, so no recursion.
        while let Link::More(mut node) = link {
            link = mem::replace(&mut node.next, Link::Empty);
        }
    }
}
#[cfg(test)]
mod test {
    use super::List;
    // Single end-to-end exercise: empty-pop, pushes, interleaved pops, and
    // exhaustion back to `None`.
    #[test]
    fn basics() {
        let mut list = List::new();
        // Check empty list behaves right
        assert_eq!(list.pop(), None);
        // Populate list
        list.push(1);
        list.push(2);
        list.push(3);
        // Check normal removal
        assert_eq!(list.pop(), Some(3));
        assert_eq!(list.pop(), Some(2));
        // Push some more just to make sure nothing's corrupted
        list.push(4);
        list.push(5);
        // Check normal removal
        assert_eq!(list.pop(), Some(5));
        assert_eq!(list.pop(), Some(4));
        // Check exhaustion
        assert_eq!(list.pop(), Some(1));
        assert_eq!(list.pop(), None);
    }
}
|
//! The basic components for serving static files.
use {
crate::{
error::Error,
future::TryFuture,
input::Input,
output::{Responder, ResponseBody},
},
bytes::{BufMut, BytesMut},
filetime::FileTime,
futures01::{Async, Poll, Stream},
http::{
header::{self, HeaderMap},
Request, Response, StatusCode,
},
log::trace,
mime::Mime,
std::{
borrow::Cow,
cmp, fmt,
fs::{File, Metadata},
io::{self, Read as _Read},
mem,
ops::Deref,
path::{Path, PathBuf},
str::FromStr,
sync::Arc,
time::Duration,
},
time::Timespec,
tokio_threadpool::blocking as poll_blocking,
};
// ==== headers ====
/// Parse an HTTP date header value into a `Timespec`.
///
/// Tries, in order, the three formats HTTP allows (RFC 7231 §7.1.1.1):
/// IMF-fixdate, the obsolete RFC 850 form, and ANSI C `asctime`.
fn parse_http_date(s: &str) -> Result<Timespec, time::ParseError> {
    time::strptime(s, "%a, %d %b %Y %T %Z")
        .or_else(|_| time::strptime(s, "%A, %d-%b-%y %T %Z"))
        .or_else(|_| time::strptime(s, "%c"))
        .map(|tm| tm.to_timespec())
}
/// A parsed HTTP `ETag` validator.
#[derive(Debug)]
struct ETag {
    // true for weak validators (`W/"..."`)
    weak: bool,
    // tag contents without the surrounding quotes
    tag: String,
}
impl ETag {
    /// Build a weak ETag from file metadata: size and mtime in hex.
    fn from_metadata(metadata: &Metadata) -> Self {
        let last_modified = FileTime::from_last_modification_time(&metadata);
        Self {
            weak: true,
            tag: format!(
                "{:x}-{:x}.{:x}",
                metadata.len(),
                last_modified.seconds(),
                last_modified.nanoseconds()
            ),
        }
    }

    /// Parse the quoted portion of an ETag (`"tag"`); `weak` has already
    /// been determined by the caller.
    ///
    /// The bail messages used to be empty strings, which made failures
    /// impossible to diagnose — each case now names its reason.
    fn parse_inner(weak: bool, s: &str) -> Result<Self, failure::Error> {
        if s.len() < 2 {
            failure::bail!("ETag is too short");
        }
        if !s.starts_with('"') || !s.ends_with('"') {
            failure::bail!("ETag must be wrapped in double quotes");
        }
        let tag = &s[1..s.len() - 1];
        if !tag.is_ascii() {
            failure::bail!("ETag may only contain ASCII characters");
        }
        Ok(Self {
            weak,
            tag: tag.to_owned(),
        })
    }

    /// Comparison used for `If-None-Match`: the tags must be equal, and a
    /// weak candidate (`other`) never matches a strong stored tag.
    fn eq(&self, other: &Self) -> bool {
        self.tag == other.tag && (self.weak || !other.weak)
    }
}
impl FromStr for ETag {
    type Err = failure::Error;

    /// Parse `"tag"` (strong) or `W/"tag"` (weak).
    ///
    /// The previous `s.get(0..3)` probe rejected any input shorter than
    /// three bytes, including the minimal valid strong ETag `""`; checking
    /// the prefix directly fixes that and removes a redundant guard
    /// (`s[2..]` necessarily starts with `"` when the first three bytes
    /// are `W/"`).
    fn from_str(s: &str) -> Result<Self, Self::Err> {
        if s.starts_with("W/") {
            Self::parse_inner(true, &s[2..])
        } else if s.starts_with('"') {
            Self::parse_inner(false, s)
        } else {
            failure::bail!("invalid string to parse ETag")
        }
    }
}
impl fmt::Display for ETag {
    /// Render as `"tag"`, prefixed with `W/` for weak validators — the
    /// exact inverse of `FromStr`.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        let prefix = if self.weak { "W/" } else { "" };
        write!(f, "{}\"{}\"", prefix, self.tag)
    }
}
// ==== Config ====
/// A set of configuration used in `NamedFile`.
#[derive(Debug, Default, Clone)]
pub struct OpenConfig {
    /// The size of chunked buffers.
    ///
    /// If `None`, it will be guessed based on the block size on the filesystem.
    pub chunk_size: Option<usize>,
    /// The maximal amount of time to refresh the resource.
    ///
    /// If this field is set, the generated HTTP response will include a "Cache-Control" header
    /// that includes the parameter max-age.
    // When unset, the response is still "Cache-Control: public", just without max-age.
    pub max_age: Option<Duration>,
}
// ==== NamedFile ====
/// An instance of `Responder` for responding a file.
///
/// Holds only the path and options; no filesystem access happens until the
/// response future (`OpenNamedFile`) is polled.
#[derive(Debug, Clone)]
pub struct NamedFile<P> {
    path: P,
    config: Option<OpenConfig>,
}
impl<P> NamedFile<P>
where
    P: AsRef<Path> + Send + 'static,
{
    /// Open a specified file with the default configuration.
    ///
    /// Despite the name, this only records the path; the file itself is
    /// opened lazily when the responder is polled.
    pub fn open(path: P) -> Self {
        Self { path, config: None }
    }
    /// Open a specified file with the provided configuration.
    pub fn open_with_config(path: P, config: OpenConfig) -> Self {
        Self {
            path,
            config: Some(config),
        }
    }
}
impl<P> Responder for NamedFile<P>
where
    P: AsRef<Path> + Send + 'static,
{
    type Upgrade = crate::upgrade::NeverUpgrade;
    type Error = crate::Error;
    type Respond = OpenNamedFile<P>;
    /// Convert into the future that actually opens the file and builds the
    /// HTTP response.
    #[inline]
    fn respond(self) -> Self::Respond {
        OpenNamedFile {
            path: self.path,
            config: self.config,
        }
    }
}
#[doc(hidden)]
// Future returned by `NamedFile::respond`: opens the file on the blocking
// pool and resolves to the finished response.
#[derive(Debug)]
pub struct OpenNamedFile<P> {
    path: P,
    config: Option<OpenConfig>,
}
impl<P> TryFuture for OpenNamedFile<P>
where
    P: AsRef<Path>,
{
    type Ok = Response<ResponseBody>;
    type Error = crate::Error;
    // Opens and stats the file on the blocking pool, then delegates header
    // and body construction to `NamedFileResponse::into_response`.
    fn poll_ready(&mut self, input: &mut Input<'_>) -> Poll<Self::Ok, Self::Error> {
        let (file, meta) = futures01::try_ready!(blocking_io(|| {
            let file = File::open(&self.path)?;
            let meta = file.metadata()?;
            Ok((file, meta))
        }));
        // `take` leaves `None` behind; the future is not expected to be
        // polled again after it has resolved.
        let config = self.config.take().unwrap_or_default();
        let last_modified = FileTime::from_last_modification_time(&meta);
        let etag = ETag::from_metadata(&meta);
        let content_type = mime_guess::guess_mime_type(&self.path);
        let response = NamedFileResponse {
            file,
            meta,
            content_type,
            last_modified,
            etag,
            config,
        }
        .into_response(input.request)?;
        Ok(Async::Ready(response))
    }
}
// Everything needed to turn an opened file into an HTTP response:
// the handle, its metadata, precomputed validators and the user config.
#[derive(Debug)]
struct NamedFileResponse {
    file: File,
    meta: Metadata,
    content_type: Mime,
    etag: ETag,
    last_modified: FileTime,
    config: OpenConfig,
}
impl NamedFileResponse {
    /// Evaluate the request's conditional headers.
    ///
    /// `If-None-Match` takes precedence over `If-Modified-Since`; when
    /// neither is present the resource is treated as modified.
    #[allow(clippy::cast_sign_loss)]
    fn is_modified(&self, headers: &HeaderMap) -> Result<bool, Error> {
        if let Some(h) = headers.get(header::IF_NONE_MATCH) {
            trace!("NamedFile::is_modified(): validate If-None-Match");
            let etag: ETag = h
                .to_str()
                .map_err(crate::error::bad_request)?
                .parse()
                .map_err(crate::error::bad_request)?;
            let modified = !etag.eq(&self.etag);
            trace!(
                "--> self.etag={:?}, etag={:?}, modified={}",
                self.etag,
                etag,
                modified
            );
            return Ok(modified);
        }
        if let Some(h) = headers.get(header::IF_MODIFIED_SINCE) {
            trace!("NamedFile::is_modified(): validate If-Modified-Since");
            let if_modified_since = {
                let timespec = parse_http_date(h.to_str().map_err(crate::error::bad_request)?)
                    .map_err(crate::error::bad_request)?;
                FileTime::from_unix_time(timespec.sec, timespec.nsec as u32)
            };
            let modified = self.last_modified > if_modified_since;
            // trace message previously misspelled "if_modified_sicne"
            trace!(
                "--> if_modified_since={:?}, modified={}",
                if_modified_since,
                modified
            );
            return Ok(modified);
        }
        Ok(true)
    }

    /// Value for the `Cache-Control` header: `public`, plus `max-age` when
    /// configured.
    fn cache_control(&self) -> Cow<'static, str> {
        match self.config.max_age {
            Some(ref max_age) => format!("public, max-age={}", max_age.as_secs()).into(),
            None => "public".into(),
        }
    }

    /// Format the file's mtime for the `Last-Modified` header.
    #[allow(clippy::cast_possible_wrap)]
    fn last_modified(&self) -> Result<String, time::ParseError> {
        let tm = time::at(Timespec::new(
            self.last_modified.seconds(),
            self.last_modified.nanoseconds() as i32,
        ));
        time::strftime("%c", &tm)
    }

    /// Build the final response: `304 Not Modified` when the conditional
    /// headers allow it, otherwise a streamed file body with caching and
    /// validation headers attached.
    fn into_response(self, request: &Request<()>) -> Result<Response<ResponseBody>, Error> {
        trace!("NamedFileResponse::into_response");
        if !self.is_modified(request.headers())? {
            return Ok(Response::builder()
                .status(StatusCode::NOT_MODIFIED)
                .body(ResponseBody::empty())
                .unwrap());
        }
        // FIXME: optimize
        let cache_control = self.cache_control();
        let last_modified = self
            .last_modified()
            .map_err(crate::error::internal_server_error)?;
        let stream = ReadStream::new(self.file, self.meta, self.config.chunk_size);
        Ok(Response::builder()
            .header(header::CONTENT_TYPE, self.content_type.as_ref())
            .header(header::CACHE_CONTROL, &*cache_control)
            .header(header::LAST_MODIFIED, &*last_modified)
            .header(header::ETAG, &*self.etag.to_string())
            .body(ResponseBody::wrap_stream(stream))
            .unwrap())
    }
}
// ==== ReadStream ====
// A `futures01::Stream` of file chunks driven by blocking reads on the
// tokio threadpool.
#[derive(Debug)]
struct ReadStream(State);
// Read-loop state machine: `Reading` until a zero-length read, then `Eof`.
// `Gone` is only a transient placeholder used while swapping states and is
// never observed across a `poll` call.
#[derive(Debug)]
enum State {
    Reading { file: File, buf_size: usize },
    Eof,
    Gone,
}
impl ReadStream {
    /// Start streaming `file`, with the chunk size derived from the
    /// requested `buf_size` clamped by the file's metadata (falling back
    /// to the filesystem block size). The metadata is only needed for that
    /// computation and is dropped here.
    fn new(file: File, meta: Metadata, buf_size: Option<usize>) -> Self {
        let chunk = finalize_block_size(buf_size, &meta);
        ReadStream(State::Reading {
            file,
            buf_size: chunk,
        })
    }
}
impl Stream for ReadStream {
    type Item = izanami::http::body::Data;
    type Error = io::Error;

    /// Read the file in `buf_size` chunks on the blocking pool.
    ///
    /// A zero-byte read signals EOF: the state machine then transitions
    /// `Reading -> Eof`, and the following loop iteration yields `None`.
    fn poll(&mut self) -> Poll<Option<Self::Item>, Self::Error> {
        loop {
            match self.0 {
                State::Reading {
                    ref mut file,
                    buf_size,
                    ..
                } => {
                    trace!("ReadStream::poll(): polling on the mode State::Reading");
                    let buf = futures01::try_ready!(blocking_io(|| {
                        let mut buf = BytesMut::with_capacity(buf_size);
                        if !buf.has_remaining_mut() {
                            buf.reserve(buf_size);
                        }
                        // FIXME(review): `bytes_mut()` hands uninitialized
                        // memory to `Read::read`, which is only sound if
                        // the reader never inspects the buffer. `File::read`
                        // does not, but this should move to an
                        // initialize-first or `read_buf`-style API.
                        unsafe {
                            let n = file.read(buf.bytes_mut())?;
                            buf.advance_mut(n);
                        }
                        Ok(buf)
                    }));
                    if !buf.is_empty() {
                        return Ok(Async::Ready(Some(buf.freeze().into())));
                    }
                    // fall through: zero bytes read means EOF
                }
                State::Eof => {
                    // this trace previously said "State::Reading" (copy-paste)
                    trace!("ReadStream::poll(): polling on the mode State::Eof");
                    return Ok(Async::Ready(None));
                }
                State::Gone => panic!("unexpected state"),
            };
            match mem::replace(&mut self.0, State::Gone) {
                State::Reading { .. } => self.0 = State::Eof,
                _ => unreachable!("unexpected state"),
            }
        }
    }
}
// Fallback chunk size for platforms without a filesystem block size (used
// by the non-unix `block_size`; dead code on unix, hence the allow).
#[allow(dead_code)]
const DEFAULT_BUF_SIZE: u64 = 8192;
/// Run `f` on the tokio blocking pool, flattening the nested
/// readiness/result and converting pool errors into `io::Error`.
fn blocking_io<T>(f: impl FnOnce() -> io::Result<T>) -> Poll<T, io::Error> {
    match poll_blocking(f) {
        Ok(Async::Ready(ready)) => ready.map(Async::Ready),
        Ok(Async::NotReady) => Ok(Async::NotReady),
        Err(e) => Err(io::Error::new(io::ErrorKind::Other, e)),
    }
}
// FIXME: replace usize to u64
/// Pick the actual chunk size: the caller's request (or, failing that, the
/// filesystem block size), clamped so it never exceeds the file length.
#[allow(clippy::cast_possible_truncation)]
fn finalize_block_size(buf_size: Option<usize>, meta: &Metadata) -> usize {
    let preferred = match buf_size {
        Some(n) => n as u64,
        None => block_size(meta),
    };
    cmp::min(meta.len(), preferred) as usize
}
// On unix, ask the filesystem for its preferred I/O block size.
#[cfg(unix)]
fn block_size(meta: &Metadata) -> u64 {
    use std::os::unix::fs::MetadataExt;
    meta.blksize()
}
// Elsewhere there is no portable block-size query; use a fixed default.
#[cfg(not(unix))]
fn block_size(_: &Metadata) -> u64 {
    DEFAULT_BUF_SIZE
}
/// A cheaply clonable shared path: an `Arc`-wrapped `PathBuf` usable
/// anywhere a `&Path` is expected (via `AsRef` and `Deref`).
#[derive(Debug, Clone)]
pub struct ArcPath(Arc<PathBuf>);

impl From<PathBuf> for ArcPath {
    fn from(path: PathBuf) -> Self {
        Self(Arc::new(path))
    }
}

impl AsRef<Path> for ArcPath {
    fn as_ref(&self) -> &Path {
        self.0.as_path()
    }
}

impl Deref for ArcPath {
    type Target = Path;

    #[inline]
    fn deref(&self) -> &Self::Target {
        self.0.as_path()
    }
}
// #[derive(Debug, Clone)]
// pub struct ServeFile {
// inner: Arc<ServeFileInner>,
// }
// #[derive(Debug)]
// struct ServeFileInner {
// path: ArcPath,
// config: Option<OpenConfig>,
// extract_path: bool,
// }
// mod impl_handler_for_serve_file {
// use {
// super::{ArcPath, NamedFile, ServeFile},
// crate::{endpoint::Endpoint, error::Error, future::TryFuture, input::Input},
// futures01::{Async, Poll},
// };
// impl Endpoint<()> for ServeFile {
// type Output = NamedFile<ArcPath>;
// type Error = Error;
// type Future = Self;
// fn apply(&self, _: ()) -> Self::Future {
// self.clone()
// }
// }
// impl TryFuture for ServeFile {
// type Ok = NamedFile<ArcPath>;
// type Error = Error;
// fn poll_ready(&mut self, input: &mut Input<'_>) -> Poll<Self::Ok, Self::Error> {
// let path = if self.inner.extract_path {
// let path = input
// .params
// .as_ref()
// .and_then(|params| params.catch_all())
// .ok_or_else(|| crate::error::internal_server_error("missing params"))?;
// self.inner.path.join(path).into()
// } else {
// self.inner.path.clone()
// };
// Ok(Async::Ready(match self.inner.config {
// Some(ref config) => NamedFile::open_with_config(path, config.clone()),
// None => NamedFile::open(path),
// }))
// }
// }
// }
// /// A configuration type for adding entries in the directory to the route.
// #[derive(Debug)]
// pub struct Staticfiles<P> {
// root_dir: P,
// config: Option<OpenConfig>,
// }
// impl<P> Staticfiles<P>
// where
// P: AsRef<Path>,
// {
// /// Create a new `Staticfiles` with the specified directory path.
// pub fn new(root_dir: P) -> Self {
// Self {
// root_dir,
// config: None,
// }
// }
// /// Sets the value of `OpenConfig` used in handlers.
// pub fn open_config(self, config: OpenConfig) -> Self {
// Self {
// config: Some(config),
// ..self
// }
// }
// /// Registers the static file handlers onto the provided scope.
// pub fn register<M, C>(
// self,
// scope: &mut crate::app::config::Scope<'_, M, C>,
// ) -> crate::app::Result<()>
// where
// M: ModifyHandler<RouteHandler<(), ServeFile>>,
// M::Handler: Into<C::Handler>,
// C: Concurrency,
// {
// let Self { root_dir, config } = self;
// for entry in std::fs::read_dir(root_dir).map_err(crate::app::config::Error::custom)? {
// let entry = entry.map_err(crate::app::config::Error::custom)?;
// let name = entry.file_name();
// let name = name
// .to_str() //
// .ok_or_else(|| {
// crate::app::config::Error::custom(failure::format_err!(
// "the filename must be UTF-8"
// ))
// })?;
// let path = entry
// .path()
// .canonicalize()
// .map(|path| ArcPath(Arc::new(path)))
// .map_err(crate::app::config::Error::custom)?;
// let file_type = entry
// .file_type()
// .map_err(crate::app::config::Error::custom)?;
// if file_type.is_file() {
// scope.at(format!("/{}", name))?.to(
// Method::GET,
// ServeFile {
// inner: Arc::new(ServeFileInner {
// path,
// config: config.clone(),
// extract_path: false,
// }),
// },
// )?;
// } else if file_type.is_dir() {
// scope.at(format!("/{}/*path", name))?.to(
// Method::GET,
// ServeFile {
// inner: Arc::new(ServeFileInner {
// path,
// config: config.clone(),
// extract_path: true,
// }),
// },
// )?;
// } else {
// return Err(crate::app::config::Error::custom(failure::format_err!(
// "unexpected file type"
// )));
// }
// }
// Ok(())
// }
// }
|
use dasp::Frame;
use dasp::Sample;
use crate::delay_line;
/// Settings for a single-reflection echo: the delayed copy arrives
/// `length` frames late with gain `attenuation`.
#[derive(Clone, Copy)]
pub struct EchoParameters {
    pub attenuation: f32,
    pub length: usize,
}

impl EchoParameters {
    /// Derive echo parameters from geometry.
    ///
    /// With direct-path distance `d` and offset `h`, the reflected path
    /// length is `2 * sqrt(h^2 + (d/2)^2)`. The path difference divided by
    /// the speed of sound (343 m/s) gives the delay, rounded to whole
    /// frames at `sample_rate`; the gain is the distance ratio `d / 2r`.
    pub fn from_distances(d: f32, h: f32, sample_rate: usize) -> Self {
        let frame_t = 1.0 / sample_rate as f32;
        let reflected = (h * h + d * d / 4.0).sqrt();
        let delay_frames = ((2.0 * reflected - d) / (343.0 * frame_t)).round() as usize;
        EchoParameters {
            attenuation: d / (2.0 * reflected),
            length: delay_frames,
        }
    }
}
// Echo effect: a delay line of frames plus the derived delay/gain
// parameters.
pub struct Echo<T> {
    delay_line: delay_line::DelayLine<Vec<T>>,
    params: EchoParameters,
}
impl<T: Frame> Echo<T> {
    /// Build an echo whose delay and gain are derived from the geometry
    /// via `EchoParameters::from_distances`.
    ///
    /// # Panics
    /// Panics if the derived delay does not fit in `capacity` frames.
    pub fn new(d: f32, h: f32, sample_rate: usize, capacity: usize) -> Self {
        let params = EchoParameters::from_distances(d, h, sample_rate);
        assert!(params.length < capacity);
        Echo {
            delay_line: delay_line::DelayLine::new(vec![T::EQUILIBRIUM; capacity], params.length),
            params,
        }
    }

    /// Replace the parameters and retune the delay line to match.
    pub fn set_params(&mut self, params: EchoParameters) {
        self.params = params;
        self.delay_line.set_delay(params.length);
    }

    /// Process one frame: output = delayed frame * attenuation + input.
    pub fn tick(&mut self, in_frame: T) -> T {
        let signed_in = in_frame.to_signed_frame();
        let delayed = self.delay_line.tick(in_frame);
        delayed
            .scale_amp(self.params.attenuation.to_sample())
            .add_amp(signed_in)
    }
}
// Flanger settings: LFO rate (Hz), modulation amount (stored in frames),
// wet-signal gain `depth`, and the per-frame time step.
pub struct FlangeParameters {
    frame_time: f64,
    rate: f64,
    amount: f64,
    depth: f64,
}
// Flanger effect: a fractional (linear-interpolating) delay line whose
// delay is modulated over `time` by a sine LFO.
pub struct Flange<T> {
    params: FlangeParameters,
    delay_line: delay_line::DelayLineFracLin<Vec<T>>,
    time: f64,
}
impl<T: Frame> Flange<T> {
    /// Create a flanger.
    ///
    /// `rate` is the LFO frequency in Hz, `amount` the modulation depth in
    /// seconds (converted to frames here), and `depth` the wet-signal gain.
    pub fn new(rate: f64, amount: f64, depth: f64, sample_rate: usize) -> Self {
        Flange {
            params: FlangeParameters {
                frame_time: 1.0 / sample_rate as f64,
                rate,
                amount: amount * sample_rate as f64,
                depth,
            },
            delay_line: delay_line::DelayLineFracLin::new(vec![T::EQUILIBRIUM; 10000], 1000.0),
            time: 0.0,
        }
    }

    /// Process one frame: modulate the delay with a sine LFO, then mix the
    /// delayed (wet) signal, scaled by `depth`, back with the dry input.
    pub fn tick(&mut self, in_frame: T) -> T {
        self.time += self.params.frame_time;
        // Would a look up table give better performance?
        let sine = (self.params.rate * self.time * 2.0 * std::f64::consts::PI).sin();
        self.delay_line
            .set_delay(self.params.amount * sine + self.params.amount + 0.0005);
        let wet = self.delay_line.tick(in_frame);
        wet.scale_amp(self.params.depth.to_sample())
            .add_amp(in_frame.to_signed_frame())
    }

    /// Set the LFO rate in Hz.
    pub fn set_rate(&mut self, rate: f64) {
        self.params.rate = rate;
    }

    /// Set the modulation amount, given in seconds.
    pub fn set_amount(&mut self, amount: f64) {
        self.params.amount = amount / self.params.frame_time;
    }

    /// Set the wet-signal gain.
    pub fn set_depth(&mut self, depth: f64) {
        self.params.depth = depth;
    }
}
|
#[no_mangle]
pub extern fn physics_single_chain_ideal_thermodynamics_isotensional_end_to_end_length(number_of_links: u8, link_length: f64, force: f64, temperature: f64) -> f64
{
super::end_to_end_length(&number_of_links, &link_length, &force, &temperature)
}
#[no_mangle]
pub extern fn physics_single_chain_ideal_thermodynamics_isotensional_end_to_end_length_per_link(link_length: f64, force: f64, temperature: f64) -> f64
{
super::end_to_end_length_per_link(&link_length, &force, &temperature)
}
#[no_mangle]
pub extern fn physics_single_chain_ideal_thermodynamics_isotensional_nondimensional_end_to_end_length(number_of_links: u8, nondimensional_force: f64) -> f64
{
super::nondimensional_end_to_end_length(&number_of_links, &nondimensional_force)
}
#[no_mangle]
pub extern fn physics_single_chain_ideal_thermodynamics_isotensional_nondimensional_end_to_end_length_per_link(nondimensional_force: f64) -> f64
{
super::nondimensional_end_to_end_length_per_link(&nondimensional_force)
}
#[no_mangle]
pub extern fn physics_single_chain_ideal_thermodynamics_isotensional_gibbs_free_energy(number_of_links: u8, link_length: f64, hinge_mass: f64, force: f64, temperature: f64) -> f64
{
super::gibbs_free_energy(&number_of_links, &link_length, &hinge_mass, &force, &temperature)
}
#[no_mangle]
pub extern fn physics_single_chain_ideal_thermodynamics_isotensional_gibbs_free_energy_per_link(link_length: f64, hinge_mass: f64, force: f64, temperature: f64) -> f64
{
super::gibbs_free_energy_per_link(&link_length, &hinge_mass, &force, &temperature)
}
#[no_mangle]
pub extern fn physics_single_chain_ideal_thermodynamics_isotensional_relative_gibbs_free_energy(number_of_links: u8, link_length: f64, force: f64, temperature: f64) -> f64
{
super::relative_gibbs_free_energy(&number_of_links, &link_length, &force, &temperature)
}
#[no_mangle]
pub extern fn physics_single_chain_ideal_thermodynamics_isotensional_relative_gibbs_free_energy_per_link(link_length: f64, force: f64, temperature: f64) -> f64
{
super::relative_gibbs_free_energy_per_link(&link_length, &force, &temperature)
}
#[no_mangle]
pub extern fn physics_single_chain_ideal_thermodynamics_isotensional_nondimensional_gibbs_free_energy(number_of_links: u8, link_length: f64, hinge_mass: f64, nondimensional_force: f64, temperature: f64) -> f64
{
super::nondimensional_gibbs_free_energy(&number_of_links, &link_length, &hinge_mass, &nondimensional_force, &temperature)
}
#[no_mangle]
pub extern fn physics_single_chain_ideal_thermodynamics_isotensional_nondimensional_gibbs_free_energy_per_link(link_length: f64, hinge_mass: f64, nondimensional_force: f64, temperature: f64) -> f64
{
super::nondimensional_gibbs_free_energy_per_link(&link_length, &hinge_mass, &nondimensional_force, &temperature)
}
#[no_mangle]
pub extern fn physics_single_chain_ideal_thermodynamics_isotensional_nondimensional_relative_gibbs_free_energy(number_of_links: u8, nondimensional_force: f64) -> f64
{
super::nondimensional_relative_gibbs_free_energy(&number_of_links, &nondimensional_force)
}
#[no_mangle]
pub extern fn physics_single_chain_ideal_thermodynamics_isotensional_nondimensional_relative_gibbs_free_energy_per_link(nondimensional_force: f64) -> f64
{
super::nondimensional_relative_gibbs_free_energy_per_link(&nondimensional_force)
}
|
/// CreateHookOptionConfig has all config options in it.
///
/// NOTE(review): upstream requires the "content_type" and "url" keys, yet
/// this struct declares no fields — confirm whether they are injected
/// elsewhere or still need to be added here.
#[derive(Debug, Default, Clone, Serialize, Deserialize)]
pub struct CreateHookOptionConfig {}
|
use lzw_arc::lzw;
use sha1::{Sha1, Digest};
use std::fs::{remove_file, File};
#[test]
fn compress_test() {
    // Compress the plain fixture and verify (via SHA-1) that the output is
    // byte-identical to the pre-built "test-compressed" fixture.
    // The 16 is presumably the code width in bits — confirm against
    // `lzw::compress`'s signature.
    lzw::compress("test-file", "compress_test", 16).unwrap();
    // Source hash
    let mut file = File::open("test-compressed").unwrap();
    let mut hasher = Sha1::new();
    std::io::copy(&mut file, &mut hasher).unwrap();
    let source_hash = hasher.result();
    // Result hash
    let mut file = File::open("compress_test").unwrap();
    let mut hasher = Sha1::new();
    std::io::copy(&mut file, &mut hasher).unwrap();
    let result_hash = hasher.result();
    remove_file("compress_test").unwrap();
    assert_eq!(source_hash, result_hash);
}
#[test]
fn decompress_test() {
    // Decompress the pre-built fixture and verify the result matches the
    // original "test-file" byte-for-byte (compared via SHA-1).
    lzw::decompress("test-compressed", "decompress_test", 16).unwrap();
    // Source hash
    let mut file = File::open("test-file").unwrap();
    let mut hasher = Sha1::new();
    std::io::copy(&mut file, &mut hasher).unwrap();
    let source_hash = hasher.result();
    // Result hash
    let mut file = File::open("decompress_test").unwrap();
    let mut hasher = Sha1::new();
    std::io::copy(&mut file, &mut hasher).unwrap();
    let result_hash = hasher.result();
    remove_file("decompress_test").unwrap();
    assert_eq!(source_hash, result_hash);
}
#[test]
fn aes_test() {
    // Round-trip: compress+encrypt, then decrypt+decompress with the same
    // passphrase, and check the result equals the original input (SHA-1).
    lzw::compress_aes("test-file", "aes_test", 16, "secret").unwrap();
    lzw::decompress_aes("aes_test", "aes_test_result", 16, "secret").unwrap();
    // Source hash
    let mut file = File::open("test-file").unwrap();
    let mut hasher = Sha1::new();
    std::io::copy(&mut file, &mut hasher).unwrap();
    let source_hash = hasher.result();
    // Result hash
    let mut file = File::open("aes_test_result").unwrap();
    let mut hasher = Sha1::new();
    std::io::copy(&mut file, &mut hasher).unwrap();
    let result_hash = hasher.result();
    remove_file("aes_test").unwrap();
    remove_file("aes_test_result").unwrap();
    assert_eq!(source_hash, result_hash);
}
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.