text stringlengths 8 4.13M |
|---|
use reqwest::{Response, StatusCode};
use std::time::UNIX_EPOCH;
use tokio::time::Duration;
static RESET_HEADER: &str = "x-rate-limit-reset";
/// If `resp` is a 429 (Too Many Requests) response, returns how long to wait
/// before retrying, derived from the `x-rate-limit-reset` header (a Unix
/// timestamp in seconds).
///
/// Returns `None` when the response is not rate-limited or when the header is
/// missing, non-ASCII, or unparseable.
pub fn check_rate_limit(resp: &Response) -> Option<Duration> {
    if resp.status() != StatusCode::TOO_MANY_REQUESTS {
        return None;
    }
    let rate_reset_at = resp.headers().get(RESET_HEADER)?.to_str().ok()?;
    let reset = Duration::from_secs(rate_reset_at.parse::<u64>().ok()?);
    let now = UNIX_EPOCH.elapsed().ok()?;
    // `Duration - Duration` panics on underflow; if the reset moment is
    // already in the past (clock skew, delayed delivery), report a zero wait
    // instead of crashing while still signalling that we were rate-limited.
    Some(reset.checked_sub(now).unwrap_or(Duration::from_secs(0)))
}
|
use std::collections::HashMap;
use std::io::Read;
use std::sync::Arc;
use hyper::client::Client;
use hyper::client::Response;
use hyper::client::IntoUrl;
use hyper::header::ContentType;
use hyper::header::Headers;
use hyper::mime;
use serde::Deserialize;
use url::form_urlencoded;
use super::errors::*;
use super::resp::IntoResult;
use super::sig;
use super::util;
const URL_ACCESS_TOKEN_REQ: &'static str = "https://oauth.api.189.cn/emp/oauth2/v3/access_token";
/// HTTP client wrapper for the Open189 (China Telecom open platform) API.
pub struct Open189Client {
    // Shared handle so the client can be cloned/passed around cheaply.
    http: Arc<Client>,
}
/// Populate `params` with the common request fields (`app_id`,
/// `access_token`, `timestamp`) and then append the `sign` computed over the
/// fully-assembled parameter set.
fn prepare_request_params(
    params: &mut HashMap<&'static str, String>,
    app_id: &str,
    secret: &str,
    access_token: &str,
) {
    params.insert("app_id", app_id.to_owned());
    params.insert("access_token", access_token.to_owned());
    params.insert("timestamp", util::get_api_timestamp());
    // The signature must be computed after every other parameter is in place.
    let signature = sig::sign(params, secret);
    params.insert("sign", signature);
}
/// Read the entire HTTP response body as UTF-8, deserialize it as `T`, and
/// convert it into the API-level result via `IntoResult` (which gets the HTTP
/// status so it can distinguish success from error payloads).
///
/// Errors if the body is not valid UTF-8, is not valid JSON for `T`, or the
/// API reports a failure.
fn process_response<T>(mut response: Response) -> Result<T::Item>
    where T: Deserialize + IntoResult
{
    let mut response_str = String::new();
    response.read_to_string(&mut response_str)?;
    let obj: T = ::serde_json::from_str(&response_str)?;
    obj.into_result(response.status)
}
impl Open189Client {
    /// Wrap an existing hyper `Client` for making Open189 API calls.
    pub fn new(http_client: Client) -> Open189Client {
        Open189Client { http: Arc::new(http_client) }
    }
    /// Perform a signed GET request: common parameters and `sign` are added
    /// to `params`, which then REPLACE any query string already on `url`.
    /// The JSON response is parsed as `T` and converted via `IntoResult`.
    pub fn get_sync<U, S, T>(&self,
                             app_id: S,
                             secret: S,
                             access_token: S,
                             url: U,
                             mut params: HashMap<&'static str, String>)
                             -> Result<T::Item>
        where U: IntoUrl,
              S: AsRef<str>,
              T: Deserialize + IntoResult
    {
        let mut url = url.into_url()?;
        prepare_request_params(&mut params,
                               app_id.as_ref(),
                               secret.as_ref(),
                               access_token.as_ref());
        {
            // Inner scope so the mutable borrow of `url` ends before `send`.
            let mut qs = url.query_pairs_mut();
            // NOTE: any query string the caller put on `url` is discarded.
            qs.clear();
            for (k, v) in params.iter() {
                qs.append_pair(k.as_ref(), v.as_ref());
            }
        }
        let response = self.http.get(url).send()?;
        process_response::<T>(response)
    }
    /// Perform a signed POST request with the common parameters added; the
    /// body is sent form-urlencoded.
    pub fn post_sync<U, S, T>(&self,
                              app_id: S,
                              secret: S,
                              access_token: S,
                              url: U,
                              mut params: HashMap<&'static str, String>)
                              -> Result<T::Item>
        where U: IntoUrl,
              S: AsRef<str>,
              T: Deserialize + IntoResult
    {
        prepare_request_params(&mut params,
                               app_id.as_ref(),
                               secret.as_ref(),
                               access_token.as_ref());
        self.post_sync_prepared::<U, T>(url, params)
    }
    /// Request an access token from the fixed OAuth endpoint. Note this path
    /// does NOT sign the request; it sends `app_id`/`app_secret` plus a
    /// random `state` directly.
    pub fn perform_access_token_req<S, T>(&self,
                                          app_id: S,
                                          secret: S,
                                          mut params: HashMap<&'static str, String>)
                                          -> Result<T::Item>
        where S: AsRef<str>,
              T: Deserialize + IntoResult
    {
        params.insert("app_id", app_id.as_ref().to_string());
        params.insert("app_secret", secret.as_ref().to_string());
        params.insert("state", util::get_random_state_str());
        self.post_sync_prepared::<_, T>(URL_ACCESS_TOKEN_REQ, params)
    }
    /// POST `params` (already fully prepared/signed by the caller) as an
    /// `application/x-www-form-urlencoded; charset=utf-8` body.
    fn post_sync_prepared<U, T>(&self,
                                url: U,
                                params: HashMap<&'static str, String>)
                                -> Result<T::Item>
        where U: IntoUrl,
              T: Deserialize + IntoResult
    {
        let url = url.into_url()?;
        let body = {
            let mut serializer = form_urlencoded::Serializer::new(String::new());
            for (k, v) in params.iter() {
                serializer.append_pair(k, v);
            }
            serializer.finish()
        };
        let headers = {
            let mut tmp = Headers::new();
            tmp.set(ContentType(mime::Mime(mime::TopLevel::Application,
                                           mime::SubLevel::WwwFormUrlEncoded,
                                           vec![(mime::Attr::Charset, mime::Value::Utf8)])));
            tmp
        };
        let response = self.http.post(url).headers(headers).body(&body).send()?;
        process_response::<T>(response)
    }
}
|
use std::cmp::min;
/// Returns `true` when `s1` and `s2` are at most one edit apart — one
/// insertion, one deletion, or one replacement (Levenshtein distance <= 1).
///
/// Fixes two defects in the previous version:
/// - On matching characters it took `min` over all three DP neighbours
///   *without* edit cost, which allows free deletions and under-counts the
///   distance (e.g. `one_away("aaa", "a")` incorrectly returned `true`).
///   The correct recurrence for a match is `dp[i-1][j-1]`.
/// - It sliced `&str` by byte ranges (`s1[i-1..i]`), which panics on
///   multi-byte UTF-8 boundaries; comparing bytes directly never panics.
fn one_away(s1: String, s2: String) -> bool {
    let a = s1.as_bytes();
    let b = s2.as_bytes();
    // dp[i][j] = edit distance between a[..i] and b[..j].
    let mut dp = vec![vec![0; b.len() + 1]; a.len() + 1];
    for i in 0..a.len() + 1 {
        for j in 0..b.len() + 1 {
            if i == 0 {
                // Empty s1 prefix: j insertions build b[..j].
                dp[i][j] = j;
            } else if j == 0 {
                // Empty s2 prefix: i deletions erase a[..i].
                dp[i][j] = i;
            } else if a[i - 1] == b[j - 1] {
                // Matching bytes cost nothing; carry the diagonal over.
                dp[i][j] = dp[i - 1][j - 1];
            } else {
                // Delete from s1, insert into s1, or replace — each costs 1.
                dp[i][j] = min(
                    dp[i - 1][j] + 1,
                    min(dp[i][j - 1] + 1, dp[i - 1][j - 1] + 1),
                );
            }
        }
    }
    dp[a.len()][b.len()] <= 1
}
#[cfg(test)]
mod tests {
    use super::*;
    // One deletion: "pale" -> "ple".
    #[test]
    fn test_remove() {
        assert!(one_away("pale".to_string(), "ple".to_string()))
    }
    // One insertion: "pale" -> "pales".
    #[test]
    fn test_insert() {
        assert!(one_away("pale".to_string(), "pales".to_string()))
    }
    // One replacement: "pale" -> "bale".
    #[test]
    fn test_replace() {
        assert!(one_away("pale".to_string(), "bale".to_string()))
    }
    // Two replacements is more than one edit away.
    #[test]
    fn test_ng() {
        assert!(!one_away("pale".to_string(), "bake".to_string()))
    }
}
|
extern crate sysinfo;
use cursive::views::Dialog;
use cursive::Cursive;
use cursive::views::LinearLayout;
use sysinfo::{ProcessExt, ProcessorExt, SystemExt, DiskExt};
use std::{thread, time};
/// Render a table of running processes (pid, name, cpu %, memory) as one
/// multi-line string, refreshing the system snapshot first.
fn get_my_processes(system: &mut sysinfo::System) -> String {
    system.refresh_all();
    let mut out = String::with_capacity(2048);
    out.push_str(&format!(
        "{:^5}: {:^6}: {:^6}: {:^6}\n",
        "Pid", "Name", "Cpu(%)", "Memory(kb)"
    ));
    for (pid, process) in system.get_processes() {
        out.push_str(&format!("{:^5}", pid.to_string()));
        out.push_str(": ");
        out.push_str(&format!("{:^6}", process.name().to_string()));
        out.push_str(": ");
        // Centre-pad the cpu value, then clamp to six characters so that a
        // long float debug representation cannot widen the column.
        out.push_str(&format!("{:^6}", format!("{:?}", process.cpu_usage()))[0..6]);
        out.push_str(": ");
        out.push_str(&format!("{:^10}", format!("{:?}", process.memory())));
        out.push_str("\n");
    }
    out
}
/// Render a per-core usage bar graph (50 ticks, one per 2%) followed by the
/// numeric usage value, one line per processor.
fn get_my_cpu_usage(system: &mut sysinfo::System) -> String {
    system.refresh_all();
    let mut usages = Vec::new();
    for processor in system.get_processors() {
        usages.push(processor.get_cpu_usage());
    }
    let mut out = String::with_capacity(2048);
    for (idx, usage) in usages.iter().enumerate() {
        out.push_str(&format!("[{:^2}]", idx));
        out.push_str(" [");
        for tick in (0..100).step_by(2) {
            // Each '|' tick represents 2% of load.
            if tick < *usage as u8 {
                out.push_str("|");
            } else {
                out.push_str(" ");
            }
        }
        out.push_str("] ");
        out.push_str(&format!("{:^3}", usage));
        out.push_str("\n");
    }
    out
}
/// Human-readable label for a disk type.
fn get_disk_type_string(disk: sysinfo::DiskType) -> String {
    let label = match disk {
        sysinfo::DiskType::HDD => "HDD",
        sysinfo::DiskType::SSD => "SSD",
        // Covers DiskType::Unknown(_) and any future variants.
        _ => "Unknown",
    };
    String::from(label)
}
/// Render a table of mounted disks (name, type, mount point, total/free
/// space in decimal gigabytes) as one multi-line string.
fn get_my_disks(system: &mut sysinfo::System) -> String {
    system.refresh_all();
    let mut out = String::with_capacity(2048);
    out.push_str(&format!(
        "{:^8}: {:^8}: {:^10}: {:^12}: {:^12}\n",
        "Name", "Type", "Mount", "Total(Gb)", "Free(Gb)"
    ));
    for disk in system.get_disks() {
        out.push_str(&format!("{:^8}", disk.get_name().to_string_lossy().into_owned()));
        out.push_str(": ");
        out.push_str(&format!("{:^8}", get_disk_type_string(disk.get_type())));
        out.push_str(": ");
        out.push_str(&format!("{:^10}", format!("{:?}", disk.get_mount_point())));
        out.push_str(": ");
        // Decimal GB (10^9 bytes), matching the "Gb" column headers.
        out.push_str(&format!("{:^12}", format!("{:?}", disk.get_total_space() / 1000000000)));
        out.push_str(": ");
        out.push_str(&format!("{:^12}", format!("{:?}", disk.get_available_space() / 1000000000)));
        out.push_str("\n");
    }
    out
}
// println!("MEMORY");
// println!("total memory: {} KiB", system.get_total_memory());
// println!("used memory : {} KiB", system.get_used_memory());
// println!("total swap : {} KiB", system.get_total_swap());
// println!("used swap : {} KiB", system.get_used_swap());
/// Render total/used memory and total/used swap converted from KiB to
/// (approximate) GiB, as a two-line table.
fn get_my_memory(system: &mut sysinfo::System) -> String {
    system.refresh_all();
    // NOTE(review): 1 GiB = 1_048_576 KiB; the 1049000 divisor looks like a
    // rounded approximation of that — confirm intent before changing it.
    let fields = [
        format!("{}", system.get_total_memory() / 1049000),
        format!("{}", system.get_used_memory() / 1049000),
        format!("{}", system.get_total_swap() / 1049000),
        format!("{}", system.get_used_swap() / 1049000),
    ];
    let mut out = String::with_capacity(2048);
    out.push_str(&format!("{:^5}: {:^5}: {:^5}: {:^5}\n", "Total", "Used", "Swap", "Used"));
    let row: Vec<String> = fields.iter().map(|v| format!("{}(GiB)", v)).collect();
    out.push_str(&row.join(": "));
    out.push_str("\n");
    out
}
/// Rebuild the whole TUI for one frame: gather process/cpu/disk/memory
/// snapshots, then replace the top cursive layer with freshly-built dialogs.
fn my_loop(s: &mut Cursive) {
    // NOTE(review): `System::new_all()` re-enumerates everything on every
    // frame; constructing it once and reusing it (each getter already calls
    // `refresh_all`) would be cheaper — confirm before changing.
    let mut system = sysinfo::System::new_all();
    // Drop last frame's layer so layers don't pile up once per second.
    s.pop_layer();
    let process_string = get_my_processes(&mut system);
    let cpu_string = get_my_cpu_usage(&mut system);
    let disk_string = get_my_disks(&mut system);
    let memory_string = get_my_memory(&mut system);
    let process = Dialog::text(process_string).title("Running Processes");
    let cpu = Dialog::text(cpu_string).title("CPU Usage");
    let disks = Dialog::text(disk_string).title("Disks Info");
    let memory = Dialog::text(memory_string).title("Memory Info");
    // Layout: cpu and memory stacked on top, processes and disks side by side.
    let layout = LinearLayout::vertical()
        // .child(LinearLayout::horizontal()
        .child(cpu)
        .child(memory)
        .child(LinearLayout::horizontal()
            .child(process)
            .child(disks));
    s.add_layer(layout);
}
/// Entry point: drive cursive manually (instead of `siv.run()`) so the UI
/// can be rebuilt and redrawn roughly once per second; 'q' quits.
fn main() {
    let mut siv = cursive::default();
    siv.add_global_callback('q', |s| s.quit());
    while siv.is_running() {
        my_loop(&mut siv);
        // Process one event batch, then force a redraw of the new layer.
        siv.step();
        siv.refresh();
        thread::sleep(time::Duration::from_millis(1000));
    }
    // println!("NETWORK");
    // for (interface_name, data) in system.get_networks() {
    // println!("{}: {}/{} B", interface_name, data.get_received(), data.get_transmitted());
    // }
}
|
//!
use std::fmt;
use serde::{Deserialize, Serialize};
/// Expands to a [`Location`] value capturing the source file, line, and
/// column of the macro invocation site via `file!`/`line!`/`column!`.
#[macro_export]
macro_rules! location {
    () => {
        $crate::Location {
            file: file!().to_string(),
            line: line!(),
            column: column!(),
        }
    }
}
/// A source-code position: file path plus 1-based line and column, as
/// produced by the [`location!`] macro.
#[derive(Clone, Debug, Default, PartialEq, Eq, PartialOrd, Ord, Hash)]
#[derive(Deserialize, Serialize)]
pub struct Location {
    // Path as reported by `file!()` (relative to the crate root).
    pub file: String,
    pub line: u32,
    pub column: u32,
}
impl fmt::Display for Location {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{}:{}:{}", self.file, self.line, self.column)
}
}
#[cfg(test)]
mod tests {
    use super::*;
    // Default is the empty file at 0:0.
    #[test]
    fn default_location() {
        let Location { file, line, column } = Location::default();
        assert_eq!( file, "");
        assert_eq!( line, 0);
        assert_eq!(column, 0);
    }
    // NOTE(review): this test hard-codes the invocation's file, line, and
    // column, so ANY edit that shifts this file's layout breaks it — it
    // asserts against the macro expansion site, not stable behavior.
    #[test]
    fn location_macro() {
        let Location { file, line, column } = location!();
        assert_eq!( file, "src/lib.rs");
        assert_eq!( line, 47);
        assert_eq!(column, 47);
    }
}
|
// Copyright 2016 coroutine-rs Developers
//
// Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or
// http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
// http://opensource.org/licenses/MIT>, at your option. This file may not be
// copied, modified, or distributed except according to those terms.
#![cfg_attr(feature = "nightly", feature(repr_simd))]
#![deny(missing_docs)]
//! This project provides an easy interface to the famous **Boost.Context** library
//! and thus the building blocks for higher-level abstractions, like coroutines,
//! cooperative threads (userland threads) or an equivalent to the C# keyword "yield".
extern crate libc;
#[cfg(windows)]
extern crate kernel32;
#[cfg(windows)]
extern crate winapi;
/// Provides the `Context` and `Transfer` types for
/// saving and restoring the current state of execution.
///
/// See the `Context` struct for more information.
pub mod context;
/// Provides utilities to allocate memory suitable as stack memory for `Context`.
pub mod stack;
mod sys;
pub use context::{Context, Transfer, ContextFn, ResumeOntopFn};
|
use std::convert::{TryFrom, TryInto};
use proc_macro2::Span;
use syn::{spanned::Spanned, Error, ExprIf, Result};
use crate::glsl::Glsl;
use crate::glsl::GlslFragment;
use crate::glsl::GlslLine;
use super::YaslExprFunctionScope;
use super::YaslExprLineScope;
use crate::yasl_block::YaslBlock;
/// An `if`/`else` expression in the YASL AST, mirroring `syn::ExprIf` but
/// restricted to the condition/block types YASL can lower to GLSL.
#[derive(Debug)]
pub struct YaslExprIf {
    if_token: syn::token::If,
    // Condition restricted to line-scope expressions.
    cond: Box<YaslExprLineScope>,
    then_branch: Box<YaslBlock>,
    // `else if` chains are represented by the function-scope expr type.
    else_branch: Option<(syn::token::Else, Box<YaslExprFunctionScope>)>,
}
impl YaslExprIf {
    /// Span of the `if` keyword, used for error reporting.
    pub fn span(&self) -> Span {
        self.if_token.span()
    }
}
impl From<&YaslExprIf> for Glsl {
    /// Lower the `if` expression into GLSL: an `if(cond)` header line, the
    /// `then` block, and optionally an `else` line followed by its body.
    fn from(expr: &YaslExprIf) -> Glsl {
        let header = Glsl::Line(GlslLine {
            span: Some(expr.if_token.span()),
            ends_with_semi: false,
            glsl_string: format!("if({})", Glsl::from(&*expr.cond)),
        });
        let mut elements = vec![header, Glsl::from(&*expr.then_branch)];
        if let Some((else_token, else_body)) = &expr.else_branch {
            let keyword = Glsl::Line(GlslLine {
                span: Some(else_token.span()),
                ends_with_semi: false,
                glsl_string: "else".into(),
            });
            let body = (&**else_body).into();
            // Group the `else` keyword with its body as one fragment.
            elements.push(Glsl::Fragment(GlslFragment {
                elements: vec![keyword, body],
            }));
        }
        Glsl::Fragment(GlslFragment { elements })
    }
}
impl TryFrom<ExprIf> for YaslExprIf {
    type Error = Error;
    /// Convert a parsed `syn` if-expression, recursively converting the
    /// condition, then-block, and optional else-branch; fails if any piece
    /// is not representable in YASL.
    fn try_from(c: ExprIf) -> Result<Self> {
        let if_token = c.if_token;
        let cond = Box::new((*c.cond).try_into()?);
        let then_branch = Box::new(c.then_branch.try_into()?);
        // Convert the optional else branch; `transpose` turns the
        // `Option<Result<_>>` inside-out so `?` can propagate the error.
        let else_branch = c
            .else_branch
            .map(|(e, expr)| (*expr).try_into().map(|b| (e, Box::new(b))))
            .transpose()?;
        Ok(Self {
            if_token,
            cond,
            then_branch,
            else_branch,
        })
    }
}
|
#![no_main]
#[macro_use] extern crate libfuzzer_sys;
extern crate parser_c;
use std::str;
// Fuzz entry point: feed arbitrary bytes to the C parser. Inputs that are
// not valid UTF-8 are skipped; parse errors are ignored on purpose — the
// fuzzer is only hunting for panics/crashes, not rejected input.
fuzz_target!(|data: &[u8]| {
    if let Ok(data) = str::from_utf8(&data) {
        let _ = parser_c::parse(&data, "input");
    }
});
|
fn main() {
let puzzle = "iwrupvqb";
for i in 0.. {
let puzzle_number = String::from(puzzle) + &i.to_string();
let md5 = md5::compute(&puzzle_number);
if hex::encode(*md5).to_string().starts_with("000000") {
dbg!(puzzle_number, md5);
break;
}
}
} |
use std::ops::Add;
use bevy::{core::FixedTimestep, prelude::*};
/// A single directional input action.
// NOTE(review): Rust convention is UpperCamelCase variants (`Left`, …);
// renaming would touch every match in this file, so left as-is here.
#[derive(Debug, Copy, Clone)]
enum InputCommand {
    LEFT,
    RIGHT,
    UP,
    DOWN,
}
/// An input command tagged with the id of the player who issued it.
struct OwnedInput {
    owner_id: u8,
    command: InputCommand,
}
/// Global tuning values shared by all players (bevy resource).
struct PlayerConfig {
    // Translation units applied per processed movement frame.
    move_speed: f32,
}
/// Identifies a player entity.
struct Player {
    id: u8,
    name: String,
}
/// Frame-global buffer of raw inputs, routed to players by `input_system`
/// (bevy resource).
struct GlobalInput {
    input_buffer: Vec<OwnedInput>,
}
/// Per-entity queue of commands consumed by `movement_system`.
struct InputComponent {
    commands: Vec<InputCommand>,
}
/// Startup system: spawn the 2D camera and one player entity (a 30x30
/// sprite at the origin) with id 0 and an empty input queue.
fn init_players(mut commands: Commands, mut materials: ResMut<Assets<ColorMaterial>>) {
    // camera
    commands.spawn_bundle(OrthographicCameraBundle::new_2d());
    //Players
    commands
        .spawn_bundle(SpriteBundle {
            material: materials.add(Color::rgb(0.5, 0.5, 1.0).into()),
            transform: Transform::from_translation(Vec3::new(0.0, 0.0, 0.0)),
            sprite: Sprite::new(Vec2::new(30.0, 30.0)),
            ..Default::default()
        })
        .insert(Player {
            id: 0,
            name: "One".to_string(),
        })
        .insert(InputComponent {
            commands: Vec::new(),
        });
}
/// Translate currently-held arrow keys into `OwnedInput` records for player
/// 0 and append them to the global input buffer (one per pressed key per
/// tick).
fn keyboard_input_system(
    keyboard_input: Res<Input<KeyCode>>,
    mut global_input: ResMut<GlobalInput>,
) {
    for keycode in keyboard_input.get_pressed() {
        // Map the key to a command first; non-arrow keys produce nothing.
        let command = match keycode {
            KeyCode::Left => Some(InputCommand::LEFT),
            KeyCode::Right => Some(InputCommand::RIGHT),
            KeyCode::Up => Some(InputCommand::UP),
            KeyCode::Down => Some(InputCommand::DOWN),
            _ => None,
        };
        if let Some(command) = command {
            global_input.input_buffer.push(OwnedInput {
                owner_id: 0,
                command,
            });
        }
    }
}
/// Route every buffered global input to the `InputComponent` of the player
/// whose id matches, emptying the buffer in the process.
fn input_system(
    mut global_input: ResMut<GlobalInput>,
    mut query: Query<(&Player, &mut InputComponent)>,
) {
    // `drain(..)` both iterates and clears the buffer.
    for owned_input in global_input.input_buffer.drain(..) {
        for (player, mut input) in query.iter_mut() {
            if player.id == owned_input.owner_id {
                input.commands.push(owned_input.command);
            }
        }
    }
}
/// Consume each entity's queued commands, accumulate a direction vector,
/// and move the transform by `move_speed` along its normalized direction.
fn movement_system(
    player_config: Res<PlayerConfig>,
    mut query: Query<(&mut Transform, &mut InputComponent)>,
) {
    for (mut transform, mut input) in query.iter_mut() {
        let mut velocity = Vec3::ZERO;
        // Pop until empty; summation order is irrelevant for unit steps.
        while let Some(command) = input.commands.pop() {
            let step = match command {
                InputCommand::LEFT => Vec3::new(-1.0, 0.0, 0.0),
                InputCommand::RIGHT => Vec3::new(1.0, 0.0, 0.0),
                InputCommand::UP => Vec3::new(0.0, 1.0, 0.0),
                InputCommand::DOWN => Vec3::new(0.0, -1.0, 0.0),
            };
            velocity = velocity.add(step);
        }
        // Normalizing keeps diagonal movement the same speed as cardinal;
        // normalize() is undefined on the zero vector, hence the guard.
        if velocity.length() > 0.0 {
            transform.translation =
                transform.translation + (velocity.normalize() * player_config.move_speed);
        }
    }
}
/// Build the app: default plugins, config/input resources, player spawn at
/// startup, and the three gameplay systems run on a fixed 60 Hz timestep.
fn main() {
    App::build()
        .add_plugins(DefaultPlugins)
        .insert_resource(PlayerConfig { move_speed: 5.0 })
        .insert_resource(GlobalInput {
            input_buffer: Vec::new(),
        })
        .add_startup_system(init_players.system())
        .add_system_set(
            SystemSet::new()
                .with_run_criteria(FixedTimestep::step(1.0 / 60.0))
                .with_system(keyboard_input_system.system())
                .with_system(input_system.system())
                .with_system(movement_system.system()),
        )
        .run();
}
|
use std::cmp::Ordering::{self, *};
use crate::{Atomize, IsBot, IsTop, LatticeFrom, LatticeOrd, Merge};
/// Wraps a lattice in [`Option`], treating [`None`] as a new bottom element which compares as less
/// than to all other values.
///
/// This can be used for giving a sensible default/bottom element to lattices that don't
/// necessarily have one.
#[repr(transparent)]
#[derive(Copy, Clone, Debug)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
// `None` is the added bottom element; `Some(inner)` delegates to `Inner`.
pub struct WithBot<Inner>(pub Option<Inner>);
impl<Inner> WithBot<Inner> {
    /// Create a new `WithBot` lattice instance from a value.
    pub fn new(val: Option<Inner>) -> Self {
        Self(val)
    }
    /// Create a new `WithBot` lattice instance from a value using `Into`.
    /// (`Inner: Into<Option<Inner>>`, so a bare inner value works too.)
    pub fn new_from(val: impl Into<Option<Inner>>) -> Self {
        Self::new(val.into())
    }
    /// Reveal the inner value as a shared reference.
    pub fn as_reveal_ref(&self) -> Option<&Inner> {
        self.0.as_ref()
    }
    /// Reveal the inner value as an exclusive reference.
    pub fn as_reveal_mut(&mut self) -> Option<&mut Inner> {
        self.0.as_mut()
    }
    /// Gets the inner by value, consuming self.
    pub fn into_reveal(self) -> Option<Inner> {
        self.0
    }
}
// Cannot auto derive because the generated implementation has the wrong trait bounds.
// https://github.com/rust-lang/rust/issues/26925
impl<Inner> Default for WithBot<Inner> {
    // The default is bottom (`None`), NOT `Inner::default()` — this is why
    // the impl has no `Inner: Default` bound.
    fn default() -> Self {
        Self(None)
    }
}
impl<Inner, Other> Merge<WithBot<Other>> for WithBot<Inner>
where
    Inner: Merge<Other> + LatticeFrom<Other>,
    Other: IsBot,
{
    /// Merge `other` into `self`; returns `true` iff `self` changed.
    fn merge(&mut self, other: WithBot<Other>) -> bool {
        match (&mut self.0, other.0) {
            // Bottom absorbing a non-bottom value: lift it in. A `Some` whose
            // inner `is_bot()` is deliberately NOT lifted, keeping
            // `WithBot(Some(bot))` equivalent to `WithBot(None)`.
            (this @ None, Some(other_inner)) if !other_inner.is_bot() => {
                *this = Some(LatticeFrom::lattice_from(other_inner));
                true
            }
            (Some(self_inner), Some(other_inner)) => self_inner.merge(other_inner),
            // Merging bottom (None, or a bot-valued Some) into anything: no-op.
            (_self, _none_or_bot) => false,
        }
    }
}
impl<Inner, Other> LatticeFrom<WithBot<Other>> for WithBot<Inner>
where
    Inner: LatticeFrom<Other>,
{
    // Structure-preserving conversion: bottom maps to bottom, inner values
    // convert via `Inner::lattice_from`.
    fn lattice_from(other: WithBot<Other>) -> Self {
        Self(other.0.map(Inner::lattice_from))
    }
}
impl<Inner, Other> PartialOrd<WithBot<Other>> for WithBot<Inner>
where
    Inner: PartialOrd<Other> + IsBot,
    Other: IsBot,
{
    fn partial_cmp(&self, other: &WithBot<Other>) -> Option<Ordering> {
        match (&self.0, &other.0) {
            (None, None) => Some(Equal),
            // `Some(bot)` collapses to bottom, so it compares equal to `None`
            // — mirroring the `PartialEq` impl below.
            (None, Some(bot)) if bot.is_bot() => Some(Equal),
            (Some(bot), None) if bot.is_bot() => Some(Equal),
            // Bottom is strictly below every non-bottom value.
            (None, Some(_)) => Some(Less),
            (Some(_), None) => Some(Greater),
            (Some(this_inner), Some(other_inner)) => this_inner.partial_cmp(other_inner),
        }
    }
}
// Marker impl: the `PartialOrd` above is the lattice order.
impl<Inner, Other> LatticeOrd<WithBot<Other>> for WithBot<Inner> where
    Self: PartialOrd<WithBot<Other>>
{
}
impl<Inner, Other> PartialEq<WithBot<Other>> for WithBot<Inner>
where
    Inner: PartialEq<Other> + IsBot,
    Other: IsBot,
{
    fn eq(&self, other: &WithBot<Other>) -> bool {
        match (&self.0, &other.0) {
            (None, None) => true,
            // Bot-collapsing equality: `Some(bot)` == `None`, consistent with
            // the `PartialOrd` impl above.
            (None, Some(bot)) if bot.is_bot() => true,
            (Some(bot), None) if bot.is_bot() => true,
            (None, Some(_)) => false,
            (Some(_), None) => false,
            (Some(this_inner), Some(other_inner)) => this_inner == other_inner,
        }
    }
}
impl<Inner> Eq for WithBot<Inner> where Self: PartialEq {}
impl<Inner> IsBot for WithBot<Inner>
where
    Inner: IsBot,
{
    /// `None` is bottom by definition; `Some(inner)` is bottom iff `inner` is.
    fn is_bot(&self) -> bool {
        match &self.0 {
            None => true,
            Some(inner) => inner.is_bot(),
        }
    }
}
impl<Inner> IsTop for WithBot<Inner>
where
    Inner: IsTop,
{
    /// Bottom (`None`) is never top; `Some(inner)` is top iff `inner` is.
    fn is_top(&self) -> bool {
        match &self.0 {
            None => false,
            Some(inner) => inner.is_top(),
        }
    }
}
impl<Inner> Atomize for WithBot<Inner>
where
    Inner: 'static + Atomize + LatticeFrom<<Inner as Atomize>::Atom>,
{
    type Atom = WithBot<Inner::Atom>;
    // TODO: use impl trait.
    type AtomIter = Box<dyn Iterator<Item = Self::Atom>>;
    /// Decompose into atoms by atomizing the inner value and re-wrapping each
    /// atom; bottom (`None`) atomizes to an empty iterator.
    fn atomize(self) -> Self::AtomIter {
        Box::new(
            self.0
                .into_iter()
                .flat_map(Atomize::atomize)
                .map(WithBot::new_from),
        )
    }
}
#[cfg(test)]
mod test {
    use super::*;
    use crate::set_union::{SetUnionHashSet, SetUnionSingletonSet};
    use crate::test::{check_all, check_atomize_each};
    // Merging a singleton delta into a bottom-wrapped set changes it once,
    // then the identical merge is a no-op. (Relies on these lattices being
    // `Copy`, since the delta is passed by value twice.)
    #[test]
    fn test_singly_nested_singleton_example() {
        let mut my_hash_set = WithBot::new_from(SetUnionHashSet::<&str>::default());
        let my_delta_set = WithBot::new_from(SetUnionSingletonSet::new_from("hello world"));
        assert!(my_hash_set.merge(my_delta_set)); // Changes
        assert!(!my_hash_set.merge(my_delta_set)); // No changes
    }
    // Same as above, but through two levels of `WithBot` nesting.
    #[test]
    fn test_doubly_nested_singleton_example() {
        let mut my_hash_set =
            WithBot::new_from(WithBot::new_from(SetUnionHashSet::<&str>::default()));
        let my_delta_set = WithBot::new_from(WithBot::new_from(SetUnionSingletonSet::new_from(
            "hello world",
        )));
        assert!(my_hash_set.merge(my_delta_set)); // Changes
        assert!(!my_hash_set.merge(my_delta_set)); // No changes
    }
    #[test]
    #[rustfmt::skip]
    fn auto_derives() {
        type B = WithBot<SetUnionHashSet<usize>>;
        assert_eq!(B::default().partial_cmp(&B::default()), Some(Equal));
        // Test bot collapsing - `WithBot(Some(Bot))` equals `WithBot(None)`.
        assert_eq!(B::new_from(SetUnionHashSet::new_from([])).partial_cmp(&B::default()), Some(Equal));
        assert_eq!(B::default().partial_cmp(&B::new_from(SetUnionHashSet::new_from([]))), Some(Equal));
        assert!(B::new_from(SetUnionHashSet::new_from([])).eq(&B::default()));
        assert!(B::default().eq(&B::new_from(SetUnionHashSet::new_from([]))));
        // PartialOrd
        assert_eq!(B::new_from(SetUnionHashSet::new_from([])).partial_cmp(&B::new_from(SetUnionHashSet::new_from([]))), Some(Equal));
        assert_eq!(B::new_from(SetUnionHashSet::new_from([0])).partial_cmp(&B::new_from(SetUnionHashSet::new_from([]))), Some(Greater));
        assert_eq!(B::new_from(SetUnionHashSet::new_from([])).partial_cmp(&B::new_from(SetUnionHashSet::new_from([0]))), Some(Less));
        assert_eq!(B::new_from(SetUnionHashSet::new_from([0])).partial_cmp(&B::new_from(SetUnionHashSet::new_from([1]))), None);
        // PartialEq
        assert!(B::default().eq(&B::default()));
        assert!(B::new_from(SetUnionHashSet::new_from([])).eq(&B::new_from(SetUnionHashSet::new_from([]))));
        assert!(!B::new_from(SetUnionHashSet::new_from([0])).eq(&B::new_from(SetUnionHashSet::new_from([]))));
        assert!(!B::new_from(SetUnionHashSet::new_from([])).eq(&B::new_from(SetUnionHashSet::new_from([0]))));
        assert!(!B::new_from(SetUnionHashSet::new_from([0])).eq(&B::new_from(SetUnionHashSet::new_from([1]))));
    }
    // Lattice-law property checks over a small, representative sample.
    #[test]
    fn consistency() {
        check_all(&[
            WithBot::default(),
            WithBot::new_from(SetUnionHashSet::new_from([])),
            WithBot::new_from(SetUnionHashSet::new_from([0])),
            WithBot::new_from(SetUnionHashSet::new_from([1])),
            WithBot::new_from(SetUnionHashSet::new_from([0, 1])),
        ])
    }
    // Atoms of each sample must re-merge to the original value.
    #[test]
    fn atomize() {
        check_atomize_each(&[
            WithBot::default(),
            WithBot::new_from(SetUnionHashSet::new_from([])),
            WithBot::new_from(SetUnionHashSet::new_from([0])),
            WithBot::new_from(SetUnionHashSet::new_from([1])),
            WithBot::new_from(SetUnionHashSet::new_from([0, 1])),
            WithBot::new_from(SetUnionHashSet::new((0..10).collect())),
        ]);
    }
}
|
#![doc = "generated by AutoRust 0.1.0"]
#![allow(unused_mut)]
#![allow(unused_variables)]
#![allow(unused_imports)]
use crate::models::*;
use reqwest::StatusCode;
use snafu::{ResultExt, Snafu};
// Auto-generated (AutoRust) Azure Desktop Virtualization client code; prefer
// regenerating over hand-editing.
pub mod operations {
    use crate::models::*;
    use reqwest::StatusCode;
    use snafu::{ResultExt, Snafu};
    /// List the operations exposed by the Microsoft.DesktopVirtualization
    /// resource provider. Any non-200 status is surfaced as
    /// `list::Error::UnexpectedResponse` with the raw body.
    pub async fn list(operation_config: &crate::OperationConfig) -> std::result::Result<ResourceProviderOperationList, list::Error> {
        let client = &operation_config.client;
        let uri_str = &format!(
            "{}/providers/Microsoft.DesktopVirtualization/operations",
            &operation_config.base_path,
        );
        let mut req_builder = client.get(uri_str);
        // Bearer auth is optional: only attached when a credential is configured.
        if let Some(token_credential) = &operation_config.token_credential {
            let token_response = token_credential
                .get_token(&operation_config.token_credential_resource)
                .await
                .context(list::GetTokenError)?;
            req_builder = req_builder.bearer_auth(token_response.token.secret());
        }
        req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]);
        let req = req_builder.build().context(list::BuildRequestError)?;
        let rsp = client.execute(req).await.context(list::ExecuteRequestError)?;
        match rsp.status() {
            StatusCode::OK => {
                let body: bytes::Bytes = rsp.bytes().await.context(list::ResponseBytesError)?;
                let rsp_value: ResourceProviderOperationList = serde_json::from_slice(&body).context(list::DeserializeError { body })?;
                Ok(rsp_value)
            }
            status_code => {
                let body: bytes::Bytes = rsp.bytes().await.context(list::ResponseBytesError)?;
                list::UnexpectedResponse { status_code, body: body }.fail()
            }
        }
    }
    /// Error types for [`list`], one variant per failure stage.
    pub mod list {
        use crate::{models, models::*};
        use reqwest::StatusCode;
        use snafu::Snafu;
        #[derive(Debug, Snafu)]
        #[snafu(visibility(pub(crate)))]
        pub enum Error {
            UnexpectedResponse { status_code: StatusCode, body: bytes::Bytes },
            BuildRequestError { source: reqwest::Error },
            ExecuteRequestError { source: reqwest::Error },
            ResponseBytesError { source: reqwest::Error },
            DeserializeError { source: serde_json::Error, body: bytes::Bytes },
            GetTokenError { source: azure_core::errors::AzureError },
        }
    }
}
pub mod workspaces {
use crate::models::*;
use reqwest::StatusCode;
use snafu::{ResultExt, Snafu};
    /// GET a single workspace by name. Non-200 responses are parsed as a
    /// `CloudError` and returned as `get::Error::DefaultResponse`.
    pub async fn get(
        operation_config: &crate::OperationConfig,
        subscription_id: &str,
        resource_group_name: &str,
        workspace_name: &str,
    ) -> std::result::Result<Workspace, get::Error> {
        let client = &operation_config.client;
        let uri_str = &format!(
            "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.DesktopVirtualization/workspaces/{}",
            &operation_config.base_path, subscription_id, resource_group_name, workspace_name
        );
        let mut req_builder = client.get(uri_str);
        // Bearer auth only when a credential is configured.
        if let Some(token_credential) = &operation_config.token_credential {
            let token_response = token_credential
                .get_token(&operation_config.token_credential_resource)
                .await
                .context(get::GetTokenError)?;
            req_builder = req_builder.bearer_auth(token_response.token.secret());
        }
        req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]);
        let req = req_builder.build().context(get::BuildRequestError)?;
        let rsp = client.execute(req).await.context(get::ExecuteRequestError)?;
        match rsp.status() {
            StatusCode::OK => {
                let body: bytes::Bytes = rsp.bytes().await.context(get::ResponseBytesError)?;
                let rsp_value: Workspace = serde_json::from_slice(&body).context(get::DeserializeError { body })?;
                Ok(rsp_value)
            }
            status_code => {
                let body: bytes::Bytes = rsp.bytes().await.context(get::ResponseBytesError)?;
                let rsp_value: CloudError = serde_json::from_slice(&body).context(get::DeserializeError { body })?;
                get::DefaultResponse {
                    status_code,
                    value: rsp_value,
                }
                .fail()
            }
        }
    }
    /// Error types for [`get`], one variant per failure stage.
    pub mod get {
        use crate::{models, models::*};
        use reqwest::StatusCode;
        use snafu::Snafu;
        #[derive(Debug, Snafu)]
        #[snafu(visibility(pub(crate)))]
        pub enum Error {
            // Service returned a non-200 status with a parsed CloudError body.
            DefaultResponse {
                status_code: StatusCode,
                value: models::CloudError,
            },
            BuildRequestError {
                source: reqwest::Error,
            },
            ExecuteRequestError {
                source: reqwest::Error,
            },
            ResponseBytesError {
                source: reqwest::Error,
            },
            DeserializeError {
                source: serde_json::Error,
                body: bytes::Bytes,
            },
            GetTokenError {
                source: azure_core::errors::AzureError,
            },
        }
    }
    /// PUT (create or replace) a workspace. Returns `Ok200` when the
    /// workspace was updated and `Created201` when it was created; any other
    /// status is parsed as a `CloudError`.
    pub async fn create_or_update(
        operation_config: &crate::OperationConfig,
        subscription_id: &str,
        resource_group_name: &str,
        workspace_name: &str,
        workspace: &Workspace,
    ) -> std::result::Result<create_or_update::Response, create_or_update::Error> {
        let client = &operation_config.client;
        let uri_str = &format!(
            "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.DesktopVirtualization/workspaces/{}",
            &operation_config.base_path, subscription_id, resource_group_name, workspace_name
        );
        let mut req_builder = client.put(uri_str);
        // Bearer auth only when a credential is configured.
        if let Some(token_credential) = &operation_config.token_credential {
            let token_response = token_credential
                .get_token(&operation_config.token_credential_resource)
                .await
                .context(create_or_update::GetTokenError)?;
            req_builder = req_builder.bearer_auth(token_response.token.secret());
        }
        req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]);
        req_builder = req_builder.json(workspace);
        let req = req_builder.build().context(create_or_update::BuildRequestError)?;
        let rsp = client.execute(req).await.context(create_or_update::ExecuteRequestError)?;
        match rsp.status() {
            StatusCode::OK => {
                let body: bytes::Bytes = rsp.bytes().await.context(create_or_update::ResponseBytesError)?;
                let rsp_value: Workspace = serde_json::from_slice(&body).context(create_or_update::DeserializeError { body })?;
                Ok(create_or_update::Response::Ok200(rsp_value))
            }
            StatusCode::CREATED => {
                let body: bytes::Bytes = rsp.bytes().await.context(create_or_update::ResponseBytesError)?;
                let rsp_value: Workspace = serde_json::from_slice(&body).context(create_or_update::DeserializeError { body })?;
                Ok(create_or_update::Response::Created201(rsp_value))
            }
            status_code => {
                let body: bytes::Bytes = rsp.bytes().await.context(create_or_update::ResponseBytesError)?;
                let rsp_value: CloudError = serde_json::from_slice(&body).context(create_or_update::DeserializeError { body })?;
                create_or_update::DefaultResponse {
                    status_code,
                    value: rsp_value,
                }
                .fail()
            }
        }
    }
    /// Response and error types for [`create_or_update`].
    pub mod create_or_update {
        use crate::{models, models::*};
        use reqwest::StatusCode;
        use snafu::Snafu;
        // 200 = updated existing workspace; 201 = created a new one.
        #[derive(Debug)]
        pub enum Response {
            Ok200(Workspace),
            Created201(Workspace),
        }
        #[derive(Debug, Snafu)]
        #[snafu(visibility(pub(crate)))]
        pub enum Error {
            DefaultResponse {
                status_code: StatusCode,
                value: models::CloudError,
            },
            BuildRequestError {
                source: reqwest::Error,
            },
            ExecuteRequestError {
                source: reqwest::Error,
            },
            ResponseBytesError {
                source: reqwest::Error,
            },
            DeserializeError {
                source: serde_json::Error,
                body: bytes::Bytes,
            },
            GetTokenError {
                source: azure_core::errors::AzureError,
            },
        }
    }
    /// PATCH a workspace. The patch body is optional; when `None`, the PATCH
    /// is sent with no JSON body.
    pub async fn update(
        operation_config: &crate::OperationConfig,
        subscription_id: &str,
        resource_group_name: &str,
        workspace_name: &str,
        workspace: Option<&WorkspacePatch>,
    ) -> std::result::Result<Workspace, update::Error> {
        let client = &operation_config.client;
        let uri_str = &format!(
            "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.DesktopVirtualization/workspaces/{}",
            &operation_config.base_path, subscription_id, resource_group_name, workspace_name
        );
        let mut req_builder = client.patch(uri_str);
        // Bearer auth only when a credential is configured.
        if let Some(token_credential) = &operation_config.token_credential {
            let token_response = token_credential
                .get_token(&operation_config.token_credential_resource)
                .await
                .context(update::GetTokenError)?;
            req_builder = req_builder.bearer_auth(token_response.token.secret());
        }
        req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]);
        if let Some(workspace) = workspace {
            req_builder = req_builder.json(workspace);
        }
        let req = req_builder.build().context(update::BuildRequestError)?;
        let rsp = client.execute(req).await.context(update::ExecuteRequestError)?;
        match rsp.status() {
            StatusCode::OK => {
                let body: bytes::Bytes = rsp.bytes().await.context(update::ResponseBytesError)?;
                let rsp_value: Workspace = serde_json::from_slice(&body).context(update::DeserializeError { body })?;
                Ok(rsp_value)
            }
            status_code => {
                let body: bytes::Bytes = rsp.bytes().await.context(update::ResponseBytesError)?;
                let rsp_value: CloudError = serde_json::from_slice(&body).context(update::DeserializeError { body })?;
                update::DefaultResponse {
                    status_code,
                    value: rsp_value,
                }
                .fail()
            }
        }
    }
    /// Error types for [`update`], one variant per failure stage.
    pub mod update {
        use crate::{models, models::*};
        use reqwest::StatusCode;
        use snafu::Snafu;
        #[derive(Debug, Snafu)]
        #[snafu(visibility(pub(crate)))]
        pub enum Error {
            // Service returned a non-200 status with a parsed CloudError body.
            DefaultResponse {
                status_code: StatusCode,
                value: models::CloudError,
            },
            BuildRequestError {
                source: reqwest::Error,
            },
            ExecuteRequestError {
                source: reqwest::Error,
            },
            ResponseBytesError {
                source: reqwest::Error,
            },
            DeserializeError {
                source: serde_json::Error,
                body: bytes::Bytes,
            },
            GetTokenError {
                source: azure_core::errors::AzureError,
            },
        }
    }
pub async fn delete(
operation_config: &crate::OperationConfig,
subscription_id: &str,
resource_group_name: &str,
workspace_name: &str,
) -> std::result::Result<delete::Response, delete::Error> {
let client = &operation_config.client;
let uri_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.DesktopVirtualization/workspaces/{}",
&operation_config.base_path, subscription_id, resource_group_name, workspace_name
);
let mut req_builder = client.delete(uri_str);
if let Some(token_credential) = &operation_config.token_credential {
let token_response = token_credential
.get_token(&operation_config.token_credential_resource)
.await
.context(delete::GetTokenError)?;
req_builder = req_builder.bearer_auth(token_response.token.secret());
}
req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]);
let req = req_builder.build().context(delete::BuildRequestError)?;
let rsp = client.execute(req).await.context(delete::ExecuteRequestError)?;
match rsp.status() {
StatusCode::OK => Ok(delete::Response::Ok200),
StatusCode::NO_CONTENT => Ok(delete::Response::NoContent204),
status_code => {
let body: bytes::Bytes = rsp.bytes().await.context(delete::ResponseBytesError)?;
let rsp_value: CloudError = serde_json::from_slice(&body).context(delete::DeserializeError { body })?;
delete::DefaultResponse {
status_code,
value: rsp_value,
}
.fail()
}
}
}
    /// Response and error types for the sibling `delete` operation.
    pub mod delete {
        use crate::{models, models::*};
        use reqwest::StatusCode;
        use snafu::Snafu;
        /// Successful outcomes of `delete`.
        #[derive(Debug)]
        pub enum Response {
            /// HTTP 200.
            Ok200,
            /// HTTP 204.
            NoContent204,
        }
        /// Everything that can go wrong while performing the operation.
        #[derive(Debug, Snafu)]
        #[snafu(visibility(pub(crate)))]
        pub enum Error {
            /// Service answered with a non-success status; `value` holds its `CloudError` payload.
            DefaultResponse {
                status_code: StatusCode,
                value: models::CloudError,
            },
            /// Building the HTTP request failed.
            BuildRequestError {
                source: reqwest::Error,
            },
            /// Sending the request or awaiting the response failed.
            ExecuteRequestError {
                source: reqwest::Error,
            },
            /// Reading the response body failed.
            ResponseBytesError {
                source: reqwest::Error,
            },
            /// JSON deserialization failed; `body` keeps the raw bytes for diagnosis.
            DeserializeError {
                source: serde_json::Error,
                body: bytes::Bytes,
            },
            /// Acquiring an authentication token failed.
            GetTokenError {
                source: azure_core::errors::AzureError,
            },
        }
    }
pub async fn list_by_resource_group(
operation_config: &crate::OperationConfig,
subscription_id: &str,
resource_group_name: &str,
) -> std::result::Result<WorkspaceList, list_by_resource_group::Error> {
let client = &operation_config.client;
let uri_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.DesktopVirtualization/workspaces",
&operation_config.base_path, subscription_id, resource_group_name
);
let mut req_builder = client.get(uri_str);
if let Some(token_credential) = &operation_config.token_credential {
let token_response = token_credential
.get_token(&operation_config.token_credential_resource)
.await
.context(list_by_resource_group::GetTokenError)?;
req_builder = req_builder.bearer_auth(token_response.token.secret());
}
req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]);
let req = req_builder.build().context(list_by_resource_group::BuildRequestError)?;
let rsp = client.execute(req).await.context(list_by_resource_group::ExecuteRequestError)?;
match rsp.status() {
StatusCode::OK => {
let body: bytes::Bytes = rsp.bytes().await.context(list_by_resource_group::ResponseBytesError)?;
let rsp_value: WorkspaceList = serde_json::from_slice(&body).context(list_by_resource_group::DeserializeError { body })?;
Ok(rsp_value)
}
status_code => {
let body: bytes::Bytes = rsp.bytes().await.context(list_by_resource_group::ResponseBytesError)?;
let rsp_value: CloudError = serde_json::from_slice(&body).context(list_by_resource_group::DeserializeError { body })?;
list_by_resource_group::DefaultResponse {
status_code,
value: rsp_value,
}
.fail()
}
}
}
    /// Error types for the sibling `list_by_resource_group` operation.
    pub mod list_by_resource_group {
        use crate::{models, models::*};
        use reqwest::StatusCode;
        use snafu::Snafu;
        /// Everything that can go wrong while performing the operation.
        #[derive(Debug, Snafu)]
        #[snafu(visibility(pub(crate)))]
        pub enum Error {
            /// Service answered with a non-success status; `value` holds its `CloudError` payload.
            DefaultResponse {
                status_code: StatusCode,
                value: models::CloudError,
            },
            /// Building the HTTP request failed.
            BuildRequestError {
                source: reqwest::Error,
            },
            /// Sending the request or awaiting the response failed.
            ExecuteRequestError {
                source: reqwest::Error,
            },
            /// Reading the response body failed.
            ResponseBytesError {
                source: reqwest::Error,
            },
            /// JSON deserialization failed; `body` keeps the raw bytes for diagnosis.
            DeserializeError {
                source: serde_json::Error,
                body: bytes::Bytes,
            },
            /// Acquiring an authentication token failed.
            GetTokenError {
                source: azure_core::errors::AzureError,
            },
        }
    }
pub async fn list_by_subscription(
operation_config: &crate::OperationConfig,
subscription_id: &str,
) -> std::result::Result<WorkspaceList, list_by_subscription::Error> {
let client = &operation_config.client;
let uri_str = &format!(
"{}/subscriptions/{}/providers/Microsoft.DesktopVirtualization/workspaces",
&operation_config.base_path, subscription_id
);
let mut req_builder = client.get(uri_str);
if let Some(token_credential) = &operation_config.token_credential {
let token_response = token_credential
.get_token(&operation_config.token_credential_resource)
.await
.context(list_by_subscription::GetTokenError)?;
req_builder = req_builder.bearer_auth(token_response.token.secret());
}
req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]);
let req = req_builder.build().context(list_by_subscription::BuildRequestError)?;
let rsp = client.execute(req).await.context(list_by_subscription::ExecuteRequestError)?;
match rsp.status() {
StatusCode::OK => {
let body: bytes::Bytes = rsp.bytes().await.context(list_by_subscription::ResponseBytesError)?;
let rsp_value: WorkspaceList = serde_json::from_slice(&body).context(list_by_subscription::DeserializeError { body })?;
Ok(rsp_value)
}
status_code => {
let body: bytes::Bytes = rsp.bytes().await.context(list_by_subscription::ResponseBytesError)?;
let rsp_value: CloudError = serde_json::from_slice(&body).context(list_by_subscription::DeserializeError { body })?;
list_by_subscription::DefaultResponse {
status_code,
value: rsp_value,
}
.fail()
}
}
}
    /// Error types for the sibling `list_by_subscription` operation.
    pub mod list_by_subscription {
        use crate::{models, models::*};
        use reqwest::StatusCode;
        use snafu::Snafu;
        /// Everything that can go wrong while performing the operation.
        #[derive(Debug, Snafu)]
        #[snafu(visibility(pub(crate)))]
        pub enum Error {
            /// Service answered with a non-success status; `value` holds its `CloudError` payload.
            DefaultResponse {
                status_code: StatusCode,
                value: models::CloudError,
            },
            /// Building the HTTP request failed.
            BuildRequestError {
                source: reqwest::Error,
            },
            /// Sending the request or awaiting the response failed.
            ExecuteRequestError {
                source: reqwest::Error,
            },
            /// Reading the response body failed.
            ResponseBytesError {
                source: reqwest::Error,
            },
            /// JSON deserialization failed; `body` keeps the raw bytes for diagnosis.
            DeserializeError {
                source: serde_json::Error,
                body: bytes::Bytes,
            },
            /// Acquiring an authentication token failed.
            GetTokenError {
                source: azure_core::errors::AzureError,
            },
        }
    }
}
pub mod application_groups {
use crate::models::*;
use reqwest::StatusCode;
use snafu::{ResultExt, Snafu};
pub async fn get(
operation_config: &crate::OperationConfig,
subscription_id: &str,
resource_group_name: &str,
application_group_name: &str,
) -> std::result::Result<ApplicationGroup, get::Error> {
let client = &operation_config.client;
let uri_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.DesktopVirtualization/applicationGroups/{}",
&operation_config.base_path, subscription_id, resource_group_name, application_group_name
);
let mut req_builder = client.get(uri_str);
if let Some(token_credential) = &operation_config.token_credential {
let token_response = token_credential
.get_token(&operation_config.token_credential_resource)
.await
.context(get::GetTokenError)?;
req_builder = req_builder.bearer_auth(token_response.token.secret());
}
req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]);
let req = req_builder.build().context(get::BuildRequestError)?;
let rsp = client.execute(req).await.context(get::ExecuteRequestError)?;
match rsp.status() {
StatusCode::OK => {
let body: bytes::Bytes = rsp.bytes().await.context(get::ResponseBytesError)?;
let rsp_value: ApplicationGroup = serde_json::from_slice(&body).context(get::DeserializeError { body })?;
Ok(rsp_value)
}
status_code => {
let body: bytes::Bytes = rsp.bytes().await.context(get::ResponseBytesError)?;
let rsp_value: CloudError = serde_json::from_slice(&body).context(get::DeserializeError { body })?;
get::DefaultResponse {
status_code,
value: rsp_value,
}
.fail()
}
}
}
    /// Error types for the sibling `get` operation.
    pub mod get {
        use crate::{models, models::*};
        use reqwest::StatusCode;
        use snafu::Snafu;
        /// Everything that can go wrong while performing the operation.
        #[derive(Debug, Snafu)]
        #[snafu(visibility(pub(crate)))]
        pub enum Error {
            /// Service answered with a non-success status; `value` holds its `CloudError` payload.
            DefaultResponse {
                status_code: StatusCode,
                value: models::CloudError,
            },
            /// Building the HTTP request failed.
            BuildRequestError {
                source: reqwest::Error,
            },
            /// Sending the request or awaiting the response failed.
            ExecuteRequestError {
                source: reqwest::Error,
            },
            /// Reading the response body failed.
            ResponseBytesError {
                source: reqwest::Error,
            },
            /// JSON deserialization failed; `body` keeps the raw bytes for diagnosis.
            DeserializeError {
                source: serde_json::Error,
                body: bytes::Bytes,
            },
            /// Acquiring an authentication token failed.
            GetTokenError {
                source: azure_core::errors::AzureError,
            },
        }
    }
pub async fn create_or_update(
operation_config: &crate::OperationConfig,
subscription_id: &str,
resource_group_name: &str,
application_group_name: &str,
application_group: &ApplicationGroup,
) -> std::result::Result<create_or_update::Response, create_or_update::Error> {
let client = &operation_config.client;
let uri_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.DesktopVirtualization/applicationGroups/{}",
&operation_config.base_path, subscription_id, resource_group_name, application_group_name
);
let mut req_builder = client.put(uri_str);
if let Some(token_credential) = &operation_config.token_credential {
let token_response = token_credential
.get_token(&operation_config.token_credential_resource)
.await
.context(create_or_update::GetTokenError)?;
req_builder = req_builder.bearer_auth(token_response.token.secret());
}
req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]);
req_builder = req_builder.json(application_group);
let req = req_builder.build().context(create_or_update::BuildRequestError)?;
let rsp = client.execute(req).await.context(create_or_update::ExecuteRequestError)?;
match rsp.status() {
StatusCode::OK => {
let body: bytes::Bytes = rsp.bytes().await.context(create_or_update::ResponseBytesError)?;
let rsp_value: ApplicationGroup = serde_json::from_slice(&body).context(create_or_update::DeserializeError { body })?;
Ok(create_or_update::Response::Ok200(rsp_value))
}
StatusCode::CREATED => {
let body: bytes::Bytes = rsp.bytes().await.context(create_or_update::ResponseBytesError)?;
let rsp_value: ApplicationGroup = serde_json::from_slice(&body).context(create_or_update::DeserializeError { body })?;
Ok(create_or_update::Response::Created201(rsp_value))
}
status_code => {
let body: bytes::Bytes = rsp.bytes().await.context(create_or_update::ResponseBytesError)?;
let rsp_value: CloudError = serde_json::from_slice(&body).context(create_or_update::DeserializeError { body })?;
create_or_update::DefaultResponse {
status_code,
value: rsp_value,
}
.fail()
}
}
}
    /// Response and error types for the sibling `create_or_update` operation.
    pub mod create_or_update {
        use crate::{models, models::*};
        use reqwest::StatusCode;
        use snafu::Snafu;
        /// Successful outcomes of `create_or_update`.
        #[derive(Debug)]
        pub enum Response {
            /// HTTP 200 with the resulting resource.
            Ok200(ApplicationGroup),
            /// HTTP 201 with the resulting resource.
            Created201(ApplicationGroup),
        }
        /// Everything that can go wrong while performing the operation.
        #[derive(Debug, Snafu)]
        #[snafu(visibility(pub(crate)))]
        pub enum Error {
            /// Service answered with a non-success status; `value` holds its `CloudError` payload.
            DefaultResponse {
                status_code: StatusCode,
                value: models::CloudError,
            },
            /// Building the HTTP request failed.
            BuildRequestError {
                source: reqwest::Error,
            },
            /// Sending the request or awaiting the response failed.
            ExecuteRequestError {
                source: reqwest::Error,
            },
            /// Reading the response body failed.
            ResponseBytesError {
                source: reqwest::Error,
            },
            /// JSON deserialization failed; `body` keeps the raw bytes for diagnosis.
            DeserializeError {
                source: serde_json::Error,
                body: bytes::Bytes,
            },
            /// Acquiring an authentication token failed.
            GetTokenError {
                source: azure_core::errors::AzureError,
            },
        }
    }
pub async fn update(
operation_config: &crate::OperationConfig,
subscription_id: &str,
resource_group_name: &str,
application_group_name: &str,
application_group: Option<&ApplicationGroupPatch>,
) -> std::result::Result<ApplicationGroup, update::Error> {
let client = &operation_config.client;
let uri_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.DesktopVirtualization/applicationGroups/{}",
&operation_config.base_path, subscription_id, resource_group_name, application_group_name
);
let mut req_builder = client.patch(uri_str);
if let Some(token_credential) = &operation_config.token_credential {
let token_response = token_credential
.get_token(&operation_config.token_credential_resource)
.await
.context(update::GetTokenError)?;
req_builder = req_builder.bearer_auth(token_response.token.secret());
}
req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]);
if let Some(application_group) = application_group {
req_builder = req_builder.json(application_group);
}
let req = req_builder.build().context(update::BuildRequestError)?;
let rsp = client.execute(req).await.context(update::ExecuteRequestError)?;
match rsp.status() {
StatusCode::OK => {
let body: bytes::Bytes = rsp.bytes().await.context(update::ResponseBytesError)?;
let rsp_value: ApplicationGroup = serde_json::from_slice(&body).context(update::DeserializeError { body })?;
Ok(rsp_value)
}
status_code => {
let body: bytes::Bytes = rsp.bytes().await.context(update::ResponseBytesError)?;
let rsp_value: CloudError = serde_json::from_slice(&body).context(update::DeserializeError { body })?;
update::DefaultResponse {
status_code,
value: rsp_value,
}
.fail()
}
}
}
    /// Error types for the sibling `update` operation.
    pub mod update {
        use crate::{models, models::*};
        use reqwest::StatusCode;
        use snafu::Snafu;
        /// Everything that can go wrong while performing the operation.
        #[derive(Debug, Snafu)]
        #[snafu(visibility(pub(crate)))]
        pub enum Error {
            /// Service answered with a non-success status; `value` holds its `CloudError` payload.
            DefaultResponse {
                status_code: StatusCode,
                value: models::CloudError,
            },
            /// Building the HTTP request failed.
            BuildRequestError {
                source: reqwest::Error,
            },
            /// Sending the request or awaiting the response failed.
            ExecuteRequestError {
                source: reqwest::Error,
            },
            /// Reading the response body failed.
            ResponseBytesError {
                source: reqwest::Error,
            },
            /// JSON deserialization failed; `body` keeps the raw bytes for diagnosis.
            DeserializeError {
                source: serde_json::Error,
                body: bytes::Bytes,
            },
            /// Acquiring an authentication token failed.
            GetTokenError {
                source: azure_core::errors::AzureError,
            },
        }
    }
pub async fn delete(
operation_config: &crate::OperationConfig,
subscription_id: &str,
resource_group_name: &str,
application_group_name: &str,
) -> std::result::Result<delete::Response, delete::Error> {
let client = &operation_config.client;
let uri_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.DesktopVirtualization/applicationGroups/{}",
&operation_config.base_path, subscription_id, resource_group_name, application_group_name
);
let mut req_builder = client.delete(uri_str);
if let Some(token_credential) = &operation_config.token_credential {
let token_response = token_credential
.get_token(&operation_config.token_credential_resource)
.await
.context(delete::GetTokenError)?;
req_builder = req_builder.bearer_auth(token_response.token.secret());
}
req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]);
let req = req_builder.build().context(delete::BuildRequestError)?;
let rsp = client.execute(req).await.context(delete::ExecuteRequestError)?;
match rsp.status() {
StatusCode::OK => Ok(delete::Response::Ok200),
StatusCode::NO_CONTENT => Ok(delete::Response::NoContent204),
status_code => {
let body: bytes::Bytes = rsp.bytes().await.context(delete::ResponseBytesError)?;
let rsp_value: CloudError = serde_json::from_slice(&body).context(delete::DeserializeError { body })?;
delete::DefaultResponse {
status_code,
value: rsp_value,
}
.fail()
}
}
}
    /// Response and error types for the sibling `delete` operation.
    pub mod delete {
        use crate::{models, models::*};
        use reqwest::StatusCode;
        use snafu::Snafu;
        /// Successful outcomes of `delete`.
        #[derive(Debug)]
        pub enum Response {
            /// HTTP 200.
            Ok200,
            /// HTTP 204.
            NoContent204,
        }
        /// Everything that can go wrong while performing the operation.
        #[derive(Debug, Snafu)]
        #[snafu(visibility(pub(crate)))]
        pub enum Error {
            /// Service answered with a non-success status; `value` holds its `CloudError` payload.
            DefaultResponse {
                status_code: StatusCode,
                value: models::CloudError,
            },
            /// Building the HTTP request failed.
            BuildRequestError {
                source: reqwest::Error,
            },
            /// Sending the request or awaiting the response failed.
            ExecuteRequestError {
                source: reqwest::Error,
            },
            /// Reading the response body failed.
            ResponseBytesError {
                source: reqwest::Error,
            },
            /// JSON deserialization failed; `body` keeps the raw bytes for diagnosis.
            DeserializeError {
                source: serde_json::Error,
                body: bytes::Bytes,
            },
            /// Acquiring an authentication token failed.
            GetTokenError {
                source: azure_core::errors::AzureError,
            },
        }
    }
pub async fn list_by_resource_group(
operation_config: &crate::OperationConfig,
subscription_id: &str,
resource_group_name: &str,
filter: Option<&str>,
) -> std::result::Result<ApplicationGroupList, list_by_resource_group::Error> {
let client = &operation_config.client;
let uri_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.DesktopVirtualization/applicationGroups",
&operation_config.base_path, subscription_id, resource_group_name
);
let mut req_builder = client.get(uri_str);
if let Some(token_credential) = &operation_config.token_credential {
let token_response = token_credential
.get_token(&operation_config.token_credential_resource)
.await
.context(list_by_resource_group::GetTokenError)?;
req_builder = req_builder.bearer_auth(token_response.token.secret());
}
req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]);
if let Some(filter) = filter {
req_builder = req_builder.query(&[("$filter", filter)]);
}
let req = req_builder.build().context(list_by_resource_group::BuildRequestError)?;
let rsp = client.execute(req).await.context(list_by_resource_group::ExecuteRequestError)?;
match rsp.status() {
StatusCode::OK => {
let body: bytes::Bytes = rsp.bytes().await.context(list_by_resource_group::ResponseBytesError)?;
let rsp_value: ApplicationGroupList =
serde_json::from_slice(&body).context(list_by_resource_group::DeserializeError { body })?;
Ok(rsp_value)
}
status_code => {
let body: bytes::Bytes = rsp.bytes().await.context(list_by_resource_group::ResponseBytesError)?;
let rsp_value: CloudError = serde_json::from_slice(&body).context(list_by_resource_group::DeserializeError { body })?;
list_by_resource_group::DefaultResponse {
status_code,
value: rsp_value,
}
.fail()
}
}
}
    /// Error types for the sibling `list_by_resource_group` operation.
    pub mod list_by_resource_group {
        use crate::{models, models::*};
        use reqwest::StatusCode;
        use snafu::Snafu;
        /// Everything that can go wrong while performing the operation.
        #[derive(Debug, Snafu)]
        #[snafu(visibility(pub(crate)))]
        pub enum Error {
            /// Service answered with a non-success status; `value` holds its `CloudError` payload.
            DefaultResponse {
                status_code: StatusCode,
                value: models::CloudError,
            },
            /// Building the HTTP request failed.
            BuildRequestError {
                source: reqwest::Error,
            },
            /// Sending the request or awaiting the response failed.
            ExecuteRequestError {
                source: reqwest::Error,
            },
            /// Reading the response body failed.
            ResponseBytesError {
                source: reqwest::Error,
            },
            /// JSON deserialization failed; `body` keeps the raw bytes for diagnosis.
            DeserializeError {
                source: serde_json::Error,
                body: bytes::Bytes,
            },
            /// Acquiring an authentication token failed.
            GetTokenError {
                source: azure_core::errors::AzureError,
            },
        }
    }
pub async fn list_by_subscription(
operation_config: &crate::OperationConfig,
subscription_id: &str,
filter: Option<&str>,
) -> std::result::Result<ApplicationGroupList, list_by_subscription::Error> {
let client = &operation_config.client;
let uri_str = &format!(
"{}/subscriptions/{}/providers/Microsoft.DesktopVirtualization/applicationGroups",
&operation_config.base_path, subscription_id
);
let mut req_builder = client.get(uri_str);
if let Some(token_credential) = &operation_config.token_credential {
let token_response = token_credential
.get_token(&operation_config.token_credential_resource)
.await
.context(list_by_subscription::GetTokenError)?;
req_builder = req_builder.bearer_auth(token_response.token.secret());
}
req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]);
if let Some(filter) = filter {
req_builder = req_builder.query(&[("$filter", filter)]);
}
let req = req_builder.build().context(list_by_subscription::BuildRequestError)?;
let rsp = client.execute(req).await.context(list_by_subscription::ExecuteRequestError)?;
match rsp.status() {
StatusCode::OK => {
let body: bytes::Bytes = rsp.bytes().await.context(list_by_subscription::ResponseBytesError)?;
let rsp_value: ApplicationGroupList =
serde_json::from_slice(&body).context(list_by_subscription::DeserializeError { body })?;
Ok(rsp_value)
}
status_code => {
let body: bytes::Bytes = rsp.bytes().await.context(list_by_subscription::ResponseBytesError)?;
let rsp_value: CloudError = serde_json::from_slice(&body).context(list_by_subscription::DeserializeError { body })?;
list_by_subscription::DefaultResponse {
status_code,
value: rsp_value,
}
.fail()
}
}
}
    /// Error types for the sibling `list_by_subscription` operation.
    pub mod list_by_subscription {
        use crate::{models, models::*};
        use reqwest::StatusCode;
        use snafu::Snafu;
        /// Everything that can go wrong while performing the operation.
        #[derive(Debug, Snafu)]
        #[snafu(visibility(pub(crate)))]
        pub enum Error {
            /// Service answered with a non-success status; `value` holds its `CloudError` payload.
            DefaultResponse {
                status_code: StatusCode,
                value: models::CloudError,
            },
            /// Building the HTTP request failed.
            BuildRequestError {
                source: reqwest::Error,
            },
            /// Sending the request or awaiting the response failed.
            ExecuteRequestError {
                source: reqwest::Error,
            },
            /// Reading the response body failed.
            ResponseBytesError {
                source: reqwest::Error,
            },
            /// JSON deserialization failed; `body` keeps the raw bytes for diagnosis.
            DeserializeError {
                source: serde_json::Error,
                body: bytes::Bytes,
            },
            /// Acquiring an authentication token failed.
            GetTokenError {
                source: azure_core::errors::AzureError,
            },
        }
    }
}
pub mod start_menu_items {
use crate::models::*;
use reqwest::StatusCode;
use snafu::{ResultExt, Snafu};
pub async fn list(
operation_config: &crate::OperationConfig,
subscription_id: &str,
resource_group_name: &str,
application_group_name: &str,
) -> std::result::Result<StartMenuItemList, list::Error> {
let client = &operation_config.client;
let uri_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.DesktopVirtualization/applicationGroups/{}/startMenuItems",
&operation_config.base_path, subscription_id, resource_group_name, application_group_name
);
let mut req_builder = client.get(uri_str);
if let Some(token_credential) = &operation_config.token_credential {
let token_response = token_credential
.get_token(&operation_config.token_credential_resource)
.await
.context(list::GetTokenError)?;
req_builder = req_builder.bearer_auth(token_response.token.secret());
}
req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]);
let req = req_builder.build().context(list::BuildRequestError)?;
let rsp = client.execute(req).await.context(list::ExecuteRequestError)?;
match rsp.status() {
StatusCode::OK => {
let body: bytes::Bytes = rsp.bytes().await.context(list::ResponseBytesError)?;
let rsp_value: StartMenuItemList = serde_json::from_slice(&body).context(list::DeserializeError { body })?;
Ok(rsp_value)
}
status_code => {
let body: bytes::Bytes = rsp.bytes().await.context(list::ResponseBytesError)?;
let rsp_value: CloudError = serde_json::from_slice(&body).context(list::DeserializeError { body })?;
list::DefaultResponse {
status_code,
value: rsp_value,
}
.fail()
}
}
}
    /// Error types for the sibling `list` operation.
    pub mod list {
        use crate::{models, models::*};
        use reqwest::StatusCode;
        use snafu::Snafu;
        /// Everything that can go wrong while performing the operation.
        #[derive(Debug, Snafu)]
        #[snafu(visibility(pub(crate)))]
        pub enum Error {
            /// Service answered with a non-success status; `value` holds its `CloudError` payload.
            DefaultResponse {
                status_code: StatusCode,
                value: models::CloudError,
            },
            /// Building the HTTP request failed.
            BuildRequestError {
                source: reqwest::Error,
            },
            /// Sending the request or awaiting the response failed.
            ExecuteRequestError {
                source: reqwest::Error,
            },
            /// Reading the response body failed.
            ResponseBytesError {
                source: reqwest::Error,
            },
            /// JSON deserialization failed; `body` keeps the raw bytes for diagnosis.
            DeserializeError {
                source: serde_json::Error,
                body: bytes::Bytes,
            },
            /// Acquiring an authentication token failed.
            GetTokenError {
                source: azure_core::errors::AzureError,
            },
        }
    }
}
pub mod applications {
use crate::models::*;
use reqwest::StatusCode;
use snafu::{ResultExt, Snafu};
pub async fn get(
operation_config: &crate::OperationConfig,
subscription_id: &str,
resource_group_name: &str,
application_group_name: &str,
application_name: &str,
) -> std::result::Result<Application, get::Error> {
let client = &operation_config.client;
let uri_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.DesktopVirtualization/applicationGroups/{}/applications/{}",
&operation_config.base_path, subscription_id, resource_group_name, application_group_name, application_name
);
let mut req_builder = client.get(uri_str);
if let Some(token_credential) = &operation_config.token_credential {
let token_response = token_credential
.get_token(&operation_config.token_credential_resource)
.await
.context(get::GetTokenError)?;
req_builder = req_builder.bearer_auth(token_response.token.secret());
}
req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]);
let req = req_builder.build().context(get::BuildRequestError)?;
let rsp = client.execute(req).await.context(get::ExecuteRequestError)?;
match rsp.status() {
StatusCode::OK => {
let body: bytes::Bytes = rsp.bytes().await.context(get::ResponseBytesError)?;
let rsp_value: Application = serde_json::from_slice(&body).context(get::DeserializeError { body })?;
Ok(rsp_value)
}
status_code => {
let body: bytes::Bytes = rsp.bytes().await.context(get::ResponseBytesError)?;
let rsp_value: CloudError = serde_json::from_slice(&body).context(get::DeserializeError { body })?;
get::DefaultResponse {
status_code,
value: rsp_value,
}
.fail()
}
}
}
    /// Error types for the sibling `get` operation.
    pub mod get {
        use crate::{models, models::*};
        use reqwest::StatusCode;
        use snafu::Snafu;
        /// Everything that can go wrong while performing the operation.
        #[derive(Debug, Snafu)]
        #[snafu(visibility(pub(crate)))]
        pub enum Error {
            /// Service answered with a non-success status; `value` holds its `CloudError` payload.
            DefaultResponse {
                status_code: StatusCode,
                value: models::CloudError,
            },
            /// Building the HTTP request failed.
            BuildRequestError {
                source: reqwest::Error,
            },
            /// Sending the request or awaiting the response failed.
            ExecuteRequestError {
                source: reqwest::Error,
            },
            /// Reading the response body failed.
            ResponseBytesError {
                source: reqwest::Error,
            },
            /// JSON deserialization failed; `body` keeps the raw bytes for diagnosis.
            DeserializeError {
                source: serde_json::Error,
                body: bytes::Bytes,
            },
            /// Acquiring an authentication token failed.
            GetTokenError {
                source: azure_core::errors::AzureError,
            },
        }
    }
pub async fn create_or_update(
operation_config: &crate::OperationConfig,
subscription_id: &str,
resource_group_name: &str,
application_group_name: &str,
application_name: &str,
application: &Application,
) -> std::result::Result<create_or_update::Response, create_or_update::Error> {
let client = &operation_config.client;
let uri_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.DesktopVirtualization/applicationGroups/{}/applications/{}",
&operation_config.base_path, subscription_id, resource_group_name, application_group_name, application_name
);
let mut req_builder = client.put(uri_str);
if let Some(token_credential) = &operation_config.token_credential {
let token_response = token_credential
.get_token(&operation_config.token_credential_resource)
.await
.context(create_or_update::GetTokenError)?;
req_builder = req_builder.bearer_auth(token_response.token.secret());
}
req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]);
req_builder = req_builder.json(application);
let req = req_builder.build().context(create_or_update::BuildRequestError)?;
let rsp = client.execute(req).await.context(create_or_update::ExecuteRequestError)?;
match rsp.status() {
StatusCode::OK => {
let body: bytes::Bytes = rsp.bytes().await.context(create_or_update::ResponseBytesError)?;
let rsp_value: Application = serde_json::from_slice(&body).context(create_or_update::DeserializeError { body })?;
Ok(create_or_update::Response::Ok200(rsp_value))
}
StatusCode::CREATED => {
let body: bytes::Bytes = rsp.bytes().await.context(create_or_update::ResponseBytesError)?;
let rsp_value: Application = serde_json::from_slice(&body).context(create_or_update::DeserializeError { body })?;
Ok(create_or_update::Response::Created201(rsp_value))
}
status_code => {
let body: bytes::Bytes = rsp.bytes().await.context(create_or_update::ResponseBytesError)?;
let rsp_value: CloudError = serde_json::from_slice(&body).context(create_or_update::DeserializeError { body })?;
create_or_update::DefaultResponse {
status_code,
value: rsp_value,
}
.fail()
}
}
}
    /// Response and error types for the sibling `create_or_update` operation.
    pub mod create_or_update {
        use crate::{models, models::*};
        use reqwest::StatusCode;
        use snafu::Snafu;
        /// Successful outcomes of `create_or_update`.
        #[derive(Debug)]
        pub enum Response {
            /// HTTP 200 with the resulting resource.
            Ok200(Application),
            /// HTTP 201 with the resulting resource.
            Created201(Application),
        }
        /// Everything that can go wrong while performing the operation.
        #[derive(Debug, Snafu)]
        #[snafu(visibility(pub(crate)))]
        pub enum Error {
            /// Service answered with a non-success status; `value` holds its `CloudError` payload.
            DefaultResponse {
                status_code: StatusCode,
                value: models::CloudError,
            },
            /// Building the HTTP request failed.
            BuildRequestError {
                source: reqwest::Error,
            },
            /// Sending the request or awaiting the response failed.
            ExecuteRequestError {
                source: reqwest::Error,
            },
            /// Reading the response body failed.
            ResponseBytesError {
                source: reqwest::Error,
            },
            /// JSON deserialization failed; `body` keeps the raw bytes for diagnosis.
            DeserializeError {
                source: serde_json::Error,
                body: bytes::Bytes,
            },
            /// Acquiring an authentication token failed.
            GetTokenError {
                source: azure_core::errors::AzureError,
            },
        }
    }
pub async fn update(
operation_config: &crate::OperationConfig,
subscription_id: &str,
resource_group_name: &str,
application_group_name: &str,
application_name: &str,
application: Option<&ApplicationPatch>,
) -> std::result::Result<Application, update::Error> {
let client = &operation_config.client;
let uri_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.DesktopVirtualization/applicationGroups/{}/applications/{}",
&operation_config.base_path, subscription_id, resource_group_name, application_group_name, application_name
);
let mut req_builder = client.patch(uri_str);
if let Some(token_credential) = &operation_config.token_credential {
let token_response = token_credential
.get_token(&operation_config.token_credential_resource)
.await
.context(update::GetTokenError)?;
req_builder = req_builder.bearer_auth(token_response.token.secret());
}
req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]);
if let Some(application) = application {
req_builder = req_builder.json(application);
}
let req = req_builder.build().context(update::BuildRequestError)?;
let rsp = client.execute(req).await.context(update::ExecuteRequestError)?;
match rsp.status() {
StatusCode::OK => {
let body: bytes::Bytes = rsp.bytes().await.context(update::ResponseBytesError)?;
let rsp_value: Application = serde_json::from_slice(&body).context(update::DeserializeError { body })?;
Ok(rsp_value)
}
status_code => {
let body: bytes::Bytes = rsp.bytes().await.context(update::ResponseBytesError)?;
let rsp_value: CloudError = serde_json::from_slice(&body).context(update::DeserializeError { body })?;
update::DefaultResponse {
status_code,
value: rsp_value,
}
.fail()
}
}
}
    /// Error types for the sibling `update` operation.
    pub mod update {
        use crate::{models, models::*};
        use reqwest::StatusCode;
        use snafu::Snafu;
        /// Everything that can go wrong while performing the operation.
        #[derive(Debug, Snafu)]
        #[snafu(visibility(pub(crate)))]
        pub enum Error {
            /// Service answered with a non-success status; `value` holds its `CloudError` payload.
            DefaultResponse {
                status_code: StatusCode,
                value: models::CloudError,
            },
            /// Building the HTTP request failed.
            BuildRequestError {
                source: reqwest::Error,
            },
            /// Sending the request or awaiting the response failed.
            ExecuteRequestError {
                source: reqwest::Error,
            },
            /// Reading the response body failed.
            ResponseBytesError {
                source: reqwest::Error,
            },
            /// JSON deserialization failed; `body` keeps the raw bytes for diagnosis.
            DeserializeError {
                source: serde_json::Error,
                body: bytes::Bytes,
            },
            /// Acquiring an authentication token failed.
            GetTokenError {
                source: azure_core::errors::AzureError,
            },
        }
    }
pub async fn delete(
operation_config: &crate::OperationConfig,
subscription_id: &str,
resource_group_name: &str,
application_group_name: &str,
application_name: &str,
) -> std::result::Result<delete::Response, delete::Error> {
let client = &operation_config.client;
let uri_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.DesktopVirtualization/applicationGroups/{}/applications/{}",
&operation_config.base_path, subscription_id, resource_group_name, application_group_name, application_name
);
let mut req_builder = client.delete(uri_str);
if let Some(token_credential) = &operation_config.token_credential {
let token_response = token_credential
.get_token(&operation_config.token_credential_resource)
.await
.context(delete::GetTokenError)?;
req_builder = req_builder.bearer_auth(token_response.token.secret());
}
req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]);
let req = req_builder.build().context(delete::BuildRequestError)?;
let rsp = client.execute(req).await.context(delete::ExecuteRequestError)?;
match rsp.status() {
StatusCode::OK => Ok(delete::Response::Ok200),
StatusCode::NO_CONTENT => Ok(delete::Response::NoContent204),
status_code => {
let body: bytes::Bytes = rsp.bytes().await.context(delete::ResponseBytesError)?;
let rsp_value: CloudError = serde_json::from_slice(&body).context(delete::DeserializeError { body })?;
delete::DefaultResponse {
status_code,
value: rsp_value,
}
.fail()
}
}
}
    /// Response and error types for the sibling `delete` operation.
    pub mod delete {
        use crate::{models, models::*};
        use reqwest::StatusCode;
        use snafu::Snafu;
        /// Successful outcomes of `delete`.
        #[derive(Debug)]
        pub enum Response {
            /// HTTP 200.
            Ok200,
            /// HTTP 204.
            NoContent204,
        }
        /// Everything that can go wrong while performing the operation.
        #[derive(Debug, Snafu)]
        #[snafu(visibility(pub(crate)))]
        pub enum Error {
            /// Service answered with a non-success status; `value` holds its `CloudError` payload.
            DefaultResponse {
                status_code: StatusCode,
                value: models::CloudError,
            },
            /// Building the HTTP request failed.
            BuildRequestError {
                source: reqwest::Error,
            },
            /// Sending the request or awaiting the response failed.
            ExecuteRequestError {
                source: reqwest::Error,
            },
            /// Reading the response body failed.
            ResponseBytesError {
                source: reqwest::Error,
            },
            /// JSON deserialization failed; `body` keeps the raw bytes for diagnosis.
            DeserializeError {
                source: serde_json::Error,
                body: bytes::Bytes,
            },
            /// Acquiring an authentication token failed.
            GetTokenError {
                source: azure_core::errors::AzureError,
            },
        }
    }
pub async fn list(
operation_config: &crate::OperationConfig,
subscription_id: &str,
resource_group_name: &str,
application_group_name: &str,
) -> std::result::Result<ApplicationList, list::Error> {
let client = &operation_config.client;
let uri_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.DesktopVirtualization/applicationGroups/{}/applications",
&operation_config.base_path, subscription_id, resource_group_name, application_group_name
);
let mut req_builder = client.get(uri_str);
if let Some(token_credential) = &operation_config.token_credential {
let token_response = token_credential
.get_token(&operation_config.token_credential_resource)
.await
.context(list::GetTokenError)?;
req_builder = req_builder.bearer_auth(token_response.token.secret());
}
req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]);
let req = req_builder.build().context(list::BuildRequestError)?;
let rsp = client.execute(req).await.context(list::ExecuteRequestError)?;
match rsp.status() {
StatusCode::OK => {
let body: bytes::Bytes = rsp.bytes().await.context(list::ResponseBytesError)?;
let rsp_value: ApplicationList = serde_json::from_slice(&body).context(list::DeserializeError { body })?;
Ok(rsp_value)
}
status_code => {
let body: bytes::Bytes = rsp.bytes().await.context(list::ResponseBytesError)?;
let rsp_value: CloudError = serde_json::from_slice(&body).context(list::DeserializeError { body })?;
list::DefaultResponse {
status_code,
value: rsp_value,
}
.fail()
}
}
}
    /// Error types for the application `list` operation above.
    pub mod list {
        use crate::{models, models::*};
        use reqwest::StatusCode;
        use snafu::Snafu;
        /// Everything that can go wrong during `list`, one variant per pipeline stage.
        #[derive(Debug, Snafu)]
        #[snafu(visibility(pub(crate)))]
        pub enum Error {
            /// Unexpected HTTP status; carries the parsed ARM `CloudError` body.
            DefaultResponse {
                status_code: StatusCode,
                value: models::CloudError,
            },
            /// The reqwest request could not be constructed.
            BuildRequestError {
                source: reqwest::Error,
            },
            /// Sending the request failed (network / TLS / timeout).
            ExecuteRequestError {
                source: reqwest::Error,
            },
            /// Reading the response body failed.
            ResponseBytesError {
                source: reqwest::Error,
            },
            /// The body could not be deserialized; the raw bytes are kept for diagnostics.
            DeserializeError {
                source: serde_json::Error,
                body: bytes::Bytes,
            },
            /// Acquiring the AAD bearer token failed.
            GetTokenError {
                source: azure_core::errors::AzureError,
            },
        }
    }
}
/// Generated client operations for desktop resources inside an application group.
pub mod desktops {
    use crate::models::*;
    use reqwest::StatusCode;
    use snafu::{ResultExt, Snafu};
    /// Fetches a single desktop by name (`GET .../applicationGroups/{}/desktops/{}`).
    /// Returns the deserialized `Desktop` on 200; other statuses become `get::Error::DefaultResponse`.
    pub async fn get(
        operation_config: &crate::OperationConfig,
        subscription_id: &str,
        resource_group_name: &str,
        application_group_name: &str,
        desktop_name: &str,
    ) -> std::result::Result<Desktop, get::Error> {
        let client = &operation_config.client;
        let uri_str = &format!(
            "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.DesktopVirtualization/applicationGroups/{}/desktops/{}",
            &operation_config.base_path, subscription_id, resource_group_name, application_group_name, desktop_name
        );
        let mut req_builder = client.get(uri_str);
        // Attach a bearer token only when a credential is configured.
        if let Some(token_credential) = &operation_config.token_credential {
            let token_response = token_credential
                .get_token(&operation_config.token_credential_resource)
                .await
                .context(get::GetTokenError)?;
            req_builder = req_builder.bearer_auth(token_response.token.secret());
        }
        req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]);
        let req = req_builder.build().context(get::BuildRequestError)?;
        let rsp = client.execute(req).await.context(get::ExecuteRequestError)?;
        match rsp.status() {
            StatusCode::OK => {
                let body: bytes::Bytes = rsp.bytes().await.context(get::ResponseBytesError)?;
                let rsp_value: Desktop = serde_json::from_slice(&body).context(get::DeserializeError { body })?;
                Ok(rsp_value)
            }
            status_code => {
                // Non-200: parse the ARM error envelope and fail with it.
                let body: bytes::Bytes = rsp.bytes().await.context(get::ResponseBytesError)?;
                let rsp_value: CloudError = serde_json::from_slice(&body).context(get::DeserializeError { body })?;
                get::DefaultResponse {
                    status_code,
                    value: rsp_value,
                }
                .fail()
            }
        }
    }
    /// Error types for `get`, one variant per pipeline stage.
    pub mod get {
        use crate::{models, models::*};
        use reqwest::StatusCode;
        use snafu::Snafu;
        #[derive(Debug, Snafu)]
        #[snafu(visibility(pub(crate)))]
        pub enum Error {
            DefaultResponse {
                status_code: StatusCode,
                value: models::CloudError,
            },
            BuildRequestError {
                source: reqwest::Error,
            },
            ExecuteRequestError {
                source: reqwest::Error,
            },
            ResponseBytesError {
                source: reqwest::Error,
            },
            DeserializeError {
                source: serde_json::Error,
                body: bytes::Bytes,
            },
            GetTokenError {
                source: azure_core::errors::AzureError,
            },
        }
    }
    /// Patches a desktop (`PATCH .../desktops/{}`). The JSON body is omitted
    /// entirely when `desktop` is `None`. Returns the updated `Desktop` on 200.
    pub async fn update(
        operation_config: &crate::OperationConfig,
        subscription_id: &str,
        resource_group_name: &str,
        application_group_name: &str,
        desktop_name: &str,
        desktop: Option<&DesktopPatch>,
    ) -> std::result::Result<Desktop, update::Error> {
        let client = &operation_config.client;
        let uri_str = &format!(
            "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.DesktopVirtualization/applicationGroups/{}/desktops/{}",
            &operation_config.base_path, subscription_id, resource_group_name, application_group_name, desktop_name
        );
        let mut req_builder = client.patch(uri_str);
        if let Some(token_credential) = &operation_config.token_credential {
            let token_response = token_credential
                .get_token(&operation_config.token_credential_resource)
                .await
                .context(update::GetTokenError)?;
            req_builder = req_builder.bearer_auth(token_response.token.secret());
        }
        req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]);
        // Optional patch document: only serialize it when provided.
        if let Some(desktop) = desktop {
            req_builder = req_builder.json(desktop);
        }
        let req = req_builder.build().context(update::BuildRequestError)?;
        let rsp = client.execute(req).await.context(update::ExecuteRequestError)?;
        match rsp.status() {
            StatusCode::OK => {
                let body: bytes::Bytes = rsp.bytes().await.context(update::ResponseBytesError)?;
                let rsp_value: Desktop = serde_json::from_slice(&body).context(update::DeserializeError { body })?;
                Ok(rsp_value)
            }
            status_code => {
                let body: bytes::Bytes = rsp.bytes().await.context(update::ResponseBytesError)?;
                let rsp_value: CloudError = serde_json::from_slice(&body).context(update::DeserializeError { body })?;
                update::DefaultResponse {
                    status_code,
                    value: rsp_value,
                }
                .fail()
            }
        }
    }
    /// Error types for `update`, one variant per pipeline stage.
    pub mod update {
        use crate::{models, models::*};
        use reqwest::StatusCode;
        use snafu::Snafu;
        #[derive(Debug, Snafu)]
        #[snafu(visibility(pub(crate)))]
        pub enum Error {
            DefaultResponse {
                status_code: StatusCode,
                value: models::CloudError,
            },
            BuildRequestError {
                source: reqwest::Error,
            },
            ExecuteRequestError {
                source: reqwest::Error,
            },
            ResponseBytesError {
                source: reqwest::Error,
            },
            DeserializeError {
                source: serde_json::Error,
                body: bytes::Bytes,
            },
            GetTokenError {
                source: azure_core::errors::AzureError,
            },
        }
    }
    /// Lists every desktop in an application group (`GET .../desktops`).
    pub async fn list(
        operation_config: &crate::OperationConfig,
        subscription_id: &str,
        resource_group_name: &str,
        application_group_name: &str,
    ) -> std::result::Result<DesktopList, list::Error> {
        let client = &operation_config.client;
        let uri_str = &format!(
            "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.DesktopVirtualization/applicationGroups/{}/desktops",
            &operation_config.base_path, subscription_id, resource_group_name, application_group_name
        );
        let mut req_builder = client.get(uri_str);
        if let Some(token_credential) = &operation_config.token_credential {
            let token_response = token_credential
                .get_token(&operation_config.token_credential_resource)
                .await
                .context(list::GetTokenError)?;
            req_builder = req_builder.bearer_auth(token_response.token.secret());
        }
        req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]);
        let req = req_builder.build().context(list::BuildRequestError)?;
        let rsp = client.execute(req).await.context(list::ExecuteRequestError)?;
        match rsp.status() {
            StatusCode::OK => {
                let body: bytes::Bytes = rsp.bytes().await.context(list::ResponseBytesError)?;
                let rsp_value: DesktopList = serde_json::from_slice(&body).context(list::DeserializeError { body })?;
                Ok(rsp_value)
            }
            status_code => {
                let body: bytes::Bytes = rsp.bytes().await.context(list::ResponseBytesError)?;
                let rsp_value: CloudError = serde_json::from_slice(&body).context(list::DeserializeError { body })?;
                list::DefaultResponse {
                    status_code,
                    value: rsp_value,
                }
                .fail()
            }
        }
    }
    /// Error types for `list`, one variant per pipeline stage.
    pub mod list {
        use crate::{models, models::*};
        use reqwest::StatusCode;
        use snafu::Snafu;
        #[derive(Debug, Snafu)]
        #[snafu(visibility(pub(crate)))]
        pub enum Error {
            DefaultResponse {
                status_code: StatusCode,
                value: models::CloudError,
            },
            BuildRequestError {
                source: reqwest::Error,
            },
            ExecuteRequestError {
                source: reqwest::Error,
            },
            ResponseBytesError {
                source: reqwest::Error,
            },
            DeserializeError {
                source: serde_json::Error,
                body: bytes::Bytes,
            },
            GetTokenError {
                source: azure_core::errors::AzureError,
            },
        }
    }
}
/// Generated client operations for host pool resources.
pub mod host_pools {
    use crate::models::*;
    use reqwest::StatusCode;
    use snafu::{ResultExt, Snafu};
    /// Fetches a host pool by name (`GET .../hostPools/{}`). Returns `HostPool` on 200.
    pub async fn get(
        operation_config: &crate::OperationConfig,
        subscription_id: &str,
        resource_group_name: &str,
        host_pool_name: &str,
    ) -> std::result::Result<HostPool, get::Error> {
        let client = &operation_config.client;
        let uri_str = &format!(
            "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.DesktopVirtualization/hostPools/{}",
            &operation_config.base_path, subscription_id, resource_group_name, host_pool_name
        );
        let mut req_builder = client.get(uri_str);
        // Attach a bearer token only when a credential is configured.
        if let Some(token_credential) = &operation_config.token_credential {
            let token_response = token_credential
                .get_token(&operation_config.token_credential_resource)
                .await
                .context(get::GetTokenError)?;
            req_builder = req_builder.bearer_auth(token_response.token.secret());
        }
        req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]);
        let req = req_builder.build().context(get::BuildRequestError)?;
        let rsp = client.execute(req).await.context(get::ExecuteRequestError)?;
        match rsp.status() {
            StatusCode::OK => {
                let body: bytes::Bytes = rsp.bytes().await.context(get::ResponseBytesError)?;
                let rsp_value: HostPool = serde_json::from_slice(&body).context(get::DeserializeError { body })?;
                Ok(rsp_value)
            }
            status_code => {
                // Non-200: parse the ARM error envelope and fail with it.
                let body: bytes::Bytes = rsp.bytes().await.context(get::ResponseBytesError)?;
                let rsp_value: CloudError = serde_json::from_slice(&body).context(get::DeserializeError { body })?;
                get::DefaultResponse {
                    status_code,
                    value: rsp_value,
                }
                .fail()
            }
        }
    }
    /// Error types for `get`, one variant per pipeline stage.
    pub mod get {
        use crate::{models, models::*};
        use reqwest::StatusCode;
        use snafu::Snafu;
        #[derive(Debug, Snafu)]
        #[snafu(visibility(pub(crate)))]
        pub enum Error {
            DefaultResponse {
                status_code: StatusCode,
                value: models::CloudError,
            },
            BuildRequestError {
                source: reqwest::Error,
            },
            ExecuteRequestError {
                source: reqwest::Error,
            },
            ResponseBytesError {
                source: reqwest::Error,
            },
            DeserializeError {
                source: serde_json::Error,
                body: bytes::Bytes,
            },
            GetTokenError {
                source: azure_core::errors::AzureError,
            },
        }
    }
    /// Creates or replaces a host pool (`PUT .../hostPools/{}`).
    /// Distinguishes 200 (updated) from 201 (created) in the returned `Response` enum.
    pub async fn create_or_update(
        operation_config: &crate::OperationConfig,
        subscription_id: &str,
        resource_group_name: &str,
        host_pool_name: &str,
        host_pool: &HostPool,
    ) -> std::result::Result<create_or_update::Response, create_or_update::Error> {
        let client = &operation_config.client;
        let uri_str = &format!(
            "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.DesktopVirtualization/hostPools/{}",
            &operation_config.base_path, subscription_id, resource_group_name, host_pool_name
        );
        let mut req_builder = client.put(uri_str);
        if let Some(token_credential) = &operation_config.token_credential {
            let token_response = token_credential
                .get_token(&operation_config.token_credential_resource)
                .await
                .context(create_or_update::GetTokenError)?;
            req_builder = req_builder.bearer_auth(token_response.token.secret());
        }
        req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]);
        req_builder = req_builder.json(host_pool);
        let req = req_builder.build().context(create_or_update::BuildRequestError)?;
        let rsp = client.execute(req).await.context(create_or_update::ExecuteRequestError)?;
        match rsp.status() {
            StatusCode::OK => {
                let body: bytes::Bytes = rsp.bytes().await.context(create_or_update::ResponseBytesError)?;
                let rsp_value: HostPool = serde_json::from_slice(&body).context(create_or_update::DeserializeError { body })?;
                Ok(create_or_update::Response::Ok200(rsp_value))
            }
            StatusCode::CREATED => {
                let body: bytes::Bytes = rsp.bytes().await.context(create_or_update::ResponseBytesError)?;
                let rsp_value: HostPool = serde_json::from_slice(&body).context(create_or_update::DeserializeError { body })?;
                Ok(create_or_update::Response::Created201(rsp_value))
            }
            status_code => {
                let body: bytes::Bytes = rsp.bytes().await.context(create_or_update::ResponseBytesError)?;
                let rsp_value: CloudError = serde_json::from_slice(&body).context(create_or_update::DeserializeError { body })?;
                create_or_update::DefaultResponse {
                    status_code,
                    value: rsp_value,
                }
                .fail()
            }
        }
    }
    /// Response and error types for `create_or_update`.
    pub mod create_or_update {
        use crate::{models, models::*};
        use reqwest::StatusCode;
        use snafu::Snafu;
        /// 200 carries the updated pool; 201 the newly created one.
        #[derive(Debug)]
        pub enum Response {
            Ok200(HostPool),
            Created201(HostPool),
        }
        #[derive(Debug, Snafu)]
        #[snafu(visibility(pub(crate)))]
        pub enum Error {
            DefaultResponse {
                status_code: StatusCode,
                value: models::CloudError,
            },
            BuildRequestError {
                source: reqwest::Error,
            },
            ExecuteRequestError {
                source: reqwest::Error,
            },
            ResponseBytesError {
                source: reqwest::Error,
            },
            DeserializeError {
                source: serde_json::Error,
                body: bytes::Bytes,
            },
            GetTokenError {
                source: azure_core::errors::AzureError,
            },
        }
    }
    /// Patches a host pool (`PATCH .../hostPools/{}`). The JSON body is omitted
    /// when `host_pool` is `None`. Returns the updated `HostPool` on 200.
    pub async fn update(
        operation_config: &crate::OperationConfig,
        subscription_id: &str,
        resource_group_name: &str,
        host_pool_name: &str,
        host_pool: Option<&HostPoolPatch>,
    ) -> std::result::Result<HostPool, update::Error> {
        let client = &operation_config.client;
        let uri_str = &format!(
            "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.DesktopVirtualization/hostPools/{}",
            &operation_config.base_path, subscription_id, resource_group_name, host_pool_name
        );
        let mut req_builder = client.patch(uri_str);
        if let Some(token_credential) = &operation_config.token_credential {
            let token_response = token_credential
                .get_token(&operation_config.token_credential_resource)
                .await
                .context(update::GetTokenError)?;
            req_builder = req_builder.bearer_auth(token_response.token.secret());
        }
        req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]);
        // Optional patch document: only serialize it when provided.
        if let Some(host_pool) = host_pool {
            req_builder = req_builder.json(host_pool);
        }
        let req = req_builder.build().context(update::BuildRequestError)?;
        let rsp = client.execute(req).await.context(update::ExecuteRequestError)?;
        match rsp.status() {
            StatusCode::OK => {
                let body: bytes::Bytes = rsp.bytes().await.context(update::ResponseBytesError)?;
                let rsp_value: HostPool = serde_json::from_slice(&body).context(update::DeserializeError { body })?;
                Ok(rsp_value)
            }
            status_code => {
                let body: bytes::Bytes = rsp.bytes().await.context(update::ResponseBytesError)?;
                let rsp_value: CloudError = serde_json::from_slice(&body).context(update::DeserializeError { body })?;
                update::DefaultResponse {
                    status_code,
                    value: rsp_value,
                }
                .fail()
            }
        }
    }
    /// Error types for `update`, one variant per pipeline stage.
    pub mod update {
        use crate::{models, models::*};
        use reqwest::StatusCode;
        use snafu::Snafu;
        #[derive(Debug, Snafu)]
        #[snafu(visibility(pub(crate)))]
        pub enum Error {
            DefaultResponse {
                status_code: StatusCode,
                value: models::CloudError,
            },
            BuildRequestError {
                source: reqwest::Error,
            },
            ExecuteRequestError {
                source: reqwest::Error,
            },
            ResponseBytesError {
                source: reqwest::Error,
            },
            DeserializeError {
                source: serde_json::Error,
                body: bytes::Bytes,
            },
            GetTokenError {
                source: azure_core::errors::AzureError,
            },
        }
    }
    /// Deletes a host pool (`DELETE .../hostPools/{}`). `force` maps to the
    /// `force` query parameter when provided. 200 and 204 both count as success.
    pub async fn delete(
        operation_config: &crate::OperationConfig,
        subscription_id: &str,
        resource_group_name: &str,
        host_pool_name: &str,
        force: Option<bool>,
    ) -> std::result::Result<delete::Response, delete::Error> {
        let client = &operation_config.client;
        let uri_str = &format!(
            "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.DesktopVirtualization/hostPools/{}",
            &operation_config.base_path, subscription_id, resource_group_name, host_pool_name
        );
        let mut req_builder = client.delete(uri_str);
        if let Some(token_credential) = &operation_config.token_credential {
            let token_response = token_credential
                .get_token(&operation_config.token_credential_resource)
                .await
                .context(delete::GetTokenError)?;
            req_builder = req_builder.bearer_auth(token_response.token.secret());
        }
        req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]);
        if let Some(force) = force {
            req_builder = req_builder.query(&[("force", force)]);
        }
        let req = req_builder.build().context(delete::BuildRequestError)?;
        let rsp = client.execute(req).await.context(delete::ExecuteRequestError)?;
        match rsp.status() {
            StatusCode::OK => Ok(delete::Response::Ok200),
            StatusCode::NO_CONTENT => Ok(delete::Response::NoContent204),
            status_code => {
                let body: bytes::Bytes = rsp.bytes().await.context(delete::ResponseBytesError)?;
                let rsp_value: CloudError = serde_json::from_slice(&body).context(delete::DeserializeError { body })?;
                delete::DefaultResponse {
                    status_code,
                    value: rsp_value,
                }
                .fail()
            }
        }
    }
    /// Response and error types for `delete`.
    pub mod delete {
        use crate::{models, models::*};
        use reqwest::StatusCode;
        use snafu::Snafu;
        /// Successful outcomes: the service answers 200 or 204 with no body.
        #[derive(Debug)]
        pub enum Response {
            Ok200,
            NoContent204,
        }
        #[derive(Debug, Snafu)]
        #[snafu(visibility(pub(crate)))]
        pub enum Error {
            DefaultResponse {
                status_code: StatusCode,
                value: models::CloudError,
            },
            BuildRequestError {
                source: reqwest::Error,
            },
            ExecuteRequestError {
                source: reqwest::Error,
            },
            ResponseBytesError {
                source: reqwest::Error,
            },
            DeserializeError {
                source: serde_json::Error,
                body: bytes::Bytes,
            },
            GetTokenError {
                source: azure_core::errors::AzureError,
            },
        }
    }
    /// Lists host pools within one resource group (`GET .../resourceGroups/{}/...hostPools`).
    pub async fn list_by_resource_group(
        operation_config: &crate::OperationConfig,
        subscription_id: &str,
        resource_group_name: &str,
    ) -> std::result::Result<HostPoolList, list_by_resource_group::Error> {
        let client = &operation_config.client;
        let uri_str = &format!(
            "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.DesktopVirtualization/hostPools",
            &operation_config.base_path, subscription_id, resource_group_name
        );
        let mut req_builder = client.get(uri_str);
        if let Some(token_credential) = &operation_config.token_credential {
            let token_response = token_credential
                .get_token(&operation_config.token_credential_resource)
                .await
                .context(list_by_resource_group::GetTokenError)?;
            req_builder = req_builder.bearer_auth(token_response.token.secret());
        }
        req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]);
        let req = req_builder.build().context(list_by_resource_group::BuildRequestError)?;
        let rsp = client.execute(req).await.context(list_by_resource_group::ExecuteRequestError)?;
        match rsp.status() {
            StatusCode::OK => {
                let body: bytes::Bytes = rsp.bytes().await.context(list_by_resource_group::ResponseBytesError)?;
                let rsp_value: HostPoolList = serde_json::from_slice(&body).context(list_by_resource_group::DeserializeError { body })?;
                Ok(rsp_value)
            }
            status_code => {
                let body: bytes::Bytes = rsp.bytes().await.context(list_by_resource_group::ResponseBytesError)?;
                let rsp_value: CloudError = serde_json::from_slice(&body).context(list_by_resource_group::DeserializeError { body })?;
                list_by_resource_group::DefaultResponse {
                    status_code,
                    value: rsp_value,
                }
                .fail()
            }
        }
    }
    /// Error types for `list_by_resource_group`, one variant per pipeline stage.
    pub mod list_by_resource_group {
        use crate::{models, models::*};
        use reqwest::StatusCode;
        use snafu::Snafu;
        #[derive(Debug, Snafu)]
        #[snafu(visibility(pub(crate)))]
        pub enum Error {
            DefaultResponse {
                status_code: StatusCode,
                value: models::CloudError,
            },
            BuildRequestError {
                source: reqwest::Error,
            },
            ExecuteRequestError {
                source: reqwest::Error,
            },
            ResponseBytesError {
                source: reqwest::Error,
            },
            DeserializeError {
                source: serde_json::Error,
                body: bytes::Bytes,
            },
            GetTokenError {
                source: azure_core::errors::AzureError,
            },
        }
    }
    /// Lists every host pool in the subscription (`GET /subscriptions/{}/...hostPools`).
    pub async fn list(operation_config: &crate::OperationConfig, subscription_id: &str) -> std::result::Result<HostPoolList, list::Error> {
        let client = &operation_config.client;
        let uri_str = &format!(
            "{}/subscriptions/{}/providers/Microsoft.DesktopVirtualization/hostPools",
            &operation_config.base_path, subscription_id
        );
        let mut req_builder = client.get(uri_str);
        if let Some(token_credential) = &operation_config.token_credential {
            let token_response = token_credential
                .get_token(&operation_config.token_credential_resource)
                .await
                .context(list::GetTokenError)?;
            req_builder = req_builder.bearer_auth(token_response.token.secret());
        }
        req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]);
        let req = req_builder.build().context(list::BuildRequestError)?;
        let rsp = client.execute(req).await.context(list::ExecuteRequestError)?;
        match rsp.status() {
            StatusCode::OK => {
                let body: bytes::Bytes = rsp.bytes().await.context(list::ResponseBytesError)?;
                let rsp_value: HostPoolList = serde_json::from_slice(&body).context(list::DeserializeError { body })?;
                Ok(rsp_value)
            }
            status_code => {
                let body: bytes::Bytes = rsp.bytes().await.context(list::ResponseBytesError)?;
                let rsp_value: CloudError = serde_json::from_slice(&body).context(list::DeserializeError { body })?;
                list::DefaultResponse {
                    status_code,
                    value: rsp_value,
                }
                .fail()
            }
        }
    }
    /// Error types for `list`, one variant per pipeline stage.
    pub mod list {
        use crate::{models, models::*};
        use reqwest::StatusCode;
        use snafu::Snafu;
        #[derive(Debug, Snafu)]
        #[snafu(visibility(pub(crate)))]
        pub enum Error {
            DefaultResponse {
                status_code: StatusCode,
                value: models::CloudError,
            },
            BuildRequestError {
                source: reqwest::Error,
            },
            ExecuteRequestError {
                source: reqwest::Error,
            },
            ResponseBytesError {
                source: reqwest::Error,
            },
            DeserializeError {
                source: serde_json::Error,
                body: bytes::Bytes,
            },
            GetTokenError {
                source: azure_core::errors::AzureError,
            },
        }
    }
}
pub mod user_sessions {
use crate::models::*;
use reqwest::StatusCode;
use snafu::{ResultExt, Snafu};
pub async fn list_by_host_pool(
operation_config: &crate::OperationConfig,
subscription_id: &str,
resource_group_name: &str,
host_pool_name: &str,
filter: Option<&str>,
) -> std::result::Result<UserSessionList, list_by_host_pool::Error> {
let client = &operation_config.client;
let uri_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.DesktopVirtualization/hostPools/{}/userSessions",
&operation_config.base_path, subscription_id, resource_group_name, host_pool_name
);
let mut req_builder = client.get(uri_str);
if let Some(token_credential) = &operation_config.token_credential {
let token_response = token_credential
.get_token(&operation_config.token_credential_resource)
.await
.context(list_by_host_pool::GetTokenError)?;
req_builder = req_builder.bearer_auth(token_response.token.secret());
}
req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]);
if let Some(filter) = filter {
req_builder = req_builder.query(&[("$filter", filter)]);
}
let req = req_builder.build().context(list_by_host_pool::BuildRequestError)?;
let rsp = client.execute(req).await.context(list_by_host_pool::ExecuteRequestError)?;
match rsp.status() {
StatusCode::OK => {
let body: bytes::Bytes = rsp.bytes().await.context(list_by_host_pool::ResponseBytesError)?;
let rsp_value: UserSessionList = serde_json::from_slice(&body).context(list_by_host_pool::DeserializeError { body })?;
Ok(rsp_value)
}
status_code => {
let body: bytes::Bytes = rsp.bytes().await.context(list_by_host_pool::ResponseBytesError)?;
let rsp_value: CloudError = serde_json::from_slice(&body).context(list_by_host_pool::DeserializeError { body })?;
list_by_host_pool::DefaultResponse {
status_code,
value: rsp_value,
}
.fail()
}
}
}
pub mod list_by_host_pool {
use crate::{models, models::*};
use reqwest::StatusCode;
use snafu::Snafu;
#[derive(Debug, Snafu)]
#[snafu(visibility(pub(crate)))]
pub enum Error {
DefaultResponse {
status_code: StatusCode,
value: models::CloudError,
},
BuildRequestError {
source: reqwest::Error,
},
ExecuteRequestError {
source: reqwest::Error,
},
ResponseBytesError {
source: reqwest::Error,
},
DeserializeError {
source: serde_json::Error,
body: bytes::Bytes,
},
GetTokenError {
source: azure_core::errors::AzureError,
},
}
}
pub async fn get(
operation_config: &crate::OperationConfig,
subscription_id: &str,
resource_group_name: &str,
host_pool_name: &str,
session_host_name: &str,
user_session_id: &str,
) -> std::result::Result<UserSession, get::Error> {
let client = &operation_config.client;
let uri_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.DesktopVirtualization/hostPools/{}/sessionHosts/{}/userSessions/{}",
&operation_config.base_path, subscription_id, resource_group_name, host_pool_name, session_host_name, user_session_id
);
let mut req_builder = client.get(uri_str);
if let Some(token_credential) = &operation_config.token_credential {
let token_response = token_credential
.get_token(&operation_config.token_credential_resource)
.await
.context(get::GetTokenError)?;
req_builder = req_builder.bearer_auth(token_response.token.secret());
}
req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]);
let req = req_builder.build().context(get::BuildRequestError)?;
let rsp = client.execute(req).await.context(get::ExecuteRequestError)?;
match rsp.status() {
StatusCode::OK => {
let body: bytes::Bytes = rsp.bytes().await.context(get::ResponseBytesError)?;
let rsp_value: UserSession = serde_json::from_slice(&body).context(get::DeserializeError { body })?;
Ok(rsp_value)
}
status_code => {
let body: bytes::Bytes = rsp.bytes().await.context(get::ResponseBytesError)?;
let rsp_value: CloudError = serde_json::from_slice(&body).context(get::DeserializeError { body })?;
get::DefaultResponse {
status_code,
value: rsp_value,
}
.fail()
}
}
}
pub mod get {
use crate::{models, models::*};
use reqwest::StatusCode;
use snafu::Snafu;
#[derive(Debug, Snafu)]
#[snafu(visibility(pub(crate)))]
pub enum Error {
DefaultResponse {
status_code: StatusCode,
value: models::CloudError,
},
BuildRequestError {
source: reqwest::Error,
},
ExecuteRequestError {
source: reqwest::Error,
},
ResponseBytesError {
source: reqwest::Error,
},
DeserializeError {
source: serde_json::Error,
body: bytes::Bytes,
},
GetTokenError {
source: azure_core::errors::AzureError,
},
}
}
pub async fn delete(
operation_config: &crate::OperationConfig,
subscription_id: &str,
resource_group_name: &str,
host_pool_name: &str,
session_host_name: &str,
user_session_id: &str,
force: Option<bool>,
) -> std::result::Result<delete::Response, delete::Error> {
let client = &operation_config.client;
let uri_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.DesktopVirtualization/hostPools/{}/sessionHosts/{}/userSessions/{}",
&operation_config.base_path, subscription_id, resource_group_name, host_pool_name, session_host_name, user_session_id
);
let mut req_builder = client.delete(uri_str);
if let Some(token_credential) = &operation_config.token_credential {
let token_response = token_credential
.get_token(&operation_config.token_credential_resource)
.await
.context(delete::GetTokenError)?;
req_builder = req_builder.bearer_auth(token_response.token.secret());
}
req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]);
if let Some(force) = force {
req_builder = req_builder.query(&[("force", force)]);
}
let req = req_builder.build().context(delete::BuildRequestError)?;
let rsp = client.execute(req).await.context(delete::ExecuteRequestError)?;
match rsp.status() {
StatusCode::OK => Ok(delete::Response::Ok200),
StatusCode::NO_CONTENT => Ok(delete::Response::NoContent204),
status_code => {
let body: bytes::Bytes = rsp.bytes().await.context(delete::ResponseBytesError)?;
let rsp_value: CloudError = serde_json::from_slice(&body).context(delete::DeserializeError { body })?;
delete::DefaultResponse {
status_code,
value: rsp_value,
}
.fail()
}
}
}
pub mod delete {
use crate::{models, models::*};
use reqwest::StatusCode;
use snafu::Snafu;
#[derive(Debug)]
pub enum Response {
Ok200,
NoContent204,
}
#[derive(Debug, Snafu)]
#[snafu(visibility(pub(crate)))]
pub enum Error {
DefaultResponse {
status_code: StatusCode,
value: models::CloudError,
},
BuildRequestError {
source: reqwest::Error,
},
ExecuteRequestError {
source: reqwest::Error,
},
ResponseBytesError {
source: reqwest::Error,
},
DeserializeError {
source: serde_json::Error,
body: bytes::Bytes,
},
GetTokenError {
source: azure_core::errors::AzureError,
},
}
}
pub async fn list(
operation_config: &crate::OperationConfig,
subscription_id: &str,
resource_group_name: &str,
host_pool_name: &str,
session_host_name: &str,
) -> std::result::Result<UserSessionList, list::Error> {
let client = &operation_config.client;
let uri_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.DesktopVirtualization/hostPools/{}/sessionHosts/{}/userSessions",
&operation_config.base_path, subscription_id, resource_group_name, host_pool_name, session_host_name
);
let mut req_builder = client.get(uri_str);
if let Some(token_credential) = &operation_config.token_credential {
let token_response = token_credential
.get_token(&operation_config.token_credential_resource)
.await
.context(list::GetTokenError)?;
req_builder = req_builder.bearer_auth(token_response.token.secret());
}
req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]);
let req = req_builder.build().context(list::BuildRequestError)?;
let rsp = client.execute(req).await.context(list::ExecuteRequestError)?;
match rsp.status() {
StatusCode::OK => {
let body: bytes::Bytes = rsp.bytes().await.context(list::ResponseBytesError)?;
let rsp_value: UserSessionList = serde_json::from_slice(&body).context(list::DeserializeError { body })?;
Ok(rsp_value)
}
status_code => {
let body: bytes::Bytes = rsp.bytes().await.context(list::ResponseBytesError)?;
let rsp_value: CloudError = serde_json::from_slice(&body).context(list::DeserializeError { body })?;
list::DefaultResponse {
status_code,
value: rsp_value,
}
.fail()
}
}
}
pub mod list {
use crate::{models, models::*};
use reqwest::StatusCode;
use snafu::Snafu;
#[derive(Debug, Snafu)]
#[snafu(visibility(pub(crate)))]
pub enum Error {
DefaultResponse {
status_code: StatusCode,
value: models::CloudError,
},
BuildRequestError {
source: reqwest::Error,
},
ExecuteRequestError {
source: reqwest::Error,
},
ResponseBytesError {
source: reqwest::Error,
},
DeserializeError {
source: serde_json::Error,
body: bytes::Bytes,
},
GetTokenError {
source: azure_core::errors::AzureError,
},
}
}
pub async fn disconnect(
operation_config: &crate::OperationConfig,
subscription_id: &str,
resource_group_name: &str,
host_pool_name: &str,
session_host_name: &str,
user_session_id: &str,
) -> std::result::Result<(), disconnect::Error> {
let client = &operation_config.client;
let uri_str = & format ! ("{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.DesktopVirtualization/hostPools/{}/sessionHosts/{}/userSessions/{}/disconnect" , & operation_config . base_path , subscription_id , resource_group_name , host_pool_name , session_host_name , user_session_id) ;
let mut req_builder = client.post(uri_str);
if let Some(token_credential) = &operation_config.token_credential {
let token_response = token_credential
.get_token(&operation_config.token_credential_resource)
.await
.context(disconnect::GetTokenError)?;
req_builder = req_builder.bearer_auth(token_response.token.secret());
}
req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]);
req_builder = req_builder.header(reqwest::header::CONTENT_LENGTH, 0);
let req = req_builder.build().context(disconnect::BuildRequestError)?;
let rsp = client.execute(req).await.context(disconnect::ExecuteRequestError)?;
match rsp.status() {
StatusCode::OK => Ok(()),
status_code => {
let body: bytes::Bytes = rsp.bytes().await.context(disconnect::ResponseBytesError)?;
let rsp_value: CloudError = serde_json::from_slice(&body).context(disconnect::DeserializeError { body })?;
disconnect::DefaultResponse {
status_code,
value: rsp_value,
}
.fail()
}
}
}
pub mod disconnect {
use crate::{models, models::*};
use reqwest::StatusCode;
use snafu::Snafu;
#[derive(Debug, Snafu)]
#[snafu(visibility(pub(crate)))]
pub enum Error {
DefaultResponse {
status_code: StatusCode,
value: models::CloudError,
},
BuildRequestError {
source: reqwest::Error,
},
ExecuteRequestError {
source: reqwest::Error,
},
ResponseBytesError {
source: reqwest::Error,
},
DeserializeError {
source: serde_json::Error,
body: bytes::Bytes,
},
GetTokenError {
source: azure_core::errors::AzureError,
},
}
}
/// Sends a message to a user session on a session host.
///
/// Issues a POST to the `sendMessage` endpoint. When `send_message` is
/// `Some`, it is serialized as the JSON request body; when `None`, the
/// request is sent without a body.
///
/// Returns `Ok(())` on HTTP 200; any other status has its body decoded as
/// an ARM `CloudError` and is surfaced as `send_message::Error::DefaultResponse`.
pub async fn send_message(
    operation_config: &crate::OperationConfig,
    subscription_id: &str,
    resource_group_name: &str,
    host_pool_name: &str,
    session_host_name: &str,
    user_session_id: &str,
    send_message: Option<&SendMessage>,
) -> std::result::Result<(), send_message::Error> {
    let client = &operation_config.client;
    let uri_str = &format!("{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.DesktopVirtualization/hostPools/{}/sessionHosts/{}/userSessions/{}/sendMessage", &operation_config.base_path, subscription_id, resource_group_name, host_pool_name, session_host_name, user_session_id);
    let mut req_builder = client.post(uri_str);
    // Bearer auth is only attached when a token credential is configured.
    if let Some(token_credential) = &operation_config.token_credential {
        let token_response = token_credential
            .get_token(&operation_config.token_credential_resource)
            .await
            .context(send_message::GetTokenError)?;
        req_builder = req_builder.bearer_auth(token_response.token.secret());
    }
    req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]);
    if let Some(send_message) = send_message {
        req_builder = req_builder.json(send_message);
    }
    let req = req_builder.build().context(send_message::BuildRequestError)?;
    let rsp = client.execute(req).await.context(send_message::ExecuteRequestError)?;
    match rsp.status() {
        StatusCode::OK => Ok(()),
        status_code => {
            // Non-200: the body is expected to be an ARM CloudError payload.
            let body: bytes::Bytes = rsp.bytes().await.context(send_message::ResponseBytesError)?;
            let rsp_value: CloudError = serde_json::from_slice(&body).context(send_message::DeserializeError { body })?;
            send_message::DefaultResponse {
                status_code,
                value: rsp_value,
            }
            .fail()
        }
    }
}
/// Error types for the `send_message` user-session operation.
pub mod send_message {
    use crate::{models, models::*};
    use reqwest::StatusCode;
    use snafu::Snafu;
    /// Errors raised while building, sending, or decoding the request.
    #[derive(Debug, Snafu)]
    #[snafu(visibility(pub(crate)))]
    pub enum Error {
        /// Service returned a non-success status; `value` is the decoded ARM `CloudError` body.
        DefaultResponse {
            status_code: StatusCode,
            value: models::CloudError,
        },
        /// The HTTP request could not be constructed.
        BuildRequestError {
            source: reqwest::Error,
        },
        /// The HTTP request failed to execute.
        ExecuteRequestError {
            source: reqwest::Error,
        },
        /// The response body could not be read.
        ResponseBytesError {
            source: reqwest::Error,
        },
        /// The response body was not valid JSON; `body` keeps the raw bytes.
        DeserializeError {
            source: serde_json::Error,
            body: bytes::Bytes,
        },
        /// Acquiring an access token from the configured credential failed.
        GetTokenError {
            source: azure_core::errors::AzureError,
        },
    }
}
}
/// CRUD + list operations for session hosts within a host pool.
///
/// All operations share the same shape: build the ARM URI, attach bearer
/// auth when a credential is configured, send, and decode either the typed
/// success payload or a `CloudError` for any other status.
pub mod session_hosts {
    use crate::models::*;
    use reqwest::StatusCode;
    use snafu::{ResultExt, Snafu};
    /// Gets a single session host; 200 deserializes to `SessionHost`.
    pub async fn get(
        operation_config: &crate::OperationConfig,
        subscription_id: &str,
        resource_group_name: &str,
        host_pool_name: &str,
        session_host_name: &str,
    ) -> std::result::Result<SessionHost, get::Error> {
        let client = &operation_config.client;
        let uri_str = &format!(
            "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.DesktopVirtualization/hostPools/{}/sessionHosts/{}",
            &operation_config.base_path, subscription_id, resource_group_name, host_pool_name, session_host_name
        );
        let mut req_builder = client.get(uri_str);
        // Bearer auth only when a token credential is configured.
        if let Some(token_credential) = &operation_config.token_credential {
            let token_response = token_credential
                .get_token(&operation_config.token_credential_resource)
                .await
                .context(get::GetTokenError)?;
            req_builder = req_builder.bearer_auth(token_response.token.secret());
        }
        req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]);
        let req = req_builder.build().context(get::BuildRequestError)?;
        let rsp = client.execute(req).await.context(get::ExecuteRequestError)?;
        match rsp.status() {
            StatusCode::OK => {
                let body: bytes::Bytes = rsp.bytes().await.context(get::ResponseBytesError)?;
                let rsp_value: SessionHost = serde_json::from_slice(&body).context(get::DeserializeError { body })?;
                Ok(rsp_value)
            }
            status_code => {
                // Non-success: body is expected to be an ARM `CloudError`.
                let body: bytes::Bytes = rsp.bytes().await.context(get::ResponseBytesError)?;
                let rsp_value: CloudError = serde_json::from_slice(&body).context(get::DeserializeError { body })?;
                get::DefaultResponse {
                    status_code,
                    value: rsp_value,
                }
                .fail()
            }
        }
    }
    /// Error type for [`get`].
    pub mod get {
        use crate::{models, models::*};
        use reqwest::StatusCode;
        use snafu::Snafu;
        #[derive(Debug, Snafu)]
        #[snafu(visibility(pub(crate)))]
        pub enum Error {
            DefaultResponse {
                status_code: StatusCode,
                value: models::CloudError,
            },
            BuildRequestError {
                source: reqwest::Error,
            },
            ExecuteRequestError {
                source: reqwest::Error,
            },
            ResponseBytesError {
                source: reqwest::Error,
            },
            DeserializeError {
                source: serde_json::Error,
                body: bytes::Bytes,
            },
            GetTokenError {
                source: azure_core::errors::AzureError,
            },
        }
    }
    /// Patches a session host; the optional `session_host` patch document is
    /// sent as the JSON body when present.
    pub async fn update(
        operation_config: &crate::OperationConfig,
        subscription_id: &str,
        resource_group_name: &str,
        host_pool_name: &str,
        session_host_name: &str,
        session_host: Option<&SessionHostPatch>,
    ) -> std::result::Result<SessionHost, update::Error> {
        let client = &operation_config.client;
        let uri_str = &format!(
            "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.DesktopVirtualization/hostPools/{}/sessionHosts/{}",
            &operation_config.base_path, subscription_id, resource_group_name, host_pool_name, session_host_name
        );
        let mut req_builder = client.patch(uri_str);
        if let Some(token_credential) = &operation_config.token_credential {
            let token_response = token_credential
                .get_token(&operation_config.token_credential_resource)
                .await
                .context(update::GetTokenError)?;
            req_builder = req_builder.bearer_auth(token_response.token.secret());
        }
        req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]);
        if let Some(session_host) = session_host {
            req_builder = req_builder.json(session_host);
        }
        let req = req_builder.build().context(update::BuildRequestError)?;
        let rsp = client.execute(req).await.context(update::ExecuteRequestError)?;
        match rsp.status() {
            StatusCode::OK => {
                let body: bytes::Bytes = rsp.bytes().await.context(update::ResponseBytesError)?;
                let rsp_value: SessionHost = serde_json::from_slice(&body).context(update::DeserializeError { body })?;
                Ok(rsp_value)
            }
            status_code => {
                let body: bytes::Bytes = rsp.bytes().await.context(update::ResponseBytesError)?;
                let rsp_value: CloudError = serde_json::from_slice(&body).context(update::DeserializeError { body })?;
                update::DefaultResponse {
                    status_code,
                    value: rsp_value,
                }
                .fail()
            }
        }
    }
    /// Error type for [`update`].
    pub mod update {
        use crate::{models, models::*};
        use reqwest::StatusCode;
        use snafu::Snafu;
        #[derive(Debug, Snafu)]
        #[snafu(visibility(pub(crate)))]
        pub enum Error {
            DefaultResponse {
                status_code: StatusCode,
                value: models::CloudError,
            },
            BuildRequestError {
                source: reqwest::Error,
            },
            ExecuteRequestError {
                source: reqwest::Error,
            },
            ResponseBytesError {
                source: reqwest::Error,
            },
            DeserializeError {
                source: serde_json::Error,
                body: bytes::Bytes,
            },
            GetTokenError {
                source: azure_core::errors::AzureError,
            },
        }
    }
    /// Deletes a session host; `force` is forwarded as a query parameter
    /// when present. 200 and 204 are both successful outcomes.
    pub async fn delete(
        operation_config: &crate::OperationConfig,
        subscription_id: &str,
        resource_group_name: &str,
        host_pool_name: &str,
        session_host_name: &str,
        force: Option<bool>,
    ) -> std::result::Result<delete::Response, delete::Error> {
        let client = &operation_config.client;
        let uri_str = &format!(
            "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.DesktopVirtualization/hostPools/{}/sessionHosts/{}",
            &operation_config.base_path, subscription_id, resource_group_name, host_pool_name, session_host_name
        );
        let mut req_builder = client.delete(uri_str);
        if let Some(token_credential) = &operation_config.token_credential {
            let token_response = token_credential
                .get_token(&operation_config.token_credential_resource)
                .await
                .context(delete::GetTokenError)?;
            req_builder = req_builder.bearer_auth(token_response.token.secret());
        }
        req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]);
        if let Some(force) = force {
            req_builder = req_builder.query(&[("force", force)]);
        }
        let req = req_builder.build().context(delete::BuildRequestError)?;
        let rsp = client.execute(req).await.context(delete::ExecuteRequestError)?;
        match rsp.status() {
            StatusCode::OK => Ok(delete::Response::Ok200),
            StatusCode::NO_CONTENT => Ok(delete::Response::NoContent204),
            status_code => {
                let body: bytes::Bytes = rsp.bytes().await.context(delete::ResponseBytesError)?;
                let rsp_value: CloudError = serde_json::from_slice(&body).context(delete::DeserializeError { body })?;
                delete::DefaultResponse {
                    status_code,
                    value: rsp_value,
                }
                .fail()
            }
        }
    }
    /// Response/error types for [`delete`].
    pub mod delete {
        use crate::{models, models::*};
        use reqwest::StatusCode;
        use snafu::Snafu;
        /// Both variants indicate success; 204 means there was no body.
        #[derive(Debug)]
        pub enum Response {
            Ok200,
            NoContent204,
        }
        #[derive(Debug, Snafu)]
        #[snafu(visibility(pub(crate)))]
        pub enum Error {
            DefaultResponse {
                status_code: StatusCode,
                value: models::CloudError,
            },
            BuildRequestError {
                source: reqwest::Error,
            },
            ExecuteRequestError {
                source: reqwest::Error,
            },
            ResponseBytesError {
                source: reqwest::Error,
            },
            DeserializeError {
                source: serde_json::Error,
                body: bytes::Bytes,
            },
            GetTokenError {
                source: azure_core::errors::AzureError,
            },
        }
    }
    /// Lists all session hosts in a host pool.
    pub async fn list(
        operation_config: &crate::OperationConfig,
        subscription_id: &str,
        resource_group_name: &str,
        host_pool_name: &str,
    ) -> std::result::Result<SessionHostList, list::Error> {
        let client = &operation_config.client;
        let uri_str = &format!(
            "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.DesktopVirtualization/hostPools/{}/sessionHosts",
            &operation_config.base_path, subscription_id, resource_group_name, host_pool_name
        );
        let mut req_builder = client.get(uri_str);
        if let Some(token_credential) = &operation_config.token_credential {
            let token_response = token_credential
                .get_token(&operation_config.token_credential_resource)
                .await
                .context(list::GetTokenError)?;
            req_builder = req_builder.bearer_auth(token_response.token.secret());
        }
        req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]);
        let req = req_builder.build().context(list::BuildRequestError)?;
        let rsp = client.execute(req).await.context(list::ExecuteRequestError)?;
        match rsp.status() {
            StatusCode::OK => {
                let body: bytes::Bytes = rsp.bytes().await.context(list::ResponseBytesError)?;
                let rsp_value: SessionHostList = serde_json::from_slice(&body).context(list::DeserializeError { body })?;
                Ok(rsp_value)
            }
            status_code => {
                let body: bytes::Bytes = rsp.bytes().await.context(list::ResponseBytesError)?;
                let rsp_value: CloudError = serde_json::from_slice(&body).context(list::DeserializeError { body })?;
                list::DefaultResponse {
                    status_code,
                    value: rsp_value,
                }
                .fail()
            }
        }
    }
    /// Error type for [`list`].
    pub mod list {
        use crate::{models, models::*};
        use reqwest::StatusCode;
        use snafu::Snafu;
        #[derive(Debug, Snafu)]
        #[snafu(visibility(pub(crate)))]
        pub enum Error {
            DefaultResponse {
                status_code: StatusCode,
                value: models::CloudError,
            },
            BuildRequestError {
                source: reqwest::Error,
            },
            ExecuteRequestError {
                source: reqwest::Error,
            },
            ResponseBytesError {
                source: reqwest::Error,
            },
            DeserializeError {
                source: serde_json::Error,
                body: bytes::Bytes,
            },
            GetTokenError {
                source: azure_core::errors::AzureError,
            },
        }
    }
}
/// CRUD + list operations for MSIX packages within a host pool.
///
/// Note: these URIs use a lowercase `resourcegroups` path segment (as
/// generated), unlike the `resourceGroups` segment elsewhere in this file;
/// ARM treats the segment case-insensitively.
pub mod msix_packages {
    use crate::models::*;
    use reqwest::StatusCode;
    use snafu::{ResultExt, Snafu};
    /// Gets a single MSIX package; 200 deserializes to `MsixPackage`.
    pub async fn get(
        operation_config: &crate::OperationConfig,
        subscription_id: &str,
        resource_group_name: &str,
        host_pool_name: &str,
        msix_package_full_name: &str,
    ) -> std::result::Result<MsixPackage, get::Error> {
        let client = &operation_config.client;
        let uri_str = &format!(
            "{}/subscriptions/{}/resourcegroups/{}/providers/Microsoft.DesktopVirtualization/hostPools/{}/msixPackages/{}",
            &operation_config.base_path, subscription_id, resource_group_name, host_pool_name, msix_package_full_name
        );
        let mut req_builder = client.get(uri_str);
        // Bearer auth only when a token credential is configured.
        if let Some(token_credential) = &operation_config.token_credential {
            let token_response = token_credential
                .get_token(&operation_config.token_credential_resource)
                .await
                .context(get::GetTokenError)?;
            req_builder = req_builder.bearer_auth(token_response.token.secret());
        }
        req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]);
        let req = req_builder.build().context(get::BuildRequestError)?;
        let rsp = client.execute(req).await.context(get::ExecuteRequestError)?;
        match rsp.status() {
            StatusCode::OK => {
                let body: bytes::Bytes = rsp.bytes().await.context(get::ResponseBytesError)?;
                let rsp_value: MsixPackage = serde_json::from_slice(&body).context(get::DeserializeError { body })?;
                Ok(rsp_value)
            }
            status_code => {
                // Non-success: body is expected to be an ARM `CloudError`.
                let body: bytes::Bytes = rsp.bytes().await.context(get::ResponseBytesError)?;
                let rsp_value: CloudError = serde_json::from_slice(&body).context(get::DeserializeError { body })?;
                get::DefaultResponse {
                    status_code,
                    value: rsp_value,
                }
                .fail()
            }
        }
    }
    /// Error type for [`get`].
    pub mod get {
        use crate::{models, models::*};
        use reqwest::StatusCode;
        use snafu::Snafu;
        #[derive(Debug, Snafu)]
        #[snafu(visibility(pub(crate)))]
        pub enum Error {
            DefaultResponse {
                status_code: StatusCode,
                value: models::CloudError,
            },
            BuildRequestError {
                source: reqwest::Error,
            },
            ExecuteRequestError {
                source: reqwest::Error,
            },
            ResponseBytesError {
                source: reqwest::Error,
            },
            DeserializeError {
                source: serde_json::Error,
                body: bytes::Bytes,
            },
            GetTokenError {
                source: azure_core::errors::AzureError,
            },
        }
    }
    /// Creates or replaces an MSIX package (PUT); `msix_package` is always
    /// sent as the JSON body. 200 = updated, 201 = created.
    pub async fn create_or_update(
        operation_config: &crate::OperationConfig,
        subscription_id: &str,
        resource_group_name: &str,
        host_pool_name: &str,
        msix_package_full_name: &str,
        msix_package: &MsixPackage,
    ) -> std::result::Result<create_or_update::Response, create_or_update::Error> {
        let client = &operation_config.client;
        let uri_str = &format!(
            "{}/subscriptions/{}/resourcegroups/{}/providers/Microsoft.DesktopVirtualization/hostPools/{}/msixPackages/{}",
            &operation_config.base_path, subscription_id, resource_group_name, host_pool_name, msix_package_full_name
        );
        let mut req_builder = client.put(uri_str);
        if let Some(token_credential) = &operation_config.token_credential {
            let token_response = token_credential
                .get_token(&operation_config.token_credential_resource)
                .await
                .context(create_or_update::GetTokenError)?;
            req_builder = req_builder.bearer_auth(token_response.token.secret());
        }
        req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]);
        req_builder = req_builder.json(msix_package);
        let req = req_builder.build().context(create_or_update::BuildRequestError)?;
        let rsp = client.execute(req).await.context(create_or_update::ExecuteRequestError)?;
        match rsp.status() {
            StatusCode::OK => {
                let body: bytes::Bytes = rsp.bytes().await.context(create_or_update::ResponseBytesError)?;
                let rsp_value: MsixPackage = serde_json::from_slice(&body).context(create_or_update::DeserializeError { body })?;
                Ok(create_or_update::Response::Ok200(rsp_value))
            }
            StatusCode::CREATED => {
                let body: bytes::Bytes = rsp.bytes().await.context(create_or_update::ResponseBytesError)?;
                let rsp_value: MsixPackage = serde_json::from_slice(&body).context(create_or_update::DeserializeError { body })?;
                Ok(create_or_update::Response::Created201(rsp_value))
            }
            status_code => {
                let body: bytes::Bytes = rsp.bytes().await.context(create_or_update::ResponseBytesError)?;
                let rsp_value: CloudError = serde_json::from_slice(&body).context(create_or_update::DeserializeError { body })?;
                create_or_update::DefaultResponse {
                    status_code,
                    value: rsp_value,
                }
                .fail()
            }
        }
    }
    /// Response/error types for [`create_or_update`].
    pub mod create_or_update {
        use crate::{models, models::*};
        use reqwest::StatusCode;
        use snafu::Snafu;
        /// 200 = existing package updated; 201 = new package created.
        #[derive(Debug)]
        pub enum Response {
            Ok200(MsixPackage),
            Created201(MsixPackage),
        }
        #[derive(Debug, Snafu)]
        #[snafu(visibility(pub(crate)))]
        pub enum Error {
            DefaultResponse {
                status_code: StatusCode,
                value: models::CloudError,
            },
            BuildRequestError {
                source: reqwest::Error,
            },
            ExecuteRequestError {
                source: reqwest::Error,
            },
            ResponseBytesError {
                source: reqwest::Error,
            },
            DeserializeError {
                source: serde_json::Error,
                body: bytes::Bytes,
            },
            GetTokenError {
                source: azure_core::errors::AzureError,
            },
        }
    }
    /// Patches an MSIX package; the optional patch document is sent as the
    /// JSON body when present.
    pub async fn update(
        operation_config: &crate::OperationConfig,
        subscription_id: &str,
        resource_group_name: &str,
        host_pool_name: &str,
        msix_package_full_name: &str,
        msix_package: Option<&MsixPackagePatch>,
    ) -> std::result::Result<MsixPackage, update::Error> {
        let client = &operation_config.client;
        let uri_str = &format!(
            "{}/subscriptions/{}/resourcegroups/{}/providers/Microsoft.DesktopVirtualization/hostPools/{}/msixPackages/{}",
            &operation_config.base_path, subscription_id, resource_group_name, host_pool_name, msix_package_full_name
        );
        let mut req_builder = client.patch(uri_str);
        if let Some(token_credential) = &operation_config.token_credential {
            let token_response = token_credential
                .get_token(&operation_config.token_credential_resource)
                .await
                .context(update::GetTokenError)?;
            req_builder = req_builder.bearer_auth(token_response.token.secret());
        }
        req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]);
        if let Some(msix_package) = msix_package {
            req_builder = req_builder.json(msix_package);
        }
        let req = req_builder.build().context(update::BuildRequestError)?;
        let rsp = client.execute(req).await.context(update::ExecuteRequestError)?;
        match rsp.status() {
            StatusCode::OK => {
                let body: bytes::Bytes = rsp.bytes().await.context(update::ResponseBytesError)?;
                let rsp_value: MsixPackage = serde_json::from_slice(&body).context(update::DeserializeError { body })?;
                Ok(rsp_value)
            }
            status_code => {
                let body: bytes::Bytes = rsp.bytes().await.context(update::ResponseBytesError)?;
                let rsp_value: CloudError = serde_json::from_slice(&body).context(update::DeserializeError { body })?;
                update::DefaultResponse {
                    status_code,
                    value: rsp_value,
                }
                .fail()
            }
        }
    }
    /// Error type for [`update`].
    pub mod update {
        use crate::{models, models::*};
        use reqwest::StatusCode;
        use snafu::Snafu;
        #[derive(Debug, Snafu)]
        #[snafu(visibility(pub(crate)))]
        pub enum Error {
            DefaultResponse {
                status_code: StatusCode,
                value: models::CloudError,
            },
            BuildRequestError {
                source: reqwest::Error,
            },
            ExecuteRequestError {
                source: reqwest::Error,
            },
            ResponseBytesError {
                source: reqwest::Error,
            },
            DeserializeError {
                source: serde_json::Error,
                body: bytes::Bytes,
            },
            GetTokenError {
                source: azure_core::errors::AzureError,
            },
        }
    }
    /// Deletes an MSIX package. 200 and 204 are both successful outcomes.
    pub async fn delete(
        operation_config: &crate::OperationConfig,
        subscription_id: &str,
        resource_group_name: &str,
        host_pool_name: &str,
        msix_package_full_name: &str,
    ) -> std::result::Result<delete::Response, delete::Error> {
        let client = &operation_config.client;
        let uri_str = &format!(
            "{}/subscriptions/{}/resourcegroups/{}/providers/Microsoft.DesktopVirtualization/hostPools/{}/msixPackages/{}",
            &operation_config.base_path, subscription_id, resource_group_name, host_pool_name, msix_package_full_name
        );
        let mut req_builder = client.delete(uri_str);
        if let Some(token_credential) = &operation_config.token_credential {
            let token_response = token_credential
                .get_token(&operation_config.token_credential_resource)
                .await
                .context(delete::GetTokenError)?;
            req_builder = req_builder.bearer_auth(token_response.token.secret());
        }
        req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]);
        let req = req_builder.build().context(delete::BuildRequestError)?;
        let rsp = client.execute(req).await.context(delete::ExecuteRequestError)?;
        match rsp.status() {
            StatusCode::OK => Ok(delete::Response::Ok200),
            StatusCode::NO_CONTENT => Ok(delete::Response::NoContent204),
            status_code => {
                let body: bytes::Bytes = rsp.bytes().await.context(delete::ResponseBytesError)?;
                let rsp_value: CloudError = serde_json::from_slice(&body).context(delete::DeserializeError { body })?;
                delete::DefaultResponse {
                    status_code,
                    value: rsp_value,
                }
                .fail()
            }
        }
    }
    /// Response/error types for [`delete`].
    pub mod delete {
        use crate::{models, models::*};
        use reqwest::StatusCode;
        use snafu::Snafu;
        /// Both variants indicate success; 204 means there was no body.
        #[derive(Debug)]
        pub enum Response {
            Ok200,
            NoContent204,
        }
        #[derive(Debug, Snafu)]
        #[snafu(visibility(pub(crate)))]
        pub enum Error {
            DefaultResponse {
                status_code: StatusCode,
                value: models::CloudError,
            },
            BuildRequestError {
                source: reqwest::Error,
            },
            ExecuteRequestError {
                source: reqwest::Error,
            },
            ResponseBytesError {
                source: reqwest::Error,
            },
            DeserializeError {
                source: serde_json::Error,
                body: bytes::Bytes,
            },
            GetTokenError {
                source: azure_core::errors::AzureError,
            },
        }
    }
    /// Lists all MSIX packages in a host pool.
    pub async fn list(
        operation_config: &crate::OperationConfig,
        subscription_id: &str,
        resource_group_name: &str,
        host_pool_name: &str,
    ) -> std::result::Result<MsixPackageList, list::Error> {
        let client = &operation_config.client;
        let uri_str = &format!(
            "{}/subscriptions/{}/resourcegroups/{}/providers/Microsoft.DesktopVirtualization/hostPools/{}/msixPackages",
            &operation_config.base_path, subscription_id, resource_group_name, host_pool_name
        );
        let mut req_builder = client.get(uri_str);
        if let Some(token_credential) = &operation_config.token_credential {
            let token_response = token_credential
                .get_token(&operation_config.token_credential_resource)
                .await
                .context(list::GetTokenError)?;
            req_builder = req_builder.bearer_auth(token_response.token.secret());
        }
        req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]);
        let req = req_builder.build().context(list::BuildRequestError)?;
        let rsp = client.execute(req).await.context(list::ExecuteRequestError)?;
        match rsp.status() {
            StatusCode::OK => {
                let body: bytes::Bytes = rsp.bytes().await.context(list::ResponseBytesError)?;
                let rsp_value: MsixPackageList = serde_json::from_slice(&body).context(list::DeserializeError { body })?;
                Ok(rsp_value)
            }
            status_code => {
                let body: bytes::Bytes = rsp.bytes().await.context(list::ResponseBytesError)?;
                let rsp_value: CloudError = serde_json::from_slice(&body).context(list::DeserializeError { body })?;
                list::DefaultResponse {
                    status_code,
                    value: rsp_value,
                }
                .fail()
            }
        }
    }
    /// Error type for [`list`].
    pub mod list {
        use crate::{models, models::*};
        use reqwest::StatusCode;
        use snafu::Snafu;
        #[derive(Debug, Snafu)]
        #[snafu(visibility(pub(crate)))]
        pub enum Error {
            DefaultResponse {
                status_code: StatusCode,
                value: models::CloudError,
            },
            BuildRequestError {
                source: reqwest::Error,
            },
            ExecuteRequestError {
                source: reqwest::Error,
            },
            ResponseBytesError {
                source: reqwest::Error,
            },
            DeserializeError {
                source: serde_json::Error,
                body: bytes::Bytes,
            },
            GetTokenError {
                source: azure_core::errors::AzureError,
            },
        }
    }
}
/// Operations on MSIX images within a host pool.
pub mod msix_images {
    use crate::models::*;
    use reqwest::StatusCode;
    use snafu::{ResultExt, Snafu};
    /// Expands an MSIX image (POST `expandMsixImage`); `msix_image_uri` is
    /// always sent as the JSON body. 200 deserializes to `ExpandMsixImageList`.
    pub async fn expand(
        operation_config: &crate::OperationConfig,
        subscription_id: &str,
        resource_group_name: &str,
        host_pool_name: &str,
        msix_image_uri: &MsixImageUri,
    ) -> std::result::Result<ExpandMsixImageList, expand::Error> {
        let client = &operation_config.client;
        let uri_str = &format!(
            "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.DesktopVirtualization/hostPools/{}/expandMsixImage",
            &operation_config.base_path, subscription_id, resource_group_name, host_pool_name
        );
        let mut req_builder = client.post(uri_str);
        // Bearer auth only when a token credential is configured.
        if let Some(token_credential) = &operation_config.token_credential {
            let token_response = token_credential
                .get_token(&operation_config.token_credential_resource)
                .await
                .context(expand::GetTokenError)?;
            req_builder = req_builder.bearer_auth(token_response.token.secret());
        }
        req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]);
        req_builder = req_builder.json(msix_image_uri);
        let req = req_builder.build().context(expand::BuildRequestError)?;
        let rsp = client.execute(req).await.context(expand::ExecuteRequestError)?;
        match rsp.status() {
            StatusCode::OK => {
                let body: bytes::Bytes = rsp.bytes().await.context(expand::ResponseBytesError)?;
                let rsp_value: ExpandMsixImageList = serde_json::from_slice(&body).context(expand::DeserializeError { body })?;
                Ok(rsp_value)
            }
            status_code => {
                // Non-success: body is expected to be an ARM `CloudError`.
                let body: bytes::Bytes = rsp.bytes().await.context(expand::ResponseBytesError)?;
                let rsp_value: CloudError = serde_json::from_slice(&body).context(expand::DeserializeError { body })?;
                expand::DefaultResponse {
                    status_code,
                    value: rsp_value,
                }
                .fail()
            }
        }
    }
    /// Error type for [`expand`].
    pub mod expand {
        use crate::{models, models::*};
        use reqwest::StatusCode;
        use snafu::Snafu;
        #[derive(Debug, Snafu)]
        #[snafu(visibility(pub(crate)))]
        pub enum Error {
            DefaultResponse {
                status_code: StatusCode,
                value: models::CloudError,
            },
            BuildRequestError {
                source: reqwest::Error,
            },
            ExecuteRequestError {
                source: reqwest::Error,
            },
            ResponseBytesError {
                source: reqwest::Error,
            },
            DeserializeError {
                source: serde_json::Error,
                body: bytes::Bytes,
            },
            GetTokenError {
                source: azure_core::errors::AzureError,
            },
        }
    }
}
|
use std::convert::TryFrom;
use std::net::SocketAddr;
use std::{io, sync::Arc};
use async_trait::async_trait;
use crate::{
app::dns_client::DnsClient,
proxy::{stream::SimpleProxyStream, OutboundHandler, ProxyStream, TcpOutboundHandler},
session::{Session, SocksAddr},
};
/// An outbound handler that chains multiple outbound actors together,
/// tunneling each hop's traffic through the previous one.
pub struct Handler {
    /// Actors applied in order: index 0 is the first hop dialed directly.
    pub actors: Vec<Arc<dyn OutboundHandler>>,
    /// Resolver used when this handler dials the first hop itself.
    pub dns_client: Arc<DnsClient>,
}
#[async_trait]
impl TcpOutboundHandler for Handler {
    fn name(&self) -> &str {
        super::NAME
    }

    /// The chain's connect address is that of the first actor providing one;
    /// the chain dials the first hop itself.
    fn tcp_connect_addr(&self) -> Option<(String, u16, SocketAddr)> {
        for a in self.actors.iter() {
            if let Some(addr) = a.tcp_connect_addr() {
                return Some(addr);
            }
        }
        None
    }

    /// Builds the proxy chain over `stream` (or over a freshly dialed
    /// connection to the first hop when `stream` is `None`).
    ///
    /// For each actor, the session destination is rewritten to the connect
    /// address of the *next* actor in the chain that advertises one, so each
    /// hop tunnels toward its successor; the final hop keeps the original
    /// destination from `sess`.
    async fn handle_tcp<'a>(
        &'a self,
        sess: &'a Session,
        stream: Option<Box<dyn ProxyStream>>,
    ) -> io::Result<Box<dyn ProxyStream>> {
        if let Some(mut stream) = stream {
            for (i, a) in self.actors.iter().enumerate() {
                let mut new_sess = sess.clone();
                for j in (i + 1)..self.actors.len() {
                    if let Some((connect_addr, port, _)) = self.actors[j].tcp_connect_addr() {
                        if let Ok(addr) = SocksAddr::try_from(format!("{}:{}", connect_addr, port))
                        {
                            new_sess.destination = addr;
                            // BUGFIX: stop at the first downstream actor with a
                            // connect address, matching the dial path below.
                            // Previously this loop kept going and the *last*
                            // actor's address overwrote the next hop's.
                            break;
                        }
                    }
                }
                stream = a.handle_tcp(&new_sess, Some(stream)).await?;
            }
            return Ok(Box::new(SimpleProxyStream(stream)));
        }
        // No inbound stream: dial the first actor that has a connect address,
        // then layer every actor over that connection.
        for a in self.actors.iter() {
            if let Some((connect_addr, port, bind_addr)) = a.tcp_connect_addr() {
                let mut stream = self
                    .dial_tcp_stream(self.dns_client.clone(), &bind_addr, &connect_addr, &port)
                    .await?;
                for (i, a) in self.actors.iter().enumerate() {
                    let mut new_sess = sess.clone();
                    for j in (i + 1)..self.actors.len() {
                        if let Some((connect_addr, port, _)) = self.actors[j].tcp_connect_addr() {
                            if let Ok(addr) =
                                SocksAddr::try_from(format!("{}:{}", connect_addr, port))
                            {
                                new_sess.destination = addr;
                                break;
                            }
                        }
                    }
                    stream = a.handle_tcp(&new_sess, Some(stream)).await?;
                }
                return Ok(Box::new(SimpleProxyStream(stream)));
            }
        }
        Err(io::Error::new(io::ErrorKind::InvalidInput, "invalid chain"))
    }
}
|
use aoc_lib::AocImplementation;
use itertools::Itertools;
use image;
/// Entry point: runs day 8 through the shared AoC harness.
fn main() {
    let day = Day8{};
    day.start(8);
}
struct Day8 {}
impl AocImplementation<u8> for Day8 {
    /// Parses the puzzle input into a flat list of digits (0-9).
    ///
    /// Non-digit characters (notably a trailing newline) are skipped; the
    /// previous `split("") ... parse().unwrap()` approach panicked on any
    /// whitespace in the input.
    fn process_input(&self, input: &str) -> Vec<u8> {
        input
            .chars()
            .filter(|c| c.is_ascii_digit())
            .map(|c| c as u8 - b'0')
            .collect()
    }

    /// Renders the image (part 2) to `code.png` and returns a dummy 0.
    ///
    /// Layers are composited back-to-front: value 2 is transparent, 0 is
    /// black, 1 is white. (A leftover per-pixel debug `println!` was
    /// removed — it dominated the runtime.)
    fn execute(&self, input: Vec<u8>) -> Option<i32> {
        let width = 25;
        let height = 6;
        // Reverse so nearer layers are painted last and overwrite farther ones.
        let layers = input.chunks(width * height).rev();
        let mut img_buf = image::ImageBuffer::new(width as u32, height as u32);
        for layer in layers {
            for (index, b) in layer.iter().enumerate() {
                let (x, y) = get_coords_from_index(width, height, index);
                // Transparent pixel: keep whatever a farther layer drew.
                if *b == 2 {
                    continue;
                }
                let pixel = img_buf.get_pixel_mut(x, y);
                *pixel = match b {
                    0 => image::Rgb([0, 0, 0]),
                    1 => image::Rgb([255, 255, 255]),
                    _ => panic!("Unknown color: {}", b),
                }
            }
        }
        img_buf.save("code.png").unwrap();
        Some(0)
    }
}
/// Maps a flat, row-major layer index to `(x, y)` pixel coordinates for a
/// `width`-wide image: `x` is the column, `y` is the row.
///
/// `height` is accepted for symmetry with the image dimensions but is not
/// needed by the computation.
fn get_coords_from_index(width: usize, height: usize, index: usize) -> (u32, u32) {
    let _ = height; // unused; kept to preserve the signature
    ((index % width) as u32, (index / width) as u32)
}
#[cfg(test)]
mod tests {
    use super::*;

    /// Index 0 maps to the top-left pixel.
    #[test]
    fn correct_coordinates_0_0() {
        assert_eq!(get_coords_from_index(25, 6, 0), (0, 0));
    }

    /// The final index of a 25x6 layer maps to the bottom-right pixel.
    #[test]
    fn correct_coordinates_25_6() {
        let last = 25 * 6 - 1;
        assert_eq!(get_coords_from_index(25, 6, last), (24, 5));
    }
}
#[doc = r"Register block"]
#[repr(C)]
pub struct RegisterBlock {
    // Padding for the unmapped 0x00..0x04 region preceding the channel clusters.
    _reserved0: [u8; 0x04],
    #[doc = "0x04..0x44 - Cluster CH%s, containing ?CR1, ?CR2, ?FRCR, ?SLOTR, ?IM, ?SR, ?CLRFR, ?DR"]
    pub ch: [CH; 2],
}
impl RegisterBlock {
    #[doc = "0x04..0x24 - Cluster CH%s, containing ?CR1, ?CR2, ?FRCR, ?SLOTR, ?IM, ?SR, ?CLRFR, ?DR"]
    #[inline(always)]
    pub fn cha(&self) -> &CH {
        // Channel A is the first cluster instance.
        &self.ch[0]
    }
    #[doc = "0x24..0x44 - Cluster CH%s, containing ?CR1, ?CR2, ?FRCR, ?SLOTR, ?IM, ?SR, ?CLRFR, ?DR"]
    #[inline(always)]
    pub fn chb(&self) -> &CH {
        // Channel B is the second cluster instance.
        &self.ch[1]
    }
}
#[doc = "Cluster CH%s, containing ?CR1, ?CR2, ?FRCR, ?SLOTR, ?IM, ?SR, ?CLRFR, ?DR"]
pub use self::ch::CH;
#[doc = r"Cluster"]
#[doc = "Cluster CH%s, containing ?CR1, ?CR2, ?FRCR, ?SLOTR, ?IM, ?SR, ?CLRFR, ?DR"]
pub mod ch;
|
use actix_web::{delete, get, post, put, web};
use actix_web::web::Json;
use sqlx::SqlitePool;
use crate::{common};
use crate::common::{DynamicResult, make_api_response};
use crate::errors::ApiError;
use crate::middleware::Auth;
use crate::runs::{Run, UpdateRun};
/// JSON payload returned by `GET /runs`.
#[derive(Serialize)]
pub struct ListRunsResult {
    /// Outcome marker (set to `common::SUCCESS_MSG` by `list_all`).
    pub result: String,
    /// Number of runs returned; mirrors `runs.len()`.
    pub count: i32,
    pub runs: Vec<Run>,
}
/// `GET /runs` — returns every run together with a count and status marker.
#[get("")]
async fn list_all(db_pool: web::Data<SqlitePool>) -> Result<Json<ListRunsResult>, ApiError> {
    let runs = Run::find_all(db_pool.get_ref()).await?;
    let count = runs.len() as i32;
    Ok(Json(ListRunsResult {
        result: common::SUCCESS_MSG.to_string(),
        count,
        runs,
    }))
}
#[get("/{id}")]
async fn details(id: web::Path<i32>, db_pool: web::Data<SqlitePool>) -> Result<Json<Run>, ApiError> {
let run_opt = Run::find_by_id(id.into_inner(), db_pool.get_ref()).await?;
match run_opt {
Some(run) => Ok(Json(run)),
None => Err(ApiError::NotFound("Run not found".to_string()))
}
}
/// `POST /runs` — inserts a run and responds with the newly assigned id.
#[post("")]
async fn create(run: web::Json<Run>, db_pool: web::Data<SqlitePool>) -> DynamicResult {
    let new_id = Run::create(run.into_inner(), db_pool.get_ref()).await?;
    let extra = vec![("id".to_string(), new_id.to_string())];
    Ok(Json(make_api_response(true, extra)))
}
/// `PUT /runs/{id}` — applies `run_data` to the run and returns the updated record.
#[put("/{id}")]
async fn update(id: web::Path<i32>, run_data: web::Json<UpdateRun>, db_pool: web::Data<SqlitePool>) -> Result<Json<Run>, ApiError> {
    let result = Run::update(id.into_inner(), run_data.into_inner(), db_pool.get_ref()).await?;
    // Idiomatic tail expression instead of a trailing `return ...;`.
    Ok(Json(result))
}
/// `DELETE /runs/{id}` — removes the run and returns a bare success response.
#[delete("/{id}")]
async fn delete(id: web::Path<i32>, db_pool: web::Data<SqlitePool>) -> DynamicResult {
    Run::delete(id.into_inner(), db_pool.get_ref()).await?;
    // Idiomatic tail expression instead of a trailing `return ...;`.
    Ok(Json(make_api_response(true, vec![])))
}
/// Registers this module's routes under `/runs`, all behind the `Auth`
/// middleware. Called when configuring a new `Application`.
///
/// NOTE: registration order matters for route matching — keep `list_all`
/// (`""`) before `details` (`"/{id}"`).
pub fn init(cfg: &mut web::ServiceConfig) {
    cfg.service(
        web::scope("/runs")
            .wrap(Auth)
            .service(list_all)
            .service(details)
            .service(create)
            .service(update)
            .service(delete)
    );
}
use assembly_core::nom::{
bytes::complete::take,
combinator::{cond, map, map_res},
multi::length_count,
number::complete::{le_u32, le_u64, le_u8},
IResult,
};
use std::convert::TryFrom;
use super::core::{
FileVersion, SceneRef, SceneTransition, SceneTransitionInfo, SceneTransitionPoint, ZoneFile,
};
use assembly_core::nom_ext::{count_2, count_5};
use assembly_core::parser::{parse_quat, parse_u8_string, parse_vec3f, parse_world_id};
use assembly_core::types::Placement3D;
/// Reads the little-endian u32 zone-file version from the input.
pub fn parse_file_version(input: &[u8]) -> IResult<&[u8], FileVersion> {
    map(le_u32, FileVersion::from)(input)
}
/// Reads the optional file revision, present only for versions >= 0x24.
pub fn parse_file_revision(input: &[u8], version: FileVersion) -> IResult<&[u8], Option<u32>> {
    cond(version.id() >= 0x24, le_u32)(input)
}
/// Reads the optional spawn placement, present only for versions >= 0x26.
pub fn parse_spawn_point<'a>(
    input: &'a [u8],
    version: FileVersion,
) -> IResult<&'a [u8], Option<Placement3D>> {
    // Position vector first, then rotation quaternion.
    let inner = |i: &'a [u8]| {
        let (i, a) = parse_vec3f(i)?;
        let (i, b) = parse_quat(i)?;
        Ok((i, Placement3D { pos: a, rot: b }))
    };
    cond(version.id() >= 0x26, inner)(input)
}
/// Selects the scene-count parser for the given file version.
///
/// Versions before 0x25 store the count as a single byte; 0x25 and later
/// widen it to a little-endian u32.
pub fn parse_scene_count(version: FileVersion) -> fn(input: &[u8]) -> IResult<&[u8], usize> {
    fn narrow(input: &[u8]) -> IResult<&[u8], usize> {
        map_res(le_u8, usize::try_from)(input)
    }
    fn wide(input: &[u8]) -> IResult<&[u8], usize> {
        map_res(le_u32, usize::try_from)(input)
    }
    if version.id() < 0x25 {
        narrow
    } else {
        wide
    }
}
/// Reads a single scene reference: file name, id, layer, display name.
fn parse_scene_ref(input: &[u8]) -> IResult<&[u8], SceneRef> {
    let (input, file_name) = parse_u8_string(input)?;
    let (input, id) = le_u32(input)?;
    let (input, layer) = le_u32(input)?;
    let (input, name) = parse_u8_string(input)?;
    // Skips 3 bytes after the name — presumably padding/unused; the format
    // spec would need to confirm their meaning.
    let (input, _) = take(3usize)(input)?;
    Ok((
        input,
        SceneRef {
            file_name,
            id,
            layer,
            name,
        },
    ))
}
/// Reads one scene transition point: a u64 scene id followed by a position.
fn parse_scene_transition_point(input: &[u8]) -> IResult<&[u8], SceneTransitionPoint> {
    let (input, a) = le_u64(input)?;
    let (input, b) = parse_vec3f(input)?;
    Ok((
        input,
        SceneTransitionPoint {
            scene_id: a,
            point: b,
        },
    ))
}
/// Selects the transition-info parser for the given file version.
///
/// Versions 0x22..=0x26 store five transition points per transition; all
/// other versions (<= 0x21 or >= 0x27) store two.
fn parse_scene_transition_info(
    version: FileVersion,
) -> fn(&[u8]) -> IResult<&[u8], SceneTransitionInfo> {
    fn x22_to_x26(i: &[u8]) -> IResult<&[u8], SceneTransitionInfo> {
        map(
            count_5(parse_scene_transition_point),
            SceneTransitionInfo::from,
        )(i)
    }
    fn post_x27(i: &[u8]) -> IResult<&[u8], SceneTransitionInfo> {
        map(
            count_2(parse_scene_transition_point),
            SceneTransitionInfo::from,
        )(i)
    }
    // Note: the two-point form is used both before 0x22 and from 0x27 on.
    if version.id() <= 0x21 || version.id() >= 0x27 {
        post_x27
    } else {
        x22_to_x26
    }
}
/// Returns a parser for one scene transition. The transition name is only
/// stored for versions < 0x25; the point layout is version-dependent too.
fn parse_scene_transition(
    version: FileVersion,
) -> impl Fn(&[u8]) -> IResult<&[u8], SceneTransition> + Copy {
    let sti_parser = parse_scene_transition_info(version);
    move |i: &[u8]| {
        let (i, name) = cond(version.id() < 0x25, parse_u8_string)(i)?;
        let (i, points) = sti_parser(i)?;
        Ok((i, SceneTransition { name, points }))
    }
}
/// Builds a parser for the optional, u32-count-prefixed transition list,
/// present only from format version 0x20.
fn parse_scene_transitions(
    version: FileVersion,
) -> impl Fn(&[u8]) -> IResult<&[u8], Option<Vec<SceneTransition>>> {
    let one_transition = parse_scene_transition(version);
    move |input: &[u8]| cond(version.id() >= 0x20, length_count(le_u32, one_transition))(input)
}
/// Parses a complete zone file: version header, optional revision and spawn
/// data, the scene list, map metadata strings, optional scene transitions and
/// a raw path-data blob. Field presence depends on `file_version`.
#[allow(clippy::many_single_char_names)]
pub fn parse_zone_file(input: &[u8]) -> IResult<&[u8], ZoneFile<Vec<u8>>> {
    let (input, file_version) = parse_file_version(input)?;
    // Select the version-dependent sub-parsers once, up front.
    let sc_parser = parse_scene_count(file_version);
    let st_parser = parse_scene_transitions(file_version);
    let (input, file_revision) = parse_file_revision(input, file_version)?;
    let (input, world_id) = parse_world_id(input)?;
    let (input, spawn_point) = parse_spawn_point(input, file_version)?;
    let (input, scene_refs) = length_count(sc_parser, parse_scene_ref)(input)?;
    // NOTE(review): meaning of this string is unknown here; it is stored
    // verbatim in the `something` field.
    let (input, g) = parse_u8_string(input)?;
    let (input, map_filename) = parse_u8_string(input)?;
    let (input, map_name) = parse_u8_string(input)?;
    let (input, map_description) = parse_u8_string(input)?;
    let (input, scene_transitions) = st_parser(input)?;
    // Path data: u32-length-prefixed blob of raw bytes.
    // NOTE(review): assumes `FileVersion::min(0x23)` means "id() >= 0x23";
    // other gates in this file compare `id()` directly — confirm against the
    // FileVersion definition.
    let (input, path_data) = cond(file_version.min(0x23), length_count(le_u32, le_u8))(input)?;
    Ok((
        input,
        ZoneFile {
            file_version,
            file_revision,
            world_id,
            spawn_point,
            scene_refs,
            something: g,
            map_filename,
            map_name,
            map_description,
            scene_transitions,
            path_data,
        },
    ))
}
// Smoke tests for the version-gated parsers: a field is consumed only when the
// file version meets its threshold, otherwise the input is left untouched and
// `None` is returned.
#[test]
fn test_parse() {
    use assembly_core::nom::error::ErrorKind;
    // Revision present at exactly the 0x24 threshold ...
    assert_eq!(
        parse_file_revision(&[20, 0, 0, 0], FileVersion::from(0x24)),
        Ok((&[][..], Some(20)))
    );
    // ... and absent (input unconsumed) one version below it.
    assert_eq!(
        parse_file_revision(&[20, 0, 0, 0], FileVersion::from(0x23)),
        Ok((&[20, 0, 0, 0][..], None))
    );
    // Scene count is u8 before 0x25, u32 from 0x25 onwards.
    assert_eq!(
        parse_scene_count(FileVersion::from(0x24))(&[20]),
        Ok((&[][..], 20))
    );
    assert_eq!(
        parse_scene_count(FileVersion::from(0x25))(&[20, 0, 0, 0]),
        Ok((&[][..], 20))
    );
    // Length-prefixed string: 1-byte length followed by the bytes.
    assert_eq!(
        parse_u8_string::<(&[u8], ErrorKind)>(&[2, 65, 66]),
        Ok((&[][..], String::from("AB")))
    );
}
|
//! StarkNet L2 sequencer client.
mod builder;
pub mod error;
pub mod reply;
pub mod request;
use self::request::{add_transaction::ContractDefinition, Call};
use crate::{
core::{
BlockId, CallSignatureElem, Chain, ClassHash, ConstructorParam, ContractAddress,
ContractAddressSalt, Fee, StarknetTransactionHash, StorageAddress, StorageValue,
TransactionNonce, TransactionVersion,
},
rpc::types::BlockHashOrTag,
sequencer::error::SequencerError,
};
use reqwest::Url;
use std::{fmt::Debug, result::Result, time::Duration};
/// Sequencer gateway API implemented by [`Client`].
///
/// Under `cfg(test)` a `MockClientApi` is generated via `mockall::automock`.
#[cfg_attr(test, mockall::automock)]
#[async_trait::async_trait]
pub trait ClientApi {
    /// Gets a block by hash, number or tag (latest/pending).
    async fn block(&self, block: BlockId) -> Result<reply::MaybePendingBlock, SequencerError>;

    /// Performs a `call` on a contract's function at the given block; the
    /// result is not stored in L2 (as opposed to `invoke`).
    async fn call(
        &self,
        payload: request::Call,
        block_hash: BlockHashOrTag,
    ) -> Result<reply::Call, SequencerError>;

    /// Gets the full contract definition as raw bytes.
    async fn full_contract(
        &self,
        contract_addr: ContractAddress,
    ) -> Result<bytes::Bytes, SequencerError>;

    /// Gets the class for a particular class hash as raw bytes.
    async fn class_by_hash(&self, class_hash: ClassHash) -> Result<bytes::Bytes, SequencerError>;

    /// Gets the class hash for a particular contract address.
    async fn class_hash_at(
        &self,
        contract_address: ContractAddress,
    ) -> Result<ClassHash, SequencerError>;

    /// Gets the storage value at `key` for a particular contract at the
    /// given block.
    async fn storage(
        &self,
        contract_addr: ContractAddress,
        key: StorageAddress,
        block_hash: BlockHashOrTag,
    ) -> Result<StorageValue, SequencerError>;

    /// Gets a transaction by hash.
    async fn transaction(
        &self,
        transaction_hash: StarknetTransactionHash,
    ) -> Result<reply::Transaction, SequencerError>;

    /// Gets a transaction's status by hash.
    async fn transaction_status(
        &self,
        transaction_hash: StarknetTransactionHash,
    ) -> Result<reply::TransactionStatus, SequencerError>;

    /// Gets the state update for a block.
    async fn state_update(&self, block: BlockId) -> Result<reply::StateUpdate, SequencerError>;

    /// Gets the addresses of the Ethereum contracts crucial to StarkNet
    /// operation.
    async fn eth_contract_addresses(&self) -> Result<reply::EthContractAddresses, SequencerError>;

    /// Submits an invoke transaction to the gateway.
    async fn add_invoke_transaction(
        &self,
        function_invocation: Call,
        max_fee: Fee,
        version: TransactionVersion,
    ) -> Result<reply::add_transaction::InvokeResponse, SequencerError>;

    /// Submits a declare transaction to the gateway.
    #[allow(clippy::too_many_arguments)]
    async fn add_declare_transaction(
        &self,
        contract_definition: ContractDefinition,
        sender_address: ContractAddress,
        max_fee: Fee,
        signature: Vec<CallSignatureElem>,
        nonce: TransactionNonce,
        version: TransactionVersion,
        token: Option<String>,
    ) -> Result<reply::add_transaction::DeclareResponse, SequencerError>;

    /// Submits a deploy transaction to the gateway.
    async fn add_deploy_transaction(
        &self,
        contract_address_salt: ContractAddressSalt,
        constructor_calldata: Vec<ConstructorParam>,
        contract_definition: ContractDefinition,
        token: Option<String>,
    ) -> Result<reply::add_transaction::DeployResponse, SequencerError>;
}
/// StarkNet sequencer client using REST API.
///
/// Retry is performed on __all__ types of errors __except for__
/// [StarkNet specific errors](crate::sequencer::error::StarknetError).
///
/// Initial backoff time is 30 seconds and saturates at 1 hour:
///
/// `backoff [secs] = min((2 ^ N) * 15, 3600) [secs]`
///
/// where `N` is the consecutive retry iteration number `{1, 2, ...}`.
#[derive(Debug, Clone)]
pub struct Client {
    /// This client is internally refcounted, so cloning `Client` is cheap.
    inner: reqwest::Client,
    /// StarkNet sequencer URL all requests are made relative to.
    sequencer_url: Url,
}
impl Client {
    /// Retry policy applied to read (feeder gateway) requests.
    #[cfg(not(test))]
    const RETRY: builder::Retry = builder::Retry::Enabled;
    /// Retries are disabled under `cfg(test)` — presumably so mock-server
    /// tests don't sit in the exponential backoff loop; confirm before
    /// relying on this.
    #[cfg(test)]
    const RETRY: builder::Retry = builder::Retry::Disabled;

    /// Creates a new Sequencer client for the given chain.
    pub fn new(chain: Chain) -> reqwest::Result<Self> {
        let url = match chain {
            // unwraps are safe: both URLs are valid string literals.
            Chain::Mainnet => Url::parse("https://alpha-mainnet.starknet.io/").unwrap(),
            Chain::Goerli => Url::parse("https://alpha4.starknet.io/").unwrap(),
        };
        Self::with_url(url)
    }

    /// Creates a client pointed at the StarkNet integration environment
    /// (test builds only).
    #[cfg(test)]
    pub(crate) fn integration() -> reqwest::Result<Self> {
        let integration_url = Url::parse("https://external.integration.starknet.io").unwrap();
        Self::with_url(integration_url)
    }

    /// Create a Sequencer client for the given [Url].
    ///
    /// Requests time out after 120 seconds and carry the pathfinder
    /// user-agent string.
    pub fn with_url(url: Url) -> reqwest::Result<Self> {
        Ok(Self {
            inner: reqwest::Client::builder()
                .timeout(Duration::from_secs(120))
                .user_agent(crate::consts::USER_AGENT)
                .build()?,
            sequencer_url: url,
        })
    }

    /// Starts building a request against the configured sequencer base URL.
    fn request(&self) -> builder::Request<'_, builder::stage::Gateway> {
        builder::Request::builder(&self.inner, self.sequencer_url.clone())
    }

    /// Returns the [network chain](Chain) this client is operating on by
    /// fetching the genesis block and comparing its hash to known values.
    pub async fn chain(&self) -> anyhow::Result<Chain> {
        use crate::consts::{GOERLI_GENESIS_HASH, MAINNET_GENESIS_HASH};
        use crate::core::StarknetBlockNumber;
        // unwrap is safe as `block_hash` is always present for non-pending blocks.
        let genesis_hash = self
            .block(StarknetBlockNumber::GENESIS.into())
            .await?
            .as_block()
            .expect("Genesis block should not be pending")
            .block_hash;
        match genesis_hash {
            goerli if goerli == GOERLI_GENESIS_HASH => Ok(Chain::Goerli),
            mainnet if mainnet == MAINNET_GENESIS_HASH => Ok(Chain::Mainnet),
            other => Err(anyhow::anyhow!("Unknown genesis block hash: {}", other.0)),
        }
    }
}
#[async_trait::async_trait]
impl ClientApi for Client {
    /// Gets a block by hash, number or tag.
    #[tracing::instrument(skip(self))]
    async fn block(&self, block: BlockId) -> Result<reply::MaybePendingBlock, SequencerError> {
        self.request()
            .feeder_gateway()
            .get_block()
            .with_block(block)
            .with_retry(Self::RETRY)
            .get()
            .await
    }

    /// Performs a `call` on contract's function. Call result is not stored in L2, as opposed to `invoke`.
    #[tracing::instrument(skip(self))]
    async fn call(
        &self,
        payload: request::Call,
        block_hash: BlockHashOrTag,
    ) -> Result<reply::Call, SequencerError> {
        self.request()
            .feeder_gateway()
            .call_contract()
            .with_block(block_hash)
            .with_retry(Self::RETRY)
            .post_with_json(&payload)
            .await
    }

    /// Gets full contract definition.
    #[tracing::instrument(skip(self))]
    async fn full_contract(
        &self,
        contract_addr: ContractAddress,
    ) -> Result<bytes::Bytes, SequencerError> {
        self.request()
            .feeder_gateway()
            .get_full_contract()
            .with_contract_address(contract_addr)
            .with_retry(Self::RETRY)
            .get_as_bytes()
            .await
    }

    /// Gets class for a particular class hash.
    #[tracing::instrument(skip(self))]
    async fn class_by_hash(&self, class_hash: ClassHash) -> Result<bytes::Bytes, SequencerError> {
        self.request()
            .feeder_gateway()
            .get_class_by_hash()
            .with_class_hash(class_hash)
            .with_retry(Self::RETRY)
            .get_as_bytes()
            .await
    }

    /// Gets class hash for a particular contract address.
    #[tracing::instrument(skip(self))]
    async fn class_hash_at(
        &self,
        contract_address: ContractAddress,
    ) -> Result<ClassHash, SequencerError> {
        self.request()
            .feeder_gateway()
            .get_class_hash_at()
            .with_contract_address(contract_address)
            .with_retry(Self::RETRY)
            .get()
            .await
    }

    /// Gets storage value associated with a `key` for a particular contract.
    #[tracing::instrument(skip(self))]
    async fn storage(
        &self,
        contract_addr: ContractAddress,
        key: StorageAddress,
        block_hash: BlockHashOrTag,
    ) -> Result<StorageValue, SequencerError> {
        self.request()
            .feeder_gateway()
            .get_storage_at()
            .with_contract_address(contract_addr)
            .with_storage_address(key)
            .with_block(block_hash)
            .with_retry(Self::RETRY)
            .get()
            .await
    }

    /// Gets transaction by hash.
    #[tracing::instrument(skip(self))]
    async fn transaction(
        &self,
        transaction_hash: StarknetTransactionHash,
    ) -> Result<reply::Transaction, SequencerError> {
        self.request()
            .feeder_gateway()
            .get_transaction()
            .with_transaction_hash(transaction_hash)
            .with_retry(Self::RETRY)
            .get()
            .await
    }

    /// Gets transaction status by transaction hash.
    #[tracing::instrument(skip(self))]
    async fn transaction_status(
        &self,
        transaction_hash: StarknetTransactionHash,
    ) -> Result<reply::TransactionStatus, SequencerError> {
        self.request()
            .feeder_gateway()
            .get_transaction_status()
            .with_transaction_hash(transaction_hash)
            .with_retry(Self::RETRY)
            .get()
            .await
    }

    /// Gets the state update for a block.
    #[tracing::instrument(skip(self))]
    async fn state_update(&self, block: BlockId) -> Result<reply::StateUpdate, SequencerError> {
        self.request()
            .feeder_gateway()
            .get_state_update()
            .with_block(block)
            .with_retry(Self::RETRY)
            .get()
            .await
    }

    /// Gets addresses of the Ethereum contracts crucial to Starknet operation.
    #[tracing::instrument(skip(self))]
    async fn eth_contract_addresses(&self) -> Result<reply::EthContractAddresses, SequencerError> {
        self.request()
            .feeder_gateway()
            .get_contract_addresses()
            .with_retry(Self::RETRY)
            .get()
            .await
    }

    /// Adds a transaction invoking a contract.
    #[tracing::instrument(skip(self))]
    async fn add_invoke_transaction(
        &self,
        call: Call,
        max_fee: Fee,
        version: TransactionVersion,
    ) -> Result<reply::add_transaction::InvokeResponse, SequencerError> {
        let req = request::add_transaction::AddTransaction::Invoke(
            request::add_transaction::InvokeFunction {
                contract_address: call.contract_address,
                entry_point_selector: call.entry_point_selector,
                calldata: call.calldata,
                max_fee,
                version,
                signature: call.signature,
            },
        );
        // Note that we don't do retries here.
        // This method is used to proxy an add transaction operation from the JSON-RPC
        // API to the sequencer. Retries should be implemented in the JSON-RPC
        // client instead.
        self.request()
            .gateway()
            .add_transaction()
            .with_retry(builder::Retry::Disabled)
            .post_with_json(&req)
            .await
    }

    /// Adds a transaction declaring a class.
    #[tracing::instrument(skip(self))]
    async fn add_declare_transaction(
        &self,
        contract_definition: ContractDefinition,
        sender_address: ContractAddress,
        max_fee: Fee,
        signature: Vec<CallSignatureElem>,
        nonce: TransactionNonce,
        version: TransactionVersion,
        token: Option<String>,
    ) -> Result<reply::add_transaction::DeclareResponse, SequencerError> {
        let req =
            request::add_transaction::AddTransaction::Declare(request::add_transaction::Declare {
                contract_class: contract_definition,
                sender_address,
                max_fee,
                signature,
                nonce,
                version,
            });
        // Note that we don't do retries here.
        // This method is used to proxy an add transaction operation from the JSON-RPC
        // API to the sequencer. Retries should be implemented in the JSON-RPC
        // client instead.
        self.request()
            .gateway()
            .add_transaction()
            // mainnet requires a token (but testnet does not so its optional).
            .with_optional_token(token.as_deref())
            .with_retry(builder::Retry::Disabled)
            .post_with_json(&req)
            .await
    }

    /// Deploys a contract.
    #[tracing::instrument(skip(self, contract_definition))]
    async fn add_deploy_transaction(
        &self,
        contract_address_salt: ContractAddressSalt,
        constructor_calldata: Vec<ConstructorParam>,
        contract_definition: ContractDefinition,
        token: Option<String>,
    ) -> Result<reply::add_transaction::DeployResponse, SequencerError> {
        let req =
            request::add_transaction::AddTransaction::Deploy(request::add_transaction::Deploy {
                contract_address_salt,
                contract_definition,
                constructor_calldata,
            });
        // Note that we don't do retries here.
        // This method is used to proxy an add transaction operation from the JSON-RPC
        // API to the sequencer. Retries should be implemented in the JSON-RPC
        // client instead.
        self.request()
            .gateway()
            .add_transaction()
            // mainnet requires a token (but testnet does not so its optional).
            .with_optional_token(token.as_deref())
            .with_retry(builder::Retry::Disabled)
            .post_with_json(&req)
            .await
    }
}
#[cfg(test)]
pub mod test_utils {
    //! Shared fixtures for the sequencer tests: known-good and deliberately
    //! invalid block/transaction/contract identifiers.
    use crate::{
        core::{
            CallParam, ClassHash, ContractAddress, EntryPoint, StarknetBlockHash,
            StarknetBlockNumber, StarknetTransactionHash, StarknetTransactionIndex, StorageAddress,
        },
        rpc::types::{BlockHashOrTag, BlockNumberOrTag},
        starkhash,
    };
    use stark_hash::StarkHash;

    // Block selectors.
    pub const GENESIS_BLOCK_NUMBER: BlockNumberOrTag =
        BlockNumberOrTag::Number(StarknetBlockNumber::GENESIS);
    pub const INVALID_BLOCK_NUMBER: BlockNumberOrTag =
        BlockNumberOrTag::Number(StarknetBlockNumber::MAX);
    pub const GENESIS_BLOCK_HASH: BlockHashOrTag = BlockHashOrTag::Hash(StarknetBlockHash(
        starkhash!("07d328a71faf48c5c3857e99f20a77b18522480956d1cd5bff1ff2df3c8b427b"),
    ));
    pub const INVALID_BLOCK_HASH: BlockHashOrTag = BlockHashOrTag::Hash(StarknetBlockHash(
        starkhash!("06d328a71faf48c5c3857e99f20a77b18522480956d1cd5bff1ff2df3c8b427b"),
    ));
    pub const PRE_DEPLOY_CONTRACT_BLOCK_HASH: BlockHashOrTag =
        BlockHashOrTag::Hash(StarknetBlockHash(starkhash!(
            "05ef884a311df4339c8df791ce19bf305d7cf299416666b167bc56dd2d1f435f"
        )));
    pub const INVOKE_CONTRACT_BLOCK_HASH: BlockHashOrTag = BlockHashOrTag::Hash(StarknetBlockHash(
        starkhash!("03871c8a0c3555687515a07f365f6f5b1d8c2ae953f7844575b8bde2b2efed27"),
    ));

    // Transaction fixtures.
    pub const VALID_TX_HASH: StarknetTransactionHash = StarknetTransactionHash(starkhash!(
        "0493d8fab73af67e972788e603aee18130facd3c7685f16084ecd98b07153e24"
    ));
    pub const INVALID_TX_HASH: StarknetTransactionHash = StarknetTransactionHash(starkhash!(
        "0393d8fab73af67e972788e603aee18130facd3c7685f16084ecd98b07153e24"
    ));

    // Contract, entry point and storage fixtures.
    pub const VALID_CONTRACT_ADDR: ContractAddress = ContractAddress(starkhash!(
        "06fbd460228d843b7fbef670ff15607bf72e19fa94de21e29811ada167b4ca39"
    ));
    pub const INVALID_CONTRACT_ADDR: ContractAddress = ContractAddress(starkhash!(
        "05fbd460228d843b7fbef670ff15607bf72e19fa94de21e29811ada167b4ca39"
    ));
    pub const VALID_ENTRY_POINT: EntryPoint = EntryPoint(starkhash!(
        "0362398bec32bc0ebb411203221a35a0301193a96f317ebe5e40be9f60d15320"
    ));
    pub const INVALID_ENTRY_POINT: EntryPoint = EntryPoint(StarkHash::ZERO);
    pub const INVALID_TX_INDEX: StarknetTransactionIndex = StarknetTransactionIndex(u64::MAX);
    pub const VALID_KEY: StorageAddress = StorageAddress(starkhash!(
        "0206F38F7E4F15E87567361213C28F235CCCDAA1D7FD34C9DB1DFE9489C6A091"
    ));
    lazy_static::lazy_static! {
        // Decimal rendering of VALID_KEY, as used in URL query parameters.
        pub static ref VALID_KEY_DEC: String = crate::rpc::serde::starkhash_to_dec_str(&VALID_KEY.0);
    }
    pub const VALID_CALL_DATA: [CallParam; 1] = [CallParam(starkhash!("04d2"))];
    /// Class hash for VALID_CONTRACT_ADDR
    pub const VALID_CLASS_HASH: ClassHash = ClassHash(starkhash!(
        "021a7f43387573b68666669a0ed764252ce5367708e696e31967764a90b429c2"
    ));
    pub const INVALID_CLASS_HASH: ClassHash = ClassHash(starkhash!(
        "031a7f43387573b68666669a0ed764252ce5367708e696e31967764a90b429c2"
    ));
}
#[cfg(test)]
mod tests {
use super::{error::StarknetErrorCode, test_utils::*, *};
use crate::{
core::{StarknetBlockHash, StarknetBlockNumber},
rpc::types::Tag,
};
use assert_matches::assert_matches;
use stark_hash::StarkHash;
// Test-only Display so fixtures can be formatted into URL strings.
// Buffer is `2 + 64` bytes — presumably a "0x" prefix plus 64 hex digits;
// confirm against `as_hex_str`.
impl std::fmt::Display for crate::core::ContractAddress {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        let mut buf = [0u8; 2 + 64];
        let s = self.0.as_hex_str(&mut buf);
        f.write_str(s)
    }
}
// Test-only Display so fixtures can be formatted into URL strings; renders
// the inner felt via `as_hex_str` into a fixed stack buffer.
impl std::fmt::Display for crate::core::StarknetTransactionHash {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        let mut buf = [0u8; 2 + 64];
        let s = self.0.as_hex_str(&mut buf);
        f.write_str(s)
    }
}
// Test-only Display so fixtures can be formatted into URL strings; renders
// the inner felt via `as_hex_str` into a fixed stack buffer.
impl std::fmt::Display for crate::core::ClassHash {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        let mut buf = [0u8; 2 + 64];
        let s = self.0.as_hex_str(&mut buf);
        f.write_str(s)
    }
}
/// Helper macro which creates a successful response tuple
/// which can then be used by the [setup] function.
///
/// The macro takes the name of the fixture file, relative to
/// `../fixtures/sequencer/`.
/// The fixture file should be a text file containing valid UTF8 characters.
///
/// The HTTP status code value of the tuple is `200` (`OK`).
macro_rules! response {
    ($file_name:literal) => {
        (
            // Fixture body is embedded at compile time.
            include_str!(concat!("../fixtures/sequencer/", $file_name)),
            200,
        )
    };
}
impl StarknetErrorCode {
    /// Helper function which allows for easy creation of a response tuple
    /// that contains a [StarknetError] for a given [StarknetErrorCode].
    ///
    /// The response tuple can then be used by the [setup] function.
    ///
    /// The `message` field is always an empty string.
    /// The HTTP status code for this response is always `500` (`Internal Server Error`).
    fn into_response(self) -> (String, u16) {
        use crate::sequencer::error::StarknetError;
        let e = StarknetError {
            code: self,
            message: "".to_string(),
        };
        (serde_json::to_string(&e).unwrap(), 500)
    }
}
/// Use to initialize a [sequencer::Client] test case. The function does one of the following things:
///
/// 1. if `SEQUENCER_TESTS_LIVE_API` environment variable is set:
///    - creates a [sequencer::Client] instance which connects to the Goerli
///      sequencer API
///
/// 2. otherwise:
///    - initializes a local mock server instance with the given expected
///      url paths & queries and respective fixtures for replies
///    - creates a [sequencer::Client] instance which connects to the mock server
///
/// The returned `JoinHandle` (if any) keeps the mock server alive; tests hold
/// it as `_jh` for the duration of the test.
fn setup<S1, S2, const N: usize>(
    url_paths_queries_and_response_fixtures: [(S1, (S2, u16)); N],
) -> (Option<tokio::task::JoinHandle<()>>, Client)
where
    S1: std::convert::AsRef<str>
        + std::fmt::Display
        + std::fmt::Debug
        + std::cmp::PartialEq
        + Send
        + Sync
        + Clone
        + 'static,
    S2: std::string::ToString + Send + Sync + Clone + 'static,
{
    if std::env::var_os("SEQUENCER_TESTS_LIVE_API").is_some() {
        (None, Client::new(Chain::Goerli).unwrap())
    } else {
        use warp::Filter;
        // `warp::query::raw()` rejects requests without a query string, so
        // wrap it to yield `None` in that case instead of failing the route.
        let opt_query_raw = warp::query::raw()
            .map(Some)
            .or_else(|_| async { Ok::<(Option<String>,), std::convert::Infallible>((None,)) });
        let path = warp::any().and(warp::path::full()).and(opt_query_raw).map(
            move |full_path: warp::path::FullPath, raw_query: Option<String>| {
                // Reassemble "path?query" to match against the expected list.
                let actual_full_path_and_query = match raw_query {
                    Some(some_raw_query) => {
                        format!("{}?{}", full_path.as_str(), some_raw_query.as_str())
                    }
                    None => full_path.as_str().to_owned(),
                };
                match url_paths_queries_and_response_fixtures
                    .iter()
                    .find(|x| x.0.as_ref() == actual_full_path_and_query)
                {
                    Some((_, (body, status))) => http::response::Builder::new()
                        .status(*status)
                        .body(body.to_string()),
                    // An unexpected request is a test bug — fail loudly.
                    None => panic!(
                        "Actual url path and query {} not found in the expected {:?}",
                        actual_full_path_and_query,
                        url_paths_queries_and_response_fixtures
                            .iter()
                            .map(|(expected_path, _)| expected_path)
                            .collect::<Vec<_>>()
                    ),
                }
            },
        );
        // Ephemeral port keeps parallel tests from colliding.
        let (addr, serve_fut) = warp::serve(path).bind_ephemeral(([127, 0, 0, 1], 0));
        let server_handle = tokio::spawn(serve_fut);
        let client =
            Client::with_url(reqwest::Url::parse(&format!("http://{}", addr)).unwrap())
                .unwrap();
        (Some(server_handle), client)
    }
}
// Verifies every request carries the pathfinder user-agent header in the
// expected "starknet-pathfinder/<version>" form.
#[test_log::test(tokio::test)]
async fn client_user_agent() {
    use crate::core::StarknetBlockTimestamp;
    use crate::sequencer::reply::{Block, Status};
    use std::convert::Infallible;
    use warp::Filter;

    // The mock endpoint asserts on the header, then returns a minimal block
    // so the client call completes.
    let filter = warp::header::optional("user-agent").and_then(
        |user_agent: Option<String>| async move {
            let user_agent = user_agent.expect("user-agent set");
            let (name, version) = user_agent.split_once('/').unwrap();

            assert_eq!(name, "starknet-pathfinder");
            assert_eq!(version, env!("VERGEN_GIT_SEMVER_LIGHTWEIGHT"));

            Ok::<_, Infallible>(warp::reply::json(&Block {
                block_hash: StarknetBlockHash(StarkHash::ZERO),
                block_number: StarknetBlockNumber::GENESIS,
                gas_price: None,
                parent_block_hash: StarknetBlockHash(StarkHash::ZERO),
                sequencer_address: None,
                state_root: crate::core::GlobalRoot(StarkHash::ZERO),
                status: Status::NotReceived,
                timestamp: StarknetBlockTimestamp(0),
                transaction_receipts: vec![],
                transactions: vec![],
                starknet_version: None,
            }))
        },
    );

    let (shutdown_tx, shutdown_rx) = tokio::sync::oneshot::channel();
    let (addr, run_srv) =
        warp::serve(filter).bind_with_graceful_shutdown(([127, 0, 0, 1], 0), async {
            shutdown_rx.await.ok();
        });
    let server_handle = tokio::spawn(run_srv);

    let url = format!("http://{}", addr);
    let url = Url::parse(&url).unwrap();
    let client = Client::with_url(url).unwrap();

    // Response content is irrelevant; the assertions live in the filter above.
    let _ = client.block(BlockId::Latest).await;

    shutdown_tx.send(()).unwrap();
    server_handle.await.unwrap();
}
// Fetching the same block by hash and by number must yield identical replies.
mod block_matches_by_hash_on {
    use super::*;
    use crate::starkhash;

    #[tokio::test]
    async fn genesis() {
        let (_jh, client) = setup([
            (
                format!("/feeder_gateway/get_block?blockHash={}", GENESIS_BLOCK_HASH),
                response!("0.9.0/block/genesis.json"),
            ),
            (
                format!(
                    "/feeder_gateway/get_block?blockNumber={}",
                    GENESIS_BLOCK_NUMBER
                ),
                response!("0.9.0/block/genesis.json"),
            ),
        ]);
        let by_hash = client
            .block(BlockId::from(GENESIS_BLOCK_HASH))
            .await
            .unwrap();
        let by_number = client
            .block(BlockId::from(GENESIS_BLOCK_NUMBER))
            .await
            .unwrap();
        assert_eq!(by_hash, by_number);
    }

    #[tokio::test]
    async fn specific_block() {
        let (_jh, client) = setup([
            (
                "/feeder_gateway/get_block?blockHash=0x40ffdbd9abbc4fc64652c50db94a29bce65c183316f304a95df624de708e746",
                response!("0.9.0/block/231579.json")
            ),
            (
                "/feeder_gateway/get_block?blockNumber=231579",
                response!("0.9.0/block/231579.json")
            ),
        ]);
        let by_hash = client
            .block(
                StarknetBlockHash(starkhash!(
                    "040ffdbd9abbc4fc64652c50db94a29bce65c183316f304a95df624de708e746"
                ))
                .into(),
            )
            .await
            .unwrap();
        let by_number = client
            .block(StarknetBlockNumber(231579).into())
            .await
            .unwrap();
        assert_eq!(by_hash, by_number);
    }
}
// `get_block` endpoint tests: tag selectors, error mapping and the
// `starknet_version` field introduced in sequencer 0.9.1.
mod block {
    use super::*;
    use pretty_assertions::assert_eq;

    #[tokio::test]
    async fn latest() {
        use crate::core::BlockId;
        let (_jh, client) = setup([(
            "/feeder_gateway/get_block?blockNumber=latest",
            response!("0.9.0/block/231579.json"),
        )]);
        client.block(BlockId::Latest).await.unwrap();
    }

    #[tokio::test]
    async fn pending() {
        use crate::core::BlockId;
        let (_jh, client) = setup([(
            "/feeder_gateway/get_block?blockNumber=pending",
            response!("0.9.0/block/pending.json"),
        )]);
        client.block(BlockId::Pending).await.unwrap();
    }

    #[test_log::test(tokio::test)]
    async fn invalid_hash() {
        let (_jh, client) = setup([(
            format!("/feeder_gateway/get_block?blockHash={}", INVALID_BLOCK_HASH),
            StarknetErrorCode::BlockNotFound.into_response(),
        )]);
        let error = client
            .block(BlockId::from(INVALID_BLOCK_HASH))
            .await
            .unwrap_err();
        assert_matches!(
            error,
            SequencerError::StarknetError(e) => assert_eq!(e.code, StarknetErrorCode::BlockNotFound)
        );
    }

    #[test_log::test(tokio::test)]
    async fn invalid_number() {
        let (_jh, client) = setup([(
            format!(
                "/feeder_gateway/get_block?blockNumber={}",
                INVALID_BLOCK_NUMBER
            ),
            StarknetErrorCode::BlockNotFound.into_response(),
        )]);
        let error = client
            .block(BlockId::from(INVALID_BLOCK_NUMBER))
            .await
            .unwrap_err();
        assert_matches!(
            error,
            SequencerError::StarknetError(e) => assert_eq!(e.code, StarknetErrorCode::BlockNotFound)
        );
    }

    #[tokio::test]
    async fn with_starknet_version_added_in_0_9_1() {
        use crate::sequencer::reply::MaybePendingBlock;
        let (_jh, client) = setup([
            (
                "/feeder_gateway/get_block?blockNumber=192844",
                response!("integration/block/192844.json"),
            ),
            (
                "/feeder_gateway/get_block?blockNumber=pending",
                response!("integration/block/pending.json"),
            ),
        ]);

        let expected_version = "0.9.1";

        // Both full and pending blocks should carry the version string.
        let block = client
            .block(StarknetBlockNumber(192844).into())
            .await
            .unwrap();
        assert_eq!(
            block
                .as_block()
                .expect("should not had been a pending block")
                .starknet_version
                .as_deref(),
            Some(expected_version)
        );

        let block = client.block(BlockId::Pending).await.unwrap();
        match block {
            MaybePendingBlock::Pending(p) => {
                assert_eq!(p.starknet_version.as_deref(), Some(expected_version))
            }
            MaybePendingBlock::Block(_) => panic!("should not had been a ready block"),
        }
    }
}
// `call_contract` endpoint tests: StarkNet error mapping for bad inputs and
// happy paths for each block selector (hash, latest, pending).
mod call {
    use super::*;
    use pretty_assertions::assert_eq;

    #[tokio::test]
    async fn invalid_entry_point() {
        let (_jh, client) = setup([(
            "/feeder_gateway/call_contract?blockNumber=latest",
            StarknetErrorCode::EntryPointNotFound.into_response(),
        )]);
        let error = client
            .call(
                request::Call {
                    calldata: VALID_CALL_DATA.to_vec(),
                    contract_address: VALID_CONTRACT_ADDR,
                    entry_point_selector: INVALID_ENTRY_POINT,
                    signature: vec![],
                },
                BlockHashOrTag::Tag(Tag::Latest),
            )
            .await
            .unwrap_err();
        assert_matches!(
            error,
            SequencerError::StarknetError(e) => assert_eq!(e.code, StarknetErrorCode::EntryPointNotFound)
        );
    }

    #[tokio::test]
    async fn invalid_contract_address() {
        let (_jh, client) = setup([(
            "/feeder_gateway/call_contract?blockNumber=latest",
            StarknetErrorCode::UninitializedContract.into_response(),
        )]);
        let error = client
            .call(
                request::Call {
                    calldata: VALID_CALL_DATA.to_vec(),
                    contract_address: INVALID_CONTRACT_ADDR,
                    entry_point_selector: VALID_ENTRY_POINT,
                    signature: vec![],
                },
                BlockHashOrTag::Tag(Tag::Latest),
            )
            .await
            .unwrap_err();
        assert_matches!(
            error,
            SequencerError::StarknetError(e) => assert_eq!(e.code, StarknetErrorCode::UninitializedContract)
        );
    }

    #[tokio::test]
    async fn invalid_call_data() {
        let (_jh, client) = setup([(
            format!(
                "/feeder_gateway/call_contract?blockHash={}",
                INVOKE_CONTRACT_BLOCK_HASH
            ),
            StarknetErrorCode::TransactionFailed.into_response(),
        )]);
        let error = client
            .call(
                request::Call {
                    calldata: vec![],
                    contract_address: VALID_CONTRACT_ADDR,
                    entry_point_selector: VALID_ENTRY_POINT,
                    signature: vec![],
                },
                INVOKE_CONTRACT_BLOCK_HASH,
            )
            .await
            .unwrap_err();
        assert_matches!(
            error,
            SequencerError::StarknetError(e) => assert_eq!(e.code, StarknetErrorCode::TransactionFailed)
        );
    }

    #[tokio::test]
    async fn uninitialized_contract() {
        // At the genesis block the contract is not yet deployed.
        let (_jh, client) = setup([(
            format!(
                "/feeder_gateway/call_contract?blockHash={}",
                GENESIS_BLOCK_HASH
            ),
            StarknetErrorCode::UninitializedContract.into_response(),
        )]);
        let error = client
            .call(
                request::Call {
                    calldata: vec![],
                    contract_address: VALID_CONTRACT_ADDR,
                    entry_point_selector: VALID_ENTRY_POINT,
                    signature: vec![],
                },
                GENESIS_BLOCK_HASH,
            )
            .await
            .unwrap_err();
        assert_matches!(
            error,
            SequencerError::StarknetError(e) => assert_eq!(e.code, StarknetErrorCode::UninitializedContract)
        );
    }

    #[tokio::test]
    async fn invalid_block_hash() {
        let (_jh, client) = setup([(
            format!(
                "/feeder_gateway/call_contract?blockHash={}",
                INVALID_BLOCK_HASH
            ),
            StarknetErrorCode::BlockNotFound.into_response(),
        )]);
        let error = client
            .call(
                request::Call {
                    calldata: VALID_CALL_DATA.to_vec(),
                    contract_address: VALID_CONTRACT_ADDR,
                    entry_point_selector: VALID_ENTRY_POINT,
                    signature: vec![],
                },
                INVALID_BLOCK_HASH,
            )
            .await
            .unwrap_err();
        assert_matches!(
            error,
            SequencerError::StarknetError(e) => assert_eq!(e.code, StarknetErrorCode::BlockNotFound)
        );
    }

    #[tokio::test]
    async fn latest_invoke_block() {
        let (_jh, client) = setup([(
            format!(
                "/feeder_gateway/call_contract?blockHash={}",
                INVOKE_CONTRACT_BLOCK_HASH
            ),
            (r#"{"result":[]}"#, 200),
        )]);
        client
            .call(
                request::Call {
                    calldata: VALID_CALL_DATA.to_vec(),
                    contract_address: VALID_CONTRACT_ADDR,
                    entry_point_selector: VALID_ENTRY_POINT,
                    signature: vec![],
                },
                INVOKE_CONTRACT_BLOCK_HASH,
            )
            .await
            .unwrap();
    }

    #[tokio::test]
    async fn latest_block() {
        let (_jh, client) = setup([(
            "/feeder_gateway/call_contract?blockNumber=latest",
            (r#"{"result":[]}"#, 200),
        )]);
        client
            .call(
                request::Call {
                    calldata: VALID_CALL_DATA.to_vec(),
                    contract_address: VALID_CONTRACT_ADDR,
                    entry_point_selector: VALID_ENTRY_POINT,
                    signature: vec![],
                },
                BlockHashOrTag::Tag(Tag::Latest),
            )
            .await
            .unwrap();
    }

    #[tokio::test]
    async fn pending_block() {
        let (_jh, client) = setup([(
            "/feeder_gateway/call_contract?blockNumber=pending",
            (r#"{"result":[]}"#, 200),
        )]);
        client
            .call(
                request::Call {
                    calldata: VALID_CALL_DATA.to_vec(),
                    contract_address: VALID_CONTRACT_ADDR,
                    entry_point_selector: VALID_ENTRY_POINT,
                    signature: vec![],
                },
                BlockHashOrTag::Tag(Tag::Pending),
            )
            .await
            .unwrap();
    }
}
// `get_full_contract` endpoint tests: error mapping and that the raw bytes
// returned are valid JSON.
mod full_contract {
    use super::*;
    use pretty_assertions::assert_eq;

    #[test_log::test(tokio::test)]
    async fn invalid_contract_address() {
        let (_jh, client) = setup([(
            format!(
                "/feeder_gateway/get_full_contract?contractAddress={}",
                INVALID_CONTRACT_ADDR
            ),
            StarknetErrorCode::UninitializedContract.into_response(),
        )]);
        let error = client
            .full_contract(INVALID_CONTRACT_ADDR)
            .await
            .unwrap_err();
        assert_matches!(
            error,
            SequencerError::StarknetError(e) => assert_eq!(e.code, StarknetErrorCode::UninitializedContract)
        );
    }

    #[tokio::test]
    async fn success() {
        let (_jh, client) = setup([(
            format!(
                "/feeder_gateway/get_full_contract?contractAddress={}",
                VALID_CONTRACT_ADDR
            ),
            (r#"{"hello":"world"}"#, 200),
        )]);
        let bytes = client.full_contract(VALID_CONTRACT_ADDR).await.unwrap();
        // The client returns raw bytes; they should parse as JSON.
        serde_json::from_slice::<serde_json::value::Value>(&bytes).unwrap();
    }
}
// `get_class_by_hash` endpoint tests: error mapping and raw-bytes JSON reply.
mod class_by_hash {
    use super::*;
    use pretty_assertions::assert_eq;

    #[test_log::test(tokio::test)]
    async fn invalid_class_hash() {
        let (_jh, client) = setup([(
            format!(
                "/feeder_gateway/get_class_by_hash?classHash={}",
                INVALID_CLASS_HASH
            ),
            StarknetErrorCode::UndeclaredClass.into_response(),
        )]);
        let error = client.class_by_hash(INVALID_CLASS_HASH).await.unwrap_err();
        assert_matches!(
            error,
            SequencerError::StarknetError(e) => assert_eq!(e.code, StarknetErrorCode::UndeclaredClass)
        );
    }

    #[tokio::test]
    async fn success() {
        let (_jh, client) = setup([(
            format!(
                "/feeder_gateway/get_class_by_hash?classHash={}",
                VALID_CLASS_HASH
            ),
            (r#"{"hello":"world"}"#, 200),
        )]);
        let bytes = client.class_by_hash(VALID_CLASS_HASH).await.unwrap();
        // The client returns raw bytes; they should parse as JSON.
        serde_json::from_slice::<serde_json::value::Value>(&bytes).unwrap();
    }
}
// `get_class_hash_at` endpoint tests: error mapping and happy path.
mod class_hash {
    use super::*;
    use pretty_assertions::assert_eq;

    #[test_log::test(tokio::test)]
    async fn invalid_contract_address() {
        let (_jh, client) = setup([(
            format!(
                "/feeder_gateway/get_class_hash_at?contractAddress={}",
                INVALID_CONTRACT_ADDR
            ),
            StarknetErrorCode::UninitializedContract.into_response(),
        )]);
        let error = client
            .class_hash_at(INVALID_CONTRACT_ADDR)
            .await
            .unwrap_err();
        assert_matches!(
            error,
            SequencerError::StarknetError(e) => assert_eq!(e.code, StarknetErrorCode::UninitializedContract)
        );
    }

    #[tokio::test]
    async fn success() {
        let (_jh, client) = setup([(
            format!(
                "/feeder_gateway/get_class_hash_at?contractAddress={}",
                VALID_CONTRACT_ADDR
            ),
            (r#""0x01""#, 200),
        )]);
        client.class_hash_at(VALID_CONTRACT_ADDR).await.unwrap();
    }
}
// `get_storage_at` endpoint tests. Note the sequencer returns `0x0` (rather
// than an error) for unknown contracts and keys.
mod storage {
    use super::*;
    use crate::starkhash;
    use pretty_assertions::assert_eq;

    #[test_log::test(tokio::test)]
    async fn invalid_contract_address() {
        let (_jh, client) = setup([(
            format!(
                "/feeder_gateway/get_storage_at?contractAddress={}&key={}&blockNumber=latest",
                INVALID_CONTRACT_ADDR, *VALID_KEY_DEC
            ),
            (r#""0x0""#, 200),
        )]);
        let result = client
            .storage(
                INVALID_CONTRACT_ADDR,
                VALID_KEY,
                BlockHashOrTag::Tag(Tag::Latest),
            )
            .await
            .unwrap();
        assert_eq!(result, StorageValue(StarkHash::ZERO));
    }

    #[tokio::test]
    async fn invalid_key() {
        let (_jh, client) = setup([(
            format!(
                "/feeder_gateway/get_storage_at?contractAddress={}&key=0&blockNumber=latest",
                VALID_CONTRACT_ADDR
            ),
            (r#""0x0""#, 200),
        )]);
        let result = client
            .storage(
                VALID_CONTRACT_ADDR,
                StorageAddress(StarkHash::ZERO),
                BlockHashOrTag::Tag(Tag::Latest),
            )
            .await
            .unwrap();
        assert_eq!(result, StorageValue(StarkHash::ZERO));
    }

    #[tokio::test]
    async fn invalid_block_hash() {
        let (_jh, client) = setup([(
            format!(
                "/feeder_gateway/get_storage_at?contractAddress={}&key={}&blockHash={}",
                VALID_CONTRACT_ADDR, *VALID_KEY_DEC, INVALID_BLOCK_HASH
            ),
            StarknetErrorCode::BlockNotFound.into_response(),
        )]);
        let error = client
            .storage(VALID_CONTRACT_ADDR, VALID_KEY, INVALID_BLOCK_HASH)
            .await
            .unwrap_err();
        assert_matches!(
            error,
            SequencerError::StarknetError(e) => assert_eq!(e.code, StarknetErrorCode::BlockNotFound)
        );
    }

    #[tokio::test]
    async fn latest_invoke_block() {
        let (_jh, client) = setup([(
            format!(
                "/feeder_gateway/get_storage_at?contractAddress={}&key={}&blockHash={}",
                VALID_CONTRACT_ADDR, *VALID_KEY_DEC, INVOKE_CONTRACT_BLOCK_HASH
            ),
            (r#""0x1e240""#, 200),
        )]);
        let result = client
            .storage(VALID_CONTRACT_ADDR, VALID_KEY, INVOKE_CONTRACT_BLOCK_HASH)
            .await
            .unwrap();
        assert_eq!(result, StorageValue(starkhash!("01e240")));
    }

    #[tokio::test]
    async fn latest_block() {
        let (_jh, client) = setup([(
            format!(
                "/feeder_gateway/get_storage_at?contractAddress={}&key={}&blockNumber=latest",
                VALID_CONTRACT_ADDR, *VALID_KEY_DEC,
            ),
            (r#""0x1e240""#, 200),
        )]);
        let result = client
            .storage(
                VALID_CONTRACT_ADDR,
                VALID_KEY,
                BlockHashOrTag::Tag(Tag::Latest),
            )
            .await
            .unwrap();
        assert_eq!(result, StorageValue(starkhash!("01e240")));
    }

    #[tokio::test]
    async fn pending_block() {
        let (_jh, client) = setup([(
            format!(
                "/feeder_gateway/get_storage_at?contractAddress={}&key={}&blockNumber=pending",
                VALID_CONTRACT_ADDR, *VALID_KEY_DEC
            ),
            (r#""0x1e240""#, 200),
        )]);
        let result = client
            .storage(
                VALID_CONTRACT_ADDR,
                VALID_KEY,
                BlockHashOrTag::Tag(Tag::Pending),
            )
            .await
            .unwrap();
        assert_eq!(result, StorageValue(starkhash!("01e240")));
    }
}
/// Tests for `Client::transaction` (full transaction lookup by hash).
mod transaction {
    use super::{reply::Status, *};
    use crate::starkhash;
    use pretty_assertions::assert_eq;

    // NOTE(review): this test uses the *invoke* fixture and the same tx hash
    // as `invoke` below — it looks copy-pasted; confirm whether a dedicated
    // declare fixture should be used here instead.
    #[tokio::test]
    async fn declare() {
        let (_jh, client) = setup([(
            "/feeder_gateway/get_transaction?transactionHash=0x587d93f2339b7f2beda040187dbfcb9e076ce4a21eb8d15ae64819718817fbe",
            response!("0.9.0/txn/invoke.json"),
        )]);
        assert_eq!(
            client
                .transaction(StarknetTransactionHash(starkhash!(
                    "0587d93f2339b7f2beda040187dbfcb9e076ce4a21eb8d15ae64819718817fbe"
                )))
                .await
                .unwrap()
                .status,
            Status::AcceptedOnL1
        );
    }

    /// A deploy transaction fixture parses and reports its L1 status.
    #[tokio::test]
    async fn deploy() {
        let (_jh, client) = setup([(
            "/feeder_gateway/get_transaction?transactionHash=0x3d7623443283d9a0cec946492db78b06d57642a551745ddfac8d3f1f4fcc2a8",
            response!("0.9.0/txn/deploy.json"),
        )]);
        assert_eq!(
            client
                .transaction(StarknetTransactionHash(starkhash!(
                    "03d7623443283d9a0cec946492db78b06d57642a551745ddfac8d3f1f4fcc2a8"
                )))
                .await
                .unwrap()
                .status,
            Status::AcceptedOnL1
        );
    }

    /// An invoke transaction fixture parses and reports its L1 status.
    #[tokio::test]
    async fn invoke() {
        let (_jh, client) = setup([(
            "/feeder_gateway/get_transaction?transactionHash=0x587d93f2339b7f2beda040187dbfcb9e076ce4a21eb8d15ae64819718817fbe",
            response!("0.9.0/txn/invoke.json"),
        )]);
        assert_eq!(
            client
                .transaction(StarknetTransactionHash(starkhash!(
                    "0587d93f2339b7f2beda040187dbfcb9e076ce4a21eb8d15ae64819718817fbe"
                )))
                .await
                .unwrap()
                .status,
            Status::AcceptedOnL1
        );
    }

    /// An unknown hash is not an error: the gateway answers `NOT_RECEIVED`.
    #[tokio::test]
    async fn invalid_hash() {
        let (_jh, client) = setup([(
            format!(
                "/feeder_gateway/get_transaction?transactionHash={}",
                INVALID_TX_HASH
            ),
            (r#"{"status": "NOT_RECEIVED"}"#, 200),
        )]);
        assert_eq!(
            client.transaction(INVALID_TX_HASH).await.unwrap().status,
            Status::NotReceived,
        );
    }
}
/// Tests for `Client::transaction_status` (lightweight status-only lookup).
mod transaction_status {
    use super::{reply::Status, *};
    use crate::starkhash;

    /// A known transaction hash resolves to `ACCEPTED_ON_L1`.
    #[tokio::test]
    async fn accepted() {
        let (_jh, client) = setup([(
            "/feeder_gateway/get_transaction_status?transactionHash=0x79cc07feed4f4046276aea23ddcea8b2f956d14f2bfe97382fa333a11169205",
            response!("0.9.0/txn/status.json"),
        )]);
        assert_eq!(
            client
                .transaction_status(StarknetTransactionHash(starkhash!(
                    "079cc07feed4f4046276aea23ddcea8b2f956d14f2bfe97382fa333a11169205"
                )))
                .await
                .unwrap()
                .tx_status,
            Status::AcceptedOnL1
        );
    }

    /// An unknown hash yields `NOT_RECEIVED` rather than an error.
    #[tokio::test]
    async fn invalid_hash() {
        let (_jh, client) = setup([(
            format!(
                "/feeder_gateway/get_transaction_status?transactionHash={}",
                INVALID_TX_HASH
            ),
            (r#"{"tx_status": "NOT_RECEIVED"}"#, 200),
        )]);
        assert_eq!(
            client
                .transaction_status(INVALID_TX_HASH)
                .await
                .unwrap()
                .tx_status,
            Status::NotReceived
        );
    }
}
/// Verifies that `state_update` queried by block number and by block hash
/// yields the same state diff for the same block.
mod state_update_matches_by_hash_on {
    use super::{
        reply::{
            state_update::{DeployedContract, StorageDiff},
            StateUpdate,
        },
        *,
    };
    use crate::{
        core::{ContractAddress, GlobalRoot},
        starkhash,
    };
    use pretty_assertions::assert_eq;
    use std::collections::{BTreeSet, HashMap};

    /// State diff with its collections re-ordered into canonical (sorted)
    /// containers so two diffs can be compared with `assert_eq`.
    #[derive(Clone, Debug, PartialEq, Eq)]
    pub struct OrderedStateDiff {
        pub storage_diffs: HashMap<ContractAddress, BTreeSet<StorageDiff>>,
        pub deployed_contracts: BTreeSet<DeployedContract>,
    }

    /// Order-insensitive counterpart of [`StateUpdate`] for equality checks.
    #[derive(Clone, Debug, PartialEq, Eq)]
    pub struct OrderedStateUpdate {
        pub new_root: GlobalRoot,
        pub old_root: GlobalRoot,
        pub state_diff: OrderedStateDiff,
    }

    impl From<StateUpdate> for OrderedStateUpdate {
        fn from(s: StateUpdate) -> Self {
            Self {
                new_root: s.new_root,
                old_root: s.old_root,
                state_diff: OrderedStateDiff {
                    // Collect per-contract diff vectors into sorted sets so
                    // server-side ordering differences are irrelevant.
                    storage_diffs: s
                        .state_diff
                        .storage_diffs
                        .into_iter()
                        .map(|(addr, diffs)| (addr, diffs.into_iter().collect()))
                        .collect(),
                    deployed_contracts: s.state_diff.deployed_contracts.into_iter().collect(),
                },
            }
        }
    }

    /// Genesis block: by-number and by-hash queries must agree.
    #[tokio::test]
    async fn genesis() {
        let (_jh, client) = setup([
            (
                "/feeder_gateway/get_state_update?blockNumber=0".to_string(),
                response!("0.9.0/state_update/genesis.json"),
            ),
            (
                format!(
                    "/feeder_gateway/get_state_update?blockHash={}",
                    GENESIS_BLOCK_HASH
                ),
                response!("0.9.0/state_update/genesis.json"),
            ),
        ]);
        let by_number: OrderedStateUpdate = client
            .state_update(BlockId::from(GENESIS_BLOCK_NUMBER))
            .await
            .unwrap()
            .into();
        let by_hash: OrderedStateUpdate = client
            .state_update(BlockId::from(GENESIS_BLOCK_HASH))
            .await
            .unwrap()
            .into();
        assert_eq!(by_number, by_hash);
    }

    /// A later, non-trivial block: by-number and by-hash queries must agree.
    #[tokio::test]
    async fn specific_block() {
        let (_jh, client) = setup([
            (
                "/feeder_gateway/get_state_update?blockNumber=231579",
                response!("0.9.0/state_update/231579.json"),
            ),
            (
                "/feeder_gateway/get_state_update?blockHash=0x40ffdbd9abbc4fc64652c50db94a29bce65c183316f304a95df624de708e746",
                response!("0.9.0/state_update/231579.json"),
            ),
        ]);
        let by_number: OrderedStateUpdate = client
            .state_update(StarknetBlockNumber(231579).into())
            .await
            .unwrap()
            .into();
        let by_hash: OrderedStateUpdate = client
            .state_update(
                StarknetBlockHash(starkhash!(
                    "040ffdbd9abbc4fc64652c50db94a29bce65c183316f304a95df624de708e746"
                ))
                .into(),
            )
            .await
            .unwrap()
            .into();
        assert_eq!(by_number, by_hash);
    }
}
/// Tests for `Client::state_update` over the supported `BlockId` variants.
mod state_update {
    use super::*;

    /// An unknown block number maps to `BlockNotFound`.
    #[test_log::test(tokio::test)]
    async fn invalid_number() {
        let (_jh, client) = setup([(
            format!(
                "/feeder_gateway/get_state_update?blockNumber={}",
                INVALID_BLOCK_NUMBER
            ),
            StarknetErrorCode::BlockNotFound.into_response(),
        )]);
        let error = client
            .state_update(BlockId::from(INVALID_BLOCK_NUMBER))
            .await
            .unwrap_err();
        assert_matches!(
            error,
            SequencerError::StarknetError(e) => assert_eq!(e.code, StarknetErrorCode::BlockNotFound)
        );
    }

    /// An unknown block hash maps to `BlockNotFound`.
    #[tokio::test]
    async fn invalid_hash() {
        let (_jh, client) = setup([(
            format!(
                "/feeder_gateway/get_state_update?blockHash={}",
                INVALID_BLOCK_HASH
            ),
            StarknetErrorCode::BlockNotFound.into_response(),
        )]);
        let error = client
            .state_update(BlockId::from(INVALID_BLOCK_HASH))
            .await
            .unwrap_err();
        assert_matches!(
            error,
            SequencerError::StarknetError(e) => assert_eq!(e.code, StarknetErrorCode::BlockNotFound)
        );
    }

    /// `BlockId::Latest` serializes as `blockNumber=latest`.
    #[tokio::test]
    async fn latest() {
        let (_jh, client) = setup([(
            "/feeder_gateway/get_state_update?blockNumber=latest",
            response!("0.9.0/state_update/231579.json"),
        )]);
        client.state_update(BlockId::Latest).await.unwrap();
    }

    /// `BlockId::Pending` serializes as `blockNumber=pending`.
    #[tokio::test]
    async fn pending() {
        let (_jh, client) = setup([(
            "/feeder_gateway/get_state_update?blockNumber=pending",
            response!("0.9.0/state_update/pending.json"),
        )]);
        client.state_update(BlockId::Pending).await.unwrap();
    }

    /// Regression: 0.9.1 introduced the optional `declared_contracts` field;
    /// older parsing must not reject it, and the field must round-trip.
    #[tokio::test]
    async fn by_number_with_declared_contracts_in_0_9_1() {
        let (_jh, client) = setup([(
            "/feeder_gateway/get_state_update?blockNumber=193137",
            response!("integration/state_update/193137.json"),
        )]);
        let parsed = client
            .state_update(StarknetBlockNumber(193137).into())
            .await
            .expect("should have parsed with the optional declared contracts");
        assert_ne!(parsed.state_diff.declared_contracts, &[]);
    }
}
/// `eth_contract_addresses` parses the gateway's L1 core-contract listing.
#[tokio::test]
async fn eth_contract_addresses() {
    let (_jh, client) = setup([(
        "/feeder_gateway/get_contract_addresses",
        (
            r#"{"Starknet":"0xde29d060d45901fb19ed6c6e959eb22d8626708e","GpsStatementVerifier":"0xab43ba48c9edf4c2c4bb01237348d1d7b28ef168"}"#,
            200,
        ),
    )]);
    client.eth_contract_addresses().await.unwrap();
}
mod add_transaction {
use std::collections::HashMap;
use super::*;
use crate::{
core::{ByteCodeOffset, CallParam, CallSignatureElem, EntryPoint},
sequencer::request::contract::{EntryPointType, SelectorAndOffset},
starkhash,
};
use web3::types::H256;
/// A zero entry-point selector is rejected by the gateway with
/// `UnsupportedSelectorForFee`, which the client must surface.
#[tokio::test]
async fn invalid_entry_point_selector() {
    // test with values dumped from `starknet invoke` for a test contract,
    // except for an invalid entry point value
    let (_jh, client) = setup([(
        "/gateway/add_transaction",
        StarknetErrorCode::UnsupportedSelectorForFee.into_response(),
    )]);
    let error = client
        .add_invoke_transaction(
            Call {
                contract_address: ContractAddress(starkhash!(
                    "023371b227eaecd8e8920cd429357edddd2cd0f3fee6abaacca08d3ab82a7cdd"
                )),
                calldata: vec![
                    CallParam(starkhash!("01")),
                    CallParam(starkhash!(
                        "0677bb1cdc050e8d63855e8743ab6e09179138def390676cc03c484daf112ba1"
                    )),
                    CallParam(starkhash!(
                        "0362398bec32bc0ebb411203221a35a0301193a96f317ebe5e40be9f60d15320"
                    )),
                    CallParam(StarkHash::ZERO),
                    CallParam(starkhash!("01")),
                    CallParam(starkhash!("01")),
                    CallParam(starkhash!("2b")),
                    CallParam(StarkHash::ZERO),
                ],
                // Deliberately invalid: the zero selector triggers the error.
                entry_point_selector: EntryPoint(StarkHash::ZERO),
                signature: vec![
                    CallSignatureElem(starkhash!(
                        "07dd3a55d94a0de6f3d6c104d7e6c88ec719a82f4e2bbc12587c8c187584d3d5"
                    )),
                    CallSignatureElem(starkhash!(
                        "071456dded17015d1234779889d78f3e7c763ddcfd2662b19e7843c7542614f8"
                    )),
                ],
            },
            Fee(5444010076217u128.to_be_bytes().into()),
            TransactionVersion(H256::zero()),
        )
        .await
        .unwrap_err();
    assert_matches!(
        error,
        SequencerError::StarknetError(e) => assert_eq!(e.code, StarknetErrorCode::UnsupportedSelectorForFee)
    );
}
/// A well-formed invoke transaction is accepted (`TRANSACTION_RECEIVED`).
#[tokio::test]
async fn invoke_function() {
    let (_jh, client) = setup([(
        "/gateway/add_transaction",
        (
            r#"{"code":"TRANSACTION_RECEIVED","transaction_hash":"0x0389DD0629F42176CC8B6C43ACEFC0713D0064ECDFC0470E0FC179F53421A38B"}"#,
            200,
        ),
    )]);
    // test with values dumped from `starknet invoke` for a test contract
    client
        .add_invoke_transaction(
            Call {
                contract_address: ContractAddress(starkhash!(
                    "023371b227eaecd8e8920cd429357edddd2cd0f3fee6abaacca08d3ab82a7cdd"
                )),
                calldata: vec![
                    CallParam(starkhash!("01")),
                    CallParam(starkhash!(
                        "0677bb1cdc050e8d63855e8743ab6e09179138def390676cc03c484daf112ba1"
                    )),
                    CallParam(starkhash!(
                        "0362398bec32bc0ebb411203221a35a0301193a96f317ebe5e40be9f60d15320"
                    )),
                    CallParam(StarkHash::ZERO),
                    CallParam(starkhash!("01")),
                    CallParam(starkhash!("01")),
                    CallParam(starkhash!("2b")),
                    CallParam(StarkHash::ZERO),
                ],
                entry_point_selector: EntryPoint(starkhash!(
                    "015d40a3d6ca2ac30f4031e42be28da9b056fef9bb7357ac5e85627ee876e5ad"
                )),
                signature: vec![
                    CallSignatureElem(starkhash!(
                        "07dd3a55d94a0de6f3d6c104d7e6c88ec719a82f4e2bbc12587c8c187584d3d5"
                    )),
                    CallSignatureElem(starkhash!(
                        "071456dded17015d1234779889d78f3e7c763ddcfd2662b19e7843c7542614f8"
                    )),
                ],
            },
            Fee(5444010076217u128.to_be_bytes().into()),
            TransactionVersion(H256::zero()),
        )
        .await
        .unwrap();
}
/// Sanity check on the fixture itself: the base64 `program` field must be
/// gzip-compressed JSON (decoding failures here would invalidate the other
/// add_transaction tests that reuse this fixture).
#[test]
fn test_program_is_valid_compressed_json() {
    use flate2::write::GzDecoder;
    use std::io::Write;

    let json = include_bytes!("../resources/deploy_transaction.json");
    let json: serde_json::Value = serde_json::from_slice(json).unwrap();
    let program = json["contract_definition"]["program"].as_str().unwrap();
    let gzipped_program = base64::decode(program).unwrap();
    let mut decoder = GzDecoder::new(Vec::new());
    decoder.write_all(&gzipped_program).unwrap();
    let json = decoder.finish().unwrap();
    let _contract: serde_json::Value = serde_json::from_slice(&json).unwrap();
}
/// A declare transaction round-trips through the gateway mock.
#[tokio::test]
async fn declare_class() {
    let contract_class = get_contract_class_from_fixture();

    let (_jh, client) = setup([(
        "/gateway/add_transaction",
        (
            r#"{"code": "TRANSACTION_RECEIVED",
"transaction_hash": "0x77ccba4df42cf0f74a8eb59a96d7880fae371edca5d000ca5f9985652c8a8ed",
"class_hash": "0x711941b11a8236b8cca42b664e19342ac7300abb1dc44957763cb65877c2708"}"#,
            200,
        ),
    )]);

    client
        .add_declare_transaction(
            contract_class,
            // actual address dumped from a `starknet declare` call
            ContractAddress(starkhash!("01")),
            Fee(0u128.to_be_bytes().into()),
            vec![],
            TransactionNonce(StarkHash::ZERO),
            TransactionVersion(H256::zero()),
            None,
        )
        .await
        .unwrap();
}
/// A deploy transaction round-trips through the gateway mock.
#[tokio::test]
async fn deploy_contract() {
    let contract_definition = get_contract_class_from_fixture();

    let (_jh, client) = setup([(
        "/gateway/add_transaction",
        (
            r#"{"code":"TRANSACTION_RECEIVED","transaction_hash":"0x057ED4B4C76A1CA0BA044A654DD3EE2D0D3E550343D739350A22AACDD524110D",
"address":"0x03926AEA98213EC34FE9783D803237D221C54C52344422E1F4942A5B340FA6AD"}"#,
            200,
        ),
    )]);

    client
        .add_deploy_transaction(
            ContractAddressSalt(starkhash!(
                "05864b5e296c05028ac2bbc4a4c1378f56a3489d13e581f21d566bb94580f76d"
            )),
            // Regression: use a dummy constructor param here to make sure that
            // it is serialized properly
            vec![ConstructorParam(starkhash!("01"))],
            contract_definition,
            None,
        )
        .await
        .unwrap();
}
/// Return a contract definition that was dumped from a `starknet deploy`.
///
/// The compressed `program` blob and the ABI are read from the bundled
/// fixture; the entry points are re-declared here by hand.
fn get_contract_class_from_fixture() -> ContractDefinition {
    let json = include_bytes!("../resources/deploy_transaction.json");
    let json: serde_json::Value = serde_json::from_slice(json).unwrap();
    let program = json["contract_definition"]["program"].as_str().unwrap();
    let entry_points_by_type: HashMap<EntryPointType, Vec<SelectorAndOffset>> =
        HashMap::from([
            (EntryPointType::Constructor, vec![]),
            (
                EntryPointType::External,
                vec![
                    SelectorAndOffset {
                        offset: ByteCodeOffset(starkhash!("3a")),
                        selector: EntryPoint(starkhash!(
                            "0362398bec32bc0ebb411203221a35a0301193a96f317ebe5e40be9f60d15320")
                        ),
                    },
                    SelectorAndOffset{
                        offset: ByteCodeOffset(starkhash!("5b")),
                        selector: EntryPoint(starkhash!(
                            "039e11d48192e4333233c7eb19d10ad67c362bb28580c604d67884c85da39695"
                        )),
                    },
                ],
            ),
            (EntryPointType::L1Handler, vec![]),
        ]);
    ContractDefinition {
        program: program.to_owned(),
        entry_points_by_type,
        abi: Some(json["contract_definition"]["abi"].clone()),
    }
}
/// Tests that the optional deploy token is forwarded to the gateway as a
/// `token` query parameter, and that its absence is rejected.
mod deploy_token {
    use super::*;
    use http::StatusCode;
    use std::collections::HashMap;
    use warp::{http::Response, Filter};

    const EXPECTED_TOKEN: &str = "magic token value";
    const EXPECTED_ERROR_MESSAGE: &str = "error message";

    /// Spawns a warp server that accepts any request carrying the expected
    /// `token` query parameter and rejects everything else with a Starknet
    /// `NON_PERMITTED_CONTRACT` error payload.
    fn test_server() -> (tokio::task::JoinHandle<()>, std::net::SocketAddr) {
        fn token_check(params: HashMap<String, String>) -> impl warp::Reply {
            match params.get("token") {
                Some(token) if token == EXPECTED_TOKEN => Response::builder().status(StatusCode::OK).body(serde_json::to_vec(&serde_json::json!({
                    "code": "TRANSACTION_ACCEPTED",
                    "transaction_hash": "0x57ed4b4c76a1ca0ba044a654dd3ee2d0d3e550343d739350a22aacdd524110d",
                    "address":"0x3926aea98213ec34fe9783d803237d221c54c52344422e1f4942a5b340fa6ad"
                })).unwrap()),
                _ => Response::builder().status(StatusCode::INTERNAL_SERVER_ERROR).body(serde_json::to_vec(&serde_json::json!({
                    "code": "StarknetErrorCode.NON_PERMITTED_CONTRACT",
                    "message": EXPECTED_ERROR_MESSAGE,
                })).unwrap())
            }
        }

        let route = warp::any()
            .and(warp::query::<HashMap<String, String>>())
            .map(token_check);
        // Bind to an ephemeral port so parallel tests don't collide.
        let (addr, run_srv) = warp::serve(route).bind_ephemeral(([127, 0, 0, 1], 0));
        let server_handle = tokio::spawn(run_srv);
        (server_handle, addr)
    }

    /// Deploy with the token: the server only accepts if the token arrived.
    #[test_log::test(tokio::test)]
    async fn test_token_is_passed_to_sequencer_api() {
        let (_jh, addr) = test_server();
        let mut url = reqwest::Url::parse("http://localhost/").unwrap();
        url.set_port(Some(addr.port())).unwrap();
        let client = Client::with_url(url).unwrap();

        client
            .add_deploy_transaction(
                ContractAddressSalt(StarkHash::ZERO),
                vec![],
                ContractDefinition {
                    program: "".to_owned(),
                    entry_points_by_type: HashMap::new(),
                    abi: None,
                },
                Some(EXPECTED_TOKEN.to_owned()),
            )
            .await
            .unwrap();
    }

    /// Deploy without the token: the server's error payload must surface as
    /// a typed `StarknetError` with code and message intact.
    #[test_log::test(tokio::test)]
    async fn test_deploy_fails_with_no_token() {
        let (_jh, addr) = test_server();
        let mut url = reqwest::Url::parse("http://localhost/").unwrap();
        url.set_port(Some(addr.port())).unwrap();
        let client = Client::with_url(url).unwrap();

        let err = client
            .add_deploy_transaction(
                ContractAddressSalt(StarkHash::ZERO),
                vec![],
                ContractDefinition {
                    program: "".to_owned(),
                    entry_points_by_type: HashMap::new(),
                    abi: None,
                },
                None,
            )
            .await
            .unwrap_err();

        assert_matches!(err, SequencerError::StarknetError(se) => {
            assert_eq!(se.code, StarknetErrorCode::NotPermittedContract);
            assert_eq!(se.message, EXPECTED_ERROR_MESSAGE);
        });
    }
}
}
/// Tests for `Client::chain` — detecting which network a gateway serves by
/// inspecting its genesis block hash.
mod chain {
    use crate::core::Chain;
    use crate::sequencer;

    #[derive(Copy, Clone, PartialEq, Eq)]
    /// Used by [setup_server] to determine which block to return.
    enum TargetChain {
        Goerli,
        Mainnet,
        Invalid,
    }

    /// Creates a [sequencer::Client] whose Sequencer gateway is either the real Sequencer,
    /// or a local warp server. A local server is created if:
    /// - SEQUENCER_TESTS_LIVE_API is not set, __or__
    /// - `target == TargetChain::Invalid`
    ///
    /// The local server only supports the `feeder_gateway/get_block?blockNumber=0` queries.
    fn setup_server(
        target: TargetChain,
    ) -> (Option<tokio::task::JoinHandle<()>>, sequencer::Client) {
        use warp::http::{Response, StatusCode};
        use warp::Filter;

        // `TargetChain::Invalid` always uses the local server setup as the Sequencer
        // won't return an invalid genesis block.
        if std::env::var_os("SEQUENCER_TESTS_LIVE_API").is_some()
            && target != TargetChain::Invalid
        {
            match target {
                TargetChain::Mainnet => (None, sequencer::Client::new(Chain::Mainnet).unwrap()),
                TargetChain::Goerli => (None, sequencer::Client::new(Chain::Goerli).unwrap()),
                // Escaped above already
                TargetChain::Invalid => unreachable!(),
            }
        } else {
            #[derive(serde::Deserialize, serde::Serialize)]
            #[serde(deny_unknown_fields)]
            struct Params {
                #[serde(rename = "blockNumber")]
                block_number: u64,
            }
            let filter = warp::get()
                .and(warp::path("feeder_gateway"))
                .and(warp::path("get_block"))
                .and(warp::query::<Params>())
                .map(move |params: Params| match params.block_number {
                    0 => {
                        const GOERLI_GENESIS: &str =
                            include_str!("../fixtures/sequencer/0.9.0/block/genesis.json");
                        // Pick (or fabricate) the genesis payload matching the
                        // requested target chain.
                        let data = match target {
                            TargetChain::Goerli => GOERLI_GENESIS.to_owned(),
                            // This is a bit of a cheat, but we don't currently have a mainnet fixture and I'm hesitant to introduce one
                            // since it requires re-organising all the fixtures.
                            TargetChain::Mainnet => GOERLI_GENESIS.replace(
                                r#""block_hash": "0x7d328a71faf48c5c3857e99f20a77b18522480956d1cd5bff1ff2df3c8b427b"#,
                                r#""block_hash": "0x047C3637B57C2B079B93C61539950C17E868A28F46CDEF28F88521067F21E943"#,
                            ),
                            TargetChain::Invalid => GOERLI_GENESIS.replace(
                                r#"block_hash": "0x7d328"#,
                                r#"block_hash": "0x11111"#,
                            ),
                        };
                        Response::new(data)
                    }
                    _ => Response::builder()
                        .status(StatusCode::BAD_REQUEST)
                        .body("Only supports genesis block request".to_owned())
                        .unwrap(),
                });
            let (addr, serve_fut) = warp::serve(filter).bind_ephemeral(([127, 0, 0, 1], 0));
            let server_handle = tokio::spawn(serve_fut);
            let client = sequencer::Client::with_url(
                reqwest::Url::parse(&format!("http://{}", addr)).unwrap(),
            )
            .unwrap();
            (Some(server_handle), client)
        }
    }

    /// Goerli genesis hash is recognized as the Goerli chain.
    #[tokio::test]
    async fn goerli() {
        let (_server_handle, sequencer) = setup_server(TargetChain::Goerli);
        let chain = sequencer.chain().await.unwrap();
        assert_eq!(chain, crate::core::Chain::Goerli);
    }

    /// Mainnet genesis hash is recognized as the Mainnet chain.
    #[tokio::test]
    async fn mainnet() {
        let (_server_handle, sequencer) = setup_server(TargetChain::Mainnet);
        let chain = sequencer.chain().await.unwrap();
        assert_eq!(chain, crate::core::Chain::Mainnet);
    }

    /// An unknown genesis hash must be an error, not a silent default.
    #[tokio::test]
    async fn invalid() {
        let (_server_handle, sequencer) = setup_server(TargetChain::Invalid);
        sequencer.chain().await.unwrap_err();
    }
}
}
|
//! Module with everything related to the OAuth2 login flow
mod port;
mod callback_endpoint;
pub mod db;
use crate::env::Env;
use actix_web::{HttpServer, App};
use rand::Rng;
use std::sync::mpsc::{Sender, channel};
use crate::api::oauth::LoginData;
use crate::{Result, unwrap_other_err};
/// Struct describing the data to be passed to Actix endpoints
#[derive(Clone, Debug)]
pub struct ActixData {
    /// The state parameter. Refer to the Google OAuth2 docs for why this is used
    state: String,
    /// The channel on which the endpoint can send the received code
    tx: Sender<String>
}
/// Perform the OAuth2 login flow
///
/// Flow: generate a PKCE verifier/challenge pair and a CSRF `state`, spawn a
/// local HTTP callback server on a random free port, print the authorization
/// URL for the user to open, block until the callback delivers the code, then
/// exchange the code for tokens.
pub fn perform_oauth2_login(env: &Env) -> Result<LoginData> {
    //Generate a code_verifier and code_challenge
    let (code_verifier, code_challenge) = generate_code();

    //Generate a state parameter (32 random alphanumeric chars, CSRF guard)
    let state = rand::thread_rng().sample_iter(rand::distributions::Alphanumeric).take(32).map(char::from).collect::<String>();

    //Determine a port to listen on
    // NOTE(review): this retry loop is unbounded if no port in 4000..8000 is
    // free — extremely unlikely in practice, but confirm it's acceptable.
    let port = {
        let mut port = rand::thread_rng().gen_range(4000..8000) as u16;
        while !port::is_free(port) {
            port = rand::thread_rng().gen_range(4000..8000) as u16;
        }
        port
    };

    //This channel will be used to receive the code from the HTTP endpoint
    let (tx_code, rx_code) = channel();
    let actix_data = ActixData { state: state.clone(), tx: tx_code};

    //This channel will be used to receive the Serve instance from Actix
    let (tx_srv, rx_srv) = channel();

    //Start the actix web server and wait for it to return us the Server instance
    // The server runs on its own thread because start_actix blocks.
    std::thread::spawn(move || {
        match start_actix(actix_data, port, tx_srv) {
            Ok(_) => {},
            Err(e) => {
                eprintln!("Error: Failed to start Actix Web Server: {:?}", e);
                std::process::exit(1);
            }
        }
    });
    let server = unwrap_other_err!(rx_srv.recv());

    // The redirect URI must match the port the callback server listens on.
    let auth_uri = crate::api::oauth::create_authentication_uri(&env, &code_challenge, &state, &format!("http://localhost:{}", port));
    println!("Info: Please open the following URL:");
    println!("\n{}\n", auth_uri);

    //Wait for the code from the HTTP endpoint
    let code = unwrap_other_err!(rx_code.recv());
    println!("Info: Code received. Exchanging for tokens.");

    //Stop the Actix web server, we dont need it anymore
    actix_web::rt::System::new("").block_on(server.stop(true));

    crate::api::oauth::exchange_access_token(&env, &code, &code_verifier, &format!("http://localhost:{}", port))
}
/// Start the Actix Web Server.
/// This is a blocking method call
/// An instance of Actix's Server will be send over the provided channel so it can be stopped later
fn start_actix(data: ActixData, port: u16, tx: Sender<actix_server::Server>) -> Result<()> {
    let mut sys = actix_web::rt::System::new("GSync");
    // Each worker gets a clone of `data`; the callback endpoint reads the
    // expected `state` and forwards the received code over `data.tx`.
    let actix = unwrap_other_err!(HttpServer::new(move || {
        App::new()
            .data(data.clone())
            .service(callback_endpoint::authorization)
    }).bind(format!("0.0.0.0:{}", port))).run();

    // Hand the Server handle back so the caller can stop it later, then block
    // this thread running the server until it is stopped.
    let _ = tx.send(actix.clone());
    let _ = sys.block_on(actix);
    Ok(())
}
/// Generate a code_verifier and code_challenge
fn generate_code() -> (String, String) {
loop {
let code_verifier: String = rand::thread_rng().sample_iter(rand::distributions::Alphanumeric).take(96).map(char::from).collect();
let code_challenge = {
use sha2::digest::Digest;
let mut hasher = sha2::Sha256::new();
hasher.update(code_verifier.as_bytes());
let digest = hasher.finalize();
base64::encode(digest.as_slice())
};
if code_challenge.contains('+') || code_challenge.contains('/') {
continue;
}
return (code_verifier, code_challenge.replace("=", ""))
}
} |
use std::rc::Rc;
use flux::ast::SourceLocation;
use flux::semantic::nodes::Expression;
use flux::semantic::types::MonoType;
use flux::semantic::walk::{Node, Visitor};
use lspower::lsp;
/// Summary of a function's signature used for completion/signature help.
pub struct FunctionInfo {
    // Function name as bound in the source.
    pub name: String,
    // Package the function belongs to ("self" for user-defined functions).
    pub package_name: String,
    // Names of required (positional) parameters.
    pub required_args: Vec<String>,
    // Names of optional parameters.
    pub optional_args: Vec<String>,
}
impl FunctionInfo {
    /// Builds a `FunctionInfo` from a semantic function type, splitting its
    /// parameters into required (`req`) and optional (`opt`) argument names.
    pub fn new(
        name: String,
        f: &flux::semantic::types::Function,
        package_name: String,
    ) -> Self {
        FunctionInfo {
            name,
            package_name,
            required_args: f.req.keys().map(String::from).collect(),
            optional_args: f.opt.keys().map(String::from).collect(),
        }
    }
}
/// AST visitor that collects user-defined functions declared before `pos`.
pub struct FunctionFinderVisitor {
    // Cursor position (zero-based, LSP convention) limiting the search.
    pub pos: lsp::Position,
    // Functions found so far, in traversal order.
    pub functions: Vec<Rc<FunctionInfo>>,
}
/// Returns a `FunctionInfo` for `expr` if it is a function expression with a
/// function monotype, `None` otherwise. User-defined functions get the
/// package name "self".
fn create_function_result(
    name: String,
    expr: &Expression,
) -> Option<FunctionInfo> {
    if let Expression::Function(f) = expr {
        // Match by reference: the previous version cloned the whole MonoType
        // just to destructure it.
        if let MonoType::Fun(fun) = &f.typ {
            return Some(FunctionInfo::new(
                name,
                fun.as_ref(),
                "self".to_string(),
            ));
        }
    }
    None
}
/// Whether `loc` starts at or before the cursor position `pos`.
///
/// LSP positions are zero-based while flux source locations are one-based,
/// hence the `+ 1` conversions.
fn is_before_position(
    loc: &SourceLocation,
    pos: lsp::Position,
) -> bool {
    let cursor_line = pos.line + 1;
    let cursor_column = pos.character + 1;
    let starts_after = loc.start.line > cursor_line
        || (loc.start.line == cursor_line && loc.start.column > cursor_column);
    !starts_after
}
impl<'a> Visitor<'a> for FunctionFinderVisitor {
    /// Records every variable assignment binding a function expression that
    /// appears before the visitor's cursor position. Always returns `true`
    /// so traversal continues into child nodes.
    fn visit(&mut self, node: Node<'a>) -> bool {
        let loc = node.loc();
        let pos = self.pos;

        // Nodes at/after the cursor are skipped but traversal continues.
        if !is_before_position(loc, pos) {
            return true;
        }

        if let Node::VariableAssgn(assgn) = node {
            if let Some(f) = create_function_result(
                assgn.id.name.to_string(),
                &assgn.init,
            ) {
                self.functions.push(Rc::new(f));
            }
        }
        true
    }
}
|
//! More examples on sample-based and frame-based implementation of digital
//! systems. I implemented these as iterator based, as usual.
//!
//! Runs entirely locally without hardware. Rounding might be different than on
//! device. Except for when printing, you must be vigilant not to become reliant
//! on any std tools that can't otherwise port over to no_std without alloc.
//!
//! `cargo run --example 2_12`
use textplots::{Chart, Plot, Shape};
use itertools::Itertools;
use typenum::Unsigned;
type N = heapless::consts::U10;
const A: f32 = 0.8;
fn main() {
    // e[n] — decaying exponential A^n
    let exponential = (0..(N::to_usize())).map(|val| A.powf(val as f32));
    // r[n] — unit ramp
    let unit_ramp = (0..(N::to_usize())).map(|n| n as f32);

    // y1[n]=x1[n]+x2[n], where x1[n]=r[n] and x2[n]=e[n]
    let y1 = unit_ramp.clone().zip(exponential).map(|(r, e)| r + e);
    display::<N, _>("y1", y1.clone());

    // y2[n]=x3[n], where x3[n]=r^2[n]
    let y2 = unit_ramp.clone().zip(unit_ramp).map(|(r, rr)| r * rr);
    display::<N, _>("y2", y2.clone());

    // y3[n]=2.2y1[n]-1.1y1[n-1]+.7y3[n-1] — stateful, so it gets its own type
    let y3 = DigitalSystem5::new(y1);
    display::<N, _>("y3", y3);

    // y4[n]=2.2y2[n+1]-1.1y2[n] — look-ahead via pairwise windows
    let y4 = y2.tuple_windows().map(|(y2, y2_1)| 2.2 * y2_1 - 1.1 * y2);
    display::<N, _>("y4", y4);
}
// y3[n]=2.2y1[n]-1.1y1[n-1]+.7y3[n-1]
// Iterator adapter implementing the above first-order difference equation;
// it carries one sample of input and output history.
#[derive(Clone, Debug)]
struct DigitalSystem5<I>
where
    I: Iterator<Item = f32>,
{
    // x[n-1]; None until the first sample has been consumed.
    last_in: Option<f32>,
    // y[n-1]; None until the first output has been produced.
    last_out: Option<f32>,
    iter: I,
}
impl<I> DigitalSystem5<I>
where
    I: Iterator<Item = f32>,
{
    /// Wraps `iter` with empty (zero-history) delay state.
    fn new(iter: I) -> Self {
        Self {
            last_in: None,
            last_out: None,
            iter,
        }
    }
}
impl<I> Iterator for DigitalSystem5<I>
where
    I: Iterator<Item = f32>,
{
    type Item = f32;

    /// Pulls the next input sample, applies the difference equation
    /// y[n] = 2.2·x[n] − 1.1·x[n−1] + 0.7·y[n−1], then updates the stored
    /// one-sample delays. On the very first sample only the feed-through
    /// term applies (zero initial conditions).
    fn next(&mut self) -> Option<f32> {
        let input = self.iter.next()?;
        let out = match (self.last_in, self.last_out) {
            (Some(prev_in), Some(prev_out)) => {
                2.2 * input + -1.1 * prev_in + 0.7 * prev_out
            }
            _ => 2.2 * input,
        };
        self.last_in = Some(input);
        self.last_out = Some(out);
        Some(out)
    }
}
// Points isn't a great representation as you can lose the line in the graph,
// however while Lines occasionally looks good it also can be terrible.
// Continuous requires to be in a fn pointer closure which cant capture any
// external data so not useful without lots of code duplication.
/// Prints the sequence values and draws them as an N-sample line chart.
fn display<N, I>(name: &str, input: I)
where
    N: Unsigned,
    I: Iterator<Item = f32> + core::clone::Clone + std::fmt::Debug,
{
    println!("{:?}: {:.4?}", name, input.clone().format(", "));
    // textplots wants (x, y) pairs in a slice, so materialize the iterator.
    let display = input
        .enumerate()
        .map(|(n, y)| (n as f32, y))
        .collect::<Vec<(f32, f32)>>();
    Chart::new(120, 60, 0.0, N::to_usize() as f32)
        .lineplot(Shape::Lines(&display[..]))
        .display();
}
|
#[macro_use]
extern crate lazy_static;
use regex::Regex;
mod z_decode;
mod z_encode;
pub use z_decode::z_decode;
pub use z_encode::z_encode;
/// Memory statistics parsed from a GHC `<<ghc: ...>>` one-line RTS summary.
#[derive(Debug, PartialEq, Eq)]
pub struct GhcSummary {
    // Total bytes allocated.
    pub allocs: u64,
    // Number of garbage collections performed.
    pub gcs: u64,
    // Average bytes residency.
    pub avg_res: u64,
    // Maximum bytes residency.
    pub max_res: u64,
    // Peak memory in use, in megabytes.
    pub in_use: u64,
}
lazy_static! {
    // Compiled once on first use; matches the `<<ghc: ...>>` summary line and
    // captures the five numeric fields by name.
    static ref GHC_SUMMARY_RE: Regex = Regex::new(
        r"<<ghc: (?P<allocs>\d+) bytes, (?P<gcs>\d+) GCs, (?P<avg_res>\d+)/(?P<max_res>\d+) .* (?P<in_use>\d+)M in use").unwrap();
}
/// Parses a GHC `<<ghc: ...>>` RTS summary line into a [`GhcSummary`].
///
/// # Panics
///
/// Panics with a descriptive message if `s` does not match the expected
/// summary format, or if a numeric field overflows `u64`.
pub fn parse_ghc_summary(s: &str) -> GhcSummary {
    let captures = GHC_SUMMARY_RE
        .captures(s)
        .expect("input does not match the `<<ghc: ...>>` summary format");
    // The regex restricts every named capture to a run of digits, so the only
    // possible parse failure here is u64 overflow.
    let field = |name: &str| -> u64 {
        captures[name]
            .parse()
            .unwrap_or_else(|_| panic!("GHC summary field `{}` overflows u64", name))
    };
    GhcSummary {
        allocs: field("allocs"),
        gcs: field("gcs"),
        avg_res: field("avg_res"),
        max_res: field("max_res"),
        in_use: field("in_use"),
    }
}
/// Parses a real summary line captured from a GHC run and checks every field.
#[test]
fn ghc_summary_parsing() {
    assert_eq!(
        parse_ghc_summary(
            "<<ghc: 3227088 bytes, 4 GCs, 200584/234944 avg/max bytes residency (2 samples), \
             2M in use, 0.000 INIT (0.000 elapsed), 0.001 MUT (0.002 elapsed), \
             0.004 GC (0.007 elapsed) :ghc>>"
        ),
        GhcSummary {
            allocs: 3227088,
            gcs: 4,
            avg_res: 200584,
            max_res: 234944,
            in_use: 2
        }
    );
}
|
use hacspec_lib::prelude::*;
use unsafe_hacspec_examples::aes_gcm::gf128::*;
/// GMAC known-answer test: the tag computed for a fixed message/key pair must
/// equal the expected value.
#[test]
fn test_gmac() {
    let msg = ByteSeq::from_hex("feedfacedeadbeeffeedfacedeadbeefabaddad20000000000000000000000005a8def2f0c9e53f1f75d7853659e2a20eeb2b22aafde6419a058ab4f6f746bf40fc0c3b780f244452da3ebf1c5d82cdea2418997200ef82e44ae7e3f");
    let key = Key::from_hex("acbef20579b4b8ebce889bac8732dad7");
    let output = Tag::from_hex("cc9ae9175729a649936e890bd971a8bf");
    let tag = gmac(&msg, key);
    // declassify_eq compares secret values without leaking them on mismatch.
    assert!(output.declassify_eq(&tag));
}
|
/*
* Copyright (c) Meta Platforms, Inc. and affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
use std::alloc;
use std::env;
use std::mem;
use nix::unistd;
use reverie::syscalls::Displayable;
use reverie::syscalls::MemoryAccess;
use reverie::syscalls::Syscall;
use reverie::syscalls::Sysno;
use reverie::Error;
use reverie::GlobalTool;
use reverie::Guest;
use reverie::Pid;
use reverie::Tool;
use tracing::warn;
/// Reverie tool that duplicates selected syscalls; holds no state itself.
#[derive(Debug, Default, Clone)]
struct TestTool {}

/// Number of times each intercepted syscall is replayed (the tool's config).
type Dupcount = u64;

#[reverie::global_tool]
impl GlobalTool for TestTool {
    type Request = ();
    type Response = ();
    type Config = Dupcount;

    // No global RPCs are needed in this test; messages are ignored.
    async fn receive_rpc(&self, _from: Pid, _message: ()) {}
}

/// How many bytes of randomness to peek at.
const RAND_SIZE: usize = mem::size_of::<u64>();

/// How many times to DUPLICATE select system calls that are intercepted.
const NUM_REPS: Dupcount = 3;
#[reverie::tool]
impl Tool for TestTool {
    type GlobalState = TestTool;
    type ThreadState = ();

    /// Intercepts every syscall; for the id-query family and `getrandom` it
    /// injects the call `reps` extra times (logging each result) before the
    /// final `tail_inject` performs the "real" one whose result the guest sees.
    async fn handle_syscall_event<T: Guest<Self>>(
        &self,
        guest: &mut T,
        call: Syscall,
    ) -> Result<i64, Error> {
        let reps = guest.config().clone();
        match call {
            Syscall::Gettid(_)
            | Syscall::Getgid(_)
            | Syscall::Getsid(_)
            | Syscall::Getppid(_)
            | Syscall::Getpgid(_)
            | Syscall::Getpid(_) => {
                for i in 1..=reps {
                    let syscall_ret = guest.inject(call).await;
                    warn!(
                        "[pid {}] Duplicated syscall ({}/{})! {} = {}",
                        guest.tid(),
                        i,
                        reps,
                        call.display_with_outputs(&guest.memory()),
                        syscall_ret.unwrap_or_else(|errno| errno.into_raw() as i64)
                    );
                }
            }
            Syscall::Getrandom(r) => {
                // Only duplicate when the guest buffer is big enough to read
                // a whole u64 sample back out of it.
                if r.buflen() < RAND_SIZE {
                    warn!(
                        "[pid {}] not touching getrandom, buflen too small.",
                        guest.tid()
                    );
                } else {
                    for i in 1..=reps {
                        let syscall_ret = guest.inject(call).await;
                        let bufaddr = r.buf().unwrap();
                        let mut buf: [u8; RAND_SIZE] = [0; RAND_SIZE];
                        // Read the randomness the duplicated call wrote into
                        // guest memory so it can be logged.
                        guest.memory().read_exact(bufaddr, &mut buf).unwrap();
                        let rand_word: u64 = u64::from_le_bytes(buf);
                        warn!(
                            "[pid {}] Duplicated getrandom syscall ({}/{}): {}, returned {}, first word {}",
                            guest.tid(),
                            i,
                            reps,
                            call.display_with_outputs(&guest.memory()),
                            syscall_ret.unwrap_or_else(|errno| errno.into_raw() as i64),
                            rand_word
                        );
                    }
                }
            }
            _ => {}
        }
        // Irrespective of above, run a tail_inject at the end:
        guest.tail_inject(call).await
    }
}
/// Guest half of the test: issues the id-query syscalls plus one raw
/// `getrandom`, all of which the tracing tool intercepts and duplicates.
fn guest_mode() {
    println!("Running in guest mode (actual test).");

    let tid = unistd::gettid();
    let pid = unistd::getpid();
    let gid = unistd::getgid();
    let ppid = unistd::getppid();
    let pgid = unistd::getpgid(None).unwrap();
    let sid = unistd::getsid(None).unwrap();
    println!(
        "Read IDs: t {}, p {}, g {}, pp {}, pg {}, s {}",
        tid, pid, gid, ppid, pgid, sid,
    );

    // let r = syscalls::syscall!(0, 100, 0);

    // Issue getrandom through libc::syscall so the raw syscall (not a vDSO
    // or glibc wrapper path) is what the tool observes.
    let sz = RAND_SIZE;
    let rand_num: u64 = unsafe {
        // SAFETY: `sz` is nonzero and a power of two, so the layout is valid
        // and `alloc` returns a suitably aligned (or null, then UB-free
        // crashing) block of `sz` bytes.
        let layout = alloc::Layout::from_size_align(sz, sz).unwrap();
        let buf = alloc::alloc(layout);
        let no = Sysno::getrandom as i64;
        let rand = libc::syscall(no, buf, sz, 0);
        if rand < 0 {
            panic!("getrandom returned error code {}\n", rand);
        } else if rand != sz as i64 {
            panic!(
                "getrandom did not generate all {} bytes (instead {})\n",
                sz, rand
            );
        }
        // SAFETY: `buf` is aligned to `sz == size_of::<u64>()` and the
        // successful getrandom call above initialized all `sz` bytes.
        #[allow(clippy::cast_ptr_alignment)]
        let num: u64 = *(buf as *mut u64);
        alloc::dealloc(buf, layout);
        num
    };
    println!("Generated random number: {}", rand_num);
}
/// Host-side body of the test: re-launches this binary with the "guest"
/// argument under the Reverie ptrace tracer and returns the guest's exit
/// code (defaulting to 1 if none was reported).
async fn host_mode(thisprog: &str) -> Result<i32, Error> {
    println!("Running in HOST mode (ReverieTool)");
    let mut command = reverie::process::Command::new(thisprog);
    command.arg("guest");
    // NUM_REPS is handed to the tool as its config — presumably the
    // repetition count read back via `guest.config()` in the handler.
    let tracer = reverie_ptrace::TracerBuilder::<TestTool>::new(command)
        .config(NUM_REPS)
        .spawn()
        .await?;
    let (status, _) = tracer.wait().await?;
    Ok(status.code().unwrap_or(1))
}
#[tokio::main]
async fn main() -> Result<(), Error> {
    // Dispatch on the argument count: no extra args => host (tracer)
    // mode; a single "guest" argument => guest (tracee) mode.
    let args: Vec<String> = env::args().collect();
    match args.as_slice() {
        [prog] => std::process::exit(host_mode(prog).await?),
        [_, mode] if mode == "guest" => guest_mode(),
        _ => panic!(
            "Expected 'guest' or no CLI argument. Got unexpected command line args ({}): {:?}",
            args.len(),
            args
        ),
    }
    Ok(())
}
|
use std::collections::HashMap;
use std::time::SystemTime;
/// `InputCellID` is a unique identifier for an input cell.
#[derive(Clone, Copy, Debug, PartialEq)]
pub struct InputCellID {
    // (row, column) key into `Reactor::input_sheet`; the row counter is
    // seeded from the wall clock in `create_input`.
    position: (u32, u32),
}
/// `ComputeCellID` is a unique identifier for a compute cell.
/// Values of type `InputCellID` and `ComputeCellID` should not be mutually assignable,
/// demonstrated by the following tests:
///
/// ```compile_fail
/// let mut r = react::Reactor::new();
/// let input: react::ComputeCellID = r.create_input(111);
/// ```
///
/// ```compile_fail
/// let mut r = react::Reactor::new();
/// let input = r.create_input(111);
/// let compute: react::InputCellID = r.create_compute(&[react::CellID::Input(input)], |_| 222).unwrap();
/// ```
#[derive(Clone, Copy, Debug, PartialEq)]
pub struct ComputeCellID {
    // (row, column) key into `Reactor::compute_sheet`.
    position: (u32, u32)
}
/// Identifies one callback: the position of the compute cell it is
/// attached to, plus a per-cell callback index.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub struct CallbackID {
    position: (u32, u32),
    // Key into the per-cell callback map in `Reactor::callbacks`.
    cbid: u32
}
/// A reference to either kind of cell.
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum CellID {
    Input(InputCellID),
    Compute(ComputeCellID),
}
/// Errors reported by `Reactor::remove_callback`.
#[derive(Debug, PartialEq)]
pub enum RemoveCallbackError {
    NonexistentCell,
    NonexistentCallback,
}
/// A spreadsheet-like reactive system of input and compute cells.
pub struct Reactor<'a, T> {
    // Values of input cells, keyed by position.
    input_sheet: HashMap<(u32, u32), T>,
    // Each compute cell stores its dependency list and compute function.
    compute_sheet: HashMap<(u32, u32), (Vec<CellID>, Box<dyn Fn(&[T]) -> T>)>,
    // Per-compute-cell callbacks, keyed by callback id.
    callbacks: HashMap<(u32, u32), HashMap<u32, Box<dyn Fn(T) -> () + 'a>>>,
    // Position handed out by the most recent create_input / create_compute.
    last_entry_input: (u32, u32),
    last_entry_compute: (u32, u32)
}
// You are guaranteed that Reactor will only be tested against types that are Copy + PartialEq.
impl<'a, T: std::fmt::Debug + Copy + PartialEq> Reactor<'a, T> {
pub fn new() -> Self {
Reactor {
input_sheet: HashMap::new(),
compute_sheet: HashMap::new(),
callbacks: HashMap::new(),
last_entry_input: (0, 0),
last_entry_compute: (0, 0)
}
}
// Creates an input cell with the specified initial value, returning its ID.
pub fn create_input(&mut self, _initial: T) -> InputCellID {
let seed = match SystemTime::now().duration_since(SystemTime::UNIX_EPOCH) {
Ok(n) => n.as_secs(),
Err(_) => 0,
};
if self.last_entry_input.0 == 0 {
self.last_entry_input.0 = seed as u32;
}
let entry_position: (u32, u32) = (self.last_entry_input.0 + 1, self.last_entry_input.1 + 1);
self.input_sheet.insert(entry_position, _initial.clone());
self.last_entry_input = entry_position;
InputCellID {
position: entry_position
}
}
// Creates a compute cell with the specified dependencies and compute function.
// The compute function is expected to take in its arguments in the same order as specified in
// `dependencies`.
// You do not need to reject compute functions that expect more arguments than there are
// dependencies (how would you check for this, anyway?).
//
// If any dependency doesn't exist, returns an Err with that nonexistent dependency.
// (If multiple dependencies do not exist, exactly which one is returned is not defined and
// will not be tested)
//
// Notice that there is no way to *remove* a cell.
// This means that you may assume, without checking, that if the dependencies exist at creation
// time they will continue to exist as long as the Reactor exists.
pub fn create_compute<F: Copy + 'static + Fn(&[T]) -> T>(
&mut self,
_dependencies: &[CellID],
_compute_func: F,
) -> Result<ComputeCellID, CellID> {
let entry_position: (u32, u32) = (self.last_entry_compute.0 + 1, self.last_entry_compute.1 + 1);
let mut output: Result<ComputeCellID, CellID> = Err(_dependencies[0]);
for item in _dependencies {
let position = match item {
CellID::Compute(pos) => pos.position,
CellID::Input(pos) => pos.position
};
match self.input_sheet.contains_key(&position) || self.compute_sheet.contains_key(&position) {
true => {
self.last_entry_compute = entry_position;
self.compute_sheet.insert(entry_position,
(
_dependencies.to_vec(),
Box::new(_compute_func)
)
);
let compute_cell = ComputeCellID {
position: entry_position
};
output = Ok(compute_cell);
},
false => {
output = Err(*item);
}
}
}
output
}
// Retrieves the current value of the cell, or None if the cell does not exist.
//
// You may wonder whether it is possible to implement `get(&self, id: CellID) -> Option<&Cell>`
// and have a `value(&self)` method on `Cell`.
//
// It turns out this introduces a significant amount of extra complexity to this exercise.
// We chose not to cover this here, since this exercise is probably enough work as-is.
pub fn value(&self, id: CellID) -> Option<T> {
let find_value = |id| {
match id {
CellID::Input(x) => {
match self.input_sheet.get(&x.position) {
Some(x) => Some(*x),
None => None
}
},
CellID::Compute(x) => {
match self.compute_sheet.get(&x.position) {
Some(x) => {
let (dependency_vector, compute_function) = x;
let mut values: Vec<T> = Vec::new();
for dependency in dependency_vector {
match dependency {
CellID::Input(x) => {
match self.input_sheet.contains_key(&x.position) {
true => values.push(*self.input_sheet.get(&x.position).unwrap()),
false => ()
}
},
CellID::Compute(x) => {
match self.value(CellID::Compute(*x)) {
Some(re_computed) => values.push(re_computed),
None => ()
}
}
}
}
Some(compute_function(&values))
}
None => None
}
}
}
};
find_value(id)
}
// Sets the value of the specified input cell.
//
// Returns false if the cell does not exist.
//
// Similarly, you may wonder about `get_mut(&mut self, id: CellID) -> Option<&mut Cell>`, with
// a `set_value(&mut self, new_value: T)` method on `Cell`.
//
// As before, that turned out to add too much extra complexity.
pub fn set_value(&mut self, _id: InputCellID, _new_value: T) -> bool {
let mut output = false;
let mut find_dependencies = |_input: CellID| {
let mut existing_values = HashMap::new();
for (k, _) in self.compute_sheet.iter() {
let value = self.value(
CellID::Compute (ComputeCellID {
position: *k
})
).unwrap();
existing_values.insert(k, value);
}
output = match self.input_sheet.get_mut(&_id.position) {
Some(x) => {
*x = _new_value;
true
},
None => false
};
for (k, _) in self.compute_sheet.iter() {
match self.callbacks.contains_key(&k) {
true => {
let callback_vector = self.callbacks.get(&k).unwrap();
for (_key, cb) in callback_vector.values().enumerate() {
let value = self.value(
CellID::Compute (ComputeCellID {
position: *k
})
).unwrap();
if value != *existing_values.get(&k).unwrap() {
(cb)(self.value(CellID::Compute (ComputeCellID {
position: *k
})).unwrap())
}
}
},
false => ()
}
}
};
find_dependencies(CellID::Input(_id));
output
}
// Adds a callback to the specified compute cell.
//
// Returns the ID of the just-added callback, or None if the cell doesn't exist.
//
// Callbacks on input cells will not be tested.
//
// The semantics of callbacks (as will be tested):
// For a single set_value call, each compute cell's callbacks should each be called:
// * Zero times if the compute cell's value did not change as a result of the set_value call.
// * Exactly once if the compute cell's value changed as a result of the set_value call.
// The value passed to the callback should be the final value of the compute cell after the
// set_value call.
pub fn add_callback<F: Fn(T,) -> () + FnMut(T) -> () + 'a>(
&mut self,
_id: ComputeCellID,
_callback: F,
) -> Option<CallbackID> {
let mut cbid;
match self.compute_sheet.contains_key(&_id.position) {
true => {
match self.callbacks.contains_key(&_id.position) {
false => {
let mut cb_vector: HashMap<u32, Box<dyn Fn(T) -> () + 'a>> = HashMap::new();
cb_vector.insert(0, Box::new(_callback));
self.callbacks.insert(_id.position, cb_vector);
cbid = 0;
},
true => {
let compute_cell_id = self.callbacks.get_mut(&_id.position).unwrap();
let max = compute_cell_id.keys().fold(0, |a, &b| a.max(b));
compute_cell_id.insert(max+1 as u32, Box::new(_callback));
cbid = compute_cell_id.keys().len() - 1;
}
}
Some(CallbackID {
position: _id.position,
cbid: cbid as u32
})
},
false => None
}
}
// Removes the specified callback, using an ID returned from add_callback.
//
// Returns an Err if either the cell or callback does not exist.
//
// A removed callback should no longer be called.
pub fn remove_callback(
&mut self,
cell: ComputeCellID,
callback: CallbackID,
) -> Result<(), RemoveCallbackError> {
let mut res: Result<(), RemoveCallbackError> =Ok(());
match self.compute_sheet.contains_key(&cell.position) {
true => {
match self.callbacks.contains_key(&cell.position) {
true => {
let callback_list = self.callbacks.get_mut(&cell.position).unwrap();
match callback_list.contains_key(&callback.cbid) {
true => {
callback_list.remove(&callback.cbid);
},
false => {
res = Err(RemoveCallbackError::NonexistentCallback);
}
}
},
false => {
res = Err(RemoveCallbackError::NonexistentCallback);
}
}
},
false => {
res = Err(RemoveCallbackError::NonexistentCell);
}
}
res
}
}
|
extern "C" {
    /// Linker-provided symbol; its *address* marks the end of the kernel
    /// image (see `init`, which stores that address).
    pub static mut end: u32;
}
// Current top of the bump allocator: address of the next free byte.
static mut allocator_end: u32 = 0;
/// Bump allocator: rounds the current break up to `align` and advances it
/// by `size`.
///
/// The original computed `(end & !(align - 1)) + align`, which wastes up
/// to `align` bytes even when the break was already aligned, and then
/// advanced the break by `size + align` instead of `size`. The standard
/// round-up formula is used instead. Assumes `align` is a nonzero power
/// of two (the allocation lang item's contract).
#[lang="exchange_malloc"]
unsafe fn kmalloc(size: usize, align: usize) -> *mut u8 {
    let ret = (allocator_end + align as u32 - 1) & !(align as u32 - 1);
    allocator_end = ret + size as u32;
    ret as *mut u8
}
#[allow(unused_variables)]
#[lang="exchange_free"]
/// Free is a no-op: this is a bump allocator and memory is never reclaimed.
unsafe fn kfree(ptr: *mut u8, old_size: usize, align: usize) {
    /* LOL */
}
/// Initializes the allocator to start handing out memory just past the
/// kernel image, at the address of the linker-provided `end` symbol.
pub fn init()
{
    unsafe {
        // NOTE(review): transmutes `&end` (a pointer) into a u32 —
        // presumably a 32-bit target; confirm, or replace with an
        // explicit pointer cast.
        allocator_end = ::core::mem::transmute(&end);
    }
}
|
use std::fs;
const ROWS: i32 = 128;
const COLUMNS: i32 = 8;
/// Decodes a boarding pass like "BFFFBBFRRR" into its seat id
/// (row * 8 + column). The first seven characters binary-partition the
/// 128 rows (F = lower half, B = upper half); the remaining characters
/// partition the 8 columns (L = lower, R = upper).
fn parse_seat_id(code: String) -> i32 {
    // Binary-partitions `0..span` according to `lo_char`/`hi_char`
    // markers and returns the selected value.
    fn bisect(chars: std::str::Chars<'_>, span: i32, lo_char: char, hi_char: char) -> i32 {
        let mut lower = 0;
        let mut upper = span;
        for c in chars {
            let mid = (lower + upper) / 2;
            if c == lo_char {
                upper = mid;
            } else if c == hi_char {
                lower = mid;
            } else {
                panic!("Something went wrong");
            }
        }
        lower
    }
    let row = bisect(code[..7].chars(), ROWS, 'F', 'B');
    let column = bisect(code[7..].chars(), COLUMNS, 'L', 'R');
    row * 8 + column
}
fn main() {
let contents = fs::read_to_string("input.txt")
.expect("error loading file");
let seat_list: Vec<i32> = contents.lines().map(|x| parse_seat_id(String::from(x))).collect();
let max = seat_list.iter().max().unwrap();
let min = seat_list.iter().min().unwrap();
println!("result1 = {}", max);
let my = (*min..*max).step_by(1).find(|x| seat_list.iter().all(|y| x != y)).unwrap();
println!("result2 = {}", my);
} |
// Copyright 2021 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use bevy::prelude::*;
use bevy_skybox_cubemap::{SkyboxBundle, SkyboxMaterial, SkyboxTextureConversion};
use rand::seq::SliceRandom;
use crate::SaverState;
/// Plugin that installs the skybox resources and systems.
pub struct SkyboxesPlugin;
impl Plugin for SkyboxesPlugin {
    fn build(&self, app: &mut AppBuilder) {
        // Load all skybox materials once at startup, then swap in a new
        // random skybox every time the saver enters the `Run` state.
        app.init_resource::<Skyboxes>()
            .add_startup_system(setup.system())
            .add_system_set(
                SystemSet::on_enter(SaverState::Run).with_system(change_skybox.system()),
            );
    }
}
/// Resource holding handles to every loaded skybox material.
#[derive(Default)]
struct Skyboxes(Vec<Handle<SkyboxMaterial>>);
/// Loads the four skybox cubemap textures, registers each for array
/// conversion, creates a material per texture, and spawns the initial
/// skybox entity with a randomly chosen material.
fn setup(
    mut commands: Commands,
    asset_server: Res<AssetServer>,
    mut skyboxes: ResMut<Skyboxes>,
    mut materials: ResMut<Assets<SkyboxMaterial>>,
    mut skybox_conversion: ResMut<SkyboxTextureConversion>,
) {
    let paths = [
        "skyboxes/1.png",
        "skyboxes/2.png",
        "skyboxes/3.png",
        "skyboxes/4.png",
    ];
    for path in paths.iter() {
        let handle = asset_server.load(*path);
        skybox_conversion.make_array(handle.clone());
        skyboxes.0.push(materials.add(SkyboxMaterial::from_texture(handle)));
    }
    commands.spawn_bundle(SkyboxBundle::new(choose_skybox(&*skyboxes)));
}
/// Swaps the (single) skybox entity's material for a randomly chosen one.
fn change_skybox(mut query: Query<&mut Handle<SkyboxMaterial>>, skyboxes: Res<Skyboxes>) {
    let next = choose_skybox(&*skyboxes);
    *query.single_mut().unwrap() = next;
}
/// Picks one of the loaded skybox materials uniformly at random.
fn choose_skybox(skyboxes: &Skyboxes) -> Handle<SkyboxMaterial> {
    let mut rng = rand::thread_rng();
    skyboxes.0.choose(&mut rng).unwrap().clone()
}
|
use std::cmp;
use std::cmp::Ordering;
/// An axis-aligned wire segment; `Segment::new` stores the endpoint
/// coordinates in ascending order (`_1` <= `_2`).
#[derive(Debug, Copy, Clone)]
enum Segment {
    HorizontalSegment { x_1: i32, x_2: i32, y: i32 },
    VerticalSegment { y_1: i32, y_2: i32, x: i32 },
}
/// A 2-D grid point `(x, y)`.
#[derive(Eq, PartialEq, Debug, Copy, Clone)]
pub struct Point(i32, i32);
impl Point {
    /// Manhattan distance from the origin.
    fn manhattan_distance(&self) -> i32 {
        self.0.abs() + self.1.abs()
    }
}
impl Ord for Point {
    // Points order by Manhattan distance from the origin, not
    // lexicographically — sorting puts the closest crossing first.
    fn cmp(&self, other: &Self) -> Ordering {
        self.manhattan_distance().cmp(&other.manhattan_distance())
    }
}
impl PartialOrd for Point {
    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
        Some(self.cmp(other))
    }
}
impl Segment {
    /// Builds a segment from two endpoints, normalising the coordinates so
    /// that the stored `_1` value is always <= the `_2` value.
    fn new(a: Point, b: Point) -> Self {
        if a.0 == b.0 {
            Segment::VerticalSegment {
                y_1: cmp::min(a.1, b.1),
                y_2: cmp::max(a.1, b.1),
                x: a.0,
            }
        } else {
            Segment::HorizontalSegment {
                x_1: cmp::min(a.0, b.0),
                x_2: cmp::max(a.0, b.0),
                y: a.1,
            }
        }
    }
    /// Returns the crossing point of two perpendicular segments, or
    /// `None` when they are parallel or do not cross. Parallel segments
    /// always yield `None` (collinear overlap is ignored), matching the
    /// original behaviour.
    fn intersect(self, other: Self) -> Option<Point> {
        match (self, other) {
            (
                Self::HorizontalSegment { x_1, x_2, y },
                Self::VerticalSegment { y_1, y_2, x },
            )
            | (
                Self::VerticalSegment { y_1, y_2, x },
                Self::HorizontalSegment { x_1, x_2, y },
            ) => {
                if x_1 <= x && x <= x_2 && y_1 <= y && y <= y_2 {
                    Some(Point(x, y))
                } else {
                    None
                }
            }
            // Parallel segments. The original bound every field here and
            // triggered unused-variable warnings; `_` binds nothing.
            _ => None,
        }
    }
}
/// Parses a comma-separated wire path ("R75,D30,...") into the list of
/// segments it traces, starting from the origin.
fn parse_input(input: &str) -> Vec<Segment> {
    let mut segments = Vec::new();
    let mut cursor = Point(0, 0);
    for step in input.split(',') {
        // Each step is a direction letter followed by a decimal distance.
        let (direction, distance) = step.split_at(1);
        let distance: i32 = distance.parse().unwrap();
        let next = match direction {
            "R" => Point(cursor.0 + distance, cursor.1),
            "L" => Point(cursor.0 - distance, cursor.1),
            "U" => Point(cursor.0, cursor.1 + distance),
            "D" => Point(cursor.0, cursor.1 - distance),
            _ => panic!("lol"),
        };
        segments.push(Segment::new(cursor, next));
        cursor = next;
    }
    segments
}
/// Returns the Manhattan distance of the wire crossing closest to the
/// origin, given the two wires' path descriptions.
///
/// Panics if the wires never cross (no intersection points), matching the
/// original's indexing behaviour.
pub fn day3(input1: &str, input2: &str) -> i32 {
    let segments1 = parse_input(input1);
    let segments2 = parse_input(input2);
    let mut points = vec![];
    // Iterate the second wire by reference; the original cloned the whole
    // Vec once per outer iteration.
    for s1 in &segments1 {
        for s2 in &segments2 {
            if let Some(point) = s1.intersect(*s2) {
                points.push(point);
            }
        }
    }
    // Points sort by Manhattan distance; skip the trivial crossing at the
    // origin where both wires start.
    points.sort();
    if points[0] == Point(0, 0) {
        points[1].manhattan_distance()
    } else {
        points[0].manhattan_distance()
    }
}
#[cfg(test)]
mod tests {
    // Note this useful idiom: importing names from outer (for mod tests) scope.
    use super::*;
    #[test]
    fn test_point_sorting() {
        // Manhattan distances are 3, 4, 27, 10 — sorting orders by
        // distance, not lexicographically.
        let mut points = vec![Point(1, 2), Point(-2, -2), Point(12, 15), Point(5, 5)];
        points.sort();
        assert_eq!(
            points,
            vec![Point(1, 2), Point(-2, -2), Point(5, 5), Point(12, 15)]
        )
    }
    #[test]
    fn test_intersect() {
        // Parallel horizontal segments never intersect.
        assert_eq!(
            None,
            Segment::HorizontalSegment {
                x_1: 0,
                x_2: 2,
                y: -3
            }
            .intersect(Segment::HorizontalSegment {
                x_1: 0,
                x_2: 2,
                y: -1
            })
        );
        // Perpendicular segments crossing at (1, 2).
        assert_eq!(
            Some(Point(1, 2)),
            Segment::VerticalSegment {
                y_1: 0,
                y_2: 3,
                x: 1
            }
            .intersect(Segment::HorizontalSegment {
                x_1: 0,
                x_2: 2,
                y: 2
            })
        )
    }
    #[test]
    fn test_stuff() {
        // Advent of Code day 3 worked examples.
        let input1 = "R75,D30,R83,U83,L12,D49,R71,U7,L72";
        let input2 = "U62,R66,U55,R34,D71,R55,D58,R83";
        assert_eq!(159, day3(input1, input2));
        assert_eq!(
            135,
            day3(
                "R98,U47,R26,D63,R33,U87,L62,D20,R33,U53,R51",
                "U98,R91,D20,R16,D67,R40,U7,R15,U6,R7"
            )
        );
    }
}
|
extern crate byteorder;
use salticidae::{Deserializable, Deserialize, Serializable, Serialize, Stream};
use tokio::net::TcpListener;
use tokio::net::TcpStream;
use tokio::runtime::Runtime;
use tokio::sync::oneshot;
#[test]
fn test_listen() {
    // Round-trips a `Hello` message to a server task that answers `Ack`,
    // exercising salticidae's stream (de)serialization extensions.
    #[derive(Serialize, Deserialize, Debug, PartialEq, Clone)]
    enum Message {
        Hello { name: String, text: String },
        Ack {},
    }
    let rt = Runtime::new().unwrap();
    let (tx, rx) = oneshot::channel();
    // Bind synchronously first and hand the listener to the accept task,
    // so the port is guaranteed open before the client connects below.
    rt.block_on(async move {
        let listener = TcpListener::bind("127.0.0.1:8081").await.unwrap();
        tx.send(listener).unwrap();
    });
    rt.spawn(async move {
        let mut listener = rx.await.unwrap();
        loop {
            let (mut socket, _) = listener.accept().await.unwrap();
            tokio::spawn(async move {
                let (header, received_bytes) = socket.read_message().await.unwrap();
                if let Message::Hello { .. } = Message::deserialize(&received_bytes, header.opcode)
                {
                    // NOTE(review): the result of `write_message` is
                    // discarded — a failed Ack would go unnoticed here.
                    socket.write_message(&Message::Ack {}, 1).await;
                }
            });
        }
    });
    rt.block_on(async move {
        let mut stream = TcpStream::connect("127.0.0.1:8081").await.unwrap();
        let original = Message::Hello {
            name: "alice".to_string(),
            text: "Hello there!".to_string(),
        };
        stream.write_message(&original, 0).await;
        let (header, received_bytes) = stream.read_message().await.unwrap();
        assert_eq!(
            Message::Ack {},
            Message::deserialize(&received_bytes, header.opcode)
        );
    })
}
|
//! Utilities for working with `/proc`, where Linux's `procfs` is typically
//! mounted. `/proc` serves as an adjunct to Linux's main syscall surface area,
//! providing additional features with an awkward interface.
//!
//! This module does a considerable amount of work to determine whether `/proc`
//! is mounted, with actual `procfs`, and without any additional mount points
//! on top of the paths we open.
use crate::{
fs::{cwd, fstat, fstatfs, major, openat, renameat, Mode, OFlags, Stat, PROC_SUPER_MAGIC},
io,
path::DecInt,
process::{getgid, getpid, getuid},
};
use io_lifetimes::{AsFd, BorrowedFd, OwnedFd};
use once_cell::sync::Lazy;
use std::path::Path;
/// Linux's procfs always uses inode 1 for its root directory.
const PROC_ROOT_INO: u64 = 1;
// Identify a subdirectory of "/proc", to determine which anomalies to
// check for.
enum Subdir {
    /// "/proc" itself.
    Proc,
    /// A "/proc/<pid>" directory.
    Pid,
    /// A "/proc/<pid>/fd" directory.
    Fd,
}
/// Check a subdirectory of "/proc" for anomalies.
///
/// `kind` selects which invariants apply ("/proc" itself, "/proc/<pid>",
/// or "/proc/<pid>/fd"). `proc_stat` is the already-validated stat of
/// "/proc" (used for the non-`Proc` kinds to confirm we stayed on the
/// same filesystem), and `uid`/`gid` are the expected owners. Returns the
/// directory's `Stat` on success and `NOTSUP` on any anomaly.
fn check_proc_dir(
    kind: Subdir,
    dir: BorrowedFd<'_>,
    proc_stat: Option<&Stat>,
    uid: u32,
    gid: u32,
) -> io::Result<Stat> {
    // Check the filesystem magic.
    check_procfs(dir)?;
    let dir_stat = fstat(&dir)?;
    // We use `O_DIRECTORY`, so open should fail if we don't get a directory.
    assert_eq!(dir_stat.st_mode & Mode::IFMT.bits(), Mode::IFDIR.bits());
    // Check the root inode number.
    if let Subdir::Proc = kind {
        if dir_stat.st_ino != PROC_ROOT_INO {
            return Err(io::Error::NOTSUP);
        }
        // Proc is a non-device filesystem, so check for major number 0.
        // <https://www.kernel.org/doc/Documentation/admin-guide/devices.txt>
        if major(dir_stat.st_dev) != 0 {
            return Err(io::Error::NOTSUP);
        }
        // Check that "/proc" is a mountpoint.
        if !is_mountpoint(dir)? {
            return Err(io::Error::NOTSUP);
        }
    } else {
        // Check that we haven't been linked back to the root of "/proc".
        if dir_stat.st_ino == PROC_ROOT_INO {
            return Err(io::Error::NOTSUP);
        }
        // Check that we're still in procfs.
        if dir_stat.st_dev != proc_stat.unwrap().st_dev {
            return Err(io::Error::NOTSUP);
        }
        // Check that subdirectories of "/proc" are not mount points.
        if is_mountpoint(dir)? {
            return Err(io::Error::NOTSUP);
        }
    }
    // Check the ownership of the directory.
    if (dir_stat.st_uid, dir_stat.st_gid) != (uid, gid) {
        return Err(io::Error::NOTSUP);
    }
    // "/proc" directories are typically mounted r-xr-xr-x.
    // "/proc/self/fd" is r-x------. Allow them to have fewer permissions, but
    // not more.
    let expected_mode = if let Subdir::Fd = kind { 0o500 } else { 0o555 };
    if dir_stat.st_mode & 0o777 & !expected_mode != 0 {
        return Err(io::Error::NOTSUP);
    }
    if let Subdir::Fd = kind {
        // Check that the "/proc/self/fd" directory doesn't have any extraneous
        // links into it (which might include unexpected subdirectories).
        if dir_stat.st_nlink != 2 {
            return Err(io::Error::NOTSUP);
        }
    } else {
        // Check that the "/proc" and "/proc/self" directories aren't empty.
        if dir_stat.st_nlink <= 2 {
            return Err(io::Error::NOTSUP);
        }
    }
    Ok(dir_stat)
}
/// Check that `file` is opened on a `procfs` filesystem.
fn check_procfs(file: BorrowedFd<'_>) -> io::Result<()> {
let statfs = fstatfs(&file)?;
let f_type = statfs.f_type;
if f_type != PROC_SUPER_MAGIC {
return Err(io::Error::NOTSUP);
}
Ok(())
}
/// Check whether the given directory handle is a mount point. We use a
/// `rename` call that would otherwise fail, but which fails with `EXDEV`
/// first if it would cross a mount point.
fn is_mountpoint(file: BorrowedFd<'_>) -> io::Result<bool> {
let err = renameat(&file, "../.", &file, ".").unwrap_err();
match err {
io::Error::XDEV => Ok(true), // the rename failed due to crossing a mount point
io::Error::BUSY => Ok(false), // the rename failed normally
_ => panic!("Unexpected error from `renameat`: {:?}", err),
}
}
/// Returns a handle to Linux's `/proc` directory.
///
/// This ensures that `procfs` is mounted on `/proc`, that nothing is
/// mounted on top of it, and that it looks normal. It also returns the
/// `Stat` of `/proc`.
///
/// # References
/// - [Linux]
///
/// [Linux]: https://man7.org/linux/man-pages/man5/proc.5.html
pub fn proc() -> io::Result<(BorrowedFd<'static>, &'static Stat)> {
    #[allow(clippy::useless_conversion)]
    static PROC: Lazy<io::Result<(OwnedFd, Stat)>> = Lazy::new(|| {
        // O_PATH suffices: this fd is only used as an anchor for further
        // `openat` calls and for fstat/fstatfs checks.
        let oflags =
            OFlags::NOFOLLOW | OFlags::PATH | OFlags::DIRECTORY | OFlags::CLOEXEC | OFlags::NOCTTY;
        let proc = openat(&cwd(), "/proc", oflags, Mode::empty())?;
        let proc_stat = check_proc_dir(Subdir::Proc, proc.as_fd(), None, 0, 0)?;
        Ok((proc, proc_stat))
    });
    // The cached error can't be moved out of the static, so any failure
    // is collapsed to `NOTSUP` here.
    PROC.as_ref()
        .map(|(fd, stat)| (fd.as_fd(), stat))
        .map_err(|_err| io::Error::NOTSUP)
}
/// Returns a handle to Linux's `/proc/self` directory.
///
/// This ensures that `procfs` is mounted on `/proc/self`, that nothing is
/// mounted on top of it, and that it looks normal. It also returns the
/// `Stat` of `/proc/self`.
///
/// # References
/// - [Linux]
///
/// [Linux]: https://man7.org/linux/man-pages/man5/proc.5.html
pub fn proc_self() -> io::Result<(BorrowedFd<'static>, &'static Stat)> {
    #[allow(clippy::useless_conversion)]
    static PROC_SELF: Lazy<io::Result<(OwnedFd, Stat)>> = Lazy::new(|| {
        let (proc, proc_stat) = proc()?;
        let (uid, gid, pid) = (getuid(), getgid(), getpid());
        let oflags =
            OFlags::NOFOLLOW | OFlags::PATH | OFlags::DIRECTORY | OFlags::CLOEXEC | OFlags::NOCTTY;
        // Open "/proc/self". Use our pid to compute the name rather than literally
        // using "self", as "self" is a symlink.
        let proc_self = openat(&proc, DecInt::new(pid), oflags, Mode::empty())?;
        // The pid dir must be owned by our own uid/gid.
        let proc_self_stat =
            check_proc_dir(Subdir::Pid, proc_self.as_fd(), Some(proc_stat), uid, gid)?;
        Ok((proc_self, proc_self_stat))
    });
    // As in `proc()`, collapse any cached failure to `NOTSUP`.
    PROC_SELF
        .as_ref()
        .map(|(owned, stat)| (owned.as_fd(), stat))
        .map_err(|_err| io::Error::NOTSUP)
}
/// Returns a handle to Linux's `/proc/self/fd` directory.
///
/// This ensures that `procfs` is mounted on `/proc/self/fd`, that nothing is
/// mounted on top of it, and that it looks normal. It also returns the
/// `Stat` of `/proc/self/fd`.
///
/// # References
/// - [Linux]
///
/// [Linux]: https://man7.org/linux/man-pages/man5/proc.5.html
pub fn proc_self_fd() -> io::Result<(BorrowedFd<'static>, &'static Stat)> {
    #[allow(clippy::useless_conversion)]
    static PROC_SELF_FD: Lazy<io::Result<(OwnedFd, Stat)>> = Lazy::new(|| {
        let (_, proc_stat) = proc()?;
        let (proc_self, proc_self_stat) = proc_self()?;
        let oflags =
            OFlags::NOFOLLOW | OFlags::PATH | OFlags::DIRECTORY | OFlags::CLOEXEC | OFlags::NOCTTY;
        // Open "/proc/self/fd".
        let proc_self_fd = openat(&proc_self, Path::new("fd"), oflags, Mode::empty())?;
        // "fd" must have the same owner as its "/proc/self" parent.
        let proc_self_fd_stat = check_proc_dir(
            Subdir::Fd,
            proc_self_fd.as_fd(),
            Some(proc_stat),
            proc_self_stat.st_uid,
            proc_self_stat.st_gid,
        )?;
        Ok((proc_self_fd, proc_self_fd_stat))
    });
    // As in `proc()`, collapse any cached failure to `NOTSUP`.
    PROC_SELF_FD
        .as_ref()
        .map(|(owned, stat)| (owned.as_fd(), stat))
        .map_err(|_err| io::Error::NOTSUP)
}
|
mod main_controller;
use router::Router;
/// Route-table factory for the application.
#[derive(Debug)]
pub struct Routes {}
impl Routes {
    /// Builds the application's router with every known route registered.
    pub fn new() -> Router {
        let mut routes = Router::new();
        // GET / -> main controller index page.
        routes.get("/", main_controller::Index, "index");
        routes
    }
}
|
//! ## `Call Account` Receipt Binary Format Version 0
//!
//! On success (`is_success = 1`)
//!
//! ```text
//! +---------------------------------------------------+
//! | | | | |
//! | tx type | version | is_success | new State |
//! | (1 byte) | (2 bytes) | (1 byte) | (32 bytes) |
//! | | | | |
//! +---------------------------------------------------+
//! | | | |
//! | returndata | returndata | gas_used |
//! | byte-size | (Blob) | (8 bytes) |
//! | (2 bytes) | | |
//! | | | |
//! +---------------------------------------------------+
//! | | | | |
//! | #logs | log #1 | . . . | log #N |
//! | (1 byte) | (Blob) | | (Blob) |
//! | | | | |
//! +---------------------------------------------------+
//! ```
//!
//!
//! On Error (`is_success = 0`)
//! See [error.rs](./error.rs)
use std::io::Cursor;
use svm_types::CallReceipt;
use super::{decode_error, encode_error, gas, logs, returndata};
use crate::version;
use crate::{ReadExt, WriteExt};
/// Encodes an [`CallReceipt`] into its binary format.
pub fn encode_call(receipt: &CallReceipt) -> Vec<u8> {
let mut w = Vec::new();
w.write_byte(super::types::CALL);
version::encode_version(receipt.version, &mut w);
w.write_bool(receipt.success);
if receipt.success {
encode_new_state(receipt, &mut w);
encode_returndata(receipt, &mut w);
gas::encode_gas_used(&receipt.gas_used, &mut w);
logs::encode_logs(&receipt.logs, &mut w);
} else {
let logs = receipt.logs();
encode_error(receipt.error(), logs, &mut w);
};
w
}
/// Decodes a binary [`CallReceipt`].
///
/// Panics (via `unwrap`) on truncated input; the type byte and version
/// are only validated in debug builds.
pub fn decode_call(bytes: &[u8]) -> CallReceipt {
    let mut cursor = Cursor::new(bytes);
    let ty = cursor.read_byte().unwrap();
    debug_assert_eq!(ty, crate::receipt::types::CALL);
    let version = version::decode_version(&mut cursor).unwrap();
    debug_assert_eq!(0, version);
    // `match` on a bool is an anti-idiom (clippy::match_bool); if/else
    // reads in layout order.
    if cursor.read_bool().unwrap() {
        let new_state = cursor.read_state().unwrap();
        let returndata = returndata::decode(&mut cursor).unwrap();
        let gas_used = gas::decode_gas_used(&mut cursor).unwrap();
        let logs = logs::decode_logs(&mut cursor).unwrap();
        CallReceipt {
            version,
            success: true,
            error: None,
            new_state: Some(new_state),
            returndata: Some(returndata),
            gas_used,
            logs,
        }
    } else {
        let (err, logs) = decode_error(&mut cursor);
        CallReceipt::from_err(err, logs)
    }
}
/// Writes the receipt's post-execution state root (success receipts only).
fn encode_new_state(receipt: &CallReceipt, w: &mut Vec<u8>) {
    debug_assert!(receipt.success);
    w.write_state(receipt.new_state());
}
/// Writes the receipt's returndata blob (success receipts only).
fn encode_returndata(receipt: &CallReceipt, w: &mut Vec<u8>) {
    debug_assert!(receipt.success);
    let returndata = receipt.returndata();
    returndata::encode(&returndata, w);
}
#[cfg(test)]
mod tests {
    use super::*;
    use svm_types::{Address, Gas, ReceiptLog, RuntimeError, State};
    // Each test round-trips a receipt through `encode_call` and the
    // top-level `decode_receipt` and checks structural equality.
    #[test]
    fn encode_decode_call_receipt_error() {
        let account = Address::of("@Account");
        let error = RuntimeError::AccountNotFound(account.into());
        let logs = vec![ReceiptLog::new(b"something happened".to_vec())];
        let receipt = CallReceipt {
            version: 0,
            success: false,
            error: Some(error),
            new_state: None,
            returndata: None,
            gas_used: Gas::new(),
            logs,
        };
        let bytes = encode_call(&receipt);
        let decoded = crate::receipt::decode_receipt(&bytes[..]);
        assert_eq!(decoded.into_call(), receipt);
    }
    #[test]
    fn encode_decode_call_receipt_success_without_returns() {
        // "Without returns" still encodes an (empty) returndata blob.
        let new_state = State::of("some-state");
        let logs = vec![ReceiptLog::new(b"something happened".to_vec())];
        let receipt = CallReceipt {
            version: 0,
            success: true,
            error: None,
            new_state: Some(new_state),
            returndata: Some(Vec::new()),
            gas_used: Gas::with(100),
            logs: logs.clone(),
        };
        let bytes = encode_call(&receipt);
        let decoded = crate::receipt::decode_receipt(&bytes[..]);
        assert_eq!(decoded.into_call(), receipt);
    }
    #[test]
    fn encode_decode_call_receipt_success_with_returns() {
        let new_state = State::of("some-state");
        let returndata = vec![0x10, 0x20];
        let logs = vec![ReceiptLog::new(b"something happened".to_vec())];
        let receipt = CallReceipt {
            version: 0,
            success: true,
            error: None,
            new_state: Some(new_state),
            returndata: Some(returndata),
            gas_used: Gas::with(100),
            logs: logs.clone(),
        };
        let bytes = encode_call(&receipt);
        let decoded = crate::receipt::decode_receipt(&bytes[..]);
        assert_eq!(decoded.into_call(), receipt);
    }
}
|
extern crate base58;
use pow::*;
use std::time::{SystemTime, UNIX_EPOCH};
use std::{ fmt, str };
use self::base58::ToBase58;
// Builds the chain's initial block.
macro_rules! genesis_block {
    () => {
        // No trailing semicolon: the expansion must be an expression so
        // the macro is usable in expression position (e.g. `vec![...]`).
        Block::new("Genesis Block".to_string(), vec![])
    };
}
/// A single block in the chain.
#[derive(Debug)]
pub struct Block {
    /// Seconds since the Unix epoch at creation time.
    pub time_stamp: u64,
    /// Payload carried by the block.
    pub data: String,
    /// Nonce filled in by the proof-of-work pass in `Block::new`.
    pub nonce: u64,
    // The stray `#[derive(ToBase58)]` on this field has been removed:
    // `derive` is only valid on types, not fields, and base58 rendering
    // is done explicitly in the `Display` impl anyway.
    pub prev_block_hash: Vec<u8>,
    pub hash: Vec<u8>,
}
impl Block {
    /// Creates a block over `data`, chained to `prev_block_hash`, then
    /// calls `run_pow` — presumably to fill in `nonce` and `hash`
    /// (defined in the `pow` module; confirm).
    pub fn new(data: String, prev_block_hash: Vec<u8>) -> Block {
        let mut b = Block {
            // Creation time in seconds since the epoch; falls back to 0
            // if the system clock is before the Unix epoch.
            time_stamp: match SystemTime::now().duration_since(UNIX_EPOCH) {
                Ok(n) => n.as_secs(),
                Err(_) => 0
            },
            data: data,
            prev_block_hash: prev_block_hash,
            hash: vec![],
            nonce: 0u64,
        };
        run_pow(&mut b);
        b
    }
}
impl fmt::Display for Block {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        // Hashes are rendered in base58; `{:?}` appears to wrap the
        // base58 text in quotes in the output.
        write!(f,
            "Block {:?}\n\tTimeStamp: {}\n\tData: {}\n\tPrevBlock: {:?}\n\tNonce: {}\n\n",
            self.hash.to_base58(), self.time_stamp, self.data, self.prev_block_hash.to_base58(),
            self.nonce
        )
    }
}
/// An append-only chain of blocks; index 0 is the genesis block.
#[derive(Debug)]
pub struct BlockChain {
    pub blocks: Vec<Block>
}
impl BlockChain {
    /// Creates a chain containing only the genesis block.
    pub fn new() -> BlockChain {
        BlockChain {
            blocks: vec![genesis_block!()]
        }
    }
    /// Appends a new block whose `prev_block_hash` is the current tip's
    /// hash.
    pub fn add_block(&mut self, data: String) {
        // Pop the tip to read its hash, then push it back below. If the
        // chain is somehow empty a fresh genesis block is used as parent.
        let prev_block = match self.blocks.pop() {
            Some(x) => x,
            None => genesis_block!()
        };
        let new_block = Block::new(data, prev_block.hash.clone());
        self.blocks.push(prev_block);
        self.blocks.push(new_block);
    }
    /// Prints every block using its `Display` impl.
    pub fn show_blocks(&self) {
        for blk in self.blocks.iter() {
            println!("{}", blk);
        }
    }
}
|
extern crate libc;
use libc::*;
use std::net::{TcpStream};
//use std::thread;
use std::io::{Read, Write};
use std::str;
use std::os::unix::io::AsRawFd;
#[link(name = "osl", kind = "static")]
#[link(name = "ssl", kind = "static")]
#[link(name = "crypto", kind = "static")]
// FFI surface of a small C wrapper over OpenSSL (presumably the local
// "osl" static library). The returned pointers are opaque handles that
// must be released via `deletessl` / `deletectx`.
extern {
    fn newctx(cert_file: *const c_uchar, key_file: *const c_uchar) -> *mut c_void;
    fn newssl(ctx: *const c_void, fd: c_int) -> *mut c_void;
    //fn sslaccept(ssl: *const c_void) -> c_int;
    fn sslread(ssl: *mut c_void, buf: *mut c_uchar, size: c_int) -> c_int;
    fn sslwrite(ssl: *mut c_void, buf: *const c_uchar, size: c_int) -> c_int;
    fn deletessl(ssl: *mut c_void);
    fn deletectx(ssl: *mut c_void);
}
// need to allow dead code because we need to own the stream
// even though we don't use it "directly"
#[allow(dead_code)]
#[derive(Debug)]
/// A TCP connection wrapped in a server-side OpenSSL session.
pub struct OsslStream {
    // Opaque SSL handle owned by this struct; released in `Drop`.
    ssl: *mut c_void,
    // Owned so the underlying fd stays alive for the SSL session.
    stream: TcpStream,
}
// NOTE(review): assumes the raw SSL handle is never used from two threads
// at once — confirm before sharing a stream across threads.
unsafe impl Send for OsslStream {}
impl OsslStream {
    /// Wraps an accepted TCP connection in a server-side SSL session.
    ///
    /// If the SSL object cannot be created (the client did not speak
    /// TLS), the client is redirected to the HTTPS endpoint and the
    /// connection is shut down.
    pub fn accept(ctx: &Ctx, mut stream: TcpStream) -> Result<Self, &'static str> {
        let ssl = unsafe { newssl(ctx.0, stream.as_raw_fd()) };
        // `is_null()` replaces the verbose null-pointer comparison.
        if ssl.is_null() {
            // for now, we tell the client to reconnect using ssl
            // could return the stream back in Err in order to handle more
            // situations
            println!("NOT TLS");
            stream.write_all(b"HTTP/1.1 307 Temporary Redirect\r\nLocation: https://localhost:8080\r\n\r\n").unwrap();
            stream.shutdown(std::net::Shutdown::Both).unwrap();
            // Fixed typo in the error message ("accpet" -> "accept").
            return Err("can't accept");
        }
        Ok(OsslStream { ssl, stream })
    }
}
impl Read for OsslStream {
    /// Reads decrypted bytes from the SSL session into `buf`.
    ///
    /// `sslread` returns a negative value on failure; the original cast
    /// that straight to `usize`, reporting a bogus huge byte count
    /// instead of an error.
    fn read(&mut self, buf: &mut [u8]) -> std::io::Result<usize> {
        let n = unsafe { sslread(self.ssl, buf.as_mut_ptr() as *mut u8, buf.len() as c_int) };
        if n < 0 {
            Err(std::io::Error::new(
                std::io::ErrorKind::Other,
                "sslread failed",
            ))
        } else {
            Ok(n as usize)
        }
    }
}
impl Write for OsslStream {
    /// Writes `buf` through the SSL session.
    ///
    /// As with `read`, a negative `sslwrite` return is an error and must
    /// not be cast to a byte count.
    fn write(&mut self, buf: &[u8]) -> std::io::Result<usize> {
        let n = unsafe { sslwrite(self.ssl, buf.as_ptr() as *const u8, buf.len() as c_int) };
        if n < 0 {
            Err(std::io::Error::new(
                std::io::ErrorKind::Other,
                "sslwrite failed",
            ))
        } else {
            Ok(n as usize)
        }
    }
    /// No buffering is done on the Rust side, so flush is a no-op.
    fn flush(&mut self) -> std::io::Result<()> {
        Ok(())
    }
}
impl Drop for OsslStream {
    fn drop(&mut self) {
        println!("droping ssl stream");
        // Free the C-side session; the owned TcpStream closes the fd after.
        unsafe { deletessl(self.ssl) };
    }
}
//#[derive(Clone, Copy)]
/// Owning wrapper around the C-side SSL context pointer (freed in Drop).
pub struct Ctx(*mut c_void);
// SAFETY(review): assumes the C context may be moved across threads —
// confirm against the C implementation before sharing it more widely.
unsafe impl Send for Ctx {}
impl Drop for Ctx {
    fn drop(&mut self) {
        println!("droping ctx");
        // Release the C-side context allocated by newctx().
        unsafe { deletectx(self.0) };
    }
}
/// Build an SSL context from PEM certificate/key file paths.
///
/// # Panics
/// Panics if either path contains an interior NUL byte.
pub fn make_ctx(cert_file: &str, key_file: &str) -> Ctx {
    // CString guarantees a trailing NUL and rejects interior NULs, unlike
    // the previous manual `+ "\0"` concatenation.
    let cert = std::ffi::CString::new(cert_file).expect("cert_file contains interior NUL");
    let key = std::ffi::CString::new(key_file).expect("key_file contains interior NUL");
    // SAFETY: both pointers outlive the call; `cert`/`key` live to fn end.
    Ctx(unsafe { newctx(cert.as_ptr() as *const c_uchar, key.as_ptr() as *const c_uchar) })
}
// mod tests {
// use super::*;
// #[test]
// fn it_works() {
// let listener = TcpListener::bind("127.0.0.1:8080").unwrap();
// let ctx = get_ctx();
// for stream in listener.incoming() {
// match stream {
// Ok(stream) => {
// println!("{:?}", stream);
// println!("CHECK SSL");
// //##############################################
// if let Ok(mut ssl_stream) = OsslStream::accept(&ctx, stream) {
// //println!("{:?}", ssl_stream);
// thread::spawn(move|| {
// println!("GOT SSL CONNECTION");
// let mut buf = [0;4096];
// let n = ssl_stream.read(&mut buf).unwrap();
// println!("{}", n);
// let s = str::from_utf8(&buf[..n]).unwrap();
// //for c in buf.iter() {
// println!("{}", s);
// //}
// ssl_stream.write(b"HTTP/1.1 200 OK\r\n\r\nhello world!").unwrap();
// //handle_client(tls_stream);
// return;
// });
// }
// }
// Err(_) => { /* connection failed */ }
// }
// }
// println!("Hello, world!");
// }
// }
|
// Passthrough decoder for librespot
use std::{
io::{Read, Seek},
time::{SystemTime, UNIX_EPOCH},
};
// TODO: move this to the Symphonia Ogg demuxer
use ogg::{OggReadError, Packet, PacketReader, PacketWriteEndInfo, PacketWriter};
use super::{AudioDecoder, AudioPacket, AudioPacketPosition, DecoderError, DecoderResult};
use crate::{
metadata::audio::{AudioFileFormat, AudioFiles},
MS_PER_PAGE, PAGES_PER_MS,
};
/// Read the next Ogg packet and verify it is the Vorbis header with the
/// expected type byte (`code`), returning its raw bytes.
///
/// # Errors
/// Returns `DecoderError::PassthroughDecoder` if the packet cannot be read,
/// is empty, or has a different header type.
fn get_header<T>(code: u8, rdr: &mut PacketReader<T>) -> DecoderResult<Vec<u8>>
where
    T: Read + Seek,
{
    let pck: Packet = rdr
        .read_packet_expected()
        .map_err(|e| DecoderError::PassthroughDecoder(e.to_string()))?;
    // An empty packet would previously panic on `data[0]`; report it instead.
    let pkt_type = *pck
        .data
        .first()
        .ok_or_else(|| DecoderError::PassthroughDecoder("Empty Ogg packet".into()))?;
    debug!("Vorbis header type {}", &pkt_type);
    if pkt_type != code {
        return Err(DecoderError::PassthroughDecoder("Invalid Data".into()));
    }
    Ok(pck.data)
}
/// Decoder that re-muxes the raw Ogg Vorbis stream without decoding audio,
/// reading packets from `rdr` and writing them back out through `wtr`.
pub struct PassthroughDecoder<R: Read + Seek> {
    rdr: PacketReader<R>,
    wtr: PacketWriter<'static, Vec<u8>>,
    eos: bool,          // end-of-stream marker seen/written
    bos: bool,          // begin-of-stream headers already written
    ofsgp_page: u64,    // granule-position offset of the current stream start
    stream_serial: u32, // serial of the re-muxed output stream
    ident: Vec<u8>,     // Vorbis identification header packet
    comment: Vec<u8>,   // Vorbis comment header packet
    setup: Vec<u8>,     // Vorbis setup header packet
}
impl<R: Read + Seek> PassthroughDecoder<R> {
    /// Constructs a new Decoder from a given implementation of `Read + Seek`.
    ///
    /// Fails if `format` is not Ogg Vorbis or the three Vorbis headers
    /// cannot be read from the stream.
    pub fn new(rdr: R, format: AudioFileFormat) -> DecoderResult<Self> {
        if !AudioFiles::is_ogg_vorbis(format) {
            return Err(DecoderError::PassthroughDecoder(format!(
                "Passthrough decoder is not implemented for format {format:?}"
            )));
        }
        let mut rdr = PacketReader::new(rdr);
        // Derive a fresh output stream serial from the wall clock.
        let since_epoch = SystemTime::now()
            .duration_since(UNIX_EPOCH)
            .map_err(|e| DecoderError::PassthroughDecoder(e.to_string()))?;
        let stream_serial = since_epoch.as_millis() as u32;
        info!("Starting passthrough track with serial {stream_serial}");
        // search for ident, comment, setup
        // (Vorbis header packet types 1, 3 and 5 respectively.)
        let ident = get_header(1, &mut rdr)?;
        let comment = get_header(3, &mut rdr)?;
        let setup = get_header(5, &mut rdr)?;
        // remove un-needed packets
        rdr.delete_unread_packets();
        Ok(PassthroughDecoder {
            rdr,
            wtr: PacketWriter::new(Vec::new()),
            ofsgp_page: 0,
            stream_serial,
            ident,
            comment,
            setup,
            eos: false,
            bos: false,
        })
    }
    /// Convert an absolute granule position to a millisecond position.
    fn position_pcm_to_ms(position_pcm: u64) -> u32 {
        (position_pcm as f64 * MS_PER_PAGE) as u32
    }
}
impl<R: Read + Seek> AudioDecoder for PassthroughDecoder<R> {
    /// Seek to `position_ms`, closing the current output stream (if one was
    /// started) and beginning a new one with a bumped serial. Returns the
    /// actual position seeked to, in milliseconds.
    fn seek(&mut self, position_ms: u32) -> Result<u32, DecoderError> {
        let absgp = (position_ms as f64 * PAGES_PER_MS) as u64;
        // add an eos to previous stream if missing
        if self.bos && !self.eos {
            match self.rdr.read_packet() {
                Ok(Some(pck)) => {
                    let absgp_page = pck.absgp_page() - self.ofsgp_page;
                    self.wtr
                        .write_packet(
                            pck.data,
                            self.stream_serial,
                            PacketWriteEndInfo::EndStream,
                            absgp_page,
                        )
                        .map_err(|e| DecoderError::PassthroughDecoder(e.to_string()))?;
                }
                // Best-effort: a missing EoS is logged, not fatal.
                _ => warn! {"Cannot write EoS after seeking"},
            };
        }
        // Reset stream state; the next next_packet() re-writes the headers.
        self.eos = false;
        self.bos = false;
        self.ofsgp_page = 0;
        self.stream_serial += 1;
        match self.rdr.seek_absgp(None, absgp) {
            Ok(_) => {
                // need to set some offset for next_page()
                let pck = self
                    .rdr
                    .read_packet()
                    .map_err(|e| DecoderError::PassthroughDecoder(e.to_string()))?;
                match pck {
                    Some(pck) => {
                        let new_page = pck.absgp_page();
                        self.ofsgp_page = new_page;
                        debug!("Seek to offset page {}", new_page);
                        let new_position_ms = Self::position_pcm_to_ms(new_page);
                        Ok(new_position_ms)
                    }
                    None => Err(DecoderError::PassthroughDecoder("Packet is None".into())),
                }
            }
            Err(e) => Err(DecoderError::PassthroughDecoder(e.to_string())),
        }
    }
    /// Produce the next chunk of raw re-muxed Ogg data, or `None` at end of
    /// stream. Writes the three Vorbis headers first when (re)starting.
    fn next_packet(&mut self) -> DecoderResult<Option<(AudioPacketPosition, AudioPacket)>> {
        // write headers if we are (re)starting
        if !self.bos {
            self.wtr
                .write_packet(
                    self.ident.clone(),
                    self.stream_serial,
                    PacketWriteEndInfo::EndPage,
                    0,
                )
                .map_err(|e| DecoderError::PassthroughDecoder(e.to_string()))?;
            self.wtr
                .write_packet(
                    self.comment.clone(),
                    self.stream_serial,
                    PacketWriteEndInfo::NormalPacket,
                    0,
                )
                .map_err(|e| DecoderError::PassthroughDecoder(e.to_string()))?;
            self.wtr
                .write_packet(
                    self.setup.clone(),
                    self.stream_serial,
                    PacketWriteEndInfo::EndPage,
                    0,
                )
                .map_err(|e| DecoderError::PassthroughDecoder(e.to_string()))?;
            self.bos = true;
            debug!("Wrote Ogg headers");
        }
        loop {
            let pck = match self.rdr.read_packet() {
                Ok(Some(pck)) => pck,
                Ok(None) | Err(OggReadError::NoCapturePatternFound) => {
                    info!("end of streaming");
                    return Ok(None);
                }
                Err(e) => return Err(DecoderError::PassthroughDecoder(e.to_string())),
            };
            let pckgp_page = pck.absgp_page();
            // skip till we have audio and a calculable granule position
            if pckgp_page == 0 || pckgp_page == self.ofsgp_page {
                continue;
            }
            // set packet type
            let inf = if pck.last_in_stream() {
                self.eos = true;
                PacketWriteEndInfo::EndStream
            } else if pck.last_in_page() {
                PacketWriteEndInfo::EndPage
            } else {
                PacketWriteEndInfo::NormalPacket
            };
            self.wtr
                .write_packet(
                    pck.data,
                    self.stream_serial,
                    inf,
                    // Granule positions are re-based to the stream start.
                    pckgp_page - self.ofsgp_page,
                )
                .map_err(|e| DecoderError::PassthroughDecoder(e.to_string()))?;
            let data = self.wtr.inner_mut();
            // The writer only flushes whole pages; return once data is ready.
            if !data.is_empty() {
                let position_ms = Self::position_pcm_to_ms(pckgp_page);
                let packet_position = AudioPacketPosition {
                    position_ms,
                    skipped: false,
                };
                // mem::take hands the buffer to the caller and leaves an
                // empty Vec for the writer to refill.
                let ogg_data = AudioPacket::Raw(std::mem::take(data));
                return Ok(Some((packet_position, ogg_data)));
            }
        }
    }
}
|
use super::{
super::{backend::Backend, entity::Entity},
GetEntityFuture, ListEntitiesFuture, RemoveEntitiesFuture, RemoveEntityFuture,
UpsertEntitiesFuture, UpsertEntityFuture,
};
use futures_util::future::{self, FutureExt, TryFutureExt};
pub trait Repository<E: Entity, B: Backend> {
    /// Retrieve an immutable reference to the backend that the repository is
    /// tied to.
    fn backend(&self) -> B;
    /// Get an entity by its ID in the cache.
    fn get(&self, entity_id: E::Id) -> GetEntityFuture<'_, E, B::Error>;
    /// Stream a list of records of the entity.
    fn list(&self) -> ListEntitiesFuture<'_, E, B::Error>;
    /// Remove an entity by its ID from the cache.
    fn remove(&self, entity_id: E::Id) -> RemoveEntityFuture<'_, B::Error>;
    /// Bulk remove multiple entities from the cache.
    ///
    /// **Backend implementations**: a default implementation is provided
    /// that will concurrently await [`remove`] calls for all provided entity
    /// IDs. This may not be optimal for all implementations, so you may want
    /// to implement this manually.
    ///
    /// [`remove`]: #tymethod.remove
    fn remove_bulk<T: Iterator<Item = E::Id>>(
        &self,
        entity_ids: T,
    ) -> RemoveEntitiesFuture<'_, B::Error> {
        future::try_join_all(entity_ids.map(|id| self.remove(id)))
            .map_ok(|_| ())
            .boxed()
    }
    /// Upsert an entity into the cache.
    fn upsert(&self, entity: E) -> UpsertEntityFuture<'_, B::Error>;
    /// Bulk upsert multiple entities in the cache.
    ///
    /// **Backend implementations**: a default implementation is provided
    /// that will concurrently await [`upsert`] calls for all provided
    /// entities. This may not be optimal for all implementations, so you may
    /// want to implement this manually.
    ///
    /// [`upsert`]: #tymethod.upsert
    fn upsert_bulk<T: Iterator<Item = E> + Send>(
        &self,
        entities: T,
    ) -> UpsertEntitiesFuture<'_, B::Error> {
        // Same `.boxed()` combinator style as `remove_bulk` for consistency
        // (previously a bare `Box::pin(...)`).
        future::try_join_all(entities.map(|entity| self.upsert(entity)))
            .map_ok(|_| ())
            .boxed()
    }
}
/// Repository over a cache slot that holds at most one instance of an
/// entity, so lookups and removals need no ID.
pub trait SingleEntityRepository<E: Entity, B: Backend> {
    /// Retrieve an immutable reference to the backend that the repository is
    /// tied to.
    fn backend(&self) -> B;
    /// Get the entity in the cache.
    fn get(&self) -> GetEntityFuture<'_, E, B::Error>;
    /// Remove the entity from the cache.
    fn remove(&self) -> RemoveEntityFuture<'_, B::Error>;
    /// Upsert the entity into the cache.
    fn upsert(&self, entity: E) -> UpsertEntityFuture<'_, B::Error>;
}
|
/// Print each integer in the half-open range `uper..lower`, one per line.
///
/// NOTE(review): despite the "Downward" label, iteration runs upward and
/// nothing is printed when `uper >= lower` — confirm whether a descending
/// loop was intended.
pub fn print_fun(uper: i32, lower: i32) {
    for value in uper..lower {
        println!("Downward:{}", value);
    }
}
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at http://mozilla.org/MPL/2.0/.
use base::prelude::*;
use atomic::{Atomic};
// Internal states stored in `Once::status`.
const UNINITIALIZED: u8 = 0; // the closure has not started yet
const WORKING: u8 = 1; // some thread is currently running the closure
const INITIALIZED: u8 = 2; // the closure has completed
/// The status of a once.
pub enum OnceStatus {
    /// The closure has not run yet.
    Uninitialized,
    /// A thread is currently running the closure.
    Working,
    /// The closure has completed.
    Initialized,
}
#[repr(C)]
pub struct Once {
    // One of UNINITIALIZED / WORKING / INITIALIZED.
    status: Atomic<u8>,
}
// The lifetime parameter `'a` on this impl was unused/unconstrained and has
// been removed.
impl Once {
    /// Creates a new, uninitialized, once.
    pub const fn new() -> Once {
        Once { status: Atomic::new(UNINITIALIZED) }
    }
    /// Returns the status of the once.
    pub fn status(&self) -> OnceStatus {
        match self.status.load_unordered() {
            UNINITIALIZED => OnceStatus::Uninitialized,
            WORKING => OnceStatus::Working,
            _ => OnceStatus::Initialized,
        }
    }
    /// Runs `f` at most once across all callers of this `Once`.
    ///
    /// Returns `Some(result)` for the single caller that actually ran `f`,
    /// `None` for everyone else. Callers arriving while `f` is still running
    /// spin until it completes.
    pub fn once<F, T>(&self, f: F) -> Option<T>
        where F: FnOnce() -> T,
    {
        let mut status = self.status.load_acquire();
        if status == INITIALIZED {
            return None;
        }
        if status == UNINITIALIZED {
            // compare_exchange returns the previous value: UNINITIALIZED
            // means we won the race and must run `f`.
            status = self.status.compare_exchange(UNINITIALIZED, WORKING);
        }
        if status == UNINITIALIZED {
            let res = f();
            self.status.store_release(INITIALIZED);
            return Some(res);
        }
        // Another thread is running `f`; spin until it publishes INITIALIZED.
        while status == WORKING {
            status = self.status.load_acquire();
        }
        None
    }
}
|
// No-op target function for the compile-fail test in main() below.
fn f() {
}
// Deliberate compile-fail test (pre-1.0 Rust `native fn` / `bind` syntax);
// the ERROR annotation below is the expected diagnostic — do not "fix" it.
fn main() {
    // Can't produce a bare function by binding
    let g: native fn() = bind f();
    //!^ ERROR mismatched types: expected `native fn()` but found `fn@()`
}
|
use criterion::{criterion_group, criterion_main, Criterion};
use pixelwar_client_rs::proof;
/// Benchmark proof generation with a 7-char prefix, a 20-char suffix and a
/// five-zero digest target (the bench name encodes these parameters).
fn criterion_benchmark(c: &mut Criterion) {
    let mut proof_gen = proof::ProofGeneratorBuilder::new()
        .with_prefix("prefix-")
        .with_suffix_length(20)
        .with_digest_prefix("00000")
        .build();
    c.bench_function("gen_proof 7 20 00000", |b| b.iter(|| proof_gen.next()));
}
criterion_group!(benches, criterion_benchmark);
criterion_main!(benches);
|
//! DMA-based serial logging - Teensy 4 example
//!
//! This use the same setup as the `t4_uart.rs` example. Connect
//! a serial receive to pin 14, and you should receive log messages
//! and timing measurements.
#![no_std]
#![no_main]
extern crate panic_halt;
mod demo;
use cortex_m_rt::entry;
use cortex_m_rt::interrupt;
use imxrt_hal::ral::interrupt;
const BAUD: u32 = 115_200;
#[interrupt]
fn DMA7_DMA23() {
    // Service the DMA-based logger when channel 7 (configured in main with
    // interrupt-on-completion) finishes a transfer.
    imxrt_uart_log::dma::poll();
}
/// See the "BYOB" documentation for more details
#[cfg(feature = "byob")]
mod buffer {
    use imxrt_hal::dma::Buffer;
    pub use imxrt_hal::dma::Circular;
    // Using a 512-byte buffer, rather than the 2KiB default buffer
    // (the repr(align) matches the buffer size, as circular DMA requires).
    #[repr(align(512))]
    pub struct Alignment(pub Buffer<[u8; 512]>);
    pub static BUFFER: Alignment = Alignment(Buffer::new([0; 512]));
}
#[entry]
fn main() -> ! {
    let teensy4_bsp::Peripherals {
        uart,
        mut ccm,
        dcdc,
        gpt1,
        gpt2,
        iomuxc,
        dma,
        ..
    } = teensy4_bsp::Peripherals::take().unwrap();
    let pins = teensy4_bsp::t40::into_pins(iomuxc);
    //
    // DMA initialization
    //
    let mut dma_channels = dma.clock(&mut ccm.handle);
    // Channel 7 pairs with the DMA7_DMA23 interrupt handler above.
    let mut channel = dma_channels[7].take().unwrap();
    channel.set_interrupt_on_completion(true);
    unsafe {
        // NOTE(review): unmasked before the logger is initialized below;
        // the handler only calls poll(), but confirm poll() tolerates
        // being invoked pre-init.
        cortex_m::peripheral::NVIC::unmask(interrupt::DMA7_DMA23);
    }
    //
    // UART initialization
    //
    let uarts = uart.clock(
        &mut ccm.handle,
        imxrt_hal::ccm::uart::ClockSelect::OSC,
        imxrt_hal::ccm::uart::PrescalarSelect::DIVIDE_1,
    );
    // Pin 14 is TX (see the module docs); RX is unused by the logger.
    let uart = uarts.uart2.init(pins.p14, pins.p15, BAUD).unwrap();
    let (tx, _) = uart.split();
    imxrt_uart_log::dma::init(
        tx,
        channel,
        Default::default(),
        // With the "byob" feature, supply our smaller 512-byte buffer.
        #[cfg(feature = "byob")]
        {
            buffer::Circular::new(&buffer::BUFFER.0).unwrap()
        },
    )
    .unwrap();
    // Never returns: the demo loops emitting log messages and timings.
    demo::log_loop(
        demo::Setup {
            ccm,
            dcdc,
            gpt1,
            gpt2,
            dwt: cortex_m::Peripherals::take().unwrap().DWT,
            led: teensy4_bsp::configure_led(pins.p13),
        },
        |_| {},
    );
}
|
use reqwest::r#async::Client;
use std::sync::Arc;
use std::time::Duration;
use tokio::runtime::Runtime;
/// Bundle of the tokio runtime driving the daemon plus a shared async HTTP
/// client for it to use.
pub struct DaemonRuntime<'a> {
    pub runtime: &'a mut Runtime,
    pub client: Arc<Client>,
}
impl<'a> DaemonRuntime<'a> {
    /// Wrap an existing tokio runtime, building the shared reqwest client
    /// (3-second request timeout) the daemon will use.
    ///
    /// # Panics
    /// Panics if the reqwest client cannot be constructed.
    pub fn new(runtime: &'a mut Runtime) -> Self {
        // This client contains a thread pool for performing HTTP/s requests.
        let client = Client::builder()
            .timeout(Duration::from_secs(3))
            .build()
            .expect("failed to initialize reqwest client");
        DaemonRuntime {
            runtime,
            client: Arc::new(client),
        }
    }
}
|
use crate::llvm;
use crate::builder::Builder;
use crate::common::CodegenCx;
use libc::c_uint;
use llvm::coverageinfo::CounterMappingRegion;
use rustc_codegen_ssa::coverageinfo::map::{CounterExpression, FunctionCoverage};
use rustc_codegen_ssa::traits::{
BaseTypeMethods, CoverageInfoBuilderMethods, CoverageInfoMethods, MiscMethods, StaticMethods,
};
use rustc_data_structures::fx::FxHashMap;
use rustc_llvm::RustString;
use rustc_middle::mir::coverage::{
CodeRegion, CounterValueReference, ExpressionOperandId, InjectedExpressionId, Op,
};
use rustc_middle::ty::Instance;
use std::cell::RefCell;
use std::ffi::CString;
use tracing::debug;
pub mod mapgen;
const VAR_ALIGN_BYTES: usize = 8;
/// A context object for maintaining all state needed by the coverageinfo module.
pub struct CrateCoverageContext<'tcx> {
    // Coverage data for each instrumented function identified by DefId.
    // RefCell: mutated through a shared CodegenCx reference.
    pub(crate) function_coverage_map: RefCell<FxHashMap<Instance<'tcx>, FunctionCoverage<'tcx>>>,
}
impl<'tcx> CrateCoverageContext<'tcx> {
pub fn new() -> Self {
Self { function_coverage_map: Default::default() }
}
pub fn take_function_coverage_map(&self) -> FxHashMap<Instance<'tcx>, FunctionCoverage<'tcx>> {
self.function_coverage_map.replace(FxHashMap::default())
}
}
impl CoverageInfoMethods for CodegenCx<'ll, 'tcx> {
    fn coverageinfo_finalize(&self) {
        // Emit the accumulated coverage mapping into the LLVM module.
        mapgen::finalize(self)
    }
}
impl CoverageInfoBuilderMethods<'tcx> for Builder<'a, 'll, 'tcx> {
    /// Calls llvm::createPGOFuncNameVar() with the given function instance's mangled function name.
    /// The LLVM API returns an llvm::GlobalVariable containing the function name, with the specific
    /// variable name and linkage required by LLVM InstrProf source-based coverage instrumentation.
    fn create_pgo_func_name_var(&self, instance: Instance<'tcx>) -> Self::Value {
        let llfn = self.cx.get_fn(instance);
        let mangled_fn_name = CString::new(self.tcx.symbol_name(instance).name)
            .expect("error converting function name to C string");
        unsafe { llvm::LLVMRustCoverageCreatePGOFuncNameVar(llfn, mangled_fn_name.as_ptr()) }
    }
    // Each method below records data into the per-instance FunctionCoverage,
    // creating the entry on first use, and returns whether a coverage
    // context exists (false means coverage is not enabled for this build).
    /// Record the source hash for `instance`'s function coverage entry.
    fn set_function_source_hash(
        &mut self,
        instance: Instance<'tcx>,
        function_source_hash: u64,
    ) -> bool {
        if let Some(coverage_context) = self.coverage_context() {
            debug!(
                "ensuring function source hash is set for instance={:?}; function_source_hash={}",
                instance, function_source_hash,
            );
            let mut coverage_map = coverage_context.function_coverage_map.borrow_mut();
            coverage_map
                .entry(instance)
                .or_insert_with(|| FunctionCoverage::new(self.tcx, instance))
                .set_function_source_hash(function_source_hash);
            true
        } else {
            false
        }
    }
    /// Record a counter for `region` in `instance`'s coverage entry.
    fn add_coverage_counter(
        &mut self,
        instance: Instance<'tcx>,
        id: CounterValueReference,
        region: CodeRegion,
    ) -> bool {
        if let Some(coverage_context) = self.coverage_context() {
            debug!(
                "adding counter to coverage_map: instance={:?}, id={:?}, region={:?}",
                instance, id, region,
            );
            let mut coverage_map = coverage_context.function_coverage_map.borrow_mut();
            coverage_map
                .entry(instance)
                .or_insert_with(|| FunctionCoverage::new(self.tcx, instance))
                .add_counter(id, region);
            true
        } else {
            false
        }
    }
    /// Record a counter expression (`lhs op rhs`) in `instance`'s entry.
    fn add_coverage_counter_expression(
        &mut self,
        instance: Instance<'tcx>,
        id: InjectedExpressionId,
        lhs: ExpressionOperandId,
        op: Op,
        rhs: ExpressionOperandId,
        region: Option<CodeRegion>,
    ) -> bool {
        if let Some(coverage_context) = self.coverage_context() {
            debug!(
                "adding counter expression to coverage_map: instance={:?}, id={:?}, {:?} {:?} {:?}; \
                region: {:?}",
                instance, id, lhs, op, rhs, region,
            );
            let mut coverage_map = coverage_context.function_coverage_map.borrow_mut();
            coverage_map
                .entry(instance)
                .or_insert_with(|| FunctionCoverage::new(self.tcx, instance))
                .add_counter_expression(id, lhs, op, rhs, region);
            true
        } else {
            false
        }
    }
    /// Record `region` as unreachable in `instance`'s coverage entry.
    fn add_coverage_unreachable(&mut self, instance: Instance<'tcx>, region: CodeRegion) -> bool {
        if let Some(coverage_context) = self.coverage_context() {
            debug!(
                "adding unreachable code to coverage_map: instance={:?}, at {:?}",
                instance, region,
            );
            let mut coverage_map = coverage_context.function_coverage_map.borrow_mut();
            coverage_map
                .entry(instance)
                .or_insert_with(|| FunctionCoverage::new(self.tcx, instance))
                .add_unreachable_region(region);
            true
        } else {
            false
        }
    }
}
/// Serialize the coverage filenames section into `buffer` via LLVM.
pub(crate) fn write_filenames_section_to_buffer<'a>(
    filenames: impl IntoIterator<Item = &'a CString>,
    buffer: &RustString,
) {
    // The Vec keeps the raw pointers alive (backed by the borrowed CStrings)
    // for the duration of the FFI call.
    let c_str_vec = filenames.into_iter().map(|cstring| cstring.as_ptr()).collect::<Vec<_>>();
    unsafe {
        llvm::LLVMRustCoverageWriteFilenamesSectionToBuffer(
            c_str_vec.as_ptr(),
            c_str_vec.len(),
            buffer,
        );
    }
}
/// Serialize one function's coverage mapping (file mapping, expressions,
/// regions) into `buffer` via LLVM.
pub(crate) fn write_mapping_to_buffer(
    virtual_file_mapping: Vec<u32>,
    expressions: Vec<CounterExpression>,
    mapping_regions: Vec<CounterMappingRegion>,
    buffer: &RustString,
) {
    unsafe {
        llvm::LLVMRustCoverageWriteMappingToBuffer(
            virtual_file_mapping.as_ptr(),
            virtual_file_mapping.len() as c_uint,
            expressions.as_ptr(),
            expressions.len() as c_uint,
            mapping_regions.as_ptr(),
            mapping_regions.len() as c_uint,
            buffer,
        );
    }
}
/// Hash a string with LLVM's coverage hash (panics on interior NULs).
pub(crate) fn hash_str(strval: &str) -> u64 {
    let strval = CString::new(strval).expect("null error converting hashable str to C string");
    unsafe { llvm::LLVMRustCoverageHashCString(strval.as_ptr()) }
}
/// Hash a byte buffer with LLVM's coverage hash.
// NOTE(review): takes `Vec<u8>` by value but only reads the bytes; `&[u8]`
// would do — kept as-is for caller compatibility.
pub(crate) fn hash_bytes(bytes: Vec<u8>) -> u64 {
    unsafe { llvm::LLVMRustCoverageHashByteArray(bytes.as_ptr().cast(), bytes.len()) }
}
/// The coverage mapping format version LLVM was built with.
pub(crate) fn mapping_version() -> u32 {
    unsafe { llvm::LLVMRustCoverageMappingVersion() }
}
/// Install the serialized coverage mapping (`cov_data_val`) as a private
/// global in the module's covmap section and mark it used so it survives
/// dead-global elimination.
pub(crate) fn save_cov_data_to_mod<'ll, 'tcx>(
    cx: &CodegenCx<'ll, 'tcx>,
    cov_data_val: &'ll llvm::Value,
) {
    let covmap_var_name = llvm::build_string(|s| unsafe {
        llvm::LLVMRustCoverageWriteMappingVarNameToString(s);
    })
    .expect("Rust Coverage Mapping var name failed UTF-8 conversion");
    debug!("covmap var name: {:?}", covmap_var_name);
    let covmap_section_name = llvm::build_string(|s| unsafe {
        llvm::LLVMRustCoverageWriteMapSectionNameToString(cx.llmod, s);
    })
    .expect("Rust Coverage section name failed UTF-8 conversion");
    debug!("covmap section name: {:?}", covmap_section_name);
    let llglobal = llvm::add_global(cx.llmod, cx.val_ty(cov_data_val), &covmap_var_name);
    llvm::set_initializer(llglobal, cov_data_val);
    llvm::set_global_constant(llglobal, true);
    llvm::set_linkage(llglobal, llvm::Linkage::PrivateLinkage);
    llvm::set_section(llglobal, &covmap_section_name);
    llvm::set_alignment(llglobal, VAR_ALIGN_BYTES);
    cx.add_used_global(llglobal);
}
/// Install a single function's coverage record as a hidden `linkonce_odr`
/// global in the function-record section, named so duplicates merge.
pub(crate) fn save_func_record_to_mod<'ll, 'tcx>(
    cx: &CodegenCx<'ll, 'tcx>,
    func_name_hash: u64,
    func_record_val: &'ll llvm::Value,
    is_used: bool,
) {
    // Assign a name to the function record. This is used to merge duplicates.
    //
    // In LLVM, a "translation unit" (effectively, a `Crate` in Rust) can describe functions that
    // are included-but-not-used. If (or when) Rust generates functions that are
    // included-but-not-used, note that a dummy description for a function included-but-not-used
    // in a Crate can be replaced by full description provided by a different Crate. The two kinds
    // of descriptions play distinct roles in LLVM IR; therefore, assign them different names (by
    // appending "u" to the end of the used function record var name), to prevent `linkonce_odr`
    // merging.
    let func_record_var_name =
        format!("__covrec_{:X}{}", func_name_hash, if is_used { "u" } else { "" });
    debug!("function record var name: {:?}", func_record_var_name);
    let func_record_section_name = llvm::build_string(|s| unsafe {
        llvm::LLVMRustCoverageWriteFuncSectionNameToString(cx.llmod, s);
    })
    .expect("Rust Coverage function record section name failed UTF-8 conversion");
    debug!("function record section name: {:?}", func_record_section_name);
    let llglobal = llvm::add_global(cx.llmod, cx.val_ty(func_record_val), &func_record_var_name);
    llvm::set_initializer(llglobal, func_record_val);
    llvm::set_global_constant(llglobal, true);
    llvm::set_linkage(llglobal, llvm::Linkage::LinkOnceODRLinkage);
    llvm::set_visibility(llglobal, llvm::Visibility::Hidden);
    llvm::set_section(llglobal, &func_record_section_name);
    llvm::set_alignment(llglobal, VAR_ALIGN_BYTES);
    llvm::set_comdat(cx.llmod, llglobal, &func_record_var_name);
    cx.add_used_global(llglobal);
}
|
use rug::Float;
use std::fs::create_dir_all;
use std::path::Path;
use std::sync::mpsc::channel;
use structopt::StructOpt;
use threadpool::ThreadPool;
use mandelbrot::{color_palette, Mandel};
/// Command-line options for the Mandelbrot zoom renderer.
#[derive(StructOpt)]
#[structopt(name = "mandlebrot", about = "Generate Mandlebrot zoom images")]
struct Opt {
    #[structopt(
        short = "f",
        long = "frames",
        help = "Set number of frames",
        default_value = "1"
    )]
    frames: u32,
    #[structopt(
        short = "w",
        long = "width",
        help = "Set width of image",
        default_value = "80"
    )]
    width: u32,
    #[structopt(
        short = "h",
        long = "height",
        help = "Set height of image",
        default_value = "80"
    )]
    height: u32,
    #[structopt(
        short = "x",
        long = "center-x",
        help = "Set center-x of image",
        default_value = "0.001643721971153"
    )]
    center_x: f64,
    #[structopt(
        short = "y",
        long = "center-y",
        help = "Set center-y of image",
        default_value = "-0.822467633298876"
    )]
    center_y: f64,
    #[structopt(
        short = "s",
        long = "scale",
        help = "Set start scale",
        default_value = "0.01"
    )]
    // Kept as a String so main() can parse it into an arbitrary-precision
    // rug::Float instead of losing digits through f64.
    start_scale: String,
}
/// Per-frame zoom decrement: 1/150 of the current scale, at 128-bit precision.
fn set_zoom(sc: &Float) -> Float {
    Float::with_val(128, sc / 150.0)
}
/// Render a sequence of Mandelbrot zoom frames into `img/`, distributing
/// frame rendering over a small thread pool.
fn main() {
    let opt = Opt::from_args();
    let palette = color_palette();
    // Resolution
    let samples = (opt.width, opt.height);
    let center = (
        Float::with_val(128, opt.center_x),
        Float::with_val(128, opt.center_y),
    );
    let mut scale = Float::from_str(&opt.start_scale, 128).unwrap();
    let mut zoom_step = set_zoom(&scale);
    // Create output dir
    create_dir_all("img").unwrap();
    let n_workers = 2;
    let pool = ThreadPool::new(n_workers);
    // Workers signal completion over this channel; main counts them below.
    let (tx, rx) = channel::<bool>();
    let mut frame = 0;
    let mut skip = 0;
    while frame < opt.frames {
        if scale < 0.0 {
            println!("Scale going negative at frame {}", frame);
            break;
        }
        //Skip frames that have already been generated
        if !Path::new(&Mandel::image_path(frame)).exists() {
            let sc = scale.clone();
            let cen = center.clone();
            let tx = tx.clone();
            let pal = palette.clone();
            pool.execute(move || {
                println!("Render frame {}", frame);
                let mut man = Mandel::new(samples, sc, cen, frame);
                man.generate();
                man.draw_image(&pal);
                tx.send(true).expect("done channel open");
            });
        } else {
            skip += 1;
        }
        // Recompute the decrement every 25 frames so the zoom speed tracks
        // the shrinking scale.
        if frame % 25 == 0 {
            zoom_step = set_zoom(&scale);
        }
        println!("frame {} scale {}", frame, scale);
        scale -= &zoom_step;
        frame += 1;
    }
    // Wait for work to complete
    rx.iter().take((frame - skip) as usize).for_each(drop);
}
|
/**********************************************
> File Name : Range.rs
> Author : lunar
> Email : lunar_ubuntu@qq.com
> Created Time : Fri 01 Apr 2022 04:27:46 PM CST
> Location : Shanghai
> Copyright@ https://github.com/xiaoqixian
**********************************************/
use std::collections::BTreeSet;
// Interval endpoints stored in a BTreeSet. Equality and ordering are by
// `left` only (see the PartialEq/Ord impls), which the set keying relies on.
#[derive(Debug, Clone, Copy)]
struct Range {
    left: i32,
    right: i32
}
impl Range {
fn new(left: i32, right: i32) -> Self {
Self {
left,
right
}
}
}
impl std::fmt::Display for Range {
    // Render as "(left, right)".
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        write!(f, "({}, {})", self.left, self.right)
    }
}
impl PartialEq for Range {
    // Deliberately compares `left` only: ranges are keyed by their start in
    // the BTreeSet, consistent with the Ord impl below.
    fn eq(&self, other: &Self) -> bool {
        self.left == other.left
    }
}
impl Eq for Range {}
impl PartialOrd for Range {
    /// Canonical form: delegate to `Ord::cmp` (previously `cmp` unwrapped
    /// `partial_cmp`, the reverse of the recommended pattern).
    fn partial_cmp(&self, other: &Self) -> Option<std::cmp::Ordering> {
        Some(self.cmp(other))
    }
}
impl Ord for Range {
    /// Total order by `left` endpoint only, consistent with `PartialEq`.
    fn cmp(&self, other: &Self) -> std::cmp::Ordering {
        self.left.cmp(&other.left)
    }
}
// Interval set supporting add/remove/query, ordered by range start.
// Always contains a sentinel Range(-1, 0); "empty" means len() == 1.
struct RangeModule {
    ranges: BTreeSet<Range>
}
impl RangeModule {
    /// Create a module pre-seeded with the (-1, 0) sentinel so the empty
    /// state is always `len() == 1`.
    fn new() -> Self {
        Self {
            ranges: {
                let mut ranges = BTreeSet::<Range>::new();
                ranges.insert(Range::new(-1, 0));
                ranges
            }
        }
    }
    /// Insert the interval [left, right), merging any stored ranges that
    /// overlap or are contained by it. Prints a trace of the operation.
    pub fn add_range(&mut self, left: i32, right: i32) {
        if self.ranges.len() == 1 {
            // Only the sentinel is present: nothing can overlap.
            self.ranges.insert(Range::new(left, right));
            print!("add range({}, {}): ", left, right); self.show_ranges();
            return;
        }
        // Collect every real range (x.left > 0 excludes the sentinel) that
        // overlaps [left, right) on either side or sits inside it.
        let delete = self.ranges.iter().
            filter(|x| x.left > 0 && ((x.left < left && x.right >= left) || (x.left <= right && x.right > right) || (x.left >= left && x.right <= right))).
            copied().collect::<Vec<Range>>();
        let mut new_range = Range::new(left, right);
        println!("delete: {:?}", delete);
        if delete.is_empty() {
            self.ranges.insert(new_range);
            print!("add range({}, {}): ", left, right); self.show_ranges();
            return;
        }
        for r in delete.iter() {
            assert!(self.ranges.remove(r));
        }
        // Extend the merged range to the outermost deleted bounds.
        if delete[0].left < left {
            new_range.left = delete[0].left;
        }
        if delete[delete.len() - 1].right > right {
            new_range.right = delete[delete.len() - 1].right;
        }
        self.ranges.insert(new_range);
        print!("add range({}, {}): ", left, right); self.show_ranges();
    }
    /// Remove [left, right) from coverage, trimming partially-overlapped
    /// ranges and deleting fully-contained ones.
    fn remove_range(&mut self, left: i32, right: i32) {
        if self.ranges.len() == 1 {
            return;
        }
        let delete = self.ranges.iter().
            filter(|x| x.left > 0 && ((x.left < left && x.right > left) || (x.left < right && x.right > right) || (x.left >= left && x.right <= right))).
            copied().collect::<Vec<Range>>();
        if delete.is_empty() {
            println!("remove range({}, {}) failed", left, right);
            return;
        }
        for r in delete.iter() {
            assert!(self.ranges.remove(r));
        }
        // Re-insert the uncovered remainders on either side, if any.
        if delete[0].left < left {
            self.ranges.insert(Range::new(delete[0].left, left));
        }
        if delete[delete.len() - 1].right > right {
            self.ranges.insert(Range::new(right, delete[delete.len() - 1].right));
        }
        print!("remove range({}, {}): ", left, right); self.show_ranges();
    }
    /// True iff a single stored range fully covers [left, right).
    fn query_range(&self, left: i32, right: i32) -> bool {
        if self.ranges.len() == 1 {
            return false;
        }
        // `count()` replaces the previous hand-rolled fold-based counter.
        let query_size = self.ranges.iter().filter(|x| x.left <= left && x.right >= right).count();
        println!("query range({}, {}): {}", left, right, query_size == 1);
        query_size == 1
    }
    /// Print all stored ranges except the sentinel as "[ (l, r), ... ]".
    fn show_ranges(&self) {
        let mut iter = self.ranges.iter();
        iter.next(); // skip the (-1, 0) sentinel
        print!("[");
        for i in iter {
            print!("{}, ", i);
        }
        println!("]");
    }
}
// Smoke-test driver: each call prints its own trace output to stdout.
fn main() {
    let mut rm = RangeModule::new();
    rm.add_range(6, 8);
    rm.remove_range(7, 8);
    rm.remove_range(8, 9);
    rm.add_range(8, 9);
    rm.remove_range(1, 3);
    rm.add_range(1, 8);
    rm.query_range(2, 4);
    rm.query_range(2, 9);
    rm.query_range(4, 6);
}
|
extern crate base64;
use hex;
use reqwest::{StatusCode, Url};
use serde::{Deserialize, Serialize};
use serde_json::{json, Value};
use std::fmt;
use std::fs;
use super::constants::*;
/// Get list of `Color` using Google Cloud Vision API
///
/// Errors if the API call fails or the response yields no colors.
pub(crate) fn get_dominant_colors(image_url: &Url) -> Result<Vec<Color>, CloudVisionError> {
    let response_json = use_cloud_vision_api(image_url)?;
    let extracted_color_vec = extract_colors(&response_json)?;
    // An empty palette is treated as an error rather than Ok(vec![]).
    if extracted_color_vec.is_empty() {
        return Err(VectorIsEmpty);
    }
    Ok(extracted_color_vec)
}
/// Use cloud vision api
///
/// Fetches the image, sends it (base64-encoded) to the IMAGE_PROPERTIES
/// endpoint, and returns the raw JSON response after surfacing any error
/// object embedded in it.
fn use_cloud_vision_api(image_url: &Url) -> Result<Value, CloudVisionError> {
    let base64_image =
        get_base64_image(image_url).ok_or_else(|| UnableToFetchImage(image_url.to_owned()))?;
    let request = json!({
        "requests": [
            {
                "image": {
                    "content": base64_image
                },
                "features": [
                    {
                        "maxResults": 10,
                        "type": "IMAGE_PROPERTIES"
                    }
                ]
            }
        ]
    });
    // API key is read from disk on every call.
    let secret_key = fs::read_to_string(API_KEY_FILE_PATH)
        .map_err(|_| KeyFileNotFound(API_KEY_FILE_PATH.to_owned()))?;
    let mut response = reqwest::Client::new()
        .post(CLOUD_VISION_URI)
        .query(&[("key", secret_key)])
        .json(&request)
        .send()
        .map_err(|_err| FailedGCV)?;
    if response.status() != StatusCode::OK {
        return Err(BadRequest(image_url.to_owned()));
    }
    let response_json: Value = response.json().map_err(|_err| NotJSON)?;
    // A 200 response may still carry a per-request error object.
    let err = &response_json["responses"][0]["error"];
    if err.is_object() {
        if let Some(error_message) = &err["message"].as_str() {
            return Err(FailedToParseImage((*error_message).to_string()));
        } else {
            return Err(UnknownError);
        };
    }
    Ok(response_json)
}
/// Extract `Vec<Color>` with given `Value`
fn extract_colors(val: &Value) -> Result<Vec<Color>, CloudVisionError> {
let colors = &val["responses"][0]["imagePropertiesAnnotation"]["dominantColors"]["colors"];
match colors.as_array() {
Some(color_ary) => {
let mut color_vec = Vec::new();
color_ary.iter().for_each(|color_value| {
if let Some(color) = to_color(color_value) {
color_vec.push(color);
};
});
Ok(color_vec)
}
None => Err(UnableToParseColorData(val.to_owned())),
}
}
/// Color struct
#[derive(Debug, PartialEq, PartialOrd, Clone, Serialize, Deserialize)]
pub struct Color {
    /// Pixel fraction
    pub pixel_fraction: f32,
    /// Score
    pub score: f32,
    /// Color in hex string (formatted as "#rrggbb")
    pub hex_color: String,
}
/// Construct `Color` struct with given `Value`, or `None` if any expected
/// field is missing or has the wrong type.
fn to_color(value: &Value) -> Option<Color> {
    let pixel_fraction = value.get("pixelFraction")?.as_f64()? as f32;
    let score = value.get("score")?.as_f64()? as f32;
    let color = value.get("color")?;
    // For unknown reason, some responses do not have all the fields. WIP
    // (the redundant `.to_owned()` clones of each Value were dropped —
    // `as_u64` works fine on the borrow).
    let red: u8 = color.get("red")?.as_u64()? as u8;
    let green: u8 = color.get("green")?.as_u64()? as u8;
    let blue: u8 = color.get("blue")?.as_u64()? as u8;
    // A fixed-size array avoids the Vec allocation.
    let hex = hex::encode([red, green, blue]);
    let hex_color = format!("#{}", hex);
    Some(Color {
        pixel_fraction,
        score,
        hex_color,
    })
}
/// Get image content and encode it with base64; `None` on any network
/// failure or non-200 response.
fn get_base64_image(image_url: &Url) -> Option<String> {
    let mut response = reqwest::get(image_url.to_owned()).ok()?;
    if response.status() != StatusCode::OK {
        return None;
    }
    let mut buf: Vec<u8> = vec![];
    response.copy_to(&mut buf).ok()?;
    let encoded = base64::encode(&buf);
    Some(encoded)
}
use super::cloud_vision::CloudVisionError::*;
/// Errors produced while fetching an image and querying Cloud Vision.
#[derive(Debug, PartialEq, Clone)]
pub enum CloudVisionError {
    UnableToFetchImage(Url),
    BadRequest(Url),
    FailedToParseImage(String),
    UnableToParseColorData(Value),
    UnknownError,
    VectorIsEmpty,
    FailedGCV,
    NotJSON,
    KeyFileNotFound(String),
}
impl std::error::Error for CloudVisionError {}
impl fmt::Display for CloudVisionError {
    // Human-readable message per variant.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> fmt::Result {
        let error_message: String = match self {
            BadRequest(url) => format!("Bad request: {}", url),
            FailedToParseImage(msg) => format!("Cloud vision api failed to parse image: {}", msg),
            UnableToParseColorData(val) => format!("Unable to parse data: {:#?}", val),
            UnknownError => String::from("Unknown error"),
            VectorIsEmpty => String::from("Extracted data is empty"),
            FailedGCV => String::from("Failed to request Google Cloud Vision API"),
            UnableToFetchImage(url) => format!("Unable to fetch image from url: {}", url),
            NotJSON => String::from("Response body is not JSON"),
            KeyFileNotFound(filepath) => {
                format!("Cloud vision api key file not found at: {}", filepath)
            }
        };
        write!(f, "{}", error_message)
    }
}
|
extern crate day_02_corruption_checksum;
extern crate utils;
use day_02_corruption_checksum::corr_checksum;
use utils::file2str;
// Advent-of-Code day 2 driver: read the puzzle input and print its checksum.
fn main() {
    let puzzle_string = file2str("puzzle.txt");
    let checksum = corr_checksum(&puzzle_string);
    println!("The checksum is: {}", checksum);
}
|
use crate::dice::ui::RollDiceDialog;
use crate::state;
use cursive::theme::*;
use cursive::traits::*;
use cursive::utils::span::SpannedString;
use cursive::view::*;
use cursive::views::*;
use cursive::Cursive;
use enumset::EnumSet;
use std::sync::mpsc;
/// Owns the cursive terminal UI, the channel it drains each `step`, and the
/// sender used to forward user actions to the application controller.
pub struct Ui {
    cursive: Cursive,
    // Messages consumed by `step`.
    ui_rx: mpsc::Receiver<UiMessage>,
    // Kept so `send` can enqueue messages onto `ui_rx`.
    ui_tx: mpsc::Sender<UiMessage>,
    // Outbound channel to the controller; cloned into key-binding closures.
    controller_tx: mpsc::Sender<ControllerMessage>,
}
/// Messages consumed by the UI in `Ui::step`.
pub enum UiMessage {
    /// Append a line to the log panel.
    Log(String),
}
/// Messages sent from the UI to the application controller.
pub enum ControllerMessage {
    // Not constructed in this module.
    LogMessage(String),
    /// Text entered in the notes dialog (see `show_notes_dialog`).
    AddNote(String),
}
impl Ui {
    /// Build the UI: create the message channel, load the bundled theme,
    /// register the global key bindings ('q' quit, 'r' roll dice, 'N' notes)
    /// and install the root layout.
    pub fn new(controller_tx: mpsc::Sender<ControllerMessage>) -> Self {
        let (ui_tx, ui_rx) = mpsc::channel::<UiMessage>();
        let mut ui = Ui {
            cursive: cursive::default(),
            ui_rx,
            ui_tx,
            controller_tx,
        };
        // The theme is compiled into the binary, so a bad style.toml is a
        // build-time bug; unwrapping is acceptable here.
        ui.cursive.load_toml(include_str!("style.toml")).unwrap();
        ui.cursive.add_global_callback('q', move |cursive| {
            cursive.quit();
        });
        let tx = ui.controller_tx.clone();
        ui.cursive.add_global_callback('r', move |cursive| {
            let dialog = RollDiceDialog::new(&tx);
            dialog.show(cursive);
        });
        let tx = ui.controller_tx.clone();
        ui.cursive.add_global_callback('N', move |cursive| {
            show_notes_dialog(cursive, &tx);
        });
        let root = build_root();
        ui.cursive.add_layer(root);
        ui
    }
    /// Drain pending UI messages and advance the cursive event loop by one
    /// step. Returns `false` once the user has quit.
    pub fn step(&mut self) -> bool {
        if !self.cursive.is_running() {
            return false;
        }
        while let Some(message) = self.ui_rx.try_iter().next() {
            match message {
                UiMessage::Log(msg) => self.add_log_msg(msg),
            }
        }
        self.cursive.refresh();
        self.cursive.step();
        true
    }
    /// Queue a message for the next `step`. Panics if the receiving side has
    /// been dropped, which only happens if the UI itself is gone.
    pub fn send(&mut self, msg: UiMessage) {
        // `unwrap` already consumes the Result; binding it to `_` was redundant.
        self.ui_tx.send(msg).unwrap();
    }
    /// Redraw the player list from the given state.
    pub fn display_state(&mut self, state: state::State) {
        let mut view = self
            .cursive
            .find_name::<SelectView<String>>("player_list")
            .unwrap();
        draw_character_list(&mut view, &state)
    }
    /// Append one line to the log panel; silently does nothing if the "log"
    /// view cannot be found.
    fn add_log_msg(&mut self, msg: String) {
        self.cursive.call_on_name("log", |view: &mut ListView| {
            view.add_child("", TextView::new(msg));
        });
    }
}
/// Construct the static root layout: a fixed-width left column holding the
/// player and encounter panels, with a scrollable log panel filling the rest.
fn build_root() -> impl View {
    let encounters = Panel::new(
        LinearLayout::vertical()
            .child(TextView::new("> Goblin ambush"))
            .resized(SizeConstraint::Full, SizeConstraint::Full),
    )
    .title("Encounters");
    let players = Panel::new(
        SelectView::<String>::new()
            .with_name("player_list")
            .resized(SizeConstraint::Full, SizeConstraint::Full),
    )
    .title("Players");
    let left_column = LinearLayout::vertical()
        .child(players)
        .child(encounters)
        .resized(SizeConstraint::Fixed(56), SizeConstraint::Full);
    let log_panel = Panel::new(
        ListView::new()
            .with_name("log")
            .scrollable()
            .with_name("log_scroll"),
    )
    .title("Log")
    .resized(SizeConstraint::Full, SizeConstraint::Full);
    LinearLayout::horizontal().child(left_column).child(log_panel)
}
/// Open an input dialog that forwards the entered note to the controller
/// and closes itself on submit.
fn show_notes_dialog(cursive: &mut Cursive, tx: &mpsc::Sender<ControllerMessage>) {
    let sender = tx.clone();
    let dialog = build_input_dialog("Notes", None, move |cursive, text| {
        sender
            .send(ControllerMessage::AddNote(text.to_string()))
            .unwrap();
        cursive.pop_layer();
    });
    cursive.add_layer(dialog);
}
/// Build a generic single-field input dialog.
///
/// `on_submit` fires both when the edit field is submitted (Enter) and when
/// the "Ok" button is pressed, receiving the current field contents. The
/// optional `message` is shown below the field.
pub fn build_input_dialog<F>(
    title: impl Into<String>,
    message: Option<String>,
    on_submit: F,
) -> impl View
where
    F: 'static + Clone + Fn(&mut Cursive, &str),
{
    // The edit view and the Ok button each capture their own copy of the
    // callback; `Fn` closures can be called by reference, so no per-call
    // clone is needed.
    let on_submit_edit = on_submit.clone();
    let input_field = EditView::new()
        .on_submit(move |cursive, input| on_submit_edit(cursive, input))
        .filler(" ")
        .style(ColorStyle {
            front: ColorType::Palette(PaletteColor::Highlight),
            back: ColorType::Palette(PaletteColor::Primary),
        })
        .with_name("input_field")
        .full_width();
    let mut content = LinearLayout::vertical().child(input_field);
    content.add_child(TextView::new(message.unwrap_or_default()).with_name("input_msg"));
    Dialog::new()
        .title(title.into())
        .padding(Margins::lrtb(1, 1, 1, 0))
        .content(content)
        .button("Ok", move |cursive| {
            let input = cursive
                .call_on_name("input_field", |view: &mut EditView| view.get_content())
                .unwrap();
            on_submit(cursive, &input);
        })
        .max_width(40)
}
/// Redraw the player list: one row per character with a selection marker,
/// a bold name, dot leaders aligning the hp column, and optional notes.
fn draw_character_list(view: &mut SelectView<String>, state: &state::State) {
    // Pad every name to the longest one so the hp column lines up.
    let longest_name = state
        .characters
        .iter()
        .map(|c| c.name.chars().count())
        .max()
        .unwrap_or(0);
    for (i, c) in state.characters.iter().enumerate() {
        let name_length = c.name.chars().count();
        let padding = longest_name - name_length + 2;
        let dots = ".".repeat(padding);
        let notes = c.notes.clone().unwrap_or_default();
        let selection = if i == state.selected_index { ">" } else { " " };
        let mut span = SpannedString::styled(selection, Style::default());
        let name_span = SpannedString::styled(
            &c.name,
            Style {
                effects: EnumSet::only(Effect::Bold),
                color: Some(ColorStyle {
                    front: ColorType::Palette(PaletteColor::Primary),
                    back: ColorType::Palette(PaletteColor::Background),
                }),
            },
        );
        let rest_span =
            SpannedString::styled(format!("{}{} {}", dots, c.hp, notes), Style::default());
        span.append(name_span);
        span.append(rest_span);
        view.add_item(span, "".to_string());
    }
    // Fix: the original called set_selection inside the loop on every
    // iteration; setting it once after all items exist avoids redundant work
    // and an out-of-range selection while the list is still being filled.
    if !state.characters.is_empty() {
        view.set_selection(state.selected_index);
    }
}
|
//! [Generic Types], Traits, and Lifetimes
//!
//! [generic types]: https://doc.rust-lang.org/book/ch10-00-generics.html
pub mod sec00;
pub mod sec01;
pub mod sec02;
pub mod sec03;
pub use sec01::{largest, Point};
pub use sec02::{detailed_notify, detailed_notify2, notify, notify2, summarizable};
pub use sec02::{Article, Pair, Summary, Tweet};
pub use sec03::{first_word, longest, ImportantExcerpt};
|
/// A single colored vertex as uploaded to the GPU vertex buffer.
///
/// `#[repr(C)]` plus the `Pod`/`Zeroable` derives make it valid to reinterpret
/// a `&[Vertex]` as raw bytes for buffer uploads.
#[repr(C)]
#[derive(Copy, Clone, Debug, bytemuck::Pod, bytemuck::Zeroable)]
pub struct Vertex {
    // wgpu::FrontFace::Ccw
    pub(crate) position: [f32; 3],
    pub(crate) color: [f32; 3],
}
// descriptor
impl Vertex {
    /// Memory layout of one vertex for the render pipeline: two consecutive
    /// `Float3` attributes — position at offset 0 (shader location 0) and
    /// color immediately after it (shader location 1) — advancing per vertex.
    pub fn descriptor<'a>() -> wgpu::VertexBufferLayout<'a> {
        wgpu::VertexBufferLayout {
            array_stride: std::mem::size_of::<Vertex>() as wgpu::BufferAddress,
            step_mode: wgpu::InputStepMode::Vertex,
            attributes: &[
                // position: [f32; 3]
                wgpu::VertexAttribute {
                    offset: 0,
                    shader_location: 0,
                    format: wgpu::VertexFormat::Float3,
                },
                // color: [f32; 3], packed right after position
                wgpu::VertexAttribute {
                    offset: std::mem::size_of::<[f32; 3]>() as wgpu::BufferAddress,
                    shader_location: 1,
                    format: wgpu::VertexFormat::Float3,
                },
            ],
        }
    }
}
|
extern crate utils;
use std::env;
use std::collections::BTreeSet;
use std::ops::RangeInclusive;
use std::io::{self, BufReader};
use std::io::prelude::*;
use std::fs::File;
use utils::*;
/// A boarding-pass specification such as "FBFBBFFRLR".
type Seat = String;
/// Puzzle input: one seat specification per line.
type Input = Vec<Seat>;
/// Resolve a binary-space-partition string to the single value it selects.
///
/// Each character halves `range`: `l_chr` keeps the lower half, `h_chr` the
/// upper half. Any other character panics (malformed input).
fn bsp_to_val(s: &str, l_chr: char, h_chr: char, mut range: RangeInclusive<usize>) -> usize {
    for step in s.chars() {
        let (lo, hi) = (*range.start(), *range.end());
        let mid = lo + (hi - lo) / 2;
        range = if step == l_chr {
            lo..=mid
        } else if step == h_chr {
            (mid + 1)..=hi
        } else {
            unreachable!()
        };
    }
    *range.start()
}
/// Decode a boarding pass: the first 7 characters select the row (F/B over
/// 0..=127), the last 3 the column (L/R over 0..=7); id = row * 8 + column.
fn seat_id(seat: &str) -> usize {
    let (row_spec, col_spec) = seat.split_at(7);
    8 * bsp_to_val(row_spec, 'F', 'B', 0..=127) + bsp_to_val(col_spec, 'L', 'R', 0..=7)
}
/// Part 1: the highest seat id on any boarding pass (0 for empty input).
fn part1(input: &Input) -> usize {
    input.iter().map(|seat| seat_id(seat)).max().unwrap_or(0)
}
/// Part 2: our seat — the missing id directly between two occupied ids.
fn part2(input: &Input) -> usize {
    let occupied: BTreeSet<_> = input.iter().map(|seat| seat_id(seat)).collect();
    occupied
        .iter()
        .find(|&&id| !occupied.contains(&(id + 1)) && occupied.contains(&(id + 2)))
        .map(|&id| id + 1)
        .unwrap_or(0)
}
/// Time both puzzle parts over the file named on the command line.
fn main() {
    measure(|| {
        let parsed = input().expect("Input failed");
        println!("Part1: {}", part1(&parsed));
        println!("Part2: {}", part2(&parsed));
    });
}
/// Read one seat specification per line from `reader`.
///
/// Fix: line read failures (e.g. invalid UTF-8) are now propagated as an
/// `Err` instead of panicking via `unwrap` inside the iterator.
fn read_input<R: Read>(reader: BufReader<R>) -> io::Result<Input> {
    reader.lines().collect()
}
/// Open the file named by the first CLI argument and parse it.
///
/// Panics with a clear message when no argument is supplied; I/O errors are
/// returned to the caller.
fn input() -> io::Result<Input> {
    // `nth(1)` replaces the clunkier `skip(1).next()`.
    let path = env::args().nth(1).expect("No input file given");
    let f = File::open(path)?;
    read_input(BufReader::new(f))
}
#[cfg(test)]
mod tests {
    use super::*;
    // Sample passes from the puzzle statement; `as_input` trims each line so
    // the literal's layout does not matter.
    const INPUT: &'static str =
        "FBFBBFFRLR
BFFFBBFRRR
FFFBBBFRRR
BBFFBBFRLL";
    // Normalize the multi-line literal and feed it through the same reader
    // used for the real input file.
    fn as_input(s: &str) -> Input {
        read_input(BufReader::new(s.split('\n').map(|s| s.trim()).collect::<Vec<_>>().join("\n").as_bytes())).unwrap()
    }
    #[test]
    fn test_set_id() {
        // Expected ids taken from the puzzle statement examples.
        let seat_ids: Vec<_> = as_input(INPUT).iter().map(|seat| seat_id(seat)).collect();
        assert_eq!(seat_ids, vec![357, 567, 119, 820]);
    }
    #[test]
    fn test_part1() {
        assert_eq!(part1(&as_input(INPUT)), 820);
    }
}
|
use std::io::{self};
/// Scaffold entry point: reads each puzzle file and iterates its lines.
/// The actual puzzle logic has not been written yet.
fn main() -> io::Result<()> {
    // (filename, expected part-1 result, expected part-2 result) — the
    // expected values are placeholders for now.
    let files_results = vec![
        ("test.txt", 1, 1),
        ("input.txt", 1, 1)
    ];
    for (f, result_1, result_2) in files_results.into_iter() {
        println!("File: {}", f);
        let file_content: Vec<String> = std::fs::read_to_string(f)?
            .lines()
            .map(|x| x.to_string())
            .collect();
        // TODO: per-line puzzle logic goes here; the loop is an empty scaffold.
        for line in file_content.iter() {
        }
    }
    Ok(())
}
|
use std::fs;
use std::path::{Path, PathBuf};
use serde_json::Value;
/// Regenerate the `out` tree from the JSON fixtures under `./in`.
fn main() {
    // Fix: `out` may not exist on a first run — ignore that error instead of
    // panicking on `unwrap`.
    let _ = fs::remove_dir_all("out");
    fs::create_dir("out").expect("failed to create out directory");
    in_to_out("./in");
    println!("All done.")
}
/// Recursively mirror `starting_path` (under `in/`) into `out/`, converting
/// every `.json` fixture into a `.md.erb` document.
fn in_to_out(starting_path: &str) {
    for entry in fs::read_dir(starting_path).unwrap() {
        let in_path = entry.unwrap().path();
        if in_path.is_dir() {
            // Mirror the directory and recurse into it.
            // NOTE(review): the path mapping is a plain `"in/" -> "out/"`
            // string replace — it would also rewrite a directory that merely
            // contains "in/" in its name; confirm input paths never do.
            let out_path = in_path.to_str().unwrap().replace("in/", "out/");
            // Ignore "already exists" failures on re-runs.
            let _ = fs::create_dir(out_path);
            in_to_out(in_path.to_str().unwrap());
        } else {
            let data = generate_md_file(&in_path);
            let out_path = in_path
                .to_str()
                .unwrap()
                .replace("in/", "out/")
                .replace(".json", ".md.erb");
            fs::write(out_path, data).unwrap();
        }
    }
}
fn generate_md_file(in_path: &PathBuf) -> String {
let mut output = String::new();
let original = parse_json_file(in_path.clone());
// --------------------------------------- request
output.push_str("> Request\n\n");
// route
output.push_str("```\n");
output.push_str(&format!("{}\n", &original["route"].to_string().clean()));
output.push_str("```\n\n");
// json
output.push_str("```json\n");
output.push_str(&format!(
"{}\n",
serde_json::to_string_pretty(&original["request"]).unwrap()
));
output.push_str("```\n\n");
// shell
output.push_str("```shell\n");
output.push_str(&format!(
"{}\n",
serde_json::to_string_pretty(&original["requestSchema"]).unwrap()
));
output.push_str("```\n\n");
// --------------------------------------- response
output.push_str("> Response\n\n");
// json
output.push_str("```json\n");
output.push_str(&format!(
"{}\n",
serde_json::to_string_pretty(&original["response"]).unwrap()
));
output.push_str("```\n\n");
// shell
output.push_str("```shell\n");
output.push_str(&format!(
"{}\n",
serde_json::to_string_pretty(&original["responseSchema"]).unwrap()
));
output.push_str("```\n");
output
}
fn parse_json_file(path: PathBuf) -> Value {
let data = fs::read_to_string(path).expect("failed to read file");
serde_json::from_str(&data).unwrap()
}
/// Strip one pair of surrounding double quotes, as left behind by
/// `Value::to_string` when the JSON value is a string.
trait Clean {
    /// Return a copy of `self` without leading/trailing `"` characters.
    fn clean(&self) -> Self;
}
impl Clean for String {
    // Fix: the method never mutates, so it now takes `&self` instead of
    // `&mut self` (strictly more permissive for callers).
    fn clean(&self) -> Self {
        String::from(self.trim_start_matches('"').trim_end_matches('"'))
    }
}
|
use aoc::*;
use std::iter;
/// Day 16 part 2: decode the message hidden in the back half of the signal.
fn main() -> Result<()> {
    // Parse the raw puzzle input into decimal digits.
    let digits: Vec<_> = input("16.txt")?.bytes().map(|b| b - b'0').collect();
    // The message offset is spelled out by the first seven digits.
    let offset = extract(&digits[0..7]);
    // The real signal is the input repeated 10 000 times, starting at `offset`.
    let mut signal: Vec<_> = iter::repeat(digits)
        .take(10000)
        .flatten()
        .skip(offset)
        .collect();
    (0..100).for_each(|_| phase(&mut signal));
    Ok(println!("{}", extract(&signal[0..8])))
}
/// Interpret a slice of decimal digits as the number they spell out.
fn extract(digits: &[u8]) -> usize {
    let mut value = 0usize;
    for &d in digits {
        value = value * 10 + d as usize;
    }
    value
}
/// One FFT phase specialized for the back half of the signal, where every
/// pattern coefficient is 1: each output digit is the suffix sum mod 10.
fn phase(signal: &mut [u8]) {
    let mut suffix_sum = 0usize;
    for digit in signal.iter_mut().rev() {
        suffix_sum += usize::from(*digit);
        *digit = (suffix_sum % 10) as u8;
    }
}
|
use crate::component::entry::TxEntry;
use crate::error::SubmitTxError;
use crate::pool::TxPool;
use crate::FeeRate;
use ckb_error::{Error, InternalErrorKind};
use ckb_snapshot::Snapshot;
use ckb_types::{
core::{
cell::{
resolve_transaction, OverlayCellProvider, ResolvedTransaction, TransactionsProvider,
},
Capacity, Cycle, TransactionView,
},
packed::Byte32,
};
use ckb_verification::cache::CacheEntry;
use ckb_verification::{ContextualTransactionVerifier, TransactionVerifier};
use futures::future::Future;
use std::collections::{HashMap, HashSet};
use std::sync::Arc;
use tokio::prelude::{Async, Poll};
use tokio::sync::lock::Lock;
/// Outcome of resolving one transaction: the resolved tx plus its serialized
/// size, fee, and pool routing status.
type ResolveResult = Result<(ResolvedTransaction, usize, Capacity, TxStatus), Error>;
/// Future that acquires the tx-pool lock and resolves a batch of
/// transactions against the current snapshot, before verification.
pub struct PreResolveTxsProcess {
    pub tx_pool: Lock<TxPool>,
    // `Option` so `poll` can move the txs out exactly once.
    pub txs: Option<Vec<TransactionView>>,
}
impl PreResolveTxsProcess {
    pub fn new(tx_pool: Lock<TxPool>, txs: Vec<TransactionView>) -> PreResolveTxsProcess {
        PreResolveTxsProcess {
            tx_pool,
            txs: Some(txs),
        }
    }
}
/// Result of pre-resolution: the tip hash at resolve time, the snapshot
/// used, the resolved txs, and their (size, fee, status) triples.
type PreResolveTxsItem = (
    Byte32,
    Arc<Snapshot>,
    Vec<ResolvedTransaction>,
    Vec<(usize, Capacity, TxStatus)>,
);
impl Future for PreResolveTxsProcess {
    type Item = PreResolveTxsItem;
    type Error = Error;
    fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
        match self.tx_pool.poll_lock() {
            Async::Ready(tx_pool) => {
                let txs = self.txs.take().expect("cannot execute twice");
                debug_assert!(!txs.is_empty(), "txs should not be empty!");
                // Pin the snapshot and its tip hash so the submit stage can
                // detect a tip change between resolution and submission.
                let snapshot = tx_pool.cloned_snapshot();
                let tip_hash = snapshot.tip_hash();
                check_transaction_hash_collision(&tx_pool, &txs)?;
                // Txs in the batch may spend outputs of earlier batch txs,
                // so each resolved tx is added to the provider as we go.
                let mut txs_provider = TransactionsProvider::default();
                let resolved = txs
                    .iter()
                    .map(|tx| {
                        let ret = resolve_tx(&tx_pool, &snapshot, &txs_provider, tx.clone());
                        txs_provider.insert(tx);
                        ret
                    })
                    .collect::<Result<Vec<(ResolvedTransaction, usize, Capacity, TxStatus)>, _>>(
                    )?;
                // Split into the resolved txs and their per-tx metadata.
                let (rtxs, status) = resolved
                    .into_iter()
                    .map(|(rtx, tx_size, fee, status)| (rtx, (tx_size, fee, status)))
                    .unzip();
                Ok(Async::Ready((tip_hash, snapshot, rtxs, status)))
            }
            Async::NotReady => Ok(Async::NotReady),
        }
    }
}
/// Future that verifies a batch of resolved transactions against a fixed
/// snapshot. Requires no lock, so it resolves on the first poll.
pub struct VerifyTxsProcess {
    pub snapshot: Arc<Snapshot>,
    /// Cached verification results keyed by tx hash; hits skip full
    /// verification (see `verify_rtxs`).
    pub txs_verify_cache: HashMap<Byte32, CacheEntry>,
    // `Option` so `poll` can move the txs out exactly once.
    pub txs: Option<Vec<ResolvedTransaction>>,
    /// Upper bound on cycles spent verifying a single uncached tx.
    pub max_tx_verify_cycles: Cycle,
}
impl VerifyTxsProcess {
    pub fn new(
        snapshot: Arc<Snapshot>,
        txs_verify_cache: HashMap<Byte32, CacheEntry>,
        txs: Vec<ResolvedTransaction>,
        max_tx_verify_cycles: Cycle,
    ) -> VerifyTxsProcess {
        VerifyTxsProcess {
            snapshot,
            txs_verify_cache,
            txs: Some(txs),
            max_tx_verify_cycles,
        }
    }
}
impl Future for VerifyTxsProcess {
    type Item = Vec<(ResolvedTransaction, CacheEntry)>;
    type Error = Error;
    fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
        let txs = self.txs.take().expect("cannot execute twice");
        Ok(Async::Ready(verify_rtxs(
            &self.snapshot,
            txs,
            &self.txs_verify_cache,
            self.max_tx_verify_cycles,
        )?))
    }
}
/// Future that takes the pool lock and inserts verified transactions into
/// the pool via `SubmitTxsExecutor`.
pub struct SubmitTxsProcess {
    pub tx_pool: Lock<TxPool>,
    // `Option` so `poll` can move the data out exactly once.
    pub txs: Option<Vec<(ResolvedTransaction, CacheEntry)>>,
    /// Tip hash observed during pre-resolution; a mismatch at submit time
    /// forces re-resolution against the new snapshot.
    pub pre_resolve_tip: Byte32,
    pub status: Option<Vec<(usize, Capacity, TxStatus)>>,
}
impl SubmitTxsProcess {
    pub fn new(
        tx_pool: Lock<TxPool>,
        txs: Vec<(ResolvedTransaction, CacheEntry)>,
        pre_resolve_tip: Byte32,
        status: Vec<(usize, Capacity, TxStatus)>,
    ) -> SubmitTxsProcess {
        SubmitTxsProcess {
            tx_pool,
            pre_resolve_tip,
            status: Some(status),
            txs: Some(txs),
        }
    }
}
impl Future for SubmitTxsProcess {
    type Item = (HashMap<Byte32, CacheEntry>, Vec<CacheEntry>);
    type Error = Error;
    fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
        match self.tx_pool.poll_lock() {
            Async::Ready(mut guard) => {
                let executor = SubmitTxsExecutor {
                    tx_pool: &mut guard,
                };
                let txs = self.txs.take().expect("cannot execute twice");
                let status = self.status.take().expect("cannot execute twice");
                Ok(Async::Ready(executor.execute(
                    &self.pre_resolve_tip,
                    txs,
                    status,
                )?))
            }
            Async::NotReady => Ok(Async::NotReady),
        }
    }
}
/// Where a transaction is routed in the pool, derived from its proposal
/// status in the snapshot (assigned in `resolve_tx`, consumed in
/// `SubmitTxsExecutor::execute`).
pub enum TxStatus {
    /// Not yet proposed; inserted via `add_pending`.
    Fresh,
    /// In the snapshot's gap proposal set; inserted via `add_gap`.
    Gap,
    /// Already proposed; inserted via `add_proposed`.
    Proposed,
}
/// Applies a batch of verified transactions to the pool while the pool lock
/// is held.
struct SubmitTxsExecutor<'a> {
    tx_pool: &'a mut TxPool,
}
impl<'a> SubmitTxsExecutor<'a> {
    /// Insert each verified tx into the queue indicated by its `TxStatus`.
    ///
    /// If the chain tip moved since pre-resolution, every tx is re-resolved
    /// against the current snapshot first. Returns the cycle cache entries
    /// keyed by tx hash, plus the cycles in submission order.
    fn execute(
        self,
        pre_resolve_tip: &Byte32,
        txs: Vec<(ResolvedTransaction, CacheEntry)>,
        status: Vec<(usize, Capacity, TxStatus)>,
    ) -> Result<(HashMap<Byte32, CacheEntry>, Vec<CacheEntry>), Error> {
        let snapshot = self.tx_pool.snapshot();
        if pre_resolve_tip != &snapshot.tip_hash() {
            // Tip changed: ensure the txs still resolve. Batch txs may spend
            // each other's outputs, hence the incremental provider.
            let mut txs_provider = TransactionsProvider::default();
            for (tx, _) in &txs {
                resolve_tx(
                    self.tx_pool,
                    snapshot,
                    &txs_provider,
                    tx.transaction.clone(),
                )?;
                txs_provider.insert(&tx.transaction);
            }
        }
        let cache = txs
            .iter()
            .map(|(tx, cycles)| (tx.transaction.hash(), *cycles))
            .collect();
        let cycles_vec = txs.iter().map(|(_, cycles)| *cycles).collect();
        for ((rtx, cache_entry), (tx_size, fee, status)) in txs.into_iter().zip(status.into_iter())
        {
            if self.tx_pool.reach_cycles_limit(cache_entry.cycles) {
                return Err(InternalErrorKind::TransactionPoolFull.into());
            }
            let min_fee = self.tx_pool.config.min_fee_rate.fee(tx_size);
            // reject txs which fee lower than min fee rate
            if fee < min_fee {
                return Err(SubmitTxError::LowFeeRate(min_fee.as_u64()).into());
            }
            let related_dep_out_points = rtx.related_dep_out_points();
            let entry = TxEntry::new(
                rtx.transaction,
                cache_entry.cycles,
                fee,
                tx_size,
                related_dep_out_points,
            );
            let inserted = match status {
                TxStatus::Fresh => {
                    let tx_hash = entry.transaction.hash();
                    let inserted = self.tx_pool.add_pending(entry)?;
                    if inserted {
                        // Only txs newly entering pending feed the fee estimator.
                        let height = self.tx_pool.snapshot().tip_number();
                        let fee_rate = FeeRate::calculate(fee, tx_size);
                        self.tx_pool
                            .fee_estimator
                            .track_tx(tx_hash, fee_rate, height);
                    }
                    inserted
                }
                TxStatus::Gap => self.tx_pool.add_gap(entry)?,
                TxStatus::Proposed => self.tx_pool.add_proposed(entry)?,
            };
            if inserted {
                self.tx_pool
                    .update_statics_for_add_tx(tx_size, cache_entry.cycles);
            }
        }
        Ok((cache, cycles_vec))
    }
}
/// Reject the whole batch if any transaction's proposal short id is already
/// present in the pool.
fn check_transaction_hash_collision(
    tx_pool: &TxPool,
    txs: &[TransactionView],
) -> Result<(), Error> {
    let duplicated = txs
        .iter()
        .any(|tx| tx_pool.contains_proposal_id(&tx.proposal_short_id()));
    if duplicated {
        Err(InternalErrorKind::PoolTransactionDuplicated.into())
    } else {
        Ok(())
    }
}
/// Resolve one transaction's inputs and deps, returning the resolved tx
/// together with its serialized size, fee, and routing status.
///
/// Fails fast when adding the tx would exceed the pool's size limit. A tx
/// already proposed in the snapshot resolves against the proposed pool
/// only; any other tx may also spend outputs of pending/gap txs.
fn resolve_tx<'a>(
    tx_pool: &TxPool,
    snapshot: &Snapshot,
    txs_provider: &'a TransactionsProvider<'a>,
    tx: TransactionView,
) -> ResolveResult {
    let tx_size = tx.data().serialized_size_in_block();
    if tx_pool.reach_size_limit(tx_size) {
        return Err(InternalErrorKind::TransactionPoolFull.into());
    }
    let short_id = tx.proposal_short_id();
    if snapshot.proposals().contains_proposed(&short_id) {
        resolve_tx_from_proposed(tx_pool, snapshot, txs_provider, tx).and_then(|rtx| {
            let fee = tx_pool.calculate_transaction_fee(snapshot, &rtx);
            fee.map(|fee| (rtx, tx_size, fee, TxStatus::Proposed))
        })
    } else {
        resolve_tx_from_pending_and_proposed(tx_pool, snapshot, txs_provider, tx).and_then(|rtx| {
            // Gap-proposed txs are routed to the gap queue; everything else
            // is fresh (pending).
            let status = if snapshot.proposals().contains_gap(&short_id) {
                TxStatus::Gap
            } else {
                TxStatus::Fresh
            };
            let fee = tx_pool.calculate_transaction_fee(snapshot, &rtx);
            fee.map(|fee| (rtx, tx_size, fee, status))
        })
    }
}
/// Resolve `tx` seeing only the proposed pool layered over the snapshot,
/// plus earlier txs from the same batch (`txs_provider`).
fn resolve_tx_from_proposed<'a>(
    tx_pool: &TxPool,
    snapshot: &Snapshot,
    txs_provider: &'a TransactionsProvider<'a>,
    tx: TransactionView,
) -> Result<ResolvedTransaction, Error> {
    let cell_provider = OverlayCellProvider::new(&tx_pool.proposed, snapshot);
    let provider = OverlayCellProvider::new(txs_provider, &cell_provider);
    resolve_transaction(tx, &mut HashSet::new(), &provider, snapshot)
}
/// Resolve `tx` seeing the pending, gap and proposed pools layered (in that
/// priority order) over the snapshot, plus earlier txs from the same batch.
fn resolve_tx_from_pending_and_proposed<'a>(
    tx_pool: &TxPool,
    snapshot: &Snapshot,
    txs_provider: &'a TransactionsProvider<'a>,
    tx: TransactionView,
) -> Result<ResolvedTransaction, Error> {
    let proposed_provider = OverlayCellProvider::new(&tx_pool.proposed, snapshot);
    let gap_and_proposed_provider = OverlayCellProvider::new(&tx_pool.gap, &proposed_provider);
    let pending_and_proposed_provider =
        OverlayCellProvider::new(&tx_pool.pending, &gap_and_proposed_provider);
    let provider = OverlayCellProvider::new(txs_provider, &pending_and_proposed_provider);
    resolve_transaction(tx, &mut HashSet::new(), &provider, snapshot)
}
/// Verify each resolved transaction against the snapshot's tip context,
/// failing fast on the first invalid tx.
///
/// A cache hit re-runs only the contextual checks and reuses the cached
/// cycle count; a miss runs full verification bounded by
/// `max_tx_verify_cycles`.
fn verify_rtxs(
    snapshot: &Snapshot,
    txs: Vec<ResolvedTransaction>,
    txs_verify_cache: &HashMap<Byte32, CacheEntry>,
    max_tx_verify_cycles: Cycle,
) -> Result<Vec<(ResolvedTransaction, CacheEntry)>, Error> {
    let tip_header = snapshot.tip_header();
    let tip_number = tip_header.number();
    let epoch = tip_header.epoch();
    let consensus = snapshot.consensus();
    txs.into_iter()
        .map(|tx| {
            let tx_hash = tx.transaction.hash();
            if let Some(cache_entry) = txs_verify_cache.get(&tx_hash) {
                // Verification is performed as if the tx landed in the next
                // block (tip_number + 1).
                ContextualTransactionVerifier::new(
                    &tx,
                    snapshot,
                    tip_number + 1,
                    epoch,
                    tip_header.hash(),
                    consensus,
                )
                .verify()
                .map(|_| (tx, *cache_entry))
            } else {
                TransactionVerifier::new(
                    &tx,
                    snapshot,
                    tip_number + 1,
                    epoch,
                    tip_header.hash(),
                    consensus,
                    snapshot,
                )
                .verify(max_tx_verify_cycles)
                .map(|cycles| (tx, cycles))
            }
        })
        .collect::<Result<Vec<_>, _>>()
}
|
use crate::{
rows::{row::Row, row_schema::RowSchema},
table::table_name::TableName,
table_column_name::TableColumnName,
};
use apllodb_shared_components::{NnSqlValue, SqlValue};
use std::collections::HashSet;
/// Mock fixture for the `people` table:
/// - people:
///   - id BIGINT NOT NULL, PRIMARY KEY
///   - age INTEGER NOT NULL
#[derive(Clone, PartialEq, Debug)]
pub struct People;
impl People {
    pub fn table_name() -> TableName {
        TableName::new("people").unwrap()
    }
    /// Column reference for `people.id`.
    pub fn tc_id() -> TableColumnName {
        TableColumnName::factory(Self::table_name().as_str(), "id")
    }
    /// Column reference for `people.age`.
    pub fn tc_age() -> TableColumnName {
        TableColumnName::factory(Self::table_name().as_str(), "age")
    }
    /// Schema over all columns; built from a set, so declaration order here
    /// does not matter.
    pub fn schema() -> RowSchema {
        RowSchema::from(
            vec![Self::tc_id(), Self::tc_age()]
                .into_iter()
                .collect::<HashSet<_>>(),
        )
    }
    /// Build one row; values are listed in column-name order, matching the
    /// ordering RowSchema uses.
    pub fn row(id: i64, age: i32) -> Row {
        // note: order by column name (see RowSchema implementation)
        Row::new(vec![
            SqlValue::NotNull(NnSqlValue::Integer(age)),
            SqlValue::NotNull(NnSqlValue::BigInt(id)),
        ])
    }
}
/// Mock fixture for the `body` table:
/// - body:
///   - id BIGINT NOT NULL, PRIMARY KEY
///   - people_id BIGINT NOT NULL
///   - height INTEGER NOT NULL
#[derive(Clone, PartialEq, Debug)]
pub struct Body;
impl Body {
    pub fn table_name() -> TableName {
        TableName::new("body").unwrap()
    }
    /// Column reference for `body.id`.
    pub fn tc_id() -> TableColumnName {
        TableColumnName::factory(Self::table_name().as_str(), "id")
    }
    /// Column reference for `body.people_id`.
    pub fn tc_people_id() -> TableColumnName {
        TableColumnName::factory(Self::table_name().as_str(), "people_id")
    }
    /// Column reference for `body.height`.
    pub fn tc_height() -> TableColumnName {
        TableColumnName::factory(Self::table_name().as_str(), "height")
    }
    /// Schema over all columns; built from a set, so declaration order here
    /// does not matter.
    pub fn schema() -> RowSchema {
        RowSchema::from(
            vec![Self::tc_id(), Self::tc_people_id(), Self::tc_height()]
                .into_iter()
                .collect::<HashSet<_>>(),
        )
    }
    /// Build one row; values are listed in column-name order, matching the
    /// ordering RowSchema uses.
    pub fn row(id: i64, people_id: i64, height: i32) -> Row {
        // note: order by column name (see RowSchema implementation)
        Row::new(vec![
            SqlValue::NotNull(NnSqlValue::Integer(height)),
            SqlValue::NotNull(NnSqlValue::BigInt(id)),
            SqlValue::NotNull(NnSqlValue::BigInt(people_id)),
        ])
    }
}
/// Mock fixture for the `pet` table:
/// - pet:
///   - id BIGINT NOT NULL, PRIMARY KEY
///   - people_id BIGINT NOT NULL
///   - kind TEXT NOT NULL
///   - age SMALLINT NOT NULL
#[derive(Clone, PartialEq, Debug)]
pub struct Pet;
impl Pet {
    pub fn table_name() -> TableName {
        TableName::new("pet").unwrap()
    }
    /// Column reference for `pet.id`.
    pub fn tc_id() -> TableColumnName {
        TableColumnName::factory(Self::table_name().as_str(), "id")
    }
    /// Column reference for `pet.people_id`.
    pub fn tc_people_id() -> TableColumnName {
        TableColumnName::factory(Self::table_name().as_str(), "people_id")
    }
    /// Column reference for `pet.kind`.
    pub fn tc_kind() -> TableColumnName {
        TableColumnName::factory(Self::table_name().as_str(), "kind")
    }
    /// Column reference for `pet.age`.
    pub fn tc_age() -> TableColumnName {
        TableColumnName::factory(Self::table_name().as_str(), "age")
    }
    /// Schema over all columns; built from a set, so declaration order here
    /// does not matter.
    pub fn schema() -> RowSchema {
        RowSchema::from(
            vec![
                Self::tc_id(),
                Self::tc_people_id(),
                Self::tc_kind(),
                Self::tc_age(),
            ]
            .into_iter()
            .collect::<HashSet<_>>(),
        )
    }
    /// Build one row; values are listed in column-name order, matching the
    /// ordering RowSchema uses.
    pub fn row(id: i64, people_id: i64, kind: &str, age: i16) -> Row {
        // note: order by column name (see RowSchema implementation)
        Row::new(vec![
            SqlValue::NotNull(NnSqlValue::SmallInt(age)),
            SqlValue::NotNull(NnSqlValue::BigInt(id)),
            SqlValue::NotNull(NnSqlValue::Text(kind.to_string())),
            SqlValue::NotNull(NnSqlValue::BigInt(people_id)),
        ])
    }
}
/// A bundle of mock rows for all three tables, used as test input.
#[derive(Clone, PartialEq, Debug)]
pub struct ModelsMock {
    pub people: Vec<Row>,
    pub body: Vec<Row>,
    pub pet: Vec<Row>,
}
|
use crate::{alphabet::Alphabet, dfa::DFA, range_set::Range, state::State};
use core::{marker::PhantomData, ops::Bound};
use valis_ds::{
ops::{Complement, Difference, Intersection, Union},
set::{Set, SetIterExt, VectorSet},
};
/// A table-driven deterministic finite automaton over alphabet `A`, with
/// state ids of type `I`.
#[derive(Debug, PartialEq, Eq, Clone, Hash)]
pub struct StandardDFA<A, I: State> {
    // The contiguous range of states that are valid in this dfa
    states: Range<I>,
    // a complete function, represented as a table, of states x symbols to states
    // (row-major: entry for (state, symbol) lives at state * num_symbols + symbol)
    transitions: Box<[I]>,
    // sets that accept the input when encountered as the final state
    accept_states: VectorSet<I>,
    // the initial state for every simulation of the dfa
    start_state: I,
    // an optional state that indicates early stoppage
    dead_state: Option<I>,
    // ties the DFA to its alphabet type without storing alphabet data
    _alphabet: PhantomData<A>,
}
impl<A, I> StandardDFA<A, I>
where
    A: Alphabet,
    I: State,
{
    /// Assemble a DFA from its parts.
    ///
    /// # Panics
    /// Panics when the transition table does not contain exactly
    /// `|states| * |symbols|` entries. In debug builds it additionally
    /// asserts that every transition target, accept state, the start state
    /// and the dead state (if any) lie inside `states`.
    pub fn new(
        states: Range<I>,
        transitions: Box<[I]>,
        accept_states: VectorSet<I>,
        start_state: I,
        dead_state: Option<I>,
    ) -> Self {
        // Check basic validity of the dfa components
        assert_eq!(
            states.size() * A::num_symbols(),
            transitions.len(),
            "Num States * Num Symbols != Num Transitions: ({} x {} != {})",
            states.size(),
            A::num_symbols(),
            transitions.len()
        );
        debug_assert!(
            transitions.iter().all(|state| states.contains(*state)),
            "Transition table contains invalid state: {:?}",
            transitions
                .iter()
                .find(|item| !states.contains(**item))
                .unwrap()
        );
        debug_assert!((&accept_states)
            .into_iter()
            .all(|state| states.contains(*state)));
        debug_assert!(states.contains(start_state));
        debug_assert!(dead_state
            .map(|state| states.contains(state))
            .unwrap_or(true));
        StandardDFA {
            states,
            transitions,
            accept_states,
            start_state,
            dead_state,
            _alphabet: PhantomData,
        }
    }
    /// The contiguous range of valid state ids.
    #[inline]
    pub fn states(&self) -> &Range<I> {
        &self.states
    }
    /// The flat, row-major transition table.
    #[inline]
    pub fn transitions(&self) -> &[I] {
        &self.transitions
    }
    /// The state every simulation starts from.
    #[inline]
    pub fn start_state(&self) -> &I {
        &self.start_state
    }
    /// The set of accepting states.
    #[inline]
    pub fn accept_states(&self) -> &VectorSet<I> {
        &self.accept_states
    }
    /// The optional early-stoppage state.
    #[inline]
    pub fn dead_state(&self) -> &Option<I> {
        &self.dead_state
    }
}
impl<A, I> DFA<A> for StandardDFA<A, I>
where
    A: Alphabet,
    I: State,
{
    type AcceptSet = VectorSet<I>;
    type ID = I;
    fn all_states(&self) -> &Range<Self::ID> {
        &self.states
    }
    fn start_state(&self) -> Self::ID {
        self.start_state
    }
    fn dead_state(&self) -> Option<Self::ID> {
        self.dead_state
    }
    fn accept_states(&self) -> &Self::AcceptSet {
        &self.accept_states
    }
    // the transition function: row-major table lookup at (state, symbol)
    fn next_state(&self, current: Self::ID, sym: A) -> Self::ID {
        let idx = current.to_usize() * A::num_symbols() + sym.to_usize();
        self.transitions[idx]
    }
    // fast version of transition function
    // SAFETY: the caller must guarantee `current` is a valid state of this
    // DFA so the computed index is within the transition table.
    unsafe fn next_state_unchecked(&self, current: Self::ID, sym: A) -> Self::ID {
        let idx = current.to_usize() * A::num_symbols() + sym.to_usize();
        *self.transitions.get_unchecked(idx)
    }
}
impl<A, I> Complement for &StandardDFA<A, I>
where
    A: Alphabet,
    I: State,
{
    type Output = StandardDFA<A, I>;
    /// Accept exactly the strings the original rejects: the accept set
    /// becomes all states minus the old accept states; transitions, start
    /// state and dead state are copied unchanged.
    fn complement(self) -> Self::Output {
        // NOTE(review): `dead_state` is kept as-is, but in the complement a
        // former dead state may now be accepting — confirm early stoppage
        // remains valid after complementation.
        let mut dfa = self.clone();
        dfa.accept_states = self
            .states
            .into_iter()
            .difference(&self.accept_states)
            .collect();
        dfa
    }
}
/// Flatten the product-DFA state pair (a, b) into a single id:
/// `a * num_b_elements + b`.
#[inline]
fn convert_product_state<I: State>(a: I, b: I, num_b_elements: usize) -> I {
    I::from_usize(a.to_usize() * num_b_elements + b.to_usize())
}
/// Build the cross-product DFA of `lhs` and `rhs`.
///
/// Product state (a, b) is flattened to `a * |rhs states| + b`. The caller
/// supplies `accept_combinations` to choose which product states accept —
/// the only part that differs between intersection, union and difference.
fn cross_product_construction<A: Alphabet, I: State>(
    lhs: &StandardDFA<A, I>,
    rhs: &StandardDFA<A, I>,
    accept_combinations: impl Fn(&Range<I>, &VectorSet<I>, &Range<I>, &VectorSet<I>) -> VectorSet<I>,
) -> StandardDFA<A, I> {
    let num_rhs_states = rhs.states().size();
    let accept_states = accept_combinations(
        lhs.states(),
        lhs.accept_states(),
        rhs.states(),
        rhs.accept_states(),
    );
    let start_state = convert_product_state(*lhs.start_state(), *rhs.start_state(), num_rhs_states);
    // NOTE(review): a product dead state exists only when BOTH inputs have
    // one; pairs where a single side is dead are not marked dead — confirm
    // this conservatism is intended.
    let dead_state = lhs.dead_state().and_then(|l_state| {
        rhs.dead_state()
            .map(|r_state| convert_product_state(l_state, r_state, num_rhs_states))
    });
    // Pair up transition rows: for lhs state `a`, cycle its per-symbol row
    // across rhs's whole table so that the entry for ((a, b), sym) combines
    // lhs(a, sym) with rhs(b, sym).
    let transitions: Vec<_> = lhs
        .transitions
        .chunks(A::num_symbols())
        .flat_map(|a_chunk| a_chunk.iter().cycle().zip(rhs.transitions.iter()))
        .map(|(a_state, b_state)| convert_product_state(*a_state, *b_state, num_rhs_states))
        .collect();
    // Create a new set of states by simply making a new flat range of numbers from
    // 0 to (size of lhs states * size of rhs states)
    let states = Range {
        start: Bound::Included(State::min_value()),
        end: Bound::Excluded(I::from_usize(lhs.states().size() * rhs.states().size())),
    };
    StandardDFA {
        states,
        transitions: transitions.into_boxed_slice(),
        accept_states,
        dead_state,
        start_state,
        _alphabet: PhantomData,
    }
}
impl<A, I> Intersection<&StandardDFA<A, I>> for &StandardDFA<A, I>
where
    A: Alphabet,
    I: State,
{
    type Output = StandardDFA<A, I>;
    /// Product construction accepting only pairs where BOTH sides accept:
    /// the accept set is the cartesian product of the two accept sets.
    fn intersection(self, rhs: &StandardDFA<A, I>) -> Self::Output {
        let num_rhs_states = rhs.states().size();
        cross_product_construction(&self, rhs, |_l_all, l_accept, _r_all, r_accept| {
            l_accept
                .iter()
                .cartesian_product(r_accept)
                .map(|state| convert_product_state(*state.0, *state.1, num_rhs_states))
                .collect()
        })
    }
}
impl<A, I> Union<&StandardDFA<A, I>> for &StandardDFA<A, I>
where
    A: Alphabet,
    I: State,
{
    type Output = StandardDFA<A, I>;
    /// Product construction accepting pairs where EITHER side accepts, so
    /// all state pairs must be enumerated (not just the accept sets).
    fn union(self, rhs: &StandardDFA<A, I>) -> Self::Output {
        let num_rhs_states = rhs.states().size();
        cross_product_construction(&self, rhs, |l_all, l_accept, r_all, r_accept| {
            l_all
                .into_iter()
                .cartesian_product(r_all)
                .filter(|(a, b)| l_accept.contains(a) || r_accept.contains(b))
                .map(|state| convert_product_state(state.0, state.1, num_rhs_states))
                .collect()
        })
    }
}
impl<A, I> Difference<&StandardDFA<A, I>> for &StandardDFA<A, I>
where
    A: Alphabet,
    I: State,
{
    type Output = StandardDFA<A, I>;
    /// Product construction accepting pairs where the left side accepts and
    /// the right side does not (set difference of the languages).
    fn difference(self, rhs: &StandardDFA<A, I>) -> Self::Output {
        let num_rhs_states = rhs.states().size();
        cross_product_construction(&self, rhs, |l_all, l_accept, r_all, r_accept| {
            l_all
                .into_iter()
                .cartesian_product(r_all)
                .filter(|(a, b)| l_accept.contains(a) && !r_accept.contains(b))
                .map(|state| convert_product_state(state.0, state.1, num_rhs_states))
                .collect()
        })
    }
}
|
use mysql::from_row;
use mysql::error::Error::MySqlError;
use common::utils::*;
use common::lazy_static::SQL_POOL;
/// Run one of the `count(id)` queries below, bound to `(user_id, comment_id)`,
/// and return the count. "No result row" counts as zero.
fn query_vote_count(query: &str, user_id: &str, comment_id: &str) -> u8 {
    let mut result = SQL_POOL.prep_exec(query, (user_id, comment_id)).unwrap();
    match result.next() {
        Some(row) => {
            let (count, ) = from_row::<(u8, )>(row.unwrap());
            count
        }
        None => 0,
    }
}
/// Whether the user has cast any vote (either direction) on the comment.
pub fn is_voted(user_id: &str, comment_id: &str) -> bool {
    query_vote_count(r#"
    SELECT count(id) FROM comment_vote
    WHERE
    user_id = ? AND comment_id = ?
    "#, user_id, comment_id) != 0
}
/// Whether the user has an up-vote (state = 1) on the comment.
pub fn is_agreed(user_id: &str, comment_id: &str) -> bool {
    query_vote_count(r#"
    SELECT count(id) FROM comment_vote
    WHERE
    user_id = ? AND comment_id = ? AND state = 1
    "#, user_id, comment_id) != 0
}
/// Whether the user has a down-vote (state = -1) on the comment.
pub fn is_disagreed(user_id: &str, comment_id: &str) -> bool {
    query_vote_count(r#"
    SELECT count(id) FROM comment_vote
    WHERE
    user_id = ? AND comment_id = ? AND state = -1
    "#, user_id, comment_id) != 0
}
/// Insert a vote row for (user, comment) with the given state, stamping both
/// create and update times. Returns `Some(1)` on success, `None` on a MySQL
/// error (which is logged to stdout).
pub fn create_comment_vote(user_id: &str, comment_id: &str, state: &str) -> Option<u8> {
    let now = gen_datetime().to_string();
    let mut stmt = SQL_POOL.prepare(r#"
    INSERT INTO comment_vote
    (user_id, comment_id, state, create_time, update_time)
    VALUES
    (?, ?, ?, ?, ?)
    "#).unwrap();
    match stmt.execute((user_id, comment_id, state, &*now, &*now)) {
        Err(MySqlError(ref err)) => {
            println!("{:?}", err.message);
            None
        }
        _ => Some(1),
    }
}
/// Change the state of an existing vote and bump its update time.
/// Returns `Some(1)` on success, `None` on a MySQL error (logged to stdout).
pub fn update_comment_vote(user_id: &str, comment_id: &str, state: &str) -> Option<u8> {
    let now = gen_datetime().to_string();
    let mut stmt = SQL_POOL.prepare(r#"
    UPDATE comment_vote SET
    state = ?,
    update_time = ?
    WHERE
    user_id = ? AND comment_id = ?
    "#).unwrap();
    match stmt.execute((state, &*now, user_id, comment_id)) {
        Err(MySqlError(ref err)) => {
            println!("{:?}", err.message);
            None
        }
        _ => Some(1),
    }
}
pub fn delete_comment_vote(user_id: &str, comment_id: &str) -> Option<u8> {
let mut stmt = SQL_POOL.prepare(r#"
DELETE FROM comment_vote
WHERE
user_id = ? AND comment_id = ?
"#).unwrap();
let result = stmt.execute((user_id, comment_id));
if let Err(MySqlError(ref err)) = result {
println!("{:?}", err.message);
return None;
}
Some(1)
} |
//! Mutator context for each application thread.
use crate::plan::barriers::{Barrier, WriteTarget};
use crate::plan::global::Plan;
use crate::plan::AllocationSemantics as AllocationType;
use crate::policy::space::Space;
use crate::util::alloc::allocators::{AllocatorSelector, Allocators};
use crate::util::OpaquePointer;
use crate::util::{Address, ObjectReference};
use crate::vm::VMBinding;
use enum_map::EnumMap;
type SpaceMapping<VM> = Vec<(AllocatorSelector, &'static dyn Space<VM>)>;
// This struct is part of the Mutator struct.
// We are trying to make it fixed-sized so that VM bindings can easily define a Mutator type to have the exact same layout as our Mutator struct.
#[repr(C)]
pub struct MutatorConfig<VM: VMBinding> {
    // Mapping between allocation semantics and allocator selector
    pub allocator_mapping: &'static EnumMap<AllocationType, AllocatorSelector>,
    // Mapping between allocator selector and spaces. Each pair represents a mapping.
    // Put this behind a box, so it is a pointer-sized field.
    #[allow(clippy::box_vec)]
    pub space_mapping: Box<SpaceMapping<VM>>,
    // Plan-specific code for mutator prepare/release
    pub prepare_func: &'static dyn Fn(&mut Mutator<VM>, OpaquePointer),
    pub release_func: &'static dyn Fn(&mut Mutator<VM>, OpaquePointer),
}
// SAFETY(review): Send/Sync are asserted, not derived. The fields are 'static
// refs and a Box, but the `dyn Fn` targets must themselves be thread-safe —
// confirm at the construction sites (not visible in this chunk).
unsafe impl<VM: VMBinding> Send for MutatorConfig<VM> {}
unsafe impl<VM: VMBinding> Sync for MutatorConfig<VM> {}
/// A mutator is a per-thread data structure that manages allocations and barriers. It is usually highly coupled with the language VM.
/// It is recommended for MMTk users 1) to have a mutator struct of the same layout in the thread local storage that can be accessed efficiently,
/// and 2) to implement fastpath allocation and barriers for the mutator in the VM side.
// We are trying to make this struct fixed-sized so that VM bindings can easily define a type to have the exact same layout as this struct.
// Currently Mutator is fixed sized, and we should try keep this invariant:
// - Allocators are fixed-length arrays of allocators.
// - MutatorConfig only has pointers/refs (including fat pointers), and is fixed sized.
#[repr(C)]
pub struct Mutator<VM: VMBinding> {
    pub allocators: Allocators<VM>,
    // Write-barrier implementation selected by the active plan.
    pub barrier: Box<dyn Barrier>,
    // Opaque handle to the owning thread; returned by `get_tls()`.
    pub mutator_tls: OpaquePointer,
    pub plan: &'static dyn Plan<VM = VM>,
    pub config: MutatorConfig<VM>,
}
impl<VM: VMBinding> MutatorContext<VM> for Mutator<VM> {
    // Delegates to the plan-specific prepare hook stored in the config.
    fn prepare(&mut self, tls: OpaquePointer) {
        (*self.config.prepare_func)(self, tls)
    }
    // Delegates to the plan-specific release hook stored in the config.
    fn release(&mut self, tls: OpaquePointer) {
        (*self.config.release_func)(self, tls)
    }
    // Note that this method is slow, and we expect VM bindings that care about performance to implement allocation fastpath sequence in their bindings.
    fn alloc(
        &mut self,
        size: usize,
        align: usize,
        offset: isize,
        allocator: AllocationType,
    ) -> Address {
        // SAFETY(review): relies on `allocator_mapping` yielding a selector
        // valid for this `Allocators` instance — an invariant established at
        // mutator construction, not visible in this chunk.
        unsafe {
            self.allocators
                .get_allocator_mut(self.config.allocator_mapping[allocator])
        }
        .alloc(size, align, offset)
    }
    // Note that this method is slow, and we expect VM bindings that care about performance to implement allocation fastpath sequence in their bindings.
    fn post_alloc(&mut self, refer: ObjectReference, _bytes: usize, allocator: AllocationType) {
        // Resolves the space backing this allocation and initializes the
        // object header in it. The `unwrap` assumes every allocator here has
        // an associated space — TODO confirm for all plans.
        unsafe {
            self.allocators
                .get_allocator_mut(self.config.allocator_mapping[allocator])
        }
        .get_space()
        .unwrap()
        .initialize_header(refer, true)
    }
    fn get_tls(&self) -> OpaquePointer {
        self.mutator_tls
    }
    fn barrier(&mut self) -> &mut dyn Barrier {
        &mut *self.barrier
    }
}
/// Each GC plan should provide their implementation of a MutatorContext. *Note that this trait is no longer needed as we removed
/// per-plan mutator implementation and we will remove this trait as well in the future.*
// TODO: We should be able to remove this trait, as we removed per-plan mutator implementation, and there is no other type that implements this trait.
// The Mutator struct above is the only type that implements this trait. We should be able to merge them.
pub trait MutatorContext<VM: VMBinding>: Send + Sync + 'static {
    /// Plan-specific per-mutator work before a GC.
    fn prepare(&mut self, tls: OpaquePointer);
    /// Plan-specific per-mutator work after a GC.
    fn release(&mut self, tls: OpaquePointer);
    /// Allocate `size` bytes with the given alignment/offset using the
    /// allocator chosen by `allocator`.
    fn alloc(
        &mut self,
        size: usize,
        align: usize,
        offset: isize,
        allocator: AllocationType,
    ) -> Address;
    /// Per-object work after an allocation (e.g. header initialization).
    fn post_alloc(&mut self, refer: ObjectReference, bytes: usize, allocator: AllocationType);
    /// Default: drain barrier-buffered work by flushing the barrier.
    fn flush_remembered_sets(&mut self) {
        self.barrier().flush();
    }
    /// Default: alias for `flush_remembered_sets`.
    fn flush(&mut self) {
        self.flush_remembered_sets();
    }
    /// The opaque thread handle this mutator belongs to.
    fn get_tls(&self) -> OpaquePointer;
    /// Mutable access to this mutator's write barrier.
    fn barrier(&mut self) -> &mut dyn Barrier;
    /// Default: report `obj` as modified through the barrier's post-write hook.
    fn record_modified_node(&mut self, obj: ObjectReference) {
        self.barrier().post_write_barrier(WriteTarget::Object(obj));
    }
}
|
use cargo_rename_demo::rename_demo;
// Entry point: exercises the renamed crate by calling its demo function.
fn main() {
    rename_demo::foo();
}
#[macro_use]
extern crate log;
#[cfg(target_os="android")]
#[allow(non_snake_case)]
pub mod android;
pub mod server;
pub mod client;
pub mod types;
|
//! Rust crate associated with the article [`DSS21`].
//!
//! Provides an efficient function to compute the condition number of *V_n*, the Vandermonde matrix associated with the *n*th cyclotomic polynomial.
//! The condition number is computed via the trace of the matrix *H_n*, as shown in [`DSS20`].
//!
//! [`DSS20`]: https://doi.org/10.1515/jmc-2020-0009
use ndarray::{Array, ArrayBase, Dim, OwnedRepr};
use ndarray_linalg::*;
use num::integer::gcd;
use red_primality::{euler_totient, factor, PrimeFactorization};
// Entry c_t of the (Toeplitz) Gram matrix of V_n: computed as
// mu(n/g) * phi(n) / phi(n/g) with g = gcd(n, t).
// NOTE(review): formula taken to follow DSS20 — confirm against the paper.
#[inline]
fn ct(t: i64, n: u64, n_factor: PrimeFactorization) -> i64 {
    let g_nt = gcd(n as i64, t) as u64;
    let n_g_factor = factor(n / g_nt);
    n_g_factor.mobius() * (n_factor.euler_totient() as i64) / (n_g_factor.euler_totient() as i64)
}
/// Absolute distance |i - j| between two matrix indices, used to look up the
/// Toeplitz coefficient shared by entry (i, j).
///
/// Computed directly on `usize` instead of round-tripping through `i64`,
/// which could overflow for indices above `i64::MAX`.
#[inline]
fn get_index(i: usize, j: usize) -> usize {
    if i >= j { i - j } else { j - i }
}
// Builds H_n = n * G_n^{-1}, where G_n is the (symmetric Toeplitz) Gram
// matrix of the Vandermonde matrix V_n. The first row's coefficients c_t are
// computed once and reused for every entry via |i - j| indexing.
#[inline]
fn get_h(n: u64) -> ArrayBase<OwnedRepr<f64>, Dim<[usize; 2]>> {
    let n_factor = factor(n);
    // Matrix dimension m = phi(n), the degree of the n-th cyclotomic polynomial.
    let m = n_factor.euler_totient() as usize;
    let v: Vec<_> = (0..(m as i64))
        .map(|t: i64| ct(t, n, n_factor.clone()))
        .collect();
    let g = Array::from_shape_fn((m, m), |(i, j)| v[get_index(i, j)] as f64);
    // Panics if G_n is singular — assumed invertible for valid n.
    let g_inv = g.inv_into().unwrap();
    (n as f64) * g_inv
}
/// Compute the trace of *H_n*.
///
/// *H_n* is defined as *n G_n^{-1}*, where *G_n* is the Gram matrix of *V_n*, the Vandermonde matrix associated with the *n*th cyclotomic polynomial.
///
/// In [`DSS20`] it is shown that for every positive integer *n*, the matrix *H_n* has integer entries.
///
/// Because the entries are integers, the floating-point trace is rounded to
/// the nearest integer before the cast.
///
/// [`DSS20`]: https://doi.org/10.1515/jmc-2020-0009
#[inline]
pub fn tr_h(n: u64) -> u64 {
    let h = get_h(n);
    let tr = h.trace().unwrap();
    tr.round() as u64
}
/// Compute the condition number of *V_n*.
///
/// *V_n* is the Vandermonde matrix associated with the *n*th cyclotomic polynomial.
///
/// The implementation evaluates *phi(n) * sqrt(Tr(H_n) / n)*.
/// NOTE(review): the original comment stated *m\sqrt(n) * sqrt(Tr(H_n))*,
/// which does not match the code below — confirm the intended formula
/// against [`DSS20`].
/// The trace of *H_n* is computed in [`tr_h`].
///
/// [`DSS20`]: https://doi.org/10.1515/jmc-2020-0009
#[inline]
pub fn cond(n: u64) -> f64 {
    let tr = tr_h(n) as f64;
    let m = euler_totient(n) as f64;
    m * (tr / (n as f64)).sqrt()
}
#[cfg(test)]
mod tests {
    use super::*;
    #[test]
    fn tr_prime() {
        let primes: [u64; 5] = [11, 101, 103, 137, 179];
        let traces = primes.map(|p| tr_h(p));
        assert_eq!(traces, [20, 200, 204, 272, 356]);
    }
    #[test]
    fn tr_prime_power() {
        let primes_power: [u64; 5] = [25, 27, 49, 121, 841];
        let traces = primes_power.map(|p| tr_h(p));
        assert_eq!(traces, [40, 36, 84, 220, 1624]);
    }
    #[test]
    fn tr_squarefree() {
        let squarefrees: [u64; 5] = [259, 534, 649, 785, 901];
        let traces = squarefrees.map(|p| tr_h(p));
        assert_eq!(traces, [1576, 1640, 5776, 3740, 11600]);
    }
    // Checks every entry of H_n is within `tol` of an integer (H_n has
    // integer entries per DSS20); large deviations indicate precision loss
    // in the matrix inversion. Marked #[ignore] because they are slow.
    macro_rules! precision_tests {
        ($($name:ident: $value:expr,)*) => {
        $(
            #[test]
            #[ignore]
            fn $name() {
                let (n, tol) = $value;
                let h = get_h(n);
                for ((i, j), x) in h.indexed_iter() {
                    let fract = x.fract();
                    assert!(
                        fract < tol || (1f64 - fract) < tol,
                        // typo fix: "tollerance" -> "tolerance"
                        "Entry ({}, {}): {} is above tolerance {}",
                        i,
                        j,
                        x,
                        tol
                    )
                }
            }
        )*
        };
    }
    precision_tests! {
        precision_3: (3, 1e-9),
        precision_15: (15, 1e-9),
        precision_107: (107, 1e-9),
        // BUG FIX: was `1e7` — a tolerance of ten million makes the assertion
        // vacuous. Every sibling tolerance is a small negative power of ten,
        // so this was clearly meant to be `1e-7`.
        precision_1155: (1155, 1e-7),
        precision_8151: (8151, 1e-4),
    }
}
|
// Inspired by: https://github.com/denismr/SymmetricPCVT/blob/master/C%2B%2B/SPCVT.cc
use bitset_core::BitSet;
use std::vec::Vec;
use crate::{utils::Octant, Fov, FovCallbackEnum, FovConfig, Los, VisionShape};
use rl_utils::{tranthong_func, Area, Coord};
/// The n-th triangular number (0, 1, 3, 6, 10, ...): total number of cells in
/// the first `n` rows of the triangular line table.
const fn nth_triangle_nr(n: usize) -> usize {
    n * (n + 1) / 2
}
/// Flat index of (row, cell) in the triangular line table; row `r` stores
/// its cells consecutively after the first `r` rows.
const fn row_cell_to_index(row: usize, cell: usize) -> usize {
    nth_triangle_nr(row) + cell
}
/// Number of `BitSetType` words needed to hold `size` bits (ceiling division).
const fn array_sz(size: usize) -> usize {
    (size + (BITSET_UNIT - 1)) / BITSET_UNIT
}
// Word type backing the per-cell line bitsets.
type BitSetType = u64;
// Bits per bitset word.
const BITSET_UNIT: usize = BitSetType::BITS as usize;
// Per-cell bitset: bit `i` is set when sight line `i` of the octant passes
// through this cell (populated by PCRCbuffer::generate_priv).
#[derive(Clone, PartialEq, Hash, PartialOrd, Debug)]
struct PCID<const MAX_RADIUS: usize>
where [(); array_sz(MAX_RADIUS)]: , {
    ids: [BitSetType; array_sz(MAX_RADIUS)],
}
impl<const MAX_RADIUS: usize> PCID<{ MAX_RADIUS }> where [(); array_sz(MAX_RADIUS)]: , {
    /// Empty bitset: no lines recorded yet.
    pub const fn new() -> Self {
        Self { ids: [0; array_sz(MAX_RADIUS)], }
    }
}
/// Precomputed, octant-shaped table of sight lines shared by all PCRC
/// instances with the same MAX_RADIUS; build once with [`PCRCbuffer::generate`].
#[derive(Clone, PartialEq, Hash, PartialOrd, Debug)]
pub struct PCRCbuffer<const MAX_RADIUS: usize>
where [(); array_sz(MAX_RADIUS)]: , {
    // One PCID per triangular cell of the octant.
    lines: Vec<PCID<{ MAX_RADIUS }>>,
    // All-ones mask: every line initially active.
    default_lines: [BitSetType; array_sz(MAX_RADIUS)],
}
impl<const MAX_RADIUS: usize> PCRCbuffer<{ MAX_RADIUS }> where [(); array_sz(MAX_RADIUS)]: , {
    /// Allocates the triangular table and rasterizes all MAX_RADIUS sight lines.
    pub fn generate() -> PCRCbuffer<{ MAX_RADIUS }> {
        Self { lines: vec![PCID::new(); nth_triangle_nr(MAX_RADIUS)],
               default_lines: [BitSetType::MAX; array_sz(MAX_RADIUS)], }.generate_priv()
    }
    fn generate_priv(mut self) -> PCRCbuffer<{ MAX_RADIUS }> {
        assert!(nth_triangle_nr(MAX_RADIUS - 1) < self.lines.len());
        let start = (0, 0).into();
        // Cast one line from the origin to every cell of the outermost row,
        // tagging each traversed cell with the line's id.
        // NOTE(review): tranthong_func is assumed to be a line rasterizer
        // (from rl_utils) visiting each cell on the segment — confirm.
        for line_id in 0..MAX_RADIUS {
            let current = (MAX_RADIUS - 1, line_id).into();
            tranthong_func(start, current, |c: Coord| {
                let idx = row_cell_to_index(c.x as usize, c.y as usize);
                if let Some(atom) = self.lines.get_mut(idx) {
                    atom.ids.bit_set(line_id);
                }
            });
        }
        // Sanity check: every cell in the interior rows is crossed by at
        // least one line.
        self.lines
            .iter()
            .take(MAX_RADIUS - 1)
            .enumerate()
            .for_each(|(i, pcid)| assert_eq!(pcid.ids.bit_none(), false, "failed at {}", i));
        self
    }
}
/// Pre-Computed Recursive... visibility calculator over a shared
/// [`PCRCbuffer`]; `callback` is queried for blockers and informed of
/// visibility results via [`FovCallbackEnum`].
pub struct PCRC<'a, T, Func, const MAX_RADIUS: usize>
    where Func: FnMut(&mut T, Coord, FovCallbackEnum) -> bool,
          [(); array_sz(MAX_RADIUS)]: , {
    // Map bounds; points outside are skipped.
    pub area: Area,
    pub buffer: &'a PCRCbuffer<{ MAX_RADIUS }>,
    // Effective sight radius; must stay below MAX_RADIUS (asserted in fov/los).
    pub radius: usize,
    pub vision: VisionShape,
    // User state threaded through every callback invocation.
    pub cb_type: &'a mut T,
    pub callback: Func,
}
impl<'a, T, Func, const MAX_RADIUS: usize> PCRC<'a, T, Func, MAX_RADIUS>
    where Func: FnMut(&mut T, Coord, FovCallbackEnum) -> bool,
          [(); array_sz(MAX_RADIUS)]: ,
{
    // pub fn new(buffer: &'a PCRCbuffer, cb_type: &'a mut T, callback: Func) -> PCRC<'a, T, Func> {
    //     PCRC {
    //         area: Area::new( (0,0).into(), (100,100).into() ),
    //         buffer,
    //         radius: 20,
    //         src: (50,50).into(),
    //         vision: VisionShape::Octagon,
    //         cb_type,
    //         callback,
    //     }
    // }
    // pub fn new(area: Area, buffer: &'a PCRCbuffer, radius: usize, src: Coord, vision: VisionShape, cb_type: &'a mut T, callback: Func) -> PCRC<'a, T, Func> {
    //     PCRC {
    //         area,
    //         buffer,
    //         radius,
    //         src,
    //         vision,
    //         cb_type,
    //         callback,
    //     }
    // }
    /// Computes visibility for one octant: walks the triangular cell table in
    /// lockstep with the precomputed line bitsets (skipping the origin cell),
    /// keeping a running set of still-unblocked lines.
    fn fov_octant(&mut self, src: Coord, octant: Octant) {
        // All lines start active; blockers clear their lines as we go.
        let mut active_lines = self.buffer.default_lines.clone();
        for (((row, cell), point_mod), pcid) in octant.iter(self.radius).zip(&self.buffer.lines).skip(1) {
            let point = src + point_mod;
            if !self.area.point_within(point) {
                continue; // off-map cell
            } else if !self.vision.in_radius(row, cell, self.radius) {
                continue; // outside the vision shape
            } else if active_lines.bit_none() {
                break; // every line blocked — nothing further can be visible
            }
            // Visible iff at least one active line passes through this cell.
            let visible = !active_lines.bit_disjoint(&pcid.ids);
            (self.callback)(self.cb_type, point, FovCallbackEnum::SetVisible(visible));
            // A visible blocker removes all of its lines from the active set.
            if visible && (self.callback)(self.cb_type, point, FovCallbackEnum::IsBlocked) {
                active_lines.bit_andnot(&pcid.ids);
            }
        }
    }
}
impl<'a, T, Func, const MAX_RADIUS: usize> FovConfig for PCRC<'a, T, Func, MAX_RADIUS>
    where Func: FnMut(&mut T, Coord, FovCallbackEnum) -> bool,
          [(); array_sz(MAX_RADIUS)]: ,
{
    /// Builder-style setter for the map bounds.
    fn with_area(mut self, area: Area) -> Self {
        self.area = area;
        self
    }
    /// Builder-style setter for the sight radius.
    fn with_radius(mut self, radius: usize) -> Self {
        self.radius = radius;
        self
    }
    /// Builder-style setter for the vision shape.
    fn with_vision_shape(mut self, vision: VisionShape) -> Self {
        self.vision = vision;
        self
    }
}
impl<'a, T, Func, const MAX_RADIUS: usize> Fov for PCRC<'a, T, Func, MAX_RADIUS>
    where Func: FnMut(&mut T, Coord, FovCallbackEnum) -> bool,
          [(); array_sz(MAX_RADIUS)]: ,
{
    /// Full field of view from `src`: runs `fov_octant` for all eight octants.
    fn fov(&mut self, src: Coord) {
        assert!(self.radius < MAX_RADIUS);
        for octant in Octant::iterator() {
            self.fov_octant(src, *octant);
        }
    }
}
impl<'a, T, Func, const MAX_RADIUS: usize> Los for PCRC<'a, T, Func, MAX_RADIUS>
    where Func: FnMut(&mut T, Coord, FovCallbackEnum) -> bool,
          [(); array_sz(MAX_RADIUS)]: ,
{
    /// Line-of-sight test from `src` to `dst`: `true` when at least one
    /// precomputed sight line through `dst`'s cell survives all blockers.
    /// On success a second pass marks the traversed cells visible.
    fn los(&mut self, src: Coord, dst: Coord) -> bool {
        let distance = src.pyth(dst);
        assert!(distance < MAX_RADIUS as isize);
        if let Some(octant) = Octant::find_octant(src, dst) {
            let delta = src.delta_abs(dst);
            let idx = row_cell_to_index(delta.x as usize, delta.y as usize);
            if let Some(dst_pcid) = self.buffer.lines.get(idx) {
                // Seed with only the lines that actually reach dst's cell.
                let mut active_lines = dst_pcid.ids.clone();
                let mut cell_start = 0;
                let mut cell_end = 1;
                let mut cell_start_flag = false;
                // Pass 1: scan a narrow, adaptively-widened band of cells per
                // row, pruning lines through visible blockers. No visibility
                // is reported yet.
                for row in 0..distance + 1 {
                    let mut found = false;
                    for cell in cell_start..=cell_end {
                        let idx = row_cell_to_index(row as usize, cell as usize);
                        let point = octant.calc_point(src, (row, cell).into());
                        if let Some(pcid) = self.buffer.lines.get(idx) {
                            let visible = !active_lines.bit_disjoint(&pcid.ids);
                            if visible {
                                let blocks = (self.callback)(self.cb_type, point, FovCallbackEnum::IsBlocked);
                                if !blocks {
                                    found = true;
                                    cell_end = cell + 1;
                                    if !cell_start_flag {
                                        cell_start_flag = true;
                                        // `cell` is a signed index (inferred from
                                        // `row`), so this is a range check, not an
                                        // underflow guard.
                                        cell_start = if cell - 1 >= 0 { cell - 1 } else { cell };
                                    }
                                } else {
                                    active_lines.bit_andnot(&pcid.ids);
                                }
                            }
                        }
                    }
                    if active_lines.bit_none() {
                        return false; // every candidate line is blocked
                    }
                    assert!(found == true);
                }
                cell_start = 0;
                cell_end = 1;
                // Pass 2: the surviving lines reach dst; walk the band again
                // (interior rows only) and mark cells on them visible.
                for row in 1..distance {
                    let mut found = false;
                    let mut cell_start_flag = false;
                    for cell in cell_start..=cell_end {
                        let point = octant.calc_point(src, (row, cell).into());
                        let idx = row_cell_to_index(row as usize, cell as usize);
                        if let Some(pcid) = self.buffer.lines.get(idx) {
                            let visible = !active_lines.bit_disjoint(&pcid.ids);
                            if visible {
                                found = true;
                                if !cell_start_flag {
                                    cell_start_flag = true;
                                    cell_start = if cell - 1 >= 0 { cell - 1 } else { cell };
                                }
                                cell_end = if cell + 1 <= row + 1 { cell + 1 } else { row };
                                (self.callback)(self.cb_type, point, FovCallbackEnum::SetVisible(true));
                            }
                        }
                    }
                    assert!(found == true);
                }
                return true;
            }
        }
        // No octant (src == dst case aside) or index out of table range.
        false
    }
}
|
use crate::schema::*;
// Row of the `actor` table (query-side mapping).
#[derive(Queryable, Debug)]
pub struct Actor {
    pub actor_id: i32,
    pub first_name: String,
    pub last_name: String,
    pub last_update: diesel::pg::data_types::PgTimestamp
}
// Insertable counterpart of `Actor` for the `actor` table.
#[derive(Insertable)]
#[table_name = "actor"]
pub struct NewActor {
    pub actor_id: i32,
    pub first_name: String,
    pub last_name: String,
    pub last_update: diesel::pg::data_types::PgTimestamp
}
// Row of the `address` table.
#[derive(Queryable, Debug)]
pub struct Address {
    pub address_id: i32,
    pub address: String,
    pub address2: Option<String>,
    pub district: String,
    pub city_id: i32,
    pub postal_code: Option<String>,
    pub phone: String,
    pub last_update: diesel::pg::data_types::PgTimestamp
}
// Row of the `category` table.
#[derive(Queryable, Debug)]
pub struct Category {
    pub category_id: i32,
    pub name: String,
    pub last_update: diesel::pg::data_types::PgTimestamp
}
// Row of the `city` table.
#[derive(Queryable, Debug)]
pub struct City {
    pub city_id: i32,
    pub city_name: String,
    pub country_id: i32,
    pub last_update: diesel::pg::data_types::PgTimestamp
}
// Row of the `country` table.
#[derive(Queryable, Debug)]
pub struct Country {
    pub country_id: i32,
    pub country_name: String,
    pub last_update: diesel::pg::data_types::PgTimestamp
}
// Row of the `customer` table.
#[derive(Queryable, Debug)]
pub struct Customer {
    pub customer_id: i32,
    pub store_id: i32,
    pub first_name: String,
    pub last_name: String,
    pub email: Option<String>,
    pub address_id: i32,
    pub activebool: bool,
    pub create_date: diesel::pg::data_types::PgDate,
    pub last_update: Option<diesel::pg::data_types::PgTimestamp>,
    pub active: Option<i32>
}
// NOTE(review): mid-file `use`; conventionally belongs with the imports at
// the top of the module.
use diesel_derive_enum::DbEnum;
// Film rating enum mapped to a database enum type; `db_rename` supplies the
// exact database spelling for each variant (e.g. "PG-13").
#[derive(Debug, DbEnum)]
pub enum MyEnum {
    #[db_rename = "G"]
    G,
    #[db_rename = "PG"]
    PG,
    #[db_rename = "PG-13"]
    PG13,
    #[db_rename = "R"]
    R,
    #[db_rename = "NC-17"]
    NC17
}
// Row of the `film` table.
#[derive(Queryable, Debug)]
pub struct Film {
    pub film_id: i32,
    pub title: String,
    pub description: Option<String>,
    pub release_year: Option<i32>,
    pub language_id: i32,
    pub original_language_id: Option<i32>,
    pub rental_duration: i32,
    pub rental_rate: diesel::pg::data_types::PgNumeric,
    pub length: Option<i32>,
    pub replacement_cost: diesel::pg::data_types::PgNumeric,
    pub rating: MyEnum,
    pub last_update: diesel::pg::data_types::PgTimestamp,
    pub special_features: Option<Vec<String>>,
    pub fulltext: String
}
// Join row linking films to actors.
#[derive(Queryable, Debug)]
pub struct FilmActor {
    pub actor_id: i32,
    pub film_id: i32,
    pub last_update: diesel::pg::data_types::PgTimestamp
}
// Join row linking films to categories.
#[derive(Queryable, Debug)]
pub struct FilmCategory {
    pub film_id: i32,
    pub category_id: i32,
    pub last_update: diesel::pg::data_types::PgTimestamp
}
// Row of the `inventory` table.
#[derive(Queryable, Debug)]
pub struct Inventory {
    pub inventory_id: i32,
    pub film_id: i32,
    pub store_id: i32,
    pub last_update: diesel::pg::data_types::PgTimestamp
}
// Row of the `language` table.
#[derive(Queryable, Debug)]
pub struct Language {
    pub language_id: i32,
    pub name: String,
    pub last_update: diesel::pg::data_types::PgTimestamp
}
// Row of the `rental` table.
#[derive(Queryable, Debug)]
pub struct Rental {
    pub rental_id: i32,
    pub rental_date: diesel::pg::data_types::PgTimestamp,
    pub inventory_id: i32,
    pub customer_id: i32,
    pub return_date: Option<diesel::pg::data_types::PgTimestamp>,
    pub staff_id: i32,
    pub last_update: diesel::pg::data_types::PgTimestamp
}
// Row of the `staff` table.
#[derive(Queryable, Debug)]
pub struct Staff {
    pub staff_id: i32,
    pub first_name: String,
    pub last_name: String,
    pub address_id: i32,
    pub email: Option<String>,
    pub store_id: i32,
    pub active: bool,
    pub username: String,
    pub password: Option<String>,
    pub last_update: diesel::pg::data_types::PgTimestamp,
    pub picture: Option<Vec<u8>>,
}
// Row of the `store` table.
#[derive(Queryable, Debug)]
pub struct Store {
    pub store_id: i32,
    pub manager_staff_id: i32,
    pub address_id: i32,
    pub last_update: diesel::pg::data_types::PgTimestamp
}
|
extern crate chrono;
use chrono::{DateTime, Utc};
pub fn log(msg: String) {
let now: DateTime<Utc> = Utc::now();
print!("[{}]: ", now.to_rfc3339());
println!("{}", msg);
} |
use byteorder::{LittleEndian, ReadBytesExt, WriteBytesExt};
use std::io::{self, prelude::*};
use serde::{Serialize, Deserialize};
mod helper;
pub use helper::*;
/// Unity-style asset reference: a (file id, path id) pair locating an object
/// inside an asset bundle.
#[derive(PartialEq, Eq, Clone, Copy, Debug, Hash, Serialize, Deserialize)]
pub struct Pointer {
    #[serde(rename = "FileID")]
    pub file: i32,
    #[serde(rename = "PathID")]
    pub path: i64,
}
impl Pointer {
    /// Binary layout: little-endian i32 file id, then little-endian i64 path id.
    pub fn write(&self, writer: &mut impl Write) -> io::Result<()> {
        writer.write_i32::<LittleEndian>(self.file)?;
        writer.write_i64::<LittleEndian>(self.path)?;
        Ok(())
    }
    /// Inverse of [`Pointer::write`].
    pub fn read(reader: &mut impl Read) -> io::Result<Self> {
        let file = reader.read_i32::<LittleEndian>()?;
        let path = reader.read_i64::<LittleEndian>()?;
        Ok(Self { file, path, })
    }
}
/// One difficulty entry of a beatmap; serde renames mirror the original
/// Unity field names.
#[derive(PartialEq, Clone, Debug, Serialize, Deserialize)]
pub struct Difficulty {
    #[serde(rename = "_difficulty")]
    pub difficulty: i32,
    #[serde(rename = "_difficultyRank")]
    pub rank: i32,
    #[serde(rename = "_noteJumpMovementSpeed")]
    pub note_jump: f32,
    #[serde(rename = "_noteJumpStartBeatOffset")]
    pub note_jump_offset: i32,
    #[serde(rename = "_beatmapData")]
    pub beatmap: Pointer,
}
/// Length-prefixed list of [`Difficulty`] entries.
#[derive(PartialEq, Clone, Debug, Serialize, Deserialize)]
pub struct Difficulties {
    #[serde(rename = "Array")]
    pub vec: Vec<Difficulty>,
    // Element count as stored on disk; written as the u32 length prefix.
    pub size: u32,
}
impl Difficulties {
    /// Binary layout: u32 count, then each difficulty in order.
    /// NOTE(review): writes `self.size` as the prefix, not `vec.len()` —
    /// callers must keep the two in sync.
    pub fn write(&self, writer: &mut impl Write) -> io::Result<()> {
        writer.write_u32::<LittleEndian>(self.size)?;
        for difficulty in &self.vec {
            difficulty.write(writer)?;
        }
        Ok(())
    }
    /// Inverse of [`Difficulties::write`].
    pub fn read(reader: &mut impl Read) -> io::Result<Self> {
        let mut vec = Vec::new();
        let size = reader.read_u32::<LittleEndian>()?;
        for _ in 0..size {
            vec.push(Difficulty::read(reader)?);
        }
        Ok(Self { vec, size, })
    }
}
impl Difficulty {
    /// Binary layout: i32 difficulty, i32 rank, f32 note-jump speed,
    /// i32 note-jump offset, then the beatmap-data pointer.
    pub fn write(&self, writer: &mut impl Write) -> io::Result<()> {
        writer.write_i32::<LittleEndian>(self.difficulty)?;
        writer.write_i32::<LittleEndian>(self.rank)?;
        writer.write_f32::<LittleEndian>(self.note_jump)?;
        writer.write_i32::<LittleEndian>(self.note_jump_offset)?;
        self.beatmap.write(writer)?;
        Ok(())
    }
    /// Inverse of [`Difficulty::write`].
    pub fn read(reader: &mut impl Read) -> io::Result<Self> {
        let difficulty = reader.read_i32::<LittleEndian>()?;
        let rank = reader.read_i32::<LittleEndian>()?;
        let note_jump = reader.read_f32::<LittleEndian>()?;
        let note_jump_offset = reader.read_i32::<LittleEndian>()?;
        let beatmap = Pointer::read(reader)?;
        Ok(Self { difficulty, rank, note_jump, note_jump_offset, beatmap, })
    }
}
/// Top-level beatmap asset; field order matches the on-disk binary layout
/// used by `write`/`read` below, so do not reorder fields without updating both.
#[derive(PartialEq, Clone, Debug, Serialize, Deserialize)]
pub struct Beatmap {
    #[serde(rename = "GameObject")]
    pub game_obj: Pointer,
    #[serde(rename = "Enabled")]
    pub enabled: u32,
    #[serde(rename = "MonoScript")]
    pub script: Pointer,
    #[serde(rename = "Name")]
    pub name: String,
    #[serde(rename = "_levelID")]
    pub id: String,
    #[serde(rename = "_songName")]
    pub song_name: String,
    #[serde(rename = "_songSubName")]
    pub song_sub_name: String,
    #[serde(rename = "_songAuthorName")]
    pub song_author_name: String,
    #[serde(rename = "_levelAuthorName")]
    pub author: String,
    #[serde(rename = "_audioClip")]
    pub audio_clip: Pointer,
    #[serde(rename = "_beatsPerMinute")]
    pub bpm: f32,
    #[serde(rename = "_songTimeOffset")]
    pub time_offset: f32,
    #[serde(rename = "_shuffle")]
    pub shuffle: f32,
    #[serde(rename = "_shufflePeriod")]
    pub shuffle_period: f32,
    #[serde(rename = "_previewStartTime")]
    pub preview_start: f32,
    #[serde(rename = "_previewDuration")]
    pub preview_len: f32,
    #[serde(rename = "_coverImageTexture2D")]
    pub cover: Pointer,
    #[serde(rename = "_environmentSceneInfo")]
    pub environment: Pointer,
    #[serde(rename = "_difficultyBeatmapSets")]
    pub difficulties: Difficulties,
}
impl Beatmap {
    /// Serializes every field in declaration order; strings use the aligned
    /// string helper from the `helper` module.
    pub fn write(&self, writer: &mut impl Write) -> io::Result<()> {
        self.game_obj.write(writer)?;
        writer.write_u32::<LittleEndian>(self.enabled)?;
        self.script.write(writer)?;
        write_aligned_str(writer, &self.name)?;
        write_aligned_str(writer, &self.id)?;
        write_aligned_str(writer, &self.song_name)?;
        write_aligned_str(writer, &self.song_sub_name)?;
        write_aligned_str(writer, &self.song_author_name)?;
        write_aligned_str(writer, &self.author)?;
        self.audio_clip.write(writer)?;
        writer.write_f32::<LittleEndian>(self.bpm)?;
        writer.write_f32::<LittleEndian>(self.time_offset)?;
        writer.write_f32::<LittleEndian>(self.shuffle)?;
        writer.write_f32::<LittleEndian>(self.shuffle_period)?;
        writer.write_f32::<LittleEndian>(self.preview_start)?;
        writer.write_f32::<LittleEndian>(self.preview_len)?;
        self.cover.write(writer)?;
        self.environment.write(writer)?;
        self.difficulties.write(writer)?;
        Ok(())
    }
    /// Inverse of [`Beatmap::write`]; reads fields in the same order.
    pub fn read(reader: &mut impl Read) -> io::Result<Self> {
        let game_obj = Pointer::read(reader)?;
        let enabled = reader.read_u32::<LittleEndian>()?;
        let script = Pointer::read(reader)?;
        let name = read_aligned_str(reader)?;
        let id = read_aligned_str(reader)?;
        let song_name = read_aligned_str(reader)?;
        let song_sub_name = read_aligned_str(reader)?;
        let song_author_name = read_aligned_str(reader)?;
        let author = read_aligned_str(reader)?;
        let audio_clip = Pointer::read(reader)?;
        let bpm = reader.read_f32::<LittleEndian>()?;
        let time_offset = reader.read_f32::<LittleEndian>()?;
        let shuffle = reader.read_f32::<LittleEndian>()?;
        let shuffle_period = reader.read_f32::<LittleEndian>()?;
        let preview_start = reader.read_f32::<LittleEndian>()?;
        let preview_len = reader.read_f32::<LittleEndian>()?;
        let cover = Pointer::read(reader)?;
        let environment = Pointer::read(reader)?;
        let difficulties = Difficulties::read(reader)?;
        Ok(Self { game_obj, enabled, script, name, id, song_name, song_sub_name, song_author_name, author, audio_clip, bpm, time_offset, shuffle, shuffle_period, preview_start, preview_len, cover, environment, difficulties, })
    }
}
#[cfg(test)]
mod tests {
    // NOTE(review): placeholder — no tests yet. Round-trip (write then read)
    // tests for Pointer/Difficulty/Beatmap would fit here.
    use super::*;
}
|
use std::error::Error;
use std::io::{self, prelude::*};
// Entry point: feed stdin (locked once for buffered reads) to the test runner.
fn main() -> Result<(), Box<dyn Error>> {
    run_tests(io::stdin().lock())
}
/// Panics if the input isn't correctly formatted.
///
/// Input format: a case count `T`, then per case a line `N B` (item count and
/// budget) followed by a line of `N` prices. Prints `Case #t: answer` per case.
fn run_tests(input: impl BufRead) -> Result<(), Box<dyn Error>> {
    let mut lines = input.lines();
    let case_count: u32 = lines.next().unwrap()?.parse()?;
    for case in 1..=case_count {
        let header = lines.next().unwrap()?;
        let mut words = header.split_whitespace();
        let item_count: usize = words.next().unwrap().parse()?;
        let budget: u32 = words.next().unwrap().parse()?;
        assert!(words.next().is_none());
        let price_line = lines.next().unwrap()?;
        let prices: Vec<u32> = price_line
            .split_whitespace()
            .map(|w| w.parse::<u32>())
            .collect::<Result<_, _>>()?;
        assert_eq!(prices.len(), item_count);
        let ans = max_num_purchases(prices, budget);
        println!("Case #{}: {}", case, ans);
    }
    // The input must contain nothing beyond the declared cases.
    assert!(lines.next().is_none());
    Ok(())
}
/// Greedy: buy the cheapest items first; returns how many fit within `budget`.
fn max_num_purchases(mut prices: Vec<u32>, mut budget: u32) -> usize {
    prices.sort_unstable();
    let total = prices.len();
    // The answer is the index of the first unaffordable item in sorted order,
    // or the full count if everything is affordable.
    prices
        .iter()
        .position(|&price| {
            if price <= budget {
                budget -= price;
                false
            } else {
                true
            }
        })
        .unwrap_or(total)
}
|
use crate::{
widget,
widget::unit::content::{ContentBoxItemLayout, ContentBoxItemNode, ContentBoxNode},
widget_component,
};
use serde::{Deserialize, Serialize};
/// Props for the content box widget.
#[derive(Debug, Default, Clone, Serialize, Deserialize)]
pub struct ContentBoxProps {
    // When true, children are clipped to the box bounds.
    #[serde(default)]
    pub clipping: bool,
}
implement_props_data!(ContentBoxProps, "ContentBoxProps");
widget_component! {
    // Container widget: wraps each listed slot that carries props into a
    // ContentBoxItemNode with its per-item layout; slots without props are
    // dropped.
    pub content_box(id, props, listed_slots) {
        let ContentBoxProps { clipping } = props.read_cloned_or_default();
        let items = listed_slots.into_iter().filter_map(|slot| {
            if let Some(props) = slot.props() {
                // Missing layout props fall back to the default item layout.
                let layout = props.read_cloned_or_default::<ContentBoxItemLayout>();
                Some(ContentBoxItemNode {
                    slot,
                    layout,
                })
            } else {
                None
            }
        }).collect::<Vec<_>>();
        widget! {{{
            ContentBoxNode {
                id: id.to_owned(),
                props: props.clone(),
                items,
                clipping,
            }
        }}}
    }
}
|
use async_trait::async_trait;
use uuid::Uuid;
use common::cache::Cache;
use common::error::Error;
use common::infrastructure::cache::InMemCache;
use common::result::Result;
use crate::domain::author::AuthorId;
use crate::domain::category::CategoryId;
use crate::domain::collection::{Collection, CollectionId, CollectionRepository};
use crate::mocks;
/// In-memory implementation of `CollectionRepository`, backed by an
/// `InMemCache`; intended for tests and local development.
pub struct InMemCollectionRepository {
    cache: InMemCache<CollectionId, Collection>,
}
impl InMemCollectionRepository {
    /// Creates an empty repository.
    pub fn new() -> Self {
        InMemCollectionRepository {
            cache: InMemCache::new(),
        }
    }
    /// Creates a repository pre-seeded with one mock collection.
    pub async fn populated() -> Self {
        let repo = Self::new();
        repo.save(&mut mocks::empty_collection1()).await.unwrap();
        repo
    }
}
impl Default for InMemCollectionRepository {
    fn default() -> Self {
        Self::new()
    }
}
#[async_trait]
impl CollectionRepository for InMemCollectionRepository {
async fn next_id(&self) -> Result<CollectionId> {
let id = Uuid::new_v4();
CollectionId::new(id.to_string())
}
async fn find_all(&self) -> Result<Vec<Collection>> {
Ok(self.cache.all().await)
}
async fn find_by_id(&self, id: &CollectionId) -> Result<Collection> {
self.cache
.get(id)
.await
.ok_or(Error::new("collection", "not_found"))
}
async fn find_by_author_id(&self, author_id: &AuthorId) -> Result<Vec<Collection>> {
Ok(self
.cache
.filter(|&(_, collection)| collection.author_id() == author_id)
.await)
}
async fn find_by_category_id(&self, category_id: &CategoryId) -> Result<Vec<Collection>> {
Ok(self
.cache
.filter(|&(_, collection)| collection.header().category_id() == category_id)
.await)
}
async fn search(&self, text: &str) -> Result<Vec<Collection>> {
Ok(self
.cache
.filter(|&(_, collection)| collection.header().name().value().contains(text))
.await)
}
async fn save(&self, collection: &mut Collection) -> Result<()> {
self.cache
.set(collection.base().id().clone(), collection.clone())
.await
}
}
|
#[macro_use]
extern crate lazy_static;
use chrono::{DateTime, NaiveDateTime, Utc};
use redis::FromRedisValue;
use redis::InfoDict;
use std::collections::HashMap;
use std::time::Duration;
lazy_static! {
    // Commands the monitor itself issues; their slowlog entries are skipped
    // so the tool does not report its own traffic.
    static ref IGNORE_COMMANDS: Vec<&'static str> = vec!["SLOWLOG", "INFO"];
}
/// Parsed `redis_version` triple from `INFO server`.
#[derive(Default, Debug)]
struct RedisVersion {
    major: usize,
    minor: usize,
    patch: usize,
}
/// Queries `INFO server` and parses the `redis_version` field.
///
/// Returns `None` when the server reports no version. Panics with a
/// descriptive message when the INFO command fails or the version string is
/// malformed (missing or non-numeric components).
fn get_version(conn: &redis::Connection) -> Option<RedisVersion> {
    let info: InfoDict = redis::cmd("INFO")
        .arg("server")
        .query(conn)
        .expect("fail info command");
    let version_str = info.get("redis_version").unwrap_or_else(|| "".to_string());
    if version_str.is_empty() {
        return None;
    }
    // Parse "major.minor.patch". Checked access replaces the previous
    // `v[1]`/`v[2]` indexing, which panicked with an opaque out-of-bounds
    // message on short strings such as "6.2".
    let mut parts = version_str.split('.');
    let mut next_component = |name: &str| -> usize {
        parts
            .next()
            .and_then(|p| p.parse::<usize>().ok())
            .unwrap_or_else(|| panic!("invalid {} version", name))
    };
    Some(RedisVersion {
        major: next_component("major"),
        minor: next_component("minor"),
        patch: next_component("patch"),
    })
}
/// One entry from `SLOWLOG GET`.
#[derive(Default, Debug)]
struct RedisSlowlog {
    id: u64,
    // Unix time (seconds) when the command ran.
    timestamp: u64,
    exec_time: Duration,
    // Command name and arguments as reported by the server.
    cmd: Vec<String>,
    address: String, // support by Redis 4.0 or greater
    client_name: String, // support by Redis 4.0 or greater
}
/// Fetches up to `num` slowlog entries, decoding the 6-field layout for
/// Redis >= 4 and the older 4-field layout otherwise, and dropping entries
/// for commands the monitor itself issues.
fn get_slowlogs(conn: &redis::Connection, num: usize, version: usize) -> Vec<RedisSlowlog> {
    let mut slowlogs: Vec<RedisSlowlog> = vec![];
    let raw_slowlogs: Vec<redis::Value> = redis::cmd("SLOWLOG")
        .arg("GET")
        .arg(format!("{}", num))
        .query(conn)
        .expect("fail slowlog command");
    for raw_slowlog in raw_slowlogs.iter() {
        let slowlog = if version >= 4 {
            let s: (u64, u64, u64, Vec<String>, String, String) =
                FromRedisValue::from_redis_value(raw_slowlog).unwrap();
            RedisSlowlog {
                id: s.0,
                timestamp: s.1,
                exec_time: Duration::from_micros(s.2),
                cmd: s.3,
                address: s.4,
                client_name: s.5,
            }
        } else {
            let s: (u64, u64, u64, Vec<String>) =
                FromRedisValue::from_redis_value(raw_slowlog).unwrap();
            RedisSlowlog {
                id: s.0,
                timestamp: s.1,
                exec_time: Duration::from_micros(s.2),
                cmd: s.3,
                ..RedisSlowlog::default()
            }
        };
        // BUG FIX: guard with `first()` — the previous `slowlog.cmd[0]`
        // panicked if the server ever returned an empty command list.
        if slowlog
            .cmd
            .first()
            .map_or(false, |c| IGNORE_COMMANDS.contains(&c.to_uppercase().as_str()))
        {
            continue;
        }
        slowlogs.push(slowlog);
    }
    slowlogs
}
// Polls SLOWLOG every 5 seconds and prints each entry once, deduplicated by
// slowlog id across iterations.
fn main() {
    let client = redis::Client::open("redis://127.0.0.1").expect("fail connect redis");
    let conn = client
        .get_connection()
        .expect("fail to get redis connection");
    let redis_version = get_version(&conn);
    match redis_version {
        Some(ref v) => println!("redis version: {}.{}.{}", v.major, v.minor, v.patch),
        None => println!("redis version: unknown"),
    }
    // Unknown version (major 0) falls back to the pre-4.0 slowlog layout.
    let redis_version_major = match redis_version {
        Some(v) => v.major,
        None => 0,
    };
    // Keyed by slowlog id so each entry is reported exactly once.
    let mut all_slowlogs: HashMap<u64, RedisSlowlog> = HashMap::new();
    loop {
        let slowlogs = get_slowlogs(&conn, 100, redis_version_major);
        for slowlog in slowlogs {
            if !all_slowlogs.contains_key(&slowlog.id) {
                // Skip entries whose timestamp is out of chrono's range.
                let ndt = NaiveDateTime::from_timestamp_opt(slowlog.timestamp as i64, 0);
                if ndt.is_none() {
                    continue;
                }
                // NOTE(review): `DateTime::from_utc` is deprecated in newer
                // chrono releases — confirm the pinned chrono version.
                let dt = DateTime::<Utc>::from_utc(ndt.unwrap(), Utc);
                println!(
                    "[{:?}] id={}, time={:.1}[ms], cmd='{:?}', address={}, name={}",
                    dt,
                    slowlog.id,
                    slowlog.exec_time.subsec_nanos() as f64 * 1e-6,
                    slowlog.cmd,
                    slowlog.address,
                    slowlog.client_name
                );
                all_slowlogs.insert(slowlog.id, slowlog);
            }
        }
        std::thread::sleep(Duration::from_millis(5000));
    }
}
|
extern crate day_06_memory_reallocation;
use day_06_memory_reallocation::memory_reallocate;
// Entry point for Advent of Code day 6: counts redistribution steps until a
// previously-seen memory-bank configuration repeats.
fn main() {
    let puzzle = vec![4, 1, 15, 12, 0, 9, 9, 5, 5, 8, 7, 3, 14, 5, 12, 3];
    let steps = memory_reallocate(puzzle);
    println!("Steps to seen config: {}", steps);
}
|
//! This module corresponds to `mach/mach_init.h`.
use port::mach_port_t;
use mach_types::{thread_port_t, host_t};
use vm_types::vm_size_t;
use kern_return::kern_return_t;
extern "C" {
    /// FFI: returns the host port for the current task.
    pub fn mach_host_self() -> mach_port_t;
    /// FFI: returns the thread port for the calling thread.
    pub fn mach_thread_self() -> thread_port_t;
    /// FFI: writes the host's page size into `size`; returns a kern_return code.
    pub fn host_page_size(host: host_t, size: *mut vm_size_t) -> kern_return_t;
}
#[cfg(test)]
mod tests {
    use mach_init::*;
    use port::*;
    #[test]
    fn mach_host_self_test() {
        // A valid host port is non-zero.
        unsafe {
            let host = mach_host_self();
            assert!(host != 0);
        }
    }
    #[test]
    fn mach_thread_self_test() {
        // The thread port must be a real send right, not NULL/DEAD sentinels.
        unsafe {
            let this_thread = mach_thread_self();
            assert!(this_thread != MACH_PORT_NULL);
            assert!(this_thread != MACH_PORT_DEAD);
        }
    }
    #[test]
    fn host_page_size_test() {
        // 0 is KERN_SUCCESS; a successful call yields a positive page size.
        unsafe {
            let mut ps: vm_size_t = 0;
            assert!(0 == host_page_size(mach_host_self(), &mut ps));
            assert!(ps > 0);
        };
    }
}
|
pub mod planet_events;
|
use alloc::{boxed::Box, sync::Arc};
use core::{ops::Drop, task::Poll};
pub struct Buffer {
device: Arc<wgpu::Device>,
pub(crate) buffer: Arc<wgpu::Buffer>,
pub(crate) offset: usize,
pub(crate) size: usize,
free: Box<dyn Fn() + Sync + Send + 'static>,
}
impl Buffer {
    /// Wraps the (`offset`, `size`) range of `buffer`, registering `free`
    /// to run when this handle is dropped.
    pub(crate) fn new<F>(device: Arc<wgpu::Device>, buffer: Arc<wgpu::Buffer>, offset: usize, size: usize, free: F) -> Self
    where
        F: Fn() + Sync + Send + 'static,
    {
        Self {
            device,
            buffer,
            offset,
            size,
            free: Box::new(free),
        }
    }
    /// Maps this range for writing and copies `data` into it.
    ///
    /// NOTE(review): busy-polls the device until the map future resolves.
    /// `copy_from_slice` panics unless `data.len()` equals the mapped length
    /// (`self.size`) — confirm callers guarantee this.
    pub async fn write(&self, data: &[u8]) -> Result<(), wgpu::BufferAsyncErr> {
        // TODO move poll to event loop
        let mut future = self.buffer.map_write(self.offset as u64, self.size as u64);
        let mut mapping;
        loop {
            // Poll the map future manually, kicking the device between polls
            // so the mapping can actually complete.
            if let Poll::Ready(x) = futures::poll!(&mut future) {
                mapping = x?;
                break;
            }
            self.device.poll(wgpu::Maintain::Wait);
        }
        mapping.as_slice().copy_from_slice(data);
        Ok(())
    }
    /// Binding resource describing exactly this sub-range of the buffer.
    pub(crate) fn binding_resource(&self) -> wgpu::BindingResource {
        wgpu::BindingResource::Buffer {
            buffer: &self.buffer,
            range: self.offset as u64..self.offset as u64 + self.size as u64,
        }
    }
}
impl Drop for Buffer {
    fn drop(&mut self) {
        // Run the registered callback; presumably releases this sub-range
        // back to whoever allocated it — confirm with the allocator code.
        (self.free)()
    }
}
|
use crate::utils::{
config::{
LOCK_TYPE_FLAG, METRIC_TYPE_FLAG_MASK, REMAIN_FLAGS_BITS, SINCE_TYPE_TIMESTAMP, VALUE_MASK,
},
transaction::{get_sum_sudt_amount, XChainKind},
types::{Error, ToCKBCellDataView},
};
use alloc::string::String;
use alloc::vec::Vec;
use bech32::ToBase32;
use bitcoin_spv::types::{HeaderArray, MerkleArray, PayloadType, Vin, Vout};
use bitcoin_spv::{btcspv, validatespv};
use ckb_std::ckb_constants::Source;
use ckb_std::ckb_types::{bytes::Bytes, prelude::*};
use ckb_std::debug;
use ckb_std::high_level::{
load_cell, load_cell_capacity, load_cell_data, load_input_since, QueryIter,
};
use primitive_types::U256;
use tockb_types::config::{BTC_ADDRESS_PREFIX, TX_PROOF_DIFFICULTY_FACTOR};
use tockb_types::generated::btc_difficulty::BTCDifficultyReader;
use tockb_types::generated::mint_xt_witness::BTCSPVProofReader;
use tockb_types::{BtcExtraView, XExtraView};
/// Reads `since` from the first input of this script group, validates its
/// flag bits, and returns the masked value (used as the auction time).
///
/// Fails with `InputSinceInvalid` when the since field cannot be loaded or
/// its flags do not encode a timestamp-typed value.
pub fn verify_since() -> Result<u64, Error> {
    let since = load_input_since(0, Source::GroupInput).map_err(|_| Error::InputSinceInvalid)?;
    // Reject unless: no reserved flag bits are set, the lock-type flag is
    // present, and the metric type is the timestamp encoding.
    if since & REMAIN_FLAGS_BITS != 0 // check flags is valid
        || since & LOCK_TYPE_FLAG == 0 // check if it is relative_flag
        || since & METRIC_TYPE_FLAG_MASK != SINCE_TYPE_TIMESTAMP
    // check if it is timestamp value
    {
        return Err(Error::InputSinceInvalid);
    }
    // Strip the flag bits, leaving only the raw value.
    let auction_time = since & VALUE_MASK;
    Ok(auction_time)
}
pub fn verify_since_by_value(value: u64) -> Result<(), Error> {
let since = load_input_since(0, Source::GroupInput)?;
if since != value {
return Err(Error::InputSinceInvalid);
}
Ok(())
}
/// Sums the SUDT amount the bidder supplies (inputs[1..]) and checks it
/// covers `lot_amount + signer_fee`. Returns the summed amount.
#[allow(non_snake_case)]
pub fn verify_auction_inputs(
    toCKB_lock_hash: &[u8],
    lot_amount: u128,
    signer_fee: u128,
) -> Result<u128, Error> {
    // inputs[0]: toCKB cell
    // inputs[1:]: XT cell the bidder provides
    // check XT cell on inputs
    let inputs_amount = get_sum_sudt_amount(1, Source::Input, toCKB_lock_hash)?;
    if inputs_amount < lot_amount + signer_fee {
        return Err(Error::FundingNotEnough);
    }
    Ok(inputs_amount)
}
/// Requires the input and output toCKB cells (index 0 of the script group)
/// to carry identical capacity.
///
/// NOTE(review): the `expect`s panic if either cell is missing — presumably
/// guaranteed by the transaction structure checks; confirm callers always
/// run with both cells present.
pub fn verify_capacity() -> Result<(), Error> {
    let cap_input = load_cell_capacity(0, Source::GroupInput).expect("get input capacity");
    let cap_output = load_cell_capacity(0, Source::GroupOutput).expect("get output capacity");
    if cap_input != cap_output {
        return Err(Error::CapacityInvalid);
    }
    Ok(())
}
/// Sums the capacity of all output cells locked by the signer's lockscript
/// and checks the total is at least `value`.
///
/// Fix: the original collected the capacities into an intermediate
/// `Vec<u64>` only to immediately `sum()` it — a pointless allocation
/// (clippy `needless_collect`), which matters in no_std contract code.
pub fn verify_capacity_with_value(input_data: &ToCKBCellDataView, value: u64) -> Result<(), Error> {
    let sum: u64 = QueryIter::new(load_cell, Source::Output)
        .filter(|cell| cell.lock().as_bytes() == input_data.signer_lockscript)
        .map(|cell| cell.capacity().unpack())
        .sum();
    if sum < value {
        return Err(Error::CapacityInvalid);
    }
    Ok(())
}
/// Checks that the invariant fields of the toCKB cell are unchanged between
/// input and output, and that the output's unlock address is well-formed for
/// the chain kind. Returns the lot size expressed as an SUDT amount.
#[allow(non_snake_case)]
pub fn verify_data(
    input_toCKB_data: &ToCKBCellDataView,
    out_toCKB_data: &ToCKBCellDataView,
) -> Result<u128, Error> {
    let lot_size = match input_toCKB_data.get_xchain_kind() {
        XChainKind::Btc => {
            // Lot size must not change across the transition.
            if out_toCKB_data.get_btc_lot_size()? != input_toCKB_data.get_btc_lot_size()? {
                return Err(Error::InvariantDataMutated);
            }
            // BTC unlock addresses must decode as valid bech32 (see verify_btc_address).
            verify_btc_address(out_toCKB_data.x_unlock_address.as_ref())?;
            out_toCKB_data.get_btc_lot_size()?.get_sudt_amount()
        }
        XChainKind::Eth => {
            if out_toCKB_data.get_eth_lot_size()? != input_toCKB_data.get_eth_lot_size()? {
                return Err(Error::InvariantDataMutated);
            }
            // ETH unlock addresses are raw 20-byte values.
            if out_toCKB_data.x_unlock_address.as_ref().len() != 20 {
                return Err(Error::XChainAddressInvalid);
            }
            out_toCKB_data.get_eth_lot_size()?.get_sudt_amount()
        }
    };
    // These four fields are invariant across the state transition.
    if input_toCKB_data.user_lockscript != out_toCKB_data.user_lockscript
        || input_toCKB_data.x_lock_address != out_toCKB_data.x_lock_address
        || input_toCKB_data.signer_lockscript != out_toCKB_data.signer_lockscript
        || input_toCKB_data.x_extra != out_toCKB_data.x_extra
    {
        return Err(Error::InvariantDataMutated);
    }
    Ok(lot_size)
}
/// Verifies a BTC SPV proof from the witness and checks the funding output
/// pays at least `expect_value` to `expect_address` (P2WPKH/bech32 only).
///
/// Returns a `BtcExtraView`: the spent outpoint when `is_return_vin` is set,
/// otherwise the (tx hash, funding output index) of the funding output.
pub fn verify_btc_witness(
    _data: &ToCKBCellDataView,
    proof: &[u8],
    cell_dep_index_list: &[u8],
    expect_address: &[u8],
    expect_value: u128,
    is_return_vin: bool,
) -> Result<BtcExtraView, Error> {
    debug!(
        "proof: {:?}, cell_dep_index_list: {:?}",
        proof, cell_dep_index_list
    );
    // parse difficulty — exactly one cell dep carries the difficulty data
    if cell_dep_index_list.len() != 1 {
        return Err(Error::InvalidWitness);
    }
    let dep_data = load_cell_data(cell_dep_index_list[0].into(), Source::CellDep)?;
    debug!("dep data is {:?}", &dep_data);
    if BTCDifficultyReader::verify(&dep_data, false).is_err() {
        return Err(Error::DifficultyDataInvalid);
    }
    let difficulty_reader = BTCDifficultyReader::new_unchecked(&dep_data);
    debug!("difficulty_reader: {:?}", difficulty_reader);
    // parse witness — molecule layout must validate before new_unchecked
    if BTCSPVProofReader::verify(proof, false).is_err() {
        return Err(Error::InvalidWitness);
    }
    let proof_reader = BTCSPVProofReader::new_unchecked(proof);
    debug!("proof_reader: {:?}", proof_reader);
    // verify btc spv (header chain, difficulty, merkle inclusion)
    let tx_hash = verify_btc_spv(proof_reader, difficulty_reader)?;
    // verify transfer amount, to matches
    let funding_output_index = proof_reader.funding_output_index().into();
    let vout = Vout::new(proof_reader.vout().raw_data())?;
    let tx_out = vout.index(funding_output_index as usize)?;
    let script_pubkey = tx_out.script_pubkey();
    debug!("script_pubkey payload: {:?}", script_pubkey.payload()?);
    match script_pubkey.payload()? {
        PayloadType::WPKH(pkh) => {
            // Re-encode the witness pubkey hash as a bech32 address
            // (witness version 0 prepended) and compare to the expected one.
            let mut addr_u5 = Vec::with_capacity(33);
            addr_u5.push(bech32::u5::try_from_u8(0).unwrap());
            addr_u5.extend(pkh.to_base32());
            debug!("addr_u5: {:?}", &addr_u5);
            let addr = bech32::encode(BTC_ADDRESS_PREFIX, addr_u5)
                .expect("bech32 encode should not return error");
            debug!(
                "hex format: addr: {}, expect_address: {}",
                hex::encode(addr.as_bytes().to_vec()),
                hex::encode(expect_address.as_ref().to_vec())
            );
            debug!(
                "addr: {}, expect_address: {}",
                String::from_utf8(addr.as_bytes().to_vec()).unwrap(),
                String::from_utf8(expect_address.as_ref().to_vec()).unwrap()
            );
            if addr.as_bytes() != expect_address {
                return Err(Error::WrongFundingAddr);
            }
        }
        // Only P2WPKH funding outputs are supported.
        _ => return Err(Error::UnsupportedFundingType),
    }
    let value = tx_out.value() as u128;
    debug!("actual value: {}, expect: {}", value, expect_value);
    // Over-funding is accepted; only under-funding is an error.
    if value < expect_value {
        return Err(Error::FundingNotEnough);
    }
    if is_return_vin {
        // Caller wants the outpoint this tx spends (funding input side).
        let funding_input_index: u32 = proof_reader.funding_input_index().into();
        let vin = Vin::new(proof_reader.vin().raw_data())?;
        let tx_in = vin.index(funding_input_index as usize)?;
        debug!(
            "vin tx_id {}",
            hex::encode(tx_in.outpoint().txid_le().as_ref().as_ref())
        );
        debug!("vin output index {}", tx_in.outpoint().vout_index());
        Ok(BtcExtraView {
            lock_tx_hash: tx_in.outpoint().txid_le().as_ref().as_ref().into(),
            lock_vout_index: tx_in.outpoint().vout_index(),
        })
    } else {
        Ok(BtcExtraView {
            lock_tx_hash: tx_hash,
            lock_vout_index: funding_output_index,
        })
    }
}
/// Verifies an SPV proof showing the signer misbehaved: the proven tx spends
/// the locked BTC outpoint recorded in the cell's `x_extra`, and — when
/// `is_when_redeeming` — that the signer paid the user less than the lot.
pub fn verify_btc_faulty_witness(
    data: &ToCKBCellDataView,
    proof: &[u8],
    cell_dep_index_list: &[u8],
    is_when_redeeming: bool,
) -> Result<(), Error> {
    debug!(
        "proof: {:?}, cell_dep_index_list: {:?}",
        proof, cell_dep_index_list
    );
    // parse difficulty — exactly one cell dep carries the difficulty data
    if cell_dep_index_list.len() != 1 {
        return Err(Error::InvalidWitness);
    }
    let dep_data = load_cell_data(cell_dep_index_list[0].into(), Source::CellDep)?;
    debug!("dep data is {:?}", &dep_data);
    if BTCDifficultyReader::verify(&dep_data, false).is_err() {
        return Err(Error::DifficultyDataInvalid);
    }
    let difficulty_reader = BTCDifficultyReader::new_unchecked(&dep_data);
    debug!("difficulty_reader: {:?}", difficulty_reader);
    // parse witness — molecule layout must validate before new_unchecked
    if BTCSPVProofReader::verify(proof, false).is_err() {
        return Err(Error::InvalidWitness);
    }
    let proof_reader = BTCSPVProofReader::new_unchecked(proof);
    debug!("proof_reader: {:?}", proof_reader);
    // verify btc spv (header chain, difficulty, merkle inclusion)
    verify_btc_spv(proof_reader, difficulty_reader)?;
    // get tx in
    let funding_input_index: u32 = proof_reader.funding_input_index().into();
    let vin = Vin::new(proof_reader.vin().raw_data())?;
    let tx_in = vin.index(funding_input_index as usize)?;
    // get mint_xt's funding_output info from cell_data
    let btc_extra = match &data.x_extra {
        XExtraView::Btc(extra) => Ok(extra),
        _ => Err(Error::FaultyBtcWitnessInvalid),
    }?;
    // check if the locked btc is transferred by signer
    let btc_extra_txid: Vec<u8> = btc_extra.lock_tx_hash.clone().into();
    debug!(
        "btc_extra_txid: {}, tx_in.outpoint().txid_le(): {}",
        hex::encode(btc_extra_txid.as_slice()),
        hex::encode(tx_in.outpoint().txid_le().as_ref().as_ref())
    );
    debug!(
        "btc_extra.lock_vout_index: {}, tx_in.outpoint().vout_index(): {}",
        btc_extra.lock_vout_index,
        tx_in.outpoint().vout_index()
    );
    // The proven tx must spend exactly the recorded locked outpoint.
    if tx_in.outpoint().txid_le().as_ref().as_ref() != btc_extra_txid.as_slice()
        || tx_in.outpoint().vout_index() != btc_extra.lock_vout_index
    {
        return Err(Error::FaultyBtcWitnessInvalid);
    }
    // if is_when_redeeming, check if signer transferred insufficient btc_amount to user_unlock_addr
    if is_when_redeeming {
        debug!("verify_btc_faulty_witness is_when_redeeming");
        // verify transfer amount, to matches
        let vout = Vout::new(proof_reader.vout().raw_data())?;
        let mut index: usize = 0;
        let mut sum_amount: u128 = 0;
        let expect_address = data.x_unlock_address.as_ref();
        let lot_amount = data.get_btc_lot_size()?.get_sudt_amount();
        // calc sum_amount which signer transferred to user
        debug!("begin calc sum_amount which signer transferred to user");
        // Walk every output until indexing past the end fails.
        loop {
            let tx_out = match vout.index(index.into()) {
                Ok(out) => out,
                Err(_) => {
                    break;
                }
            };
            index += 1;
            let script_pubkey = tx_out.script_pubkey();
            match script_pubkey.payload()? {
                PayloadType::WPKH(pkh) => {
                    // Re-encode as bech32 (witness version 0) and compare with
                    // the user's unlock address; skip non-matching outputs.
                    let mut addr_u5 = Vec::with_capacity(33);
                    addr_u5.push(bech32::u5::try_from_u8(0).unwrap());
                    addr_u5.extend(pkh.to_base32());
                    debug!("addr_u5: {:?}", &addr_u5);
                    let addr = bech32::encode(BTC_ADDRESS_PREFIX, addr_u5)
                        .expect("bech32 encode should not return error");
                    debug!(
                        "hex format: addr: {}, x_lock_address: {}",
                        hex::encode(addr.as_bytes().to_vec()),
                        hex::encode(data.x_lock_address.as_ref().to_vec())
                    );
                    debug!(
                        "addr: {}, x_unlock_address: {}",
                        String::from_utf8(addr.as_bytes().to_vec()).unwrap(),
                        String::from_utf8(expect_address.to_vec()).unwrap()
                    );
                    if addr.as_bytes() != expect_address {
                        continue;
                    }
                }
                // Non-P2WPKH outputs never count toward the user's total.
                _ => continue,
            }
            sum_amount += tx_out.value() as u128;
        }
        debug!(
            "calc sum_amount: {}, lot_amount: {}",
            sum_amount, lot_amount
        );
        if sum_amount >= lot_amount {
            // it means signer transferred enough amount to user, mismatch FaultyWhenRedeeming condition
            return Err(Error::FaultyBtcWitnessInvalid);
        }
    }
    Ok(())
}
/// Full SPV verification of a BTC transaction: vin/vout well-formedness,
/// txid recomputation, header-chain work against the on-chain difficulty
/// (current or previous period), and merkle inclusion of the tx.
/// Returns the verified (little-endian) tx id as bytes.
pub fn verify_btc_spv(
    proof: BTCSPVProofReader,
    difficulty: BTCDifficultyReader,
) -> Result<Bytes, Error> {
    debug!("start verify_btc_spv");
    if !btcspv::validate_vin(proof.vin().raw_data()) {
        return Err(Error::SpvProofInvalid);
    }
    debug!("finish validate_vin");
    if !btcspv::validate_vout(proof.vout().raw_data()) {
        return Err(Error::SpvProofInvalid);
    }
    debug!("finish validate_vout");
    let mut ver = [0u8; 4];
    ver.copy_from_slice(proof.version().raw_data());
    let mut lock = [0u8; 4];
    lock.copy_from_slice(proof.locktime().raw_data());
    debug!("ver: {:?}, lock: {:?}", ver, lock);
    // btcspv::hash256(&[version, vin.as_ref(), vout.as_ref(), locktime])
    let vin = Vin::new(proof.vin().raw_data())?;
    let vout = Vout::new(proof.vout().raw_data())?;
    debug!("{:?}", &[&ver, vin.as_ref(), vout.as_ref(), &lock]);
    // Recompute the txid from the raw components and require it to match
    // the one claimed in the proof.
    let tx_id = validatespv::calculate_txid(&ver, &vin, &vout, &lock);
    debug!("tx_id: {:?}", tx_id);
    if tx_id.as_ref() != proof.tx_id().raw_data() {
        return Err(Error::WrongTxId);
    }
    // verify difficulty
    let raw_headers = proof.headers();
    let headers = HeaderArray::new(raw_headers.raw_data())?;
    let observed_diff = validatespv::validate_header_chain(&headers, false)?;
    let previous_diff = U256::from_little_endian(difficulty.previous().raw_data());
    let current_diff = U256::from_little_endian(difficulty.current().raw_data());
    let first_header_diff = headers.index(0).difficulty();
    debug!(
        "previous: {:?}, current: {:?}, first_header_diff: {:?}",
        previous_diff, current_diff, first_header_diff
    );
    // The proof's first header must sit in either the current or previous
    // difficulty period recorded on chain.
    let req_diff = if first_header_diff == current_diff {
        current_diff
    } else if first_header_diff == previous_diff {
        previous_diff
    } else {
        return Err(Error::NotAtCurrentOrPreviousDifficulty);
    };
    // Accumulated work must reach the configured confirmation factor.
    if observed_diff < req_diff * TX_PROOF_DIFFICULTY_FACTOR {
        return Err(Error::InsufficientDifficulty);
    }
    debug!("finish diff verify");
    // verify tx — merkle inclusion against the last header's tx root
    let header = headers.index(headers.len() - 1);
    let mut idx = [0u8; 8];
    idx.copy_from_slice(proof.index().raw_data());
    debug!("tx_id: {}", hex::encode(tx_id.as_ref()));
    debug!("merkle_root: {}", hex::encode(header.tx_root().as_ref()));
    debug!(
        "proof: {}",
        hex::encode(proof.intermediate_nodes().raw_data())
    );
    debug!("index: {}", u64::from_le_bytes(idx));
    if !validatespv::prove(
        tx_id,
        header.tx_root(),
        &MerkleArray::new(proof.intermediate_nodes().raw_data())?,
        u64::from_le_bytes(idx),
    ) {
        return Err(Error::BadMerkleProof);
    }
    debug!("finish merkle proof verify");
    Ok(Bytes::from(&tx_id.as_ref()[..]))
}
pub fn verify_btc_address(addr: &[u8]) -> Result<(), Error> {
let (hrp, data) =
bech32::decode(core::str::from_utf8(addr).map_err(|_| Error::XChainAddressInvalid)?)
.map_err(|_| Error::XChainAddressInvalid)?;
if hrp != BTC_ADDRESS_PREFIX {
return Err(Error::XChainAddressInvalid);
}
if data.len() != 33 {
return Err(Error::XChainAddressInvalid);
}
if data[0].to_u8() != 0 {
return Err(Error::XChainAddressInvalid);
}
Ok(())
}
|
pub mod cmd_queue;
pub mod modeless;
pub mod personal_data;
pub use cmd_queue::CmdQueue;
pub use modeless::Modeless;
pub use personal_data::PersonalData;
|
use super::*;
/// Single-byte sweep-control register wrapper.
/// Bit layout (accessors generated by the bitfield macros):
///   bits 0..=2  sweep_shift
///   bit  3      frequency_decreasing
///   bits 4..=6  sweep_time
/// NOTE(review): looks like a Game Boy NR10-style register — confirm against
/// the hardware docs this module targets.
#[derive(Debug, Clone, Copy, Default, PartialEq, Eq)]
#[repr(transparent)]
pub struct ToneSweep(u8);
impl ToneSweep {
    const_new!();
    bitfield_int!(u8; 0..=2: u8, sweep_shift, with_sweep_shift, set_sweep_shift);
    bitfield_bool!(u8; 3, frequency_decreasing, with_frequency_decreasing, set_frequency_decreasing);
    bitfield_int!(u8; 4..=6: u8, sweep_time, with_sweep_time, set_sweep_time);
}
|
use crate::request::prelude::*;
/// Lazily-started request to delete an invite by its code.
pub struct DeleteInvite<'a> {
    // Invite code to delete.
    code: String,
    // In-flight request future, created on first poll via `start`.
    fut: Option<Pending<'a, ()>>,
    http: &'a Client,
    // Optional audit-log reason attached as a request header.
    reason: Option<String>,
}
impl<'a> DeleteInvite<'a> {
    /// Creates the builder; nothing is sent until the future is polled.
    pub(crate) fn new(http: &'a Client, code: impl Into<String>) -> Self {
        Self {
            code: code.into(),
            fut: None,
            http,
            reason: None,
        }
    }
    /// Attaches an audit-log reason to the eventual request.
    pub fn reason(mut self, reason: impl Into<String>) -> Self {
        self.reason.replace(reason.into());
        self
    }
    /// Builds the request (adding the audit header when a reason was set)
    /// and stores the in-flight future in `self.fut`.
    fn start(&mut self) -> Result<()> {
        let request = if let Some(reason) = &self.reason {
            let headers = audit_header(&reason)?;
            Request::from((
                headers,
                Route::DeleteInvite {
                    code: self.code.clone(),
                },
            ))
        } else {
            Request::from(Route::DeleteInvite {
                code: self.code.clone(),
            })
        };
        self.fut.replace(Box::pin(self.http.verify(request)));
        Ok(())
    }
}
poll_req!(DeleteInvite<'_>, ());
|
extern crate staticfile;
extern crate mount;
extern crate iron;
use std::env;
use std::path::Path;
use staticfile::Static;
use mount::Mount;
use iron::Iron;
/// Serves the `html` directory under `$ROOT` on port 80.
///
/// Fix: `env::var("ROOT").unwrap()` panicked with an unhelpful message when
/// the variable was unset; use `expect` with a descriptive message. Also
/// drops the needless `&*root` reborrow.
fn main() {
    let root = env::var("ROOT").expect("ROOT environment variable must be set");
    let mut mount = Mount::new();
    mount.mount("/", Static::new(Path::new(&root).join("html")));
    Iron::new(mount).http("0.0.0.0:80").unwrap();
}
|
use gembiler::code_generator::intermediate;
// End-to-end smoke test: parse a small program and make sure the
// intermediate-code generator accepts it; prints the IR for inspection.
#[test]
fn it_works() {
    let code = r#"
        DECLARE
            a, b
        BEGIN
            READ a;
            IF a GEQ 0 THEN
                WHILE a GE 0 DO
                    b ASSIGN a DIV 2;
                    b ASSIGN 2 TIMES b;
                    IF a GE b THEN
                        WRITE 1;
                    ELSE
                        WRITE 0;
                    ENDIF
                    a ASSIGN a DIV 2;
                ENDWHILE
            ENDIF
        END
    "#;
    let ast = parser::parse_ast(code);
    assert!(ast.is_ok());
    let program = ast.unwrap();
    let ir = intermediate::generate(&program);
    assert!(ir.is_ok());
    println!("{:?}", ir.unwrap());
}
|
//! ## Data for the [`Mission` component](https://docs.lu-dev.net/en/latest/components/084-mission.html)
use serde::{Deserialize, Serialize};
/// Data for the [`Mission` component](https://docs.lu-dev.net/en/latest/components/084-mission.html)
// NOTE(review): the serde renames ("cur", "m", "cct", ...) must stay in sync
// with the serialized save-file schema — keep them aligned across these structs.
#[derive(Default, Debug, PartialEq, Deserialize, Serialize)]
pub struct Missions {
    /// Completed missions
    pub done: MissionList,
    /// Currently active missions
    #[serde(rename = "cur")]
    pub current: MissionList,
}
#[derive(Default, Debug, PartialEq, Deserialize, Serialize)]
/// A list of missions
pub struct MissionList {
    /// List of missions
    // Serialized as repeated "m" elements.
    #[serde(rename = "m")]
    pub missions: Vec<Mission>,
}
/// A single mission
#[derive(Default, Debug, PartialEq, Deserialize, Serialize)]
pub struct Mission {
    /// State of the mission
    state: u8, // FIXME: DLU specific?
    /// ID from the [`Missions` table](https://docs.lu-dev.net/en/latest/database/Missions.html)
    id: u32,
    /// Amount of times completed (Can be more than 1 for repeatable missions)
    #[serde(default, rename = "cct")]
    completion_count: u32,
    /// Timestamp of last completion in seconds.
    // Optional: absent for missions that were never completed.
    #[serde(rename = "cts")]
    completion_time: Option<u64>,
    #[serde(default, rename = "sv")]
    /// For achievements like collecting flags, there is one of this that has the displayed
    /// progress N, and N other <sv> elements that seem to have a bitflag in the id?
    sub_value: Vec<MissionSubValue>,
}
/// Progress for a task
#[derive(Default, Debug, PartialEq, Deserialize, Serialize)]
pub struct MissionSubValue {
    /// Value of the progress.
    // Serialized as the "v" attribute/element.
    #[serde(rename = "v")]
    value: u32,
}
|
use std::fmt::{ Debug, Formatter, Result as FmtResult };
use std::cmp::{ PartialEq, Ordering };
use std::hash::{ Hash, Hasher };
use std::collections::HashMap;
use std::cell::RefCell;
use std::rc::Rc;
use crate::vm::error::RuntimeError;
use crate::common::Value;
/// Shared, interiorly-mutable table mapping `Value` keys to `Value`s.
/// Cloning a `Table` clones the `Rc`, so clones alias the same map.
#[derive(Clone, PartialEq)]
pub struct Table {
    pub tbl: Rc<RefCell<HashMap<Value, Value>>>
}
impl Table {
    /// Creates an empty table backed by a shared, mutable hash map.
    pub fn new() -> Self {
        let map = HashMap::new();
        Table { tbl: Rc::new(RefCell::new(map)) }
    }
    /// Rejects indices that tables forbid: nil keys and NaN numeric keys.
    pub fn validate_index(&self, idx: &Value) -> Result<(), RuntimeError> {
        match idx {
            Value::Nil => Err(RuntimeError::TableIdxNil),
            Value::Number(n) if n.is_nan() => Err(RuntimeError::TableIdxNaN),
            _ => Ok(()),
        }
    }
    /// Looks up `idx`, yielding `Value::Nil` for absent keys.
    #[inline]
    pub fn get(&self, idx: &Value) -> Result<Value, RuntimeError> {
        self.validate_index(idx)?;
        let map = self.tbl.borrow();
        let found = map.get(idx).cloned();
        Ok(found.unwrap_or(Value::Nil))
    }
    /// Stores `val` under `idx` after validating the key.
    #[inline]
    pub fn insert(&self, idx: Value, val: Value) -> Result<(), RuntimeError> {
        self.validate_index(&idx)?;
        self.tbl.borrow_mut().insert(idx, val);
        Ok(())
    }
    /// Number of key/value pairs currently stored.
    #[inline]
    pub fn len(&self) -> usize {
        self.tbl.borrow().len()
    }
}
impl Debug for Table {
    // Debug-format as the underlying map's contents.
    fn fmt(&self, fmt: &mut Formatter<'_>) -> FmtResult {
        write!(fmt, "{:?}", self.tbl.borrow())
    }
}
impl Hash for Table {
    // Hashes by identity (the Rc's pointer), not by contents.
    // NOTE(review): derived PartialEq compares map *contents*, so two
    // content-equal tables can hash differently — this violates the usual
    // Eq/Hash contract if Tables are ever used as hash-map keys; confirm
    // intended semantics (identity vs. value equality).
    fn hash<H>(&self, state: &mut H) where H: Hasher {
        Rc::as_ptr(&self.tbl).hash(state)
    }
}
impl PartialOrd for Table {
    // Tables have no defined ordering; every comparison is None
    // (all of <, <=, >, >= evaluate to false between tables).
    fn partial_cmp(&self, _other: &Table) -> Option<Ordering> {
        None
    }
}
/// Part 1 policy: in a line "MIN-MAX C: PASSWORD", the letter C must occur
/// between MIN and MAX times (inclusive) in PASSWORD.
///
/// Fix: the original compiled a `Regex` on every call, and this function is
/// called once per input line from `main`'s filter — a classic
/// regex-compilation-in-loop cost. Plain string splitting does the same
/// parse with no per-call compilation (panics on malformed lines, as the
/// original's `unwrap`s did).
fn is_valid(s: &str) -> bool {
    let (range, rest) = s.split_once(' ').expect("malformed policy line");
    let (min, max) = range.split_once('-').expect("malformed count range");
    let min: usize = min.parse().expect("bad min count");
    let max: usize = max.parse().expect("bad max count");
    let (letter, password) = rest.split_once(": ").expect("malformed letter/password");
    let cnt = password.matches(letter).count();
    (min..=max).contains(&cnt)
}
/// Part 2 policy: exactly one of the two (1-based) positions must contain
/// the given letter.
///
/// Fix: as with `is_valid`, the original recompiled a `Regex` on every call
/// even though it runs once per input line; stdlib splitting avoids that
/// (panics on malformed lines, matching the original's `unwrap`s).
fn is_valid_part2(s: &str) -> bool {
    let (range, rest) = s.split_once(' ').expect("malformed policy line");
    let (p1, p2) = range.split_once('-').expect("malformed positions");
    let p1: usize = p1.parse().expect("bad first position");
    let p2: usize = p2.parse().expect("bad second position");
    let (letter, password) = rest.split_once(": ").expect("malformed letter/password");
    let ch: u8 = letter.as_bytes()[0];
    let bytes = password.as_bytes();
    let first: u8 = bytes[p1 - 1];
    let second: u8 = bytes[p2 - 1];
    // Exactly one of the two positions may hold the letter.
    (first == ch || second == ch) && first != second
}
/// Reads the puzzle input and prints how many passwords satisfy each policy.
///
/// Fix: the original read and allocated the whole "input" file twice; read
/// it once and run both validators over the same string.
fn main() {
    let input = std::fs::read_to_string("input").expect("file not found!");
    let count = input.lines().filter(|x| is_valid(x)).count();
    let count2 = input.lines().filter(|x| is_valid_part2(x)).count();
    println!("Liczba poprawnych haseł {}", count);
    println!("Liczba poprawnych haseł v2 {}", count2);
}
|
use actix_web::web::Data;
use actix_web_httpauth::extractors::basic::BasicAuth;
use actix_web::HttpResponse;
use super::*;
use crate::controller::State as TargetState;
use crate::controller::{Command, Event};
///////////////////// lamp commands ///////////////////////////////
/// Toggles the lamps; 200 on success, auth error otherwise.
pub fn toggle(state: Data<State>, auth: BasicAuth) -> HttpResponse {
    // Reject unauthenticated callers before touching the controller.
    if !authenticated(auth) {
        return make_auth_error();
    }
    state
        .controller_addr
        .send(Event::Command(Command::LampsToggle))
        .unwrap();
    HttpResponse::Ok().finish()
}
/// Dims the lamps; 200 on success, auth error otherwise.
pub fn dim(state: Data<State>, auth: BasicAuth) -> HttpResponse {
    // Reject unauthenticated callers before touching the controller.
    if !authenticated(auth) {
        return make_auth_error();
    }
    state
        .controller_addr
        .send(Event::Command(Command::LampsDim))
        .unwrap();
    HttpResponse::Ok().finish()
}
/// Sets the lamps to their dimmest level; 200 on success, auth error otherwise.
pub fn dimmest(state: Data<State>, auth: BasicAuth) -> HttpResponse {
    // Reject unauthenticated callers before touching the controller.
    if !authenticated(auth) {
        return make_auth_error();
    }
    state
        .controller_addr
        .send(Event::Command(Command::LampsDimmest))
        .unwrap();
    HttpResponse::Ok().finish()
}
/// Sets the daytime lamp preset; 200 on success, auth error otherwise.
pub fn normal(state: Data<State>, auth: BasicAuth) -> HttpResponse {
    // Reject unauthenticated callers before touching the controller.
    if !authenticated(auth) {
        return make_auth_error();
    }
    state
        .controller_addr
        .send(Event::Command(Command::LampsDay))
        .unwrap();
    HttpResponse::Ok().finish()
}
/// Sets the evening lamp preset; 200 on success, auth error otherwise.
pub fn evening(state: Data<State>, auth: BasicAuth) -> HttpResponse {
    // Reject unauthenticated callers before touching the controller.
    if !authenticated(auth) {
        return make_auth_error();
    }
    state
        .controller_addr
        .send(Event::Command(Command::LampsEvening))
        .unwrap();
    HttpResponse::Ok().finish()
}
/// Sets the night lamp preset; 200 on success, auth error otherwise.
pub fn night(state: Data<State>, auth: BasicAuth) -> HttpResponse {
    // Reject unauthenticated callers before touching the controller.
    if !authenticated(auth) {
        return make_auth_error();
    }
    state
        .controller_addr
        .send(Event::Command(Command::LampsNight))
        .unwrap();
    HttpResponse::Ok().finish()
}
//////////////////////// go to state commands /////////////////////////////////
/// Switches the controller into the LightLoop state; 200 on success,
/// auth error otherwise.
pub fn lightloop(state: Data<State>, auth: BasicAuth) -> HttpResponse {
    // Reject unauthenticated callers before touching the controller.
    if !authenticated(auth) {
        return make_auth_error();
    }
    state
        .controller_addr
        .send(Event::Command(Command::ChangeState(TargetState::LightLoop)))
        .unwrap(); // TODO FIXME: surface send failures instead of panicking
    HttpResponse::Ok().finish()
}
|
use std::hash::Hash;
/// The four playing-card suits.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub enum Suit {
    Diamonds,
    Clubs,
    Hearts,
    Spades,
}
impl Suit {
    /// All four suits, in Diamonds → Clubs → Hearts → Spades order.
    pub fn list() -> Vec<Self> {
        [Self::Diamonds, Self::Clubs, Self::Hearts, Self::Spades].to_vec()
    }
}
#[cfg(test)]
mod tests {
    use super::Suit;
    // Sanity-check derived PartialEq for every variant.
    #[test]
    fn test_equality() {
        assert_eq!(Suit::Diamonds, Suit::Diamonds);
        assert_eq!(Suit::Clubs, Suit::Clubs);
        assert_eq!(Suit::Hearts, Suit::Hearts);
        assert_eq!(Suit::Spades, Suit::Spades);
    }
    // `list` must return all four suits in the documented order.
    #[test]
    fn test_list() {
        assert_eq!(
            Suit::list(),
            vec![Suit::Diamonds, Suit::Clubs, Suit::Hearts, Suit::Spades]
        )
    }
}
|
use flate2::{read::GzDecoder, write::GzEncoder, Compression};
use group::{RsaGroup, RsaQuotientGroup, SemiGroup};
use num_bigint::BigUint;
use num_traits::Num;
use rug::{ops::Pow, Assign, Integer};
use serde::{Deserialize, Serialize};
use std::collections::BTreeMap;
use std::fs::File;
use std::io::{BufRead, BufReader};
use std::ops::{Index, MulAssign, RemAssign};
use std::path::PathBuf;
use std::rc::Rc;
use super::int_set::IntSet;
#[derive(Clone, Debug, Deserialize, Serialize, PartialEq, Eq)]
/// A comb of precomputed powers of a base, plus optional precomputed tables of combinations
pub struct ParExpComb {
    // Bases; length is enforced to be a power of two (see `_check`).
    bs: Vec<Integer>,
    // Modulus all arithmetic is reduced by.
    m: Integer,
    // Log2 of the bit spacing between successive precomputed exponents.
    lgsp: usize,
    // Precomputed tables: ts[i] holds products of subsets of a chunk of bases.
    ts: Vec<Vec<Integer>>,
    // Number of bases covered by each table (power of two, 0 = no tables).
    npt: usize,
}
/// pcb[idx] is the idx'th precomputed table
impl Index<usize> for ParExpComb {
    type Output = Vec<Integer>;
    fn index(&self, idx: usize) -> &Self::Output {
        // Panics if idx >= self.ts.len(), like any slice index.
        &self.ts[idx]
    }
}
impl Default for ParExpComb {
    /// get default precomps
    fn default() -> Self {
        // XXX(HACK): we read from $CARGO_MANIFEST_DIR/lib/pcb_dflt
        let dir = std::env::var("CARGO_MANIFEST_DIR")
            .expect("Missing CARGO_MANIFEST_DIR env variable (needed for ParExpComb)\nPlease run using cargo.");
        let mut pbuf = PathBuf::from(dir);
        pbuf.push("lib");
        pbuf.push("pcb_dflt");
        // Panics if the bundled file is missing or malformed (see deserialize).
        Self::deserialize(pbuf.to_str().unwrap())
    }
}
#[allow(clippy::len_without_is_empty)]
impl ParExpComb {
    // ** initialization and precomputation ** //
    /// read in a file with bases
    ///
    /// File format: first line is the modulus in hex; every following line
    /// is one base in hex. Panics on I/O or parse failure.
    pub fn from_file(filename: &str, log_spacing: usize) -> Self {
        let mut ifile = BufReader::new(File::open(filename).unwrap());
        let modulus = {
            let mut mbuf = String::new();
            ifile.read_line(&mut mbuf).unwrap();
            Integer::from_str_radix(&mbuf, 16).unwrap()
        };
        let ret = Self {
            bs: ifile
                .lines()
                .map(|x| Integer::from_str_radix(x.unwrap().as_ref(), 16).unwrap())
                .collect(),
            m: modulus,
            lgsp: log_spacing,
            ts: Vec::new(),
            npt: 0,
        };
        ret._check();
        ret
    }
    /// build tables from bases
    ///
    /// Each table covers `n_per_table` bases and stores all 2^n_per_table
    /// subset products mod m. `n_per_table == 0` clears the tables.
    pub fn make_tables(&mut self, n_per_table: usize) {
        // parallel table building with Rayon
        use rayon::prelude::*;
        // n_per_table must be a power of 2 or things get messy
        assert!(n_per_table.is_power_of_two());
        // reset tables and n_per_table
        self.ts.clear();
        self.npt = n_per_table;
        if n_per_table == 0 {
            return;
        }
        // for each n bases, compute powerset of values
        self.ts.reserve(self.bs.len() / n_per_table + 1);
        self.ts.par_extend(self.bs.par_chunks(n_per_table).map({
            // closure would capture borrow of self, which breaks because self is borrowed already.
            // instead, borrow the piece of self we need outside, then move the borrow inside
            // http://smallcultfollowing.com/babysteps/blog/2018/04/24/rust-pattern-precise-closure-capture-clauses/
            let modulus = &self.m;
            move |x| _make_table(x, modulus)
        }));
    }
    // ** exponentiation ** //
    /// Parallel exponentiation using windows and combs
    ///
    /// Computes base^expt mod m by splitting the exponent's bits across the
    /// precomputed tables and combining per-chunk partial products.
    /// Panics if expt is negative or too large for this precomputation.
    pub fn exp(&self, expt: &Integer) -> Integer {
        use rayon::prelude::*;
        // expt must be positive
        let expt_sign = expt.cmp0();
        assert_ne!(expt_sign, std::cmp::Ordering::Less);
        if expt_sign == std::cmp::Ordering::Equal {
            return Integer::from(1);
        }
        // figure out how many of the tables we'll need to use
        let bits_per_expt = 1 << self.log_spacing();
        let expts_per_table = self.n_per_table();
        let bits_per_table = bits_per_expt * expts_per_table;
        let n_sig_bits = expt.significant_bits() as usize;
        let n_tables = (n_sig_bits + bits_per_table - 1) / bits_per_table;
        // make sure this precomp is big enough!
        assert!(n_sig_bits < (1 << (self.log_spacing() + self.log_num_bases())));
        assert!(n_tables <= self.len());
        // figure out chunk size
        let n_threads = rayon::current_num_threads();
        let tables_per_chunk = (n_tables + n_threads - 1) / n_threads;
        // parallel multiexponentiation
        let modulus = &self.m;
        self.ts[0..n_tables]
            .par_chunks(tables_per_chunk)
            .enumerate()
            .map(|(chunk_idx, ts)| {
                // Square-and-multiply over this chunk's exponent bits, one
                // comb row (bdx) at a time, most-significant row first.
                let mut acc = Integer::from(1);
                let chunk_offset = chunk_idx * tables_per_chunk * bits_per_table;
                for bdx in (0..bits_per_expt).rev() {
                    for (tdx, tsent) in ts.iter().enumerate() {
                        // Gather one exponent bit per base into a table index.
                        let mut val = 0u32;
                        for edx in 0..expts_per_table {
                            let bitnum =
                                chunk_offset + tdx * bits_per_table + edx * bits_per_expt + bdx;
                            let bit = expt.get_bit(bitnum as u32) as u32;
                            val |= bit << edx;
                        }
                        acc.mul_assign(&tsent[val as usize]);
                        acc.rem_assign(modulus);
                    }
                    // Square between rows (skip after the last row).
                    if bdx != 0 {
                        acc.square_mut();
                        acc.rem_assign(modulus);
                    }
                }
                acc
            })
            .reduce(
                || Integer::from(1),
                |mut acc, next| {
                    acc.mul_assign(&next);
                    acc.rem_assign(modulus);
                    acc
                },
            )
    }
    // ** serialization ** //
    /// write struct to a file
    pub fn serialize(&self, filename: &str) {
        // bincode, gzip-compressed.
        let output = GzEncoder::new(File::create(filename).unwrap(), Compression::default());
        bincode::serialize_into(output, self).unwrap();
    }
    /// read struct from file
    pub fn deserialize(filename: &str) -> Self {
        let input = GzDecoder::new(File::open(filename).unwrap());
        let ret: Self = bincode::deserialize_from(input).unwrap();
        ret._check();
        ret
    }
    // ** accessors and misc ** //
    /// return number of tables
    pub fn len(&self) -> usize {
        self.ts.len()
    }
    /// return number of bases per precomputed table (i.e., log2(table.len()))
    pub fn n_per_table(&self) -> usize {
        self.npt
    }
    /// log of the number of bases in this struct
    pub fn log_num_bases(&self) -> usize {
        // this works because we enforce self.bs.len() is power of two
        self.bs.len().trailing_zeros() as usize
    }
    /// spacing between successive exponents
    pub fn log_spacing(&self) -> usize {
        self.lgsp
    }
    /// return iterator over tables
    pub fn iter(&self) -> std::slice::Iter<Vec<Integer>> {
        self.ts.iter()
    }
    /// ref to bases
    pub fn bases(&self) -> &[Integer] {
        &self.bs[..]
    }
    /// ref to modulus
    pub fn modulus(&self) -> &Integer {
        &self.m
    }
    // ** internal ** //
    // internal consistency checks --- fn should be called on any newly created object
    fn _check(&self) {
        assert!(self.bs.len().is_power_of_two());
    }
}
// make a table from a set of bases
//
// Returns a vector of length 2^bases.len() where entry `i` is the product
// (mod `modulus`) of every base whose bit is set in `i`; entry 0 is 1.
fn _make_table(bases: &[Integer], modulus: &Integer) -> Vec<Integer> {
    let mut ret = vec![Integer::new(); 1 << bases.len()];
    // base case: 0 and 1
    ret[0].assign(1);
    ret[1].assign(&bases[0]);
    // compute powerset of bases
    // for each element in bases
    for (bnum, base) in bases.iter().enumerate().skip(1) {
        let base_idx = 1 << bnum;
        // multiply bases[bnum] by the first base_idx elms of ret
        // (entries [base_idx, 2*base_idx) are entries [0, base_idx) times base)
        let (src, dst) = ret.split_at_mut(base_idx);
        for idx in 0..base_idx {
            dst[idx].assign(&src[idx] * base);
            dst[idx].rem_assign(modulus);
        }
    }
    ret
}
// ** utility traits ** //
/// Two-way conversion between a group-element type and rug's `Integer`.
pub trait IntegerConversion {
    fn to_integer(s: &Self) -> Integer;
    fn from_integer(i: &Integer) -> Self;
}
impl IntegerConversion for BigUint {
    // Round-trips through a radix-32 string, the common format both
    // bignum libraries can parse.
    fn to_integer(n: &BigUint) -> Integer {
        Integer::from_str_radix(n.to_str_radix(32).as_ref(), 32).unwrap()
    }
    fn from_integer(n: &Integer) -> BigUint {
        // Panics if `n` is negative (BigUint is unsigned).
        BigUint::from_str_radix(n.to_string_radix(32).as_ref(), 32).unwrap()
    }
}
/// Canonicalizes a raw value into a well-formed element of the group.
pub trait IntoElem: SemiGroup {
    fn into_elem(&self, e: <Self as SemiGroup>::Elem) -> <Self as SemiGroup>::Elem;
}
impl IntoElem for RsaGroup {
    // Plain reduction mod the group modulus.
    fn into_elem(&self, n: BigUint) -> BigUint {
        &n % &self.m
    }
}
impl IntoElem for RsaQuotientGroup {
    // Quotient group identifies n with m - n; pick the smaller
    // representative so every element has a unique canonical form.
    fn into_elem(&self, mut n: BigUint) -> BigUint {
        n = &n % &self.m;
        let y = &self.m - &n;
        std::cmp::min(n, y)
    }
}
// ** ParallelExpSet ** //
/// ParallelExpSet uses precomputed tables to speed up rebuilding the set
#[derive(Clone, PartialEq, Eq, Debug)]
pub struct ParallelExpSet<G: SemiGroup> {
    group: G,
    // Multiset: element -> multiplicity.
    elements: BTreeMap<Integer, usize>,
    // Cached accumulator digest; None means it must be recomputed.
    digest: Option<Integer>,
    comb: Rc<ParExpComb>, // NOTE: does this need to be Arc?
}
impl<G: SemiGroup> ParallelExpSet<G> {
    // Bases grouped per precomputed table (must be a power of two).
    const N_PER_TABLE: usize = 8;
    /// Drops the cached digest so the next `digest()` call rebuilds it
    /// from scratch (in parallel) instead of updating incrementally.
    pub fn clear_digest(&mut self) {
        self.digest = None;
    }
}
impl<G: SemiGroup> IntSet for ParallelExpSet<G>
where
    G::Elem: Ord + IntegerConversion,
    G: IntoElem,
{
    type G = G;
    fn new(group: G) -> Self {
        let mut pc = ParExpComb::default();
        pc.make_tables(Self::N_PER_TABLE);
        Self {
            digest: None, // start with None so that new_with builds in parallel by default
            elements: BTreeMap::new(),
            group,
            comb: Rc::new(pc),
        }
    }
    // FIXME? insert_all will insert one by one. This is slow if you're inserting
    // lots of elements at once, say, more than 1/4 of the current size.
    // In this case, you can call clear_digest() to clear the digest first.
    fn new_with<I: IntoIterator<Item = BigUint>>(group: G, items: I) -> Self {
        let mut this = Self::new(group);
        this.insert_all(items);
        this
    }
    fn insert(&mut self, n: BigUint) {
        let int_n = <BigUint as IntegerConversion>::to_integer(&n);
        // If a digest is cached, update it incrementally (multiply in the
        // new element mod m); otherwise leave it for a parallel rebuild.
        if let Some(ref mut d) = self.digest {
            d.mul_assign(&int_n);
            d.rem_assign(&self.comb.m);
        }
        *self.elements.entry(int_n).or_insert(0) += 1;
    }
    fn remove(&mut self, n: &BigUint) -> bool {
        let int_n = <BigUint as IntegerConversion>::to_integer(&n);
        if let Some(count) = self.elements.get_mut(&int_n) {
            *count -= 1;
            if *count == 0 {
                self.elements.remove(&int_n);
            }
            // No incremental update on removal — invalidate the cache so
            // the digest is recomputed from the remaining elements.
            self.digest = None;
            true
        } else {
            false
        }
    }
    fn digest(&mut self) -> G::Elem {
        use rayon::prelude::*;
        if self.digest.is_none() {
            // step 1: compute the exponent (product of elem^multiplicity)
            let expt = {
                let mut tmp = Vec::with_capacity(self.elements.len() + 1);
                tmp.par_extend(
                    self.elements
                        .par_iter()
                        .map(|(elem, ct)| Integer::from(elem.pow(*ct as u32))),
                );
                // Reduces tmp to a single element (left in tmp[0]).
                _parallel_product(&mut tmp);
                tmp.pop().unwrap()
            };
            // step 2: exponentiate using ParExpComb
            self.digest = Some(self.comb.exp(&expt));
        }
        // convert internal Integer repr to Elem repr, respecting structure of group via IntoElem
        self.group
            .into_elem(<G::Elem as IntegerConversion>::from_integer(
                &self.digest.as_ref().unwrap(),
            ))
    }
    fn group(&self) -> &G {
        &self.group
    }
}
// Reduces `v` to its product in place: repeatedly multiplies the second
// half into the first half in parallel, padding with 1 (the multiplicative
// identity) to keep the length even. On return v.len() == 1 and v[0] holds
// the product. An empty input is padded to [1], yielding product 1.
fn _parallel_product(v: &mut Vec<Integer>) {
    use rayon::prelude::*;
    if v.len() % 2 == 1 {
        v.push(Integer::from(1));
    }
    while v.len() > 1 {
        // invariant: length of list is always even
        assert!(v.len() % 2 == 0);
        // split the list in half; multiply first half by second half in parallel
        let split_point = v.len() / 2;
        let (fst, snd) = v.split_at_mut(split_point);
        fst.par_iter_mut()
            .zip(snd)
            .for_each(|(f, s)| f.mul_assign(s as &Integer));
        // cut length of list in half, possibly padding with an extra '1'
        if split_point != 1 && split_point % 2 == 1 {
            v.truncate(split_point + 1);
            v[split_point].assign(1);
        } else {
            v.truncate(split_point);
        }
    }
    assert!(v.len() == 1);
}
#[cfg(test)]
mod tests {
    use super::*;
    use rug::rand::RandState;

    /// Checks the table count/shape, then verifies every entry of the
    /// first precomputed table against a directly computed subset product.
    #[test]
    fn precomp_table() {
        const NELMS: usize = 8;
        let mut pc = ParExpComb::default();
        pc.make_tables(NELMS);
        assert!(pc.len() > 0);
        // ceil(bases / NELMS) tables expected.
        let num_tables = (pc.bases().len() + NELMS - 1) / NELMS;
        assert!(pc.len() == num_tables);
        assert!(pc[0].len() == (1 << NELMS));
        // check the first precomputed table for correctness
        let bases = pc.bases();
        let modulus = pc.modulus();
        for idx in 0..(1 << NELMS) {
            let mut accum = Integer::from(1);
            for jdx in 0..NELMS {
                // Bit jdx of idx selects whether base jdx is in the product.
                if idx & (1 << jdx) != 0 {
                    accum.mul_assign(&bases[jdx]);
                    accum.rem_assign(modulus);
                }
            }
            assert_eq!(&accum, &pc[0][idx]);
        }
    }

    /// Round-trips the precomputed tables through (de)serialization.
    ///
    /// NOTE(review): the fixed `/tmp` path can collide across concurrent
    /// test runs — consider a per-run temp file.
    #[test]
    fn precomp_serdes() {
        let pc = {
            let mut tmp = ParExpComb::default();
            tmp.make_tables(4);
            tmp
        };
        pc.serialize("/tmp/serialized.gz");
        let pc2 = ParExpComb::deserialize("/tmp/serialized.gz");
        assert_eq!(pc, pc2);
    }

    /// Compares the parallel product tree with a sequential product.
    #[test]
    fn pprod_test() {
        const NELMS: usize = 2222;
        let mut rnd = RandState::new();
        _seed_rng(&mut rnd);
        let mut v = Vec::with_capacity(NELMS);
        (0..NELMS).for_each(|_| v.push(Integer::from(Integer::random_bits(2048, &mut rnd))));
        // sequential
        let mut prod = Integer::from(1);
        v.iter().for_each(|p| prod.mul_assign(p));
        // parallel
        _parallel_product(&mut v);
        assert!(prod == v[0]);
    }

    /// Compares table-driven exponentiation with rug's `pow_mod`.
    #[test]
    fn precomp_exp_test() {
        const LOG_EXPSIZE: usize = 22;
        let pc = {
            let mut tmp = ParExpComb::default();
            tmp.make_tables(2);
            tmp
        };
        let mut rnd = RandState::new();
        _seed_rng(&mut rnd);
        let expt = Integer::from(Integer::random_bits(1 << LOG_EXPSIZE, &mut rnd));
        let expect = Integer::from(pc.bases()[0].pow_mod_ref(&expt, pc.modulus()).unwrap());
        let result = pc.exp(&expt);
        assert_eq!(expect, result);
    }

    /// Seeds the rug RNG with 256 bits from the OS RNG.
    fn _seed_rng(rnd: &mut RandState) {
        use rug::integer::Order;
        rnd.seed(&Integer::from_digits(
            &rand::random::<[u64; 4]>()[..],
            Order::Lsf,
        ));
    }
}
|
use std::collections::HashMap;
use tera::{Result, Value};
use super::TeraFilter;
/// Returns every (template-visible name, filter function) pair exported
/// by this module, in registration order.
pub fn all<'a>() -> Vec<(&'static str, TeraFilter<'a>)> {
    vec![
        ("TitleCase", &case::title_case as TeraFilter<'a>),
        ("togglecase", &case::toggle_case as TeraFilter<'a>),
        ("flatcase", &case::flat_case as TeraFilter<'a>),
        ("alternatingcase", &case::alternating_case as TeraFilter<'a>),
        ("snake_case", &case::snake_case as TeraFilter<'a>),
        (
            "screaming_snake_case",
            &case::screaming_snake_case as TeraFilter<'a>,
        ),
        ("kebab-case", &case::kebab_case as TeraFilter<'a>),
        ("COBOL-CASE", &case::cobol_case as TeraFilter<'a>),
        ("Train-Case", &case::train_case as TeraFilter<'a>),
        ("PascalCase", &case::pascal_case as TeraFilter<'a>),
        ("camelCase", &case::camel_case as TeraFilter<'a>),
    ]
}
pub mod case {
    //! Tera filters that re-case a string value via `convert_case`.
    //! Each filter extracts a `String` (erroring with the filter's own
    //! name on type mismatch) and returns the converted string.
    use convert_case::{Case, Casing};
    use super::{HashMap, Result, Value};

    /// `Title Case`
    pub fn title_case(value: &Value, _: &HashMap<String, Value>) -> Result<Value> {
        let s = tera::try_get_value!("TitleCase", "value", String, value);
        Ok(Value::String(s.to_case(Case::Title)))
    }
    /// `tOGGLE cASE`
    pub fn toggle_case(value: &Value, _: &HashMap<String, Value>) -> Result<Value> {
        let s = tera::try_get_value!("togglecase", "value", String, value);
        Ok(Value::String(s.to_case(Case::Toggle)))
    }
    /// `flatcase`
    pub fn flat_case(value: &Value, _: &HashMap<String, Value>) -> Result<Value> {
        let s = tera::try_get_value!("flatcase", "value", String, value);
        Ok(Value::String(s.to_case(Case::Flat)))
    }
    /// `aLtErNaTiNg CaSe`
    pub fn alternating_case(value: &Value, _: &HashMap<String, Value>) -> Result<Value> {
        let s = tera::try_get_value!("alternatingcase", "value", String, value);
        Ok(Value::String(s.to_case(Case::Alternating)))
    }
    /// `snake_case`
    pub fn snake_case(value: &Value, _: &HashMap<String, Value>) -> Result<Value> {
        let s = tera::try_get_value!("snake_case", "value", String, value);
        Ok(Value::String(s.to_case(Case::Snake)))
    }
    /// `SCREAMING_SNAKE_CASE`
    pub fn screaming_snake_case(value: &Value, _: &HashMap<String, Value>) -> Result<Value> {
        let s = tera::try_get_value!("screaming_snake_case", "value", String, value);
        Ok(Value::String(s.to_case(Case::ScreamingSnake)))
    }
    /// `kebab-case`
    pub fn kebab_case(value: &Value, _: &HashMap<String, Value>) -> Result<Value> {
        let s = tera::try_get_value!("kebab-case", "value", String, value);
        Ok(Value::String(s.to_case(Case::Kebab)))
    }
    /// `COBOL-CASE`
    pub fn cobol_case(value: &Value, _: &HashMap<String, Value>) -> Result<Value> {
        let s = tera::try_get_value!("COBOL-CASE", "value", String, value);
        Ok(Value::String(s.to_case(Case::Cobol)))
    }
    /// `Train-Case`
    pub fn train_case(value: &Value, _: &HashMap<String, Value>) -> Result<Value> {
        let s = tera::try_get_value!("Train-Case", "value", String, value);
        Ok(Value::String(s.to_case(Case::Train)))
    }
    /// `PascalCase`
    pub fn pascal_case(value: &Value, _: &HashMap<String, Value>) -> Result<Value> {
        let s = tera::try_get_value!("PascalCase", "value", String, value);
        Ok(Value::String(s.to_case(Case::Pascal)))
    }
    /// `camelCase`
    pub fn camel_case(value: &Value, _: &HashMap<String, Value>) -> Result<Value> {
        let s = tera::try_get_value!("camelCase", "value", String, value);
        Ok(Value::String(s.to_case(Case::Camel)))
    }
}
|
use std::time::Instant;
/// Frames-per-second counter: count `tick()` calls and publish the total
/// once every elapsed second.
pub struct FpsCounter {
    /// Start of the current one-second measurement window.
    pub start_time: Instant,
    /// Frames counted so far in the current window.
    pub frame_count: usize,
    /// Frame count of the most recently completed window
    /// (0 until the first full second has elapsed).
    pub fps: usize,
}

// Fix: `new()` without a `Default` impl trips clippy::new_without_default
// and keeps the type out of `Default`-based composition.
impl Default for FpsCounter {
    fn default() -> Self {
        Self::new()
    }
}

impl FpsCounter {
    /// Creates a counter whose measurement window starts now.
    pub fn new() -> Self {
        FpsCounter {
            start_time: Instant::now(),
            frame_count: 0,
            fps: 0,
        }
    }

    /// Restarts the measurement window. The published `fps` is kept.
    pub fn reset(&mut self) {
        self.start_time = Instant::now();
        self.frame_count = 0;
    }

    /// Records one frame and returns the most recent full-second FPS.
    ///
    /// When at least one second has elapsed since the window started,
    /// the current count is published as `fps` and the window restarts.
    pub fn tick(&mut self) -> usize {
        self.frame_count += 1;
        if self.start_time.elapsed().as_secs() >= 1 {
            self.fps = self.frame_count;
            self.reset();
        }
        self.fps
    }
}
use crate::{
field_access::Access,
ident_or_index::IdentOrIndex,
parse_utils::ParsePunctuated,
structural_alias_impl::TypeParamBounds,
};
use as_derive_utils::{
attribute_parsing::with_nested_meta,
datastructure::{DataStructure,Field,FieldMap},
utils::{LinearResult,SynResultExt,SynPathExt},
spanned_err,
return_spanned_err,
};
use quote::ToTokens;
use syn::{
Attribute,
Ident,
Lit,
Meta,MetaList,MetaNameValue,
};
use std::marker::PhantomData;
/// Fully-parsed configuration for the `Structural` derive macro.
#[derive(Debug)]
pub(crate) struct StructuralOptions<'a>{
    /// Per-field configuration (accessor kind, rename, visibility).
    pub(crate) fields:FieldMap<FieldConfig>,
    /// Set by the `#[struc(debug_print)]` attribute.
    pub(crate) debug_print:bool,
    /// Whether to emit the trait alias; cleared by `#[struc(no_trait)]`
    /// and by `#[struc(delegate_to)]`.
    pub(crate) with_trait_alias:bool,
    /// The single field marked `#[struc(delegate_to)]`, if any.
    pub(crate) delegate_to:Option<&'a Field<'a>>,
    _marker:PhantomData<&'a ()>,
}
impl<'a> StructuralOptions<'a>{
fn new(
_ds: &'a DataStructure<'a>,
this:StructuralAttrs<'a>,
)->Result<Self,syn::Error> {
let StructuralAttrs{
fields,
debug_print,
with_trait_alias,
delegate_to,
errors:_,
_marker,
}=this;
Ok(Self{
fields,
debug_print,
with_trait_alias,
delegate_to,
_marker,
})
}
}
////////////////////////////////////////////////////////////////////////////////////////////////
/// Per-field configuration accumulated from `#[struc(...)]` attributes.
#[derive(Debug,Default)]
pub(crate) struct FieldConfig{
    /// Accessor kind, parsed from `#[struc(access="...")]`.
    pub(crate) access:Access,
    /// Replacement name from `#[struc(rename="...")]`, if any.
    pub(crate) renamed:Option<IdentOrIndex>,
    /// Whether the type is replaced with bounds in the `<deriving_type>_SI` trait.
    pub(crate) is_impl:Option<TypeParamBounds>,
    /// Determines whether the field is considered public.
    ///
    /// `false`: means that the field does not get an accessor.
    /// `true`: means that the field gets an accessor.
    pub(crate) is_pub:bool,
}
////////////////////////////////////////////////////////////////////////////////////////////////
/// Mutable accumulator used while parsing `#[struc(...)]` attributes;
/// converted into `StructuralOptions` once parsing finishes.
#[derive(Default)]
struct StructuralAttrs<'a>{
    fields:FieldMap<FieldConfig>,
    /// Set by `#[struc(debug_print)]`.
    debug_print:bool,
    /// Cleared by `#[struc(no_trait)]` or `#[struc(delegate_to)]`.
    with_trait_alias:bool,
    /// The single field marked `#[struc(delegate_to)]`, if any.
    delegate_to:Option<&'a Field<'a>>,
    /// Collects non-fatal parse errors so every attribute gets reported.
    errors:LinearResult<()>,
    _marker:PhantomData<&'a ()>,
}
/// Where the attribute being parsed was written: on the deriving type
/// itself, or on one of its fields.
#[derive(Debug,Copy, Clone)]
enum ParseContext<'a> {
    /// Attribute on the deriving type; `name` is the type's identifier.
    TypeAttr{
        name:&'a Ident,
    },
    /// Attribute on a specific field.
    Field{
        field:&'a Field<'a>,
    },
}
/// Parses the attributes for the `Structural` derive macro.
///
/// Processes type-level attributes first, then each field's attributes,
/// so field-level settings can override type-level defaults.
pub(crate) fn parse_attrs_for_structural<'a>(
    ds: &'a DataStructure<'a>,
) -> Result<StructuralOptions<'a>,syn::Error> {
    let mut this = StructuralAttrs::default();
    // Trait-alias generation is on unless `#[struc(no_trait)]` or
    // `#[struc(delegate_to)]` turns it off during parsing.
    this.with_trait_alias=true;
    // Every field starts from the default config; visibility initially
    // mirrors the field's own `pub`-ness.
    this.fields=FieldMap::with(ds,|field|{
        FieldConfig{
            access:Default::default(),
            renamed:Default::default(),
            is_impl:None,
            is_pub:field.is_public(),
        }
    });
    let name=ds.name;
    parse_inner(&mut this, ds.attrs, ParseContext::TypeAttr{name})?;
    // (previously iterated with `.enumerate()` and discarded the index)
    for field in ds.variants[0].fields.iter() {
        parse_inner(&mut this, field.attrs, ParseContext::Field{field} )?;
    }
    // Surface any accumulated non-fatal errors before building options.
    this.errors.take()?;
    StructuralOptions::new(ds, this)
}
/// Parses an individual attribute
fn parse_inner<'a,I>(
this: &mut StructuralAttrs<'a>,
attrs: I,
pctx: ParseContext<'a>,
)-> Result<(),syn::Error>
where
I:IntoIterator<Item=&'a Attribute>
{
for attr in attrs {
match attr.parse_meta() {
Ok(Meta::List(list)) => {
parse_attr_list(this,pctx, list)
.combine_into_err(&mut this.errors);
}
Err(e)=>{
this.errors.push_err(e);
}
_ => {}
}
}
Ok(())
}
/// Parses an individual attribute list (A `#[attribute( .. )] attribute`).
/// Parses an individual attribute list (A `#[attribute( .. )] attribute`).
fn parse_attr_list<'a>(
    this: &mut StructuralAttrs<'a>,
    pctx: ParseContext<'a>,
    list: MetaList,
)-> Result<(),syn::Error> {
    // Only `#[struc(...)]` lists belong to this derive; skip the rest.
    if !list.path.equals_str("struc") {
        return Ok(());
    }
    with_nested_meta("struc", list.nested, |attr| {
        parse_sabi_attr(this, pctx, attr).combine_into_err(&mut this.errors);
        Ok(())
    })?;
    Ok(())
}
/// Parses the contents of a `#[sabi( .. )]` attribute.
/// Parses the contents of a `#[struc( .. )]` attribute.
fn parse_sabi_attr<'a>(
    this: &mut StructuralAttrs<'a>,
    pctx: ParseContext<'a>,
    attr: Meta,
)-> Result<(),syn::Error> {
    // Shared "unrecognized attribute" error constructor.
    fn make_err(tokens:&dyn ToTokens)->syn::Error{
        spanned_err!(tokens,"unrecognized attribute")
    }
    match (pctx, attr) {
        // Field-level `name = "value"` attributes.
        (
            ParseContext::Field{field,..},
            Meta::NameValue(MetaNameValue{lit:Lit::Str(ref value),ref path,..})
        ) => {
            if path.equals_str("rename") {
                let renamed=value.parse::<IdentOrIndex>()?;
                this.fields[field].renamed=Some(renamed);
            }else if path.equals_str("access") {
                // An explicit access kind also makes the field public.
                let access=value.parse::<Access>()?;
                let fa=&mut this.fields[field];
                fa.access=access;
                fa.is_pub=true;
            }else if path.equals_str("impl") {
                if !cfg!(feature="impl_fields") {
                    return_spanned_err!{
                        path,
                        "\
                            Cannot use the `#[struc(impl=\"Trait\")]` \
                            attribute without enabling the \
                            \"nightly_impl_fields\" or \"impl_fields\" feature.\
                        ",
                    }
                }
                let bounds:TypeParamBounds=value.parse::<ParsePunctuated<_,_>>()?.list;
                this.fields[field].is_impl=Some(bounds)
            }else{
                return Err(make_err(&path))?;
            }
        }
        // Field-level bare-path attributes.
        (ParseContext::Field{field,..}, Meta::Path(path)) => {
            if path.equals_str("public") {
                this.fields[field].is_pub=true;
            }else if path.equals_str("not_public")||path.equals_str("private") {
                this.fields[field].is_pub=false;
            }else if path.equals_str("delegate_to") {
                // Only a single field may be delegated to.
                if this.delegate_to.is_some() {
                    return_spanned_err!{
                        path,
                        "Cannot use the `#[struc(delegate_to)]` attribute on multiple fields."
                    };
                }
                // Delegation suppresses the generated trait alias.
                this.with_trait_alias=false;
                this.delegate_to=Some(field)
            }else{
                return Err(make_err(&path))?;
            }
        }
        // Type-level bare-path attributes; `public`/`not_public` apply
        // to every field at once.
        (ParseContext::TypeAttr{..},Meta::Path(ref path)) =>{
            if path.equals_str("debug_print"){
                this.debug_print=true;
            }else if path.equals_str("no_trait") {
                this.with_trait_alias=false;
            }else if path.equals_str("public") {
                for (_,field) in this.fields.iter_mut() {
                    field.is_pub=true;
                }
            }else if path.equals_str("not_public")||path.equals_str("private") {
                for (_,field) in this.fields.iter_mut() {
                    field.is_pub=false;
                }
            }else{
                return Err(make_err(&path))?;
            }
        }
        // Type-level `name = "value"` attributes.
        (
            ParseContext::TypeAttr{..},
            Meta::NameValue(MetaNameValue{lit:Lit::Str(ref unparsed_lit),ref path,..})
        )=>{
            let ident=path.get_ident().ok_or_else(|| make_err(path) )?;
            if ident=="access" {
                // Type-level access applies to every field.
                let access=unparsed_lit.parse::<Access>()?;
                for (_,fa) in this.fields.iter_mut() {
                    fa.access=access;
                }
            }else{
                return Err(make_err(path));
            }
        }
        // Anything else is unrecognized.
        (_,x) => return Err(make_err(&x)),
    }
    Ok(())
}
|
use super::{RdfProp, RdfStorePropExt};
use extend::ext;
use skorm_store::{NamedNode, NamedOrBlankNode, RdfStore, SubjectExt};
/// A named `rdfs:Class` resolved against a backing `RdfStore`.
#[derive(Debug, Clone, Copy)]
pub struct RdfClass<'a> {
    store: &'a RdfStore,
    name: NamedNode<'a>,
}
impl<'a> RdfClass<'a> {
    /// The class's IRI as a string slice.
    pub fn name(&self) -> &'a str {
        self.name.iri().as_ref()
    }

    /// The underlying named node.
    #[inline]
    pub fn node(&self) -> NamedNode<'a> {
        self.name
    }

    /// Superclasses reached via `rdfs:subClassOf`; non-named (e.g.
    /// blank-node or literal) terms are silently skipped.
    ///
    /// NOTE(review): the `term(...)` result is `unwrap()`ed — confirm it
    /// cannot fail for a well-formed store, or propagate the error.
    pub fn base_classes(&self) -> impl Iterator<Item = RdfClass<'a>> {
        let store = self.store;
        self
            .name
            .term("rdfs:subClassOf")
            .unwrap()
            .filter_map(move |term| term.as_named().map(|name| RdfClass { name, store }))
    }

    /// Triples associated with this class's node.
    pub fn triples(&self) -> skorm_store::Triples<'a> {
        self.name.triples()
    }

    /// Properties whose `domains()` include this class's node.
    pub fn own_props(&self) -> impl Iterator<Item = RdfProp<'a>> + 'a {
        let self_name = self.name;
        let all_props = self.store.props();
        let with_self_as_domain =
            all_props.filter(move |prop| prop.domains().any(|domain| domain == self_name));
        with_self_as_domain
    }
}
#[ext(pub, name = RdfStoreClassExt)]
impl RdfStore {
    /// Iterates over every named `rdfs:Class` subject in the store,
    /// skipping blank-node classes.
    ///
    /// NOTE(review): `subjects_of_type(...)` is `unwrap()`ed — confirm it
    /// cannot fail here.
    fn classes<'a>(&'a self) -> Box<dyn Iterator<Item = RdfClass<'a>> + 'a> {
        let iter = self
            .subjects_of_type("rdfs:Class")
            .unwrap()
            .filter_map(move |sub| match sub {
                NamedOrBlankNode::Named(name) => Some(RdfClass { store: self, name }),
                NamedOrBlankNode::Blank(_) => None,
            });
        Box::new(iter)
    }
}
|
use super::super::{
components,
resources::game_map::{GameMap, TileProperties},
};
use specs::{Read, ReadStorage, System, WriteStorage};
/// Cancels the movement of solid entities into blocked map tiles by
/// removing their `Moved` component.
pub struct CollisionsSolid;

impl<'a> System<'a> for CollisionsSolid {
    type SystemData = (
        WriteStorage<'a, components::Moved>,
        ReadStorage<'a, components::material::Material>,
        specs::Entities<'a>,
        Read<'a, GameMap>,
    );

    fn run(&mut self, (mut moved, materials, entities, game_map): Self::SystemData) {
        use specs::Join;
        // Collect first: `moved` cannot be mutated while it is joined over.
        let blocked: Vec<_> = (&moved, &entities, &materials)
            .join()
            .filter_map(|(mv, entity, material)| {
                if material.solid && game_map.tile_is(&mv.to, TileProperties::BLOCKED) {
                    Some(entity)
                } else {
                    None
                }
            })
            .collect();
        for entity in blocked {
            moved.remove(entity);
        }
    }
}
#[cfg(test)]
mod tests {
    use super::super::super::resources::game_map::GameMap;
    use super::*;
    use specs::{Builder, RunNow, World, WorldExt};

    /// An entity with no Material component is excluded from the join,
    /// so its move survives even into a blocked tile.
    #[test]
    fn doesnt_affect_nonsolid_entities() {
        let mut world = World::new();
        let start = components::Position::new(3, -4);
        let target = components::Position::new(3, -5);
        let mut game_map = GameMap::new();
        world.register::<components::Position>();
        world.register::<components::Moved>();
        world.register::<components::material::Material>();
        game_map.mark_tile(&target, TileProperties::BLOCKED);
        world.insert(game_map);
        let ent_player = world
            .create_entity()
            .with(start.clone())
            .with(components::Moved {
                from: start.clone(),
                to: target.clone(),
            })
            .build();
        let mut collisions_solid = CollisionsSolid;
        collisions_solid.run_now(&world);
        world.maintain();
        let read_moved = world.read_storage::<components::Moved>();
        let player_move = read_moved.get(ent_player);
        match player_move {
            None => {
                panic!("Should still have a Moved component");
            }
            Some(_) => (),
        };
    }

    /// Moves onto unmarked tiles are untouched (map has no BLOCKED tile).
    #[test]
    fn doesnt_affect_nonsolid_targets() {
        let mut world = World::new();
        let start = components::Position::new(3, -4);
        let target = components::Position::new(3, -5);
        let game_map = GameMap::new();
        world.register::<components::Position>();
        world.register::<components::Moved>();
        world.register::<components::material::Material>();
        world.insert(game_map);
        let ent_player = world
            .create_entity()
            .with(start.clone())
            .with(components::Moved {
                from: start.clone(),
                to: target.clone(),
            })
            .build();
        let mut collisions_solid = CollisionsSolid;
        collisions_solid.run_now(&world);
        world.maintain();
        let read_moved = world.read_storage::<components::Moved>();
        let player_move = read_moved.get(ent_player);
        match player_move {
            None => {
                panic!("Should still have a Moved component");
            }
            Some(_) => (),
        };
    }

    /// A solid (flesh) entity moving into a BLOCKED tile loses its
    /// Moved component — the move is cancelled.
    #[test]
    fn cancels_move_through_solid_tile() {
        let mut world = World::new();
        let start = components::Position::new(3, -4);
        let target = components::Position::new(3, -5);
        let mut game_map = GameMap::new();
        world.register::<components::Position>();
        world.register::<components::Moved>();
        world.register::<components::material::Material>();
        game_map.mark_tile(&target, TileProperties::BLOCKED);
        world.insert(game_map);
        let ent_player = world
            .create_entity()
            .with(start.clone())
            .with(components::Moved {
                from: start.clone(),
                to: target.clone(),
            })
            .with(components::material::flesh())
            .build();
        let mut collisions_solid = CollisionsSolid;
        collisions_solid.run_now(&world);
        world.maintain();
        let read_moved = world.read_storage::<components::Moved>();
        let player_move = read_moved.get(ent_player);
        match player_move {
            None => (),
            Some(_) => {
                panic!("Should not still have Moved component");
            }
        };
    }
}
|
use crate::context::QueryEnv;
use crate::parser::query::{Selection, TypeCondition};
use crate::{Context, ContextSelectionSet, ObjectType, Result, Schema, SchemaEnv, Type};
use futures::{Future, Stream};
use std::pin::Pin;
/// Represents a GraphQL subscription object
/// Represents a GraphQL subscription object
#[async_trait::async_trait]
pub trait SubscriptionType: Type {
    /// This function returns true for the `EmptySubscription` type only
    #[doc(hidden)]
    fn is_empty() -> bool {
        false
    }

    /// Creates the event stream for the field at position `idx` in the
    /// current selection set.
    #[doc(hidden)]
    async fn create_field_stream(
        &self,
        idx: usize,
        ctx: &Context<'_>,
        schema_env: SchemaEnv,
        query_env: QueryEnv,
    ) -> Result<Pin<Box<dyn Stream<Item = Result<serde_json::Value>> + Send>>>;
}
type BoxCreateStreamFuture<'a> = Pin<Box<dyn Future<Output = Result<()>> + Send + 'a>>;
/// Walks a subscription selection set, pushing one event stream per
/// selected field into `streams` and recursing into fragment spreads and
/// inline fragments.
///
/// Returns a boxed future because the function is recursive.
pub fn create_subscription_stream<'a, Query, Mutation, Subscription>(
    schema: &'a Schema<Query, Mutation, Subscription>,
    environment: QueryEnv,
    ctx: &'a ContextSelectionSet<'_>,
    streams: &'a mut Vec<Pin<Box<dyn Stream<Item = Result<serde_json::Value>> + Send>>>,
) -> BoxCreateStreamFuture<'a>
where
    Query: ObjectType + Send + Sync + 'static,
    Mutation: ObjectType + Send + Sync + 'static,
    Subscription: SubscriptionType + Send + Sync + 'static + Sized,
{
    Box::pin(async move {
        for (idx, selection) in ctx.items.iter().enumerate() {
            match &selection.node {
                Selection::Field(field) => {
                    // Honor skip/include directives on the field.
                    if ctx.is_skip(&field.directives)? {
                        continue;
                    }
                    streams.push(
                        schema
                            .subscription
                            .create_field_stream(
                                idx,
                                &ctx.with_field(field),
                                schema.env.clone(),
                                environment.clone(),
                            )
                            .await?,
                    )
                }
                Selection::FragmentSpread(fragment_spread) => {
                    if ctx.is_skip(&fragment_spread.directives)? {
                        continue;
                    }
                    // Unknown fragment names are silently skipped here —
                    // presumably caught by earlier validation; confirm.
                    if let Some(fragment) = ctx
                        .query_env
                        .document
                        .fragments()
                        .get(fragment_spread.fragment_name.as_str())
                    {
                        create_subscription_stream(
                            schema,
                            environment.clone(),
                            &ctx.with_selection_set(&fragment.selection_set),
                            streams,
                        )
                        .await?;
                    }
                }
                Selection::InlineFragment(inline_fragment) => {
                    if ctx.is_skip(&inline_fragment.directives)? {
                        continue;
                    }
                    // With a type condition, recurse only when it names
                    // the subscription root type; without one, always.
                    if let Some(TypeCondition::On(name)) =
                        inline_fragment.type_condition.as_ref().map(|v| &v.node)
                    {
                        if name.node == Subscription::type_name() {
                            create_subscription_stream(
                                schema,
                                environment.clone(),
                                &ctx.with_selection_set(&inline_fragment.selection_set),
                                streams,
                            )
                            .await?;
                        }
                    } else {
                        create_subscription_stream(
                            schema,
                            environment.clone(),
                            &ctx.with_selection_set(&inline_fragment.selection_set),
                            streams,
                        )
                        .await?;
                    }
                }
            }
        }
        Ok(())
    })
}
// Blanket delegation: a reference to a subscription type is itself a
// subscription type, forwarding to the referenced value.
#[async_trait::async_trait]
impl<T: SubscriptionType + Send + Sync> SubscriptionType for &T {
    async fn create_field_stream(
        &self,
        idx: usize,
        ctx: &Context<'_>,
        schema_env: SchemaEnv,
        query_env: QueryEnv,
    ) -> Result<Pin<Box<dyn Stream<Item = Result<serde_json::Value>> + Send>>> {
        T::create_field_stream(*self, idx, ctx, schema_env, query_env).await
    }
}
|
use image::DynamicImage;
use super::super::Transformer;
impl Transformer for DynamicImage {
fn as_vector(&self) -> Vec<f32> {
self.raw_pixels().iter().map(|&elem| elem as f32).collect()
}
} |
/*
chapter 4
syntax and semantics
*/
/// A circle positioned at (h, v) with radius r.
struct Circle {
    h: f64,
    v: f64,
    r: f64,
}

impl Circle {
    /// Area enclosed by the circle: pi * r^2.
    fn area(&self) -> f64 {
        std::f64::consts::PI * self.r.powi(2)
    }

    /// Returns a new circle at the same center with the radius enlarged
    /// by `increment`.
    fn grow(&self, increment: f64) -> Circle {
        Circle {
            h: self.h,
            v: self.v,
            r: self.r + increment,
        }
    }
}
fn main() {
    // A unit-test of sorts for the Circle methods, printed to stdout.
    let a = Circle {
        h: 0.0,
        v: 0.0,
        r: 2.0,
    };
    // Area of the original circle (r = 2).
    println!("{}", a.area());
    // Area after growing the radius by 2 (r = 4).
    let b = a.grow(2.0).area();
    println!("{}", b);
}
// output should be:
/*
12.566370614359172
50.26548245743669
*/
|
use std::{fmt, ops::Deref};
use serde::{
de::{self, Visitor},
ser::Serializer,
};
use serde_derive::{Deserialize, Serialize};
/// A key/value pair as exchanged with the KV HTTP API; fields serialize
/// as `Key` / `Value` because of the `PascalCase` rename.
#[derive(Debug, Serialize, Deserialize)]
#[serde(rename_all = "PascalCase")]
pub struct KV {
    pub key: String,
    /// Base64-wrapped payload; see [`KVValue`].
    pub value: KVValue,
}
impl KV {
    /// Builds a pair from anything convertible into a key `String` and a
    /// [`KVValue`].
    pub fn new<K, V>(key: K, value: V) -> Self
    where
        K: Into<String>,
        V: Into<KVValue>,
    {
        let key = key.into();
        let value = value.into();
        KV { key, value }
    }
}
/// Serde visitor that decodes a base64 string into a [`KVValue`].
struct KVVisitor;

impl<'de> Visitor<'de> for KVVisitor {
    type Value = KVValue;

    fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
        formatter.write_str("a base64-encoded string")
    }

    // Borrowed input: delegate to the owned-string path.
    fn visit_str<E>(self, v: &str) -> Result<Self::Value, E>
    where
        E: de::Error,
    {
        self.visit_string(v.to_string())
    }

    // Base64-decode, then require the payload to be valid UTF-8.
    fn visit_string<E>(self, v: String) -> Result<Self::Value, E>
    where
        E: de::Error,
    {
        let decoded = base64::decode(v).map_err(de::Error::custom)?;
        let s = String::from_utf8(decoded).map_err(de::Error::custom)?;
        Ok(KVValue(s))
    }
}
/// Newtype wrapper to automatically handle encoding/decoding base64 from the KV API
#[derive(Debug)]
pub struct KVValue(pub String);

// Anything string-convertible becomes a KVValue; the stored string is
// un-encoded — base64 happens only at (de)serialization time.
impl<T> From<T> for KVValue
where
    T: Into<String>,
{
    fn from(v: T) -> Self {
        KVValue(v.into())
    }
}

// Deref to the inner String for ergonomic read access.
impl Deref for KVValue {
    type Target = String;
    fn deref(&self) -> &Self::Target {
        &self.0
    }
}
impl serde::Serialize for KVValue {
    // Serializes as the base64 encoding of the inner string's bytes.
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: Serializer,
    {
        let encoded = base64::encode(self.as_bytes());
        serializer.serialize_str(&encoded)
    }
}
impl<'de> serde::Deserialize<'de> for KVValue {
    // Deserializes by base64-decoding a string; see [`KVVisitor`].
    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
    where
        D: de::Deserializer<'de>,
    {
        deserializer.deserialize_string(KVVisitor)
    }
}
|
//! Proof Implemation
//!
use std::fmt::Display;
use crate::hash::{hash_leaf, hash_mid};
use crate::merkle_tree::ProofNode;
/// A Merkle inclusion proof for a value of type `T`.
#[derive(Debug)]
pub struct Proof<T: Display> {
    /// Root hash recorded when the proof was created.
    root_hash: String,
    /// The value whose membership is being proven.
    val: T,
    /// Sibling hashes along the path from the leaf up to the root.
    path: Vec<ProofNode>,
}
impl<T> Proof<T>
where T: Display
{
pub fn new(root_hash: String, val: T, path: Vec<ProofNode>) -> Self {
Self {
root_hash,
val,
path,
}
}
pub fn validate(&self, root_hash: &str) -> bool {
let mut hash = hash_leaf(&self.val);
for node in &self.path {
hash = match node {
&ProofNode::Left(ref proof_hash) => hash_mid(proof_hash, &hash),
&ProofNode::Right(ref proof_hash) => hash_mid(&hash, proof_hash),
};
}
root_hash == hash
}
} |
use logger::Logger;
use std::io;
use std::time::Instant;
/// Fans every log record out to a set of attached child loggers.
pub struct MultiLogger {
    loggers: Vec<Box<dyn Logger>>
}
impl MultiLogger {
    /// Creates a fan-out logger with no destinations attached.
    pub fn new() -> Self {
        Self {
            loggers: Vec::new()
        }
    }

    /// Attaches another destination; records are forwarded to the
    /// destinations in the order they were added.
    pub fn log_to<L: Logger + 'static>(&mut self, logger: L) {
        self.loggers.push(Box::new(logger));
    }
}

// Fix: `new()` without a matching `Default` impl trips
// clippy::new_without_default and blocks `Default`-based composition.
impl Default for MultiLogger {
    fn default() -> Self {
        Self::new()
    }
}
impl Logger for MultiLogger {
    // Forwards the record to every attached logger, in registration order.
    fn push(&mut self, time: Instant, text: &str) {
        for logger in &mut self.loggers {
            logger.push(time, text);
        }
    }

    // Flushes each logger in turn.
    //
    // NOTE(review): returns on the first flush error, leaving later
    // loggers unflushed — confirm that is the intended semantics.
    fn try_flush(&mut self) -> io::Result<()>{
        for logger in &mut self.loggers {
            logger.try_flush()?;
        }
        Ok(())
    }
}
// This is the main function
fn main() {
    // The statements here will be executed when the compiled binary is called
    // Print text to the console
    println!("Hello World!");
    // In general, the `{}` will be automatically replaced with any
    // arguments. These will be stringified.
    println!("{} days", 31);
    // There are various optional patterns this works with. Positional
    // arguments can be used.
    println!("{0}, this is {1}. {1}, this is {0}", "Alice", "Bob");
    // As can named arguments.
    println!("{subject} {verb} {object}",
             object="the lazy dog",
             subject="the quick brown fox",
             verb="jumps over");
    // Special formatting can be specified after a `:`.
    // (typo fixed in the output text: "binery" -> "binary")
    println!("{0} as binary is {0:b} ", 2);
    // You can right-align text with a specified width. This will output
    // " 1". 5 white spaces and a "1".
    println!("{number:>width$}", number=1, width=6);
    // You can pad numbers with extra zeroes. This will output "000001".
    println!("{number:>0width$}", number=1, width=6);
    // It will even check to make sure the correct number of arguments are
    // used.
    println!("My name is {0}, {1} {0}", "Bond", "James");
    let pi = 3.141592;
    println!("Debug pi {:?}", pi);
    //https://doc.rust-lang.org/std/fmt/
    println!("Debug pi {:.*}",2, pi);
    let formatted_number = format!("{:.*}", 2, 1.234567);
    assert_eq!("1.23", formatted_number);
}
#[macro_use]
pub mod rule;
pub mod lr1;
pub mod parser;
pub use lr1::*;
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.