text stringlengths 8 4.13M |
|---|
use std::collections::HashMap;
use std::cmp;
use errors::*;
pub mod enums;
/// Common deserialization entry point for Bungie API response wrappers:
/// consume a completed `Download` and parse its body into `Self`.
pub trait Deser
    where Self: ::std::marker::Sized
{
    // `value` is destructured as (url, recorded path, body bytes) by the
    // `body_wrapper!` implementations below.
    fn deser(value: Download) -> Result<Self>;
}
use destiny::{Download, write_body};
use failure::ResultExt;
/// Generate an `$outer` envelope struct wrapping an `$inner` payload, matching
/// Bungie's standard response envelope (Response / ErrorCode / ErrorStatus /
/// Message), plus a `Deser` impl that parses a finished `Download` into it.
macro_rules! body_wrapper {
    ($inner:ident, $outer:ident) => {
        #[derive(Deserialize, Debug)]
        #[serde(rename_all = "PascalCase")]
        pub struct $outer {
            pub response: $inner,
            pub error_code: i32,
            pub error_status: String,
            pub message: String,
        }
        impl Deser for $outer {
            fn deser(value: Download) -> Result<$outer> {
                let (outurl, json_out, body_chunk) = value;
                // Fixed typo in the log message ("Derializing" -> "Deserializing").
                info!("Deserializing: {}", outurl);
                // Persist the raw body beside the recorded path for post-mortem debugging.
                write_body(&json_out, &body_chunk);
                Ok(serde_json::from_slice(&body_chunk).with_context(|_| {
                    format!("deserializing JSON: Source URL: {} recorded at {:?}",
                            outurl, json_out)
                })?)
            }
        }
    }
}
// Concrete envelope types for each API endpoint this client consumes.
body_wrapper!(ItemResponse, ItemResponseBody);
body_wrapper!(UserMembershipData, UserResponseBody);
body_wrapper!(DestinyManifest, ManifestResponseBody);
body_wrapper!(DestinyProfileResponse, ProfileResponseBody);
/// Top-level manifest: locations of the downloadable content databases.
#[derive(Deserialize, Debug)]
#[serde(rename_all = "camelCase")]
pub struct DestinyManifest {
    pub version: String,
    pub mobile_asset_content_path: String,
    pub mobile_gear_asset_data_bases: Vec<GearAssetDataBaseDefinition>,
    pub mobile_clan_banner_database_path: String,
    // NOTE(review): presumably keyed by locale code — confirm against API docs.
    pub mobile_world_content_paths: HashMap<String, String>,
    // "CDN" breaks the camelCase convention, hence the explicit rename.
    #[serde(rename = "mobileGearCDN")]
    pub mobile_gear_cdn: HashMap<String, String>,
}
/// Profile-wide response. Each component is optional because the API only
/// returns the components that were requested.
#[derive(Deserialize, Debug, Clone)]
#[serde(rename_all = "camelCase")]
pub struct DestinyProfileResponse {
    pub profile_inventory: Option<InventoryComponentResponse>,
    pub character_equipment: Option<CharacterEquipmentComponentResponse>,
    // NOTE(review): reuses the equipment component type; presumably the two
    // payloads share a shape — confirm.
    pub character_inventories: Option<CharacterEquipmentComponentResponse>,
    pub item_components: ItemComponentSet,
}
/// Placeholder: no item-component fields are deserialized yet.
#[derive(Deserialize, Debug, Clone)]
#[serde(rename_all = "camelCase")]
pub struct ItemComponentSet {
}
/// Single-item response, plus definitions resolved afterwards from the local
/// manifest database (`bucket`, `item_def`, `plug_defs`).
#[derive(Deserialize, Debug, Clone)]
#[serde(rename_all = "camelCase")]
pub struct ItemResponse {
    // stats SingleItemStats
    // sockets SingleItemSockets
    pub character_id: Option<String>, // API says i64...
    pub item: Option<SingleItem>,
    pub instance: Option<SingleItemInstance>,
    pub sockets: Option<ItemSocketsComponent>,
    // Populated by `fetch_bucket_def`, not present in the API response.
    bucket: Option<InventoryBucketDefinition>,
    // Populated by `fetch_item_def`.
    item_def: Option<InventoryItemDefinition>,
    // Populated by `fetch_plug_defs`; never deserialized.
    #[serde(skip)]
    pub plug_defs: Vec<ItemSocketState>,
}
use rusqlite::Connection;
use serde_json;
/// Look up an `InventoryItemDefinition` in the local manifest database by its
/// (signed) hash id. The manifest stores definitions as JSON blobs.
///
/// Simplified from `.map_err(|e| Error::from(e)).and_then(|res| Ok(res?))` to
/// plain `?` propagation — behavior is unchanged.
fn fetch_plug_def(hash: i32, db: &Connection) -> Result<InventoryItemDefinition> {
    let mut stmt =
        db.prepare_cached("select json from DestinyInventoryItemDefinition where id = ?1")?;
    // The row-mapping closure returns the deserialization Result, so the outer
    // `?` propagates SQL errors and the inner `?` propagates JSON errors.
    let parsed = stmt.query_row(&[&hash], |row| {
        let json: String = row.get(0);
        serde_json::from_str(&json).with_context(|_| format!("deserializing JSON: {}", hash))
    })?;
    Ok(parsed?)
}
impl ItemResponse {
    /// Resolve item, bucket, and plug definitions from the local manifest DB
    /// and cache them on `self`. Errors are printed, not propagated.
    ///
    /// NOTE(review): `.and()` arguments are evaluated eagerly, so all three
    /// fetches always run even if an earlier one fails; only the first error
    /// is reported.
    pub fn fetch_component_defs<'f, 'g>(&'f mut self, db: &'g Connection) {
        match self.fetch_item_def(db)
            .and(self.fetch_bucket_def(db))
            .and(self.fetch_plug_defs(db)) {
            Ok(_) => (),
            Err(e) => println!("Problem getting defs for:\n {:?}: \n{:?}", self, e),
        }
    }
    /// Load this item's `InventoryItemDefinition` by hash and cache it in
    /// `self.item_def`. Fails when the hash is unknown or the row is missing.
    fn fetch_item_def<'f, 'g>(&'f mut self, db: &'g Connection) -> Result<()> {
        let mut stmt =
            db.prepare_cached("select json from DestinyInventoryItemDefinition where id = ?1")?;
        let mut rows = stmt.query(&[&(self.item_hash()?)])?;
        match rows.next() {
            Some(row) => {
                let json: String = row?.get(0);
                let item: InventoryItemDefinition =
                    serde_json::from_str(&json).with_context(|_| format!("deserializing JSON: {}", json))?;
                self.item_def = Some(item);
                Ok(())
            }
            None => bail!("No item def for hash!"),
        }
    }
    /// Load this item's `InventoryBucketDefinition` and cache it in `self.bucket`.
    fn fetch_bucket_def<'f, 'g>(&'f mut self, db: &'g Connection) -> Result<()> {
        let mut stmt =
            db.prepare_cached("select json from DestinyInventoryBucketDefinition where id = ?1")?;
        let mut rows = stmt.query(&[&(self.bucket_hash()?)])?;
        match rows.next() {
            Some(row) => {
                let json: String = row?.get(0);
                let bucket: InventoryBucketDefinition =
                    serde_json::from_str(&json).with_context(|_| format!("deserializing JSON: {}", json))?;
                self.bucket = Some(bucket);
                Ok(())
            }
            None => bail!("No bucket def for hash!"),
        }
    }
    /// Clone each socket state and attach its plug's definition when the plug
    /// hash resolves; individual lookup failures are printed and skipped.
    fn fetch_plug_defs<'f, 'g>(&'f mut self, db: &'g Connection) -> Result<()> {
        self.plug_defs = self.plug_hashes()
            .iter()
            .map(|sock| {
                let mut sock = sock.clone();
                match sock.plug_hash {
                    Some(hash) => {
                        // The DB id column is signed, hence the i32 cast.
                        match fetch_plug_def(hash as i32, db) {
                            Ok(v) => {
                                sock.plug_def = Some(v);
                            }
                            Err(e) => {
                                println!("{:?}", e);
                            }
                        }
                    }
                    None => (),
                };
                sock
            })
            .collect();
        Ok(())
    }
    // NOTE(review): despite the name, this returns full socket states, not
    // bare hashes. Empty when the sockets component is absent.
    fn plug_hashes(&self) -> Vec<ItemSocketState> {
        self.sockets
            .clone()
            .and_then(|sockc| sockc.data.map(|socks| socks.sockets))
            .unwrap_or_default()
    }
    /// Two-character status: '*' or ' ' for equipped, then one of
    /// L/T/M/' ' for locked/tracked/masterwork/none.
    ///
    /// NOTE(review): the `.clone().and_then(|i| Some(..))` chains clone whole
    /// components to read one flag; `.as_ref().map(..)` would avoid that.
    pub fn holding_status(&self) -> String {
        let mut status = String::new();
        if self.instance.clone().and_then(|i| Some(i.data.is_equipped)).unwrap_or(false) {
            status.push('*')
        } else {
            status.push(' ')
        }
        match self.item.clone().and_then(|i| Some(i.data.state)).unwrap_or(enums::ItemState::None) {
            enums::ItemState::Locked => status.push('L'),
            enums::ItemState::Tracked => status.push('T'),
            enums::ItemState::Masterwork => status.push('M'),
            enums::ItemState::None => status.push(' '),
        }
        status
    }
    /// Display name of the item's bucket, or "" if no bucket def was fetched.
    pub fn bucket_name(&self) -> String {
        self.bucket.clone().map_or("".to_owned(),
            |b| b.display_properties.name.unwrap_or_default())
    }
    /// Item hash as i32, matching the manifest DB's signed id column.
    pub fn item_hash(&self) -> Result<i32> {
        self.item.clone().ok_or(format_err!("No item!")).map(|i| i.data.item_hash as i32)
    }
    /// Bucket hash as i32, matching the manifest DB's signed id column.
    pub fn bucket_hash(&self) -> Result<i32> {
        self.item.clone().ok_or(format_err!("No item!")).map(|i| i.data.bucket_hash as i32)
    }
    /// Display name from the fetched item definition, or "".
    pub fn item_name(&self) -> String {
        self.item_def.clone().map_or("".to_owned(),
            |i| i.display_properties.name.unwrap_or_default())
    }
    // pub fn level(&self) -> String {
    // self.instance.clone().map_or("".to_owned(), |inst| format!("{}", inst.data.item_level))
    // }
    //
    /// Tier name (Debug-formatted `TierType`), or "".
    pub fn tier(&self) -> String {
        self.item_def.clone().map_or("".to_owned(),
            |def| format!("{:?}", def.inventory.tier_type))
    }
    /// Item-type display name (e.g. from the manifest), or "".
    pub fn item_kind(&self) -> String {
        self.item_def.clone().map_or("".to_owned(), |def| def.item_type_display_name)
    }
    /// Primary stat (e.g. power) as a string; "0" when absent.
    pub fn stat_value(&self) -> String {
        format!("{}", self.stat_num())
    }
    fn stat_num(&self) -> i32 {
        self.instance.clone().map_or(0,
            |inst| inst.data.primary_stat.map(|s| s.value).unwrap_or(0))
    }
    /// Infusion category hash as a string; "0" when unknown.
    pub fn infusion_category(&self) -> String {
        format!("{}", self.infusion_category_hash())
    }
    fn infusion_category_hash(&self) -> u32 {
        self.item_def.clone().map_or(0,
            |i| i.quality.map_or(0, |q| q.infusion_category_hash.unwrap_or(0)))
    }
    /// Stat value with the bonus from a power-bumping legendary mod removed.
    /// NOTE(review): assumes such mods add exactly +5 — confirm.
    pub fn infusion_power(&self) -> String {
        if self.plug_defs.iter().any(|sock| sock.bumps_power()) {
            format!("{}", cmp::max(0, self.stat_num() - 5))
        } else {
            self.stat_value()
        }
    }
}
/// Item component wrapper: payload plus the component's privacy level.
#[derive(Deserialize, Debug, Clone)]
#[serde(rename_all = "camelCase")]
pub struct SingleItem {
    pub data: Item,
    pub privacy: i32,
}
#[derive(Deserialize, Debug, Clone)]
#[serde(rename_all = "camelCase")]
pub struct SingleItemInstance {
    pub data: ItemInstance,
    pub privacy: i32,
}
/// `data` is absent when the item has no sockets.
#[derive(Deserialize, Debug, Clone)]
#[serde(rename_all = "camelCase")]
pub struct ItemSocketsComponent {
    pub data: Option<ItemSockets>,
    pub privacy: i32,
}
#[derive(Deserialize, Debug, Clone)]
#[serde(rename_all = "camelCase")]
pub struct ItemSockets {
    pub sockets: Vec<ItemSocketState>,
}
/// State of one socket. `plug_def` is resolved locally from the manifest DB
/// (see `ItemResponse::fetch_plug_defs`) rather than sent by the API.
#[derive(Deserialize, Debug, Clone)]
#[serde(rename_all = "camelCase")]
pub struct ItemSocketState {
    pub plug_hash: Option<u32>,
    pub is_enabled: bool,
    pub enable_fail_indexes: Option<Vec<i32>>,
    pub reusable_plug_hashes: Option<Vec<u32>>,
    plug_def: Option<InventoryItemDefinition>,
}
impl ItemSocketState {
    // pub fn plug_name(&self) -> String {
    // self.plug_def.clone().map_or("".to_owned(),
    // |plug| plug.display_properties.name.unwrap_or_default())
    // }
    // pub fn plug_type(&self) -> String {
    // self.plug_def.clone().map_or("".to_owned(), |plug| plug.item_type_display_name)
    // }
    // pub fn plug_tier(&self) -> String {
    // format!("{:?}", self.tier())
    // }
    //
    /// Tier of the plugged item, or `Unknown` when no definition was resolved.
    fn tier(&self) -> enums::TierType {
        // Borrow the definition instead of cloning the whole struct just to
        // read one field (the original cloned all of `plug_def`).
        self.plug_def
            .as_ref()
            .map_or(enums::TierType::Unknown, |plug| plug.inventory.tier_type.clone())
    }
    /// Dotted plug-category identifier (e.g. "…enhancements…"), or "".
    pub fn category_id(&self) -> String {
        // Same de-cloning treatment: only the identifier String is cloned now.
        self.plug_def.as_ref().map_or_else(String::new, |plug| {
            plug.plug
                .as_ref()
                .map_or_else(String::new, |p| p.plug_category_identifier.clone())
        })
    }
    // pub fn is_enabled(&self) -> String {
    // format!("{}", self.is_enabled)
    // }
    //
    /// True when this socket holds a Legendary mod whose category marks it as
    /// inflating the item's displayed power (enhancement / damage-type mods).
    fn bumps_power(&self) -> bool {
        let cat = self.category_id();
        self.tier() == enums::TierType::Legendary &&
            (cat.contains("enhancements.") || cat.contains(".weapon.damage_type."))
    }
}
/// An item stack as it appears in an inventory or equipment list.
#[derive(Deserialize, Debug, Clone)]
#[serde(rename_all = "camelCase")]
pub struct Item {
    // omitted for the moment:
    // bindStatus
    // location
    // transferStatuses
    // state
    pub item_hash: u32,
    pub item_instance_id: Option<String>,
    pub quantity: i32,
    pub bucket_hash: u32,
    pub state: enums::ItemState,
}
/// Instance-specific data (level, primary stat, equip state) for one item.
#[derive(Deserialize, Debug, Clone)]
#[serde(rename_all = "camelCase")]
pub struct ItemInstance {
    // damageType
    // damageTypeHash
    pub primary_stat: Option<Stat>,
    pub item_level: i32,
    pub quality: i32,
    pub is_equipped: bool,
    pub can_equip: bool,
    pub equip_required_level: i32,
}
/// Manifest definition of an inventory bucket (a slot category).
#[derive(Deserialize, Debug, Clone)]
#[serde(rename_all = "camelCase")]
pub struct InventoryBucketDefinition {
    pub display_properties: DisplayProperties,
    pub scope: i32,
    pub category: i32,
}
/// Manifest definition of an inventory item; only the fields this tool reads.
#[derive(Deserialize, Debug, Clone)]
#[serde(rename_all = "camelCase")]
pub struct InventoryItemDefinition {
    pub display_properties: DisplayProperties,
    pub item_type_display_name: String,
    pub item_type: i32,
    pub item_sub_type: i32,
    pub quality: Option<QualityBlockDefinition>,
    pub plug: Option<PlugDefinition>,
    pub investment_stats: Vec<InvestmentStatDefinition>,
    pub inventory: InventoryBlockDefinition,
}
// many fields omitted. See online docs
/// A fixed stat contribution defined by the item.
#[derive(Deserialize, Debug, Clone)]
#[serde(rename_all = "camelCase")]
pub struct InvestmentStatDefinition {
    pub stat_type_hash: u32,
    pub value: i32,
}
/// The "inventory" block of an item definition: stacking and tier info.
#[derive(Deserialize, Debug, Clone)]
#[serde(rename_all = "camelCase")]
pub struct InventoryBlockDefinition {
    pub stack_unique_label: Option<String>,
    pub max_stack_size: i32,
    pub bucket_type_hash: u32,
    pub recovery_bucket_type_hash: Option<u32>,
    pub is_instance_item: bool,
    pub tier_type: enums::TierType,
}
/// The "plug" block of a mod/perk item definition.
#[derive(Deserialize, Debug, Clone)]
#[serde(rename_all = "camelCase")]
pub struct PlugDefinition {
    pub insertion_rules: Vec<PlugRuleDefinition>,
    // Dotted category string, e.g. used by `ItemSocketState::bumps_power`.
    pub plug_category_identifier: String,
    pub on_action_recreate_self: bool,
    pub insertion_material_requirement_hash: Option<u32>,
    pub enabled_material_requirement_hash: Option<u32>,
    pub enabled_rules: Vec<PlugRuleDefinition>,
}
/// Message shown when a plug rule prevents insertion/enabling.
#[derive(Deserialize, Debug, Clone)]
#[serde(rename_all = "camelCase")]
pub struct PlugRuleDefinition {
    pub failure_message: String,
}
// ("{\"
// displayProperties\":{\"hasIcon\":false},\"scope\":0,\"category\":0,\"bucketOrder\":0,\"itemCount\":1,\"location\":1,\"hasTransferDestination\":false,\"enabled\":false,\"fifo\":t
// rue,\"hash\":2422292810,\"index\":36,\"redacted\":false}"), State { next_error: Some(ErrorImpl { code: Message("missing field `description`"), line: 1, column: 38 })
//
/// Name/description block. Both fields are optional — the sample error
/// recorded above shows a bucket definition with neither.
#[derive(Deserialize, Debug, Clone)]
#[serde(rename_all = "camelCase")]
pub struct DisplayProperties {
    pub description: Option<String>,
    pub name: Option<String>,
}
/// Quality block; carries the infusion category used for infusion matching.
#[derive(Deserialize, Debug, Clone)]
#[serde(rename_all = "camelCase")]
pub struct QualityBlockDefinition {
    pub infusion_category_name: Option<String>,
    pub infusion_category_hash: Option<u32>,
}
/// A single stat value keyed by its stat hash.
#[derive(Deserialize, Debug, Clone)]
#[serde(rename_all = "camelCase")]
pub struct Stat {
    // maximum_value //unreliable, per docs
    pub stat_hash: u32,
    pub value: i32,
}
/// Per-character inventory/equipment component.
/// NOTE(review): map keys are presumably character ids — confirm with callers.
#[derive(Deserialize, Debug, Clone)]
#[serde(rename_all = "camelCase")]
pub struct CharacterEquipmentComponentResponse {
    pub data: HashMap<String, InventoryComponent>,
    pub privacy: i32,
}
/// Profile-wide inventory component (single, not per-character).
#[derive(Deserialize, Debug, Clone)]
#[serde(rename_all = "camelCase")]
pub struct InventoryComponentResponse {
    pub data: InventoryComponent,
    pub privacy: i32,
}
#[derive(Deserialize, Debug, Clone)]
#[serde(rename_all = "camelCase")]
pub struct InventoryComponent {
    pub items: Vec<Item>,
}
/// Entry in the manifest's gear asset database list.
#[derive(Deserialize, Debug)]
#[serde(rename_all = "camelCase")]
pub struct GearAssetDataBaseDefinition {
    version: i32,
    path: String,
}
/// A Bungie.net account plus its linked platform memberships.
#[derive(Deserialize, Debug)]
#[serde(rename_all = "camelCase")]
pub struct UserMembershipData {
    pub destiny_memberships: Vec<UserInfoCard>,
    pub bungie_net_user: GeneralUser,
}
/// Core Bungie.net user record.
#[derive(Deserialize, Debug)]
#[serde(rename_all = "camelCase")]
pub struct GeneralUser {
    pub membership_id: String,
    pub unique_name: String,
    pub display_name: String,
    pub is_deleted: bool,
}
/// One platform membership for a user.
#[derive(Deserialize, Debug, Clone)]
#[serde(rename_all = "camelCase")]
pub struct UserInfoCard {
    pub display_name: String,
    // Defaults to "" when the API omits the field.
    #[serde(default)]
    pub supplemental_display_name: String,
    pub membership_type: enums::BungieMemberType,
    pub membership_id: String,
}
impl UserInfoCard {
    /// Parse the decimal `membership_id` string into an i64; fails on
    /// non-numeric ids.
    pub fn id(&self) -> Result<i64> {
        Ok(self.membership_id.parse()?)
    }
}
|
// Public facade: re-export the runner's entry points.
pub use runner::{Runner, State};
mod runner;
mod lua_api;
// The unsafe_code lint is permitted only for this module.
#[allow(unsafe_code)]
mod context_wrap;
#![feature(test)]
extern crate strobe_rs;
extern crate test;
use test::Bencher;
use strobe_rs::{SecParam, Strobe};
// Benchmark one send_enc + recv_enc round trip over a 256-byte buffer,
// reusing a single Strobe state across iterations.
#[bench]
fn simple_bench(b: &mut Bencher) {
    let mut s = Strobe::new(b"simplebench", SecParam::B256);
    b.iter(|| {
        // Fresh zeroed buffer each iteration; both ops work in place.
        let mut v = [0u8; 256];
        s.send_enc(&mut v, false);
        s.recv_enc(&mut v, false);
    });
}
|
use eyre::Report;
use crate::{commands::fun::BgGameState, Context};
impl Context {
    /// Abort every active background game, notifying each channel, and return
    /// how many games were stopped. Shutdown-only path, hence `#[cold]`.
    #[cold]
    pub async fn stop_all_games(&self) -> usize {
        // Snapshot the channel ids first so the map is not iterated while
        // entries are removed below.
        let active_games: Vec<_> = self.bg_games().iter().map(|entry| *entry.key()).collect();
        if active_games.is_empty() {
            return 0;
        }
        let mut count = 0;
        let content = "I'll abort this game because I'm about to reboot, \
            you can start a new game again in just a moment...";
        // Builds (but does not send) the notification for one channel.
        let msg_fut = |channel| {
            self.http
                .create_message(channel)
                .content(content)
                .unwrap()
                .exec()
        };
        for channel in active_games {
            if let Some((_, state)) = self.bg_games().remove(&channel) {
                match state {
                    BgGameState::Running { game } => match game.stop() {
                        Ok(_) => {
                            // Best effort: a failed notification is ignored.
                            let _ = msg_fut(channel).await;
                            count += 1;
                        }
                        Err(err) => {
                            let wrap = format!("error while stopping game in channel {channel}");
                            let report = Report::new(err).wrap_err(wrap);
                            warn!("{report:?}");
                        }
                    },
                    BgGameState::Setup { .. } => {
                        // Nothing running yet; just notify and count it.
                        let _ = msg_fut(channel).await;
                        count += 1;
                    }
                }
            }
        }
        count
    }
}
|
use tokio;
use tokio::prelude::*;
use failure;
pub(crate) struct Packetizer<S> {}
fn wrap<S>(stream: S) -> Packetizer
where S: AsyncRead + AsyncWrite {
Packetizer {stream}
}
impl<S> Sink for Packetizer<S> {
}
|
// https://www.codewars.com/kata/530265044b7e23379d00076a
type Point = (f32, f32);
// ref: https://rosettacode.org/wiki/Ray-casting_algorithm
/// Ray-casting point-in-polygon test: a point is inside iff a horizontal ray
/// from it crosses the polygon boundary an odd number of times.
fn point_in_poly(poly: &[Point], point: Point) -> bool {
    // Close the loop by appending the first vertex at the end.
    let first = *poly.first().unwrap();
    let closed = [poly, &[first]].concat();
    // Toggle `inside` once for every edge the ray crosses.
    closed
        .windows(2)
        .fold(false, |inside, edge| {
            inside ^ point_intersects(point, &edge[0], &edge[1])
        })
}
/// Does the horizontal ray cast from `p` cross the segment `a`–`b`?
fn point_intersects(p: Point, a: &Point, b: &Point) -> bool {
    // Order the endpoints by their y coordinate.
    let (lower, higher) = if a.1 < b.1 { (a, b) } else { (b, a) };
    // Outside the edge's vertical span: no crossing.
    if p.1 < lower.1 || p.1 > higher.1 {
        return false;
    }
    // Strictly right of both endpoints: no crossing.
    if p.0 > a.0.max(b.0) {
        return false;
    }
    // Strictly left of both endpoints: guaranteed crossing.
    if p.0 < a.0.min(b.0) {
        return true;
    }
    // Otherwise compare slopes to decide which side of the edge p lies on.
    let edge_slope = (higher.1 - lower.1) / (higher.0 - lower.0);
    let p_slope = (p.1 - lower.1) / (p.0 - lower.0);
    p_slope >= edge_slope
}
#[cfg(test)]
mod tests {
    use super::*;
    #[test]
    fn intersects() {
        assert_eq!(point_intersects((0.0, 0.0), &(1.0, 1.0), &(1.0, -1.0)), true);
        assert_eq!(point_intersects((0.0, 0.0), &(-1.0, 1.0), &(-1.0, -1.0)), false);
        // NOTE(review): the next two assertions are exact duplicates.
        assert_eq!(point_intersects((0.0, 0.0), &(1.0, 1.0), &(0.0, -2.0)), true);
        assert_eq!(point_intersects((0.0, 0.0), &(1.0, 1.0), &(0.0, -2.0)), true);
    }
    #[test]
    fn simple_square() {
        let poly = [(-5., -5.), (5., -5.), (5., 5.), (-5., 5.)];
        assert_eq!(point_in_poly(&poly, (-6., 0.)), false);
        assert_eq!(point_in_poly(&poly, (-1., 1.)), true);
    }
    #[test]
    fn simple_triangle() {
        let poly = [(-5.0, -5.0), (5.0, -5.0), (0.0, 5.0)];
        // assert_eq!(point_in_poly(&poly, (-4.0, -4.0)), true);
        assert_eq!(point_in_poly(&poly, (1.0, 3.0)), true);
    }
}
|
// svd2rust-style reader/writer type aliases for the APB1RSTR2 bit fields.
#[doc = "Register `APB1RSTR2` reader"]
pub type R = crate::R<APB1RSTR2_SPEC>;
#[doc = "Register `APB1RSTR2` writer"]
pub type W = crate::W<APB1RSTR2_SPEC>;
#[doc = "Field `LPUART1RST` reader - Low-power UART 1 reset"]
pub type LPUART1RST_R = crate::BitReader;
#[doc = "Field `LPUART1RST` writer - Low-power UART 1 reset"]
pub type LPUART1RST_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `I2C4RST` reader - I2C4 reset"]
pub type I2C4RST_R = crate::BitReader;
#[doc = "Field `I2C4RST` writer - I2C4 reset"]
pub type I2C4RST_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `SWPMI1RST` reader - Single wire protocol reset"]
pub type SWPMI1RST_R = crate::BitReader;
#[doc = "Field `SWPMI1RST` writer - Single wire protocol reset"]
pub type SWPMI1RST_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `LPTIM2RST` reader - Low-power timer 2 reset"]
pub type LPTIM2RST_R = crate::BitReader;
#[doc = "Field `LPTIM2RST` writer - Low-power timer 2 reset"]
pub type LPTIM2RST_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
// Field accessors: each decodes one bit of the captured register value.
impl R {
    #[doc = "Bit 0 - Low-power UART 1 reset"]
    #[inline(always)]
    pub fn lpuart1rst(&self) -> LPUART1RST_R {
        LPUART1RST_R::new((self.bits & 1) != 0)
    }
    #[doc = "Bit 1 - I2C4 reset"]
    #[inline(always)]
    pub fn i2c4rst(&self) -> I2C4RST_R {
        I2C4RST_R::new(((self.bits >> 1) & 1) != 0)
    }
    #[doc = "Bit 2 - Single wire protocol reset"]
    #[inline(always)]
    pub fn swpmi1rst(&self) -> SWPMI1RST_R {
        SWPMI1RST_R::new(((self.bits >> 2) & 1) != 0)
    }
    #[doc = "Bit 5 - Low-power timer 2 reset"]
    #[inline(always)]
    pub fn lptim2rst(&self) -> LPTIM2RST_R {
        LPTIM2RST_R::new(((self.bits >> 5) & 1) != 0)
    }
}
// Typed single-bit field writers; the bit offset is the const generic.
impl W {
    #[doc = "Bit 0 - Low-power UART 1 reset"]
    #[inline(always)]
    #[must_use]
    pub fn lpuart1rst(&mut self) -> LPUART1RST_W<APB1RSTR2_SPEC, 0> {
        LPUART1RST_W::new(self)
    }
    #[doc = "Bit 1 - I2C4 reset"]
    #[inline(always)]
    #[must_use]
    pub fn i2c4rst(&mut self) -> I2C4RST_W<APB1RSTR2_SPEC, 1> {
        I2C4RST_W::new(self)
    }
    #[doc = "Bit 2 - Single wire protocol reset"]
    #[inline(always)]
    #[must_use]
    pub fn swpmi1rst(&mut self) -> SWPMI1RST_W<APB1RSTR2_SPEC, 2> {
        SWPMI1RST_W::new(self)
    }
    #[doc = "Bit 5 - Low-power timer 2 reset"]
    #[inline(always)]
    #[must_use]
    pub fn lptim2rst(&mut self) -> LPTIM2RST_W<APB1RSTR2_SPEC, 5> {
        LPTIM2RST_W::new(self)
    }
    #[doc = "Writes raw bits to the register."]
    #[inline(always)]
    // NOTE(review): unsafe because it bypasses the typed field writers above.
    pub unsafe fn bits(&mut self, bits: u32) -> &mut Self {
        self.bits = bits;
        self
    }
}
#[doc = "APB1 peripheral reset register 2\n\nYou can [`read`](crate::generic::Reg::read) this register and get [`apb1rstr2::R`](R). You can [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero) this register using [`apb1rstr2::W`](W). You can also [`modify`](crate::generic::Reg::modify) this register. See [API](https://docs.rs/svd2rust/#read--modify--write-api)."]
// Marker type tying the generated R/W machinery to this register.
pub struct APB1RSTR2_SPEC;
impl crate::RegisterSpec for APB1RSTR2_SPEC {
    // 32-bit register storage.
    type Ux = u32;
}
#[doc = "`read()` method returns [`apb1rstr2::R`](R) reader structure"]
impl crate::Readable for APB1RSTR2_SPEC {}
#[doc = "`write(|w| ..)` method takes [`apb1rstr2::W`](W) writer structure"]
impl crate::Writable for APB1RSTR2_SPEC {
    const ZERO_TO_MODIFY_FIELDS_BITMAP: Self::Ux = 0;
    const ONE_TO_MODIFY_FIELDS_BITMAP: Self::Ux = 0;
}
#[doc = "`reset()` method sets APB1RSTR2 to value 0"]
impl crate::Resettable for APB1RSTR2_SPEC {
    const RESET_VALUE: Self::Ux = 0;
}
|
//! # CKB AppConfig
//!
//! Because of a limitation of the toml library,
//! we must put nested config structs at the tail to make them serializable;
//! see details at https://docs.rs/toml/0.5.0/toml/ser/index.html
use std::fs;
use std::path::{Path, PathBuf};
use serde_derive::{Deserialize, Serialize};
use ckb_chain_spec::ChainSpec;
use ckb_db::DBConfig;
use ckb_logger::Config as LogConfig;
use ckb_miner::BlockAssemblerConfig;
use ckb_miner::MinerConfig;
use ckb_network::NetworkConfig;
use ckb_resource::{Resource, ResourceLocator};
use ckb_rpc::Config as RpcConfig;
use ckb_script::ScriptConfig;
use ckb_shared::tx_pool::TxPoolConfig;
use ckb_store::StoreConfig;
use ckb_sync::Config as SyncConfig;
use super::sentry_config::SentryConfig;
use super::{cli, ExitCode};
/// A loaded config file: where it came from plus its parsed content.
pub struct AppConfig {
    resource: Resource,
    content: AppConfigContent,
}
/// Parsed payload: either the full-node config or the miner config.
pub enum AppConfigContent {
    CKB(Box<CKBAppConfig>),
    Miner(Box<MinerAppConfig>),
}
// change the order of fields will break integration test, see module doc.
#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct CKBAppConfig {
    pub data_dir: PathBuf,
    pub logger: LogConfig,
    pub sentry: SentryConfig,
    pub chain: ChainConfig,
    pub block_assembler: Option<BlockAssemblerConfig>,
    // Not read from TOML; the path is filled in by `derive_options`.
    #[serde(skip)]
    pub db: DBConfig,
    pub network: NetworkConfig,
    pub rpc: RpcConfig,
    pub sync: SyncConfig,
    pub tx_pool: TxPoolConfig,
    pub script: ScriptConfig,
    pub store: StoreConfig,
}
// change the order of fields will break integration test, see module doc.
#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct MinerAppConfig {
    pub data_dir: PathBuf,
    pub chain: ChainConfig,
    pub logger: LogConfig,
    pub sentry: SentryConfig,
    pub miner: MinerConfig,
}
/// Pointer to the chain spec file (absolute, or relative to the config).
#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct ChainConfig {
    pub spec: PathBuf,
}
impl AppConfig {
    /// True when the config was loaded from a bundled resource rather than a
    /// file on disk.
    pub fn is_bundled(&self) -> bool {
        self.resource.is_bundled()
    }
    /// Load the right config file for `subcommand_name`: the miner config for
    /// the miner subcommand, the full CKB config for everything else. Derived
    /// paths (data dir, logs, db, network) are resolved as part of loading.
    pub fn load_for_subcommand(
        locator: &ResourceLocator,
        subcommand_name: &str,
    ) -> Result<AppConfig, ExitCode> {
        match subcommand_name {
            cli::CMD_MINER => {
                let resource = locator.miner();
                let config: MinerAppConfig = toml::from_slice(&resource.get()?)?;
                Ok(AppConfig {
                    resource,
                    content: AppConfigContent::with_miner(
                        config.derive_options(locator.root_dir())?,
                    ),
                })
            }
            _ => {
                let resource = locator.ckb();
                let config: CKBAppConfig = toml::from_slice(&resource.get()?)?;
                Ok(AppConfig {
                    resource,
                    content: AppConfigContent::with_ckb(
                        config.derive_options(locator.root_dir(), subcommand_name)?,
                    ),
                })
            }
        }
    }
    /// Logger section, common to both config variants.
    pub fn logger(&self) -> &LogConfig {
        match &self.content {
            AppConfigContent::CKB(config) => &config.logger,
            AppConfigContent::Miner(config) => &config.logger,
        }
    }
    /// Sentry section, common to both config variants.
    pub fn sentry(&self) -> &SentryConfig {
        match &self.content {
            AppConfigContent::CKB(config) => &config.sentry,
            AppConfigContent::Miner(config) => &config.sentry,
        }
    }
    /// Resolve the chain spec referenced by the config, relative to this
    /// config's resource; errors are printed and mapped to `ExitCode::Config`.
    pub fn chain_spec(&self, locator: &ResourceLocator) -> Result<ChainSpec, ExitCode> {
        let spec_path = PathBuf::from(match &self.content {
            AppConfigContent::CKB(config) => &config.chain.spec,
            AppConfigContent::Miner(config) => &config.chain.spec,
        });
        ChainSpec::resolve_relative_to(locator, spec_path, &self.resource).map_err(|err| {
            eprintln!("{}", err);
            ExitCode::Config
        })
    }
    /// Unwrap the CKB variant, or fail with `ExitCode::Failure`.
    pub fn into_ckb(self) -> Result<Box<CKBAppConfig>, ExitCode> {
        match self.content {
            AppConfigContent::CKB(config) => Ok(config),
            _ => {
                eprintln!("unmatched config file");
                Err(ExitCode::Failure)
            }
        }
    }
    /// Unwrap the miner variant, or fail with `ExitCode::Failure`.
    pub fn into_miner(self) -> Result<Box<MinerAppConfig>, ExitCode> {
        match self.content {
            AppConfigContent::Miner(config) => Ok(config),
            _ => {
                eprintln!("unmatched config file");
                Err(ExitCode::Failure)
            }
        }
    }
}
impl AppConfigContent {
    /// Box a full-node config into the enum.
    fn with_ckb(config: CKBAppConfig) -> AppConfigContent {
        AppConfigContent::CKB(Box::new(config))
    }
    /// Box a miner config into the enum.
    fn with_miner(config: MinerAppConfig) -> AppConfigContent {
        AppConfigContent::Miner(Box::new(config))
    }
}
impl CKBAppConfig {
    /// Resolve `data_dir` against `root_dir` and create the data, log, db,
    /// and network directories. The log file is named after the subcommand.
    fn derive_options(mut self, root_dir: &Path, subcommand_name: &str) -> Result<Self, ExitCode> {
        self.data_dir = canonicalize_data_dir(self.data_dir, root_dir)?;
        if self.logger.log_to_file {
            self.logger.file = Some(touch(
                mkdir(self.data_dir.join("logs"))?.join(subcommand_name.to_string() + ".log"),
            )?);
        }
        self.db.path = mkdir(self.data_dir.join("db"))?;
        self.network.path = mkdir(self.data_dir.join("network"))?;
        Ok(self)
    }
}
impl MinerAppConfig {
    /// Resolve `data_dir` against `root_dir` and, when file logging is on,
    /// create `logs/miner.log` under it.
    fn derive_options(mut self, root_dir: &Path) -> Result<Self, ExitCode> {
        self.data_dir = canonicalize_data_dir(self.data_dir, root_dir)?;
        if self.logger.log_to_file {
            self.logger.file = Some(touch(mkdir(self.data_dir.join("logs"))?.join("miner.log"))?);
        }
        Ok(self)
    }
}
/// Resolve `data_dir` against `root_dir` when it is relative, then ensure the
/// resulting directory exists and return it.
fn canonicalize_data_dir(data_dir: PathBuf, root_dir: &Path) -> Result<PathBuf, ExitCode> {
    // An absolute path is taken verbatim; a relative one is anchored at root_dir.
    let resolved = match data_dir.is_absolute() {
        true => data_dir,
        false => root_dir.join(data_dir),
    };
    mkdir(resolved)
}
/// Create `dir` (and parents) if missing and hand the path back.
fn mkdir(dir: PathBuf) -> Result<PathBuf, ExitCode> {
    fs::create_dir_all(&dir)?;
    // TODO: Use https://github.com/danreeves/path-clean to clean the
    // path if needed. std::fs::canonicalize will bring windows compatibility
    // problems
    Ok(dir)
}
/// Create the file if missing (opened in append mode so existing content is
/// preserved) and hand the path back.
fn touch(path: PathBuf) -> Result<PathBuf, ExitCode> {
    fs::OpenOptions::new()
        .create(true)
        .append(true)
        .open(&path)?;
    Ok(path)
}
// End-to-end tests: export template configs into a temp dir, reload them via
// `load_for_subcommand`, and check the derived paths and values.
//
// NOTE(review): `unwrap_or_else(|err| panic!(err))` panics with a non-string
// payload, which is deprecated (and rejected in the 2021 edition); prefer
// `panic!("{:?}", err)` or `.expect(..)` throughout.
#[cfg(test)]
mod tests {
    use super::*;
    use ckb_resource::TemplateContext;
    // Fresh temp dir per test; dropped (and deleted) at end of scope.
    fn mkdir() -> tempfile::TempDir {
        tempfile::Builder::new()
            .prefix("app_config_test")
            .tempdir()
            .unwrap()
    }
    #[cfg(all(unix, target_pointer_width = "64"))]
    #[test]
    fn test_ckb_toml() {
        let dir = mkdir();
        let locator = ResourceLocator::with_root_dir(dir.path().to_path_buf()).unwrap();
        let app_config = AppConfig::load_for_subcommand(&locator, cli::CMD_RUN)
            .unwrap_or_else(|err| panic!(err));
        let ckb_config = app_config.into_ckb().unwrap_or_else(|err| panic!(err));
        assert_eq!(ckb_config.chain.spec, PathBuf::from("specs/dev.toml"));
        // Log file is named after the subcommand ("run").
        assert_eq!(
            ckb_config.logger.file,
            Some(locator.root_dir().join("data/logs/run.log"))
        );
        assert_eq!(ckb_config.db.path, locator.root_dir().join("data/db"));
        assert_eq!(
            ckb_config.network.path,
            locator.root_dir().join("data/network")
        );
    }
    #[cfg(all(unix, target_pointer_width = "64"))]
    #[test]
    fn test_miner_toml() {
        let dir = mkdir();
        let locator = ResourceLocator::with_root_dir(dir.path().to_path_buf()).unwrap();
        let app_config = AppConfig::load_for_subcommand(&locator, cli::CMD_MINER)
            .unwrap_or_else(|err| panic!(err));
        let miner_config = app_config.into_miner().unwrap_or_else(|err| panic!(err));
        assert_eq!(miner_config.chain.spec, PathBuf::from("specs/dev.toml"));
        assert_eq!(
            miner_config.logger.file,
            Some(locator.root_dir().join("data/logs/miner.log"))
        );
    }
    #[test]
    fn test_export_dev_config_files() {
        let dir = mkdir();
        let locator = ResourceLocator::with_root_dir(dir.path().to_path_buf()).unwrap();
        let context = TemplateContext {
            spec: "dev",
            rpc_port: "7000",
            p2p_port: "8000",
            log_to_file: true,
            log_to_stdout: true,
            runner: "Rust",
            block_assembler: "",
        };
        {
            locator.export_ckb(&context).expect("export config files");
            let app_config = AppConfig::load_for_subcommand(&locator, cli::CMD_RUN)
                .unwrap_or_else(|err| panic!(err));
            let ckb_config = app_config.into_ckb().unwrap_or_else(|err| panic!(err));
            assert_eq!(ckb_config.logger.filter, Some("info".to_string()));
            assert_eq!(ckb_config.chain.spec, PathBuf::from("specs/dev.toml"));
            assert_eq!(
                ckb_config.network.listen_addresses,
                vec!["/ip4/0.0.0.0/tcp/8000".parse().unwrap()]
            );
            assert_eq!(ckb_config.network.connect_outbound_interval_secs, 15);
            assert_eq!(ckb_config.rpc.listen_address, "127.0.0.1:7000");
        }
        {
            locator.export_miner(&context).expect("export config files");
            let app_config = AppConfig::load_for_subcommand(&locator, cli::CMD_MINER)
                .unwrap_or_else(|err| panic!(err));
            let miner_config = app_config.into_miner().unwrap_or_else(|err| panic!(err));
            assert_eq!(miner_config.logger.filter, Some("info".to_string()));
            assert_eq!(miner_config.chain.spec, PathBuf::from("specs/dev.toml"));
            assert_eq!(miner_config.miner.client.rpc_url, "http://127.0.0.1:7000/");
        }
    }
    #[test]
    fn test_log_to_stdout_only() {
        let dir = mkdir();
        let locator = ResourceLocator::with_root_dir(dir.path().to_path_buf()).unwrap();
        let context = TemplateContext {
            spec: "dev",
            rpc_port: "7000",
            p2p_port: "8000",
            log_to_file: false,
            log_to_stdout: true,
            runner: "Rust",
            block_assembler: "",
        };
        {
            locator.export_ckb(&context).expect("export config files");
            let app_config = AppConfig::load_for_subcommand(&locator, cli::CMD_RUN)
                .unwrap_or_else(|err| panic!(err));
            let ckb_config = app_config.into_ckb().unwrap_or_else(|err| panic!(err));
            // With log_to_file disabled, no log file path must be derived.
            assert_eq!(ckb_config.logger.file, None);
            assert_eq!(ckb_config.logger.log_to_file, false);
            assert_eq!(ckb_config.logger.log_to_stdout, true);
        }
        {
            locator.export_miner(&context).expect("export config files");
            let app_config = AppConfig::load_for_subcommand(&locator, cli::CMD_MINER)
                .unwrap_or_else(|err| panic!(err));
            let miner_config = app_config.into_miner().unwrap_or_else(|err| panic!(err));
            assert_eq!(miner_config.logger.file, None);
            assert_eq!(miner_config.logger.log_to_file, false);
            assert_eq!(miner_config.logger.log_to_stdout, true);
        }
    }
    #[test]
    fn test_export_testnet_config_files() {
        let dir = mkdir();
        let locator = ResourceLocator::with_root_dir(dir.path().to_path_buf()).unwrap();
        let context = TemplateContext {
            spec: "testnet",
            rpc_port: "7000",
            p2p_port: "8000",
            log_to_file: true,
            log_to_stdout: true,
            runner: "Rust",
            block_assembler: "",
        };
        locator.export_ckb(&context).expect("export config files");
        {
            let app_config = AppConfig::load_for_subcommand(&locator, cli::CMD_RUN)
                .unwrap_or_else(|err| panic!(err));
            let ckb_config = app_config.into_ckb().unwrap_or_else(|err| panic!(err));
            assert_eq!(ckb_config.logger.filter, Some("info".to_string()));
            assert_eq!(ckb_config.chain.spec, PathBuf::from("specs/testnet.toml"));
            assert_eq!(
                ckb_config.network.listen_addresses,
                vec!["/ip4/0.0.0.0/tcp/8000".parse().unwrap()]
            );
            assert_eq!(ckb_config.network.connect_outbound_interval_secs, 15);
            assert_eq!(ckb_config.rpc.listen_address, "127.0.0.1:7000");
        }
        {
            locator.export_miner(&context).expect("export config files");
            let app_config = AppConfig::load_for_subcommand(&locator, cli::CMD_MINER)
                .unwrap_or_else(|err| panic!(err));
            let miner_config = app_config.into_miner().unwrap_or_else(|err| panic!(err));
            assert_eq!(miner_config.logger.filter, Some("info".to_string()));
            assert_eq!(miner_config.chain.spec, PathBuf::from("specs/testnet.toml"));
            assert_eq!(miner_config.miner.client.rpc_url, "http://127.0.0.1:7000/");
        }
    }
    #[test]
    fn test_export_integration_config_files() {
        let dir = mkdir();
        let locator = ResourceLocator::with_root_dir(dir.path().to_path_buf()).unwrap();
        let context = TemplateContext {
            spec: "integration",
            rpc_port: "7000",
            p2p_port: "8000",
            log_to_file: true,
            log_to_stdout: true,
            runner: "Rust",
            block_assembler: "",
        };
        locator.export_ckb(&context).expect("export config files");
        {
            let app_config = AppConfig::load_for_subcommand(&locator, cli::CMD_RUN)
                .unwrap_or_else(|err| panic!(err));
            let ckb_config = app_config.into_ckb().unwrap_or_else(|err| panic!(err));
            assert_eq!(
                ckb_config.chain.spec,
                PathBuf::from("specs/integration.toml")
            );
            assert_eq!(
                ckb_config.network.listen_addresses,
                vec!["/ip4/0.0.0.0/tcp/8000".parse().unwrap()]
            );
            assert_eq!(ckb_config.rpc.listen_address, "127.0.0.1:7000");
        }
        {
            locator.export_miner(&context).expect("export config files");
            let app_config = AppConfig::load_for_subcommand(&locator, cli::CMD_MINER)
                .unwrap_or_else(|err| panic!(err));
            let miner_config = app_config.into_miner().unwrap_or_else(|err| panic!(err));
            assert_eq!(
                miner_config.chain.spec,
                PathBuf::from("specs/integration.toml")
            );
            assert_eq!(miner_config.miner.client.rpc_url, "http://127.0.0.1:7000/");
        }
    }
    #[cfg(all(unix, target_pointer_width = "64"))]
    #[test]
    fn test_export_dev_config_files_assembly() {
        let dir = mkdir();
        let locator = ResourceLocator::with_root_dir(dir.path().to_path_buf()).unwrap();
        let context = TemplateContext {
            spec: "dev",
            rpc_port: "7000",
            p2p_port: "8000",
            log_to_file: true,
            log_to_stdout: true,
            runner: "Assembly",
            block_assembler: "",
        };
        {
            locator.export_ckb(&context).expect("export config files");
            let app_config = AppConfig::load_for_subcommand(&locator, cli::CMD_RUN)
                .unwrap_or_else(|err| panic!(err));
            let ckb_config = app_config.into_ckb().unwrap_or_else(|err| panic!(err));
            assert_eq!(ckb_config.logger.filter, Some("info".to_string()));
            assert_eq!(ckb_config.chain.spec, PathBuf::from("specs/dev.toml"));
            assert_eq!(
                ckb_config.network.listen_addresses,
                vec!["/ip4/0.0.0.0/tcp/8000".parse().unwrap()]
            );
            assert_eq!(ckb_config.network.connect_outbound_interval_secs, 15);
            assert_eq!(ckb_config.rpc.listen_address, "127.0.0.1:7000");
        }
        {
            locator.export_miner(&context).expect("export config files");
            let app_config = AppConfig::load_for_subcommand(&locator, cli::CMD_MINER)
                .unwrap_or_else(|err| panic!(err));
            let miner_config = app_config.into_miner().unwrap_or_else(|err| panic!(err));
            assert_eq!(miner_config.logger.filter, Some("info".to_string()));
            assert_eq!(miner_config.chain.spec, PathBuf::from("specs/dev.toml"));
            assert_eq!(miner_config.miner.client.rpc_url, "http://127.0.0.1:7000/");
        }
    }
}
|
use roaster::editor::Editor;
fn main() {
let mut editor = Editor::new();
editor.run();
} |
//! This module contains a [`Disable`] structure which helps to
//! remove either a column or a row from a [`Table`].
//!
//! # Example
//!
//! ```rust,no_run
//! # use tabled::{Table, settings::{Disable, object::Rows}};
//! # let data: Vec<&'static str> = Vec::new();
//! let table = Table::new(&data).with(Disable::row(Rows::first()));
//! ```
//!
//! [`Table`]: crate::Table
use std::marker::PhantomData;
use crate::{
grid::records::{ExactRecords, Records, Resizable},
settings::{locator::Locator, TableOption},
};
/// Disable removes particular rows/columns from a [`Table`].
///
/// It tries to keep track of style changes which may occur.
/// But it's not guaranteed to be the way you would expect it to be.
///
/// Generally you should avoid use of [`Disable`] because it's a slow function and modifies the underlying records.
/// Providing correct data right away is better.
///
/// # Example
///
/// ```
/// use tabled::{Table, settings::{Disable, object::Rows}};
///
/// let data = vec!["Hello", "World", "!!!"];
///
/// let table = Table::new(data).with(Disable::row(Rows::new(1..2))).to_string();
///
/// assert_eq!(
/// table,
/// "+-------+\n\
/// | &str |\n\
/// +-------+\n\
/// | World |\n\
/// +-------+\n\
/// | !!! |\n\
/// +-------+"
/// );
///
/// ```
/// [`Table`]: crate::Table
#[derive(Debug)]
pub struct Disable<L, Target> {
    // Strategy used to find the rows/columns to remove.
    locator: L,
    // Zero-sized tag ([`TargetRow`]/[`TargetColumn`]) selecting the axis.
    target: PhantomData<Target>,
}
impl<L> Disable<L, TargetColumn> {
/// Disable columns.
///
/// Available locators are:
///
/// - [`Columns`]
/// - [`Column`]
/// - [`FirstColumn`]
/// - [`LastColumn`]
/// - [`ByColumnName`]
///
/// ```rust
/// use tabled::{builder::Builder, settings::{Disable, locator::ByColumnName, object::Columns}};
///
/// let mut builder = Builder::default();
///
/// builder.push_record(["col1", "col2", "col3"]);
/// builder.push_record(["Hello", "World", "1"]);
///
/// let table = builder.build()
/// .with(Disable::column(ByColumnName::new("col3")))
/// .to_string();
///
/// assert_eq!(
/// table,
/// "+-------+-------+\n\
/// | col1 | col2 |\n\
/// +-------+-------+\n\
/// | Hello | World |\n\
/// +-------+-------+"
/// );
/// ```
///
/// [`Columns`]: crate::settings::object::Columns
/// [`Column`]: crate::settings::object::Column
/// [`FirstColumn`]: crate::settings::object::FirstColumn
/// [`LastColumn`]: crate::settings::object::LastColumn
/// [`ByColumnName`]: crate::settings::locator::ByColumnName
pub fn column(locator: L) -> Self {
Self {
locator,
target: PhantomData,
}
}
}
impl<L> Disable<L, TargetRow> {
/// Disable rows.
///
/// Available locators are:
///
/// - [`Rows`]
/// - [`Row`]
/// - [`FirstRow`]
/// - [`LastRow`]
///
/// ```rust
/// use tabled::{settings::{Disable, object::Rows}, builder::Builder};
///
/// let mut builder = Builder::default();
/// builder.push_record(["col1", "col2", "col3"]);
/// builder.push_record(["Hello", "World", "1"]);
///
/// let table = builder.build()
/// .with(Disable::row(Rows::first()))
/// .to_string();
///
/// assert_eq!(
/// table,
/// "+-------+-------+---+\n\
/// | Hello | World | 1 |\n\
/// +-------+-------+---+"
/// );
/// ```
///
/// [`Rows`]: crate::settings::object::Rows
/// [`Row`]: crate::settings::object::Row
/// [`FirstRow`]: crate::settings::object::FirstRow
/// [`LastRow`]: crate::settings::object::LastRow
pub fn row(locator: L) -> Self {
Self {
locator,
target: PhantomData,
}
}
}
/// A marker struct for [`Disable`] selecting row removal.
#[derive(Debug)]
pub struct TargetRow;
/// A marker struct for [`Disable`] selecting column removal.
#[derive(Debug)]
pub struct TargetColumn;
impl<L, R, D, C> TableOption<R, D, C> for Disable<L, TargetColumn>
where
    for<'a> L: Locator<&'a R, Coordinate = usize>,
    R: Records + Resizable,
{
    /// Removes every column selected by the locator.
    ///
    /// `shift` compensates for earlier removals renumbering later columns
    /// (assumes the locator yields ascending indices — TODO confirm).
    fn change(mut self, records: &mut R, _: &mut C, _: &mut D) {
        let columns = self.locator.locate(records).into_iter().collect::<Vec<_>>();
        let mut shift = 0;
        for col in columns.into_iter() {
            // `count_columns()` is a length, so an adjusted index equal to it
            // is already out of range; the previous `>` guard was off by one.
            if col - shift >= records.count_columns() {
                continue;
            }
            records.remove_column(col - shift);
            shift += 1;
        }
        // fixme: I am pretty sure that we violate span constrains by removing rows/cols
        // Because span may be bigger then the max number of rows/cols
    }
}
impl<L, R, D, C> TableOption<R, D, C> for Disable<L, TargetRow>
where
    for<'a> L: Locator<&'a R, Coordinate = usize>,
    R: ExactRecords + Resizable,
{
    /// Removes every row selected by the locator.
    ///
    /// `shift` compensates for earlier removals renumbering later rows
    /// (assumes the locator yields ascending indices — TODO confirm).
    fn change(mut self, records: &mut R, _: &mut C, _: &mut D) {
        let rows = self.locator.locate(records).into_iter().collect::<Vec<_>>();
        let mut shift = 0;
        for row in rows.into_iter() {
            // `count_rows()` is a length, so an adjusted index equal to it is
            // already out of range; the previous `>` guard was off by one.
            if row - shift >= records.count_rows() {
                continue;
            }
            records.remove_row(row - shift);
            shift += 1;
        }
        // fixme: I am pretty sure that we violate span constrains by removing rows/cols
        // Because span may be bigger then the max number of rows/cols
    }
}
|
use ctrlc;
use dotenv;
use std::env;
use std::error::Error;
use std::fs::OpenOptions;
use std::io::prelude::*;
use std::net::{TcpListener, TcpStream};
use std::process::exit;
use std::sync::{mpsc, Arc, Mutex};
use std::thread::spawn;
mod cgi;
mod filereader;
mod parser;
mod thread_pool;
/// Messages understood by the dedicated logger thread.
enum LoggingSignal {
    // Append the contained line to the logfile.
    Logging(String),
    // Stop the logger loop so the file handle is closed.
    Shutdown,
}
/// Entry point: binds the listener, spawns a logger thread fed over an mpsc
/// channel, installs a Ctrl-C handler that shuts both down, then serves
/// requests on a 10-worker thread pool until the process is killed.
fn main() -> Result<(), Box<dyn Error>> {
    dotenv::dotenv().ok();
    // Configuration comes from the environment (via .env) with defaults.
    let host = env::var("HOST").unwrap_or_else(|_| "127.0.0.1".into());
    let port = env::var("PORT").unwrap_or_else(|_| "8000".into());
    let logfile = env::var("LOG_FILE").unwrap_or_else(|_| "log/logfile.txt".into());
    let listener = TcpListener::bind(format!("{}:{}", host, port)).unwrap();
    let pool = Arc::new(Mutex::new(thread_pool::ThreadPool::new(10)?));
    let pool_handler = pool.clone();
    // (Almost) Gracefully exit.
    let (log_sender, log_receiver) = mpsc::channel();
    // Logger thread: appends every received message to the logfile.
    let t = spawn(move || {
        let mut file = OpenOptions::new()
            .create(true)
            .write(true)
            .append(true)
            .open(logfile)
            .unwrap();
        loop {
            match log_receiver.recv() {
                Ok(LoggingSignal::Logging(message)) => {
                    // write_all: a bare `write` may return after a partial
                    // write, silently truncating log entries.
                    if let Err(e) = file.write_all(message.as_bytes()) {
                        println!("Failed to write log entry: {:?}", e);
                    }
                }
                Ok(LoggingSignal::Shutdown) => {
                    println!("Logger exits, close logfile handler!");
                    break;
                }
                Err(e) => {
                    // All senders are gone. Break instead of spinning: recv()
                    // on a disconnected channel fails again immediately, so
                    // the old code busy-looped here forever.
                    println!("Logger exits");
                    println!("{:?}", e);
                    break;
                }
            }
        }
    });
    // Option wrapper lets the FnMut Ctrl-C handler move the JoinHandle out
    // exactly once via `take()` (replaces the old `Some(Some(..))` dance).
    let mut t = Some(t);
    let log_sender_shutdown = log_sender.clone();
    ctrlc::set_handler(move || {
        let mut pool = pool_handler.lock().unwrap();
        pool.manual_drop();
        match t.take() {
            Some(t) => {
                // Best effort: the logger may have exited already.
                let _ = log_sender_shutdown.send(LoggingSignal::Shutdown);
                let _ = t.join();
            }
            None => {
                println!("Logger thread have been destroyed!");
            }
        };
        exit(0);
    })
    .unwrap();
    for stream in listener.incoming() {
        if stream.is_err() {
            if cfg!(feature = "debug") {
                println!("get a Err incoming stream");
            }
            continue;
        }
        let stream = stream.unwrap();
        if cfg!(feature = "debug") {
            println!("[main:86]: try to get the lock of pool.");
        }
        let pool = pool.lock().unwrap();
        let log_sender = log_sender.clone();
        pool.execute(move || {
            let log = handle_connection(stream);
            if cfg!(feature = "debug") {
                println!("[main:89]: get log, prepared to send.");
            }
            // Best effort: if the logger is gone there is nowhere to log to.
            let _ = log_sender.send(LoggingSignal::Logging(log));
            if cfg!(feature = "debug") {
                println!("[main:94]: send log.");
            }
            std::mem::drop(log_sender);
        });
    }
    println!("Won't execute here.");
    Ok(())
}
/// Handles one HTTP connection end-to-end and returns a line for the logfile.
///
/// The request is read once into a 2 KiB buffer (larger requests are
/// truncated — NOTE(review): no loop to read the full request; confirm this
/// is acceptable for the expected workloads), parsed, dispatched either to a
/// CGI script or the static file reader, and the response written back.
fn handle_connection(mut stream: TcpStream) -> String {
    if cfg!(feature = "debug") {
        println!("start to read buffer {:?}", stream);
    }
    // Best effort: if the timeout cannot be set, reads simply block longer.
    let _ = stream.set_read_timeout(Some(std::time::Duration::from_millis(500)));
    let mut buffer = [0; 2048];
    // A read error OR an immediate EOF (0 bytes) both mean there is no
    // request to parse.
    match stream.read(&mut buffer) {
        Err(_) | Ok(0) => return "Empty Stream".to_string(),
        Ok(_) => {}
    }
    if cfg!(feature = "debug") {
        println!("buffer read. {:?}", stream);
    }
    let b = parser::parser(&buffer);
    if b.is_err() {
        if cfg!(feature = "debug") {
            println!("bad request!");
        }
        let time = chrono::Local::now().to_rfc3339();
        return format!("{} {}\n", time, "bad request!");
    }
    let b = b.unwrap();
    let response = if b.iscgi {
        // CGI request: dispatch by method.
        let result = if b.method == "GET" {
            cgi::cgi_caller_get(&b.path, &b.query_string)
        } else {
            cgi::cgi_caller_post(&b.path, &b.content_length, &b.content_type, &b.body_string)
        };
        match result {
            Ok(content) => {
                let status_line = "HTTP/1.1 200 OK";
                format!("{}\r\n{}", status_line, content)
            }
            _ => {
                let status_line = "HTTP/1.1 404 Not Found";
                format!("{}", status_line)
            }
        }
    } else {
        // Static file request.
        let res = filereader::readfile(b.path.clone());
        match res {
            Some(res) => {
                let status_line = "HTTP/1.1 200 OK";
                let content_type = res.1;
                let content = res.0;
                format!("{}\r\n{}\r\n\r\n{}", status_line, content_type, content)
            }
            _ => {
                let status_line = "HTTP/1.1 404 Not Found";
                format!("{}", status_line)
            }
        }
    };
    // write_all + flush instead of unwrap(): a client that hung up would
    // otherwise panic the worker thread on a broken pipe.
    if let Err(e) = stream
        .write_all(response.as_bytes())
        .and_then(|_| stream.flush())
    {
        let time = chrono::Local::now().to_rfc3339();
        return format!("{} write error: {:?}\n", time, e);
    }
    let time = chrono::Local::now().to_rfc3339();
    format!(
        "{} {} {} {} {} {} {}\n",
        time, b.host, b.method, b.user, b.url, b.path, b.query_string
    )
}
|
use amethyst::{
assets::AssetStorage,
audio::{output::Output, Source},
core::transform::Transform,
derive::SystemDesc,
ecs::{Join, Read, ReadExpect, System, SystemData, WriteStorage},
};
use crate::audio::{play_score_sound, Sounds};
use crate::components::ball::{Ball, BALL_VELOCITY_X, BALL_VELOCITY_Y};
// use crate::components::scoreboard::{ScoreBoard};
use crate::pong::{ARENA_HEIGHT, ARENA_WIDTH};
/// System that reacts to a ball crossing the left or right arena edge.
#[derive(SystemDesc)]
pub struct ScoreSystem;
// Name under which this system is registered with the dispatcher.
pub const SCORE_SYSTEM: &str = "score_system";
impl<'s> System<'s> for ScoreSystem {
    type SystemData = (
        WriteStorage<'s, Ball>,
        WriteStorage<'s, Transform>,
        Read<'s, AssetStorage<Source>>,
        ReadExpect<'s, Sounds>,
        Option<Read<'s, Output>>,
    );
    /// Checks each ball against the left/right arena edges; on a hit, plays
    /// the score sound, resets the velocity, and re-centres the ball.
    fn run(
        &mut self,
        (mut balls, mut locals, storage, sounds, audio_output): Self::SystemData,
    ) {
        for (ball, transform) in (&mut balls, &mut locals).join() {
            let bx = transform.translation().x;
            let did_hit = if bx <= ball.radius {
                // TODO trigger score event (left edge)
                true
            } else if bx >= ARENA_WIDTH - ball.radius {
                // TODO trigger score event (right edge)
                true
            } else {
                false
            };
            if did_hit {
                play_score_sound(&*sounds, &storage, audio_output.as_deref());
                // NOTE(review): `x.min(-1.).max(1.)` always evaluates to 1.0
                // (min clamps to <= -1, max then forces 1), so both components
                // are reset to +BALL_VELOCITY_X/Y regardless of the previous
                // direction. Likely `.signum()` (or `.max(-1.).min(1.)`) was
                // intended — confirm the intended serve direction before fixing.
                ball.velocity[0] = BALL_VELOCITY_X * ball.velocity[0].min(-1.).max(1.);
                ball.velocity[1] = BALL_VELOCITY_Y * ball.velocity[1].min(-1.).max(1.);
                transform.set_translation_x(ARENA_WIDTH / 2.0);
                transform.set_translation_y(ARENA_HEIGHT / 2.0);
            }
        }
    }
}
|
use crate::data::Pair;
use crate::non_empty::NonEmpty;
/// All failure modes surfaced by this crate's API.
#[derive(Debug)]
pub enum Error {
    // JSON (de)serialization failed.
    Json(serde_json::Error),
    // HTTP transport failure (only present without the `minimal` feature).
    #[cfg(not(feature = "minimal"))]
    Hyper(hyper::error::Error),
    // Underlying I/O failure.
    Io(std::io::Error),
    // A URI string could not be parsed.
    Uri(http::uri::InvalidUri),
    // The remote answered with something we could not interpret.
    InvalidResponse(String),
    // Building or processing an HTTP request/response failed.
    Http(http::Error),
    // Pairs that the exchange/backend does not support.
    UnsupportedPairs(NonEmpty<Pair>),
    // Catch-all for errors with only a message.
    Other(String),
}
impl From<http::uri::InvalidUri> for Error {
fn from(uri_err: http::uri::InvalidUri) -> Self {
Error::Uri(uri_err)
}
}
impl From<http::Error> for Error {
fn from(http_err: http::Error) -> Self {
Error::Http(http_err)
}
}
impl From<serde_json::Error> for Error {
fn from(json_err: serde_json::Error) -> Self {
Error::Json(json_err)
}
}
#[cfg(not(feature = "minimal"))]
impl From<hyper::error::Error> for Error {
fn from(hyper_err: hyper::error::Error) -> Self {
Error::Hyper(hyper_err)
}
}
impl From<std::io::Error> for Error {
fn from(io_err: std::io::Error) -> Self {
Error::Io(io_err)
}
}
|
use std::collections::HashMap;
use std::ffi::OsString;
use crate::common::context::LaunchType;
use crate::common::{error::Error, Context};
use crate::log::{dev_info, user_warn};
use crate::pam::{CLIConverser, Converser, PamContext, PamError, PamErrorType, PamResult};
use crate::system::term::current_tty_name;
use super::pipeline::AuthPlugin;
type PamBuilder<C> = dyn Fn(&Context) -> PamResult<PamContext<C>>;
/// An authentication plugin backed by PAM.
pub struct PamAuthenticator<C: Converser> {
    // Factory that creates the PAM context when `init` is called.
    builder: Box<PamBuilder<C>>,
    // The live PAM context; `None` until `init` has run.
    pam: Option<PamContext<C>>,
}
impl<C: Converser> PamAuthenticator<C> {
fn new(
initializer: impl Fn(&Context) -> PamResult<PamContext<C>> + 'static,
) -> PamAuthenticator<C> {
PamAuthenticator {
builder: Box::new(initializer),
pam: None,
}
}
}
impl PamAuthenticator<CLIConverser> {
    /// Authenticator that converses on the terminal; the current user is both
    /// the account being authenticated and the requesting account.
    pub fn new_cli() -> PamAuthenticator<CLIConverser> {
        PamAuthenticator::new(|context| {
            let is_login_shell = matches!(context.launch, LaunchType::Login);
            let is_shell = matches!(context.launch, LaunchType::Shell);
            let user = &context.current_user.name;
            init_pam(
                is_login_shell,
                is_shell,
                context.stdin,
                context.non_interactive,
                user,
                user,
            )
        })
    }
}
impl<C: Converser> AuthPlugin for PamAuthenticator<C> {
    /// Builds the PAM context via the stored factory; must run before any
    /// other plugin method.
    fn init(&mut self, context: &Context) -> Result<(), Error> {
        self.pam = Some((self.builder)(context)?);
        Ok(())
    }
    /// Runs PAM authentication with retry handling (see `attempt_authenticate`).
    fn authenticate(&mut self, non_interactive: bool, max_tries: u16) -> Result<(), Error> {
        let pam = self
            .pam
            .as_mut()
            .expect("Pam must be initialized before authenticate");
        attempt_authenticate(pam, non_interactive, max_tries)?;
        Ok(())
    }
    /// Validates the account, switches PAM to `target_user`, opens the
    /// session, and returns the environment PAM wants the child to inherit.
    fn pre_exec(&mut self, target_user: &str) -> Result<HashMap<OsString, OsString>, Error> {
        let pam = self
            .pam
            .as_mut()
            .expect("Pam must be initialized before pre_exec");
        // make sure that the user that needed to authenticate has a valid token
        pam.validate_account_or_change_auth_token()?;
        // check what the current user in PAM is
        let user = pam.get_user()?;
        if user != target_user {
            // switch pam over to the target user
            pam.set_user(target_user)?;
            // make sure that credentials are loaded for the target user
            // errors are ignored because not all modules support this functionality
            if let Err(e) = pam.credentials_reinitialize() {
                dev_info!(
                    "PAM gave an error while trying to re-initialize credentials: {:?}",
                    e
                );
            }
        }
        pam.open_session()?;
        let env_vars = pam.env()?;
        Ok(env_vars)
    }
    /// Ends the PAM session.
    fn cleanup(&mut self) {
        let pam = self
            .pam
            .as_mut()
            .expect("Pam must be initialized before cleanup");
        // closing the pam session is best effort, if any error occurs we cannot
        // do anything with it
        let _ = pam.close_session();
    }
}
/// Constructs a CLI-conversing PAM context for sudo.
///
/// `is_login_shell` selects the `sudo-i` service (vs `sudo`); `auth_user` is
/// the account being authenticated while `requesting_user` is the invoking
/// account. PAM is put in silent mode unless a (login) shell is requested.
pub fn init_pam(
    is_login_shell: bool,
    is_shell: bool,
    use_stdin: bool,
    non_interactive: bool,
    auth_user: &str,
    requesting_user: &str,
) -> PamResult<PamContext<CLIConverser>> {
    let service_name = if is_login_shell { "sudo-i" } else { "sudo" };
    let mut pam = PamContext::builder_cli("sudo", use_stdin, non_interactive)
        .service_name(service_name)
        .build()?;
    pam.mark_silent(!is_shell && !is_login_shell);
    // An empty authentication token must never be accepted silently.
    pam.mark_allow_null_auth_token(false);
    pam.set_requesting_user(requesting_user)?;
    pam.set_user(auth_user)?;
    // attempt to set the TTY this session is communicating on
    if let Ok(pam_tty) = current_tty_name() {
        pam.set_tty(&pam_tty)?;
    }
    Ok(pam)
}
/// Repeatedly asks PAM to authenticate, allowing up to `max_tries` attempts.
///
/// Returns `Error::MaxAuthAttempts` when PAM itself reports `MaxTries` or
/// our own counter is exhausted; in non-interactive mode a failed attempt is
/// immediately fatal because the user cannot be prompted again. Any other
/// PAM error is propagated as-is.
pub fn attempt_authenticate<C: Converser>(
    pam: &mut PamContext<C>,
    non_interactive: bool,
    mut max_tries: u16,
) -> Result<(), Error> {
    let mut current_try = 0;
    loop {
        current_try += 1;
        match pam.authenticate() {
            // there was no error, so authentication succeeded
            Ok(_) => break,
            // maxtries was reached, pam does not allow any more tries
            Err(PamError::Pam(PamErrorType::MaxTries, _)) => {
                return Err(Error::MaxAuthAttempts(current_try));
            }
            // there was an authentication error, we can retry
            Err(PamError::Pam(PamErrorType::AuthError, _)) => {
                // saturating_sub avoids an integer-underflow panic when the
                // caller passes `max_tries == 0`.
                max_tries = max_tries.saturating_sub(1);
                if max_tries == 0 {
                    return Err(Error::MaxAuthAttempts(current_try));
                } else if non_interactive {
                    return Err(Error::Authentication("interaction required".to_string()));
                } else {
                    user_warn!("Authentication failed, try again.");
                }
            }
            // there was another pam error, return the error
            Err(e) => {
                return Err(e.into());
            }
        }
    }
    Ok(())
}
|
use super::state;
|
use std::{borrow::Cow, cmp::Reverse, fmt::Write};
use hashbrown::HashMap;
use rosu_v2::prelude::{Grade, Team, Username};
use crate::{
commands::osu::{
CommonMap, MatchCompareComparison, MatchCompareScore, ProcessedMatch, UniqueMap,
},
embeds::{Author, EmbedFields, Footer},
util::{
constants::OSU_BASE,
numbers::{round, with_comma_int},
osu::grade_emote,
},
};
/// Discord embed showing one map that appears in both compared matches.
pub struct MatchCompareMapEmbed {
    // "Match compare - Common map i/n" header.
    author: Author,
    // Page / common-map counters.
    footer: Footer,
    // The map name.
    title: String,
    // Link to the beatmap.
    url: String,
    // Score listings and/or team totals, depending on the comparison mode.
    fields: EmbedFields,
}
impl MatchCompareMapEmbed {
    /// Builds the embed for one common map.
    ///
    /// `users` maps user ids to names for the score rows; `comparison`
    /// selects which field groups appear (per-player scores, team totals, or
    /// both). The tuple is (1-based index of this common map, number of
    /// common maps, total maps) and drives the author/footer text.
    pub fn new(
        map: CommonMap,
        match_1: &str,
        match_2: &str,
        users: &HashMap<u32, Username>,
        comparison: MatchCompareComparison,
        (common_idx, common_total, maps_total): (usize, usize, usize),
    ) -> Self {
        let author_text = format!("Match compare - Common map {common_idx}/{common_total}");
        let author = Author::new(author_text);
        // The +2 accounts for the two summary pages around the per-map pages.
        let footer_text = format!(
            "Page {common_idx}/{pages} | Common maps: {common_total}/{maps_total}",
            pages = common_total + 2,
        );
        let footer = Footer::new(footer_text);
        let fields = match comparison {
            MatchCompareComparison::Both => {
                vec![
                    field!(
                        match_1,
                        prepare_scores(&map.match_1, map.match_1_scores, users, false),
                        false
                    ),
                    field!(
                        match_2,
                        prepare_scores(&map.match_2, map.match_2_scores, users, false),
                        false
                    ),
                    field!(
                        "Total team scores",
                        team_scores(&map, match_1, match_2),
                        false
                    ),
                ]
            }
            MatchCompareComparison::Players => {
                // Player mode shows per-match totals inline (`with_total = true`).
                vec![
                    field!(
                        match_1,
                        prepare_scores(&map.match_1, map.match_1_scores, users, true),
                        false
                    ),
                    field!(
                        match_2,
                        prepare_scores(&map.match_2, map.match_2_scores, users, true),
                        false
                    ),
                ]
            }
            MatchCompareComparison::Teams => {
                vec![field!(
                    "Total team scores",
                    team_scores(&map, match_1, match_2),
                    false
                )]
            }
        };
        let title = map.map;
        let url = format!("{OSU_BASE}b/{}", map.map_id);
        Self {
            author,
            footer,
            title,
            url,
            fields,
        }
    }
}
/// Renders the per-team totals of a common map as a ranked list.
///
/// Teams with a zero total are omitted; entries are ordered by score,
/// highest first.
fn team_scores(map: &CommonMap, match_1: &str, match_2: &str) -> String {
    let mut entries = Vec::new();
    for team in [Team::Blue, Team::Red] {
        let idx = team as usize;
        // Push match 1 before match 2 so equal scores keep the same relative
        // order as before sorting.
        for (name, total) in [
            (match_1, map.match_1_scores[idx]),
            (match_2, map.match_2_scores[idx]),
        ] {
            if total > 0 {
                entries.push(TeamScore::new(team, name, total));
            }
        }
    }
    if entries.is_empty() {
        return "No teams".to_owned();
    }
    entries.sort_unstable_by_key(|entry| Reverse(entry.score));
    let mut value = String::with_capacity(entries.len() * 80);
    for (i, entry) in (1..).zip(entries) {
        let _ = writeln!(
            value,
            "**{i}.** `{score}` :{team}_circle:\n> {name}",
            score = with_comma_int(entry.score),
            team = if entry.team == Team::Blue {
                "blue"
            } else {
                "red"
            },
            name = entry.name,
        );
    }
    value
}
/// One team's total on one map, tagged with the match it came from.
struct TeamScore<'n> {
    team: Team,
    // Display name of the match the total belongs to.
    name: &'n str,
    score: u32,
}
impl<'n> TeamScore<'n> {
    // Plain constructor.
    fn new(team: Team, name: &'n str, score: u32) -> Self {
        Self { team, name, score }
    }
}
/// Renders one match's score rows for a map as an aligned list.
///
/// `totals` is indexed by team (`[none, blue, red]` — matches the `Team`
/// discriminants used elsewhere in this file); when `with_total` is set and
/// any team scored, a blue-vs-red total line is prepended with the winning
/// side bolded. Scores of 0 are skipped entirely.
fn prepare_scores(
    scores: &[MatchCompareScore],
    totals: [u32; 3],
    users: &HashMap<u32, Username>,
    with_total: bool,
) -> String {
    let mut embed_scores = Vec::with_capacity(scores.len());
    // Track the widest entry per column so every row can be padded to align.
    let mut sizes = ColumnSizes::default();
    let iter = scores.iter().filter(|score| score.score > 0).map(|score| {
        // Fall back to showing the raw user id when the name lookup misses.
        let name = match users.get(&score.user_id) {
            Some(name) => Cow::Borrowed(name.as_str()),
            None => format!("`User id {}`", score.user_id).into(),
        };
        let score_str = with_comma_int(score.score).to_string();
        let combo = with_comma_int(score.combo).to_string();
        let mods = score.mods.to_string();
        sizes.name = sizes.name.max(name.len());
        sizes.combo = sizes.combo.max(combo.len());
        sizes.score = sizes.score.max(score_str.len());
        sizes.mods = sizes.mods.max(mods.len());
        EmbedScore {
            username: name,
            mods,
            accuracy: score.acc,
            grade: score.grade,
            combo,
            score_str,
            team: score.team,
        }
    });
    // Collect iter so that `sizes` is correct
    embed_scores.extend(iter);
    let mut value = String::new();
    if with_total && totals[1] + totals[2] > 0 {
        let _ = writeln!(
            value,
            "**Total**: :blue_circle: {blue_won}{blue_score}{blue_won} \
            - {red_won}{red_score}{red_won} :red_circle:",
            blue_score = with_comma_int(totals[1]),
            red_score = with_comma_int(totals[2]),
            blue_won = if totals[1] > totals[2] { "**" } else { "" },
            red_won = if totals[2] > totals[1] { "**" } else { "" },
        );
    }
    for score in embed_scores {
        let _ = write!(
            value,
            "{grade} `{name:<name_len$}` `+{mods:<mods_len$}` `{acc:>5}%` \
            `{combo:>combo_len$}x` `{score:>score_len$}`",
            grade = grade_emote(score.grade),
            name = score.username,
            name_len = sizes.name,
            mods = score.mods,
            mods_len = sizes.mods,
            acc = round(score.accuracy),
            combo = score.combo,
            combo_len = sizes.combo,
            score = score.score_str,
            score_len = sizes.score,
        );
        match score.team {
            Team::None => {}
            Team::Blue => value.push_str(" :blue_circle:"),
            Team::Red => value.push_str(" :red_circle:"),
        }
        value.push('\n');
    }
    if value.is_empty() {
        value.push_str("No scores");
    }
    value
}
/// One pre-formatted score row, ready to be padded and written out.
struct EmbedScore<'n> {
    username: Cow<'n, str>,
    mods: String,
    accuracy: f32,
    grade: Grade,
    combo: String,
    score_str: String,
    team: Team,
}
/// Widest string seen per column; used to right/left-pad the rows.
#[derive(Default)]
struct ColumnSizes {
    name: usize,
    combo: usize,
    score: usize,
    mods: usize,
}
// Project-local macro generating the embed-builder plumbing for these fields.
impl_builder!(MatchCompareMapEmbed {
    author,
    footer,
    title,
    url,
    fields,
});
/// Summary page for one match: maps shared with, and unique to, the other.
pub struct MatchCompareSummaryEmbed {
    author: Author,
    // Bullet lists of common and unique maps.
    description: String,
    footer: Footer,
    // The match name.
    title: String,
    // Link to the multiplayer match.
    url: String,
}
impl MatchCompareSummaryEmbed {
    /// Builds the summary page for `processed`, listing maps it shares with
    /// the other match (`common`) and maps unique to it.
    pub fn new(
        common: &[CommonMap],
        processed: &ProcessedMatch,
        (page, common_maps, total_maps): (usize, usize, usize),
    ) -> Self {
        let author = Author::new("Match compare - Summary");
        let title = processed.name.to_owned();
        let url = format!("{OSU_BASE}mp/{}", processed.match_id);
        // The +2 accounts for the two summary pages around the per-map pages.
        let footer_text = format!(
            "Page {page}/{pages} | Common maps: {common_maps}/{total_maps}",
            pages = common_maps + 2,
        );
        let footer = Footer::new(footer_text);
        let mut description = String::new();
        description.push_str("__Common maps in both matches:__\n");
        for CommonMap { map, map_id, .. } in common {
            let _ = writeln!(description, "- [{map}]({OSU_BASE}b/{map_id})",);
        }
        description.push_str("\n__Maps of this match but not the other:__\n");
        for UniqueMap { map, map_id } in processed.unique_maps.iter() {
            let _ = writeln!(description, "- [{map}]({OSU_BASE}b/{map_id})");
        }
        Self {
            author,
            description,
            footer,
            title,
            url,
        }
    }
}
// Project-local macro generating the embed-builder plumbing for these fields.
impl_builder!(MatchCompareSummaryEmbed {
    author,
    description,
    footer,
    title,
    url,
});
|
#![feature(test)]
/// A command-line interface for the brute-force attack on the Discrete Log Problem
///
/// Compiled against rustc 1.13.0-nightly (497d67d70 2016-09-01)
///
/// Usage: dl-brute-force <p> <g> <pr>
///
extern crate test;
use std::env;
mod lib;
/// Parses `<p> <g> <pr>` from the command line and runs the brute-force
/// search (presumably finding x with g^x ≡ pr (mod p) — confirm against
/// `lib::brute_force`), printing the result.
///
/// Invalid input now prints a message instead of panicking via `unwrap`,
/// and the usage string matches the `<pr>` spelling in the module docs
/// (it previously said `<qr>`).
fn main() {
    let args: Vec<String> = env::args().collect();
    if args.len() != 4 {
        println!("Usage: dl-brute-force <p> <g> <pr>");
        return;
    }
    match (
        args[1].parse::<u64>(),
        args[2].parse::<u64>(),
        args[3].parse::<u64>(),
    ) {
        (Ok(p), Ok(g), Ok(residue)) => {
            // Argument order preserved from the original call site.
            let x = lib::brute_force(g, p, residue, p);
            println!("{}", x);
        }
        _ => println!("Usage: dl-brute-force <p> <g> <pr> (all arguments must be unsigned integers)"),
    }
}
|
/// Demonstrates the equivalent spellings of string-slice ranges.
fn main() {
    let s = String::from("hello");
    let len = s.len();
    // Explicit and shorthand forms of the same three ranges.
    let head_explicit = &s[0..2];
    let head = &s[..2];
    let tail_explicit = &s[3..len];
    let tail = &s[3..];
    let whole_explicit = &s[0..len];
    let whole = &s[..];
    for part in [head_explicit, head, tail_explicit, tail, whole_explicit, whole].iter() {
        println!("{:?}", part);
    }
}
|
//! Report panic messages to the host stdout using semihosting
//!
//! This crate contains an implementation of `panic_fmt` that logs panic messages to the host stdout
//! using [`cortex-m-semihosting`]'s `hstdout`. Before logging the message the panic handler disables
//! (masks) the device specific interrupts. After logging the message the panic handler triggers a
//! breakpoint and then goes into an infinite loop.
//!
//! Currently, this crate only supports the ARM Cortex-M architecture.
//!
//! [`cortex-m-semihosting`]: https://crates.io/crates/cortex-m-semihosting
//!
//! # Usage
//!
//! ``` ignore
//! #![no_std]
//!
//! extern crate panic_semihosting;
//!
//! fn main() {
//! panic!("FOO")
//! }
//! ```
//!
//! ``` text
//! (gdb) monitor arm semihosting enable
//! (gdb) continue
//! Program received signal SIGTRAP, Trace/breakpoint trap.
//! rust_begin_unwind (args=..., file=..., line=8, col=5)
//! at $CRATE/src/lib.rs:69
//! 69 asm::bkpt();
//! ```
//!
//! ``` text
//! $ openocd -f (..)
//! (..)
//! panicked at 'FOO', src/main.rs:6:5
//! ```
//!
//! # Optional features
//!
//! ## `exit`
//!
//! When this feature is enabled the panic handler performs an exit semihosting call after logging
//! the panic message. This is useful when emulating the program on QEMU as it causes the QEMU
//! process to exit with a non-zero exit code; thus it can be used to implement Cortex-M tests that
//! run on the host.
//!
//! We discourage using this feature when the program will run on hardware as the exit call can
//! leave the hardware debugger in an inconsistent state.
#![cfg(all(target_arch = "arm", target_os = "none"))]
#![deny(missing_docs)]
#![deny(warnings)]
#![no_std]
extern crate cortex_m;
extern crate cortex_m_semihosting as sh;
use core::fmt::Write;
use core::panic::PanicInfo;
#[cfg(not(feature = "exit"))]
use cortex_m::asm;
use cortex_m::interrupt;
#[cfg(feature = "exit")]
use sh::debug::{self, EXIT_FAILURE};
use sh::hio;
#[panic_handler]
fn panic(info: &PanicInfo) -> ! {
    // Mask device interrupts so the report cannot be preempted mid-write.
    interrupt::disable();
    // Best effort: if the host is not attached/listening, skip the report.
    if let Ok(mut hstdout) = hio::hstdout() {
        writeln!(hstdout, "{}", info).ok();
    }
    match () {
        // Exit the QEMU process
        #[cfg(feature = "exit")]
        () => debug::exit(EXIT_FAILURE),
        // OK to fire a breakpoint here because we know the microcontroller is connected to a
        // debugger
        #[cfg(not(feature = "exit"))]
        () => asm::bkpt(),
    }
    // Diverge (`!`): execution must never resume after a panic.
    loop {}
}
|
$NetBSD: patch-pgx_pgx-pg-sys_build.rs,v 1.2 2023/01/11 03:33:46 tnn Exp $
Fix include directories for bindgen.
--- ../vendor/pgx-pg-sys-0.6.1/build.rs.orig 2006-07-24 01:21:28.000000000 +0000
+++ ../vendor/pgx-pg-sys-0.6.1/build.rs
@@ -562,10 +562,10 @@ struct StructDescriptor<'a> {
fn run_bindgen(pg_config: &PgConfig, include_h: &PathBuf) -> eyre::Result<syn::File> {
let major_version = pg_config.major_version()?;
eprintln!("Generating bindings for pg{}", major_version);
- let includedir_server = pg_config.includedir_server()?;
let bindings = bindgen::Builder::default()
.header(include_h.display().to_string())
- .clang_arg(&format!("-I{}", includedir_server.display()))
+ .clang_arg("-I@BUILDLINK_DIR@/include/postgresql/server")
+ .clang_arg("-I@BUILDLINK_DIR@/include")
.clang_args(&extra_bindgen_clang_args(pg_config)?)
.parse_callbacks(Box::new(PgxOverrides::default()))
.blocklist_type("(Nullable)?Datum") // manually wrapping datum types for correctness
|
pub mod built_in_command;
pub mod parser;
pub mod process; |
mod gc_work;
mod global;
pub mod mutator;
pub use self::global::MarkSweep;
pub use self::global::MS_CONSTRAINTS;
|
use std::str::FromStr;
use problem::{Problem, solve};
/// A single lexical token of an arithmetic expression.
#[derive(Debug)]
enum Token {
    Int(u64),
    Plus,
    Asterisk,
    LeftParen,
    RightParen,
}
/// Errors produced while tokenizing the input text.
#[derive(Debug)]
enum ParseError {
    InvalidChar(char),
}
/// A tokenized expression, evaluated with a shunting-yard-style algorithm.
struct Expression {
    tokens: Vec<Token>,
}
/// Operators held on the evaluation stack.
enum Operator {
    Add,
    Mul,
    LeftParen,
}
/// Errors produced while evaluating an expression.
#[derive(Debug)]
enum EvaluationError {
    MissingLHS,
    MissingRHS,
    MismatchedParens,
    InvalidExpression,
}
impl Expression {
    /// Pops two operands and returns their sum.
    fn add(outputs: &mut Vec<u64>) -> Result<u64, EvaluationError> {
        let lhs = outputs.pop().ok_or(EvaluationError::MissingLHS)?;
        let rhs = outputs.pop().ok_or(EvaluationError::MissingRHS)?;
        Ok(lhs + rhs)
    }
    /// Pops two operands and returns their product.
    fn mul(outputs: &mut Vec<u64>) -> Result<u64, EvaluationError> {
        let lhs = outputs.pop().ok_or(EvaluationError::MissingLHS)?;
        let rhs = outputs.pop().ok_or(EvaluationError::MissingRHS)?;
        Ok(lhs * rhs)
    }
    /// Applies stacked operators until a `LeftParen` is on top or the stack
    /// is empty.
    fn reduce(outputs: &mut Vec<u64>, operators: &mut Vec<Operator>) -> Result<(), EvaluationError> {
        Self::reduce_until(outputs, operators, false)
    }
    /// Like `reduce`, but also stops at a `Mul`, giving `+` higher precedence.
    fn reduce_precedence(outputs: &mut Vec<u64>, operators: &mut Vec<Operator>) -> Result<(), EvaluationError> {
        Self::reduce_until(outputs, operators, true)
    }
    /// Shared reduction loop: the two previous near-duplicate loops only
    /// differed in whether a `Mul` on top of the stack halts the reduction.
    fn reduce_until(
        outputs: &mut Vec<u64>,
        operators: &mut Vec<Operator>,
        stop_at_mul: bool,
    ) -> Result<(), EvaluationError> {
        // `while let ... last()` replaces the manual `len() > 0` + index form.
        while let Some(op) = operators.last() {
            let result = match op {
                Operator::Add => Self::add(outputs)?,
                Operator::Mul if stop_at_mul => break,
                Operator::Mul => Self::mul(outputs)?,
                Operator::LeftParen => break,
            };
            outputs.push(result);
            operators.pop();
        }
        Ok(())
    }
    /// Evaluates with `+` and `*` at equal precedence, left to right.
    fn evaluate(&self) -> Result<u64, EvaluationError> {
        self.eval_with(false)
    }
    /// Evaluates with `+` binding tighter than `*`.
    fn evaluate_precedence(&self) -> Result<u64, EvaluationError> {
        self.eval_with(true)
    }
    /// Shared evaluation driver; the two public entry points above only
    /// differ in how far a pending reduction runs when a `+` is seen.
    fn eval_with(&self, with_precedence: bool) -> Result<u64, EvaluationError> {
        let mut outputs = Vec::new();
        let mut operators = Vec::new();
        for t in self.tokens.iter() {
            match t {
                Token::Int(i) => outputs.push(*i),
                Token::Plus => {
                    if with_precedence {
                        Self::reduce_precedence(&mut outputs, &mut operators)?;
                    } else {
                        Self::reduce(&mut outputs, &mut operators)?;
                    }
                    operators.push(Operator::Add);
                }
                Token::Asterisk => {
                    Self::reduce(&mut outputs, &mut operators)?;
                    operators.push(Operator::Mul);
                }
                Token::LeftParen => operators.push(Operator::LeftParen),
                Token::RightParen => {
                    Self::reduce(&mut outputs, &mut operators)?;
                    // Discard the matching LeftParen; absence means the input
                    // had an unmatched `)`.
                    operators.pop().ok_or(EvaluationError::MismatchedParens)?;
                }
            }
        }
        Self::reduce(&mut outputs, &mut operators)?;
        // A well-formed expression leaves exactly one value on the stack.
        if outputs.len() == 1 {
            Ok(outputs.pop().unwrap())
        } else {
            Err(EvaluationError::InvalidExpression)
        }
    }
}
impl FromStr for Expression {
    type Err = ParseError;
    /// Tokenizes a single-digit arithmetic expression; spaces are skipped,
    /// any other character is an error.
    fn from_str(s: &str) -> Result<Self, Self::Err> {
        let tokens = s
            .chars()
            .filter_map(|c| match c {
                ' ' => None,
                '0'..='9' => Some(Ok(Token::Int(c as u64 - '0' as u64))),
                '+' => Some(Ok(Token::Plus)),
                '*' => Some(Ok(Token::Asterisk)),
                '(' => Some(Ok(Token::LeftParen)),
                ')' => Some(Ok(Token::RightParen)),
                c => Some(Err(ParseError::InvalidChar(c))),
            })
            .collect::<Result<_, _>>()?;
        Ok(Self { tokens })
    }
}
/// Problem wrapper for day 18 (operator-precedence arithmetic homework).
struct Day18;
impl Problem for Day18 {
    type Input = Vec<Expression>;
    type Part1Output = u64;
    type Part2Output = u64;
    type Error = EvaluationError;
    /// Part 1: sum of every expression evaluated with equal +/* precedence.
    fn part_1(input: &Self::Input) -> Result<Self::Part1Output, Self::Error> {
        let mut result = 0;
        for expr in input.iter() {
            result += expr.evaluate()?;
        }
        Ok(result)
    }
    /// Part 2: sum of every expression with + binding tighter than *.
    fn part_2(input: &Self::Input) -> Result<Self::Part2Output, Self::Error> {
        let mut result = 0;
        for expr in input.iter() {
            result += expr.evaluate_precedence()?;
        }
        Ok(result)
    }
}
/// Reads the puzzle input from the file `input` and prints both answers.
fn main() {
    solve::<Day18>("input").unwrap();
}
|
use core::cmp::min;
use core::fmt::Debug;
use std::collections::HashMap;
// TODO: I probably do need to return actions here
// for every time the cursor moves.
/// Iterator over `items`, yielding slices delimited by `newline`.
///
/// The delimiter is not included in the yielded slices; a trailing run with
/// no final delimiter is still yielded as the last line.
pub struct LineIter<'a, Item> {
    current_position: usize,
    items: &'a [Item],
    newline: &'a Item,
}
impl<'a, Item> Iterator for LineIter<'a, Item>
where
    Item: PartialEq + Copy,
{
    type Item = &'a [Item];
    fn next(&mut self) -> Option<Self::Item> {
        let start = self.current_position;
        if start >= self.items.len() {
            return None;
        }
        let rest = &self.items[start..];
        match rest.iter().position(|item| *item == *self.newline) {
            // Found a delimiter: yield everything before it, resume after it.
            Some(offset) => {
                self.current_position = start + offset + 1;
                Some(&rest[..offset])
            }
            // No delimiter left: yield the remainder and exhaust the iterator.
            None => {
                self.current_position = self.items.len();
                Some(rest)
            }
        }
    }
}
/// Minimal editing interface over a buffer of `Item`s (typically bytes).
///
/// NOTE(review): no implementation is visible here; per-method semantics are
/// inferred from names — confirm against the implementors.
pub trait TextBuffer {
    type Item;
    fn line_length(&self, line: usize) -> usize;
    fn line_count(&self) -> usize;
    // Rethink bytes because of utf8
    fn insert_bytes(&mut self, line: usize, column: usize, text: &[Self::Item]);
    fn byte_at_pos(&self, line: usize, column: usize) -> Option<&Self::Item>;
    fn delete_char(&mut self, line: usize, column: usize);
    fn lines(&self) -> LineIter<Self::Item>;
    /// Index of the final line; saturates to 0 for an empty buffer.
    fn last_line(&self) -> usize {
        self.line_count().saturating_sub(1)
    }
    fn set_contents(&mut self, contents: &[Self::Item]);
}
/// A single semantic token, delta-encoded relative to the previous token.
/// The 5-field layout appears to follow the LSP "semanticTokens" wire
/// encoding (see `From<&[u64]>` and `parse_tokens`).
#[derive(Debug, Clone, PartialEq, Eq, Hash, serde::Serialize, serde::Deserialize)]
pub struct Token {
    /// Lines between this token and the previous one (0 = same line).
    pub delta_line: usize,
    /// Column delta from the previous token's start; the running column
    /// resets to 0 whenever `delta_line > 0` (see `find_token`).
    pub delta_start: usize,
    /// Token length in buffer items (bytes here).
    pub length: usize,
    /// Numeric token type id. NOTE(review): the id's meaning is defined by
    /// whatever produced the tokens; not visible from this file.
    pub kind: usize,
    /// Numeric modifier value — presumably a bitmask; confirm with producer.
    pub modifiers: usize,
}
impl From<&[u64]> for Token {
    /// Builds a `Token` from one 5-element numeric chunk.
    ///
    /// Panics when `chunk` is not exactly 5 elements long.
    fn from(chunk: &[u64]) -> Self {
        match chunk {
            &[delta_line, delta_start, length, kind, modifiers] => Token {
                delta_line: delta_line as usize,
                delta_start: delta_start as usize,
                length: length as usize,
                kind: kind as usize,
                modifiers: modifiers as usize,
            },
            _ => panic!(
                "Expected chunk to be of length 5, but was {}",
                chunk.len(),
            ),
        }
    }
}
/// Decodes a flat numeric token stream into `Token`s, 5 numbers per token.
///
/// NOTE(review): a trailing partial chunk (`tokens.len() % 5 != 0`) panics
/// inside `Token::from` — confirm inputs are always a multiple of 5.
pub fn parse_tokens(tokens: &[u64]) -> Vec<Token> {
    // Pass `Token::from` directly; the closure was redundant
    // (clippy::redundant_closure).
    tokens.chunks(5).map(Token::from).collect()
}
/// A primitive buffer mutation, recorded so external consumers can react
/// to buffer changes.
#[derive(Debug, Clone, PartialEq, Eq, Hash, serde::Serialize, serde::Deserialize)]
pub enum Edit {
    /// Bytes inserted at (line, column).
    Insert(usize, usize, Vec<u8>),
    /// One character removed at (line, column).
    Delete(usize, usize),
}
/// Wrapper around a recorded `Edit`; accumulated on `TokenTextBuffer::edits`
/// until drained via `drain_edits`.
#[derive(Debug, Clone, PartialEq, Eq, Hash, serde::Serialize, serde::Deserialize)]
pub struct EditEvent {
    pub edit: Edit,
}
/// A `TextBuffer` decorator that keeps semantic tokens alongside the text
/// and records every edit for later consumers.
#[derive(Debug, Clone, PartialEq, Eq, Hash, serde::Serialize, serde::Deserialize)]
pub struct TokenTextBuffer<T: TextBuffer> {
    /// Delta-encoded tokens covering the buffer contents.
    pub tokens: Vec<Token>,
    /// The buffer that actually stores the text.
    pub underlying_text_buffer: T,
    /// Edits accumulated since the last `drain_edits` call.
    pub edits: Vec<EditEvent>,
}
/// Internal adjustments applied to the token list in response to edits
/// (see `apply_token_action`).
#[derive(Debug, Clone, PartialEq, Eq, Hash, serde::Serialize, serde::Deserialize)]
enum TokenAction {
    /// Cut the token at `index` in two: `offset` becomes the first half's
    /// length; `length` is added to the second half's `delta_start`.
    SplitToken {
        index: usize,
        length: usize,
        offset: usize,
    },
    /// Not implemented yet (panics via todo!).
    MergeToken,
    /// Not implemented yet (panics via todo!).
    DeleteToken,
    /// Not implemented yet (panics via todo!).
    CreateToken,
    /// Shift the token at `index` by `length` columns (negative = left).
    OffsetToken {
        index: usize,
        length: isize,
    },
    /// Shrink the token at `index` by `length` items.
    ReduceTokenLength {
        index: usize,
        length: usize,
    },
}
/// Snapshot of the token list around one buffer position: the token covering
/// the position (if any) plus its immediate neighbors. Built by `find_token`.
#[allow(unused)]
struct TokenWindow {
    /// Index into `TokenTextBuffer::tokens` where the search stopped.
    index: usize,
    /// The queried line.
    line: usize,
    /// The queried column (copied straight from the `find_token` argument).
    column: usize,
    /// Token at `index - 1` (saturating), when it exists.
    left: Option<Token>,
    /// Token covering the queried position, when one was found.
    center: Option<Token>,
    /// Token at `index + 1`, when it exists.
    right: Option<Token>,
}
impl<T> TokenTextBuffer<T>
where
    T: TextBuffer<Item = u8>,
{
    /// Returns `take` lines starting at line `skip`, each split into
    /// `(byte_run, token)` pairs; `None` marks untokenized gaps.
    // TODO: Make this an iterator instead
    pub fn decorated_lines(&self, skip: usize, take: usize) -> Vec<Vec<(&[u8], Option<&Token>)>> {
        let mut result = vec![];
        for (relative_line_number, (line, tokens)) in self
            .lines()
            .skip(skip)
            .take(take)
            .zip(self.tokens.token_lines().skip(skip).take(take))
            .enumerate()
        {
            let line_number = relative_line_number + skip;
            result.push(self.decorated_line(line_number, line, tokens));
        }
        result
    }

    /// Splits one `line` into alternating untokenized/tokenized byte runs
    /// using that line's delta-encoded `tokens`. All ranges are clamped to
    /// the line length so stale token data cannot slice out of bounds.
    fn decorated_line<'a>(
        &self,
        _line_number: usize,
        line: &'a [u8],
        tokens: &'a [Token],
    ) -> Vec<(&'a [u8], Option<&'a Token>)> {
        // TODO: Need to account for edits
        let mut result = vec![];
        let mut current_position = 0;
        let mut last_end = 0;
        for token in tokens.iter() {
            current_position += token.delta_start;
            if current_position > last_end {
                // Gap between the previous token (or line start) and this one.
                let non_token_end = std::cmp::min(current_position, line.len());
                result.push((&line[last_end..non_token_end], None));
            }
            let end = std::cmp::min(current_position + token.length, line.len());
            let start = std::cmp::min(current_position, end);
            result.push((&line[start..end], Some(token)));
            last_end = end;
        }
        if last_end < line.len() {
            // Trailing untokenized text.
            result.push((&line[last_end..], None));
        }
        result
    }

    /// Removes and returns all edits recorded since the last drain.
    pub fn drain_edits(&mut self) -> Vec<EditEvent> {
        std::mem::take(&mut self.edits)
    }

    /// Replaces the token set wholesale (e.g. after re-tokenizing).
    pub fn set_tokens(&mut self, tokens: Vec<Token>) {
        self.tokens = tokens;
    }

    /// Adjusts token positions after `text` was inserted at (line, column).
    fn update_tokens_insert(&mut self, line: usize, column: usize, text: &[u8]) {
        let window: TokenWindow = self.find_token(line, column);
        let actions = self.resolve_token_action_insert(window, line, column, text);
        for action in actions.iter() {
            self.apply_token_action(action);
        }
    }

    /// Adjusts token positions after one character was deleted at (line, column).
    fn update_tokens_delete(&mut self, line: usize, column: usize) {
        let window: TokenWindow = self.find_token(line, column);
        let actions = self.result_token_action_delete(window, line, column);
        for action in actions.iter() {
            self.apply_token_action(action);
        }
    }

    /// Locates the token covering (line, column), if any, together with its
    /// immediate neighbors.
    fn find_token(&self, line: usize, column: usize) -> TokenWindow {
        let mut index: usize = 0;
        let mut current_line = 0;
        let mut current_column = 0;
        let mut center = None;
        // remember, column resets if we change lines
        for token in self.tokens.iter() {
            if token.delta_line > 0 {
                current_line += token.delta_line;
                current_column = 0;
            }
            current_column += token.delta_start;
            if current_line == line {
                // BUG FIX: this used `current_column >= column`, which made
                // the second clause redundant and only ever matched a token's
                // first column. A token covers [start, start + length).
                if current_column <= column && column < current_column + token.length {
                    center = Some(token.clone());
                    break;
                }
            }
            index += 1;
        }
        // NOTE(review): when `index` is 0, `saturating_sub` makes "left" the
        // token at index 0, which may be the center itself — confirm intended.
        let left = self.tokens.get(index.saturating_sub(1)).cloned();
        let right = self.tokens.get(index + 1).cloned();
        TokenWindow {
            index,
            line,
            column,
            left,
            center,
            right,
        }
    }

    /// Decides how tokens shift for a whitespace-only insertion;
    /// non-whitespace insertions currently produce no actions.
    // NOTE(review): `window.column` is copied from the same `column` argument
    // inside `find_token`, so `window.column <= column` is always true here —
    // it probably should compare against the center token's start column.
    // Left as-is pending clarification of the intended semantics.
    fn resolve_token_action_insert(&self, window: TokenWindow, _line: usize, column: usize, text: &[u8]) -> Vec<TokenAction> {
        if text.iter().all(|x| x.is_ascii_whitespace()) {
            if window.column <= column {
                vec![TokenAction::OffsetToken { index: window.index, length: text.len() as isize }]
            } else if let Some(token) = window.center {
                if column < token.length + window.column {
                    vec![TokenAction::SplitToken { index: window.index, length: text.len(), offset: token.length + window.column - column }]
                } else {
                    vec![TokenAction::OffsetToken { index: window.index + 1, length: text.len() as isize }]
                }
            } else {
                vec![TokenAction::OffsetToken { index: window.index + 1, length: text.len() as isize }]
            }
        } else {
            // TODO: have some default I can always do
            vec![]
        }
    }

    /// Decides how tokens shift for a single-character deletion.
    // NOTE(review): same `window.column` caveat as resolve_token_action_insert.
    fn result_token_action_delete(&self, window: TokenWindow, _line: usize, column: usize) -> Vec<TokenAction> {
        if let Some(token) = window.center {
            if column <= window.column {
                vec![TokenAction::OffsetToken { index: window.index, length: -1 }]
            } else if column < token.length + window.column {
                vec![TokenAction::ReduceTokenLength { index: window.index, length: 1 }]
            } else {
                vec![TokenAction::OffsetToken { index: window.index + 1, length: -1 }]
            }
        } else {
            vec![TokenAction::OffsetToken { index: window.index + 1, length: -1 }]
        }
    }

    /// Applies one token adjustment; indices that no longer exist are ignored.
    fn apply_token_action(&mut self, action: &TokenAction) {
        match action {
            TokenAction::SplitToken { index, length, offset } => {
                if let Some(token) = self.tokens.get_mut(*index) {
                    // BUG FIX: the remaining length must be computed *before*
                    // the original token is truncated; previously the new
                    // token always came out with length 0.
                    let remaining_length = token.length - offset;
                    token.length = *offset;
                    let new_token = Token {
                        delta_start: token.delta_start + length,
                        length: remaining_length,
                        ..*token
                    };
                    self.tokens.insert(*index + 1, new_token);
                }
            }
            TokenAction::MergeToken => todo!(),
            TokenAction::DeleteToken => todo!(),
            TokenAction::CreateToken => todo!(),
            TokenAction::OffsetToken { index, length } => {
                if let Some(token) = self.tokens.get_mut(*index) {
                    if length.is_negative() {
                        // `unsigned_abs` avoids the overflow edge of `abs()`.
                        token.delta_start -= length.unsigned_abs();
                    } else {
                        token.delta_start += *length as usize;
                    }
                }
            }
            TokenAction::ReduceTokenLength { index, length } => {
                if let Some(token) = self.tokens.get_mut(*index) {
                    token.length -= length;
                }
            }
        }
    }
}
impl<T> TextBuffer for TokenTextBuffer<T>
where
    T: TextBuffer<Item = u8>,
{
    type Item = u8;

    /// Delegates to the wrapped buffer.
    fn line_length(&self, line: usize) -> usize {
        self.underlying_text_buffer.line_length(line)
    }

    /// Delegates to the wrapped buffer.
    fn line_count(&self) -> usize {
        self.underlying_text_buffer.line_count()
    }

    /// Inserts into the wrapped buffer, records the edit, and shifts tokens.
    fn insert_bytes(&mut self, line: usize, column: usize, text: &[Self::Item]) {
        self.underlying_text_buffer.insert_bytes(line, column, text);
        let edit = Edit::Insert(line, column, text.to_vec());
        self.edits.push(EditEvent { edit });
        self.update_tokens_insert(line, column, text);
    }

    /// Delegates to the wrapped buffer.
    fn byte_at_pos(&self, line: usize, column: usize) -> Option<&Self::Item> {
        self.underlying_text_buffer.byte_at_pos(line, column)
    }

    /// Deletes from the wrapped buffer, records the edit, and shifts tokens.
    fn delete_char(&mut self, line: usize, column: usize) {
        self.underlying_text_buffer.delete_char(line, column);
        let edit = Edit::Delete(line, column);
        self.edits.push(EditEvent { edit });
        self.update_tokens_delete(line, column);
    }

    /// Delegates to the wrapped buffer.
    fn lines(&self) -> LineIter<Self::Item> {
        self.underlying_text_buffer.lines()
    }

    /// Delegates to the wrapped buffer (tokens are left untouched).
    fn set_contents(&mut self, contents: &[Self::Item]) {
        self.underlying_text_buffer.set_contents(contents);
    }
}
impl TokenTextBuffer<SimpleTextBuffer> {
pub fn new_with_contents(contents: &[u8]) -> Self {
let underlying_text_buffer = SimpleTextBuffer::new_with_contents(contents);
Self {
tokens: vec![],
underlying_text_buffer,
edits: vec![],
}
}
}
/// Iterator that groups a delta-encoded token slice into per-line batches
/// (see the `Iterator` impl below).
pub struct TokenLineIter<'a> {
    // Index of the next token to examine.
    current_position: usize,
    // The full token list being grouped.
    tokens: &'a [Token],
    // Blank lines still owed to the caller before the next token batch.
    empty_lines: usize,
}
impl<'a> Iterator for TokenLineIter<'a> {
    type Item = &'a [Token];

    /// Yields the tokens of one line per call; blank lines between token
    /// lines (`delta_line > 1`) come out as empty slices.
    // NOTE(review): leading blank lines (a *first* token with delta_line > 0)
    // are never emitted, which would misalign `token_lines` with buffer
    // lines when the first token is not on line 0 — confirm the inputs.
    fn next(&mut self) -> Option<Self::Item> {
        let original_position = self.current_position;
        // Serve blank lines scheduled by an earlier delta_line > 1 token.
        if self.empty_lines > 0 {
            self.empty_lines -= 1;
            return Some(&[]);
        }
        while self.current_position < self.tokens.len() {
            let token = &self.tokens[self.current_position];
            // A token with delta_line >= 1 starts a new line, ending the
            // current batch — unless it is the batch's first token, whose
            // line break was already consumed by the previous call.
            if self.current_position != original_position && token.delta_line == 1 {
                self.empty_lines = 0;
                return Some(&self.tokens[original_position..self.current_position]);
            } else if self.current_position != original_position && token.delta_line > 1 {
                // Schedule the blank lines preceding this token's line.
                self.empty_lines = token.delta_line - 1;
                return Some(&self.tokens[original_position..self.current_position]);
            }
            self.current_position += 1;
        }
        // Trailing batch with no following line break.
        if self.current_position != original_position {
            let line = &self.tokens[original_position..self.current_position];
            return Some(line);
        }
        None
    }
}
/// Extension trait adding `.token_lines()` to token slices.
// NOTE(review): "Liner" looks like a typo for "Line"; renaming would touch
// every use site, so it is only flagged here.
trait TokenLinerIterExt<'a> {
    /// Iterator over per-line batches of the tokens.
    fn token_lines(self) -> TokenLineIter<'a>;
}
impl<'a> TokenLinerIterExt<'a> for &'a [Token] {
    /// Wraps the slice in a `TokenLineIter` positioned at the start.
    fn token_lines(self) -> TokenLineIter<'a> {
        TokenLineIter {
            tokens: self,
            current_position: 0,
            empty_lines: 0,
        }
    }
}
/// The simplest possible backing store: one flat byte vector with b'\n'
/// separators; every positional operation scans from the start.
#[derive(Debug, Clone, PartialEq, Eq, Hash, serde::Serialize, serde::Deserialize)]
pub struct SimpleTextBuffer {
    /// Raw, newline-delimited buffer contents.
    pub bytes: Vec<u8>,
}
// These are really bad implementations
// probably need to actually know where lines start
// and stop in some data structure.
// But trying to start with the simplest thing that works
impl SimpleTextBuffer {
    /// Creates an empty buffer.
    pub fn new() -> Self {
        SimpleTextBuffer { bytes: Vec::new() }
    }

    /// Creates a buffer initialized with `contents`.
    pub fn new_with_contents(contents: &[u8]) -> Self {
        SimpleTextBuffer {
            bytes: contents.to_vec(),
        }
    }

    /// Replaces the entire buffer contents.
    pub fn set_contents(&mut self, contents: &[u8]) {
        self.bytes = contents.to_vec();
    }

    /// Byte offset where `line` (0-based) starts. Lines are delimited by
    /// b'\n'; a `line` past the end yields `self.bytes.len()`.
    pub fn line_start(&self, line: usize) -> usize {
        let mut line_start = 0;
        let mut lines_seen = 0;
        for byte in self.bytes.iter() {
            if lines_seen == line {
                break;
            }
            if *byte == b'\n' {
                lines_seen += 1;
            }
            line_start += 1;
        }
        line_start
    }
}

/// `Default` mirrors `new()` (fixes clippy::new_without_default).
impl Default for SimpleTextBuffer {
    fn default() -> Self {
        Self::new()
    }
}
impl TextBuffer for SimpleTextBuffer {
    type Item = u8;

    /// Number of bytes in `line`, excluding its trailing newline.
    fn line_length(&self, line: usize) -> usize {
        let start = self.line_start(line);
        self.bytes[start..]
            .iter()
            .take_while(|&&byte| byte != b'\n')
            .count()
    }

    /// One more than the number of newline bytes in the buffer.
    fn line_count(&self) -> usize {
        let newlines = self.bytes.iter().filter(|&&byte| byte == b'\n').count();
        newlines + 1
    }

    /// Splices `text` in before (line, column).
    fn insert_bytes(&mut self, line: usize, column: usize, text: &[u8]) {
        let offset = self.line_start(line) + column;
        self.bytes.splice(offset..offset, text.iter().copied());
    }

    fn byte_at_pos(&self, line: usize, column: usize) -> Option<&u8> {
        let offset = self.line_start(line) + column;
        self.bytes.get(offset)
    }

    /// Removes the byte at (line, column); panics when out of bounds.
    fn delete_char(&mut self, line: usize, column: usize) {
        let offset = self.line_start(line) + column;
        self.bytes.remove(offset);
    }

    fn lines(&self) -> LineIter<Self::Item> {
        LineIter {
            items: &self.bytes,
            current_position: 0,
            newline: &b'\n',
        }
    }

    fn set_contents(&mut self, contents: &[Self::Item]) {
        self.bytes = contents.to_vec();
    }
}
/// Cursor behavior defined purely against the `TextBuffer` trait, so the
/// same movement and editing logic works for any cursor representation.
pub trait VirtualCursor: Clone + Debug {
    /// Unconditionally sets the position (no bounds checking).
    fn move_to(&mut self, line: usize, column: usize);
    /// Current line (0-based).
    fn line(&self) -> usize;
    /// Current column (0-based).
    fn column(&self) -> usize;
    /// Creates a cursor at the given position.
    fn new(line: usize, column: usize) -> Self;

    /// Moves to the position, clamped to the buffer's last line and to the
    /// target line's length.
    fn move_to_bounded<T: TextBuffer>(&mut self, line: usize, column: usize, buffer: &T) {
        let line = min(buffer.last_line(), line);
        let column = min(buffer.line_length(line), column);
        self.move_to(line, column);
    }

    /// Moves up one line, clamping the column to the new line's length.
    /// On line 0 this re-clamps within line 0.
    fn move_up<T: TextBuffer>(&mut self, buffer: &T) {
        let previous_line = self.line().saturating_sub(1);
        self.move_to(
            previous_line,
            min(self.column(), buffer.line_length(previous_line)),
        );
    }

    /// Moves down one line unless already on the last line; the column is
    /// clamped to the new line's length.
    fn move_down<T: TextBuffer>(&mut self, buffer: &T) {
        let next_line = self.line().saturating_add(1);
        let last_line = buffer.last_line();
        if next_line > last_line {
            return;
        }
        self.move_to(
            min(next_line, last_line),
            min(self.column(), buffer.line_length(next_line)),
        );
    }

    /// Moves one column left, wrapping to the end of the previous line when
    /// at column 0 of a non-first line.
    fn move_left<T: TextBuffer>(&mut self, buffer: &T) {
        if self.column() == 0 && self.line() != 0 {
            let new_line = self.line().saturating_sub(1);
            let length = buffer.line_length(new_line);
            self.move_to(new_line, length);
        } else {
            self.move_to(self.line(), self.column().saturating_sub(1));
        }
    }

    /// Moves one column right, wrapping to column 0 of the next line when
    /// at or past the end of the current line.
    fn move_right<T: TextBuffer>(&mut self, buffer: &T) {
        let length = buffer.line_length(self.line());
        if self.column() >= length {
            if self.line().saturating_add(1) < buffer.line_count() {
                self.move_to(self.line().saturating_add(1), 0);
            }
        } else {
            self.move_to(self.line(), self.column().saturating_add(1));
        }
    }

    /// Jumps to column 0 of the current line.
    fn start_of_line(&mut self) {
        self.move_to(self.line(), 0);
    }

    /// Jumps past the last character of the current line.
    fn end_of_line<T: TextBuffer>(&mut self, buffer: &T) {
        self.move_to(self.line(), buffer.line_length(self.line()));
    }

    /// Moves only when `line` exists in the buffer, clamping the column.
    fn move_in_buffer<T: TextBuffer>(&mut self, line: usize, column: usize, buffer: &T) {
        if line < buffer.line_count() {
            self.move_to(line, min(column, buffer.line_length(line)));
        }
    }

    /// Copy of this cursor shifted one column right.
    fn right_of<T: TextBuffer>(&self, buffer: &T) -> Self {
        let mut cursor = self.clone();
        cursor.move_right(buffer);
        cursor
    }

    /// Copy of this cursor shifted one column left.
    fn left_of<T: TextBuffer>(&self, buffer: &T) -> Self {
        let mut cursor = self.clone();
        cursor.move_left(buffer);
        cursor
    }

    /// Copy of this cursor shifted one line up.
    fn above<T: TextBuffer<Item = u8>>(&self, buffer: &T) -> Self {
        let mut cursor = self.clone();
        cursor.move_up(buffer);
        cursor
    }

    /// Copy of this cursor shifted one line down.
    fn below<T: TextBuffer>(&self, buffer: &T) -> Self {
        let mut cursor = self.clone();
        cursor.move_down(buffer);
        cursor
    }

    /// Inserts an opening bracket together with its matching closer, then
    /// moves right once so the cursor sits between the pair.
    fn auto_bracket_insert<T: TextBuffer<Item = u8>>(&mut self, buffer: &mut T, to_insert: &[u8]) {
        let to_insert = match to_insert {
            b"(" => b"()",
            b"[" => b"[]",
            b"{" => b"{}",
            b"\"" => b"\"\"",
            _ => to_insert,
        };
        buffer.insert_bytes(self.line(), self.column(), to_insert);
        self.move_right(buffer);
    }

    /// Inserts text and advances: a lone newline moves to column 0 of the
    /// next line, anything else advances one column per inserted byte.
    fn insert_normal_text<T: TextBuffer<Item = u8>>(&mut self, to_insert: &[u8], buffer: &mut T) {
        buffer.insert_bytes(self.line(), self.column(), to_insert);
        if to_insert == b"\n" {
            self.move_down(buffer);
            self.move_to(self.line(), 0);
        } else {
            // TODO: Do this more efficiently
            for _ in 0..(to_insert.len() - 1) {
                self.move_right(buffer);
            }
            self.move_right(buffer)
        };
    }

    /// Backspace: steps left, then deletes the character at the new position.
    // NOTE(review): at (0, 0) `move_left` leaves the cursor in place, so this
    // deletes the buffer's first character rather than doing nothing —
    // confirm that is the intended behavior.
    fn delete_char<T: TextBuffer>(&mut self, buffer: &mut T) {
        self.move_left(buffer);
        buffer.delete_char(self.line(), self.column());
    }

    /// True for byte sequences treated as opening brackets (a double quote
    /// counts as both opening and closing).
    fn is_open_bracket(byte: &[u8]) -> bool {
        match byte {
            b"(" | b"[" | b"{" | b"\"" => true,
            _ => false,
        }
    }

    /// True for byte sequences treated as closing brackets.
    fn is_close_bracket(byte: &[u8]) -> bool {
        match byte {
            b")" | b"]" | b"}" | b"\"" => true,
            _ => false,
        }
    }

    /// Insertion entry point: auto-pairs opening brackets, skips over an
    /// already-present closing bracket, otherwise inserts normally.
    fn handle_insert<T: TextBuffer<Item = u8>>(&mut self, to_insert: &[u8], buffer: &mut T) {
        if Self::is_open_bracket(to_insert) {
            // Would need to have a setting for this
            self.auto_bracket_insert(buffer, to_insert);
        } else if Self::is_close_bracket(to_insert) {
            // NOTE(review): this skips over *any* closing bracket to the
            // right, not just the one matching `to_insert`.
            let right_of_cursor = buffer.byte_at_pos(self.line(), self.column());
            match right_of_cursor {
                Some(right) if Self::is_close_bracket(&[*right]) => self.move_right(buffer),
                _ => self.insert_normal_text(to_insert, buffer),
            }
        } else {
            self.insert_normal_text(to_insert, buffer)
        }
    }
}
/// Concrete cursor: a plain (line, column) pair with no history.
#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash, serde::Serialize, serde::Deserialize)]
pub struct Cursor {
    line: usize,
    column: usize,
}
impl VirtualCursor for Cursor {
fn new(line: usize, column: usize) -> Self {
Self { line, column }
}
fn line(&self) -> usize {
self.line
}
fn column(&self) -> usize {
self.column
}
fn move_to(&mut self, line: usize, column: usize) {
self.line = line;
self.column = column;
}
}
// For right now it is a simple linear history
// Probably want it to be a tree
/// Cursor that records every previous position on each `move_to`.
#[derive(Debug, Clone, PartialEq, Eq, Hash, serde::Serialize, serde::Deserialize)]
struct CursorWithHistory {
    /// Current position.
    cursor: Cursor,
    /// Every position the cursor has left, oldest first.
    history: Vec<Cursor>,
}
impl VirtualCursor for CursorWithHistory {
    /// Starts at the given position with an empty history.
    fn new(line: usize, column: usize) -> Self {
        Self {
            cursor: Cursor::new(line, column),
            history: Vec::new(),
        }
    }

    fn line(&self) -> usize {
        self.cursor.line()
    }

    fn column(&self) -> usize {
        self.cursor.column()
    }

    /// Records the previous position, then moves.
    fn move_to(&mut self, line: usize, column: usize) {
        // `Cursor` is `Copy`, so the explicit `.clone()` was redundant
        // (clippy::clone_on_copy).
        self.history.push(self.cursor);
        self.cursor.move_to(line, column);
    }
}
/// A set of cursors that all receive the same commands; positional queries
/// (`line`/`column`) report the first cursor.
#[derive(Debug, Clone, PartialEq, Eq, Hash, serde::Serialize, serde::Deserialize)]
pub struct MultiCursor<C: VirtualCursor> {
    cursors: Vec<C>,
}
impl MultiCursor<Cursor> {
    /// Creates a multi-cursor with no cursors at all.
    ///
    /// Note: unlike `VirtualCursor::new`, this starts empty — movement and
    /// edits are no-ops until `add_cursor` is called.
    pub fn new() -> Self {
        Self { cursors: vec![] }
    }

    /// Adds another independent cursor.
    pub fn add_cursor(&mut self, cursor: Cursor) {
        self.cursors.push(cursor);
    }
}

/// `Default` mirrors `new()` (fixes clippy::new_without_default).
impl Default for MultiCursor<Cursor> {
    fn default() -> Self {
        Self::new()
    }
}
impl<C: VirtualCursor> VirtualCursor for MultiCursor<C> {
    /// Moves every cursor up one line.
    fn move_up<T: TextBuffer>(&mut self, buffer: &T) {
        self.cursors.iter_mut().for_each(|c| c.move_up(buffer));
    }

    /// Moves every cursor down one line.
    fn move_down<T: TextBuffer>(&mut self, buffer: &T) {
        self.cursors.iter_mut().for_each(|c| c.move_down(buffer));
    }

    /// Moves every cursor one column left.
    fn move_left<T: TextBuffer>(&mut self, buffer: &T) {
        self.cursors.iter_mut().for_each(|c| c.move_left(buffer));
    }

    /// Moves every cursor one column right.
    fn move_right<T: TextBuffer>(&mut self, buffer: &T) {
        self.cursors.iter_mut().for_each(|c| c.move_right(buffer));
    }

    /// Sends every cursor to column 0 of its own line.
    fn start_of_line(&mut self) {
        self.cursors.iter_mut().for_each(|c| c.start_of_line());
    }

    /// Performs the insertion at every cursor, in order.
    fn handle_insert<T: TextBuffer<Item = u8>>(&mut self, to_insert: &[u8], buffer: &mut T) {
        self.cursors
            .iter_mut()
            .for_each(|c| c.handle_insert(to_insert, buffer));
    }

    /// Collapses to a single cursor at the given position.
    fn move_to(&mut self, line: usize, column: usize) {
        self.cursors = vec![C::new(line, column)];
    }

    /// Line of the first cursor, or 0 when there are none.
    fn line(&self) -> usize {
        self.cursors.first().map(C::line).unwrap_or(0)
    }

    /// Column of the first cursor, or 0 when there are none.
    fn column(&self) -> usize {
        self.cursors.first().map(C::column).unwrap_or(0)
    }

    /// Starts with a single cursor at the given position.
    fn new(line: usize, column: usize) -> Self {
        Self {
            cursors: vec![C::new(line, column)],
        }
    }

    /// Sends every cursor to the end of its own line.
    fn end_of_line<T: TextBuffer>(&mut self, buffer: &T) {
        self.cursors.iter_mut().for_each(|c| c.end_of_line(buffer));
    }

    /// Collapses to a single cursor, bounds-checked against the buffer.
    fn move_in_buffer<T: TextBuffer>(&mut self, line: usize, column: usize, buffer: &T) {
        let mut cursor = C::new(line, column);
        cursor.move_in_buffer(line, column, buffer);
        self.cursors = vec![cursor];
    }

    /// Per-cursor copy, each shifted one column right.
    fn right_of<T: TextBuffer>(&self, buffer: &T) -> Self {
        let cursors = self.cursors.iter().map(|c| c.right_of(buffer)).collect();
        Self { cursors }
    }

    /// Per-cursor copy, each shifted one column left.
    fn left_of<T: TextBuffer>(&self, buffer: &T) -> Self {
        let cursors = self.cursors.iter().map(|c| c.left_of(buffer)).collect();
        Self { cursors }
    }

    /// Per-cursor copy, each shifted one line up.
    fn above<T: TextBuffer<Item = u8>>(&self, buffer: &T) -> Self {
        let cursors = self.cursors.iter().map(|c| c.above(buffer)).collect();
        Self { cursors }
    }

    /// Per-cursor copy, each shifted one line down.
    fn below<T: TextBuffer>(&self, buffer: &T) -> Self {
        let cursors = self.cursors.iter().map(|c| c.below(buffer)).collect();
        Self { cursors }
    }

    /// Auto-pair insertion at every cursor.
    fn auto_bracket_insert<T: TextBuffer<Item = u8>>(&mut self, buffer: &mut T, to_insert: &[u8]) {
        self.cursors
            .iter_mut()
            .for_each(|c| c.auto_bracket_insert(buffer, to_insert));
    }

    /// Plain insertion at every cursor.
    fn insert_normal_text<T: TextBuffer<Item = u8>>(&mut self, to_insert: &[u8], buffer: &mut T) {
        self.cursors
            .iter_mut()
            .for_each(|c| c.insert_normal_text(to_insert, buffer));
    }

    /// Backspace at every cursor.
    fn delete_char<T: TextBuffer>(&mut self, buffer: &mut T) {
        self.cursors.iter_mut().for_each(|c| c.delete_char(buffer));
    }
}
// TODO:
// To make this a fully headless editor I need to handle all the basic interactions
// Selections
// Undo/redo
// Copy/cut/paste
// Text decorations?
// Text annotation?
// What text is visible
// Real Text Buffer
// Save to File?
// Load from File?
// Single line support
// TODO: Make a fake text buffer impl
// test it by doing compensating actions
// and making sure state is always reset.
/// Test double for `TextBuffer`: stores bytes sparsely by absolute
/// (line, column) so text can be "inserted" anywhere without real layout.
// NOTE(review): this struct and its `TextBuffer` impl are not gated on
// #[cfg(test)] although `EventTextBuffer::new` is — confirm intentional.
struct EventTextBuffer {
    /// Sparse map of every written byte, keyed by (line, column).
    text_positions: HashMap<(usize, usize), u8>,
    /// Flattened, position-ordered copy of `text_positions`, used by `lines()`.
    bytes: Vec<u8>,
}
// This is a weird implementation because I can insert anywhere
#[cfg(test)]
impl EventTextBuffer {
    /// Creates an empty test buffer.
    fn new() -> Self {
        Self {
            text_positions: HashMap::new(),
            bytes: Vec::new(),
        }
    }
}
impl TextBuffer for EventTextBuffer {
type Item = u8;
fn line_length(&self, _line: usize) -> usize {
80
}
fn line_count(&self) -> usize {
80
}
fn insert_bytes(&mut self, line: usize, column: usize, text: &[u8]) {
for (i, byte) in text.iter().enumerate() {
self.text_positions.insert((line, column + i), *byte);
}
let mut text_positions: Vec<(&(usize, usize), &u8)> = self.text_positions.iter().collect();
text_positions.sort_by_key(|x| x.0);
self.bytes = text_positions.iter().map(|x| *x.1).collect();
}
fn byte_at_pos(&self, line: usize, column: usize) -> Option<&u8> {
self.text_positions.get(&(line, column))
}
fn delete_char(&mut self, line: usize, column: usize) {
self.text_positions.remove(&(line, column));
}
fn lines(&self) -> LineIter<'_, Self::Item> {
LineIter {
current_position: 0,
items: &self.bytes,
newline: &b'\n',
}
}
fn set_contents(&mut self, contents: &[Self::Item]) {
self.bytes = contents.to_vec();
}
}
#[cfg(test)]
mod tests {
    use super::*;

    /// Insert a string, verify every byte landed, then backspace it all away.
    #[test]
    fn adding_and_remove_basic() {
        let mut cursor = Cursor::new(0, 0);
        let mut buffer = EventTextBuffer::new();
        let my_string = b"Hello World";
        cursor.handle_insert(my_string, &mut buffer);
        for i in 0..my_string.len() {
            assert_eq!(buffer.byte_at_pos(0, i), Some(&my_string[i]));
        }
        for _ in 0..my_string.len() {
            cursor.delete_char(&mut buffer);
        }
        // Compensating deletes must leave the sparse store empty again.
        assert!(buffer.text_positions.is_empty());
    }

    /// Same round-trip, but with two cursors on consecutive lines editing
    /// simultaneously.
    #[test]
    fn adding_and_remove_basic_multi() {
        let cursor = Cursor::new(0, 0);
        let mut buffer = EventTextBuffer::new();
        let cursor_below = cursor.below(&buffer);
        let my_string = b"Hello World";
        let mut multi_cursor = MultiCursor::new();
        multi_cursor.add_cursor(cursor);
        multi_cursor.add_cursor(cursor_below);
        multi_cursor.handle_insert(my_string, &mut buffer);
        for cursor in multi_cursor.cursors.iter() {
            for i in 0..my_string.len() {
                assert_eq!(buffer.byte_at_pos(cursor.line(), i), Some(&my_string[i]));
            }
        }
        for _ in 0..my_string.len() {
            multi_cursor.delete_char(&mut buffer);
        }
        assert!(buffer.text_positions.is_empty());
    }

    /// Two inserted lines should each come back intact from `lines()`.
    #[test]
    fn test_lines() {
        let mut cursor = Cursor::new(0, 0);
        let mut buffer = EventTextBuffer::new();
        let my_string = b"Hello World";
        cursor.handle_insert(my_string, &mut buffer);
        cursor.handle_insert(b"\n", &mut buffer);
        cursor.handle_insert(b"Hello World", &mut buffer);
        cursor.handle_insert(b"\n", &mut buffer);
        for line in buffer.lines() {
            assert!(line == my_string)
        }
    }
}
|
extern crate image;
/* catpicture
* @author Joseph Catrambone <jo.jcat@gmail.com>
* Release notes:
* v0.1.0 : First release -- Supports just '#' for output style. Allows -c for full-color mode, -r, -w, -h to change sizes.
* v0.2.0 : Automatically select correct aspect ratio when only -w or -h supplied. Support force-grey.
* v0.3.0 : Add new line algorithms with --line. Can fill BG instead of '#', supports BG, '#', and gradient.
* v0.4.0 : Use nearest neighbor to select the best looking ascii stand-in.
* v0.5.0 : Hardening and improvements to robustness. Bounds checking. Ready for beta release.
* v0.6.0 : Allow threshold to be set for _not_ drawing, so if people want black text to show as empty space (for writing to text file), that can be done.
* v0.7.0 : Introduce FFT to split high-frequency pixels from low frequency pixels. Draw high frequency in FG with font, low frequency in BG.
* v1.0.0 : Ready for release.
*/
use std::char;
use std::clone::Clone;
use std::collections::HashMap;
use std::fmt::Write;
use std::env;
use std::io::{Cursor, Read, self};
use std::option::Option;
use std::path::Path;
use image::{GenericImage, imageops, FilterType, DynamicImage, Pixel, GenericImageView}; // Pixel used for .to_luma.
const COMPARISON_SET : &'static str = "characters.png";
const DEFAULT_WIDTH : u32 = 80;
const LINE_ALGORITHM : &'static str = "-d";
const USE_FULL_COLORS : &'static str = "-c";
const OUTPUT_WIDTH : &'static str = "-w";
const OUTPUT_HEIGHT : &'static str = "-h";
const SOURCE_RECT : &'static str = "-r";
const FORCE_GREY : &'static str = "-g";
const HELP_SHORT : &'static str = "-?";
const HELP_LONG : &'static str = "--help";
const HELP_STRING : &'static str = r#"
Usage:
catpicture [--help|-?] [-c] [-w] [-h] [-r x1 y1 x2 y2] [-g] [-d block|art|char x] [filename]
--help/-? This message.
-c Try to use full color instead of nearest XTERM color.
-w Set output width.
-h Set output height.
-r xywh Given four points (left top right bottom), cut the specified region from the picture for display.
-g Force greyscale on image.
-d Specify the 'draw mode' for the output.
block -> Only background will be filled.
art -> Use nearest neighbor to find the best approximate character match for a patch.
char -> Use the specified character to draw.
filename The name of the image to open. If unspecified, reads from stdin.
"#;
#[derive(PartialEq)]
enum DrawMode {
Block,
Char(char),
Art,
}
struct Settings {
input_filename : String, // Will be "" for stdin.
output_width : Option<u32>,
output_height : Option<u32>,
region : Option<(u32, u32, u32, u32)>,
use_full_colors : bool,
show_help : bool,
force_grey : bool,
draw_mode : DrawMode,
}
fn parse_args(args : Vec<String>) -> Settings {
let mut settings = Settings {
input_filename : "".to_string(),
output_width : None,
output_height : None,
region : None,
show_help : false,
use_full_colors : false,
force_grey : false,
draw_mode : DrawMode::Block,
};
let mut skip_args = 0; // True if the argument was consumed.
for i in 1..args.len() {
if skip_args > 0 { // We consumed this argument as part of the first run.
skip_args -= 1;
continue;
}
// args[0] == file name.
let arg = args[i].to_lowercase();
if arg == USE_FULL_COLORS {
settings.use_full_colors = true;
} else if arg == HELP_SHORT || args[i] == HELP_LONG {
settings.show_help = true;
} else if arg == OUTPUT_WIDTH { // TODO: Check OOB.
settings.output_width = Some(args[i+1].parse::<u32>().unwrap());
skip_args = 1;
} else if arg == OUTPUT_HEIGHT {
// Check if the user is passing -h to try and get to help.
if i+1 >= args.len() {
println!("-h specifies the height, but that argument is missing. You probably meant to use -? or --help");
settings.show_help = true;
continue;
} else {
settings.output_height = Some(args[i+1].parse::<u32>().unwrap());
skip_args = 1;
}
} else if arg == LINE_ALGORITHM {
skip_args = 0; // Set this inside the switch.
let mode = &args[i+1].to_lowercase();
settings.draw_mode = match mode.as_ref() {
"block" => DrawMode::Block,
"art" => DrawMode::Art,
"char" => {
skip_args = 1;
DrawMode::Char(args[i+2].chars().nth(0).unwrap())
},
_ => {
println!("Unrecognized draw mode. Defaulting to block.");
DrawMode::Char('#')
}
};
skip_args += 1; // NOTE: Add one because we may skip another line if we have to get the character.
} else if arg == SOURCE_RECT {
settings.region = Some((
args[i+1].parse::<u32>().unwrap(),
args[i+2].parse::<u32>().unwrap(),
args[i+3].parse::<u32>().unwrap(),
args[i+4].parse::<u32>().unwrap(),
));
skip_args = 4;
} else if arg == FORCE_GREY {
settings.force_grey = true;
} else {
if settings.input_filename == "" && args[i].chars().nth(0).unwrap_or('-') != '-' {
settings.input_filename = args[i].to_string();
} else {
panic!("Unrecognized argument #{}: {}", i, args[i]);
}
}
}
settings
}
/// Prints `c` with the given foreground/background colors via ANSI escape
/// codes, then resets terminal attributes.
///
/// With `use_full_colors`, 24-bit "truecolor" sequences are emitted;
/// otherwise both colors are snapped to the nearest of the 8 classic
/// terminal colors (SGR 30-37 foreground, 40-47 background).
fn print_color_character(c: char, fg: (u8, u8, u8), bg: (u8, u8, u8), use_full_colors: bool) {
    if use_full_colors {
        // ESC[38;2;<r>;<g>;<b>m (Foreground)
        // ESC[48;2;<r>;<g>;<b>m (Background)
        // BUG FIX: the background escape was never emitted, so Block mode
        // (a space colored only by its background) was invisible with -c.
        print!(
            "\u{1B}[38;2;{};{};{}m\u{1B}[48;2;{};{};{}m{}",
            fg.0, fg.1, fg.2, bg.0, bg.1, bg.2, c
        );
    } else {
        // Classic 8-color ANSI palette. BUG FIX: yellow and cyan had their
        // RGB values swapped (yellow = 255,255,0 -> 33; cyan = 0,255,255 -> 36).
        // The table is a const array now — no per-call HashMap allocation.
        const PALETTE: [([u8; 3], i32); 8] = [
            ([0, 0, 0], 30),       // Black
            ([255, 0, 0], 31),     // Red
            ([0, 255, 0], 32),     // Green
            ([255, 255, 0], 33),   // Yellow
            ([0, 0, 255], 34),     // Blue
            ([255, 0, 255], 35),   // Magenta
            ([0, 255, 255], 36),   // Cyan
            ([255, 255, 255], 37), // White
        ];
        // Squared-distance in RGB space.
        let dist2 = |color: (u8, u8, u8), candidate: &[u8; 3]| -> i32 {
            let dr = color.0 as i32 - candidate[0] as i32;
            let dg = color.1 as i32 - candidate[1] as i32;
            let db = color.2 as i32 - candidate[2] as i32;
            dr * dr + dg * dg + db * db
        };
        // Defaults (39) are unreachable since some palette entry always wins.
        let mut nearest_foreground_color = 39;
        let mut nearest_foreground_dist = 195_075i32 + 1; // Past max rgb^2 (3 * 255^2).
        let mut nearest_background_color = 39;
        // BUG FIX: was 194075 — a typo for 195075 (harmless in practice,
        // since the nearest palette entry is always much closer).
        let mut nearest_background_dist = 195_075i32 + 1;
        for (rgb, color_code) in PALETTE.iter() {
            let dist = dist2(fg, rgb);
            if dist < nearest_foreground_dist {
                nearest_foreground_color = *color_code;
                nearest_foreground_dist = dist;
            }
            let dist = dist2(bg, rgb);
            if dist < nearest_background_dist {
                nearest_background_color = *color_code + 10; // 40-47 are BG codes.
                nearest_background_dist = dist;
            }
        }
        print!("\u{1B}[{}m\u{1B}[{}m{}", nearest_foreground_color, nearest_background_color, c);
    }
    //print!("\u{1B}[39m"); // Alternate reset.
    print!("\u{1B}[0m"); // Reset so subsequent output is unaffected.
}
/// Prints the usage text to stdout.
fn print_help() {
    println!("{}", HELP_STRING);
}
/// Determines the output dimensions in characters.
///
/// A missing dimension is derived from the source image's aspect ratio;
/// when both are missing, the width defaults to `DEFAULT_WIDTH` and the
/// height follows from the aspect ratio.
fn calculate_target_dimension(maybe_width: Option<u32>, maybe_height: Option<u32>, image_width: u32, image_height: u32) -> (u32, u32) {
    let aspect_ratio = image_width as f32 / image_height as f32;
    let height_for = |w: u32| (w as f32 / aspect_ratio) as u32;
    let width_for = |h: u32| (h as f32 * aspect_ratio) as u32;
    match (maybe_width, maybe_height) {
        (Some(w), Some(h)) => (w, h),
        (Some(w), None) => (w, height_for(w)),
        (None, Some(h)) => (width_for(h), h),
        (None, None) => (DEFAULT_WIDTH, height_for(DEFAULT_WIDTH)),
    }
}
/// Slices the horizontal font atlas into one sub-image per printable ASCII
/// character, left to right, starting at ' '.
///
/// NOTE(review): `b'~' - b' '` is 94, which excludes '~' itself (95 would
/// include it) — confirm against the atlas layout.
fn build_character_image_vector(font_image: &DynamicImage) -> Vec<DynamicImage> {
    let num_characters: u32 = (b'~' - b' ') as u32;
    let character_width = font_image.dimensions().0 / num_characters;
    let character_height = font_image.dimensions().1;
    // PERF FIX: clone the atlas once instead of once per character;
    // `crop` copies the requested region out and leaves its source usable.
    let mut atlas = font_image.clone();
    let mut characters = Vec::with_capacity(num_characters as usize);
    for i in 0..num_characters {
        characters.push(atlas.crop(i * character_width, 0, character_width, character_height));
    }
    characters
}
/// Finds the printable ASCII character whose glyph best matches the image
/// patch that will become output cell (x, y).
///
/// `characters` is the glyph list from `build_character_image_vector` (all
/// printable characters from ' ' upward, in order). `w`/`h` are the final
/// output dimensions; the patch sampled from `input_image` is the source
/// region that maps onto cell (x, y).
///
/// NOTE(review): `w`/`h` larger than the input image make the per-cell
/// sample size zero — confirm callers always downscale.
fn find_best_character(x: u32, y: u32, w: u32, h: u32, input_image: &DynamicImage, characters: &[DynamicImage]) -> char {
    let (input_width, input_height) = input_image.dimensions();
    // Source-pixels per output cell, horizontally and vertically.
    let pixel_width = input_width / w;
    let pixel_height = input_height / h;
    let (character_width, character_height) = characters[0].dimensions();
    let mut best_char = ' ';
    let mut best_distance: u32 = character_width * character_height * 255 + 1; // Max distance.
    // BUG FIX: the vertical offset was `y * pixel_width`; it must scale by
    // the cell height, or every non-square sampling reads the wrong rows.
    let input_image_region = input_image.clone().crop(x * pixel_width, y * pixel_height, pixel_width, pixel_height);
    let target_region = imageops::resize(&input_image_region, character_width, character_height, FilterType::CatmullRom);
    'charloop: for (char_index, source_character) in characters.iter().enumerate() {
        // Sum of absolute luma differences between the patch and this glyph.
        let mut distance: u32 = 0;
        for py in 0..character_height {
            for px in 0..character_width {
                let target_pixel = target_region.get_pixel(px, py);
                let candidate_pixel = source_character.get_pixel(px, py);
                distance += (candidate_pixel.to_luma().data[0] as i32 - target_pixel.to_luma().data[0] as i32).abs() as u32;
            }
            // Only break per row to keep the branch off the innermost loop:
            // once we already exceed the best distance, this glyph loses.
            if distance > best_distance {
                continue 'charloop;
            }
        }
        if distance < best_distance {
            best_distance = distance;
            best_char = char::from_u32(char_index as u32 + b' ' as u32).unwrap();
        }
    }
    best_char
}
/// Program entry point: parses CLI arguments, loads the input image (from a
/// file, or from stdin when no filename is given), optionally crops and
/// resizes it, then renders it to the terminal as colored characters.
fn main() {
    let arguments: Vec<_> = env::args().collect();
    let settings = parse_args(arguments);
    if settings.show_help {
        print_help();
    } else {
        // An empty input filename means "read the image bytes from stdin".
        let mut img = if settings.input_filename == "" {
            // Don't do this because it expects a UTF-8 string:
            //let mut buffer = String::new();
            //io::stdin().read_to_string(&mut buffer);
            // This may be an option:
            //image::load(std::io::BufReader::new(std::io::stdin()))
            let mut buffer = Vec::<u8>::new();
            // NOTE(review): read errors are silently ignored here; a short
            // read will surface as a decode failure just below.
            match io::stdin().read_to_end(&mut buffer) { _ => () };
            match image::load_from_memory(&buffer) {
                Ok(img) => img,
                Err(problem) => { panic!("Problem loading image from stream: {}", problem); }
            }
        } else {
            image::open(&Path::new(&settings.input_filename)).unwrap()
        };
        // Calculate aspect ratio and see if there are any requests outside the image range.
        let (image_width, image_height) = img.dimensions();
        //let color = img.color();
        let (target_width, target_height) = calculate_target_dimension(settings.output_width, settings.output_height, image_width, image_height);
        // Only crop if the rect flag is set.
        img = match settings.region {
            Some(rect) => { img.crop(rect.0, rect.1, rect.2-rect.0, rect.3-rect.1) },
            None => { img },
        };
        let target_region = imageops::resize(&img, target_width, target_height, FilterType::CatmullRom); // Nearest/Triangle/CatmullRom/Gaussian/Lanczos3
        // Since we're calling this every pixel, let's preload the comparison NN set for the 'best character' search, but only if the mode is 'Art'.
        // TODO: Make this optionally loaded.
        let font_image = image::load(Cursor::new(&include_bytes!("characters.png")[..]), image::PNG).unwrap(); // TODO: MAGIC NUMBER - Make 'characters' a magic number.
        let character_image_vector = build_character_image_vector(&font_image);
        for (x, y, pixel) in target_region.enumerate_pixels() { // TODO: pixel should be yielding x, y, pixel.
            // Extract pixel color and, if needed, convert it to grey before passing it off to the draw method.
            let mut rgb = (pixel.data[0], pixel.data[1], pixel.data[2]);
            if settings.force_grey {
                // TODO: Check if already luma and use to_luma.
                // Plain channel average; to_luma would give a weighted luminance.
                let sum_rgb : u8 = ((pixel.data[0] as u32 + pixel.data[1] as u32 + pixel.data[2] as u32) / 3) as u8;
                rgb = (sum_rgb, sum_rgb, sum_rgb);
            }
            // Dispatch draw call. Sometimes we have to select the best character.
            match settings.draw_mode {
                DrawMode::Block => { print_color_character(' ', (0, 0, 0), rgb, settings.use_full_colors) },
                DrawMode::Char(c) => { print_color_character(c, rgb, (0, 0, 0), settings.use_full_colors) },
                DrawMode::Art => { print_color_character(find_best_character(x, y, target_width, target_height, &img, &character_image_vector), rgb, (0, 0, 0), settings.use_full_colors) },
            };
            // Generate newline if we're at the edge of the output.
            if x == target_width-1 {
                print!("\n");
            }
        }
    }
}
|
#![allow(non_snake_case, non_camel_case_types)]
use libc::c_int;
use crate::channel::ssh_channel;
use crate::session::ssh_session;
use crate::socket_t;
/// Opaque libssh connector handle; zero-sized on the Rust side because the
/// struct layout is private to the C library.
#[repr(C)]
#[derive(Debug, Copy, Clone)]
pub struct ssh_connector_struct {
    _unused: [u8; 0],
}
/// Raw pointer type the C API uses for connector handles.
pub type ssh_connector = *mut ssh_connector_struct;
/// C enum `ssh_connector_flags_e`, represented as a plain `c_int`.
pub type ssh_connector_flags_e = c_int;
/// Only the standard stream of the channel
pub const SSH_CONNECTOR_STDOUT: ssh_connector_flags_e = 1;
/// Only the standard stream of the channel
// NOTE(review): same numeric value as `SSH_CONNECTOR_STDOUT`; this constant
// is an alias, not a distinct flag bit.
pub const SSH_CONNECTOR_STDINOUT: ssh_connector_flags_e = 1;
/// Only the exception stream of the channel
pub const SSH_CONNECTOR_STDERR: ssh_connector_flags_e = 2;
/// Merge both standard and exception streams
pub const SSH_CONNECTOR_BOTH: ssh_connector_flags_e = 3;
extern "C" {
    pub fn ssh_connector_new(session: ssh_session) -> ssh_connector;
    pub fn ssh_connector_free(connector: ssh_connector);
    // NOTE(review): `flags` is declared as `i32` rather than
    // `ssh_connector_flags_e`; the two alias the same type (c_int) on
    // supported platforms, but the named alias would be clearer.
    pub fn ssh_connector_set_in_channel(
        connector: ssh_connector,
        channel: ssh_channel,
        flags: i32,
    ) -> c_int;
    pub fn ssh_connector_set_out_channel(
        connector: ssh_connector,
        channel: ssh_channel,
        flags: i32,
    ) -> c_int;
    pub fn ssh_connector_set_in_fd(connector: ssh_connector, fd: socket_t);
    pub fn ssh_connector_set_out_fd(connector: ssh_connector, fd: socket_t);
}
|
use super::{Error, Module, SassFunction};
use crate::css::Value;
use crate::ordermap::OrderMap;
use crate::value::ListSeparator;
/// Create the `sass:map` standard module.
///
/// Should conform to
/// [the specification](https://sass-lang.com/documentation/modules/map).
pub fn create_module() -> Module {
    let mut f = Module::new();
    // TODO deep_merge and deep_remove

    // map.get(map, key, keys...) — look up a (possibly nested) key; a miss
    // anywhere along the path yields null.
    def_va!(f, get(map, key, keys), |s| {
        let map = get_map(s.get("map")?)?;
        let mut val = map.get(&s.get("key")?).cloned();
        match s.get("keys")? {
            Value::List(keys, ..) => {
                // Walk one map level per extra key.
                for k in keys {
                    match val {
                        Some(Value::Map(m)) => {
                            val = m.get(&k).cloned();
                        }
                        _ => return Ok(Value::Null),
                    }
                }
            }
            Value::Null => (),
            key => {
                // Single key
                match val {
                    Some(Value::Map(m)) => {
                        val = m.get(&key).cloned();
                    }
                    _ => return Ok(Value::Null),
                }
            } //_ => (),
        };
        Ok(val.unwrap_or(Value::Null))
    });
    // map.has-key(map, key, keys...) — true iff the (nested) key exists.
    def_va!(f, has_key(map, key, keys), |s| {
        let map = get_map(s.get("map")?)?;
        match s.get("keys")? {
            Value::List(keys, ..) => {
                if let Some((last, keys)) = keys.split_last() {
                    let mut val = map.get(&s.get("key")?).cloned();
                    for k in keys {
                        match val {
                            Some(Value::Map(m)) => {
                                val = m.get(&k).cloned();
                            }
                            _ => return Ok(Value::False),
                        }
                    }
                    if let Some(Value::Map(val)) = val {
                        Ok(val.contains_key(last).into())
                    } else {
                        Ok(Value::False)
                    }
                } else {
                    Ok(map.contains_key(&s.get("key")?).into())
                }
            }
            Value::Null => Ok(map.contains_key(&s.get("key")?).into()),
            key => {
                // Single key
                let val = map.get(&s.get("key")?).cloned();
                match val {
                    Some(Value::Map(m)) => Ok(m.contains_key(&key).into()),
                    // Bug fix: has-key answers a boolean question, so a
                    // missing or non-map intermediate value must be `false`,
                    // not `null` (this matches the list branch above and the
                    // sass map.has-key specification).
                    _ => return Ok(Value::False),
                }
            } //_ => (),
        }
    });
    // map.keys(map) — all keys as a comma-separated list.
    def!(f, keys(map), |s| {
        let map = get_map(s.get("map")?)?;
        Ok(Value::List(map.keys(), ListSeparator::Comma, false))
    });
    // TODO: Merge should be varargs
    // map.merge(map1, map2) — entries from map2 win on key collisions.
    def!(f, merge(map1, map2), |s| {
        let mut map1 = get_map(s.get("map1")?)?;
        let map2 = get_map(s.get("map2")?)?;
        for (key, value) in map2 {
            map1.insert(key, value);
        }
        Ok(Value::Map(map1))
    });
    // It's really map_remove(map, keys), but "key" is supported as an
    // alias for "keys", which makes a mess when using more than one
    // positional argument.
    def_va!(f, remove(map, key, keys), |s| {
        let mut map = get_map(s.get("map")?)?;
        let key = s.get("key")?;
        let keys = s.get("keys")?;
        match (key, keys) {
            (first, Value::List(rest, ..)) => {
                map.remove(&first);
                for key in rest {
                    map.remove(&key);
                }
            }
            (Value::List(keys, ..), Value::Null) => {
                for key in keys {
                    map.remove(&key);
                }
            }
            (first, second) => {
                map.remove(&first);
                map.remove(&second);
            }
        }
        Ok(Value::Map(map))
    });
    // map.set(map, key, value) — insert or overwrite a single key.
    def!(f, set(map, key, value), |s| {
        // TODO: handle keys... arguments before key for nested maps
        let mut map = get_map(s.get("map")?)?;
        let key = s.get("key")?;
        let value = s.get("value")?;
        map.insert(key, value);
        Ok(Value::Map(map))
    });
    // map.values(map) — all values as a comma-separated list.
    def!(f, values(map), |s| {
        let map = get_map(s.get("map")?)?;
        Ok(Value::List(map.values(), ListSeparator::Comma, false))
    });
    f
}
/// Register the `map` module's functions under their global (`map_*`) names.
pub fn expose(map: &Module, global: &mut Module) {
    let aliases: &[(&str, &str)] = &[
        ("map_get", "get"),
        ("map_has_key", "has_key"),
        ("map_keys", "keys"),
        ("map_merge", "merge"),
        ("map_remove", "remove"),
        ("map_values", "values"),
    ];
    for &(global_name, local_name) in aliases {
        let function = map.get(local_name).unwrap().clone();
        global.insert(global_name, function);
    }
}
fn get_map(v: Value) -> Result<OrderMap<Value, Value>, Error> {
match v {
Value::Map(m) => Ok(m),
// An empty map and an empty list looks the same
Value::List(ref l, ..) if l.is_empty() => Ok(OrderMap::new()),
v => Err(Error::badarg("map", &v)),
}
}
#[cfg(test)]
mod test {
    // http://sass-lang.com/documentation/Sass/Script/Functions.html
    // Each case evaluates a sass expression and checks its rendered output.
    mod map_get {
        use super::check_val;
        #[test]
        fn a() {
            check_val("map-get((\"foo\": 1, \"bar\": 2), \"foo\");", "1")
        }
        #[test]
        fn b() {
            check_val("map-get((\"foo\": 1, \"bar\": 2), \"bar\");", "2")
        }
        #[test]
        fn c() {
            // A missing key evaluates to null, which renders as nothing.
            check_val("map-get((\"foo\": 1, \"bar\": 2), \"baz\");", "")
        }
    }
    mod map_has_key {
        use super::check_val;
        #[test]
        fn a() {
            check_val(
                "map-has-key((\"foo\": 1, \"bar\": 2), \"foo\");",
                "true",
            )
        }
        #[test]
        fn b() {
            check_val(
                "map-has-key((\"foo\": 1, \"bar\": 2), \"baz\");",
                "false",
            )
        }
    }
    // Evaluate `src` in an empty scope and compare the rendered result.
    fn check_val(src: &str, correct: &str) {
        use crate::variablescope::test::do_evaluate;
        assert_eq!(do_evaluate(&[], src.as_bytes()), correct)
    }
}
|
/*
* Slack Web API
*
* One way to interact with the Slack platform is its HTTP RPC-based Web API, a collection of methods requiring OAuth 2.0-based user, bot, or workspace tokens blessed with related OAuth scopes.
*
* The version of the OpenAPI document: 1.7.0
*
* Generated by: https://openapi-generator.tech
*/
use reqwest;
use crate::apis::ResponseContent;
use super::{Error, configuration};
/// struct for typed errors of method `stars_add`
// Untagged: deserialization tries each variant in order, falling back to a
// raw JSON value when the payload doesn't match the documented shape.
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(untagged)]
pub enum StarsAddError {
    DefaultResponse(::std::collections::HashMap<String, serde_json::Value>),
    UnknownValue(serde_json::Value),
}
/// struct for typed errors of method `stars_list`
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(untagged)]
pub enum StarsListError {
    DefaultResponse(::std::collections::HashMap<String, serde_json::Value>),
    UnknownValue(serde_json::Value),
}
/// struct for typed errors of method `stars_remove`
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(untagged)]
pub enum StarsRemoveError {
    DefaultResponse(::std::collections::HashMap<String, serde_json::Value>),
    UnknownValue(serde_json::Value),
}
/// Adds a star to an item.
pub async fn stars_add(configuration: &configuration::Configuration, token: &str, channel: Option<&str>, file: Option<&str>, file_comment: Option<&str>, timestamp: Option<&str>) -> Result<::std::collections::HashMap<String, serde_json::Value>, Error<StarsAddError>> {
let local_var_client = &configuration.client;
let local_var_uri_str = format!("{}/stars.add", configuration.base_path);
let mut local_var_req_builder = local_var_client.post(local_var_uri_str.as_str());
if let Some(ref local_var_user_agent) = configuration.user_agent {
local_var_req_builder = local_var_req_builder.header(reqwest::header::USER_AGENT, local_var_user_agent.clone());
}
local_var_req_builder = local_var_req_builder.header("token", token.to_string());
if let Some(ref local_var_token) = configuration.oauth_access_token {
local_var_req_builder = local_var_req_builder.bearer_auth(local_var_token.to_owned());
};
let mut local_var_form_params = std::collections::HashMap::new();
if let Some(local_var_param_value) = channel {
local_var_form_params.insert("channel", local_var_param_value.to_string());
}
if let Some(local_var_param_value) = file {
local_var_form_params.insert("file", local_var_param_value.to_string());
}
if let Some(local_var_param_value) = file_comment {
local_var_form_params.insert("file_comment", local_var_param_value.to_string());
}
if let Some(local_var_param_value) = timestamp {
local_var_form_params.insert("timestamp", local_var_param_value.to_string());
}
local_var_req_builder = local_var_req_builder.form(&local_var_form_params);
let local_var_req = local_var_req_builder.build()?;
let local_var_resp = local_var_client.execute(local_var_req).await?;
let local_var_status = local_var_resp.status();
let local_var_content = local_var_resp.text().await?;
if !local_var_status.is_client_error() && !local_var_status.is_server_error() {
serde_json::from_str(&local_var_content).map_err(Error::from)
} else {
let local_var_entity: Option<StarsAddError> = serde_json::from_str(&local_var_content).ok();
let local_var_error = ResponseContent { status: local_var_status, content: local_var_content, entity: local_var_entity };
Err(Error::ResponseError(local_var_error))
}
}
/// Lists stars for a user.
pub async fn stars_list(configuration: &configuration::Configuration, token: Option<&str>, count: Option<&str>, page: Option<&str>, cursor: Option<&str>, limit: Option<i32>) -> Result<::std::collections::HashMap<String, serde_json::Value>, Error<StarsListError>> {
let local_var_client = &configuration.client;
let local_var_uri_str = format!("{}/stars.list", configuration.base_path);
let mut local_var_req_builder = local_var_client.get(local_var_uri_str.as_str());
if let Some(ref local_var_str) = token {
local_var_req_builder = local_var_req_builder.query(&[("token", &local_var_str.to_string())]);
}
if let Some(ref local_var_str) = count {
local_var_req_builder = local_var_req_builder.query(&[("count", &local_var_str.to_string())]);
}
if let Some(ref local_var_str) = page {
local_var_req_builder = local_var_req_builder.query(&[("page", &local_var_str.to_string())]);
}
if let Some(ref local_var_str) = cursor {
local_var_req_builder = local_var_req_builder.query(&[("cursor", &local_var_str.to_string())]);
}
if let Some(ref local_var_str) = limit {
local_var_req_builder = local_var_req_builder.query(&[("limit", &local_var_str.to_string())]);
}
if let Some(ref local_var_user_agent) = configuration.user_agent {
local_var_req_builder = local_var_req_builder.header(reqwest::header::USER_AGENT, local_var_user_agent.clone());
}
if let Some(ref local_var_token) = configuration.oauth_access_token {
local_var_req_builder = local_var_req_builder.bearer_auth(local_var_token.to_owned());
};
let local_var_req = local_var_req_builder.build()?;
let local_var_resp = local_var_client.execute(local_var_req).await?;
let local_var_status = local_var_resp.status();
let local_var_content = local_var_resp.text().await?;
if !local_var_status.is_client_error() && !local_var_status.is_server_error() {
serde_json::from_str(&local_var_content).map_err(Error::from)
} else {
let local_var_entity: Option<StarsListError> = serde_json::from_str(&local_var_content).ok();
let local_var_error = ResponseContent { status: local_var_status, content: local_var_content, entity: local_var_entity };
Err(Error::ResponseError(local_var_error))
}
}
/// Removes a star from an item.
pub async fn stars_remove(configuration: &configuration::Configuration, token: &str, channel: Option<&str>, file: Option<&str>, file_comment: Option<&str>, timestamp: Option<&str>) -> Result<::std::collections::HashMap<String, serde_json::Value>, Error<StarsRemoveError>> {
let local_var_client = &configuration.client;
let local_var_uri_str = format!("{}/stars.remove", configuration.base_path);
let mut local_var_req_builder = local_var_client.post(local_var_uri_str.as_str());
if let Some(ref local_var_user_agent) = configuration.user_agent {
local_var_req_builder = local_var_req_builder.header(reqwest::header::USER_AGENT, local_var_user_agent.clone());
}
local_var_req_builder = local_var_req_builder.header("token", token.to_string());
if let Some(ref local_var_token) = configuration.oauth_access_token {
local_var_req_builder = local_var_req_builder.bearer_auth(local_var_token.to_owned());
};
let mut local_var_form_params = std::collections::HashMap::new();
if let Some(local_var_param_value) = channel {
local_var_form_params.insert("channel", local_var_param_value.to_string());
}
if let Some(local_var_param_value) = file {
local_var_form_params.insert("file", local_var_param_value.to_string());
}
if let Some(local_var_param_value) = file_comment {
local_var_form_params.insert("file_comment", local_var_param_value.to_string());
}
if let Some(local_var_param_value) = timestamp {
local_var_form_params.insert("timestamp", local_var_param_value.to_string());
}
local_var_req_builder = local_var_req_builder.form(&local_var_form_params);
let local_var_req = local_var_req_builder.build()?;
let local_var_resp = local_var_client.execute(local_var_req).await?;
let local_var_status = local_var_resp.status();
let local_var_content = local_var_resp.text().await?;
if !local_var_status.is_client_error() && !local_var_status.is_server_error() {
serde_json::from_str(&local_var_content).map_err(Error::from)
} else {
let local_var_entity: Option<StarsRemoveError> = serde_json::from_str(&local_var_content).ok();
let local_var_error = ResponseContent { status: local_var_status, content: local_var_content, entity: local_var_entity };
Err(Error::ResponseError(local_var_error))
}
}
|
//! Nodes that cause the execution of tasks.
use crate::{
node::{Node, Tickable},
status::Status,
};
use std::{
sync::{mpsc, mpsc::TryRecvError, Arc},
thread,
};
/// A node that manages the execution of tasks in a separate thread.
///
/// This node will launch the supplied function in a separate thread and ticks
/// will monitor the state of that thread. The return value of the function is
/// the status of the Action node.
///
/// This node should be the main way of modifying the world state. Note that,
/// despite the function being run in a separate thread, there will usually
/// only be one thread modifying the world.
///
/// Note that the supplied function will be called again the next tick if the
/// function returns either `Initialized` or `Running`.
///
/// # State
///
/// **Initialized:** Before being ticked after either being created or reset,
/// or if the function returned `Initialized`.
///
/// **Running:** While the function is being executed in the other thread or if
/// the function returned `Running`.
///
/// **Succeeded:** When the function returns `Succeeded`.
///
/// **Failed:** When the function returns `Failed`.
///
/// # Children
///
/// None.
///
/// # Examples
///
/// An action node that attempts to subtract two unsigned integers:
///
/// ```
/// # use std::sync::atomic::{AtomicUsize, ATOMIC_USIZE_INIT, Ordering};
/// # use std::sync::Arc;
/// # use aspen::std_nodes::*;
/// # use aspen::Status;
/// # use aspen::node::Tickable;
/// const FIRST: usize = 10;
/// const SECOND: usize = 100;
/// let mut result = Arc::new(AtomicUsize::default());
///
/// let mut action = Action::new(|s: Arc<AtomicUsize>| {
///     if let Some(val) = SECOND.checked_sub(FIRST) {
///         s.store(val, Ordering::SeqCst);
///         Status::Succeeded
///     } else {
///         Status::Failed
///     }
/// });
///
/// // Run the node until it completes
/// while !action.tick(&mut result).is_done() {}
/// assert_eq!(action.status().unwrap(), Status::Succeeded);
/// assert_eq!(result.load(Ordering::SeqCst), 90);
/// ```
pub struct Action<W>
where
    W: Clone + Send + Sync + 'static,
{
    /// The task which is to be run.
    func: Arc<dyn Fn(W) -> Status + Send + Sync>,
    /// Channel on which the task will communicate. `None` whenever no worker
    /// thread is currently running.
    rx: Option<mpsc::Receiver<Status>>,
}
impl<W> Action<W>
where
    W: Clone + Send + Sync + 'static,
{
    /// Creates a new Action node that will execute the given task.
    pub fn new<F>(task: F) -> Node<'static, W>
    where
        F: Fn(W) -> Status + Send + Sync + 'static,
    {
        Node::new(Action {
            func: Arc::new(task),
            rx: None,
        })
    }
    /// Launches a new worker thread to run the task.
    fn start_thread(&mut self, world: &W) {
        // Rendezvous channel: the worker blocks on `send` until the node
        // picks the status up via `try_recv` (in `tick`) or `recv` (in
        // `reset`).
        let (sender, receiver) = mpsc::sync_channel(0);
        // Clone the task and the world so the spawned thread owns its own
        // copies and can outlive this borrow.
        let task = Arc::clone(&self.func);
        let world = world.clone();
        thread::spawn(move || sender.send((task)(world)).unwrap());
        self.rx = Some(receiver);
    }
}
impl<W> Tickable<W> for Action<W>
where
    W: Clone + Send + Sync + 'static,
{
    /// Ticks the Action node a single time.
    ///
    /// The first time being ticked after being reset (or initialized), it will
    /// clone `world` and use the clone as the argument for the task function,
    /// which will be run in a separate thread. Usually, this should be an
    /// `Arc`.
    fn tick(&mut self, world: &mut W) -> Status {
        let (status, reset) = if let Some(ref mut rx) = self.rx {
            match rx.try_recv() {
                // A `Running` result drops the channel so the task is
                // relaunched on the next tick.
                Ok(Status::Running) => (Status::Running, true),
                Ok(s) => (s, false),
                // Nothing received yet: the worker is still executing.
                Err(TryRecvError::Empty) => (Status::Running, false),
                // NOTE(review): once the task has sent a terminal status and
                // exited, a further tick without an intervening reset lands
                // here (Disconnected) and panics — confirm callers always
                // reset after completion.
                Err(e) => panic!("Thread died before finishing {}", e),
            }
        } else {
            self.start_thread(world);
            (Status::Running, false)
        };
        if reset {
            self.rx.take();
        }
        status
    }
    /// Resets the internal state of this node.
    ///
    /// If there is a task currently running, this will block until the task is
    /// completed.
    fn reset(&mut self) {
        // I debated what to do here for a while. I could see someone wanting to detach
        // the thread due to time constraints, but it seems to me that it would be
        // better to avoid potential bugs that come from a node only looking
        // like its been fully reset.
        if let Some(ref mut rx) = self.rx {
            rx.recv().unwrap();
        }
        self.rx = None;
    }
    /// Returns the constant string "Action"
    fn type_name(&self) -> &'static str {
        "Action"
    }
}
/// Convenience macro for creating Action nodes.
///
/// Expands to [`Action::new`] with the supplied expression as the task.
///
/// # Examples
///
/// ```
/// # #[macro_use] extern crate aspen;
/// # fn foo(_: ()) -> aspen::Status { aspen::Status::Succeeded }
/// # fn main() {
/// let mut action = Action! { |s| foo(s) };
/// # }
/// ```
#[macro_export]
macro_rules! Action {
    ( $e:expr ) => {
        $crate::std_nodes::Action::new($e)
    };
}
/// A node that manages the execution of tasks within the ticking thread.
///
/// This node is an alternative to a normal Action node which can be used when
/// the time required to do the task is significantly less than a single tick
/// or if it can be broken down into discrete steps. If the task takes too
/// long, or too many of these nodes are utilized, the ticking rate can be
/// affected.
///
/// # State
///
/// **Initialized:** Before being ticked after either being created or reset,
/// or if the supplied function returns `Initialized`.
///
/// **Running:** When the function returns `Running`.
///
/// **Succeeded:** When the function returns `Succeeded`.
///
/// **Failed:** When the function returns `Failed`.
///
/// # Children
///
/// None.
///
/// # Examples
///
/// A short action node that attempts to subtract two unsigned integers:
///
/// ```
/// # use aspen::std_nodes::*;
/// # use aspen::Status;
/// # use aspen::node::Tickable;
/// let first = 10u32;
/// let second = 100u32;
/// let mut result = 0u32;
///
/// let mut action = InlineAction::new(|r| {
///     if let Some(n) = second.checked_sub(first) {
///         *r = n;
///         Status::Succeeded
///     } else {
///         Status::Failed
///     }
/// });
///
/// assert_eq!(action.tick(&mut result), Status::Succeeded);
/// assert_eq!(result, 90);
/// ```
pub struct InlineAction<'a, W> {
    /// The task which is to be run. Boxed so any closure capturing `'a`
    /// data can be stored.
    func: Box<dyn FnMut(&mut W) -> Status + 'a>,
}
impl<'a, W> InlineAction<'a, W>
where
    W: 'a,
{
    /// Creates a new `InlineAction` node that will execute the given task on
    /// the ticking thread.
    pub fn new<F>(task: F) -> Node<'a, W>
    where
        F: FnMut(&mut W) -> Status + 'a,
    {
        Node::new(InlineAction {
            func: Box::new(task),
        })
    }
}
impl<'a, W> Tickable<W> for InlineAction<'a, W> {
    /// Runs the wrapped closure directly on the ticking thread and returns
    /// whatever status it produces.
    fn tick(&mut self, world: &mut W) -> Status {
        (self.func)(world)
    }
    /// Resets the internal state of this node — nothing to do, since the
    /// closure holds all of the state.
    fn reset(&mut self) {}
    /// Returns the constant string "InlineAction"
    fn type_name(&self) -> &'static str {
        "InlineAction"
    }
}
/// Convenience macro for creating [`InlineAction`] nodes.
///
/// Expands to [`InlineAction::new`] with the supplied expression as the task.
///
/// # Examples
///
/// ```
/// # #[macro_use] extern crate aspen;
/// # use aspen::Status;
/// # fn foo(_: &mut ()) -> Status { Status::Running }
/// # fn main() {
/// let mut action = InlineAction! { |s| foo(s) };
/// # }
/// ```
#[macro_export]
macro_rules! InlineAction {
    ( $e:expr ) => {
        $crate::std_nodes::InlineAction::new($e)
    };
}
#[cfg(test)]
mod test {
    use crate::{
        node::Tickable,
        status::Status,
        std_nodes::{Action, InlineAction},
    };
    use std::{
        sync::{mpsc, Mutex},
        thread, time,
    };
    // The worker blocks on a rendezvous channel, so the node must keep
    // reporting `Running` until the test thread sends the final status.
    #[test]
    fn failure() {
        let (tx, rx) = mpsc::sync_channel(0);
        let mrx = Mutex::new(rx);
        let mut action = Action::new(move |_| {
            // Block until the message is sent, then return its value
            mrx.lock().unwrap().recv().unwrap()
        });
        for _ in 0..5 {
            assert_eq!(action.tick(&mut ()), Status::Running);
            thread::sleep(time::Duration::from_millis(100));
        }
        tx.send(Status::Failed).unwrap();
        let mut status = Status::Running;
        while status == Status::Running {
            status = action.tick(&mut ());
        }
        assert_eq!(status, Status::Failed);
    }
    // Same setup as `failure`, but the worker reports success.
    #[test]
    fn success() {
        let (tx, rx) = mpsc::sync_channel(0);
        let mrx = Mutex::new(rx);
        let mut action = Action::new(move |_| {
            // Block until the message is sent, then return its value
            mrx.lock().unwrap().recv().unwrap()
        });
        for _ in 0..5 {
            assert_eq!(action.tick(&mut ()), Status::Running);
            thread::sleep(time::Duration::from_millis(100));
        }
        tx.send(Status::Succeeded).unwrap();
        let mut status = Status::Running;
        while status == Status::Running {
            status = action.tick(&mut ());
        }
        assert_eq!(status, Status::Succeeded);
    }
    // Inline actions simply forward the closure's return value.
    #[test]
    fn inline_failure() {
        assert_eq!(
            InlineAction::new(|_| Status::Failed).tick(&mut ()),
            Status::Failed
        );
    }
    #[test]
    fn inline_success() {
        assert_eq!(
            InlineAction::new(|_| Status::Succeeded).tick(&mut ()),
            Status::Succeeded
        );
    }
    #[test]
    fn inline_running() {
        assert_eq!(
            InlineAction::new(|_| Status::Running).tick(&mut ()),
            Status::Running
        );
    }
}
|
use quote::quote_spanned;
use syn::parse_quote_spanned;
use syn::spanned::Spanned;
use super::{
DelayType, FlowProperties, FlowPropertyVal, OpInstGenerics, OperatorCategory,
OperatorConstraints, OperatorInstance, WriteContextArgs, RANGE_1,
};
use crate::graph::ops::OperatorWriteOutput;
/// > 1 input stream, 1 output stream
///
/// > Generic parameters: A `Lattice` type, must implement [`Merge<Self>`](https://hydro-project.github.io/hydroflow/doc/lattices/trait.Merge.html)
/// type.
///
/// A specialized operator for merging lattices together into an accumulated value. Like [`reduce()`](#reduce)
/// but specialized for lattice types. `lattice_reduce::<MyLattice>()` is equivalent to `reduce(hydroflow::lattices::Merge::merge_owned)`.
///
/// `lattice_reduce` can also be provided with one generic lifetime persistence argument, either
/// `'tick` or `'static`, to specify how data persists. With `'tick`, values will only be collected
/// within the same tick. With `'static`, values will be remembered across ticks and will be
/// aggregated with pairs arriving in later ticks. When not explicitly specified persistence
/// defaults to `'tick`.
///
/// `lattice_reduce` is differentiated from `lattice_fold` in that `lattice_reduce` does not require the accumulating type to implement `Default`.
/// But it also means that the accumulating function inputs and the accumulating type must be the same.
///
/// ```hydroflow
/// source_iter([1,2,3,4,5])
///     -> map(hydroflow::lattices::Max::new)
///     -> lattice_reduce::<'static, hydroflow::lattices::Max<usize>>()
///     -> assert_eq([hydroflow::lattices::Max::new(5)]);
/// ```
pub const LATTICE_REDUCE: OperatorConstraints = OperatorConstraints {
    name: "lattice_reduce",
    categories: &[OperatorCategory::LatticeFold],
    // Exactly one input and one output edge.
    hard_range_inn: RANGE_1,
    soft_range_inn: RANGE_1,
    hard_range_out: RANGE_1,
    soft_range_out: RANGE_1,
    num_args: 0,
    // Optional single persistence lifetime argument ('tick or 'static).
    persistence_args: &(0..=1),
    // Exactly one type argument: the lattice type.
    type_args: RANGE_1,
    is_external_input: false,
    ports_inn: None,
    ports_out: None,
    properties: FlowProperties {
        deterministic: FlowPropertyVal::Preserve,
        monotonic: FlowPropertyVal::Yes,
        inconsistency_tainted: false,
    },
    // Folding needs all of the stratum's input before it can emit.
    input_delaytype_fn: |_| Some(DelayType::Stratum),
    write_fn: |wc @ &WriteContextArgs {
                   root,
                   inputs,
                   is_pull,
                   op_inst:
                       op_inst @ OperatorInstance {
                           generics: OpInstGenerics { type_args, .. },
                           ..
                       },
                   ..
               },
               diagnostics| {
        assert!(is_pull);
        assert_eq!(1, inputs.len());
        let input = &inputs[0];
        assert_eq!(1, type_args.len());
        let lat_type = &type_args[0];
        // Synthesize the reduce argument: the lattice's `merge` function.
        let arguments = parse_quote_spanned! {lat_type.span()=> // Uses `lat_type.span()`!
            #root::lattices::Merge::<#lat_type>::merge
        };
        // Delegate code generation to the plain `reduce` operator, with the
        // synthesized merge function as its argument.
        let wc = WriteContextArgs {
            op_inst: &OperatorInstance {
                arguments,
                ..op_inst.clone()
            },
            ..wc.clone()
        };
        let OperatorWriteOutput {
            write_prologue,
            write_iterator,
            write_iterator_after,
        } = (super::reduce::REDUCE.write_fn)(&wc, diagnostics)?;
        // Wrap the input iterator with a no-op function that carries a
        // `Merge` trait bound, so a missing impl produces a clear error at
        // the lattice type's span.
        let write_iterator = quote_spanned! {lat_type.span()=> // Uses `lat_type.span()`!
            let #input = {
                /// Improve errors with `#lat_type` trait bound.
                #[inline(always)]
                fn check_inputs<Lat>(
                    input: impl ::std::iter::Iterator<Item = Lat>
                ) -> impl ::std::iter::Iterator<Item = Lat>
                where
                    Lat: #root::lattices::Merge<Lat>,
                {
                    input
                }
                check_inputs::<#lat_type>(#input)
            };
            #write_iterator
        };
        Ok(OperatorWriteOutput {
            write_prologue,
            write_iterator,
            write_iterator_after,
        })
    },
};
|
use crate::matrix::Matrix;
/*
NOTE: this is all very preliminary...
*/
/// One fully-connected layer of a neural network.
///
/// All matrices start as `None` and are allocated when the layer is wired up
/// via `set_hidden_layer` / `set_input_layer_activation_matrix`.
pub struct Layer {
    // Per-batch activation values and their gradients.
    activations: Option<Box<Matrix>>,
    delta_activations: Option<Box<Matrix>>,
    // Pre-activation weighted sums and their gradients.
    sums: Option<Box<Matrix>>,
    delta_sums: Option<Box<Matrix>>,
    // Connection weights to the previous layer and their gradients.
    weights: Option<Box<Matrix>>,
    delta_weights: Option<Box<Matrix>>,
    // Per-neuron bias terms and their gradients.
    bias: Option<Box<Matrix>>,
    delta_bias: Option<Box<Matrix>>,
    neuron_count: usize,
    // This is a double linked list... which is hard in Rust... I will need to study for this one... :/
    // And these layers are stored in a vector with each element holding knowledge of its neighbours...........
    // https://rust-unofficial.github.io/too-many-lists/ see if this helps.
    // NOTE(review): a `&'static Box<Layer>` can essentially never be produced
    // safely at runtime; consider Rc<RefCell<Layer>> + Weak back-links.
    previous_layer: Option<&'static Box<Layer>>,
    next_layer: Option<&'static Box<Layer>>,
}
impl<'a> Layer {
pub fn new(neuron_count: usize) -> Box<Layer>{
Box::new(Layer {
neuron_count: neuron_count,
//None:
activations: None,
delta_activations: None,
sums: None,
delta_sums: None,
weights: None,
delta_weights: None,
bias: None,
delta_bias: None,
previous_layer: None,
next_layer: None,
})
}
pub fn set_hidden_layer(& mut self, previous_layer: &'static Box<Layer>, trainingset_batch_size: usize) {
self.previous_layer = Some(previous_layer);
previous_layer.next_layer = Some(&Box::from_raw(self));
self.activations = Matrix::new(self.neuron_count, trainingset_batch_size);
self.delta_activations = Matrix::new(self.neuron_count, trainingset_batch_size);
self.sums = Matrix::new(self.neuron_count, trainingset_batch_size);
self.delta_sums = Matrix::new(self.neuron_count, trainingset_batch_size);
self.weights = Matrix::new(self.neuron_count, previous_layer.neuron_count);
self.delta_weights = Matrix::new(self.neuron_count, previous_layer.neuron_count);
self.bias = Matrix::new(self.neuron_count, 1);
self.delta_bias = Matrix::new(self.neuron_count, 1);
// None:
self.next_layer = None
}
pub fn feed_forward() {
}
pub fn back_prop() {
}
pub fn set_input_layer_activation_matrix(input_neuron_count: usize, batch_size: usize ) -> Box<Layer> {
Box::new(Layer {
activations: Matrix::new(input_neuron_count, batch_size),
//None:
delta_activations: None,
sums: None,
delta_sums: None,
weights: None,
delta_weights: None,
bias: None,
delta_bias: None,
neuron_count: input_neuron_count,
previous_layer: None,
next_layer: None,
})
}
} |
//! Warning: this example is a lot more advanced than the others.
//!
//! This example shows a possible way to make shred interact with a scripting
//! language.
//!
//! It does that by implementing `DynamicSystemData` and using `MetaTable`.
extern crate shred;
// faster alternative to std's HashMap
use ahash::AHashMap as HashMap;
use shred::{
cell::{AtomicRef, AtomicRefMut},
Accessor, AccessorCow, CastFrom, DispatcherBuilder, DynamicSystemData, MetaTable, Read,
Resource, ResourceId, System, SystemData, World,
};
/// The resource dependencies a scripted system declares up front.
struct Dependencies {
    // Resources the script reads (shared access).
    reads: Vec<ResourceId>,
    // Resources the script writes (exclusive access).
    writes: Vec<ResourceId>,
}
impl Accessor for Dependencies {
    fn try_new() -> Option<Self> {
        // there's no default for this
        None
    }
    /// Read access covers the declared reads plus the meta table the system
    /// itself needs for resource lookup.
    fn reads(&self) -> Vec<ResourceId> {
        let mut all = Vec::with_capacity(self.reads.len() + 1);
        all.extend(self.reads.iter().cloned());
        all.push(ResourceId::new::<ReflectionTable>());
        all
    }
    fn writes(&self) -> Vec<ResourceId> {
        self.writes.clone()
    }
}
/// A dynamic system that represents and calls the script.
struct DynamicSystem {
    // The resources the script declared it will read and write.
    dependencies: Dependencies,
    /// just a dummy, you would want an actual script handle here
    script: fn(ScriptInput),
}
impl<'a> System<'a> for DynamicSystem {
    type SystemData = ScriptSystemData<'a>;
    fn run(&mut self, mut data: Self::SystemData) {
        let meta = data.meta_table;
        // Look each fetched resource up in the meta table to obtain its
        // `Reflection` trait object, so the script can work with it
        // generically.
        let reads: Vec<&dyn Reflection> = data
            .reads
            .iter()
            .map(|resource| meta.get(&**resource).expect("Not registered in meta table"))
            .collect();
        let writes: Vec<&mut dyn Reflection> = data
            .writes
            .iter_mut()
            .map(|resource| {
                // For some reason this needs a type ascription, otherwise Rust will think it's
                // a `&mut (Reflaction + '_)` (as opposed to `&mut (Reflection + 'static)`. (Note this
                // isn't needed in newer rust version but fails on the MSRV of 1.59.0).
                let res: &mut dyn Reflection = meta.get_mut(&mut **resource).expect(
                    "Not registered in meta \
                     table",
                );
                res
            })
            .collect();
        let input = ScriptInput { reads, writes };
        // call the script with the input
        (self.script)(input);
    }
    fn accessor<'b>(&'b self) -> AccessorCow<'a, 'b, Self> {
        AccessorCow::Ref(&self.dependencies)
    }
    fn setup(&mut self, _res: &mut World) {
        // this could call a setup function of the script
    }
}
/// Some trait that all of your dynamic resources should implement.
/// This trait should be able to register / transfer it to the scripting
/// framework.
trait Reflection {
    // Dummy entry point; a real implementation would dispatch into the
    // scripting runtime.
    fn call_method(&self, s: &str);
}
// necessary for `MetaTable`
// SAFETY: both casts are plain unsizing coercions from a concrete `T` to the
// `dyn Reflection` trait object; they refer to the same value and add no
// lifetime or mutability beyond what the input reference already has.
unsafe impl<T> CastFrom<T> for dyn Reflection
where
    T: Reflection + 'static,
{
    fn cast(t: &T) -> &Self {
        t
    }
    fn cast_mut(t: &mut T) -> &mut Self {
        t
    }
}
/// Meta table mapping each registered concrete resource type to its
/// `dyn Reflection` vtable.
type ReflectionTable = MetaTable<dyn Reflection>;

/// Maps resource names to resource ids.
struct ResourceTable {
    // script-facing name -> ResourceId of the registered Rust type
    map: HashMap<String, ResourceId>,
}
impl ResourceTable {
    /// Creates an empty name → `ResourceId` table.
    fn new() -> Self {
        ResourceTable {
            map: HashMap::default(),
        }
    }

    /// Registers resource type `T` under `name` so scripts can refer to it by
    /// string. Re-registering an existing name replaces the previous entry.
    fn register<T: Resource>(&mut self, name: &str) {
        self.map.insert(name.to_owned(), ResourceId::new::<T>());
    }

    /// Looks up the `ResourceId` previously registered under `name`.
    ///
    /// # Panics
    ///
    /// Panics if `name` was never registered. The original code panicked too
    /// (bare `Option::unwrap`), but without saying which name was missing;
    /// the message now names the offending resource.
    fn get(&self, name: &str) -> ResourceId {
        self.map
            .get(name)
            .cloned()
            .unwrap_or_else(|| panic!("resource `{}` was not registered in the ResourceTable", name))
    }
}
/// Borrowed views of the requested resources, handed to the script for one
/// `run` invocation.
struct ScriptInput<'a> {
    reads: Vec<&'a dyn Reflection>,
    writes: Vec<&'a mut dyn Reflection>,
}
/// The `SystemData` of `DynamicSystem`: the reflection meta table plus the
/// dynamically borrowed, type-erased resources (`run` maps them to
/// `dyn Reflection` through the meta table).
struct ScriptSystemData<'a> {
    meta_table: Read<'a, ReflectionTable>,
    reads: Vec<AtomicRef<'a, dyn Resource + 'static>>,
    writes: Vec<AtomicRefMut<'a, dyn Resource + 'static>>,
}
impl<'a> DynamicSystemData<'a> for ScriptSystemData<'a> {
    type Accessor = Dependencies;

    fn setup(_accessor: &Dependencies, _res: &mut World) {}

    /// Borrows every resource listed in `access` from `world`: shared borrows
    /// for reads, exclusive borrows for writes.
    ///
    /// Panics ("bug: the requested resource does not exist") if a listed
    /// resource was never inserted into the world.
    fn fetch(access: &Dependencies, world: &'a World) -> Self {
        let reads = access
            .reads
            .iter()
            .map(|id| {
                let id = id.clone();
                // SAFETY: We don't expose mutable reference to the Box or swap it out.
                let res = unsafe { world.try_fetch_internal(id) };
                AtomicRef::map(
                    res.expect("bug: the requested resource does not exist")
                        .borrow(),
                    Box::as_ref,
                )
            })
            .collect();
        let writes = access
            .writes
            .iter()
            .map(|id| {
                let id = id.clone();
                // SAFETY: We don't expose mutable reference to the Box or swap it out.
                let res = unsafe { world.try_fetch_internal(id) };
                AtomicRefMut::map(
                    res.expect("bug: the requested resource does not exist")
                        .borrow_mut(),
                    Box::as_mut,
                )
            })
            .collect();
        ScriptSystemData {
            meta_table: SystemData::fetch(world),
            reads,
            writes,
        }
    }
}
/// Builds the `DynamicSystem` for the single demo script.
///
/// In a real application the read/write name lists and the function pointer
/// would come from the scripting runtime; here they are hard-coded.
fn create_script_sys(res: &World) -> DynamicSystem {
    // -- what we get from the script --
    fn script(input: ScriptInput) {
        input.reads[0].call_method("bar");
        input.writes[0].call_method("foo");
    }
    // -- how we create the system --
    let table = res.fetch::<ResourceTable>();
    // Resolve script-facing names to `ResourceId`s via the resource table.
    let resolve = |names: &[&str]| names.iter().map(|name| table.get(name)).collect();
    DynamicSystem {
        dependencies: Dependencies {
            reads: resolve(&["Bar"]),
            writes: resolve(&["Foo"]),
        },
        // just pass the function pointer
        script,
    }
}
fn main() {
    /// Some resource
    #[derive(Debug, Default)]
    struct Foo;
    impl Reflection for Foo {
        fn call_method(&self, s: &str) {
            match s {
                "foo" => println!("Hello from Foo"),
                "bar" => println!("You gotta ask somebody else"),
                _ => panic!("The error handling of this example is non-ideal"),
            }
        }
    }
    /// Another resource
    #[derive(Debug, Default)]
    struct Bar;
    impl Reflection for Bar {
        fn call_method(&self, s: &str) {
            match s {
                "bar" => println!("Hello from Bar"),
                "foo" => println!("You gotta ask somebody else"),
                _ => panic!("The error handling of this example is non-ideal"),
            }
        }
    }
    /// A statically typed system, showing scripts and normal systems coexist.
    struct NormalSys;
    impl<'a> System<'a> for NormalSys {
        type SystemData = (Read<'a, Foo>, Read<'a, Bar>);
        fn run(&mut self, (foo, bar): Self::SystemData) {
            println!("Fetched foo: {:?}", &foo as &Foo);
            println!("Fetched bar: {:?}", &bar as &Bar);
        }
    }
    let mut res = World::empty();
    // Register the reflection vtables for both resource types.
    {
        let mut table = res.entry().or_insert_with(ReflectionTable::new);
        table.register(&Foo);
        table.register(&Bar);
    }
    // Register the string names scripts use to refer to the resources.
    // Must happen before `create_script_sys`, which looks these names up.
    {
        let mut table = res.entry().or_insert_with(ResourceTable::new);
        table.register::<Foo>("Foo");
        table.register::<Bar>("Bar");
    }
    let mut dispatcher = DispatcherBuilder::new()
        .with(NormalSys, "normal", &[])
        .build();
    dispatcher.setup(&mut res);
    let script0 = create_script_sys(&res);
    // it is recommended you create a second dispatcher dedicated to scripts,
    // that'll allow you to rebuild if necessary
    let mut scripts = DispatcherBuilder::new()
        .with(script0, "script0", &[])
        .build();
    scripts.setup(&mut res);
    // Game loop (runs forever; this example has no exit condition).
    loop {
        dispatcher.dispatch(&res);
        scripts.dispatch(&res);
    }
}
|
use super::*;
pub use crate::mock::{
run_to_block, Currency, Event as TestEvent, ExtBuilder, LBPPallet, Origin, System, Test, ACA, ALICE, BOB, CHARLIE,
DOT, ETH, HDX,
};
use crate::mock::{ACA_DOT_POOL_ID, HDX_DOT_POOL_ID, INITIAL_BALANCE};
use frame_support::{assert_noop, assert_ok};
use sp_runtime::traits::BadOrigin;
use sp_std::collections::btree_map::BTreeMap;
use sp_std::convert::TryInto;
/// Builds fresh test externalities with the block number preset to 1.
pub fn new_test_ext() -> sp_io::TestExternalities {
    let mut externalities = ExtBuilder::default().build();
    externalities.execute_with(|| System::set_block_number(1));
    externalities
}
use primitives::{asset::AssetPair, fee::Fee, traits::AMMTransfer,
constants::chain::{MAX_IN_RATIO, MAX_OUT_RATIO}
};
pub fn predefined_test_ext() -> sp_io::TestExternalities {
let mut ext = new_test_ext();
ext.execute_with(|| {
assert_ok!(LBPPallet::create_pool(
Origin::root(),
ALICE,
LBPAssetInfo {
id: ACA,
amount: 1_000_000_000,
initial_weight: 20,
final_weight: 90,
},
LBPAssetInfo {
id: DOT,
amount: 2_000_000_000,
initial_weight: 80,
final_weight: 10,
},
(10u64, 20u64),
WeightCurveType::Linear,
true,
Fee::default(),
CHARLIE,
));
assert_eq!(
<PoolData<Test>>::get(ACA_DOT_POOL_ID),
Pool {
owner: ALICE,
start: 10u64,
end: 20u64,
assets: (ACA, DOT),
initial_weights: (20, 80),
final_weights: (90, 10),
last_weight_update: 0_u64,
last_weights: (20, 80),
weight_curve: WeightCurveType::Linear,
pausable: true,
paused: false,
fee: Fee::default(),
fee_receiver: CHARLIE,
}
);
});
ext
}
/// Returns the `n` most recent runtime events, oldest first.
fn last_events(n: usize) -> Vec<TestEvent> {
    let records = frame_system::Pallet::<Test>::events();
    // Keep only the trailing `n` records (all of them if fewer exist).
    let skip = records.len().saturating_sub(n);
    records.into_iter().skip(skip).map(|record| record.event).collect()
}
/// Asserts that the most recent events equal `e`, in order.
fn expect_events(e: Vec<TestEvent>) {
    let expected_count = e.len();
    assert_eq!(last_events(expected_count), e);
}
/// Linear weight interpolation: mid-sale update recomputes `last_weights`;
/// a second update in the same block is a no-op.
#[test]
fn weight_update_should_work() {
    new_test_ext().execute_with(|| {
        let asset_a = LBPAssetInfo {
            id: ACA,
            amount: 1_000_000,
            initial_weight: 20,
            final_weight: 80,
        };
        let asset_b = LBPAssetInfo {
            id: DOT,
            amount: 2_000_000,
            initial_weight: 80,
            final_weight: 20,
        };
        let duration = (10u64, 19u64);
        // Local pool struct; `update_weights` mutates it in place.
        let mut linear_pool = Pool::new(
            ALICE,
            asset_a,
            asset_b,
            duration,
            WeightCurveType::Linear,
            false,
            Fee::default(),
            CHARLIE,
        );
        assert_ok!(LBPPallet::create_pool(
            Origin::root(),
            ALICE,
            asset_a,
            asset_b,
            duration,
            WeightCurveType::Linear,
            false,
            Fee::default(),
            CHARLIE,
        ));
        // Block 13 is 1/3 through the 10..19 sale: 20->80 interpolates to 40.
        System::set_block_number(13);
        assert_ok!(LBPPallet::update_weights(&ACA_DOT_POOL_ID, &mut linear_pool));
        let mut linear_pool = LBPPallet::pool_data(ACA_DOT_POOL_ID);
        assert_eq!(linear_pool.last_weight_update, 13);
        assert_eq!(linear_pool.last_weights, (40u128, 60u128));
        // call update again in the same block, data should be the same
        assert_ok!(LBPPallet::update_weights(&ACA_DOT_POOL_ID, &mut linear_pool));
        let linear_pool = LBPPallet::pool_data(ACA_DOT_POOL_ID);
        assert_eq!(linear_pool.last_weight_update, 13);
        assert_eq!(linear_pool.last_weights, (40u128, 60u128));
    });
}
/// `validate_pool_data` accepts sane and null sale intervals, and rejects
/// reversed intervals and sales longer than the maximum duration.
#[test]
fn validate_pool_data_should_work() {
    new_test_ext().execute_with(|| {
        // All scenarios below differ only in the sale interval; every other
        // field is held constant by this helper.
        let pool_with_interval = |start: u64, end: u64| Pool {
            owner: ALICE,
            start,
            end,
            assets: (ACA, DOT),
            initial_weights: (20, 80),
            final_weights: (90, 10),
            last_weight_update: 0u64,
            last_weights: (20, 80),
            weight_curve: WeightCurveType::Linear,
            pausable: true,
            paused: false,
            fee: Fee::default(),
            fee_receiver: CHARLIE,
        };
        // regular interval
        assert_ok!(LBPPallet::validate_pool_data(&pool_with_interval(10, 20)));
        // null interval
        assert_ok!(LBPPallet::validate_pool_data(&pool_with_interval(0, 0)));
        // the end precedes the start
        assert_noop!(
            LBPPallet::validate_pool_data(&pool_with_interval(10, 2)),
            Error::<Test>::InvalidBlockNumber
        );
        // the sale spans more than u32::MAX blocks
        assert_noop!(
            LBPPallet::validate_pool_data(&pool_with_interval(10, 11u64 + u32::MAX as u64)),
            Error::<Test>::MaxSaleDurationExceeded
        );
    });
}
/// `calculate_weights` interpolates linearly inside the sale interval and
/// errors on reversed/empty intervals and out-of-range block numbers.
#[test]
fn calculate_weights_should_work() {
    new_test_ext().execute_with(|| {
        let mut pool_data = Pool {
            owner: ALICE,
            start: 100u64,
            end: 200u64,
            assets: (ACA, DOT),
            initial_weights: (2000, 2000),
            final_weights: (1000, 2000),
            last_weight_update: 0u64,
            last_weights: (2000, 2000),
            weight_curve: WeightCurveType::Linear,
            pausable: true,
            paused: false,
            fee: Fee::default(),
            fee_receiver: CHARLIE,
        };
        // 70% through 100..200: 2000 -> 1000 interpolates to 1300.
        assert_eq!(LBPPallet::calculate_weights(&pool_data, 170), Ok((1300, 2000)));
        // at the very start the initial weights are returned unchanged
        pool_data.initial_weights = (1000, 2000);
        pool_data.final_weights = (2000, 1000);
        pool_data.last_weights = pool_data.initial_weights;
        assert_eq!(LBPPallet::calculate_weights(&pool_data, 100), Ok((1000, 2000)));
        // constant weights (initial == final)
        pool_data.initial_weights = (1000, 2000);
        pool_data.final_weights = (1000, 2000);
        pool_data.last_weights = pool_data.initial_weights;
        assert_eq!(LBPPallet::calculate_weights(&pool_data, 100), Ok((1000, 2000)));
        // at the very end the final weights are returned
        pool_data.initial_weights = (2000, 2000);
        pool_data.final_weights = (1000, 2000);
        pool_data.last_weights = pool_data.initial_weights;
        assert_eq!(LBPPallet::calculate_weights(&pool_data, 200), Ok((1000, 2000)));
        // invalid interval
        pool_data.start = 200;
        pool_data.end = 100;
        assert_eq!(
            LBPPallet::calculate_weights(&pool_data, 200),
            Err(Error::<Test>::WeightCalculationError.into())
        );
        // invalid interval
        pool_data.start = 100;
        pool_data.end = 100;
        assert_eq!(
            LBPPallet::calculate_weights(&pool_data, 200),
            Err(Error::<Test>::WeightCalculationError.into())
        );
        // out of bound
        pool_data.start = 100;
        pool_data.end = 200;
        assert_eq!(
            LBPPallet::calculate_weights(&pool_data, 10),
            Err(Error::<Test>::WeightCalculationError.into())
        );
        assert_eq!(
            LBPPallet::calculate_weights(&pool_data, 210),
            Err(Error::<Test>::WeightCalculationError.into())
        );
    });
}
/// Creating a pool moves the initial liquidity from the owner to the pool
/// account and stores pool data mirroring the creation parameters.
#[test]
fn create_pool_should_work() {
    new_test_ext().execute_with(|| {
        assert_ok!(LBPPallet::create_pool(
            Origin::root(),
            ALICE,
            LBPAssetInfo {
                id: ACA,
                amount: 1_000_000_000,
                initial_weight: 20,
                final_weight: 90,
            },
            LBPAssetInfo {
                id: DOT,
                amount: 2_000_000_000,
                initial_weight: 80,
                final_weight: 10,
            },
            (10u64, 20u64),
            WeightCurveType::Linear,
            true,
            Fee::default(),
            CHARLIE,
        ));
        // liquidity moved: pool credited, owner debited
        assert_eq!(Currency::free_balance(ACA, &ACA_DOT_POOL_ID), 1_000_000_000);
        assert_eq!(Currency::free_balance(DOT, &ACA_DOT_POOL_ID), 2_000_000_000);
        assert_eq!(
            Currency::free_balance(ACA, &ALICE),
            INITIAL_BALANCE.saturating_sub(1_000_000_000)
        );
        assert_eq!(
            Currency::free_balance(DOT, &ALICE),
            INITIAL_BALANCE.saturating_sub(2_000_000_000)
        );
        let pool_data = LBPPallet::pool_data(ACA_DOT_POOL_ID);
        assert_eq!(pool_data.owner, ALICE);
        assert_eq!(pool_data.start, 10u64);
        assert_eq!(pool_data.end, 20u64);
        assert_eq!(pool_data.assets, (ACA, DOT));
        assert_eq!(pool_data.initial_weights, (20, 80));
        assert_eq!(pool_data.final_weights, (90, 10));
        assert_eq!(pool_data.weight_curve, WeightCurveType::Linear);
        assert!(pool_data.pausable);
        // verify that `last_weight_update`, `last_weights` and `paused` fields are correctly initialized
        assert_eq!(pool_data.last_weight_update, 0);
        assert_eq!(pool_data.last_weights, (20, 80));
        assert!(!pool_data.paused);
        assert_eq!(pool_data.fee, Fee::default());
        assert_eq!(pool_data.fee_receiver, CHARLIE);
        expect_events(vec![Event::PoolCreated(ACA_DOT_POOL_ID, pool_data).into()]);
    });
}
/// A plain signed origin may not create pools; only `CreatePoolOrigin` can.
#[test]
fn create_pool_from_basic_origin_should_not_work() {
    new_test_ext().execute_with(|| {
        // only CreatePoolOrigin is allowed to create new pools
        assert_noop!(
            LBPPallet::create_pool(
                Origin::signed(ALICE),
                ALICE,
                LBPAssetInfo {
                    id: HDX,
                    amount: 1_000_000_000,
                    initial_weight: 20,
                    final_weight: 90,
                },
                LBPAssetInfo {
                    id: DOT,
                    amount: 2_000_000_000,
                    initial_weight: 80,
                    final_weight: 10,
                },
                (10u64, 20u64),
                WeightCurveType::Linear,
                true,
                Fee::default(),
                CHARLIE,
            ),
            BadOrigin
        );
    });
}
/// Creating a second pool for an existing asset pair must fail, even with
/// different amounts/weights/interval.
#[test]
fn create_same_pool_should_not_work() {
    new_test_ext().execute_with(|| {
        assert_ok!(LBPPallet::create_pool(
            Origin::root(),
            ALICE,
            LBPAssetInfo {
                id: ACA,
                amount: 1_000_000_000,
                initial_weight: 20,
                final_weight: 90,
            },
            LBPAssetInfo {
                id: DOT,
                amount: 2_000_000_000,
                initial_weight: 80,
                final_weight: 10,
            },
            (10u64, 20u64),
            WeightCurveType::Linear,
            true,
            Fee::default(),
            CHARLIE,
        ));
        // same (ACA, DOT) pair, different parameters — still rejected
        assert_noop!(
            LBPPallet::create_pool(
                Origin::root(),
                ALICE,
                LBPAssetInfo {
                    id: ACA,
                    amount: 10_000_000_000,
                    initial_weight: 30,
                    final_weight: 70,
                },
                LBPAssetInfo {
                    id: DOT,
                    amount: 20_000_000_000,
                    initial_weight: 70,
                    final_weight: 30,
                },
                (100u64, 200u64),
                WeightCurveType::Linear,
                true,
                Fee::default(),
                CHARLIE,
            ),
            Error::<Test>::PoolAlreadyExists
        );
        // only the first creation emitted an event
        let pool_data = LBPPallet::pool_data(ACA_DOT_POOL_ID);
        expect_events(vec![Event::PoolCreated(ACA_DOT_POOL_ID, pool_data).into()]);
    });
}
/// A pool whose two assets are identical must be rejected.
#[test]
fn create_pool_with_same_assets_should_not_work() {
    new_test_ext().execute_with(|| {
        // both asset infos use ACA
        assert_noop!(
            LBPPallet::create_pool(
                Origin::root(),
                ALICE,
                LBPAssetInfo {
                    id: ACA,
                    amount: 1_000_000_000,
                    initial_weight: 20,
                    final_weight: 90,
                },
                LBPAssetInfo {
                    id: ACA,
                    amount: 2_000_000_000,
                    initial_weight: 80,
                    final_weight: 10,
                },
                (20u64, 10u64),
                WeightCurveType::Linear,
                true,
                Fee::default(),
                CHARLIE,
            ),
            Error::<Test>::CannotCreatePoolWithSameAssets
        );
    });
}
/// Pool creation requires sufficient initial liquidity: zero on both sides,
/// zero on one side, or dust-level amounts are all rejected.
#[test]
fn create_pool_with_insufficient_liquidity_should_not_work() {
    new_test_ext().execute_with(|| {
        // both amounts zero
        assert_noop!(
            LBPPallet::create_pool(
                Origin::root(),
                ALICE,
                LBPAssetInfo {
                    id: HDX,
                    amount: 0,
                    initial_weight: 20,
                    final_weight: 90,
                },
                LBPAssetInfo {
                    id: DOT,
                    amount: 0,
                    initial_weight: 80,
                    final_weight: 10,
                },
                (10u64, 20u64),
                WeightCurveType::Linear,
                true,
                Fee::default(),
                CHARLIE,
            ),
            Error::<Test>::InsufficientLiquidity
        );
        // one amount zero
        assert_noop!(
            LBPPallet::create_pool(
                Origin::root(),
                ALICE,
                LBPAssetInfo {
                    id: HDX,
                    amount: 0,
                    initial_weight: 20,
                    final_weight: 90,
                },
                LBPAssetInfo {
                    id: DOT,
                    amount: 2_000_000_000,
                    initial_weight: 80,
                    final_weight: 10,
                },
                (10u64, 20u64),
                WeightCurveType::Linear,
                true,
                Fee::default(),
                CHARLIE,
            ),
            Error::<Test>::InsufficientLiquidity
        );
        // both amounts below the required minimum
        assert_noop!(
            LBPPallet::create_pool(
                Origin::root(),
                ALICE,
                LBPAssetInfo {
                    id: HDX,
                    amount: 100,
                    initial_weight: 20,
                    final_weight: 90,
                },
                LBPAssetInfo {
                    id: DOT,
                    amount: 100,
                    initial_weight: 80,
                    final_weight: 10,
                },
                (10u64, 20u64),
                WeightCurveType::Linear,
                true,
                Fee::default(),
                CHARLIE,
            ),
            Error::<Test>::InsufficientLiquidity
        );
    });
}
/// Creating a pool with a reversed sale interval must fail validation.
#[test]
fn create_pool_with_invalid_data_should_not_work() {
    new_test_ext().execute_with(|| {
        assert_noop!(
            LBPPallet::create_pool(
                Origin::root(),
                ALICE,
                LBPAssetInfo {
                    id: ACA,
                    amount: 1_000_000_000,
                    initial_weight: 20,
                    final_weight: 90,
                },
                LBPAssetInfo {
                    id: DOT,
                    amount: 2_000_000_000,
                    initial_weight: 80,
                    final_weight: 10,
                },
                (20u64, 10u64), // reversed interval, the end precedes the beginning
                WeightCurveType::Linear,
                true,
                Fee::default(),
                CHARLIE,
            ),
            Error::<Test>::InvalidBlockNumber
        );
    });
}
/// Pool creation must fail when the owner cannot cover the initial amounts.
#[test]
fn create_pool_with_insufficient_balance_should_not_work() {
    new_test_ext().execute_with(|| {
        // amounts exceed ALICE's free balances
        assert_noop!(
            LBPPallet::create_pool(
                Origin::root(),
                ALICE,
                LBPAssetInfo {
                    id: ACA,
                    amount: 2_000_000_000_000_000,
                    initial_weight: 20,
                    final_weight: 90,
                },
                LBPAssetInfo {
                    id: DOT,
                    amount: 2_000_000_000_000_000,
                    initial_weight: 80,
                    final_weight: 10,
                },
                (10u64, 20u64),
                WeightCurveType::Linear,
                true,
                Fee::default(),
                CHARLIE,
            ),
            Error::<Test>::InsufficientAssetBalance
        );
    });
}
/// `update_pool_data` accepts any subset of parameters (`None` = keep); this
/// covers full update, several single-field updates, a mixed update, and the
/// emitted `PoolUpdated` events.
#[test]
fn update_pool_data_should_work() {
    predefined_test_ext().execute_with(|| {
        // update all parameters
        assert_ok!(LBPPallet::update_pool_data(
            Origin::signed(ALICE),
            ACA_DOT_POOL_ID,
            Some((15, 18)),
            Some(((ACA, 10), (DOT, 90))),
            Some(((ACA, 80), (DOT, 20))),
            Some(Fee {
                numerator: 5,
                denominator: 100,
            }),
            Some(BOB),
        ));
        // verify changes
        let updated_pool_data_1 = LBPPallet::pool_data(ACA_DOT_POOL_ID);
        assert_eq!(updated_pool_data_1.start, 15);
        assert_eq!(updated_pool_data_1.end, 18);
        assert_eq!(updated_pool_data_1.initial_weights, (10, 90));
        assert_eq!(updated_pool_data_1.final_weights, (80, 20));
        assert_eq!(
            updated_pool_data_1.fee,
            Fee {
                numerator: 5,
                denominator: 100
            }
        );
        assert_eq!(updated_pool_data_1.fee_receiver, BOB);
        // last_weights follows the new initial weights
        assert_eq!(updated_pool_data_1.last_weight_update, 0);
        assert_eq!(updated_pool_data_1.last_weights, (10, 90));
        // update only one parameter
        assert_ok!(LBPPallet::update_pool_data(
            Origin::signed(ALICE),
            ACA_DOT_POOL_ID,
            Some((15, 30)),
            None,
            None,
            None,
            None,
        ));
        // verify changes
        let updated_pool_data_2 = LBPPallet::pool_data(ACA_DOT_POOL_ID);
        assert_eq!(updated_pool_data_2.start, 15);
        assert_eq!(updated_pool_data_2.end, 30);
        assert_eq!(updated_pool_data_2.initial_weights, (10, 90));
        assert_eq!(updated_pool_data_2.final_weights, (80, 20));
        assert_eq!(
            updated_pool_data_2.fee,
            Fee {
                numerator: 5,
                denominator: 100
            }
        );
        assert_eq!(updated_pool_data_2.fee_receiver, BOB);
        // update only one parameter
        assert_ok!(LBPPallet::update_pool_data(
            Origin::signed(ALICE),
            ACA_DOT_POOL_ID,
            None,
            Some(((ACA, 10), (DOT, 70))),
            None,
            None,
            None,
        ));
        // verify changes
        let updated_pool_data_3 = LBPPallet::pool_data(ACA_DOT_POOL_ID);
        assert_eq!(updated_pool_data_3.start, 15);
        assert_eq!(updated_pool_data_3.end, 30);
        assert_eq!(updated_pool_data_3.initial_weights, (10, 70));
        assert_eq!(updated_pool_data_3.final_weights, (80, 20));
        assert_eq!(
            updated_pool_data_3.fee,
            Fee {
                numerator: 5,
                denominator: 100
            }
        );
        assert_eq!(updated_pool_data_3.fee_receiver, BOB);
        // update only one parameter
        assert_ok!(LBPPallet::update_pool_data(
            Origin::signed(ALICE),
            ACA_DOT_POOL_ID,
            None,
            None,
            None,
            None,
            Some(ALICE),
        ));
        // verify changes
        let updated_pool_data_4 = LBPPallet::pool_data(ACA_DOT_POOL_ID);
        assert_eq!(updated_pool_data_4.start, 15);
        assert_eq!(updated_pool_data_4.end, 30);
        assert_eq!(updated_pool_data_4.initial_weights, (10, 70));
        assert_eq!(updated_pool_data_4.final_weights, (80, 20));
        assert_eq!(
            updated_pool_data_4.fee,
            Fee {
                numerator: 5,
                denominator: 100
            }
        );
        assert_eq!(updated_pool_data_4.fee_receiver, ALICE);
        // mix
        assert_ok!(LBPPallet::update_pool_data(
            Origin::signed(ALICE),
            ACA_DOT_POOL_ID,
            Some((15, 18)),
            Some(((ACA, 10), (DOT, 90))),
            None,
            Some(Fee {
                numerator: 6,
                denominator: 1_000
            }),
            None,
        ));
        // verify changes
        let updated_pool_data_5 = LBPPallet::pool_data(ACA_DOT_POOL_ID);
        assert_eq!(updated_pool_data_5.start, 15);
        assert_eq!(updated_pool_data_5.end, 18);
        assert_eq!(updated_pool_data_5.initial_weights, (10, 90));
        assert_eq!(updated_pool_data_5.final_weights, (80, 20));
        assert_eq!(
            updated_pool_data_5.fee,
            Fee {
                numerator: 6,
                denominator: 1_000
            }
        );
        assert_eq!(updated_pool_data_5.fee_receiver, ALICE);
        // one PoolUpdated event per successful call, in order
        expect_events(vec![
            Event::PoolUpdated(ACA_DOT_POOL_ID, updated_pool_data_1).into(),
            Event::PoolUpdated(ACA_DOT_POOL_ID, updated_pool_data_2).into(),
            Event::PoolUpdated(ACA_DOT_POOL_ID, updated_pool_data_3).into(),
            Event::PoolUpdated(ACA_DOT_POOL_ID, updated_pool_data_4).into(),
            Event::PoolUpdated(ACA_DOT_POOL_ID, updated_pool_data_5).into(),
        ]);
    });
}
/// Updating a pool that was never created must fail with `PoolNotFound`.
#[test]
fn update_non_existing_pool_data_should_not_work() {
    new_test_ext().execute_with(|| {
        assert_noop!(
            LBPPallet::update_pool_data(
                Origin::signed(ALICE),
                ACA_DOT_POOL_ID,
                Some((15, 18)),
                Some(((ACA, 10), (DOT, 90))),
                Some(((ACA, 80), (DOT, 20))),
                Some(Fee {
                    numerator: 5,
                    denominator: 100,
                }),
                None,
            ),
            Error::<Test>::PoolNotFound
        );
    });
}
/// Invalid sale intervals are rejected by `update_pool_data`: reversed
/// intervals, intervals starting at or before the current block, and
/// zero-valued bounds.
#[test]
fn update_pool_with_invalid_data_should_not_work() {
    predefined_test_ext().execute_with(|| {
        assert_noop!(
            LBPPallet::update_pool_data(
                Origin::signed(ALICE),
                ACA_DOT_POOL_ID,
                Some((20, 10)), // reversed interval, the end precedes the beginning
                Some(((ACA, 10), (DOT, 90))),
                Some(((ACA, 80), (DOT, 20))),
                Some(Fee {
                    numerator: 5,
                    denominator: 100,
                }),
                None,
            ),
            Error::<Test>::InvalidBlockNumber
        );
        // advance past block 5 so a start of 5 lies in the past
        run_to_block(6);
        assert_noop!(
            LBPPallet::update_pool_data(
                Origin::signed(ALICE),
                ACA_DOT_POOL_ID,
                Some((5, 20)),
                Some(((ACA, 10), (DOT, 90))),
                Some(((ACA, 80), (DOT, 20))),
                Some(Fee {
                    numerator: 5,
                    denominator: 100,
                }),
                None,
            ),
            Error::<Test>::InvalidBlockNumber
        );
        // zero start
        assert_noop!(
            LBPPallet::update_pool_data(
                Origin::signed(ALICE),
                ACA_DOT_POOL_ID,
                Some((0, 20)),
                Some(((ACA, 10), (DOT, 90))),
                Some(((ACA, 80), (DOT, 20))),
                Some(Fee {
                    numerator: 5,
                    denominator: 100,
                }),
                None,
            ),
            Error::<Test>::InvalidBlockNumber
        );
        // zero end
        assert_noop!(
            LBPPallet::update_pool_data(
                Origin::signed(ALICE),
                ACA_DOT_POOL_ID,
                Some((5, 0)),
                Some(((ACA, 10), (DOT, 90))),
                Some(((ACA, 80), (DOT, 20))),
                Some(Fee {
                    numerator: 5,
                    denominator: 100,
                }),
                None,
            ),
            Error::<Test>::InvalidBlockNumber
        );
    });
}
/// An update providing no new values (all parameters `None`) is rejected.
#[test]
fn update_pool_data_without_changes_should_not_work() {
    predefined_test_ext().execute_with(|| {
        assert_noop!(
            LBPPallet::update_pool_data(Origin::signed(ALICE), ACA_DOT_POOL_ID, None, None, None, None, None,),
            Error::<Test>::NothingToUpdate
        );
    });
}
/// Only the pool owner (ALICE) may update pool data; BOB is rejected.
#[test]
fn update_pool_data_by_non_owner_should_not_work() {
    predefined_test_ext().execute_with(|| {
        assert_noop!(
            LBPPallet::update_pool_data(
                Origin::signed(BOB),
                ACA_DOT_POOL_ID,
                Some((15, 20)),
                Some(((ACA, 10), (DOT, 90))),
                None,
                None,
                None,
            ),
            Error::<Test>::NotOwner
        );
    });
}
/// Once the sale has started (block 16 is inside 10..20), pool data can no
/// longer be updated.
#[test]
fn update_pool_data_for_running_lbp_should_not_work() {
    predefined_test_ext().execute_with(|| {
        System::set_block_number(16);
        // update starting block and final weights
        assert_noop!(
            LBPPallet::update_pool_data(
                Origin::signed(ALICE),
                ACA_DOT_POOL_ID,
                Some((15, 20)),
                Some(((ACA, 10), (DOT, 90))),
                None,
                Some(Fee {
                    numerator: 5,
                    denominator: 100
                }),
                Some(BOB),
            ),
            Error::<Test>::SaleStarted
        );
        // only the creation event from the fixture exists
        let pool_data = LBPPallet::pool_data(ACA_DOT_POOL_ID);
        expect_events(vec![Event::PoolCreated(ACA_DOT_POOL_ID, pool_data).into()]);
    });
}
/// Weight updates must reference the pool's own assets; HDX is not part of
/// the ACA/DOT pool.
#[test]
fn update_pool_data_with_wrong_asset_should_not_work() {
    predefined_test_ext().execute_with(|| {
        assert_noop!(
            LBPPallet::update_pool_data(
                Origin::signed(ALICE),
                ACA_DOT_POOL_ID,
                None,
                Some(((HDX, 10), (DOT, 90))),
                None,
                None,
                None,
            ),
            Error::<Test>::InvalidAsset
        );
    });
}
/// A pool created with a null interval can later have a valid interval set;
/// a zero end is still rejected.
#[test]
fn update_pool_interval_should_work() {
    new_test_ext().execute_with(|| {
        assert_ok!(LBPPallet::create_pool(
            Origin::root(),
            ALICE,
            LBPAssetInfo {
                id: ACA,
                amount: 1_000_000_000,
                initial_weight: 20,
                final_weight: 90,
            },
            LBPAssetInfo {
                id: DOT,
                amount: 2_000_000_000,
                initial_weight: 80,
                final_weight: 10,
            },
            (0u64, 0u64),
            WeightCurveType::Linear,
            true,
            Fee::default(),
            CHARLIE,
        ));
        let pool_data = LBPPallet::pool_data(ACA_DOT_POOL_ID);
        System::set_block_number(15);
        // zero end is invalid
        assert_noop!(
            LBPPallet::update_pool_data(
                Origin::signed(ALICE),
                ACA_DOT_POOL_ID,
                Some((16, 0)),
                None,
                None,
                None,
                None,
            ),
            Error::<Test>::InvalidBlockNumber
        );
        // a future, well-ordered interval is accepted
        assert_ok!(LBPPallet::update_pool_data(
            Origin::signed(ALICE),
            ACA_DOT_POOL_ID,
            Some((16, 20)),
            None,
            None,
            None,
            None,
        ));
        // verify changes
        let updated_pool_data = LBPPallet::pool_data(ACA_DOT_POOL_ID);
        assert_eq!(updated_pool_data.start, 16);
        assert_eq!(updated_pool_data.end, 20);
        expect_events(vec![
            Event::PoolCreated(ACA_DOT_POOL_ID, pool_data).into(),
            Event::PoolUpdated(ACA_DOT_POOL_ID, updated_pool_data).into(),
        ]);
    });
}
/// Pausing flips only the `paused` flag and emits a `Paused` event.
#[test]
fn pause_pool_should_work() {
    predefined_test_ext().execute_with(|| {
        assert_ok!(LBPPallet::pause_pool(Origin::signed(ALICE), ACA_DOT_POOL_ID));
        let paused_pool = LBPPallet::pool_data(ACA_DOT_POOL_ID);
        // identical to the fixture pool except `paused: true`
        assert_eq!(
            paused_pool,
            Pool {
                owner: ALICE,
                start: 10u64,
                end: 20u64,
                assets: (ACA, DOT),
                initial_weights: (20, 80),
                final_weights: (90, 10),
                last_weight_update: 0u64,
                last_weights: (20, 80),
                weight_curve: WeightCurveType::Linear,
                pausable: true,
                paused: true,
                fee: Fee::default(),
                fee_receiver: CHARLIE,
            }
        );
        expect_events(vec![Event::Paused(ALICE, ACA_DOT_POOL_ID).into()]);
    });
}
/// Pausing an unknown pool id must fail with `PoolNotFound`.
#[test]
fn pause_non_existing_pool_should_not_work() {
    predefined_test_ext().execute_with(|| {
        let non_existing_id = 25486;
        assert_noop!(
            LBPPallet::pause_pool(Origin::signed(ALICE), non_existing_id),
            Error::<Test>::PoolNotFound
        );
    });
}
/// Only the pool owner may pause; any other signer is rejected.
#[test]
fn pause_pool_by_non_owner_should_not_work() {
    predefined_test_ext().execute_with(|| {
        //user is not pool owner
        let not_owner = BOB;
        assert_noop!(
            LBPPallet::pause_pool(Origin::signed(not_owner), ACA_DOT_POOL_ID),
            Error::<Test>::NotOwner
        );
    });
}
/// A pool created with `pausable = false` cannot be paused, even by its owner.
#[test]
fn pause_non_pausable_pool_should_not_work() {
    predefined_test_ext().execute_with(|| {
        //pool is not pausable
        assert_ok!(LBPPallet::create_pool(
            Origin::root(),
            BOB,
            LBPAssetInfo {
                id: ACA,
                amount: 1_000_000_000,
                initial_weight: 20,
                final_weight: 40,
            },
            LBPAssetInfo {
                id: ETH,
                amount: 2_000_000_000,
                initial_weight: 80,
                final_weight: 60,
            },
            (200u64, 400u64),
            WeightCurveType::Linear,
            false,
            Fee::default(),
            CHARLIE,
        ));
        // 2_004_000 is the derived account id of the ACA/ETH pool above
        assert_noop!(
            LBPPallet::pause_pool(Origin::signed(BOB), 2_004_000),
            Error::<Test>::PoolIsNotPausable
        );
    });
}
/// Pausing twice in a row must fail the second time.
#[test]
fn pause_paused_pool_should_not_work() {
    predefined_test_ext().execute_with(|| {
        assert_ok!(LBPPallet::create_pool(
            Origin::root(),
            BOB,
            LBPAssetInfo {
                id: DOT,
                amount: 1_000_000_000,
                initial_weight: 20,
                final_weight: 40,
            },
            LBPAssetInfo {
                id: ETH,
                amount: 2_000_000_000,
                initial_weight: 80,
                final_weight: 60,
            },
            (200u64, 400u64),
            WeightCurveType::Linear,
            true,
            Fee::default(),
            CHARLIE,
        ));
        //pause the pool - pool is created as unpaused by default
        // 3_004_000 is the derived account id of the DOT/ETH pool above
        assert_ok!(LBPPallet::pause_pool(Origin::signed(BOB), 3_004_000));
        assert_noop!(
            LBPPallet::pause_pool(Origin::signed(BOB), 3_004_000),
            Error::<Test>::CannotPausePausedPool
        );
    });
}
/// Once the sale has ended (block 401 > end 400), the pool cannot be paused.
#[test]
fn pause_non_running_pool_should_not_work() {
    predefined_test_ext().execute_with(|| {
        //pool is ended or ending in current block
        assert_ok!(LBPPallet::create_pool(
            Origin::root(),
            ALICE,
            LBPAssetInfo {
                id: DOT,
                amount: 1_000_000_000,
                initial_weight: 20,
                final_weight: 40,
            },
            LBPAssetInfo {
                id: HDX,
                amount: 2_000_000_000,
                initial_weight: 80,
                final_weight: 60,
            },
            (200u64, 400u64),
            WeightCurveType::Linear,
            true,
            Fee::default(),
            CHARLIE,
        ));
        run_to_block(401);
        assert_noop!(
            LBPPallet::pause_pool(Origin::signed(ALICE), HDX_DOT_POOL_ID),
            Error::<Test>::CannotPauseEndedPool
        );
    });
}
/// Pause followed by unpause restores `paused: false` and emits both events.
#[test]
fn unpause_pool_should_work() {
    predefined_test_ext().execute_with(|| {
        assert_ok!(LBPPallet::create_pool(
            Origin::root(),
            ALICE,
            LBPAssetInfo {
                id: DOT,
                amount: 1_000_000_000,
                initial_weight: 20,
                final_weight: 40,
            },
            LBPAssetInfo {
                id: HDX,
                amount: 2_000_000_000,
                initial_weight: 80,
                final_weight: 60,
            },
            (200u64, 400u64),
            WeightCurveType::Linear,
            true,
            Fee::default(),
            CHARLIE,
        ));
        //pool is created as unpaused by default
        assert_ok!(LBPPallet::pause_pool(Origin::signed(ALICE), HDX_DOT_POOL_ID));
        assert_ok!(LBPPallet::unpause_pool(Origin::signed(ALICE), HDX_DOT_POOL_ID,));
        let unpaused_pool = LBPPallet::pool_data(HDX_DOT_POOL_ID);
        // note: assets/weights are stored in (HDX, DOT) order here
        assert_eq!(
            unpaused_pool,
            Pool {
                owner: ALICE,
                start: 200_u64,
                end: 400_u64,
                assets: (HDX, DOT),
                initial_weights: (80, 20),
                final_weights: (60, 40),
                last_weight_update: 0u64,
                last_weights: (80, 20),
                weight_curve: WeightCurveType::Linear,
                pausable: true,
                paused: false,
                fee: Fee::default(),
                fee_receiver: CHARLIE,
            }
        );
        expect_events(vec![
            Event::Paused(ALICE, HDX_DOT_POOL_ID).into(),
            Event::Unpaused(ALICE, HDX_DOT_POOL_ID).into(),
        ]);
    });
}
/// Unpausing fails for: non-owners, unknown pools, pools that are not
/// paused, and pools whose sale has already ended.
#[test]
fn unpause_pool_should_not_work() {
    predefined_test_ext().execute_with(|| {
        //user is not pool owner
        let not_owner = BOB;
        assert_noop!(
            LBPPallet::unpause_pool(Origin::signed(not_owner), ACA_DOT_POOL_ID),
            Error::<Test>::NotOwner
        );
        //pool is not found
        assert_noop!(
            LBPPallet::unpause_pool(Origin::signed(ALICE), 24568),
            Error::<Test>::PoolNotFound
        );
        //predefined_test_ext pool is unpaused
        assert_noop!(
            LBPPallet::unpause_pool(Origin::signed(ALICE), ACA_DOT_POOL_ID),
            Error::<Test>::PoolIsNotPaused
        );
        //pool is ended or ending in current block - pool is unpaused by default
        assert_ok!(LBPPallet::create_pool(
            Origin::root(),
            ALICE,
            LBPAssetInfo {
                id: DOT,
                amount: 1_000_000_000,
                initial_weight: 20,
                final_weight: 40,
            },
            LBPAssetInfo {
                id: HDX,
                amount: 2_000_000_000,
                initial_weight: 80,
                final_weight: 60,
            },
            (200u64, 400u64),
            WeightCurveType::Linear,
            true,
            Fee::default(),
            CHARLIE,
        ));
        // pause the pool before trying to unpause it
        assert_ok!(LBPPallet::pause_pool(Origin::signed(ALICE), HDX_DOT_POOL_ID,));
        run_to_block(401);
        assert_noop!(
            LBPPallet::unpause_pool(Origin::signed(ALICE), HDX_DOT_POOL_ID),
            Error::<Test>::CannotUnpauseEndedPool
        );
    });
}
/// Adding liquidity transfers both amounts owner → pool; a zero amount on
/// one side and swapped asset order are both accepted.
#[test]
fn add_liquidity_should_work() {
    predefined_test_ext().execute_with(|| {
        let user_balance_a_before = Currency::free_balance(ACA, &ALICE);
        let user_balance_b_before = Currency::free_balance(DOT, &ALICE);
        let pool_balance_a_before = Currency::free_balance(ACA, &ACA_DOT_POOL_ID);
        let pool_balance_b_before = Currency::free_balance(DOT, &ACA_DOT_POOL_ID);
        let added_a = 10_000_000_000;
        let added_b = 20_000_000_000;
        assert_ok!(LBPPallet::add_liquidity(
            Origin::signed(ALICE),
            (ACA, added_a),
            (DOT, added_b),
        ));
        // pool credited, owner debited, by exactly the added amounts
        let pool_balance_a_after = Currency::free_balance(ACA, &ACA_DOT_POOL_ID);
        let pool_balance_b_after = Currency::free_balance(DOT, &ACA_DOT_POOL_ID);
        assert_eq!(pool_balance_a_after, pool_balance_a_before.saturating_add(added_a));
        assert_eq!(pool_balance_b_after, pool_balance_b_before.saturating_add(added_b));
        let user_balance_a_after = Currency::free_balance(ACA, &ALICE);
        let user_balance_b_after = Currency::free_balance(DOT, &ALICE);
        assert_eq!(user_balance_a_after, user_balance_a_before.saturating_sub(added_a));
        assert_eq!(user_balance_b_after, user_balance_b_before.saturating_sub(added_b));
        // adding with one side zero only moves the non-zero side
        let user_balance_a_before = Currency::free_balance(ACA, &ALICE);
        let user_balance_b_before = Currency::free_balance(DOT, &ALICE);
        let pool_balance_a_before = Currency::free_balance(ACA, &ACA_DOT_POOL_ID);
        let pool_balance_b_before = Currency::free_balance(DOT, &ACA_DOT_POOL_ID);
        assert_ok!(LBPPallet::add_liquidity(
            Origin::signed(ALICE),
            (ACA, added_a),
            (DOT, 0),
        ));
        let pool_balance_a_after = Currency::free_balance(ACA, &ACA_DOT_POOL_ID);
        let pool_balance_b_after = Currency::free_balance(DOT, &ACA_DOT_POOL_ID);
        assert_eq!(pool_balance_a_after, pool_balance_a_before.saturating_add(added_a));
        assert_eq!(pool_balance_b_after, pool_balance_b_before);
        let user_balance_a_after = Currency::free_balance(ACA, &ALICE);
        let user_balance_b_after = Currency::free_balance(DOT, &ALICE);
        assert_eq!(user_balance_a_after, user_balance_a_before.saturating_sub(added_a));
        assert_eq!(user_balance_b_after, user_balance_b_before);
        // change asset order
        let user_balance_a_before = Currency::free_balance(ACA, &ALICE);
        let user_balance_b_before = Currency::free_balance(DOT, &ALICE);
        let pool_balance_a_before = Currency::free_balance(ACA, &ACA_DOT_POOL_ID);
        let pool_balance_b_before = Currency::free_balance(DOT, &ACA_DOT_POOL_ID);
        assert_ok!(LBPPallet::add_liquidity(
            Origin::signed(ALICE),
            (DOT, added_b),
            (ACA, added_a),
        ));
        let pool_balance_a_after = Currency::free_balance(ACA, &ACA_DOT_POOL_ID);
        let pool_balance_b_after = Currency::free_balance(DOT, &ACA_DOT_POOL_ID);
        assert_eq!(pool_balance_a_after, pool_balance_a_before.saturating_add(added_a));
        assert_eq!(pool_balance_b_after, pool_balance_b_before.saturating_add(added_b));
        let user_balance_a_after = Currency::free_balance(ACA, &ALICE);
        let user_balance_b_after = Currency::free_balance(DOT, &ALICE);
        assert_eq!(user_balance_a_after, user_balance_a_before.saturating_sub(added_a));
        assert_eq!(user_balance_b_after, user_balance_b_before.saturating_sub(added_b));
        // events reflect the asset order as passed in each call
        expect_events(vec![
            Event::LiquidityAdded(ACA_DOT_POOL_ID, ACA, DOT, added_a, added_b).into(),
            Event::LiquidityAdded(ACA_DOT_POOL_ID, ACA, DOT, added_a, 0).into(),
            Event::LiquidityAdded(ACA_DOT_POOL_ID, DOT, ACA, added_b, added_a).into(),
        ]);
    });
}
/// Only the pool owner may add liquidity; BOB is rejected despite having
/// sufficient balances.
#[test]
fn add_liquidity_by_non_owner_should_not_work() {
    predefined_test_ext().execute_with(|| {
        assert_eq!(Currency::free_balance(ACA, &BOB), 1000000000000000);
        assert_eq!(Currency::free_balance(DOT, &BOB), 1000000000000000);
        assert_eq!(Currency::free_balance(ACA, &ACA_DOT_POOL_ID), 1_000_000_000);
        assert_eq!(Currency::free_balance(DOT, &ACA_DOT_POOL_ID), 2_000_000_000);
        assert_noop!(
            LBPPallet::add_liquidity(Origin::signed(BOB), (ACA, 10_000_000_000), (DOT, 20_000_000_000),),
            Error::<Test>::NotOwner
        );
    });
}
/// Adding zero on both sides is rejected and leaves all balances untouched.
#[test]
fn add_zero_liquidity_should_not_work() {
    predefined_test_ext().execute_with(|| {
        let user_balance_a_before = Currency::free_balance(ACA, &ALICE);
        let user_balance_b_before = Currency::free_balance(DOT, &ALICE);
        let pool_balance_a_before = Currency::free_balance(ACA, &ACA_DOT_POOL_ID);
        let pool_balance_b_before = Currency::free_balance(DOT, &ACA_DOT_POOL_ID);
        assert_noop!(
            LBPPallet::add_liquidity(Origin::signed(ALICE), (ACA, 0), (DOT, 0),),
            Error::<Test>::CannotAddZeroLiquidity
        );
        // balances unchanged on both sides
        let pool_balance_a_after = Currency::free_balance(ACA, &ACA_DOT_POOL_ID);
        let pool_balance_b_after = Currency::free_balance(DOT, &ACA_DOT_POOL_ID);
        assert_eq!(pool_balance_a_after, pool_balance_a_before);
        assert_eq!(pool_balance_b_after, pool_balance_b_before);
        let user_balance_a_after = Currency::free_balance(ACA, &ALICE);
        let user_balance_b_after = Currency::free_balance(DOT, &ALICE);
        assert_eq!(user_balance_a_after, user_balance_a_before);
        assert_eq!(user_balance_b_after, user_balance_b_before);
        // no new events beyond the fixture's creation event
        let pool_data = LBPPallet::pool_data(ACA_DOT_POOL_ID);
        expect_events(vec![Event::PoolCreated(ACA_DOT_POOL_ID, pool_data).into()]);
    });
}
// Trying to add more liquidity than the owner holds (u128::MAX) must fail
// with `InsufficientAssetBalance` without moving any funds.
#[test]
fn add_liquidity_with_insufficient_balance_should_not_work() {
    predefined_test_ext().execute_with(|| {
        // Snapshot balances before the rejected call.
        let user_balance_a_before = Currency::free_balance(ACA, &ALICE);
        let pool_balance_a_before = Currency::free_balance(ACA, &ACA_DOT_POOL_ID);
        let pool_balance_b_before = Currency::free_balance(DOT, &ACA_DOT_POOL_ID);
        assert_noop!(
            LBPPallet::add_liquidity(Origin::signed(ALICE), (ACA, u128::MAX), (DOT, 0),),
            Error::<Test>::InsufficientAssetBalance
        );
        // Pool and user balances must be unchanged.
        let pool_balance_a_after = Currency::free_balance(ACA, &ACA_DOT_POOL_ID);
        let pool_balance_b_after = Currency::free_balance(DOT, &ACA_DOT_POOL_ID);
        assert_eq!(pool_balance_a_after, pool_balance_a_before);
        assert_eq!(pool_balance_b_after, pool_balance_b_before);
        let user_balance_a_after = Currency::free_balance(ACA, &ALICE);
        assert_eq!(user_balance_a_after, user_balance_a_before);
    });
}
// The owner can add liquidity both while the sale is running (block 15)
// and after it has ended (block 30); balances move accordingly in both cases.
#[test]
fn add_liquidity_after_sale_started_should_work() {
    predefined_test_ext().execute_with(|| {
        // Sale of the predefined pool runs from block 10 to block 20.
        System::set_block_number(15);
        let user_balance_a_before = Currency::free_balance(ACA, &ALICE);
        let user_balance_b_before = Currency::free_balance(DOT, &ALICE);
        let pool_balance_a_before = Currency::free_balance(ACA, &ACA_DOT_POOL_ID);
        let pool_balance_b_before = Currency::free_balance(DOT, &ACA_DOT_POOL_ID);
        assert_ok!(LBPPallet::add_liquidity(
            Origin::signed(ALICE),
            (ACA, 1_000),
            (DOT, 1_000),
        ));
        // The added amounts moved from the owner to the pool account.
        let pool_balance_a_after = Currency::free_balance(ACA, &ACA_DOT_POOL_ID);
        let pool_balance_b_after = Currency::free_balance(DOT, &ACA_DOT_POOL_ID);
        assert_eq!(pool_balance_a_after, pool_balance_a_before.saturating_add(1_000));
        assert_eq!(pool_balance_b_after, pool_balance_b_before.saturating_add(1_000));
        let user_balance_a_after = Currency::free_balance(ACA, &ALICE);
        let user_balance_b_after = Currency::free_balance(DOT, &ALICE);
        assert_eq!(user_balance_a_after, user_balance_a_before.saturating_sub(1_000));
        assert_eq!(user_balance_b_after, user_balance_b_before.saturating_sub(1_000));
        // sale ended at the block number 20
        System::set_block_number(30);
        let user_balance_a_before = Currency::free_balance(ACA, &ALICE);
        let user_balance_b_before = Currency::free_balance(DOT, &ALICE);
        let pool_balance_a_before = Currency::free_balance(ACA, &ACA_DOT_POOL_ID);
        let pool_balance_b_before = Currency::free_balance(DOT, &ACA_DOT_POOL_ID);
        assert_ok!(LBPPallet::add_liquidity(
            Origin::signed(ALICE),
            (ACA, 1_000),
            (DOT, 1_000),
        ));
        // Adding liquidity after the sale end behaves the same way.
        let pool_balance_a_after = Currency::free_balance(ACA, &ACA_DOT_POOL_ID);
        let pool_balance_b_after = Currency::free_balance(DOT, &ACA_DOT_POOL_ID);
        assert_eq!(pool_balance_a_after, pool_balance_a_before.saturating_add(1_000));
        assert_eq!(pool_balance_b_after, pool_balance_b_before.saturating_add(1_000));
        let user_balance_a_after = Currency::free_balance(ACA, &ALICE);
        let user_balance_b_after = Currency::free_balance(DOT, &ALICE);
        assert_eq!(user_balance_a_after, user_balance_a_before.saturating_sub(1_000));
        assert_eq!(user_balance_b_after, user_balance_b_before.saturating_sub(1_000));
        // One LiquidityAdded event per successful call, in call order.
        expect_events(vec![
            Event::LiquidityAdded(ACA_DOT_POOL_ID, ACA, DOT, 1_000, 1_000).into(),
            Event::LiquidityAdded(ACA_DOT_POOL_ID, ACA, DOT, 1_000, 1_000).into(),
        ]);
    });
}
// Adding liquidity for an asset pair without a pool must fail with `PoolNotFound`.
#[test]
fn add_liquidity_to_non_existing_pool_should_not_work() {
    predefined_test_ext().execute_with(|| {
        // Only an ACA/DOT pool exists in the predefined environment; ACA/HDX does not.
        let amounts = ((ACA, 1_000), (HDX, 1_000));
        assert_noop!(
            LBPPallet::add_liquidity(Origin::signed(ALICE), amounts.0, amounts.1),
            Error::<Test>::PoolNotFound
        );
    });
}
// After the sale has ended (block 21 > end block 20) the owner can remove
// all liquidity: the pool is drained, funds return to the owner, and the
// pool's storage entry is destroyed.
#[test]
fn remove_liquidity_should_work() {
    predefined_test_ext().execute_with(|| {
        System::set_block_number(21);
        let user_balance_a_before = Currency::free_balance(ACA, &ALICE);
        let user_balance_b_before = Currency::free_balance(DOT, &ALICE);
        let pool_balance_a_before = Currency::free_balance(ACA, &ACA_DOT_POOL_ID);
        let pool_balance_b_before = Currency::free_balance(DOT, &ACA_DOT_POOL_ID);
        assert_ok!(LBPPallet::remove_liquidity(Origin::signed(ALICE), ACA_DOT_POOL_ID,));
        // The pool account is fully drained...
        let pool_balance_a_after = Currency::free_balance(ACA, &ACA_DOT_POOL_ID);
        let pool_balance_b_after = Currency::free_balance(DOT, &ACA_DOT_POOL_ID);
        assert_eq!(pool_balance_a_after, 0);
        assert_eq!(pool_balance_b_after, 0);
        // ...and everything it held is credited back to the owner.
        let user_balance_a_after = Currency::free_balance(ACA, &ALICE);
        assert_eq!(
            user_balance_a_after,
            user_balance_a_before.saturating_add(pool_balance_a_before)
        );
        let user_balance_b_after = Currency::free_balance(DOT, &ALICE);
        assert_eq!(
            user_balance_b_after,
            user_balance_b_before.saturating_add(pool_balance_b_before)
        );
        // The pool's on-chain data is removed entirely.
        assert!(!<PoolData<Test>>::contains_key(ACA_DOT_POOL_ID));
        expect_events(vec![
            frame_system::Event::KilledAccount(ACA_DOT_POOL_ID).into(),
            Event::LiquidityRemoved(ACA_DOT_POOL_ID, ACA, DOT, pool_balance_a_before, pool_balance_b_before).into(),
        ]);
    });
}
// Pausing the pool during the sale does not prevent the owner from removing
// liquidity once the sale period is over.
#[test]
fn remove_liquidity_from_paused_pool_should_work() {
    predefined_test_ext().execute_with(|| {
        // Pause mid-sale, then move past the sale end (block 20).
        System::set_block_number(11);
        assert_ok!(LBPPallet::pause_pool(Origin::signed(ALICE), ACA_DOT_POOL_ID,));
        System::set_block_number(21);
        let user_balance_a_before = Currency::free_balance(ACA, &ALICE);
        let user_balance_b_before = Currency::free_balance(DOT, &ALICE);
        let pool_balance_a_before = Currency::free_balance(ACA, &ACA_DOT_POOL_ID);
        let pool_balance_b_before = Currency::free_balance(DOT, &ACA_DOT_POOL_ID);
        assert_ok!(LBPPallet::remove_liquidity(Origin::signed(ALICE), ACA_DOT_POOL_ID,));
        // Pool drained; owner fully refunded.
        let pool_balance_a_after = Currency::free_balance(ACA, &ACA_DOT_POOL_ID);
        let pool_balance_b_after = Currency::free_balance(DOT, &ACA_DOT_POOL_ID);
        assert_eq!(pool_balance_a_after, 0);
        assert_eq!(pool_balance_b_after, 0);
        let user_balance_a_after = Currency::free_balance(ACA, &ALICE);
        assert_eq!(
            user_balance_a_after,
            user_balance_a_before.saturating_add(pool_balance_a_before)
        );
        let user_balance_b_after = Currency::free_balance(DOT, &ALICE);
        assert_eq!(
            user_balance_b_after,
            user_balance_b_before.saturating_add(pool_balance_b_before)
        );
        assert!(!<PoolData<Test>>::contains_key(ACA_DOT_POOL_ID));
        // NOTE(review): the Paused event payload (1, 2003000) is presumably
        // (owner, pool id) — confirm against the pallet's event definition.
        expect_events(vec![
            Event::Paused(1, 2003000).into(),
            frame_system::Event::KilledAccount(ACA_DOT_POOL_ID).into(),
            Event::LiquidityRemoved(ACA_DOT_POOL_ID, ACA, DOT, pool_balance_a_before, pool_balance_b_before).into(),
        ]);
    });
}
// Liquidity can be removed from a pool whose sale has not started yet —
// both from the predefined ACA/DOT pool (start block 10, current block 0/1)
// and from a freshly created pool with no sale duration set.
#[test]
fn remove_liquidity_from_not_started_pool_should_work() {
    predefined_test_ext().execute_with(|| {
        let user_balance_a_before = Currency::free_balance(ACA, &ALICE);
        let user_balance_b_before = Currency::free_balance(DOT, &ALICE);
        let pool_balance_a_before = Currency::free_balance(ACA, &ACA_DOT_POOL_ID);
        let pool_balance_b_before = Currency::free_balance(DOT, &ACA_DOT_POOL_ID);
        assert_ok!(LBPPallet::remove_liquidity(Origin::signed(ALICE), ACA_DOT_POOL_ID,));
        // Pool drained, owner refunded, pool storage removed.
        let pool_balance_a_after = Currency::free_balance(ACA, &ACA_DOT_POOL_ID);
        let pool_balance_b_after = Currency::free_balance(DOT, &ACA_DOT_POOL_ID);
        assert_eq!(pool_balance_a_after, 0);
        assert_eq!(pool_balance_b_after, 0);
        let user_balance_a_after = Currency::free_balance(ACA, &ALICE);
        assert_eq!(
            user_balance_a_after,
            user_balance_a_before.saturating_add(pool_balance_a_before)
        );
        let user_balance_b_after = Currency::free_balance(DOT, &ALICE);
        assert_eq!(
            user_balance_b_after,
            user_balance_b_before.saturating_add(pool_balance_b_before)
        );
        assert!(!<PoolData<Test>>::contains_key(ACA_DOT_POOL_ID));
        expect_events(vec![
            frame_system::Event::KilledAccount(ACA_DOT_POOL_ID).into(),
            Event::LiquidityRemoved(ACA_DOT_POOL_ID, ACA, DOT, pool_balance_a_before, pool_balance_b_before).into(),
        ]);
        // sale duration is not specified
        assert_ok!(LBPPallet::create_pool(
            Origin::root(),
            ALICE,
            LBPAssetInfo {
                id: HDX,
                amount: 1_000_000_000,
                initial_weight: 20,
                final_weight: 90,
            },
            LBPAssetInfo {
                id: DOT,
                amount: 2_000_000_000,
                initial_weight: 80,
                final_weight: 10,
            },
            // (0, 0) means no sale interval has been set for this pool.
            (0u64, 0u64),
            WeightCurveType::Linear,
            true,
            Fee::default(),
            CHARLIE,
        ));
        let user_balance_a_before = Currency::free_balance(HDX, &ALICE);
        let user_balance_b_before = Currency::free_balance(DOT, &ALICE);
        let pool_balance_a_before = Currency::free_balance(HDX, &HDX_DOT_POOL_ID);
        let pool_balance_b_before = Currency::free_balance(DOT, &HDX_DOT_POOL_ID);
        assert_ok!(LBPPallet::remove_liquidity(Origin::signed(ALICE), HDX_DOT_POOL_ID,));
        // Same outcome for the never-started pool: drained and deleted.
        let pool_balance_a_after = Currency::free_balance(HDX, &HDX_DOT_POOL_ID);
        let pool_balance_b_after = Currency::free_balance(DOT, &HDX_DOT_POOL_ID);
        assert_eq!(pool_balance_a_after, 0);
        assert_eq!(pool_balance_b_after, 0);
        let user_balance_a_after = Currency::free_balance(HDX, &ALICE);
        assert_eq!(
            user_balance_a_after,
            user_balance_a_before.saturating_add(pool_balance_a_before)
        );
        let user_balance_b_after = Currency::free_balance(DOT, &ALICE);
        assert_eq!(
            user_balance_b_after,
            user_balance_b_before.saturating_add(pool_balance_b_before)
        );
        assert!(!<PoolData<Test>>::contains_key(HDX_DOT_POOL_ID));
        expect_events(vec![
            frame_system::Event::KilledAccount(HDX_DOT_POOL_ID).into(),
            Event::LiquidityRemoved(HDX_DOT_POOL_ID, HDX, DOT, pool_balance_a_before, pool_balance_b_before).into(),
        ]);
    });
}
// Removing liquidity from a pool that was never created must fail with `PoolNotFound`.
#[test]
fn remove_liquidity_from_non_existing_pool_should_not_work() {
    // A fresh environment contains no pools at all.
    new_test_ext().execute_with(|| {
        let missing_pool = ACA_DOT_POOL_ID;
        assert_noop!(
            LBPPallet::remove_liquidity(Origin::signed(ALICE), missing_pool),
            Error::<Test>::PoolNotFound
        );
    });
}
// While the sale is still running (block 15, sale ends at 20) the owner must
// not be able to withdraw liquidity; balances stay untouched.
#[test]
fn remove_liquidity_from_not_finalized_pool_should_not_work() {
    predefined_test_ext().execute_with(|| {
        System::set_block_number(15);
        // Snapshot balances before the rejected call.
        let user_balance_a_before = Currency::free_balance(ACA, &ALICE);
        let user_balance_b_before = Currency::free_balance(DOT, &ALICE);
        let pool_balance_a_before = Currency::free_balance(ACA, &ACA_DOT_POOL_ID);
        let pool_balance_b_before = Currency::free_balance(DOT, &ACA_DOT_POOL_ID);
        assert_noop!(
            LBPPallet::remove_liquidity(Origin::signed(ALICE), ACA_DOT_POOL_ID,),
            Error::<Test>::SaleNotEnded
        );
        // Nothing moved.
        let user_balance_a_after = Currency::free_balance(ACA, &ALICE);
        let user_balance_b_after = Currency::free_balance(DOT, &ALICE);
        let pool_balance_a_after = Currency::free_balance(ACA, &ACA_DOT_POOL_ID);
        let pool_balance_b_after = Currency::free_balance(DOT, &ACA_DOT_POOL_ID);
        assert_eq!(pool_balance_a_before, pool_balance_a_after);
        assert_eq!(pool_balance_b_before, pool_balance_b_after);
        assert_eq!(user_balance_a_before, user_balance_a_after);
        assert_eq!(user_balance_b_before, user_balance_b_after);
    });
}
// Removing liquidity after the sale end works and deletes the pool.
// NOTE(review): this test is currently identical to `remove_liquidity_should_work`
// above — consider consolidating or differentiating the two.
#[test]
fn remove_liquidity_from_finalized_pool_should_work() {
    predefined_test_ext().execute_with(|| {
        // Block 21 is past the sale end (block 20).
        System::set_block_number(21);
        let user_balance_a_before = Currency::free_balance(ACA, &ALICE);
        let user_balance_b_before = Currency::free_balance(DOT, &ALICE);
        let pool_balance_a_before = Currency::free_balance(ACA, &ACA_DOT_POOL_ID);
        let pool_balance_b_before = Currency::free_balance(DOT, &ACA_DOT_POOL_ID);
        assert_ok!(LBPPallet::remove_liquidity(Origin::signed(ALICE), ACA_DOT_POOL_ID,));
        // Pool drained and funds returned to the owner.
        let pool_balance_a_after = Currency::free_balance(ACA, &ACA_DOT_POOL_ID);
        let pool_balance_b_after = Currency::free_balance(DOT, &ACA_DOT_POOL_ID);
        assert_eq!(pool_balance_a_after, 0);
        assert_eq!(pool_balance_b_after, 0);
        let user_balance_a_after = Currency::free_balance(ACA, &ALICE);
        assert_eq!(
            user_balance_a_after,
            user_balance_a_before.saturating_add(pool_balance_a_before)
        );
        let user_balance_b_after = Currency::free_balance(DOT, &ALICE);
        assert_eq!(
            user_balance_b_after,
            user_balance_b_before.saturating_add(pool_balance_b_before)
        );
        assert!(!<PoolData<Test>>::contains_key(ACA_DOT_POOL_ID));
        expect_events(vec![
            frame_system::Event::KilledAccount(ACA_DOT_POOL_ID).into(),
            Event::LiquidityRemoved(ACA_DOT_POOL_ID, ACA, DOT, pool_balance_a_before, pool_balance_b_before).into(),
        ]);
    });
}
// Only the pool owner may remove liquidity; any other signer is rejected.
#[test]
fn remove_liquidity_by_non_owner_should_not_work() {
    predefined_test_ext().execute_with(|| {
        // BOB did not create the pool, so his call must fail with `NotOwner`.
        let non_owner = BOB;
        assert_noop!(
            LBPPallet::remove_liquidity(Origin::signed(non_owner), ACA_DOT_POOL_ID),
            Error::<Test>::NotOwner
        );
    });
}
// `execute_trade` moves funds for both trade directions: a sell transfer
// (fee taken in the outgoing asset) and a buy transfer (fee taken in the
// incoming asset); the fee goes to CHARLIE, the pool's fee receiver.
#[test]
fn execute_trade_should_work() {
    predefined_test_ext().execute_with(|| {
        let asset_in = ACA;
        let asset_out = DOT;
        let pool_id = ACA_DOT_POOL_ID;
        let amount_in = 5_000_000_u128;
        let amount_out = 10_000_000_u128;
        // Sell-style transfer: fee (1_000) is denominated in asset_out.
        let t_sell = AMMTransfer {
            origin: ALICE,
            assets: AssetPair { asset_in, asset_out },
            amount: amount_in,
            amount_out,
            discount: false,
            discount_amount: 0_u128,
            fee: (asset_out, 1_000),
        };
        // Initial balances of the trader, fee receiver and the pool account.
        assert_eq!(Currency::free_balance(asset_in, &ALICE), 999_999_000_000_000);
        assert_eq!(Currency::free_balance(asset_out, &ALICE), 999_998_000_000_000);
        assert_eq!(Currency::free_balance(asset_in, &CHARLIE), 0);
        assert_eq!(Currency::free_balance(asset_out, &CHARLIE), 0);
        assert_eq!(Currency::free_balance(asset_in, &pool_id), 1_000_000_000);
        assert_eq!(Currency::free_balance(asset_out, &pool_id), 2_000_000_000);
        assert_ok!(LBPPallet::execute_trade(&t_sell));
        // amount_in went to the pool, amount_out to ALICE, the fee to CHARLIE.
        assert_eq!(Currency::free_balance(asset_in, &ALICE), 999_998_995_000_000);
        assert_eq!(Currency::free_balance(asset_out, &ALICE), 999_998_010_000_000);
        assert_eq!(Currency::free_balance(asset_in, &CHARLIE), 0);
        assert_eq!(Currency::free_balance(asset_out, &CHARLIE), 1_000);
        assert_eq!(Currency::free_balance(asset_in, &pool_id), 1_005_000_000);
        assert_eq!(Currency::free_balance(asset_out, &pool_id), 1_989_999_000);
        // Buy-style transfer: fee (1_000) is denominated in asset_in.
        let t_buy = AMMTransfer {
            origin: ALICE,
            assets: AssetPair { asset_in, asset_out },
            amount: amount_in,
            amount_out,
            discount: false,
            discount_amount: 0_u128,
            fee: (asset_in, 1_000),
        };
        assert_ok!(LBPPallet::execute_trade(&t_buy));
        // This time the extra 1_000 fee is charged in asset_in.
        assert_eq!(Currency::free_balance(asset_in, &ALICE), 999_998_989_999_000);
        assert_eq!(Currency::free_balance(asset_out, &ALICE), 999_998_020_000_000);
        assert_eq!(Currency::free_balance(asset_in, &CHARLIE), 1_000);
        assert_eq!(Currency::free_balance(asset_out, &CHARLIE), 1_000);
        assert_eq!(Currency::free_balance(asset_in, &pool_id), 1_010_000_000);
        assert_eq!(Currency::free_balance(asset_out, &pool_id), 1_979_999_000);
    });
}
// This test ensures storage was not modified on error: asking for more
// asset_out than the pool holds must fail and leave all balances unchanged.
#[test]
fn execute_trade_should_not_work() {
    predefined_test_ext().execute_with(|| {
        let asset_in = ACA;
        let asset_out = DOT;
        let pool_id = LBPPallet::get_pair_id(AssetPair { asset_in, asset_out });
        let amount_in = 5_000_000_u128;
        // Deliberately larger than the pool's asset_out balance (2_000_000_000).
        let amount_out = 10_000_000_000_000_000u128;
        let t = AMMTransfer {
            origin: ALICE,
            assets: AssetPair { asset_in, asset_out },
            amount: amount_in,
            amount_out,
            discount: false,
            discount_amount: 0_u128,
            fee: (asset_in, 1_000),
        };
        // Balances before the failing trade.
        assert_eq!(Currency::free_balance(asset_in, &ALICE), 999_999_000_000_000);
        assert_eq!(Currency::free_balance(asset_out, &ALICE), 999_998_000_000_000);
        assert_eq!(Currency::free_balance(asset_out, &CHARLIE), 0);
        assert_eq!(Currency::free_balance(asset_in, &pool_id), 1_000_000_000);
        assert_eq!(Currency::free_balance(asset_out, &pool_id), 2_000_000_000);
        assert_noop!(LBPPallet::execute_trade(&t), orml_tokens::Error::<Test>::BalanceTooLow);
        // All balances must be exactly as before.
        assert_eq!(Currency::free_balance(asset_in, &ALICE), 999_999_000_000_000);
        assert_eq!(Currency::free_balance(asset_out, &ALICE), 999_998_000_000_000);
        assert_eq!(Currency::free_balance(asset_out, &CHARLIE), 0);
        assert_eq!(Currency::free_balance(asset_in, &pool_id), 1_000_000_000);
        assert_eq!(Currency::free_balance(asset_out, &pool_id), 2_000_000_000);
    });
}
// `execute_sell` transfers amount_in to the pool, amount_out to the trader,
// sends the fee to CHARLIE (fee receiver) and emits a SellExecuted event.
//
// Fix: the original asserted the identical SellExecuted event twice (once
// with the named variables, once with literals); the redundant duplicate
// check has been removed.
#[test]
fn execute_sell_should_work() {
    predefined_test_ext().execute_with(|| {
        let asset_in = ACA;
        let asset_out = DOT;
        let pool_id = LBPPallet::get_pair_id(AssetPair { asset_in, asset_out });
        let amount_in = 8_000_000_u128;
        let amount_out = 20_000_000_u128;
        // Sell transfer with a 1_000 fee denominated in asset_out.
        let t = AMMTransfer {
            origin: ALICE,
            assets: AssetPair { asset_in, asset_out },
            amount: amount_in,
            amount_out,
            discount: false,
            discount_amount: 0_u128,
            fee: (asset_out, 1_000),
        };
        // Balances before the sell.
        assert_eq!(Currency::free_balance(asset_in, &ALICE), 999_999_000_000_000);
        assert_eq!(Currency::free_balance(asset_out, &ALICE), 999_998_000_000_000);
        assert_eq!(Currency::free_balance(asset_out, &CHARLIE), 0);
        assert_eq!(Currency::free_balance(asset_in, &pool_id), 1_000_000_000);
        assert_eq!(Currency::free_balance(asset_out, &pool_id), 2_000_000_000);
        assert_ok!(LBPPallet::execute_sell(&t));
        // amount_in moved to the pool, amount_out to ALICE, fee to CHARLIE.
        assert_eq!(Currency::free_balance(asset_in, &ALICE), 999_998_992_000_000);
        assert_eq!(Currency::free_balance(asset_out, &ALICE), 999_998_020_000_000);
        assert_eq!(Currency::free_balance(asset_out, &CHARLIE), 1_000);
        assert_eq!(Currency::free_balance(asset_in, &pool_id), 1_008_000_000);
        assert_eq!(Currency::free_balance(asset_out, &pool_id), 1_979_999_000);
        expect_events(vec![Event::SellExecuted(
            ALICE, asset_in, asset_out, amount_in, amount_out, asset_out, 1_000,
        )
        .into()]);
    });
}
// This test ensures storage was not modified on error: a sell whose
// amount_out exceeds the pool's holdings fails and moves no funds.
#[test]
fn execute_sell_should_not_work() {
    predefined_test_ext().execute_with(|| {
        // amount_out (200_000_000_000_000) exceeds the pool's DOT balance.
        let t = AMMTransfer {
            origin: ALICE,
            assets: AssetPair {
                asset_in: ACA,
                asset_out: DOT,
            },
            amount: 8_000_000_000_u128,
            amount_out: 200_000_000_000_000_u128,
            discount: false,
            discount_amount: 0_u128,
            fee: (DOT, 1_000),
        };
        // Balances before the failing sell.
        assert_eq!(Currency::free_balance(ACA, &ALICE), 999_999_000_000_000);
        assert_eq!(Currency::free_balance(DOT, &ALICE), 999_998_000_000_000);
        assert_eq!(Currency::free_balance(DOT, &CHARLIE), 0);
        assert_eq!(Currency::free_balance(ACA, &ACA_DOT_POOL_ID), 1_000_000_000);
        assert_eq!(Currency::free_balance(DOT, &ACA_DOT_POOL_ID), 2_000_000_000);
        assert_noop!(LBPPallet::execute_sell(&t), orml_tokens::Error::<Test>::BalanceTooLow);
        // All balances unchanged.
        assert_eq!(Currency::free_balance(ACA, &ALICE), 999_999_000_000_000);
        assert_eq!(Currency::free_balance(DOT, &ALICE), 999_998_000_000_000);
        assert_eq!(Currency::free_balance(DOT, &CHARLIE), 0);
        assert_eq!(Currency::free_balance(ACA, &ACA_DOT_POOL_ID), 1_000_000_000);
        assert_eq!(Currency::free_balance(DOT, &ACA_DOT_POOL_ID), 2_000_000_000);
    });
}
// A weight of zero is rejected both at pool creation (zero final weight)
// and when updating an existing pool's data (zero initial weight).
#[test]
fn zero_weight_should_not_work() {
    predefined_test_ext().execute_with(|| {
        // create_pool with final_weight == 0 for the first asset must fail.
        assert_noop!(
            LBPPallet::create_pool(
                Origin::root(),
                ALICE,
                LBPAssetInfo {
                    id: ACA,
                    amount: 1_000_000_000,
                    initial_weight: 100,
                    final_weight: 0,
                },
                LBPAssetInfo {
                    id: ETH,
                    amount: 2_000_000_000,
                    initial_weight: 100,
                    final_weight: 100,
                },
                (10u64, 20u64),
                WeightCurveType::Linear,
                true,
                Fee::default(),
                CHARLIE,
            ),
            Error::<Test>::ZeroWeight
        );
        // update_pool_data with a zero weight ((ACA, 0)) must fail as well.
        assert_noop!(
            LBPPallet::update_pool_data(
                Origin::signed(ALICE),
                ACA_DOT_POOL_ID,
                Some((15, 18)),
                Some(((ACA, 0), (DOT, 90))),
                Some(((ACA, 80), (DOT, 20))),
                Some(Fee {
                    numerator: 5,
                    denominator: 100,
                }),
                Some(BOB),
            ),
            Error::<Test>::ZeroWeight
        );
    });
}
// `execute_buy` transfers amount_in to the pool, amount_out to the trader,
// charges the fee in asset_in (sent to CHARLIE) and emits BuyExecuted.
#[test]
fn execute_buy_should_work() {
    predefined_test_ext().execute_with(|| {
        let asset_in = ACA;
        let asset_out = DOT;
        let pool_id = LBPPallet::get_pair_id(AssetPair { asset_in, asset_out });
        let amount_in = 8_000_000_u128;
        let amount_out = 20_000_000_u128;
        // Buy transfer with a 1_000 fee denominated in asset_in.
        let t = AMMTransfer {
            origin: ALICE,
            assets: AssetPair { asset_in, asset_out },
            amount: amount_in,
            amount_out,
            discount: false,
            discount_amount: 0_u128,
            fee: (asset_in, 1_000),
        };
        // Balances before the buy.
        assert_eq!(Currency::free_balance(asset_in, &ALICE), 999_999_000_000_000);
        assert_eq!(Currency::free_balance(asset_out, &ALICE), 999_998_000_000_000);
        assert_eq!(Currency::free_balance(asset_in, &CHARLIE), 0);
        assert_eq!(Currency::free_balance(asset_out, &CHARLIE), 0);
        assert_eq!(Currency::free_balance(asset_in, &pool_id), 1_000_000_000);
        assert_eq!(Currency::free_balance(asset_out, &pool_id), 2_000_000_000);
        assert_ok!(LBPPallet::execute_buy(&t));
        // ALICE pays amount_in + fee; the fee (in asset_in) goes to CHARLIE.
        assert_eq!(Currency::free_balance(asset_in, &ALICE), 999_998_991_999_000);
        assert_eq!(Currency::free_balance(asset_out, &ALICE), 999_998_020_000_000);
        assert_eq!(Currency::free_balance(asset_in, &CHARLIE), 1_000);
        assert_eq!(Currency::free_balance(asset_out, &CHARLIE), 0);
        assert_eq!(Currency::free_balance(asset_in, &pool_id), 1_008_000_000);
        assert_eq!(Currency::free_balance(asset_out, &pool_id), 1_980_000_000);
        expect_events(vec![Event::BuyExecuted(
            ALICE, asset_out, asset_in, 8_000_000, 20_000_000, asset_in, 1_000,
        )
        .into()]);
    });
}
// This test ensures storage was not modified on error: a buy whose
// amount_out exceeds the pool's holdings fails and moves no funds.
#[test]
fn execute_buy_should_not_work() {
    predefined_test_ext().execute_with(|| {
        let asset_in = ACA;
        let asset_out = DOT;
        let pool_id = LBPPallet::get_pair_id(AssetPair { asset_in, asset_out });
        let amount_in = 8_000_000_000_u128;
        // Deliberately larger than the pool's asset_out balance.
        let amount_out = 200_000_000_000_000_u128;
        let t = AMMTransfer {
            origin: ALICE,
            assets: AssetPair { asset_in, asset_out },
            amount: amount_in,
            amount_out,
            discount: false,
            discount_amount: 0_u128,
            fee: (asset_in, 1_000),
        };
        // Balances before the failing buy.
        assert_eq!(Currency::free_balance(asset_in, &ALICE), 999_999_000_000_000);
        assert_eq!(Currency::free_balance(asset_out, &ALICE), 999_998_000_000_000);
        assert_eq!(Currency::free_balance(asset_in, &CHARLIE), 0);
        assert_eq!(Currency::free_balance(asset_in, &pool_id), 1_000_000_000);
        assert_eq!(Currency::free_balance(asset_out, &pool_id), 2_000_000_000);
        assert_noop!(LBPPallet::execute_buy(&t), orml_tokens::Error::<Test>::BalanceTooLow);
        // All balances unchanged.
        assert_eq!(Currency::free_balance(asset_in, &ALICE), 999_999_000_000_000);
        assert_eq!(Currency::free_balance(asset_out, &ALICE), 999_998_000_000_000);
        assert_eq!(Currency::free_balance(asset_in, &CHARLIE), 0);
        assert_eq!(Currency::free_balance(asset_in, &pool_id), 1_000_000_000);
        assert_eq!(Currency::free_balance(asset_out, &pool_id), 2_000_000_000);
    });
}
// Selling a zero amount must be rejected with `ZeroAmount`.
#[test]
fn sell_zero_amount_should_not_work() {
    predefined_test_ext().execute_with(|| {
        let (amount, limit) = (0_u128, 200_000_u128);
        assert_noop!(
            LBPPallet::sell(Origin::signed(BOB), ACA, DOT, amount, limit),
            Error::<Test>::ZeroAmount
        );
    });
}
// Buying a zero amount must be rejected with `ZeroAmount`.
#[test]
fn buy_zero_amount_should_not_work() {
    predefined_test_ext().execute_with(|| {
        let (amount, limit) = (0_u128, 200_000_u128);
        assert_noop!(
            LBPPallet::buy(Origin::signed(BOB), ACA, DOT, amount, limit),
            Error::<Test>::ZeroAmount
        );
    });
}
// Selling into an asset pair with no pool must fail with `PoolNotFound`.
#[test]
fn sell_to_non_existing_pool_should_not_work() {
    predefined_test_ext().execute_with(|| {
        // Only ACA/DOT exists in the predefined environment; ACA/ETH does not.
        let (amount, limit) = (800_000_u128, 200_000_u128);
        assert_noop!(
            LBPPallet::sell(Origin::signed(BOB), ACA, ETH, amount, limit),
            Error::<Test>::PoolNotFound
        );
    });
}
// Buying from an asset pair with no pool must fail with `PoolNotFound`.
#[test]
fn buy_from_non_existing_pool_should_not_work() {
    predefined_test_ext().execute_with(|| {
        // Only ACA/DOT exists in the predefined environment; ACA/ETH does not.
        let (amount, limit) = (800_000_u128, 200_000_u128);
        assert_noop!(
            LBPPallet::buy(Origin::signed(BOB), ACA, ETH, amount, limit),
            Error::<Test>::PoolNotFound
        );
    });
}
// Sells larger than pool_reserve / MAX_IN_RATIO are rejected with
// `MaxInRatioExceeded`; a sell exactly at the limit is accepted.
#[test]
fn exceed_max_in_ratio_should_not_work() {
    predefined_test_ext().execute_with(|| {
        run_to_block(11); //start sale
        // One unit over the maximum allowed input must fail.
        assert_noop!(
            LBPPallet::sell(
                Origin::signed(BOB),
                ACA,
                DOT,
                1_000_000_000 / MAX_IN_RATIO + 1,
                200_000_u128
            ),
            Error::<Test>::MaxInRatioExceeded
        );
        //1/2 should not work
        assert_noop!(
            LBPPallet::sell(Origin::signed(BOB), ACA, DOT, 1_000_000_000 / 2, 200_000_u128),
            Error::<Test>::MaxInRatioExceeded
        );
        //max ratio should work
        assert_ok!(LBPPallet::sell(
            Origin::signed(BOB),
            ACA,
            DOT,
            1_000_000_000 / MAX_IN_RATIO,
            2_000_u128
        ));
    });
}
// Buys demanding more than pool_reserve / MAX_OUT_RATIO are rejected with
// `MaxOutRatioExceeded`; a buy exactly at the limit is accepted.
#[test]
fn exceed_max_out_ratio_should_not_work() {
    predefined_test_ext().execute_with(|| {
        run_to_block(11); //start sale
        //max_ratio_out + 1 should not work
        assert_noop!(
            LBPPallet::buy(
                Origin::signed(BOB),
                ACA,
                DOT,
                1_000_000_000 / MAX_OUT_RATIO + 1,
                200_000_u128
            ),
            Error::<Test>::MaxOutRatioExceeded
        );
        //1/2 should not work
        assert_noop!(
            LBPPallet::buy(Origin::signed(BOB), ACA, DOT, 1_000_000_000 / 2, 200_000_u128),
            Error::<Test>::MaxOutRatioExceeded
        );
        //max ratio should work
        assert_ok!(LBPPallet::buy(
            Origin::signed(BOB),
            ACA,
            DOT,
            1_000_000_000 / MAX_OUT_RATIO,
            2_000_000_000_u128
        ));
    });
}
// Both sell and buy are rejected with `SaleIsNotRunning` in three cases:
// before the sale starts, after it ends, and while the pool is paused.
#[test]
fn trade_in_non_running_pool_should_not_work() {
    predefined_test_ext().execute_with(|| {
        let who = BOB;
        let asset_in = ACA;
        let asset_out = DOT;
        let amount = 800_000_u128;
        let limit = 200_000_u128;
        //sale not started
        run_to_block(9);
        assert_noop!(
            LBPPallet::sell(Origin::signed(who), asset_in, asset_out, amount, limit),
            Error::<Test>::SaleIsNotRunning
        );
        assert_noop!(
            LBPPallet::buy(Origin::signed(who), asset_in, asset_out, amount, limit),
            Error::<Test>::SaleIsNotRunning
        );
        //sale ended
        run_to_block(21);
        assert_noop!(
            LBPPallet::sell(Origin::signed(who), asset_in, asset_out, amount, limit),
            Error::<Test>::SaleIsNotRunning
        );
        assert_noop!(
            LBPPallet::buy(Origin::signed(who), asset_in, asset_out, amount, limit),
            Error::<Test>::SaleIsNotRunning
        );
        //unpaused pool - pool is created as unpaused by default
        assert_ok!(LBPPallet::create_pool(
            Origin::root(),
            ALICE,
            LBPAssetInfo {
                id: HDX,
                amount: 1_000_000_000,
                initial_weight: 20,
                final_weight: 90,
            },
            LBPAssetInfo {
                id: ETH,
                amount: 10_000,
                initial_weight: 80,
                final_weight: 10,
            },
            (30u64, 40u64),
            WeightCurveType::Linear,
            true,
            Fee::default(),
            CHARLIE,
        ));
        // Pause the freshly created HDX/ETH pool.
        // NOTE(review): 4_000 is presumably the id of the pool created above —
        // confirm (other tests observe NewAccount(4000) for a new pool).
        assert_ok!(LBPPallet::pause_pool(Origin::signed(ALICE), 4_000));
        //pool started but is paused
        run_to_block(30);
        assert_noop!(
            LBPPallet::sell(Origin::signed(BOB), HDX, ETH, amount, limit),
            Error::<Test>::SaleIsNotRunning
        );
        assert_noop!(
            LBPPallet::buy(Origin::signed(BOB), HDX, ETH, amount, limit),
            Error::<Test>::SaleIsNotRunning
        );
    });
}
// A trade that would violate the caller-supplied balance limit must fail
// with `AssetBalanceLimitExceeded`, for both sell and buy.
#[test]
fn exceed_trader_limit_should_not_work() {
    predefined_test_ext().execute_with(|| {
        let trader = BOB;
        let amount = 800_000_u128;
        // Limits deliberately chosen so that both trades violate them.
        let sell_limit = 800_000_u128;
        let buy_limit = 1_000_u128;
        //start sale
        run_to_block(11);
        assert_noop!(
            LBPPallet::sell(Origin::signed(trader), ACA, DOT, amount, sell_limit),
            Error::<Test>::AssetBalanceLimitExceeded
        );
        assert_noop!(
            LBPPallet::buy(Origin::signed(trader), ACA, DOT, amount, buy_limit),
            Error::<Test>::AssetBalanceLimitExceeded
        );
    });
}
// Selling more than the trader holds must fail with `InsufficientAssetBalance`.
#[test]
fn sell_with_insufficient_balance_should_not_work() {
    predefined_test_ext().execute_with(|| {
        let who = BOB;
        let asset_in = ACA;
        let asset_out = ETH;
        let amount = 1_000_000_u128;
        // Create an ACA/ETH pool with a sale running in blocks 30..40.
        assert_ok!(LBPPallet::create_pool(
            Origin::root(),
            ALICE,
            LBPAssetInfo {
                id: asset_in,
                amount: 1_000_000_000,
                initial_weight: 50,
                final_weight: 50,
            },
            LBPAssetInfo {
                id: asset_out,
                amount: 1_000_000_000,
                initial_weight: 50,
                final_weight: 50,
            },
            (30u64, 40u64),
            WeightCurveType::Linear,
            true,
            Fee::default(),
            CHARLIE,
        ));
        // Drain BOB's asset_in down to 100_000 — less than the sell amount.
        assert_ok!(Currency::withdraw(asset_in, &who, 999_999_999_900_000));
        assert_eq!(Currency::free_balance(asset_in, &who), 100_000);
        //start sale
        run_to_block(31);
        assert_noop!(
            LBPPallet::sell(Origin::signed(who), asset_in, asset_out, amount, 800_000_u128),
            Error::<Test>::InsufficientAssetBalance
        );
    });
}
// Buying when the trader cannot cover the payment asset must fail with
// `InsufficientAssetBalance`.
#[test]
fn buy_with_insufficient_balance_should_not_work() {
    new_test_ext().execute_with(|| {
        let who = BOB;
        let asset_in = ACA;
        let asset_out = ETH;
        let amount = 1_000_000_u128;
        // Create an ACA/ETH pool with a sale running in blocks 30..40.
        assert_ok!(LBPPallet::create_pool(
            Origin::root(),
            ALICE,
            LBPAssetInfo {
                id: asset_in,
                amount: 1_000_000_000,
                initial_weight: 50,
                final_weight: 50,
            },
            LBPAssetInfo {
                id: asset_out,
                amount: 1_000_000_000,
                initial_weight: 50,
                final_weight: 50,
            },
            (30u64, 40u64),
            WeightCurveType::Linear,
            true,
            Fee::default(),
            CHARLIE,
        ));
        // Drain BOB's payment asset down to 100_000.
        assert_ok!(Currency::withdraw(asset_in, &who, 999_999_999_900_000));
        assert_eq!(Currency::free_balance(asset_in, &who), 100_000);
        //start sale
        run_to_block(31);
        assert_noop!(
            LBPPallet::buy(Origin::signed(who), asset_out, asset_in, amount, 2_000_000_u128),
            Error::<Test>::InsufficientAssetBalance
        );
    });
}
// Verifies `buy` on a running pool: balance transfers, the pool's weight
// update for the current block, and the emitted events. A second pool is
// then created to also exercise a buy where amount_in < amount_out.
//
// Fix: removed the dead commented-out assertion/withdraw lines left over
// from an earlier revision of the test; no executable code changed.
#[test]
fn buy_should_work() {
    predefined_test_ext().execute_with(|| {
        let who = BOB;
        let asset_in = ACA;
        let asset_out = DOT;
        let pool_id = LBPPallet::get_pair_id(AssetPair { asset_in, asset_out });
        // Shrink BOB's balances so the post-trade amounts are exact.
        assert_ok!(Currency::withdraw(asset_in, &who, 999_999_985_546_560));
        assert_eq!(Currency::free_balance(asset_in, &who), 14_453_440);
        assert_ok!(Currency::withdraw(asset_out, &who, 1_000_000_000_000_000));
        assert_eq!(Currency::free_balance(asset_out, &who), 0);
        assert_eq!(Currency::free_balance(asset_in, &pool_id), 1_000_000_000_u128);
        assert_eq!(Currency::free_balance(asset_out, &pool_id), 2_000_000_000_u128);
        //start sale
        run_to_block(11);
        assert_ok!(LBPPallet::buy(
            Origin::signed(who),
            asset_out,
            asset_in,
            10_000_000_u128,
            2_000_000_000_u128
        ));
        // The trade must also have updated the pool's last weights for block 11.
        let pool = <PoolData<Test>>::get(pool_id);
        assert_eq!(
            Pool {
                owner: ALICE,
                start: 10u64,
                end: 20u64,
                assets: (asset_in, asset_out),
                initial_weights: (20, 80),
                final_weights: (90, 10),
                last_weight_update: 11u64,
                last_weights: (27, 73),
                weight_curve: WeightCurveType::Linear,
                pausable: true,
                paused: false,
                fee: Fee::default(),
                fee_receiver: CHARLIE,
            },
            pool
        );
        // BOB spent his entire asset_in balance and received 10_000_000 asset_out.
        assert_eq!(Currency::free_balance(asset_in, &who), 0);
        assert_eq!(Currency::free_balance(asset_out, &who), 10_000_000);
        assert_eq!(Currency::free_balance(asset_in, &pool_id), 1_014_424_591);
        assert_eq!(Currency::free_balance(asset_out, &pool_id), 1_990_000_000);
        // test buy where the amount_in is less than the amount_out
        let asset_in = HDX;
        let asset_out = ETH;
        let pool_id = LBPPallet::get_pair_id(AssetPair { asset_in, asset_out });
        assert_ok!(LBPPallet::create_pool(
            Origin::root(),
            ALICE,
            LBPAssetInfo {
                id: HDX,
                amount: 1_000_000_000,
                initial_weight: 80,
                final_weight: 10,
            },
            LBPAssetInfo {
                id: ETH,
                amount: 2_000_000_000,
                initial_weight: 20,
                final_weight: 90,
            },
            (20u64, 30u64),
            WeightCurveType::Linear,
            true,
            Fee::default(),
            CHARLIE,
        ));
        // NOTE(review): 4000 is presumably the new pool's id — matches the
        // NewAccount(4000)/PoolCreated(4000, ...) events asserted below.
        let pool_data = LBPPallet::pool_data(4000);
        assert_ok!(Currency::withdraw(asset_in, &who, 999_999_990_000_001));
        assert_eq!(Currency::free_balance(asset_in, &pool_id), 1_000_000_000_u128);
        assert_eq!(Currency::free_balance(asset_out, &pool_id), 2_000_000_000_u128);
        //start sale
        run_to_block(21);
        assert_ok!(LBPPallet::buy(
            Origin::signed(who),
            asset_out,
            asset_in,
            10_000_000_u128,
            2_000_000_000_u128
        ));
        assert_eq!(Currency::free_balance(asset_in, &pool_id), 1_001_755_928);
        assert_eq!(Currency::free_balance(asset_out, &pool_id), 1_990_000_000);
        expect_events(vec![
            Event::BuyExecuted(who, DOT, ACA, 14_424_591, 10_000_000, ACA, 28_849).into(),
            frame_system::Event::NewAccount(4000).into(),
            orml_tokens::Event::Endowed(0, 4000, 1000000000).into(),
            orml_tokens::Event::Endowed(4000, 4000, 2000000000).into(),
            Event::PoolCreated(4000, pool_data).into(),
            orml_tokens::Event::Endowed(0, 3, 3511).into(),
            Event::BuyExecuted(2, 4000, 0, 1755928, 10000000, 0, 3511).into(),
        ]);
    });
}
// Verifies `sell` on a running pool: the pool's weight update for the
// current block, exact balance transfers (including the fee), and the
// SellExecuted event.
#[test]
fn sell_should_work() {
    predefined_test_ext().execute_with(|| {
        let who = BOB;
        let asset_in = ACA;
        let asset_out = DOT;
        let pool_id = LBPPallet::get_pair_id(AssetPair { asset_in, asset_out });
        // Empty BOB's asset_out balance so the received amount is exact.
        assert_eq!(Currency::free_balance(asset_in, &who), 1_000_000_000_000_000);
        assert_ok!(Currency::withdraw(asset_out, &who, 1_000_000_000_000_000));
        assert_eq!(Currency::free_balance(asset_out, &who), 0);
        assert_eq!(Currency::free_balance(asset_in, &pool_id), 1_000_000_000_u128);
        assert_eq!(Currency::free_balance(asset_out, &pool_id), 2_000_000_000_u128);
        //start sale
        run_to_block(11);
        assert_ok!(LBPPallet::sell(
            Origin::signed(who),
            asset_in,
            asset_out,
            10_000_000_u128,
            2_000_u128
        ));
        // The sell must also have updated the pool's last weights for block 11.
        let pool = <PoolData<Test>>::get(pool_id);
        assert_eq!(
            Pool {
                owner: ALICE,
                start: 10u64,
                end: 20u64,
                assets: (asset_in, asset_out),
                initial_weights: (20, 80),
                final_weights: (90, 10),
                last_weight_update: 11u64,
                last_weights: (27, 73),
                weight_curve: WeightCurveType::Linear,
                pausable: true,
                paused: false,
                fee: Fee::default(),
                fee_receiver: CHARLIE,
            },
            pool
        );
        // BOB paid 10_000_000 asset_in and received 6_939_210 asset_out;
        // the 13_906 fee (in asset_out) went to CHARLIE, not the pool.
        assert_eq!(Currency::free_balance(asset_in, &who), INITIAL_BALANCE - 10_000_000);
        assert_eq!(Currency::free_balance(asset_out, &who), 6_939_210);
        assert_eq!(Currency::free_balance(asset_in, &pool_id), 1_010_000_000);
        assert_eq!(Currency::free_balance(asset_out, &pool_id), 1_993_046_884);
        expect_events(vec![Event::SellExecuted(
            who, asset_in, asset_out, 10_000_000, 6_939_210, asset_out, 13_906,
        )
        .into()]);
    });
}
// A pool created with a zero fee numerator is valid and trades succeed.
#[test]
fn zero_fee_should_work() {
    new_test_ext().execute_with(|| {
        assert_ok!(LBPPallet::create_pool(
            Origin::root(),
            ALICE,
            LBPAssetInfo {
                id: ACA,
                amount: 1_000_000_000,
                initial_weight: 20,
                final_weight: 90,
            },
            LBPAssetInfo {
                id: DOT,
                amount: 2_000_000_000,
                initial_weight: 80,
                final_weight: 10,
            },
            (10u64, 20u64),
            WeightCurveType::Linear,
            true,
            // 0/100 — no fee is charged on trades.
            Fee {
                numerator: 0,
                denominator: 100,
            },
            CHARLIE,
        ));
        //start sale
        run_to_block(11);
        assert_ok!(LBPPallet::sell(Origin::signed(ALICE), ACA, DOT, 1_000, 1,));
    });
}
#[test]
// `create_pool` must reject a fee with a zero denominator (division by
// zero would otherwise be possible) with `Error::FeeAmountInvalid`, and
// `assert_noop!` additionally checks that no storage was mutated.
fn invalid_fee_should_not_work() {
new_test_ext().execute_with(|| {
assert_noop!(
LBPPallet::create_pool(
Origin::root(),
ALICE,
LBPAssetInfo {
id: ACA,
amount: 1_000_000_000,
initial_weight: 20,
final_weight: 90,
},
LBPAssetInfo {
id: DOT,
amount: 2_000_000_000,
initial_weight: 80,
final_weight: 10,
},
(10u64, 20u64),
WeightCurveType::Linear,
true,
// Invalid: denominator must not be zero.
Fee {
numerator: 10,
denominator: 0,
},
CHARLIE,
),
Error::<Test>::FeeAmountInvalid
);
});
}
#[test]
// Exercises the AMM trait surface: `exists` is symmetric in the asset
// pair ordering, `get_pair_id` maps both orderings to the same pool id,
// and `get_pool_assets` returns `None` for a non-existent pool.
fn amm_trait_should_work() {
predefined_test_ext().execute_with(|| {
let asset_pair = AssetPair {
asset_in: ACA,
asset_out: DOT,
};
let reversed_asset_pair = AssetPair {
asset_in: DOT,
asset_out: ACA,
};
let non_existing_asset_pair = AssetPair {
asset_in: DOT,
asset_out: HDX,
};
assert!(LBPPallet::exists(asset_pair));
assert!(LBPPallet::exists(reversed_asset_pair));
assert!(!LBPPallet::exists(non_existing_asset_pair));
// Both orderings of the pair resolve to the same pool account.
assert_eq!(LBPPallet::get_pair_id(asset_pair), ACA_DOT_POOL_ID);
assert_eq!(LBPPallet::get_pair_id(reversed_asset_pair), ACA_DOT_POOL_ID);
assert_eq!(LBPPallet::get_pool_assets(&ACA_DOT_POOL_ID), Some(vec![ACA, DOT]));
assert_eq!(LBPPallet::get_pool_assets(&HDX_DOT_POOL_ID), None);
// TODO: test all methods from the AMM trait
});
}
#[test]
// `get_spot_price_unchecked` must agree with the reference math in
// `hydra_dx_math::lbp::calculate_spot_price` for both trade directions
// and after a weight change, and must return 0 for a missing pool, on
// arithmetic overflow, and after the sale has ended.
fn get_spot_price_should_work() {
predefined_test_ext().execute_with(|| {
System::set_block_number(10);
// Reference price at sale start: reserves (1e9, 2e9), weights (20, 80).
let price = hydra_dx_math::lbp::calculate_spot_price(
1_000_000_000_u128,
2_000_000_000_u128,
20_u128,
80_u128,
1_000_000_u128,
)
.unwrap_or_else(|_| BalanceOf::<Test>::zero());
assert_eq!(LBPPallet::get_spot_price_unchecked(ACA, DOT, 1_000_000_u128), price);
// swap assets
let price = hydra_dx_math::lbp::calculate_spot_price(
2_000_000_000_u128,
1_000_000_000_u128,
80_u128,
20_u128,
1_000_000_u128,
)
.unwrap_or_else(|_| BalanceOf::<Test>::zero());
assert_eq!(LBPPallet::get_spot_price_unchecked(DOT, ACA, 1_000_000_u128), price);
// change weights
System::set_block_number(20);
// At the sale end block the weights have reached their final (90, 10).
let price = hydra_dx_math::lbp::calculate_spot_price(
1_000_000_000_u128,
2_000_000_000_u128,
90_u128,
10_u128,
1_000_000_u128,
)
.unwrap_or_else(|_| BalanceOf::<Test>::zero());
assert_eq!(LBPPallet::get_spot_price_unchecked(ACA, DOT, 1_000_000), price);
// pool does not exist
assert_eq!(LBPPallet::get_spot_price_unchecked(ACA, HDX, 1_000_000), 0);
// overflow
assert_eq!(LBPPallet::get_spot_price_unchecked(ACA, DOT, u128::MAX), 0);
// sale ended
System::set_block_number(21);
assert_eq!(LBPPallet::get_spot_price_unchecked(ACA, DOT, 1_000_000), 0);
});
}
#[test]
// End-to-end simulation of a 3-day LBP sale: a schedule of buys and sells
// is executed over 72 intervals between `sale_start` and `sale_end`, then
// the final trader/pool/fee-receiver balances are checked against
// hard-coded expectations and the owner removes the remaining liquidity.
fn simulate_lbp_event_should_work() {
new_test_ext().execute_with(|| {
// setup
let pool_owner = BOB;
let lbp_participant = CHARLIE;
let asset_in = DOT;
let asset_in_pool_reserve: u128 = 1_000_000;
let owner_initial_asset_in_balance: u128 = 1_000_000_000_000;
let lbp_participant_initial_asset_in_balance: u128 = 1_000_000_000_000;
let asset_in_initial_weight = 400;
let asset_in_final_weight = 3_000;
let asset_out = HDX;
let asset_out_pool_reserve: u128 = 500_000_000;
let owner_initial_asset_out_balance: u128 = 1_000_000_000_000;
let lbp_participant_initial_asset_out_balance: u128 = 1_000_000_000_000;
let asset_out_initial_weight = 3_600;
let asset_out_final_weight = 1_000;
let sale_start: u64 = 1_000;
let sale_end: u64 = 22_600; // in blocks; 3 days
// Build the trade schedule: one trade per interval, keyed by block
// number. The first 10 intervals are skipped entirely; the intervals
// listed in `sells` are sells, everything else is a buy.
let mut trades = BTreeMap::new();
let intervals: u64 = 72;
let sale_rate = 200_000_000; // asset_out per day
let buy_amount = sale_rate / 24;
let sell_amount = 100_000_000 / 24;
let skip = vec![0, 1, 2, 3, 4, 5, 6, 7, 8, 9];
let sells = vec![19, 20, 21, 33, 34, 35, 48, 49, 50, 62, 63, 64];
for i in 0..=intervals {
let block_num = sale_start + (i * ((sale_end - sale_start) / intervals));
if skip.contains(&i) {
continue;
}
let (is_buy, amount) = if sells.contains(&i) {
(false, sell_amount)
} else {
(true, buy_amount)
};
trades.insert(block_num, (is_buy, amount));
}
let fee = Fee {
numerator: 9,
denominator: 1_000,
};
let fee_receiver = ALICE;
let pausable = true;
let trade_limit_factor: u128 = 1_000;
// preparations
let asset_pair = AssetPair { asset_in, asset_out };
let pool_account = LBPPallet::get_pair_id(asset_pair);
// Zero out all relevant balances first so the final assertions are
// independent of the genesis configuration, then fund the owner with
// exactly the initial balance plus the pool reserves.
Currency::set_balance(Origin::root(), fee_receiver, asset_in, 0, 0).unwrap();
Currency::set_balance(Origin::root(), fee_receiver, asset_out, 0, 0).unwrap();
Currency::set_balance(Origin::root(), pool_owner, asset_in, 0, 0).unwrap();
Currency::set_balance(Origin::root(), pool_owner, asset_out, 0, 0).unwrap();
Currency::set_balance(
Origin::root(),
pool_owner,
asset_in,
owner_initial_asset_in_balance
.checked_add(asset_in_pool_reserve)
.unwrap(),
0,
)
.unwrap();
Currency::set_balance(
Origin::root(),
pool_owner,
asset_out,
owner_initial_asset_out_balance
.checked_add(asset_out_pool_reserve)
.unwrap(),
0,
)
.unwrap();
<Test as Config>::MultiCurrency::update_balance(
asset_in,
&lbp_participant,
lbp_participant_initial_asset_in_balance.try_into().unwrap(),
)
.unwrap();
<Test as Config>::MultiCurrency::update_balance(
asset_out,
&lbp_participant,
lbp_participant_initial_asset_out_balance.try_into().unwrap(),
)
.unwrap();
assert_ok!(LBPPallet::create_pool(
Origin::root(),
pool_owner,
LBPAssetInfo {
id: asset_in,
amount: asset_in_pool_reserve,
initial_weight: asset_in_initial_weight,
final_weight: asset_in_final_weight,
},
LBPAssetInfo {
id: asset_out,
amount: asset_out_pool_reserve,
initial_weight: asset_out_initial_weight,
final_weight: asset_out_final_weight,
},
(sale_start, sale_end),
WeightCurveType::Linear,
pausable,
fee,
fee_receiver,
));
System::set_block_number(sale_start.checked_sub(1).unwrap());
// start LBP
// Walk every block of the sale and execute the scheduled trade (if
// any). Buys cap the max paid at amount * trade_limit_factor; sells
// set the min received to amount / trade_limit_factor.
for block_num in sale_start..=sale_end {
System::set_block_number(block_num);
if let Some((is_buy, amount)) = trades.get(&block_num) {
if *is_buy {
assert_ok!(LBPPallet::buy(
Origin::signed(lbp_participant),
asset_out,
asset_in,
*amount,
amount.saturating_mul(trade_limit_factor)
));
} else {
assert_ok!(LBPPallet::sell(
Origin::signed(lbp_participant),
asset_out,
asset_in,
*amount,
amount.checked_div(trade_limit_factor).unwrap()
));
}
}
}
// end LBP and consolidate results
System::set_block_number(sale_end.checked_add(1).unwrap());
let pool_account_result_asset_in = Currency::free_balance(asset_in, &pool_account);
let pool_account_result_asset_out = Currency::free_balance(asset_out, &pool_account);
// The owner is back at exactly the initial balances (the reserves are
// still in the pool at this point).
assert_eq!(
Currency::free_balance(asset_in, &pool_owner),
owner_initial_asset_in_balance
);
assert_eq!(
Currency::free_balance(asset_out, &pool_owner),
owner_initial_asset_out_balance
);
assert_eq!(Currency::free_balance(asset_in, &pool_account), 4_973_509);
assert_eq!(Currency::free_balance(asset_out, &pool_account), 125_000_009);
assert_eq!(Currency::free_balance(asset_in, &lbp_participant), 999_995_981_161);
assert_eq!(Currency::free_balance(asset_out, &lbp_participant), 1_000_374_999_991);
// remove liquidity from the pool
// Removing liquidity empties the pool account and credits the owner
// with whatever the pool held after trading.
assert_ok!(LBPPallet::remove_liquidity(Origin::signed(pool_owner), pool_account));
assert_eq!(Currency::free_balance(asset_in, &pool_account), 0);
assert_eq!(Currency::free_balance(asset_out, &pool_account), 0);
assert_eq!(
Currency::free_balance(asset_in, &pool_owner),
owner_initial_asset_in_balance
.checked_add(pool_account_result_asset_in)
.unwrap()
);
assert_eq!(
Currency::free_balance(asset_out, &pool_owner),
owner_initial_asset_out_balance
.checked_add(pool_account_result_asset_out)
.unwrap()
);
// Fees accrued in asset_in only (45_330), none in asset_out.
assert_eq!(Currency::free_balance(asset_in, &fee_receiver), 45_330);
assert_eq!(Currency::free_balance(asset_out, &fee_receiver), 0);
});
}
|
use std::collections::HashSet;
use hydroflow::compiled::pull::HalfMultisetJoinState;
use hydroflow::util::collect_ready;
use hydroflow::{assert_graphvis_snapshots, hydroflow_syntax};
use multiplatform_test::multiplatform_test;
#[multiplatform_test]
// Two chained `persist()` operators replay the single source element into
// the fold every tick, so the per-tick fold output grows by one each tick:
// 1, 2, ..., 10 over ten ticks.
pub fn test_persist_basic() {
let (result_send, mut result_recv) = hydroflow::util::unbounded_channel::<u32>();
let mut hf = hydroflow_syntax! {
source_iter([1])
-> persist()
-> persist()
-> fold(0, |a: &mut _, b| *a += b)
-> for_each(|x| result_send.send(x).unwrap());
};
assert_graphvis_snapshots!(hf);
// Run exactly ten ticks, checking the tick counter advances each time.
for tick in 0..10 {
assert_eq!(tick, hf.current_tick());
hf.run_tick();
}
assert_eq!(
&[1, 2, 3, 4, 5, 6, 7, 8, 9, 10],
&*collect_ready::<Vec<_>, _>(&mut result_recv)
);
}
#[multiplatform_test]
// Same accumulation behavior as `test_persist_basic`, but the graph is
// shaped (with `union()` merge points fed by `null()`) so that each
// `persist()` is compiled in a pull-based position.
pub fn test_persist_pull() {
let (result_send, mut result_recv) = hydroflow::util::unbounded_channel::<u32>();
let mut hf = hydroflow_syntax! {
// Structured to ensure `persist()` is pull-based.
source_iter([1]) -> persist() -> m0;
null() -> m0;
m0 = union() -> persist() -> m1;
null() -> m1;
m1 = union()
-> fold(0, |a: &mut _, b| *a += b)
-> for_each(|x| result_send.send(x).unwrap());
};
assert_graphvis_snapshots!(hf);
for tick in 0..10 {
assert_eq!(tick, hf.current_tick());
hf.run_tick();
}
// Same expected output as the basic test: one more replayed copy per tick.
assert_eq!(
&[1, 2, 3, 4, 5, 6, 7, 8, 9, 10],
&*collect_ready::<Vec<_>, _>(&mut result_recv)
);
}
#[multiplatform_test]
// Same accumulation behavior again, but each `persist()` feeds a `tee()`
// so the operators are compiled in a push-based position.
pub fn test_persist_push() {
let (result_send, mut result_recv) = hydroflow::util::unbounded_channel::<u32>();
let mut hf = hydroflow_syntax! {
t0 = source_iter([1]) -> persist() -> tee();
t0 -> null();
t1 = t0 -> persist() -> tee();
t1 -> null();
t1 -> fold(0, |a: &mut _, b| *a += b) -> for_each(|x| result_send.send(x).unwrap());
};
assert_graphvis_snapshots!(hf);
for tick in 0..10 {
assert_eq!(tick, hf.current_tick());
hf.run_tick();
}
// Same expected output as the basic test: one more replayed copy per tick.
assert_eq!(
&[1, 2, 3, 4, 5, 6, 7, 8, 9, 10],
&*collect_ready::<Vec<_>, _>(&mut result_recv)
);
}
#[multiplatform_test]
// Persisted inputs feeding a `join::<'tick>()`: the static ("hello",
// "world") pair and the streamed pairs are both replayed each tick, so the
// join re-matches across ticks. Output is printed rather than asserted.
pub fn test_persist_join() {
let (input_send, input_recv) = hydroflow::util::unbounded_channel::<(&str, &str)>();
let mut flow = hydroflow::hydroflow_syntax! {
source_iter([("hello", "world")]) -> persist() -> [0]my_join;
source_stream(input_recv) -> persist() -> [1]my_join;
my_join = join::<'tick>() -> for_each(|(k, (v1, v2))| println!("({}, ({}, {}))", k, v1, v2));
};
// Feed one pair per tick; each tick's join sees all pairs sent so far.
input_send.send(("hello", "oakland")).unwrap();
flow.run_tick();
input_send.send(("hello", "san francisco")).unwrap();
flow.run_tick();
}
#[multiplatform_test]
// The persisted input is re-folded from scratch every tick (fold is
// `'tick`-scoped), so the running sum (1, then 1+2=3) is cross-joined with
// whatever arrived on the other input that tick.
pub fn test_persist_replay_join() {
let (persist_input_send, persist_input) = hydroflow::util::unbounded_channel::<u32>();
let (other_input_send, other_input) = hydroflow::util::unbounded_channel::<u32>();
let (result_send, mut result_recv) = hydroflow::util::unbounded_channel::<(u32, u32)>();
let mut hf = hydroflow_syntax! {
source_stream(persist_input)
-> persist()
-> fold::<'tick>(0, |a: &mut _, b| *a += b)
-> next_stratum()
-> [0]product_node;
source_stream(other_input) -> [1] product_node;
product_node = cross_join::<'tick, 'tick>() -> for_each(|x| result_send.send(x).unwrap());
};
assert_graphvis_snapshots!(hf);
// Tick 0: persisted sum is 1, other input has 2 -> (1, 2).
persist_input_send.send(1).unwrap();
other_input_send.send(2).unwrap();
hf.run_tick();
assert_eq!(&[(1, 2)], &*collect_ready::<Vec<_>, _>(&mut result_recv));
// Tick 1: persisted sum replays to 1+2=3, other input has 2 -> (3, 2).
persist_input_send.send(2).unwrap();
other_input_send.send(2).unwrap();
hf.run_tick();
assert_eq!(&[(3, 2)], &*collect_ready::<Vec<_>, _>(&mut result_recv));
// Tick 2: no new persisted input; sum stays 3, other input has 3 -> (3, 3).
other_input_send.send(3).unwrap();
hf.run_tick();
assert_eq!(&[(3, 3)], &*collect_ready::<Vec<_>, _>(&mut result_recv));
}
#[multiplatform_test]
// Handoff-placement regression test: the persisted LHS input from tick 0
// must still be visible when the RHS (which crosses two handoffs via the
// tee'd subgraph) produces its value in a later tick, yielding (0, 1).
pub fn test_persist_double_handoff() {
let (input_send, input_recv) = hydroflow::util::unbounded_channel::<usize>();
let (input_2_send, input_2_recv) = hydroflow::util::unbounded_channel::<usize>();
let (output_send, mut output_recv) = hydroflow::util::unbounded_channel::<(usize, usize)>();
let mut flow = hydroflow::hydroflow_syntax! {
teed_first_sg = source_stream(input_2_recv) -> tee();
teed_first_sg -> [0] joined_second_sg;
teed_first_sg -> [1] joined_second_sg;
source_stream(input_recv) -> persist()
-> inspect(|x| println!("LHS {} {}:{}", x, context.current_tick(), context.current_stratum())) -> [0] cross;
joined_second_sg = cross_join::<'tick, 'tick>() -> map(|t| t.0)
-> inspect(|x| println!("RHS {} {}:{}", x, context.current_tick(), context.current_stratum())) -> [1] cross;
cross = cross_join::<'tick, 'tick, HalfMultisetJoinState>() -> for_each(|x| output_send.send(x).unwrap());
};
println!("A {}:{}", flow.current_tick(), flow.current_stratum());
// Tick 0: only the persisted side has data, so the cross join is empty.
input_send.send(0).unwrap();
flow.run_tick();
println!("B {}:{}", flow.current_tick(), flow.current_stratum());
assert!(collect_ready::<Vec<_>, _>(&mut output_recv).is_empty());
// Tick 1: RHS produces 1; persisted 0 replays -> (0, 1).
input_2_send.send(1).unwrap();
flow.run_tick();
println!("C {}:{}", flow.current_tick(), flow.current_stratum());
assert_eq!(&[(0, 1)], &*collect_ready::<Vec<_>, _>(&mut output_recv));
}
#[multiplatform_test]
// Variant of the double-handoff test with only one handoff on the RHS
// path (tee -> union instead of a nested cross join); same expectation:
// persisted 0 from tick 0 joins with 1 arriving in tick 1.
pub fn test_persist_single_handoff() {
let (input_send, input_recv) = hydroflow::util::unbounded_channel::<usize>();
let (input_2_send, input_2_recv) = hydroflow::util::unbounded_channel::<usize>();
let (output_send, mut output_recv) = hydroflow::util::unbounded_channel::<(usize, usize)>();
let mut flow = hydroflow::hydroflow_syntax! {
teed_first_sg = source_stream(input_2_recv) -> tee();
teed_first_sg [0] -> null();
teed_first_sg [1] -> joined_second_sg;
null() -> joined_second_sg;
source_stream(input_recv) -> persist()
-> inspect(|x| println!("LHS {} {}:{}", x, context.current_tick(), context.current_stratum())) -> [0] cross;
joined_second_sg = union()
-> inspect(|x| println!("RHS {} {}:{}", x, context.current_tick(), context.current_stratum())) -> [1] cross;
cross = cross_join::<'tick, 'tick, HalfMultisetJoinState>() -> for_each(|x| output_send.send(x).unwrap());
};
println!("A {}:{}", flow.current_tick(), flow.current_stratum());
// Tick 0: only the persisted side has data, so the cross join is empty.
input_send.send(0).unwrap();
flow.run_tick();
println!("B {}:{}", flow.current_tick(), flow.current_stratum());
assert!(collect_ready::<Vec<_>, _>(&mut output_recv).is_empty());
// Tick 1: RHS produces 1; persisted 0 replays -> (0, 1).
input_2_send.send(1).unwrap();
flow.run_tick();
println!("C {}:{}", flow.current_tick(), flow.current_stratum());
assert_eq!(&[(0, 1)], &*collect_ready::<Vec<_>, _>(&mut output_recv));
}
#[multiplatform_test]
// Simplest variant of the handoff tests: the RHS feeds the cross join
// directly (no extra handoffs); persisted 0 from tick 0 still joins with
// the 1 that arrives in tick 1.
pub fn test_persist_single_subgraph() {
let (input_send, input_recv) = hydroflow::util::unbounded_channel::<usize>();
let (input_2_send, input_2_recv) = hydroflow::util::unbounded_channel::<usize>();
let (output_send, mut output_recv) = hydroflow::util::unbounded_channel::<(usize, usize)>();
let mut flow = hydroflow::hydroflow_syntax! {
source_stream(input_2_recv) -> joined_second_sg;
source_stream(input_recv) -> persist()
-> inspect(|x| println!("LHS {} {}:{}", x, context.current_tick(), context.current_stratum())) -> [0] cross;
joined_second_sg = inspect(|x| println!("RHS {} {}:{}", x, context.current_tick(), context.current_stratum())) -> [1] cross;
cross = cross_join::<'tick, 'tick, HalfMultisetJoinState>() -> for_each(|x| output_send.send(x).unwrap());
};
println!("A {}:{}", flow.current_tick(), flow.current_stratum());
// Tick 0: only the persisted side has data, so the cross join is empty.
input_send.send(0).unwrap();
flow.run_tick();
println!("B {}:{}", flow.current_tick(), flow.current_stratum());
assert!(collect_ready::<Vec<_>, _>(&mut output_recv).is_empty());
// Tick 1: RHS produces 1; persisted 0 replays -> (0, 1).
input_2_send.send(1).unwrap();
flow.run_tick();
println!("C {}:{}", flow.current_tick(), flow.current_stratum());
assert_eq!(&[(0, 1)], &*collect_ready::<Vec<_>, _>(&mut output_recv));
}
#[multiplatform_test]
// A single graph containing `persist()` in both a pull position (before
// the tee) and a push position (after it); with one `run_available()`
// call both branches emit exactly the source elements 1, 2, 3.
pub fn test_persist() {
let (pull_tx, mut pull_rx) = hydroflow::util::unbounded_channel::<usize>();
let (push_tx, mut push_rx) = hydroflow::util::unbounded_channel::<usize>();
let mut df = hydroflow_syntax! {
my_tee = source_iter([1, 2, 3])
-> persist() // pull
-> tee();
my_tee
-> for_each(|v| pull_tx.send(v).unwrap());
my_tee
-> persist() // push
-> for_each(|v| push_tx.send(v).unwrap());
};
assert_graphvis_snapshots!(df);
df.run_available();
assert_eq!(&[1, 2, 3], &*collect_ready::<Vec<_>, _>(&mut pull_rx));
assert_eq!(&[1, 2, 3], &*collect_ready::<Vec<_>, _>(&mut push_rx));
}
#[multiplatform_test]
// `persist_mut()` honors `Persistence::Delete`: the first stage drops the
// deleted 2 (leaving 1, 3, 4), and the second stage additionally drops 3
// via the injected Persist(3)/Delete(3) pair (leaving 1, 4).
pub fn test_persist_mut() {
use hydroflow::util::Persistence::*;
let (pull_tx, mut pull_rx) = hydroflow::util::unbounded_channel::<usize>();
let (push_tx, mut push_rx) = hydroflow::util::unbounded_channel::<usize>();
let mut df = hydroflow_syntax! {
my_tee = source_iter([Persist(1), Persist(2), Persist(3), Persist(4), Delete(2)])
-> persist_mut() // pull
-> tee();
my_tee
-> for_each(|v| pull_tx.send(v).unwrap());
my_tee
-> flat_map(|x| if x == 3 {vec![Persist(x), Delete(x)]} else {vec![Persist(x)]})
-> persist_mut() // push
-> for_each(|v| push_tx.send(v).unwrap());
};
assert_graphvis_snapshots!(df);
df.run_available();
assert_eq!(&[1, 3, 4], &*collect_ready::<Vec<_>, _>(&mut pull_rx));
assert_eq!(&[1, 4], &*collect_ready::<Vec<_>, _>(&mut push_rx));
}
#[multiplatform_test]
// Keyed counterpart of `test_persist_mut`: `persist_mut_keyed()` deletes
// by key. Key 2 is deleted upstream and key 3 downstream; results are
// compared as HashSets since keyed output ordering is not asserted.
pub fn test_persist_mut_keyed() {
use hydroflow::util::PersistenceKeyed::*;
let (pull_tx, mut pull_rx) = hydroflow::util::unbounded_channel::<usize>();
let (push_tx, mut push_rx) = hydroflow::util::unbounded_channel::<usize>();
let mut df = hydroflow_syntax! {
my_tee = source_iter([Persist(1, 1), Persist(2, 2), Persist(3, 3), Persist(4, 4), Delete(2)])
-> persist_mut_keyed() // pull
-> tee();
my_tee
-> for_each(|(_k, v)| pull_tx.send(v).unwrap());
my_tee
-> flat_map(|(k, v)| if v == 3 {vec![Persist(k, v), Delete(k)]} else {vec![Persist(k, v)]})
-> persist_mut_keyed() // push
-> for_each(|(_k, v)| push_tx.send(v).unwrap());
};
assert_graphvis_snapshots!(df);
df.run_available();
assert_eq!(
HashSet::from_iter([1, 3, 4]),
collect_ready::<HashSet<_>, _>(&mut pull_rx)
);
assert_eq!(
HashSet::from_iter([1, 4]),
collect_ready::<HashSet<_>, _>(&mut push_rx)
);
}
|
use diesel::pg::PgConnection;
use diesel::r2d2::{Builder, ConnectionManager, Pool};
pub(crate) use super::prelude::{self, *};
/// Crate-wide alias for an r2d2 pool of Postgres connections.
pub(crate) type ConnectionPool = Pool<ConnectionManager<PgConnection>>;
#[allow(dead_code)]
/// Opens a single Postgres connection using `DATABASE_URL`, loading `.env`
/// into the process environment first (best effort).
///
/// # Panics
/// Panics when `DATABASE_URL` is unset or the connection cannot be
/// established; the panic message includes the URL and the underlying error.
pub(crate) fn establish_connection() -> PgConnection {
    dotenv().ok();
    let database_url = env::var("DATABASE_URL").expect("DATABASE_URL must be set");
    // `unwrap_or_else` builds the message only on failure; the previous
    // `expect(&format!(..))` allocated it on every call (clippy
    // `expect_fun_call`) and discarded the underlying diesel error.
    PgConnection::establish(&database_url)
        .unwrap_or_else(|e| panic!("Error connecting to {}: {}", database_url, e))
}
#[allow(dead_code)]
pub(crate) fn get_connection_pool()->Pool<ConnectionManager<PgConnection>>{
// Supports user configuration via .env.
let max_size = env::var("CONNECTION_POOL_MAX_SIZE").map(|s|str::parse::<u32>(&s).unwrap_or(64)).unwrap_or(64);
let min_idle = env::var("CONNECTION_POOL_MIN_IDLE").map(|s|str::parse::<u32>(&s).unwrap_or(32)).unwrap_or(32);
let database_url = env::var("DATABASE_URL").expect("DATABASE_URL must be set");
let manager:ConnectionManager<PgConnection> = ConnectionManager::new(database_url);
Builder::new().max_size(max_size).min_idle(Some(min_idle)).build(manager).expect("Connection pool initialization failed")
} |
/// A 2D point in Cartesian coordinates.
struct Point {
x: f64,
y: f64,
}
impl Point {
    /// The point at the coordinate origin, `(0.0, 0.0)`.
    fn origin() -> Point {
        Point { x: 0.0, y: 0.0 }
    }

    /// A point at the given coordinates.
    fn new(x: f64, y: f64) -> Point {
        // Field-init shorthand replaces the redundant `x: x, y: y`
        // (clippy `redundant_field_names`).
        Point { x, y }
    }
}
/// An axis-aligned rectangle described by two opposite corner points.
struct Rectangle {
p1: Point,
p2: Point,
}
impl Rectangle {
fn area(&self) -> f64 {
let Point { x: x1, y: y1 } = self.p1;
let Point { x: x2, y: y2 } = self.p2;
((x1 - x2) * (y1 - y2)).abs()
}
}
fn main() {
    // A 3 x 4 rectangle anchored at the origin (area 12).
    let rectangle = Rectangle {
        p1: Point::origin(),
        p2: Point::new(3.0, 4.0),
    };
    println!("Rectangle area: {}", rectangle.area());
}
|
//!
//! # Arrays
//!
//! This module implements fixed-length arrays and utility functions for it.
//!
//! **Note** that all macros starting with an underscore (`_array_base` etc.)
//! are not intended for public use. Unfortunately it's not possible to hide
//! them.
//!
//! To define a new array type with name `State`, holding `16` `u32` run
//!
//! ```
//! use hacspec_lib::*;
//! array!(State, 16, u32, type_for_indexes: StateIdx);
//! ```
//!
//! ## Instantiating Arrays
//! There are several different ways of creating array types.
//!
//! ###
#[macro_export]
#[doc(hidden)]
// Base macro generating a fixed-length array newtype `$name` holding `$l`
// elements of type `$t`, together with constructors, slicing/chunking
// helpers, `SeqTrait`, indexing by several integer types, and hex parsing.
// Hidden from docs: users should go through the public `array!`-style
// wrappers instead.
macro_rules! _array_base {
($name:ident,$l:expr,$t:ty) => {
/// Fixed length byte array.
// Because Rust requires fixed length arrays to have a known size at
// compile time there's no generic fixed length byte array here.
// Use this to define the fixed length byte arrays needed in your code.
#[allow(non_camel_case_types)]
#[derive(Clone, Copy)]
pub struct $name(pub [$t; $l]);
// Constructors.
impl $name {
#[cfg_attr(feature = "use_attributes", unsafe_hacspec($name))]
// All-default (zero-like) array.
pub fn new() -> Self {
Self([<$t>::default(); $l])
}
#[cfg_attr(feature = "use_attributes", in_hacspec($name))]
// The fixed length `$l` of this array type.
pub fn length() -> usize {
$l
}
#[cfg_attr(feature = "use_attributes", unsafe_hacspec($name))]
pub fn from_array(v: [$t; $l]) -> Self {
Self(v.clone())
}
#[cfg_attr(feature = "use_attributes", not_hacspec($name))]
// Copies `v` into the front of a fresh array; any remaining tail
// elements keep their default value. `v` may be shorter than `$l`.
pub fn from_native_slice(v: &[$t]) -> Self {
debug_assert!(v.len() <= $l);
let mut tmp = [<$t>::default(); $l];
for i in 0..v.len() {
tmp[i] = v[i];
}
Self(tmp.clone())
}
}
// Slicing and chunking helpers (all return fresh owned values).
impl $name {
#[cfg_attr(feature = "use_attributes", in_hacspec($name))]
// Builds an array from `len` elements of `input` starting at `start`.
pub fn from_slice<A: SeqTrait<$t>>(input: &A, start: usize, len: usize) -> Self {
let mut a = Self::new();
debug_assert!(len <= a.len(), "{} > {}", len, a.len());
a = a.update_slice(0, input, start, len);
a
}
#[cfg_attr(feature = "use_attributes", in_hacspec)]
// Concatenation always yields a variable-length `Seq`, since the
// combined length is not a compile-time constant.
pub fn concat<A: SeqTrait<$t>>(&self, next: &A) -> Seq<$t> {
let mut out = Seq::new(self.len() + next.len());
out = out.update_start(self);
out = out.update_slice(self.len(), next, 0, next.len());
out
}
#[cfg_attr(feature = "use_attributes", in_hacspec($name))]
pub fn from_slice_range<A: SeqTrait<$t>>(input: &A, r: Range<usize>) -> Self {
Self::from_slice(input, r.start, r.end - r.start)
}
#[cfg_attr(feature = "use_attributes", in_hacspec($name))]
pub fn slice(&self, start_out: usize, len: usize) -> Seq<$t> {
Seq::from_slice(self, start_out, len)
}
#[cfg_attr(feature = "use_attributes", in_hacspec($name))]
pub fn slice_range(&self, r: Range<usize>) -> Seq<$t> {
self.slice(r.start, r.end - r.start)
}
#[cfg_attr(feature = "use_attributes", in_hacspec($name))]
// Number of `chunk_size` chunks, rounding up for a partial last chunk.
pub fn num_chunks(&self, chunk_size: usize) -> usize {
(self.len() + chunk_size - 1) / chunk_size
}
#[cfg_attr(feature = "use_attributes", in_hacspec($name))]
// Length of chunk `chunk_number`: `chunk_size` except for a shorter
// final chunk.
pub fn get_chunk_len(&self, chunk_size: usize, chunk_number: usize) -> usize {
let idx_start = chunk_size * chunk_number;
if idx_start + chunk_size > self.len() {
self.len() - idx_start
} else {
chunk_size
}
}
#[cfg_attr(feature = "use_attributes", in_hacspec($name))]
// Returns (actual chunk length, copy of the chunk).
pub fn get_chunk(&self, chunk_size: usize, chunk_number: usize) -> (usize, Seq<$t>) {
let idx_start = chunk_size * chunk_number;
let len = self.get_chunk_len(chunk_size, chunk_number);
let out = self.slice(idx_start, len);
(len, out)
}
#[cfg_attr(feature = "use_attributes", in_hacspec($name))]
// Overwrites chunk `chunk_number` with `input`, which must match the
// chunk's actual length exactly.
pub fn set_chunk<A: SeqTrait<$t>>(
self,
chunk_size: usize,
chunk_number: usize,
input: &A,
) -> Self {
let idx_start = chunk_size * chunk_number;
let len = self.get_chunk_len(chunk_size, chunk_number);
debug_assert!(
input.len() == len,
"the chunk length should match the input. got {}, expected {}",
input.len(),
len
);
self.update_slice(idx_start, input, 0, len)
}
}
// `default()` is the all-default-elements array (same as `new()`).
impl Default for $name {
#[cfg_attr(feature = "use_attributes", in_hacspec($name))]
fn default() -> Self {
$name::new()
}
}
// The shared sequence abstraction; `create` only accepts the fixed
// length `$l` since this type cannot be resized.
impl SeqTrait<$t> for $name {
#[cfg_attr(feature = "use_attributes", in_hacspec($name))]
fn create(x: usize) -> Self {
assert_eq!(x, $l);
Self::new()
}
#[cfg_attr(feature = "use_attributes", unsafe_hacspec($name))]
fn len(&self) -> usize {
$l
}
#[cfg_attr(feature = "use_attributes", not_hacspec($name))]
fn iter(&self) -> core::slice::Iter<$t> {
self.0.iter()
}
#[cfg_attr(feature = "use_attributes", in_hacspec($name))]
// Copies `v[start_in..start_in+len]` into `self[start_out..]`,
// consuming and returning `self` (functional update style).
fn update_slice<A: SeqTrait<$t>>(
mut self,
start_out: usize,
v: &A,
start_in: usize,
len: usize,
) -> Self {
debug_assert!(self.len() >= start_out + len);
debug_assert!(v.len() >= start_in + len);
for i in 0..len {
self[start_out + i] = v[start_in + i];
}
self
}
#[cfg_attr(feature = "use_attributes", in_hacspec($name))]
fn update<A: SeqTrait<$t>>(self, start: usize, v: &A) -> Self {
let len = v.len();
self.update_slice(start, v, 0, len)
}
#[cfg_attr(feature = "use_attributes", in_hacspec($name))]
fn update_start<A: SeqTrait<$t>>(self, v: &A) -> Self {
let len = v.len();
self.update_slice(0, v, 0, len)
}
}
// Indexing by usize, u8, u32, and i32 (the non-usize variants cast to
// usize; a negative i32 index will wrap and then panic on bounds).
impl Index<usize> for $name {
type Output = $t;
#[cfg_attr(feature = "use_attributes", unsafe_hacspec($name))]
fn index(&self, i: usize) -> &$t {
&self.0[i]
}
}
impl IndexMut<usize> for $name {
#[cfg_attr(feature = "use_attributes", unsafe_hacspec($name))]
fn index_mut(&mut self, i: usize) -> &mut $t {
&mut self.0[i]
}
}
impl Index<u8> for $name {
type Output = $t;
#[cfg_attr(feature = "use_attributes", unsafe_hacspec($name))]
fn index(&self, i: u8) -> &$t {
&self.0[i as usize]
}
}
impl IndexMut<u8> for $name {
#[cfg_attr(feature = "use_attributes", not_hacspec($name))]
fn index_mut(&mut self, i: u8) -> &mut $t {
&mut self.0[i as usize]
}
}
impl Index<u32> for $name {
type Output = $t;
#[cfg_attr(feature = "use_attributes", unsafe_hacspec($name))]
fn index(&self, i: u32) -> &$t {
&self.0[i as usize]
}
}
impl IndexMut<u32> for $name {
#[cfg_attr(feature = "use_attributes", not_hacspec($name))]
fn index_mut(&mut self, i: u32) -> &mut $t {
&mut self.0[i as usize]
}
}
impl Index<i32> for $name {
type Output = $t;
#[cfg_attr(feature = "use_attributes", unsafe_hacspec($name))]
fn index(&self, i: i32) -> &$t {
&self.0[i as usize]
}
}
impl IndexMut<i32> for $name {
#[cfg_attr(feature = "use_attributes", not_hacspec($name))]
fn index_mut(&mut self, i: i32) -> &mut $t {
&mut self.0[i as usize]
}
}
impl Index<RangeFull> for $name {
type Output = [$t];
#[cfg_attr(feature = "use_attributes", unsafe_hacspec($name))]
fn index(&self, r: RangeFull) -> &[$t] {
&self.0[r]
}
}
// Conversions from other owned/sequence representations.
impl $name {
#[cfg_attr(feature = "use_attributes", not_hacspec($name))]
// Requires `x.len() == $l` (checked only in debug builds).
pub fn from_vec(x: Vec<$t>) -> $name {
debug_assert_eq!(x.len(), $l);
let mut tmp = [<$t>::default(); $l];
for (i, e) in x.iter().enumerate() {
tmp[i] = *e;
}
$name(tmp.clone())
}
// We can't use the [From] trait here because otherwise it would conflict with
// the From<T> for T core implementation, as the array also implements the [SeqTrait].
#[cfg_attr(feature = "use_attributes", in_hacspec($name))]
pub fn from_seq<T: SeqTrait<$t>>(x: &T) -> $name {
debug_assert_eq!(x.len(), $l);
let mut out = $name::new();
for i in 0..x.len() {
out[i] = x[i];
}
out
}
}
// Hex parsing helpers.
impl $name {
// Parses two hex characters per element via `u8::from_str_radix` and
// `<$t>::from`.
// NOTE(review): the iterator steps by 2 hex chars (one byte) per
// element regardless of `size_of::<$t>()` — confirm this macro is only
// instantiated with byte-sized element types.
fn hex_string_to_vec(s: &str) -> Vec<$t> {
debug_assert!(s.len() % core::mem::size_of::<$t>() == 0);
let b: Result<Vec<$t>, ParseIntError> = (0..s.len())
.step_by(2)
.map(|i| u8::from_str_radix(&s[i..i + 2], 16).map(<$t>::from))
.collect();
b.expect("Error parsing hex string")
}
/// Read hex string to Bytes.
#[cfg_attr(feature = "use_attributes", unsafe_hacspec($name))]
pub fn from_hex(s: &str) -> $name {
let v = $name::hex_string_to_vec(s);
let mut o = $name::new();
debug_assert!(v.len() == $l);
for i in 0..$l {
o[i] = v[i]
}
o
}
}
};
}
#[macro_export]
macro_rules! generic_array {
($name:ident,$l:expr) => {
/// Fixed length byte array.
// Because Rust requires fixed length arrays to have a known size at
// compile time there's no generic fixed length byte array here.
// Use this to define the fixed length byte arrays needed in your code.
#[allow(non_camel_case_types)]
#[derive(Clone, Copy)]
pub struct $name<T>(pub [T; $l]);
impl<T: Numeric + Copy> $name<T> {
#[cfg_attr(feature = "use_attributes", unsafe_hacspec($name))]
pub fn new() -> Self {
Self([<T>::default(); $l])
}
#[cfg_attr(feature = "use_attributes", in_hacspec($name))]
pub fn length() -> usize {
$l
}
#[cfg_attr(feature = "use_attributes", unsafe_hacspec($name))]
pub fn from_array(v: [T; $l]) -> Self {
Self(v.clone())
}
#[cfg_attr(feature = "use_attributes", not_hacspec($name))]
pub fn from_native_slice(v: &[T]) -> Self {
debug_assert!(v.len() <= $l);
let mut tmp = [<T>::default(); $l];
for i in 0..v.len() {
tmp[i] = v[i];
}
Self(tmp.clone())
}
}
impl<T: Numeric + Copy> $name<T> {
#[cfg_attr(feature = "use_attributes", in_hacspec($name))]
pub fn from_slice<A: SeqTrait<T>>(input: &A, start: usize, len: usize) -> Self {
let mut a = Self::new();
debug_assert!(len <= a.len());
a = a.update_slice(0, input, start, len);
a
}
#[cfg_attr(feature = "use_attributes", in_hacspec)]
pub fn concat<A: SeqTrait<T>>(&self, next: &A) -> Seq<T> {
let mut out = Seq::new(self.len() + next.len());
out = out.update_start(self);
out = out.update_slice(self.len(), next, 0, next.len());
out
}
#[cfg_attr(feature = "use_attributes", in_hacspec($name))]
pub fn from_slice_range<A: SeqTrait<T>>(input: &A, r: Range<usize>) -> Self {
Self::from_slice(input, r.start, r.end - r.start)
}
#[cfg_attr(feature = "use_attributes", in_hacspec($name))]
pub fn slice(&self, start_out: usize, len: usize) -> Seq<T> {
Seq::from_slice(self, start_out, len)
}
#[cfg_attr(feature = "use_attributes", in_hacspec($name))]
pub fn slice_range(&self, r: Range<usize>) -> Seq<T> {
self.slice(r.start, r.end - r.start)
}
#[cfg_attr(feature = "use_attributes", in_hacspec($name))]
pub fn num_chunks(&self, chunk_size: usize) -> usize {
(self.len() + chunk_size - 1) / chunk_size
}
#[cfg_attr(feature = "use_attributes", in_hacspec($name))]
pub fn get_chunk_len(&self, chunk_size: usize, chunk_number: usize) -> usize {
let idx_start = chunk_size * chunk_number;
if idx_start + chunk_size > self.len() {
self.len() - idx_start
} else {
chunk_size
}
}
#[cfg_attr(feature = "use_attributes", in_hacspec($name))]
pub fn get_chunk(&self, chunk_size: usize, chunk_number: usize) -> (usize, Seq<T>) {
let idx_start = chunk_size * chunk_number;
let len = self.get_chunk_len(chunk_size, chunk_number);
let out = self.slice(idx_start, len);
(len, out)
}
#[cfg_attr(feature = "use_attributes", in_hacspec($name))]
pub fn set_chunk<A: SeqTrait<T>>(
self,
chunk_size: usize,
chunk_number: usize,
input: &A,
) -> Self {
let idx_start = chunk_size * chunk_number;
let len = self.get_chunk_len(chunk_size, chunk_number);
debug_assert!(
input.len() == len,
"the chunk length should match the input. got {}, expected {}",
input.len(),
len
);
self.update_slice(idx_start, input, 0, len)
}
}
impl<T: Numeric + Copy> Default for $name<T> {
#[cfg_attr(feature = "use_attributes", in_hacspec($name))]
fn default() -> Self {
$name::new()
}
}
impl<T: Numeric + Copy> SeqTrait<T> for $name<T> {
#[cfg_attr(feature = "use_attributes", in_hacspec($name))]
fn create(x: usize) -> Self {
assert_eq!(x, $l);
Self::new()
}
#[cfg_attr(feature = "use_attributes", unsafe_hacspec($name))]
fn len(&self) -> usize {
$l
}
#[cfg_attr(feature = "use_attributes", not_hacspec($name))]
fn iter(&self) -> core::slice::Iter<T> {
self.0.iter()
}
#[cfg_attr(feature = "use_attributes", in_hacspec($name))]
fn update_slice<A: SeqTrait<T>>(
mut self,
start_out: usize,
v: &A,
start_in: usize,
len: usize,
) -> Self {
debug_assert!(self.len() >= start_out + len);
debug_assert!(v.len() >= start_in + len);
for i in 0..len {
self[start_out + i] = v[start_in + i];
}
self
}
#[cfg_attr(feature = "use_attributes", in_hacspec($name))]
fn update<A: SeqTrait<T>>(self, start: usize, v: &A) -> Self {
let len = v.len();
self.update_slice(start, v, 0, len)
}
#[cfg_attr(feature = "use_attributes", in_hacspec($name))]
fn update_start<A: SeqTrait<T>>(self, v: &A) -> Self {
let len = v.len();
self.update_slice(0, v, 0, len)
}
}
// Indexing into the underlying fixed-size array for the common integer
// index types. The `u8`/`u32`/`i32` variants cast to `usize`; a negative
// `i32` wraps to a very large `usize` and panics on the bounds check.
impl<T: Numeric + Copy> Index<usize> for $name<T> {
    type Output = T;
    #[cfg_attr(feature = "use_attributes", unsafe_hacspec($name))]
    fn index(&self, i: usize) -> &T {
        &self.0[i]
    }
}
impl<T: Numeric + Copy> IndexMut<usize> for $name<T> {
    #[cfg_attr(feature = "use_attributes", unsafe_hacspec($name))]
    fn index_mut(&mut self, i: usize) -> &mut T {
        &mut self.0[i]
    }
}
impl<T: Numeric + Copy> Index<u8> for $name<T> {
    type Output = T;
    #[cfg_attr(feature = "use_attributes", unsafe_hacspec($name))]
    fn index(&self, i: u8) -> &T {
        &self.0[i as usize]
    }
}
impl<T: Numeric + Copy> IndexMut<u8> for $name<T> {
    #[cfg_attr(feature = "use_attributes", not_hacspec($name))]
    fn index_mut(&mut self, i: u8) -> &mut T {
        &mut self.0[i as usize]
    }
}
impl<T: Numeric + Copy> Index<u32> for $name<T> {
    type Output = T;
    #[cfg_attr(feature = "use_attributes", unsafe_hacspec($name))]
    fn index(&self, i: u32) -> &T {
        &self.0[i as usize]
    }
}
impl<T: Numeric + Copy> IndexMut<u32> for $name<T> {
    #[cfg_attr(feature = "use_attributes", not_hacspec($name))]
    fn index_mut(&mut self, i: u32) -> &mut T {
        &mut self.0[i as usize]
    }
}
impl<T: Numeric + Copy> Index<i32> for $name<T> {
    type Output = T;
    #[cfg_attr(feature = "use_attributes", unsafe_hacspec($name))]
    fn index(&self, i: i32) -> &T {
        &self.0[i as usize]
    }
}
impl<T: Numeric + Copy> IndexMut<i32> for $name<T> {
    #[cfg_attr(feature = "use_attributes", not_hacspec($name))]
    fn index_mut(&mut self, i: i32) -> &mut T {
        &mut self.0[i as usize]
    }
}
// Full-range indexing exposes the contents as a plain slice.
impl<T: Numeric + Copy> Index<RangeFull> for $name<T> {
    type Output = [T];
    #[cfg_attr(feature = "use_attributes", unsafe_hacspec($name))]
    fn index(&self, r: RangeFull) -> &[T] {
        &self.0[r]
    }
}
impl<T: Numeric + Copy> $name<T> {
    /// Builds the array from a vector; `x.len()` must equal `$l`
    /// (checked only via `debug_assert_eq!`; a longer vector panics on
    /// the out-of-bounds write below, a shorter one leaves the tail at
    /// the element default in release builds).
    #[cfg_attr(feature = "use_attributes", not_hacspec($name))]
    pub fn from_vec(x: Vec<T>) -> $name<T> {
        debug_assert_eq!(x.len(), $l);
        let mut tmp = [<T>::default(); $l];
        for (i, e) in x.iter().enumerate() {
            tmp[i] = *e;
        }
        // `tmp` is a `[T; $l]` of `Copy` elements and therefore `Copy`
        // itself; the previous `tmp.clone()` was redundant.
        $name(tmp)
    }
    // We can't use the [From] trait here because otherwise it would conflict with
    // the From<T> for T core implementation, as the array also implements the [SeqTrait].
    /// Builds the array from any sequence with the same length.
    #[cfg_attr(feature = "use_attributes", in_hacspec($name))]
    pub fn from_seq<U: SeqTrait<T>>(x: &U) -> $name<T> {
        debug_assert_eq!(x.len(), $l);
        let mut out = $name::new();
        for i in 0..x.len() {
            out[i] = x[i];
        }
        out
    }
}
/// **Warning:** declassifies secret integer types.
impl<T: Numeric + Copy> fmt::Debug for $name<T> {
    // Debug output shows the plain element values; for secret integer
    // element types this leaks the protected contents, hence the warning.
    #[cfg_attr(feature = "use_attributes", not_hacspec($name))]
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        self.0[..].iter().collect::<Vec<_>>().fmt(f)
    }
}
};
}
#[macro_export]
#[doc(hidden)]
/// This creates arrays for secret integers, i.e. `$t` is the secret integer
/// type and `$tbase` is the according Rust type.
macro_rules! _secret_array {
    ($name:ident,$l:expr,$t:ty, $tbase:ty) => {
        _array_base!($name, $l, $t);
        /// **Warning:** declassifies secret integer types.
        impl fmt::Debug for $name {
            #[cfg_attr(feature = "use_attributes", not_hacspec($name))]
            fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
                self.0[..]
                    .iter()
                    .map(|x| <$t>::declassify(*x))
                    .collect::<Vec<_>>()
                    .fmt(f)
            }
        }
        /// **Warning:** declassifies secret integer types.
        impl $name {
            /// Element-wise equality after declassifying both sides.
            #[cfg_attr(feature = "use_attributes", unsafe_hacspec($name))]
            pub fn declassify_eq(&self, other: &Self) -> bool {
                self.0[..]
                    .iter()
                    .map(|x| <$t>::declassify(*x))
                    .collect::<Vec<_>>()
                    == other.0[..]
                        .iter()
                        .map(|x| <$t>::declassify(*x))
                        .collect::<Vec<_>>()
            }
        }
        impl $name {
            /// Serializes the array into a big-endian byte sequence.
            // Consistency fix: this attribute previously omitted `($name)`
            // unlike every other hacspec attribute in this file.
            #[cfg_attr(feature = "use_attributes", unsafe_hacspec($name))]
            pub fn to_be_bytes(&self) -> Seq<U8> {
                // Bytes per element.
                const FACTOR: usize = core::mem::size_of::<$t>();
                let mut out: Seq<U8> = Seq::new($l * FACTOR);
                for i in 0..$l {
                    let tmp: $t = self[i];
                    let tmp = <$t>::to_be_bytes(&[tmp]);
                    for j in 0..FACTOR {
                        out[i * FACTOR + j] = tmp[j];
                    }
                }
                out
            }
            /// Serializes the array into a little-endian byte sequence.
            #[cfg_attr(feature = "use_attributes", unsafe_hacspec($name))]
            pub fn to_le_bytes(&self) -> Seq<U8> {
                const FACTOR: usize = core::mem::size_of::<$t>();
                let mut out: Seq<U8> = Seq::new($l * FACTOR);
                for i in 0..$l {
                    let tmp: $t = self[i];
                    let tmp = <$t>::to_le_bytes(&[tmp]);
                    for j in 0..FACTOR {
                        out[i * FACTOR + j] = tmp[j];
                    }
                }
                out
            }
        }
        impl $name {
            /// Classifies a public slice into a secret array; `v.len()` must
            /// equal `$l` (checked via `debug_assert!` only).
            #[cfg_attr(feature = "use_attributes", not_hacspec($name))]
            pub fn from_public_slice(v: &[$tbase]) -> $name {
                debug_assert!(v.len() == $l);
                Self::from_vec(
                    v[..]
                        .iter()
                        .map(|x| <$t>::classify(*x))
                        .collect::<Vec<$t>>(),
                )
            }
            /// **Warning:** declassifies the contents into a public array.
            #[cfg_attr(feature = "use_attributes", not_hacspec($name))]
            pub fn to_public_array(&self) -> [$tbase; $l] {
                let mut out = [0; $l];
                for (x, o) in self.0.iter().zip(out.iter_mut()) {
                    *o = <$t>::declassify(*x);
                }
                out
            }
            /// Create an array from a regular Rust array.
            ///
            /// # Examples
            ///
            /// ```
            /// use hacspec_lib::prelude::*;
            ///
            /// bytes!(Block, 5);
            /// let b = Block::from_public_array([1, 2, 3, 4, 5]);
            /// ```
            #[cfg_attr(feature = "use_attributes", not_hacspec($name))]
            pub fn from_public_array(v: [$tbase; $l]) -> $name {
                debug_assert!(v.len() == $l);
                Self::from_vec(
                    v[..]
                        .iter()
                        .map(|x| <$t>::classify(*x))
                        .collect::<Vec<$t>>(),
                )
            }
        }
    };
}
#[macro_export]
#[doc(hidden)]
/// Implements a fixed-length array type for public (non-secret) integers.
macro_rules! _public_array {
    ($name:ident,$l:expr,$t:ty) => {
        _array_base!($name, $l, $t);
        impl $name {
            /// Serializes the array into a little-endian byte sequence.
            #[cfg_attr(feature = "use_attributes", unsafe_hacspec($name))]
            pub fn into_le_bytes(self) -> Seq<u8> {
                // Bytes per element.
                const FACTOR: usize = core::mem::size_of::<$t>();
                let mut out: Seq<u8> = Seq::new($l * FACTOR);
                for i in 0..$l {
                    let tmp = <$t>::to_le_bytes(self[i]);
                    for j in 0..FACTOR {
                        out[i * FACTOR + j] = tmp[j];
                    }
                }
                out
            }
        }
        impl fmt::Debug for $name {
            #[cfg_attr(feature = "use_attributes", not_hacspec($name))]
            fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
                self.0[..].fmt(f)
            }
        }
        impl PartialEq for $name {
            // Element-wise comparison of the underlying slices.
            #[cfg_attr(feature = "use_attributes", unsafe_hacspec($name))]
            fn eq(&self, other: &Self) -> bool {
                self.0[..] == other.0[..]
            }
        }
    };
}
#[macro_export]
#[doc(hidden)]
/// Implements a secret `U8` array: `_secret_array!` plus the numeric traits
/// and word/hex conversion helpers.
macro_rules! _implement_secret_u8_array {
    ($name:ident, $l:expr) => {
        _secret_array!($name, $l, U8, u8);
        _implement_numeric_unsigned_secret!($name, U8);
        impl $name {
            /// Reinterprets the bytes as big-endian `U32` words.
            // NOTE(review): assumes `$l` is a multiple of 4; only the
            // debug_assert below checks this — confirm for release builds.
            #[allow(non_snake_case)]
            #[cfg_attr(feature = "use_attributes", unsafe_hacspec($name))]
            pub fn to_be_U32s(&self) -> Seq<U32> {
                let mut out = Seq::new($l / 4);
                for (i, block) in self.0.chunks(4).enumerate() {
                    debug_assert!(block.len() == 4);
                    out[i] = U32_from_be_bytes(U32Word::from_native_slice(block));
                }
                out
            }
            /// Reinterprets the bytes as little-endian `U32` words.
            #[allow(non_snake_case)]
            #[cfg_attr(feature = "use_attributes", unsafe_hacspec($name))]
            pub fn to_le_U32s(&self) -> Seq<U32> {
                let mut out = Seq::new($l / 4);
                for (i, block) in self.0.chunks(4).enumerate() {
                    debug_assert!(block.len() == 4);
                    out[i] = U32_from_le_bytes(U32Word::from_native_slice(block));
                }
                out
            }
            /// Reinterprets the bytes as big-endian `U64` words
            /// (assumes `$l` is a multiple of 8).
            #[allow(non_snake_case)]
            #[cfg_attr(feature = "use_attributes", unsafe_hacspec($name))]
            pub fn to_be_U64s(&self) -> Seq<U64> {
                let mut out = Seq::new($l / 8);
                for (i, block) in self.0.chunks(8).enumerate() {
                    debug_assert!(block.len() == 8);
                    out[i] = U64_from_be_bytes(U64Word::from_native_slice(block));
                }
                out
            }
            /// Reinterprets the bytes as little-endian `U64` words.
            #[allow(non_snake_case)]
            #[cfg_attr(feature = "use_attributes", unsafe_hacspec($name))]
            pub fn to_le_U64s(&self) -> Seq<U64> {
                let mut out = Seq::new($l / 8);
                for (i, block) in self.0.chunks(8).enumerate() {
                    debug_assert!(block.len() == 8);
                    out[i] = U64_from_le_bytes(U64Word::from_native_slice(block));
                }
                out
            }
            /// Reinterprets the bytes as big-endian `U128` words
            /// (assumes `$l` is a multiple of 16).
            #[allow(non_snake_case)]
            #[cfg_attr(feature = "use_attributes", unsafe_hacspec($name))]
            pub fn to_U128s_be(&self) -> Seq<U128> {
                let mut out = Seq::new($l / 16);
                for (i, block) in self.0.chunks(16).enumerate() {
                    debug_assert!(block.len() == 16);
                    out[i] = U128_from_be_bytes(U128Word::from_native_slice(block));
                }
                out
            }
            /// Reinterprets the bytes as little-endian `U128` words.
            #[allow(non_snake_case)]
            #[cfg_attr(feature = "use_attributes", unsafe_hacspec($name))]
            pub fn to_U128s_le(&self) -> Seq<U128> {
                let mut out = Seq::new($l / 16);
                for (i, block) in self.0.chunks(16).enumerate() {
                    debug_assert!(block.len() == 16);
                    out[i] = U128_from_le_bytes(U128Word::from_native_slice(block));
                }
                out
            }
            /// Hex string of the array, two lowercase digits per byte.
            #[cfg_attr(feature = "use_attributes", not_hacspec($name))]
            pub fn to_hex(&self) -> String {
                let strs: Vec<String> = self.0.iter().map(|b| format!("{:02x}", b)).collect();
                strs.join("")
            }
        }
    };
}
#[macro_export]
#[doc(hidden)]
/// Implements a public `u8` array: `_public_array!` plus the numeric traits
/// and word/hex conversion helpers.
macro_rules! _implement_public_u8_array {
    ($name:ident, $l:expr) => {
        _public_array!($name, $l, u8);
        _implement_numeric_unsigned_public!($name);
        impl $name {
            /// Reinterprets the bytes as big-endian `u32` words.
            // NOTE(review): assumes `$l` is a multiple of 4; only the
            // debug_assert below checks this — confirm for release builds.
            #[cfg_attr(feature = "use_attributes", unsafe_hacspec($name))]
            pub fn to_be_u32s(&self) -> Seq<u32> {
                let mut out = Seq::new($l / 4);
                for (i, block) in self.0.chunks(4).enumerate() {
                    debug_assert!(block.len() == 4);
                    out[i] = u32::from_be_bytes(to_array(block));
                }
                out
            }
            /// Reinterprets the bytes as little-endian `u32` words.
            #[cfg_attr(feature = "use_attributes", unsafe_hacspec($name))]
            pub fn to_le_u32s(&self) -> Seq<u32> {
                let mut out = Seq::new($l / 4);
                for (i, block) in self.0.chunks(4).enumerate() {
                    debug_assert!(block.len() == 4);
                    out[i] = u32::from_le_bytes(to_array(block));
                }
                out
            }
            /// Reinterprets the bytes as big-endian `u64` words
            /// (assumes `$l` is a multiple of 8).
            #[cfg_attr(feature = "use_attributes", unsafe_hacspec($name))]
            pub fn to_be_u64s(&self) -> Seq<u64> {
                let mut out = Seq::new($l / 8);
                for (i, block) in self.0.chunks(8).enumerate() {
                    debug_assert!(block.len() == 8);
                    out[i] = u64::from_be_bytes(to_array(block));
                }
                out
            }
            /// Reinterprets the bytes as little-endian `u64` words.
            #[cfg_attr(feature = "use_attributes", unsafe_hacspec($name))]
            pub fn to_le_u64s(&self) -> Seq<u64> {
                let mut out = Seq::new($l / 8);
                for (i, block) in self.0.chunks(8).enumerate() {
                    debug_assert!(block.len() == 8);
                    out[i] = u64::from_le_bytes(to_array(block));
                }
                out
            }
            /// Reinterprets the bytes as big-endian `u128` words
            /// (assumes `$l` is a multiple of 16).
            #[cfg_attr(feature = "use_attributes", unsafe_hacspec($name))]
            pub fn to_u128s_be(&self) -> Seq<u128> {
                let mut out = Seq::new($l / 16);
                for (i, block) in self.0.chunks(16).enumerate() {
                    debug_assert!(block.len() == 16);
                    out[i] = u128::from_be_bytes(to_array(block));
                }
                out
            }
            /// Reinterprets the bytes as little-endian `u128` words.
            #[cfg_attr(feature = "use_attributes", unsafe_hacspec($name))]
            pub fn to_u128s_le(&self) -> Seq<u128> {
                let mut out = Seq::new($l / 16);
                for (i, block) in self.0.chunks(16).enumerate() {
                    debug_assert!(block.len() == 16);
                    out[i] = u128::from_le_bytes(to_array(block));
                }
                out
            }
            /// Hex string of the array, two lowercase digits per byte.
            #[cfg_attr(feature = "use_attributes", not_hacspec($name))]
            pub fn to_hex(&self) -> String {
                let strs: Vec<String> = self.0.iter().map(|b| format!("{:02x}", b)).collect();
                strs.join("")
            }
        }
    };
}
// The following are the macros intended for use from the outside.
#[macro_export]
/// Create a new array with the given name, length, and type.
///
/// Secret element types (`U8` … `U128`) expand to the secret-array
/// implementation, public types to the public one. An optional trailing
/// `type_for_indexes: $idx` additionally declares a `usize` alias used for
/// indexing.
macro_rules! array {
    ($name:ident, $l:expr, U8) => {
        _implement_secret_u8_array!($name, $l);
    };
    // Consistency fix: every other element type spells the index-alias arm
    // with a `type_for_indexes:` prefix; U8 now accepts that form as well.
    ($name:ident, $l:expr, U8, type_for_indexes: $idx: ident) => {
        _implement_secret_u8_array!($name, $l);
        pub type $idx = usize;
    };
    // Kept for backwards compatibility: the historical U8 arm without the
    // `type_for_indexes:` prefix.
    ($name:ident, $l:expr, U8, $idx: ident) => {
        _implement_secret_u8_array!($name, $l);
        pub type $idx = usize;
    };
    ($name:ident, $l:expr, U16) => {
        _secret_array!($name, $l, U16, u16);
        _implement_numeric_unsigned_secret!($name, U16);
    };
    ($name:ident, $l:expr, U16, type_for_indexes: $idx: ident) => {
        _secret_array!($name, $l, U16, u16);
        _implement_numeric_unsigned_secret!($name, U16);
        pub type $idx = usize;
    };
    ($name:ident, $l:expr, U32) => {
        _secret_array!($name, $l, U32, u32);
        _implement_numeric_unsigned_secret!($name, U32);
    };
    ($name:ident, $l:expr, U32, type_for_indexes: $idx: ident) => {
        _secret_array!($name, $l, U32, u32);
        _implement_numeric_unsigned_secret!($name, U32);
        pub type $idx = usize;
    };
    ($name:ident, $l:expr, U64) => {
        _secret_array!($name, $l, U64, u64);
        _implement_numeric_unsigned_secret!($name, U64);
    };
    ($name:ident, $l:expr, U64, type_for_indexes: $idx: ident) => {
        _secret_array!($name, $l, U64, u64);
        _implement_numeric_unsigned_secret!($name, U64);
        pub type $idx = usize;
    };
    ($name:ident, $l:expr, U128) => {
        _secret_array!($name, $l, U128, u128);
        _implement_numeric_unsigned_secret!($name, U128);
    };
    ($name:ident, $l:expr, U128, type_for_indexes: $idx: ident) => {
        _secret_array!($name, $l, U128, u128);
        _implement_numeric_unsigned_secret!($name, U128);
        pub type $idx = usize;
    };
    ($name:ident, $l:expr, u8) => {
        _implement_public_u8_array!($name, $l);
    };
    ($name:ident, $l:expr, u8, type_for_indexes: $idx: ident) => {
        _implement_public_u8_array!($name, $l);
        pub type $idx = usize;
    };
    ($name:ident, $l:expr, u16) => {
        _public_array!($name, $l, u16);
        _implement_numeric_unsigned_public!($name);
    };
    ($name:ident, $l:expr, u16, type_for_indexes: $idx: ident) => {
        _public_array!($name, $l, u16);
        _implement_numeric_unsigned_public!($name);
        pub type $idx = usize;
    };
    ($name:ident, $l:expr, u32) => {
        _public_array!($name, $l, u32);
        _implement_numeric_unsigned_public!($name);
    };
    ($name:ident, $l:expr, u32, type_for_indexes: $idx: ident) => {
        _public_array!($name, $l, u32);
        _implement_numeric_unsigned_public!($name);
        pub type $idx = usize;
    };
    ($name:ident, $l:expr, u64) => {
        _public_array!($name, $l, u64);
        _implement_numeric_unsigned_public!($name);
    };
    ($name:ident, $l:expr, u64, type_for_indexes: $idx: ident) => {
        _public_array!($name, $l, u64);
        _implement_numeric_unsigned_public!($name);
        pub type $idx = usize;
    };
    ($name:ident, $l:expr, u128) => {
        _public_array!($name, $l, u128);
        _implement_numeric_unsigned_public!($name);
    };
    ($name:ident, $l:expr, u128, type_for_indexes: $idx: ident) => {
        _public_array!($name, $l, u128);
        _implement_numeric_unsigned_public!($name);
        pub type $idx = usize;
    };
    // Fallback for arbitrary (public) element types.
    ($name:ident, $l:expr, $t:ty) => {
        _public_array!($name, $l, $t);
    };
    ($name:ident, $l:expr, $t:ty, type_for_indexes: $idx: ident) => {
        _public_array!($name, $l, $t);
        pub type $idx = usize;
    };
}
#[macro_export]
/// Convenience function to create a new byte array (of type `U8`) with the given
/// name and length.
macro_rules! bytes {
    ($name:ident, $l:expr) => {
        // Delegates to the secret-byte arm of `array!`.
        array!($name, $l, U8);
    };
}
#[macro_export]
/// Convenience function to create a new public byte array (of type `u8`) with
/// the given name and length.
macro_rules! public_bytes {
    ($name:ident, $l:expr) => {
        // Delegates to the public-byte arm of `array!`.
        array!($name, $l, u8);
    };
}
#[macro_export]
/// Builds a literal array of secret integers by wrapping every element in the
/// given secret integer constructor, e.g. `secret_array!(U8, [1, 2, 3])`.
macro_rules! secret_array {
    ( $int_type: ident, [ $( $x:expr ),+ ] ) => {
        [
            $(
                $int_type($x)
            ),+
        ]
    }
}
#[macro_export]
/// Builds a literal array of secret bytes (`U8`).
macro_rules! secret_bytes {
    ([ $( $x:expr ),+ ] ) => {
        secret_array!(U8, [$($x),+])
    }
}
#[macro_export]
/// Asserts element-wise equality of two secret arrays by declassifying both
/// sides with `$si::declassify` (test helper; leaks the secret values).
macro_rules! assert_secret_array_eq {
    ( $a1: expr, $a2: expr, $si: ident) => {
        assert_eq!(
            $a1.iter().map(|x| $si::declassify(*x)).collect::<Vec<_>>(),
            $a2.iter().map(|x| $si::declassify(*x)).collect::<Vec<_>>()
        );
    };
}
#[macro_export]
/// Asserts element-wise equality of two secret byte arrays.
macro_rules! assert_bytes_eq {
    ( $a1: expr, $a2: expr) => {
        assert_secret_array_eq!($a1, $a2, U8)
    };
}
#[macro_export]
/// Declares both a secret array `$name` and its public counterpart
/// `$public_name`, plus conversions between the two.
macro_rules! both_arrays {
    ($public_name:ident, $name:ident, $l:expr, $t:ty, $tbase:ty) => {
        _secret_array!($name, $l, $t, $tbase);
        _public_array!($public_name, $l, $tbase);
        impl $name {
            /// Conversion function between public and secret array versions.
            #[cfg_attr(feature = "use_attributes", unsafe_hacspec($name))]
            pub fn from_public(v: $public_name) -> $name {
                Self::from_vec(
                    v[..]
                        .iter()
                        .map(|x| <$t>::classify(*x))
                        .collect::<Vec<$t>>(),
                )
            }
        }
        impl $public_name {
            /// *Warning:* this function declassifies secret integers!
            // NOTE(review): this attribute passes `$name` although the impl is
            // for `$public_name`; sibling attributes elsewhere pass the impl's
            // own type — confirm which the hacspec attribute checker expects.
            #[cfg_attr(feature = "use_attributes", unsafe_hacspec($name))]
            pub fn from_secret_declassify(v: $name) -> $public_name {
                Self::from_vec(
                    v[..]
                        .iter()
                        .map(|x| <$t>::declassify(*x))
                        .collect::<Vec<$tbase>>(),
                )
            }
        }
    };
}
#[macro_export]
/// Declares both a secret byte array and its public counterpart.
macro_rules! both_bytes {
    ($public_name:ident, $name:ident, $l:expr) => {
        both_arrays!($public_name, $name, $l, U8, u8);
    };
}
|
use std::hash::Hash;
/// A playing-card rank, ordered ascending from `Two` (lowest) to `Ace`
/// (highest). Ordering and equality are derived from declaration order.
#[derive(Debug, Clone, Copy, PartialEq, PartialOrd, Eq, Ord, Hash)]
pub enum Rank {
    Two,
    Three,
    Four,
    Five,
    Six,
    Seven,
    Eight,
    Nine,
    Ten,
    Jack,
    Queen,
    King,
    Ace,
}
impl Rank {
    /// Returns every rank, lowest to highest, as a freshly allocated vector.
    pub fn list() -> Vec<Self> {
        use Rank::*;
        [
            Two, Three, Four, Five, Six, Seven, Eight, Nine, Ten, Jack, Queen, King, Ace,
        ]
        .to_vec()
    }
}
#[cfg(test)]
mod tests {
    use ::claim::*;
    use super::Rank;

    /// Every rank in the expected ascending order; a rank's index in this
    /// list is its expected position in the total order. Spelled out
    /// explicitly so the tests do not depend on `Rank::list()` being right.
    fn ranks_ascending() -> Vec<Rank> {
        vec![
            Rank::Two,
            Rank::Three,
            Rank::Four,
            Rank::Five,
            Rank::Six,
            Rank::Seven,
            Rank::Eight,
            Rank::Nine,
            Rank::Ten,
            Rank::Jack,
            Rank::Queen,
            Rank::King,
            Rank::Ace,
        ]
    }

    /// Asserts `lhs` compares greater than every rank below `pos`, equal to
    /// the rank at `pos`, and less than every rank above it — the same
    /// assertions the previous hand-expanded tests performed.
    fn assert_rank_ordering(lhs: Rank, pos: usize) {
        for (i, other) in ranks_ascending().into_iter().enumerate() {
            if i < pos {
                assert_gt!(lhs, other);
            } else if i == pos {
                assert_eq!(lhs, other);
            } else {
                assert_lt!(lhs, other);
            }
        }
    }

    #[test]
    fn test_ordering_two() {
        assert_rank_ordering(Rank::Two, 0);
    }
    #[test]
    fn test_ordering_three() {
        assert_rank_ordering(Rank::Three, 1);
    }
    #[test]
    fn test_ordering_four() {
        assert_rank_ordering(Rank::Four, 2);
    }
    #[test]
    fn test_ordering_five() {
        assert_rank_ordering(Rank::Five, 3);
    }
    #[test]
    fn test_ordering_six() {
        assert_rank_ordering(Rank::Six, 4);
    }
    #[test]
    fn test_ordering_seven() {
        assert_rank_ordering(Rank::Seven, 5);
    }
    #[test]
    fn test_ordering_eight() {
        assert_rank_ordering(Rank::Eight, 6);
    }
    #[test]
    fn test_ordering_nine() {
        assert_rank_ordering(Rank::Nine, 7);
    }
    #[test]
    fn test_ordering_ten() {
        assert_rank_ordering(Rank::Ten, 8);
    }
    #[test]
    fn test_ordering_jack() {
        assert_rank_ordering(Rank::Jack, 9);
    }
    #[test]
    fn test_ordering_queen() {
        assert_rank_ordering(Rank::Queen, 10);
    }
    #[test]
    fn test_ordering_king() {
        assert_rank_ordering(Rank::King, 11);
    }
    #[test]
    fn test_ordering_ace() {
        assert_rank_ordering(Rank::Ace, 12);
    }
    #[test]
    fn test_list() {
        assert_eq!(Rank::list(), ranks_ascending());
    }
}
|
use anyhow::{anyhow, bail, Result};
use pasture_core::math::Alignable;
use serde::{Deserialize, Serialize};
use serde_json::{Map, Value};
use std::{
collections::HashMap,
convert::TryFrom,
convert::TryInto,
io::{BufRead, Seek, SeekFrom, Write},
};
use super::{read_json_header, write_json_header};
/// A reference to data inside a BatchTable binary body
#[derive(Debug, Clone, Serialize, Deserialize, Eq, PartialEq)]
pub struct BatchTableDataReference {
    // Offset in bytes from the start of the BatchTable binary body.
    #[serde(rename = "byteOffset")]
    pub byte_offset: usize,
    // Component type name as defined by the 3D Tiles spec (e.g. "FLOAT").
    #[serde(rename = "componentType")]
    pub component_type: String,
    // Element type name, e.g. "SCALAR" or a vector type.
    #[serde(rename = "type")]
    pub scalar_or_vector_type: String,
}
/// An entry inside a BatchTable
#[derive(Debug, Clone, Eq, PartialEq)]
pub enum BatchTableEntry {
    /// An entry containing array data
    ArrayData(Vec<Value>),
    /// An entry refering to the binary body of the BatchTable
    DataReference(BatchTableDataReference),
}
impl TryFrom<Value> for BatchTableEntry {
type Error = anyhow::Error;
fn try_from(val: Value) -> Result<Self> {
if val.is_array() {
let val_as_array = val.as_array().unwrap();
return Ok(BatchTableEntry::ArrayData(val_as_array.clone()));
}
if val.is_object() {
let data_reference = serde_json::from_value::<BatchTableDataReference>(val)?;
return Ok(BatchTableEntry::DataReference(data_reference));
}
bail!("JSON value cannot be converted to BatchTableEntry because it is neither an array nor an object")
}
}
impl TryFrom<&Value> for BatchTableEntry {
type Error = anyhow::Error;
fn try_from(val: &Value) -> Result<Self> {
if val.is_array() {
let val_as_array = val.as_array().unwrap();
return Ok(BatchTableEntry::ArrayData(val_as_array.clone()));
}
if val.is_object() {
let data_reference = serde_json::from_value::<BatchTableDataReference>(val.clone())?;
return Ok(BatchTableEntry::DataReference(data_reference));
}
bail!("JSON value cannot be converted to BatchTableEntry because it is neither an array nor an object")
}
}
// Implementing `From` instead of `Into` (clippy: from_over_into); the blanket
// `impl<T, U: From<T>> Into<U> for T` keeps all existing `.into()` call sites
// working.
impl From<BatchTableEntry> for Value {
    /// Converts the entry back into its JSON representation.
    ///
    /// Panics if the data reference cannot be serialized, which `serde_json`
    /// only does for non-string map keys — impossible for this struct.
    fn from(entry: BatchTableEntry) -> Value {
        match entry {
            BatchTableEntry::ArrayData(array) => Value::Array(array),
            BatchTableEntry::DataReference(data_reference) => serde_json::to_value(data_reference)
                .expect("Could not convert BatchTableEntry to JSON Value"),
        }
    }
}
// `From` instead of `Into` (clippy: from_over_into); `.into()` on a
// `&BatchTableEntry` keeps working through the blanket impl.
impl From<&BatchTableEntry> for Value {
    /// Converts a borrowed entry into its JSON representation, cloning any
    /// array data.
    fn from(entry: &BatchTableEntry) -> Value {
        match entry {
            BatchTableEntry::ArrayData(array) => Value::Array(array.clone()),
            BatchTableEntry::DataReference(data_reference) => serde_json::to_value(data_reference)
                .expect("Could not convert BatchTableEntry to JSON Value"),
        }
    }
}
/// A 3D Tiles BatchTable header, which is a collection of BatchTableEntries
/// keyed by property name.
pub type BatchTableHeader = HashMap<String, BatchTableEntry>;
/// Deserialize a `BatchTableHeader` from the given `reader`. If successful, returns the serialized header and the
/// `reader` will be at the start of the binary body of the 3D Tiles BatchTable. See the [3D Tiles documentation](https://github.com/CesiumGS/3d-tiles/blob/master/specification/TileFormats/BatchTable/README.md)
/// for more information. If this operation fails, the reader will be in an undefined state.
pub fn deser_batch_table_header<R: BufRead + Seek>(
    mut reader: R,
    batch_table_header_size: usize,
    header_start_position_in_file: usize,
) -> Result<BatchTableHeader> {
    let batch_table_header_json = read_json_header(&mut reader, batch_table_header_size)?;
    // Read the potential padding bytes so we end at an 8-byte boundary in the file. since the 'reader' can be a
    // sub-reader that does not refer to the whole file, we need the start position of the header in the file as
    // an extra parameter
    let current_position_in_file = header_start_position_in_file + batch_table_header_size;
    let padding_bytes = current_position_in_file.align_to(8) - current_position_in_file;
    if padding_bytes > 0 {
        reader.seek(SeekFrom::Current(padding_bytes as i64))?;
    }
    // `ok_or_else` defers building the error to the failure path; the
    // previous `ok_or(anyhow!(..))` constructed it eagerly on every call.
    let batch_table_json_obj = batch_table_header_json
        .as_object()
        .ok_or_else(|| anyhow!("BatchTable JSON header was no JSON object"))?;
    // Convert JSON object to `BatchTableHeader`; the fallible collect
    // short-circuits on the first entry that is neither array nor object.
    Ok(batch_table_json_obj
        .iter()
        .map(|(k, v)| -> Result<(String, BatchTableEntry)> {
            let batch_table_entry: BatchTableEntry = v.try_into()?;
            Ok((k.clone(), batch_table_entry))
        })
        .collect::<Result<HashMap<_, _>, _>>()?)
}
/// Serializes the given `BatchTableHeader` to the given `writer`. If successful, the `writer` will be at the appropriate
/// position for writing the BatchTable body (i.e. required padding spaces have been written as per the [3D Tiles documentation](https://github.com/CesiumGS/3d-tiles/blob/master/specification/TileFormats/BatchTable/README.md)).
pub fn ser_batch_table_header<W: Write + Seek>(
mut writer: W,
batch_table_header: &BatchTableHeader,
position_in_file: usize,
) -> Result<()> {
let header_as_map = batch_table_header
.iter()
.map(|(k, v)| -> (String, Value) { (k.clone(), v.into()) })
.collect::<Map<_, _>>();
let header_json_obj = Value::Object(header_as_map);
write_json_header(&mut writer, &header_json_obj, position_in_file)
}
#[cfg(test)]
mod tests {
    use super::*;
    use serde_json::json;
    use std::io::{BufReader, BufWriter, Cursor, SeekFrom};
    /// Builds a header with one inline-array entry and one binary-body
    /// reference — one of each `BatchTableEntry` variant.
    fn dummy_batch_table_header() -> BatchTableHeader {
        let mut header = BatchTableHeader::new();
        header.insert(
            "ARRAY_FIELD".into(),
            BatchTableEntry::ArrayData(vec![json!(1), json!(2), json!(3)]),
        );
        header.insert(
            "REFERENCE_FIELD".into(),
            BatchTableEntry::DataReference(BatchTableDataReference {
                byte_offset: 42,
                component_type: "FLOAT".into(),
                scalar_or_vector_type: "SCALAR".into(),
            }),
        );
        header
    }
    /// Round-trips a header through ser/deser in memory and checks the
    /// serialized size is 8-byte aligned.
    #[test]
    fn test_3dtiles_batch_table_io() -> Result<()> {
        let expected_header = dummy_batch_table_header();
        let mut writer = BufWriter::new(Cursor::new(vec![]));
        ser_batch_table_header(&mut writer, &expected_header, 0)?;
        // Make sure that the header is written with padding bytes so that we are at an 8-byte boundary
        let header_size = writer.seek(SeekFrom::Current(0))? as usize;
        assert_eq!(header_size % 8, 0);
        let mut cursor = writer.into_inner()?;
        cursor.seek(SeekFrom::Start(0))?;
        let mut reader = BufReader::new(cursor);
        let actual_header = deser_batch_table_header(&mut reader, header_size, 0)?;
        assert_eq!(expected_header, actual_header);
        Ok(())
    }
}
|
use std::rc::Rc;
use crate::material::{diffuse_light::DiffuseLight, lambertian::Lambertian, Material};
use crate::shape::{
cube::Cube, rotate_y::RotateY, shape_list::ShapeList, translate::Translate, xy_rect::XyRect,
xz_rect::XzRect, yz_rect::YzRect, Shape,
};
use crate::vec3::{Color, Point3, Vec3};
/// Builds the classic Cornell-box scene: green/red side walls, white floor,
/// ceiling and back wall, a ceiling light, and two rotated white boxes.
pub fn build() -> ShapeList {
    let mut world = ShapeList::default();

    // Materials; `white` is shared by several shapes via `Rc::clone`.
    let red: Rc<dyn Material> = Rc::new(Lambertian::from_color(Color::new(0.65, 0.05, 0.05)));
    let white: Rc<dyn Material> = Rc::new(Lambertian::from_color(Color::new(0.73, 0.73, 0.73)));
    let green: Rc<dyn Material> = Rc::new(Lambertian::from_color(Color::new(0.12, 0.45, 0.15)));
    let light: Rc<dyn Material> = Rc::new(DiffuseLight::from_color(Color::new(15.0, 15.0, 15.0)));

    // Walls and the ceiling light panel. The box is 555 units on a side.
    world.add(Rc::new(YzRect::new(0.0, 555.0, 0.0, 555.0, 555.0, green)));
    world.add(Rc::new(YzRect::new(0.0, 555.0, 0.0, 555.0, 0.0, red)));
    world.add(Rc::new(XzRect::new(
        213.0, 343.0, 227.0, 332.0, 554.0, light,
    )));
    world.add(Rc::new(XzRect::new(
        0.0,
        555.0,
        0.0,
        555.0,
        0.0,
        Rc::clone(&white),
    )));
    world.add(Rc::new(XzRect::new(
        0.0,
        555.0,
        0.0,
        555.0,
        555.0,
        Rc::clone(&white),
    )));
    world.add(Rc::new(XyRect::new(
        0.0,
        555.0,
        0.0,
        555.0,
        555.0,
        Rc::clone(&white),
    )));

    // Tall box: rotated 15° and moved toward the back-left of the room.
    let tall_box: Rc<dyn Shape> = Rc::new(Cube::new(
        Point3::new(0.0, 0.0, 0.0),
        Point3::new(165.0, 330.0, 165.0),
        Rc::clone(&white),
    ));
    let tall_box: Rc<dyn Shape> = Rc::new(RotateY::new(tall_box, 15.0));
    let tall_box: Rc<dyn Shape> = Rc::new(Translate::new(tall_box, Vec3::new(265.0, 0.0, 295.0)));
    world.add(tall_box);

    // Short box: rotated -18° and moved toward the front-right.
    let short_box: Rc<dyn Shape> = Rc::new(Cube::new(
        Point3::new(0.0, 0.0, 0.0),
        Point3::new(165.0, 165.0, 165.0),
        Rc::clone(&white),
    ));
    let short_box: Rc<dyn Shape> = Rc::new(RotateY::new(short_box, -18.0));
    let short_box: Rc<dyn Shape> = Rc::new(Translate::new(short_box, Vec3::new(130.0, 0.0, 65.0)));
    world.add(short_box);

    world
}
|
pub mod request;
pub mod response;
#[cfg(test)]
mod tests {
    use request::Request;
    use response::Response;
    /// Constructor stores method, target and version verbatim.
    #[test]
    fn test_req_create() {
        let req = Request::new("GET", "/hello", "1.1");
        assert_eq!(req.method, String::from("GET"));
        assert_eq!(req.target, String::from("/hello"));
        assert_eq!(req.version, String::from("1.1"));
    }
    /// `set_header` is chainable and `get_header` returns the stored value.
    #[test]
    fn test_req_get_set_header() {
        let name = "Content-Type";
        let value = "text/html";
        let mut req = Request::new("GET", "/hello", "1.1");
        assert_eq!(req.get_header(name), None);
        assert_eq!(req.set_header(name, value)
            .get_header(name),
            Some(&String::from(value)));
    }
    /// Serialization follows the HTTP wire format: request line, headers,
    /// blank line, body.
    #[test]
    fn test_req_to_string() {
        let name = "Content-Type";
        let value = "text/html";
        let body = "Hello, World!\r\n\r\n";
        let mut req = Request::new("GET", "/hello", "1.1");
        req.set_header(name, value);
        req.body.append(&mut body.as_bytes().to_vec());
        assert_eq!(req.to_string(),
            format!("{}\r\n{}: {}\r\n\r\n{}",
                "GET /hello HTTP/1.1",
                name, value,
                body));
    }
    /// Parses a request line plus headers, then a body read to EOF.
    #[test]
    fn test_req_parse() {
        // Test parse_header:
        let mut msg = String::new();
        msg.push_str("GET /hello HTTP/1.1\r\n");
        msg.push_str("Content-Type: text/html\r\n");
        msg.push_str("Accept-Charset: utf-8\r\n\r\n");
        let parse_result = Request::parse_header(&mut msg.as_bytes());
        assert!(parse_result.is_ok());
        let parse = parse_result.unwrap();
        assert_eq!(parse.method, "GET");
        assert_eq!(parse.target, "/hello");
        assert_eq!(parse.version, "1.1");
        assert_eq!(parse.get_header("Content-Type"),
            Some(&String::from("text/html")));
        assert_eq!(parse.get_header("Accept-Charset"),
            Some(&String::from("utf-8")));
        // Test parse_body:
        let parse_body_result = parse.parse_body(
            &mut "Hello, World!\r\n\r\n".as_bytes());
        assert!(parse_body_result.is_ok());
        let parse = parse_body_result.unwrap();
        assert_eq!(parse.body.len(), 17);
        let body = String::from_utf8(parse.body).unwrap_or(String::new());
        assert_eq!(body, String::from("Hello, World!\r\n\r\n"));
    }
    /// Same as above but with a Content-Length header driving the body read.
    #[test]
    fn test_req_parse_exact() {
        // Test parse_header:
        let mut msg = String::new();
        msg.push_str("GET /hello HTTP/1.1\r\n");
        msg.push_str("Content-Length: 17\r\n\r\n");
        let parse_result = Request::parse_header(&mut msg.as_bytes());
        assert!(parse_result.is_ok());
        let parse = parse_result.unwrap();
        assert_eq!(
            usize::from_str_radix(
                parse.get_header("Content-Length").unwrap(), 10),
            Ok(17));
        // Test parse_body:
        let parse_body_result = parse.parse_body(
            &mut "Hello, World!\r\n\r\n".as_bytes());
        assert!(parse_body_result.is_ok());
        let parse = parse_body_result.unwrap();
        assert_eq!(parse.body.len(), 17);
        let body = String::from_utf8(parse.body).unwrap_or(String::new());
        assert_eq!(body, String::from("Hello, World!\r\n\r\n"));
    }
    /// A new 200 response gets the "OK" reason phrase and an empty body.
    #[test]
    fn test_res_new() {
        let res = Response::new("1.1", 200);
        assert_eq!(res.version, String::from("1.1"));
        assert_eq!(res.response_code, 200);
        assert_eq!(res.response_phrase, String::from("OK"));
        assert_eq!(res.body.len(), 0);
    }
    /// Response headers can be set fluently and read back.
    #[test]
    fn test_res_headers() {
        let version = "1.1";
        let res = Response::new(&version, 200)
            .set_header("foo", "foo_val")
            .set_header("bar", "bar_val");
        assert_eq!(res.get_header("foo"), Some(&String::from("foo_val")));
        assert_eq!(res.get_header("bar"), Some(&String::from("bar_val")));
    }
    /// `set_body` stores the raw bytes.
    #[test]
    fn test_res_set_body() {
        let version = "1.1";
        let res = Response::new(&version, 200)
            .set_body("Hello, World!\r\n".as_bytes());
        assert_eq!(res.body, "Hello, World!\r\n".as_bytes());
    }
    /// Serialization emits status line, headers and body. Header iteration
    /// order is unspecified, so both orderings are accepted.
    #[test]
    fn test_res_serialize() {
        let version = "1.1";
        let res = Response::new(&version, 200)
            .set_header("foo", "foo_val")
            .set_header("bar", "bar_val")
            .set_body("Hello, World!\r\n\r\n".as_bytes());
        let bytes = res.serialize();
        let start_line = "HTTP/1.1 200 OK\r\n";
        let headers1 = "foo: foo_val\r\nbar: bar_val\r\n\r\n";
        let headers2 = "bar: bar_val\r\nfoo: foo_val\r\n\r\n";
        let body = "Hello, World!\r\n\r\n";
        let exp_bytes1 = format!("{}{}{}", start_line, headers1, body);
        let exp_bytes2 = format!("{}{}{}", start_line, headers2, body);
        if bytes != exp_bytes1.as_str().as_bytes() {
            assert_eq!(bytes, exp_bytes2.as_str().as_bytes());
        } else {
            assert_eq!(bytes, exp_bytes1.as_str().as_bytes());
        }
    }
}
|
use std::path::Path;
use image::{Rgb, RgbImage};
use imageproc::drawing::{draw_filled_circle_mut, draw_filled_rect_mut, draw_line_segment_mut, draw_hollow_circle_mut, draw_cross_mut};
use imageproc::rect::Rect;
use crate::config;
use crate::maze::maze_phenotype::MazeCell;
use crate::maze::maze_phenotype::MazePhenotype;
use crate::maze::PathDirection;
/// Renders `maze` to an image file at `file_path`, optionally overlaying
/// the solution waypoints.
#[allow(dead_code)]
pub fn visualize_maze(maze: &MazePhenotype, file_path: String, display_solution: bool) {
    // Each maze cell is 4 * cell_dimension pixels on a side; the +1 leaves
    // room for the outermost border line.
    let cell_px = 4 * config::MAZE.cell_dimension as u32;
    let width_px = maze.width * cell_px + 1;
    let height_px = maze.height * cell_px + 1;
    let mut canvas = RgbImage::new(width_px, height_px);
    draw_maze(maze, &mut canvas, cell_px, display_solution);
    canvas.save(Path::new(&file_path)).unwrap();
}
/// Paints the whole maze (white background, cell walls, and optionally the
/// solution waypoints) into `drawing`.
///
/// `scale_u32` is the side length of one cell in pixels. Note that rows are
/// iterated with `.rev()`, so grid row 0 is drawn at the bottom of the image.
#[allow(dead_code)]
pub fn draw_maze(
    maze: &MazePhenotype,
    drawing: &mut RgbImage,
    scale_u32: u32,
    display_solution: bool,
) {
    // Half a cell_dimension; used to center markers inside a cell.
    let offset = config::MAZE.cell_dimension as usize / 2;
    let radius = 2;
    let scale_usize = 4 * config::MAZE.cell_dimension as usize;
    // White background covering the whole canvas.
    draw_filled_rect_mut(
        drawing,
        Rect::at(0, 0).of_size(maze.width * scale_u32 + 1, maze.height * scale_u32 + 1),
        Rgb([255u8, 255u8, 255u8]),
    );
    for (x, column) in maze.grid.iter().enumerate() {
        // Reverse the column so y grows downward in image coordinates.
        for (y, cell) in column.iter().rev().enumerate() {
            draw_cell_borders(
                drawing,
                cell,
                (x * scale_usize) as f32,
                (y * scale_usize) as f32,
                scale_usize as f32,
            );
            if display_solution {
                // NOTE(review): the `x < 9` guard hides waypoints in columns
                // >= 9 and looks like a debugging leftover — confirm intent.
                if cell.is_waypoint && x < 9 {
                    draw_filled_circle_mut(
                        drawing,
                        (
                            (x * scale_usize + offset * 4) as i32,
                            (y * scale_usize + offset * 4) as i32,
                        ),
                        offset as i32,
                        Rgb([0, 0, 0]),
                    );
                }
                /*if cell.is_juncture {
                    draw_filled_circle_mut(
                        drawing,
                        (
                            (x * scale_usize + offset * 4) as i32,
                            (y * scale_usize + offset * 4) as i32,
                        ),
                        offset as i32,
                        Rgb([0, 0, 0]),
                    );
                }
                if cell.path_direction != PathDirection::None {
                    draw_filled_circle_mut(
                        drawing,
                        (
                            (x * scale_usize + offset) as i32,
                            (y * scale_usize + offset) as i32,
                        ),
                        radius,
                        Rgb([0, 0, 0]),
                    );
                }*/
            }
        }
    }
}
/// Draws the walls of a single cell as 3-pixel-thick black line segments.
///
/// `(x, y)` is the cell's top-left corner in image coordinates and `scale`
/// is the cell's side length in pixels. Each present wall is drawn as three
/// parallel 1 px segments (at offsets -1, 0, +1), exactly like the original
/// hand-unrolled version, but deduplicated into loops.
#[allow(dead_code)]
pub fn draw_cell_borders(drawing: &mut RgbImage, cell: &MazeCell, x: f32, y: f32, scale: f32) {
    let black = Rgb([0u8, 0, 0]);
    // Offsets that fatten a 1 px segment into a 3 px line.
    let thickness: [f32; 3] = [-1.0, 0.0, 1.0];
    if cell.north_wall {
        for &d in thickness.iter() {
            draw_line_segment_mut(drawing, (x, y + d), (x + scale, y + d), black);
        }
    }
    if cell.east_wall {
        for &d in thickness.iter() {
            draw_line_segment_mut(drawing, (x + d + scale, y), (x + d + scale, y + scale), black);
        }
    }
    if cell.south_wall {
        for &d in thickness.iter() {
            draw_line_segment_mut(drawing, (x, y + scale + d), (x + scale, y + scale + d), black);
        }
    }
    if cell.west_wall {
        for &d in thickness.iter() {
            draw_line_segment_mut(drawing, (x + d, y), (x + d, y + scale), black);
        }
    }
}
|
// NOTE(review): this file matches the svd2rust-generated register-accessor
// pattern; prefer regenerating from the SVD over hand-editing.
#[doc = "Register `SR` reader"]
pub type R = crate::R<SR_SPEC>;
#[doc = "Field `RTT4B` reader - FIFO is ready to transfer four bytes"]
pub type RTT4B_R = crate::BitReader<RTT4B_A>;
#[doc = "FIFO is ready to transfer four bytes\n\nValue on reset: 0"]
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum RTT4B_A {
    #[doc = "0: FIFO is not ready for a four-byte transfer"]
    NotReady = 0,
    #[doc = "1: FIFO is ready for a four-byte (32-bit) transfer. In receive mode, this means that at least four valid data bytes are in the FIFO. In transmit mode, this means that there are at least four bytes free in the FIFO"]
    Ready = 1,
}
impl From<RTT4B_A> for bool {
    // Maps the enum back to its raw bit value (NotReady -> false, Ready -> true).
    #[inline(always)]
    fn from(variant: RTT4B_A) -> Self {
        variant as u8 != 0
    }
}
impl RTT4B_R {
    #[doc = "Get enumerated values variant"]
    #[inline(always)]
    pub fn variant(&self) -> RTT4B_A {
        // The field is a single bit, so the mapping is a simple branch.
        if self.bits {
            RTT4B_A::Ready
        } else {
            RTT4B_A::NotReady
        }
    }
    #[doc = "FIFO is not ready for a four-byte transfer"]
    #[inline(always)]
    pub fn is_not_ready(&self) -> bool {
        matches!(self.variant(), RTT4B_A::NotReady)
    }
    #[doc = "FIFO is ready for a four-byte (32-bit) transfer. In receive mode, this means that at least four valid data bytes are in the FIFO. In transmit mode, this means that there are at least four bytes free in the FIFO"]
    #[inline(always)]
    pub fn is_ready(&self) -> bool {
        matches!(self.variant(), RTT4B_A::Ready)
    }
}
#[doc = "Field `RTT1B` reader - FIFO is ready to transfer one byte"]
pub type RTT1B_R = crate::BitReader<RTT1B_A>;
#[doc = "FIFO is ready to transfer one byte\n\nValue on reset: 0"]
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum RTT1B_A {
    #[doc = "0: FIFO is not ready for a 1-byte transfer"]
    NotReady = 0,
    #[doc = "1: FIFO is ready for a one byte (32-bit) transfer. In receive mode, this means that at least one valid data byte is in the FIFO. In transmit mode, this means that there is at least one byte free in the FIFO"]
    Ready = 1,
}
impl From<RTT1B_A> for bool {
    // Maps the enum back to its raw bit value (NotReady -> false, Ready -> true).
    #[inline(always)]
    fn from(variant: RTT1B_A) -> Self {
        variant as u8 != 0
    }
}
impl RTT1B_R {
    #[doc = "Get enumerated values variant"]
    #[inline(always)]
    pub fn variant(&self) -> RTT1B_A {
        // The field is a single bit, so the mapping is a simple branch.
        if self.bits {
            RTT1B_A::Ready
        } else {
            RTT1B_A::NotReady
        }
    }
    #[doc = "FIFO is not ready for a 1-byte transfer"]
    #[inline(always)]
    pub fn is_not_ready(&self) -> bool {
        matches!(self.variant(), RTT1B_A::NotReady)
    }
    #[doc = "FIFO is ready for a one byte (32-bit) transfer. In receive mode, this means that at least one valid data byte is in the FIFO. In transmit mode, this means that there is at least one byte free in the FIFO"]
    #[inline(always)]
    pub fn is_ready(&self) -> bool {
        matches!(self.variant(), RTT1B_A::Ready)
    }
}
impl R {
    // Bit positions (2 for RTT4B, 3 for RTT1B) come from the device's SVD.
    #[doc = "Bit 2 - FIFO is ready to transfer four bytes"]
    #[inline(always)]
    pub fn rtt4b(&self) -> RTT4B_R {
        RTT4B_R::new(((self.bits >> 2) & 1) != 0)
    }
    #[doc = "Bit 3 - FIFO is ready to transfer one byte"]
    #[inline(always)]
    pub fn rtt1b(&self) -> RTT1B_R {
        RTT1B_R::new(((self.bits >> 3) & 1) != 0)
    }
}
#[doc = "PSSI status register\n\nYou can [`read`](crate::generic::Reg::read) this register and get [`sr::R`](R). See [API](https://docs.rs/svd2rust/#read--modify--write-api)."]
pub struct SR_SPEC;
impl crate::RegisterSpec for SR_SPEC {
    // The register is 32 bits wide.
    type Ux = u32;
}
#[doc = "`read()` method returns [`sr::R`](R) reader structure"]
impl crate::Readable for SR_SPEC {}
#[doc = "`reset()` method sets SR to value 0"]
impl crate::Resettable for SR_SPEC {
    const RESET_VALUE: Self::Ux = 0;
}
|
use bevy::prelude::*;
use crate::digger::{Digger, DiggerState};
use crate::map::Map;
use crate::GameState;
/// Bevy plugin wiring up the home-base refueling behaviour.
pub struct BasePlugin;
/// Labels used to order base-related systems within a frame.
#[derive(SystemLabel, Eq, PartialEq, Hash, Clone, Debug)]
pub enum BaseSystemLabels {
    // Runs first so `fuel_up` sees an up-to-date `Base::active` flag.
    CheckPlayerPosition,
}
impl Plugin for BasePlugin {
    /// Registers the `Base` resource and the two systems that run while the
    /// game is in the `Playing` state. `fuel_up` is explicitly ordered
    /// after `check_player_position` so it reads the flag set this frame.
    fn build(&self, app: &mut AppBuilder) {
        app.insert_resource(Base { active: false }).add_system_set(
            SystemSet::on_update(GameState::Playing)
                .with_system(
                    check_player_position
                        .system()
                        .label(BaseSystemLabels::CheckPlayerPosition),
                )
                .with_system(
                    fuel_up
                        .system()
                        .after(BaseSystemLabels::CheckPlayerPosition),
                ),
        );
    }
}
/// Resource tracking whether the digger is currently parked at the base.
pub struct Base {
    // True while the digger is within one tile of the base position.
    pub active: bool,
}
/// Updates `Base::active` based on the digger's distance to the base.
///
/// The digger counts as "at the base" when its 2D position is within one
/// tile of `map.base`.
fn check_player_position(
    digger: Query<&Transform, With<Digger>>,
    map: Res<Map>,
    mut base: ResMut<Base>,
) {
    // `single()` fails if there are zero or multiple diggers; in that case
    // the flag is simply left unchanged.
    if let Ok(transform) = digger.single() {
        base.active = Vec2::new(transform.translation.x, transform.translation.y)
            .distance(map.base)
            <= map.tile_size;
    }
}
/// Refuels the digger while it is parked at the base, paying one unit of
/// money per unit of fuel.
fn fuel_up(base: Res<Base>, mut digger_state: ResMut<DiggerState>) {
    if !base.active {
        return;
    }
    let missing_fuel = digger_state.fuel_max - digger_state.fuel;
    if missing_fuel > digger_state.money {
        // Can't afford a full tank: spend everything on partial fuel.
        digger_state.fuel += digger_state.money;
        digger_state.money = 0.;
    } else {
        // Full tank, pay exactly what was missing.
        digger_state.money -= missing_fuel;
        digger_state.fuel = digger_state.fuel_max;
    }
}
|
use bytes::{Buf, BufMut};
use super::{QuicResult, QUIC_VERSION};
use codec::Codec;
/// Transport parameters sent by a QUIC client during the handshake.
#[derive(Clone, Debug, PartialEq)]
pub struct ClientTransportParameters {
    // The version the client first attempted to use.
    pub initial_version: u32,
    pub parameters: TransportParameters,
}
impl Default for ClientTransportParameters {
fn default() -> Self {
Self {
initial_version: QUIC_VERSION,
parameters: TransportParameters::default(),
}
}
}
impl Codec for ClientTransportParameters {
    // Wire format: 4-byte big-endian initial version, then the TLV-encoded
    // transport parameters.
    fn encode<T: BufMut>(&self, buf: &mut T) {
        buf.put_u32_be(self.initial_version);
        self.parameters.encode(buf);
    }
    fn decode<T: Buf>(buf: &mut T) -> QuicResult<Self> {
        Ok(ClientTransportParameters {
            initial_version: buf.get_u32_be(),
            parameters: TransportParameters::decode(buf)?,
        })
    }
}
/// Transport parameters sent by a QUIC server during the handshake.
#[derive(Clone, Debug, PartialEq)]
pub struct ServerTransportParameters {
    pub negotiated_version: u32,
    // All versions the server supports, advertised for version negotiation.
    pub supported_versions: Vec<u32>,
    pub parameters: TransportParameters,
}
impl Default for ServerTransportParameters {
fn default() -> Self {
Self {
negotiated_version: QUIC_VERSION,
supported_versions: vec![QUIC_VERSION],
parameters: TransportParameters::default(),
}
}
}
impl Codec for ServerTransportParameters {
    // Wire format: negotiated version, a u8 byte-length prefix for the
    // supported-version list (4 bytes per version), the versions, then the
    // TLV-encoded transport parameters.
    fn encode<T: BufMut>(&self, buf: &mut T) {
        buf.put_u32_be(self.negotiated_version);
        // NOTE(review): the u8 length prefix silently truncates if more
        // than 63 versions are listed — confirm that's out of range here.
        buf.put_u8((4 * self.supported_versions.len()) as u8);
        for v in &self.supported_versions {
            buf.put_u32_be(*v);
        }
        self.parameters.encode(buf);
    }
    fn decode<T: Buf>(buf: &mut T) -> QuicResult<Self> {
        Ok(ServerTransportParameters {
            negotiated_version: buf.get_u32_be(),
            supported_versions: {
                let mut supported_versions = vec![];
                // `take` caps the sub-reader at the advertised byte count so
                // the loop cannot run into the parameters section.
                let supported_bytes = buf.get_u8() as usize;
                let mut sub = buf.take(supported_bytes);
                while sub.has_remaining() {
                    supported_versions.push(sub.get_u32_be());
                }
                supported_versions
            },
            parameters: TransportParameters::decode(buf)?,
        })
    }
}
impl Codec for TransportParameters {
    // Wire format: a u16 total-length prefix followed by TLV entries
    // (tag: u16, length: u16, value). Tags 0 (max_stream_data),
    // 1 (max_data) and 3 (idle_timeout) are always written; the remaining
    // tags are written only when they differ from the implied defaults.
    fn encode<T: BufMut>(&self, buf: &mut T) {
        // `tmp` accumulates the TLV body so its total length can be written
        // first; `val` is a scratch buffer reused for each value.
        let mut tmp = vec![];
        let mut val = vec![];
        tmp.put_u16_be(0);
        val.put_u32_be(self.max_stream_data);
        tmp.put_u16_be(val.len() as u16);
        tmp.append(&mut val);
        val.truncate(0);
        tmp.put_u16_be(1);
        val.put_u32_be(self.max_data);
        tmp.put_u16_be(val.len() as u16);
        tmp.append(&mut val);
        val.truncate(0);
        tmp.put_u16_be(3);
        val.put_u16_be(self.idle_timeout);
        tmp.put_u16_be(val.len() as u16);
        tmp.append(&mut val);
        val.truncate(0);
        // Tag 2: only encoded when bidirectional streams are allowed.
        if self.max_streams_bidi > 0 {
            tmp.put_u16_be(2);
            val.put_u16_be(self.max_streams_bidi);
            tmp.put_u16_be(val.len() as u16);
            tmp.append(&mut val);
            val.truncate(0);
        }
        // Tag 5: omitted when equal to the default max packet size (65527).
        if self.max_packet_size != 65527 {
            tmp.put_u16_be(5);
            val.put_u16_be(self.max_packet_size);
            tmp.put_u16_be(val.len() as u16);
            tmp.append(&mut val);
            val.truncate(0);
        }
        // Tag 7: omitted when equal to the default ack-delay exponent (3).
        if self.ack_delay_exponent != 3 {
            tmp.put_u16_be(7);
            val.put_u8(self.ack_delay_exponent);
            tmp.put_u16_be(val.len() as u16);
            tmp.append(&mut val);
            val.truncate(0);
        }
        // Tag 8: only encoded when unidirectional streams are allowed.
        if self.max_stream_id_uni > 0 {
            tmp.put_u16_be(8);
            val.put_u16_be(self.max_stream_id_uni);
            tmp.put_u16_be(val.len() as u16);
            tmp.append(&mut val);
            val.truncate(0);
        }
        // Tag 6: fixed 16-byte stateless reset token, when present.
        if let Some(token) = self.stateless_reset_token {
            tmp.put_u16_be(6);
            tmp.put_u16_be(16);
            tmp.extend_from_slice(&token);
        }
        buf.put_u16_be(tmp.len() as u16);
        buf.put_slice(&tmp);
    }
    fn decode<T: Buf>(buf: &mut T) -> QuicResult<Self> {
        // Unknown tags are skipped; known tags overwrite the defaults.
        let mut params = TransportParameters::default();
        let num = buf.get_u16_be();
        let mut sub = buf.take(num as usize);
        while sub.has_remaining() {
            let tag = sub.get_u16_be();
            let size = sub.get_u16_be();
            match tag {
                0 => {
                    debug_assert_eq!(size, 4);
                    params.max_stream_data = sub.get_u32_be();
                }
                1 => {
                    debug_assert_eq!(size, 4);
                    params.max_data = sub.get_u32_be();
                }
                2 => {
                    debug_assert_eq!(size, 2);
                    params.max_streams_bidi = sub.get_u16_be();
                }
                3 => {
                    debug_assert_eq!(size, 2);
                    params.idle_timeout = sub.get_u16_be();
                }
                5 => {
                    debug_assert_eq!(size, 2);
                    params.max_packet_size = sub.get_u16_be();
                }
                6 => {
                    debug_assert_eq!(size, 16);
                    let mut token = [0; 16];
                    sub.copy_to_slice(&mut token);
                    params.stateless_reset_token = Some(token);
                }
                7 => {
                    debug_assert_eq!(size, 1);
                    params.ack_delay_exponent = sub.get_u8();
                }
                8 => {
                    debug_assert_eq!(size, 2);
                    params.max_stream_id_uni = sub.get_u16_be();
                }
                _ => {
                    // Unknown parameter: skip its value entirely.
                    sub.advance(usize::from(size));
                }
            }
        }
        Ok(params)
    }
}
/// QUIC transport parameters, keyed on the wire by the tag noted per field.
#[derive(Clone, Debug, PartialEq)]
pub struct TransportParameters {
    pub max_stream_data: u32,                    // 0x00
    pub max_data: u32,                           // 0x01
    pub max_streams_bidi: u16,                   // 0x02
    pub idle_timeout: u16,                       // 0x03
    pub max_packet_size: u16,                    // 0x05
    pub stateless_reset_token: Option<[u8; 16]>, // 0x06
    pub ack_delay_exponent: u8,                  // 0x07
    pub max_stream_id_uni: u16,                  // 0x08
}
impl Default for TransportParameters {
    /// Defaults mirrored by the encoder's "omit when default" rules
    /// (`max_packet_size` 65527, `ack_delay_exponent` 3).
    fn default() -> Self {
        Self {
            max_stream_data: 131_072,
            max_data: 1_048_576,
            max_streams_bidi: 4,
            idle_timeout: 300,
            max_packet_size: 65_527,
            stateless_reset_token: None,
            ack_delay_exponent: 3,
            max_stream_id_uni: 20,
        }
    }
}
#[cfg(test)]
mod tests {
    use super::TransportParameters;
    use super::{ClientTransportParameters, Codec, ServerTransportParameters};
    use std::fmt::Debug;
    use std::io::Cursor;
    // Encodes `t` and asserts decoding the bytes yields an equal value.
    fn round_trip<T: Codec + PartialEq + Debug>(t: T) {
        let buf = {
            let mut ret = Vec::new();
            t.encode(&mut ret);
            ret
        };
        let mut read = Cursor::new(&buf);
        assert_eq!(t, T::decode(&mut read).unwrap());
    }
    #[test]
    fn test_client_transport_parameters() {
        round_trip(ClientTransportParameters {
            initial_version: 1,
            parameters: TransportParameters {
                max_stream_data: 0,
                max_data: 1234,
                idle_timeout: 26,
                ..Default::default()
            },
        });
    }
    #[test]
    fn test_server_transport_parameters() {
        round_trip(ServerTransportParameters {
            negotiated_version: 1,
            supported_versions: vec![1, 2, 3],
            parameters: TransportParameters {
                stateless_reset_token: Some([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]),
                ..Default::default()
            },
        });
    }
    #[test]
    fn test_ignores_unknown_transport_parameter_ids() {
        // Hand-built TLV blob: a 130-byte (0x0082) body containing the five
        // known tags 0/1/2/8/3 followed by sixteen unknown 0xFFxx tags that
        // the decoder must skip without error.
        #[cfg_attr(rustfmt, rustfmt_skip)]
        let bytes = [0u8, 130,
            0, 0, 0, 4, 0, 0, 64, 0,
            0, 1, 0, 4, 0, 0, 128, 0,
            0, 2, 0, 2, 0, 1,
            0, 8, 0, 2, 0, 1,
            0, 3, 0, 2, 0, 10,
            255, 0, 0, 2, 255, 0,
            255, 1, 0, 2, 255, 1,
            255, 2, 0, 2, 255, 2,
            255, 3, 0, 2, 255, 3,
            255, 4, 0, 2, 255, 4,
            255, 5, 0, 2, 255, 5,
            255, 6, 0, 2, 255, 6,
            255, 7, 0, 2, 255, 7,
            255, 8, 0, 2, 255, 8,
            255, 9, 0, 2, 255, 9,
            255, 10, 0, 2, 255, 10,
            255, 11, 0, 2, 255, 11,
            255, 12, 0, 2, 255, 12,
            255, 13, 0, 2, 255, 13,
            255, 14, 0, 2, 255, 14,
            255, 15, 0, 2, 255, 15];
        let tp = TransportParameters::decode(&mut Cursor::new(bytes.as_ref())).unwrap();
        assert_eq!(
            tp,
            TransportParameters {
                max_stream_data: 16384,
                max_data: 32768,
                max_streams_bidi: 1,
                idle_timeout: 10,
                max_packet_size: 65527,
                stateless_reset_token: None,
                ack_delay_exponent: 3,
                max_stream_id_uni: 1,
            }
        );
    }
}
|
use cc;
/// Build script: compiles the bundled C inline-hook library, but only for
/// the 32-bit Android target it supports; on every other target it is a
/// no-op.
fn main() {
    let target = std::env::var("TARGET").unwrap();
    if target != "armv7-linux-androideabi" {
        return;
    }
    cc::Build::new()
        .warnings(false)
        .flag("-Wno-everything")
        .include("inline-hook")
        .file("inline-hook/inlineHook.c")
        .file("inline-hook/relocate.c")
        .compile("libinlinehook.a");
}
|
// NOTE(review): svd2rust-style generated register file; prefer regenerating
// from the SVD over hand-editing.
#[doc = "Register `PRLH` writer"]
pub type W = crate::W<PRLH_SPEC>;
#[doc = "Field `PRLH` writer - RTC Prescaler Load Register High"]
pub type PRLH_W<'a, REG, const O: u8> = crate::FieldWriter<'a, REG, 4, O>;
impl W {
    #[doc = "Bits 0:3 - RTC Prescaler Load Register High"]
    #[inline(always)]
    #[must_use]
    pub fn prlh(&mut self) -> PRLH_W<PRLH_SPEC, 0> {
        PRLH_W::new(self)
    }
    #[doc = "Writes raw bits to the register."]
    #[inline(always)]
    pub unsafe fn bits(&mut self, bits: u32) -> &mut Self {
        // SAFETY: caller must ensure the raw value is valid for this register.
        self.bits = bits;
        self
    }
}
#[doc = "RTC Prescaler Load Register High\n\nYou can [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero) this register using [`prlh::W`](W). See [API](https://docs.rs/svd2rust/#read--modify--write-api)."]
pub struct PRLH_SPEC;
impl crate::RegisterSpec for PRLH_SPEC {
    type Ux = u32;
}
#[doc = "`write(|w| ..)` method takes [`prlh::W`](W) writer structure"]
impl crate::Writable for PRLH_SPEC {
    const ZERO_TO_MODIFY_FIELDS_BITMAP: Self::Ux = 0;
    const ONE_TO_MODIFY_FIELDS_BITMAP: Self::Ux = 0;
}
#[doc = "`reset()` method sets PRLH to value 0"]
impl crate::Resettable for PRLH_SPEC {
    const RESET_VALUE: Self::Ux = 0;
}
|
// NOTE(review): svd2rust-style generated register file (older API
// generation); prefer regenerating from the SVD over hand-editing.
#[doc = "Reader of register SSPPCELLID0"]
pub type R = crate::R<u32, super::SSPPCELLID0>;
#[doc = "Reader of field `SSPPCELLID0`"]
pub type SSPPCELLID0_R = crate::R<u8, u8>;
impl R {
    #[doc = "Bits 0:7 - These bits read back as 0x0D"]
    #[inline(always)]
    pub fn ssppcellid0(&self) -> SSPPCELLID0_R {
        SSPPCELLID0_R::new((self.bits & 0xff) as u8)
    }
}
|
// Copyright (c) The Starcoin Core Contributors
// SPDX-License-Identifier: Apache-2.0
use cucumber::{after, before, cucumber, Steps, StepsBuilder};
use starcoin_config::NodeConfig;
use starcoin_logger::prelude::*;
use starcoin_node::NodeHandle;
use starcoin_rpc_client::RpcClient;
use starcoin_storage::cache_storage::CacheStorage;
use starcoin_storage::db_storage::DBStorage;
use starcoin_storage::storage::StorageInstance;
use starcoin_storage::Storage;
use starcoin_types::account_address::AccountAddress;
use starcoin_wallet_api::WalletAccount;
use std::env;
use std::sync::Arc;
use std::time::Duration;
use steps::{cmd as steps_cmd, node as steps_node, state as steps_state, sync, transaction};
mod steps;
/// Shared state threaded through every cucumber scenario. All fields start
/// as `None` and are populated lazily by the `given` steps below.
#[derive(Default)]
pub struct MyWorld {
    node_config: Option<NodeConfig>,
    storage: Option<Storage>,
    // Shared client for remote/dev nodes; Arc so steps can clone handles.
    rpc_client: Option<Arc<RpcClient>>,
    local_rpc_client: Option<RpcClient>,
    default_account: Option<WalletAccount>,
    // Throw-away account used for transaction steps.
    txn_account: Option<WalletAccount>,
    node_handle: Option<NodeHandle>,
    default_address: Option<AccountAddress>,
}
impl MyWorld {
    /// Borrows the storage created by the "a storage" step, if any.
    pub fn storage(&self) -> Option<&Storage> {
        // `Option::as_ref` is the idiomatic equivalent of the previous
        // `match` converting `&Option<T>` into `Option<&T>`.
        self.storage.as_ref()
    }
}
impl cucumber::World for MyWorld {}
/// Builds the shared cucumber step definitions: storage, rpc-client and
/// account fixtures that the other step modules rely on.
pub fn steps() -> Steps<MyWorld> {
    let mut builder: StepsBuilder<MyWorld> = Default::default();
    builder
        .given("a storage", |world: &mut MyWorld, _step| {
            let cache_storage = Arc::new(CacheStorage::new());
            let db_storage = Arc::new(DBStorage::new(starcoin_config::temp_path().as_ref()));
            let storage = Storage::new(StorageInstance::new_cache_and_db_instance(
                cache_storage,
                db_storage,
            ))
            .unwrap();
            info!("storage created!");
            world.storage = Some(storage)
        })
        .given("remote rpc client", |world: &mut MyWorld, _step| {
            // STARCOIN_WS must point at a running node's websocket endpoint.
            let rpc_addr = env::var("STARCOIN_WS").unwrap_or_else(|_| "".to_string());
            let client = RpcClient::connect_websocket(rpc_addr.as_ref()).unwrap();
            info!("rpc client created!");
            world.rpc_client = Some(Arc::new(client))
        })
        .given("dev rpc client", |world: &mut MyWorld, _step| {
            // The old `.as_ref().take()` was a no-op on a temporary
            // `Option<&T>`; a plain borrow is equivalent and clearer.
            let node_config = world.node_config.as_ref().unwrap();
            let client = RpcClient::connect_ipc(node_config.clone().rpc.get_ipc_file()).unwrap();
            info!("dev node local rpc client created!");
            world.rpc_client = Some(Arc::new(client))
        })
        .given("default account", |world: &mut MyWorld, _step| {
            let client = world.rpc_client.as_ref().unwrap();
            let default_account = client.wallet_default().unwrap().unwrap();
            info!("default account config success!");
            // Unlock with an empty password for the next five minutes.
            client
                .wallet_unlock(
                    default_account.address,
                    "".parse().unwrap(),
                    Duration::from_secs(300),
                )
                .unwrap();
            world.default_account = Some(default_account)
        })
        .given("an account", |world: &mut MyWorld, _step| {
            let client = world.rpc_client.as_ref().unwrap();
            let password = "integration";
            let account = client.wallet_create(password.parse().unwrap()).unwrap();
            client
                .wallet_unlock(
                    account.address,
                    password.parse().unwrap(),
                    Duration::from_secs(300),
                )
                .unwrap();
            info!("a account create success!");
            world.txn_account = Some(account)
        });
    builder.build()
}
// Declares a before handler function named `a_before_fn` (currently empty).
before!(a_before_fn => |_scenario| {
});
// Declares an after handler function named `an_after_fn` (currently empty).
after!(an_after_fn => |_scenario| {
});
// A setup function to be called before everything else.
fn setup() {}
// Entry point generated by cucumber: wires features, world, steps and hooks.
cucumber! {
    features: "./features", // Path to our feature files
    world: MyWorld, // The world needs to be the same for steps and the main cucumber call
    steps: &[
        crate::steps, // the `steps!` macro creates a `steps` function in a module
        transaction::steps,
        steps_node::steps,
        sync::steps,
        steps_state::steps,
        steps_cmd::steps,
    ],
    setup: setup, // Optional; called once before everything
    before: &[
        a_before_fn // Optional; called before each scenario
    ],
    after: &[
        an_after_fn // Optional; called after each scenario
    ]
}
|
use std::{
error::Error as StdError,
fmt,
net::{AddrParseError, Ipv4Addr, Ipv6Addr},
str::FromStr,
};
/// Outcome reported on the first line of a dyndns response.
#[derive(Copy, Clone, Debug, Eq, PartialEq)]
pub enum Status {
    Ok,
    Ko,
}
impl fmt::Display for Status {
    /// Renders the status exactly as it appears on the wire ("OK"/"KO").
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.write_str(match self {
            Status::Ok => "OK",
            Status::Ko => "KO",
        })
    }
}
/// Whether the dyndns update actually changed any record.
#[derive(Copy, Clone, Debug, Eq, PartialEq)]
pub enum Updated {
    Updated,
    Nochange,
}
impl fmt::Display for Updated {
    /// Renders the value exactly as it appears on the wire.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.write_str(match self {
            Updated::Updated => "UPDATED",
            Updated::Nochange => "NOCHANGE",
        })
    }
}
/// Errors produced while parsing a dyndns response from text.
#[derive(Debug)]
pub enum ParseResponseError {
    UnrecognizedStatus,
    BadAddr(AddrParseError),
    UnrecognizedUpdated,
    ExpectedEnd,
}
impl fmt::Display for ParseResponseError {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match self {
            // Address errors delegate to the underlying parse error.
            ParseResponseError::BadAddr(e) => e.fmt(f),
            ParseResponseError::UnrecognizedStatus => f.write_str("unrecognized status line"),
            ParseResponseError::UnrecognizedUpdated => f.write_str("unrecognized updated line"),
            ParseResponseError::ExpectedEnd => f.write_str("expected end of input"),
        }
    }
}
impl From<AddrParseError> for ParseResponseError {
    fn from(e: AddrParseError) -> Self {
        ParseResponseError::BadAddr(e)
    }
}
impl StdError for ParseResponseError {}
/// A parsed dyndns response: a status line optionally followed by the
/// updated IPv4/IPv6 addresses and an UPDATED/NOCHANGE marker.
#[derive(Debug)]
pub struct Response {
    status: Status,
    // `None` when the corresponding address line was empty or absent.
    ipv4: Option<Ipv4Addr>,
    ipv6: Option<Ipv6Addr>,
    // `None` when the response carried no update information.
    updated: Option<Updated>,
}
impl Response {
    /// The OK/KO status from the first line.
    pub fn status(&self) -> Status {
        self.status
    }
    /// The reported IPv4 address, if any.
    pub fn ipv4(&self) -> Option<&Ipv4Addr> {
        self.ipv4.as_ref()
    }
    /// The reported IPv6 address, if any.
    pub fn ipv6(&self) -> Option<&Ipv6Addr> {
        self.ipv6.as_ref()
    }
    /// Whether the update changed anything, if reported.
    pub fn updated(&self) -> Option<Updated> {
        self.updated
    }
}
impl fmt::Display for Response {
    /// Writes the wire format: status line, then — only when `updated` is
    /// present — an IPv4 line, an IPv6 line (either may be empty), and the
    /// UPDATED/NOCHANGE line. This is the inverse of `FromStr`.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "{}", self.status)?;
        if let Some(updated) = self.updated {
            writeln!(f)?;
            // Absent addresses are encoded as empty lines so the line
            // positions stay fixed.
            match self.ipv4 {
                Some(ipv4) => writeln!(f, "{}", ipv4)?,
                None => writeln!(f)?,
            }
            match self.ipv6 {
                Some(ipv6) => writeln!(f, "{}", ipv6)?,
                None => writeln!(f)?,
            }
            write!(f, "{}", updated)?;
        }
        Ok(())
    }
}
impl FromStr for Response {
    type Err = ParseResponseError;
    /// Parses the wire format written by `Display`: a status line, then an
    /// optional IPv4 line, optional IPv6 line and optional UPDATED/NOCHANGE
    /// line. A single trailing blank line is tolerated; anything beyond
    /// that is an error.
    fn from_str(s: &str) -> Result<Self, Self::Err> {
        let mut lines = s.lines();
        let status = match lines.next() {
            Some("OK") => Status::Ok,
            Some("KO") => Status::Ko,
            _ => return Err(ParseResponseError::UnrecognizedStatus),
        };
        // An empty line means "no address of this family".
        let ipv4 = match lines.next() {
            Some("") | None => None,
            Some(ipv4) => Some(ipv4.parse()?),
        };
        let ipv6 = match lines.next() {
            Some("") | None => None,
            Some(ipv6) => Some(ipv6.parse()?),
        };
        let updated = match lines.next() {
            Some("UPDATED") => Some(Updated::Updated),
            Some("NOCHANGE") => Some(Updated::Nochange),
            Some(_) => return Err(ParseResponseError::UnrecognizedUpdated),
            None => None,
        };
        // Idiom fix: use `is_none()` instead of `== None` comparisons
        // (clippy::partialeq_to_none); logic is unchanged.
        let last = lines.next();
        let at_end = last.is_none() || (last == Some("") && lines.next().is_none());
        if !at_end {
            return Err(ParseResponseError::ExpectedEnd);
        }
        Ok(Response {
            status,
            ipv4,
            ipv6,
            updated,
        })
    }
}
/// Errors produced while parsing a TXT-record dyndns response from text.
#[derive(Debug)]
pub enum ParseTxtResponseError {
    UnrecognizedStatus,
    UnrecognizedUpdated,
    ExpectedEnd,
}
impl fmt::Display for ParseTxtResponseError {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        let text = match self {
            ParseTxtResponseError::UnrecognizedStatus => "unrecognized status line",
            ParseTxtResponseError::UnrecognizedUpdated => "unrecognized updated line",
            ParseTxtResponseError::ExpectedEnd => "expected end of input",
        };
        f.write_str(text)
    }
}
impl StdError for ParseTxtResponseError {}
/// A parsed TXT-record dyndns response: status, optional TXT payload and
/// optional UPDATED/NOCHANGE marker.
#[derive(Debug)]
pub struct TxtResponse {
    status: Status,
    // The TXT record content line, if present.
    txt: Option<String>,
    updated: Option<Updated>,
}
impl TxtResponse {
    /// The OK/KO status from the first line.
    pub fn status(&self) -> Status {
        self.status
    }
    /// The TXT payload, if any.
    pub fn txt(&self) -> Option<&str> {
        self.txt.as_deref()
    }
    /// Whether the update changed anything, if reported.
    pub fn updated(&self) -> Option<Updated> {
        self.updated
    }
}
impl fmt::Display for TxtResponse {
    /// Writes the wire format: status line, then — only when `updated` is
    /// present — the TXT line (possibly empty) and the UPDATED/NOCHANGE
    /// line. This is the inverse of `FromStr`.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "{}", self.status)?;
        if let Some(updated) = self.updated {
            writeln!(f)?;
            // A missing TXT payload is encoded as an empty line so the
            // line positions stay fixed.
            match self.txt {
                Some(ref txt) => writeln!(f, "{}", txt)?,
                None => writeln!(f)?,
            }
            write!(f, "{}", updated)?;
        }
        Ok(())
    }
}
impl FromStr for TxtResponse {
    type Err = ParseTxtResponseError;
    /// Parses the wire format written by `Display`: a status line, then an
    /// optional TXT line and an optional UPDATED/NOCHANGE line. A single
    /// trailing blank line is tolerated; anything beyond that is an error.
    fn from_str(s: &str) -> Result<Self, Self::Err> {
        let mut lines = s.lines();
        let status = match lines.next() {
            Some("OK") => Status::Ok,
            Some("KO") => Status::Ko,
            _ => return Err(ParseTxtResponseError::UnrecognizedStatus),
        };
        // Idiom fix: `Option::map` replaces the manual Some/None match
        // (clippy::manual_map).
        let txt = lines.next().map(|txt| txt.to_owned());
        let updated = match lines.next() {
            Some("UPDATED") => Some(Updated::Updated),
            Some("NOCHANGE") => Some(Updated::Nochange),
            Some(_) => return Err(ParseTxtResponseError::UnrecognizedUpdated),
            None => None,
        };
        // Idiom fix: `is_none()` instead of `== None` comparisons
        // (clippy::partialeq_to_none); logic is unchanged.
        let last = lines.next();
        let at_end = last.is_none() || (last == Some("") && lines.next().is_none());
        if !at_end {
            return Err(ParseTxtResponseError::ExpectedEnd);
        }
        Ok(TxtResponse {
            status,
            txt,
            updated,
        })
    }
}
|
extern crate opencv;
extern crate libc;
// use networktables;
use opencv::_unsafe as cv;
// NOTE(review): this file targets pre-1.0 Rust and the legacy OpenCV 1.x C
// API (`cvLoadImage` etc.) via an `_unsafe` bindings module; it will not
// build on a modern toolchain without porting.
fn main() {
    unsafe {
        // Load, display, wait for a key, then release the image and window.
        // Each call's return value is printed for ad-hoc debugging.
        let mut img = cv::cvLoadImage(c_str("lena512.bmp"), cv::CV_LOAD_IMAGE_COLOR);
        println!("Image: {}", img);
        println!("Window: {}", cv::cvNamedWindow(c_str("Example1"), cv::CV_WINDOW_AUTOSIZE as i32));
        println!("Show Image: {}", cv::cvShowImage(c_str("Example1"),
                                                  img as *const libc::types::common::c95::c_void ));
        // Blocks until a key is pressed (0 = wait forever).
        println!("Wait Key: {}", cv::cvWaitKey(0));
        println!("Release Image: {}", cv::cvReleaseImage( &mut img ));
        println!("Destroy Window: {}", cv::cvDestroyWindow(c_str("Example1")));
    }
}
// NOTE(review): `str::to_c_str()` is pre-1.0 Rust and no longer exists; a
// modern port would use `std::ffi::CString`. Beware that returning a raw
// pointer derived from a temporary C string would dangle — the ownership of
// the returned pointer needs to be clarified before porting.
fn c_str(s: &str) -> *const i8 {
    unsafe { s.to_c_str().unwrap() }
}
|
// https://www.codewars.com/kata/55e7280b40e1c4a06d0000aa
extern crate itertools;
use self::itertools::Itertools;
/// Codewars "Best travel": picks exactly `k` distances from `ls` whose sum
/// is as large as possible without exceeding `t`; returns -1 when no such
/// selection exists (e.g. fewer than `k` distances available).
///
/// Rewritten without the `itertools` dependency: a recursive include/exclude
/// search replaces materializing every k-combination in a `Vec`. Assumes
/// non-negative distances, as the original's `-1` sentinel already did.
/// The `&[i32]` parameter is backward-compatible: `&Vec<i32>` arguments
/// deref-coerce to it.
fn choose_best_sum(t: i32, k: i32, ls: &[i32]) -> i32 {
    // Best sum <= `budget` using exactly `k` of the remaining distances,
    // or -1 if impossible.
    fn best(ls: &[i32], k: usize, budget: i32) -> i32 {
        if k == 0 {
            // Empty selection sums to 0; valid only if the budget allows it.
            return if budget >= 0 { 0 } else { -1 };
        }
        if ls.len() < k {
            return -1;
        }
        let first = ls[0];
        // Either take the first distance...
        let with_first = match best(&ls[1..], k - 1, budget - first) {
            -1 => -1,
            rest => rest + first,
        };
        // ...or skip it.
        let without_first = best(&ls[1..], k, budget);
        with_first.max(without_first)
    }
    if k < 0 {
        return -1;
    }
    best(ls, k as usize, t)
}
#[cfg(test)]
mod tests {
    use super::*;
    // Thin wrapper so each case reads as (target, count, distances, expected).
    fn testing(t: i32, k: i32, ls: &Vec<i32>, exp: i32) -> () {
        assert_eq!(choose_best_sum(t, k, ls), exp)
    }
    #[test]
    fn basics_choose_best_sum() {
        // Exact hit: 50 + 55 + 58 == 163.
        let ts = &vec![50, 55, 56, 57, 58];
        testing(163, 3, ts, 163);
        // Fewer than k distances available -> -1.
        let ts = &vec![50];
        testing(163, 3, ts, -1);
        // Best achievable sums below the targets.
        let ts = &vec![91, 74, 73, 85, 73, 81, 87];
        testing(230, 3, ts, 228);
        testing(331, 2, ts, 178);
    }
}
|
/**********************************************
> File Name : AuthenticationManager.rs
> Author : lunar
> Email : lunar_ubuntu@qq.com
> Created Time : Sun 13 Feb 2022 06:28:17 PM CST
> Location : Shanghai
> Copyright@ https://github.com/xiaoqixian
**********************************************/
use std::collections::HashMap;
struct AuthenticationManager {
inner: HashMap<String, i32>,
time_to_live: i32
}
/*
* `&self` means the method takes an immutable reference.
* If you need a mutable reference, change it to `&mut self` instead.
*/
impl AuthenticationManager {
fn new(time_to_live: i32) -> Self {
Self {
inner: HashMap::new(),
time_to_live
}
}
fn generate(&mut self, token_id: String, current_time: i32) {
self.inner.insert(token_id, current_time + self.time_to_live);
}
fn renew(&mut self, token_id: String, current_time: i32) {
match self.inner.get_mut(&token_id) {
None => (),
Some(expire_time) => {
if (*expire_time > current_time) {
*expire_time = self.time_to_live + current_time;
}
}
}
}
fn count_unexpired_tokens(&self, current_time: i32) -> i32 {
let mut res = 0;
for (_, expire_time) in self.inner.into_iter() {
if expire_time > current_time {
res += 1;
}
}
res
}
}
/*
* Your AuthenticationManager object will be instantiated and called as such:
* let obj = AuthenticationManager::new(time_to_live);
* obj.generate(tokenId, currentTime);
* obj.renew(tokenId, currentTime);
* let ret_3: i32 = obj.count_unexpired_tokens(currentTime);
*/
|
use clap::{Arg, SubCommand};
use super::{config::Config, Command};
/// The `repo` CLI command, grouping the `clone` and `search` subcommands.
pub struct Repo;
impl Command for Repo {
    fn info<'a, 'b>() -> clap::App<'a, 'b> {
        SubCommand::with_name("repo")
            // BUG FIX: the about text previously read "clone command",
            // copy-pasted from the clone subcommand's description.
            .about("repo command")
            .subcommand(SubCmdClone::info())
            .subcommand(SubCmdSearch::info())
    }
    fn execute(_matches: &clap::ArgMatches<'_>, _: Config) {
        todo!()
    }
}
/// `repo clone <repo>`: clones the named repository (not yet implemented).
struct SubCmdClone;
impl Command for SubCmdClone {
    fn info<'a, 'b>() -> clap::App<'a, 'b> {
        // One required positional argument: the repository to clone.
        SubCommand::with_name("clone").arg(Arg::with_name("repo").required(true).takes_value(true))
    }
    fn execute(_matches: &clap::ArgMatches<'_>, _: Config) {
        todo!()
    }
}
/// `repo search <repo_name>`: searches for a repository (not yet implemented).
struct SubCmdSearch;
impl Command for SubCmdSearch {
    fn info<'a, 'b>() -> clap::App<'a, 'b> {
        // One required positional argument: the name to search for.
        SubCommand::with_name("search")
            .arg(Arg::with_name("repo_name").required(true).takes_value(true))
    }
    fn execute(_matches: &clap::ArgMatches<'_>, _: Config) {
        todo!()
    }
}
|
// NOTE(review): svd2rust-style generated register file (FCR1 "freeze
// clock" bit writers); prefer regenerating from the SVD over hand-editing.
#[doc = "Register `FCR1` writer"]
pub type W = crate::W<FCR1_SPEC>;
#[doc = "Field `TIM2FC` writer - TIM2FC"]
pub type TIM2FC_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `TIM3FC` writer - TIM3FC"]
pub type TIM3FC_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `TIM4FC` writer - TIM4FC"]
pub type TIM4FC_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `TIM5FC` writer - TIM5FC"]
pub type TIM5FC_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `TIM6FC` writer - TIM6FC"]
pub type TIM6FC_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `TIM7FC` writer - TIM7FC"]
pub type TIM7FC_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `WWDGFC` writer - WWDGFC"]
pub type WWDGFC_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `IWDGFC` writer - IWDGFC"]
pub type IWDGFC_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `SPI2FC` writer - SPI2FC"]
pub type SPI2FC_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `SPI3FC` writer - SPI3FC"]
pub type SPI3FC_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `USART2FC` writer - USART2FC"]
pub type USART2FC_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `USART3FC` writer - USART3FC"]
pub type USART3FC_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `UART4FC` writer - UART4FC"]
pub type UART4FC_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `UART5FC` writer - UART5FC"]
pub type UART5FC_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `I2C1FC` writer - I2C1FC"]
pub type I2C1FC_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `I2C2FC` writer - I2C2FC"]
pub type I2C2FC_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `I2C3FC` writer - I2C3FC"]
pub type I2C3FC_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `CRSFC` writer - CRSFC"]
pub type CRSFC_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `DACFC` writer - DACFC"]
pub type DACFC_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `OPAMPFC` writer - OPAMPFC"]
pub type OPAMPFC_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `LPTIM1FC` writer - LPTIM1FC"]
pub type LPTIM1FC_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `LPUART1FC` writer - LPUART1FC"]
pub type LPUART1FC_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `I2C4FC` writer - I2C4FC"]
pub type I2C4FC_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `LPTIM2FC` writer - LPTIM2FC"]
pub type LPTIM2FC_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `LPTIM3FC` writer - LPTIM3FC"]
pub type LPTIM3FC_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `FDCAN1FC` writer - FDCAN1FC"]
pub type FDCAN1FC_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `USBFSFC` writer - USBFSFC"]
pub type USBFSFC_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `UCPD1FC` writer - UCPD1FC"]
pub type UCPD1FC_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `VREFBUFFC` writer - VREFBUFFC"]
pub type VREFBUFFC_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `COMPFC` writer - COMPFC"]
pub type COMPFC_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `TIM1FC` writer - TIM1FC"]
pub type TIM1FC_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `SPI1FC` writer - SPI1FC"]
pub type SPI1FC_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
impl W {
// svd2rust-generated writer accessors for FCR1. Each method returns a one-bit
// writer proxy positioned at the field's bit offset; do not edit by hand —
// regenerate from the SVD instead.
#[doc = "Bit 0 - TIM2FC"]
#[inline(always)]
#[must_use]
pub fn tim2fc(&mut self) -> TIM2FC_W<FCR1_SPEC, 0> {
TIM2FC_W::new(self)
}
#[doc = "Bit 1 - TIM3FC"]
#[inline(always)]
#[must_use]
pub fn tim3fc(&mut self) -> TIM3FC_W<FCR1_SPEC, 1> {
TIM3FC_W::new(self)
}
#[doc = "Bit 2 - TIM4FC"]
#[inline(always)]
#[must_use]
pub fn tim4fc(&mut self) -> TIM4FC_W<FCR1_SPEC, 2> {
TIM4FC_W::new(self)
}
#[doc = "Bit 3 - TIM5FC"]
#[inline(always)]
#[must_use]
pub fn tim5fc(&mut self) -> TIM5FC_W<FCR1_SPEC, 3> {
TIM5FC_W::new(self)
}
#[doc = "Bit 4 - TIM6FC"]
#[inline(always)]
#[must_use]
pub fn tim6fc(&mut self) -> TIM6FC_W<FCR1_SPEC, 4> {
TIM6FC_W::new(self)
}
#[doc = "Bit 5 - TIM7FC"]
#[inline(always)]
#[must_use]
pub fn tim7fc(&mut self) -> TIM7FC_W<FCR1_SPEC, 5> {
TIM7FC_W::new(self)
}
#[doc = "Bit 6 - WWDGFC"]
#[inline(always)]
#[must_use]
pub fn wwdgfc(&mut self) -> WWDGFC_W<FCR1_SPEC, 6> {
WWDGFC_W::new(self)
}
#[doc = "Bit 7 - IWDGFC"]
#[inline(always)]
#[must_use]
pub fn iwdgfc(&mut self) -> IWDGFC_W<FCR1_SPEC, 7> {
IWDGFC_W::new(self)
}
#[doc = "Bit 8 - SPI2FC"]
#[inline(always)]
#[must_use]
pub fn spi2fc(&mut self) -> SPI2FC_W<FCR1_SPEC, 8> {
SPI2FC_W::new(self)
}
#[doc = "Bit 9 - SPI3FC"]
#[inline(always)]
#[must_use]
pub fn spi3fc(&mut self) -> SPI3FC_W<FCR1_SPEC, 9> {
SPI3FC_W::new(self)
}
#[doc = "Bit 10 - USART2FC"]
#[inline(always)]
#[must_use]
pub fn usart2fc(&mut self) -> USART2FC_W<FCR1_SPEC, 10> {
USART2FC_W::new(self)
}
#[doc = "Bit 11 - USART3FC"]
#[inline(always)]
#[must_use]
pub fn usart3fc(&mut self) -> USART3FC_W<FCR1_SPEC, 11> {
USART3FC_W::new(self)
}
#[doc = "Bit 12 - UART4FC"]
#[inline(always)]
#[must_use]
pub fn uart4fc(&mut self) -> UART4FC_W<FCR1_SPEC, 12> {
UART4FC_W::new(self)
}
#[doc = "Bit 13 - UART5FC"]
#[inline(always)]
#[must_use]
pub fn uart5fc(&mut self) -> UART5FC_W<FCR1_SPEC, 13> {
UART5FC_W::new(self)
}
#[doc = "Bit 14 - I2C1FC"]
#[inline(always)]
#[must_use]
pub fn i2c1fc(&mut self) -> I2C1FC_W<FCR1_SPEC, 14> {
I2C1FC_W::new(self)
}
#[doc = "Bit 15 - I2C2FC"]
#[inline(always)]
#[must_use]
pub fn i2c2fc(&mut self) -> I2C2FC_W<FCR1_SPEC, 15> {
I2C2FC_W::new(self)
}
#[doc = "Bit 16 - I2C3FC"]
#[inline(always)]
#[must_use]
pub fn i2c3fc(&mut self) -> I2C3FC_W<FCR1_SPEC, 16> {
I2C3FC_W::new(self)
}
#[doc = "Bit 17 - CRSFC"]
#[inline(always)]
#[must_use]
pub fn crsfc(&mut self) -> CRSFC_W<FCR1_SPEC, 17> {
CRSFC_W::new(self)
}
#[doc = "Bit 18 - DACFC"]
#[inline(always)]
#[must_use]
pub fn dacfc(&mut self) -> DACFC_W<FCR1_SPEC, 18> {
DACFC_W::new(self)
}
#[doc = "Bit 19 - OPAMPFC"]
#[inline(always)]
#[must_use]
pub fn opampfc(&mut self) -> OPAMPFC_W<FCR1_SPEC, 19> {
OPAMPFC_W::new(self)
}
#[doc = "Bit 20 - LPTIM1FC"]
#[inline(always)]
#[must_use]
pub fn lptim1fc(&mut self) -> LPTIM1FC_W<FCR1_SPEC, 20> {
LPTIM1FC_W::new(self)
}
#[doc = "Bit 21 - LPUART1FC"]
#[inline(always)]
#[must_use]
pub fn lpuart1fc(&mut self) -> LPUART1FC_W<FCR1_SPEC, 21> {
LPUART1FC_W::new(self)
}
#[doc = "Bit 22 - I2C4FC"]
#[inline(always)]
#[must_use]
pub fn i2c4fc(&mut self) -> I2C4FC_W<FCR1_SPEC, 22> {
I2C4FC_W::new(self)
}
#[doc = "Bit 23 - LPTIM2FC"]
#[inline(always)]
#[must_use]
pub fn lptim2fc(&mut self) -> LPTIM2FC_W<FCR1_SPEC, 23> {
LPTIM2FC_W::new(self)
}
#[doc = "Bit 24 - LPTIM3FC"]
#[inline(always)]
#[must_use]
pub fn lptim3fc(&mut self) -> LPTIM3FC_W<FCR1_SPEC, 24> {
LPTIM3FC_W::new(self)
}
#[doc = "Bit 25 - FDCAN1FC"]
#[inline(always)]
#[must_use]
pub fn fdcan1fc(&mut self) -> FDCAN1FC_W<FCR1_SPEC, 25> {
FDCAN1FC_W::new(self)
}
#[doc = "Bit 26 - USBFSFC"]
#[inline(always)]
#[must_use]
pub fn usbfsfc(&mut self) -> USBFSFC_W<FCR1_SPEC, 26> {
USBFSFC_W::new(self)
}
#[doc = "Bit 27 - UCPD1FC"]
#[inline(always)]
#[must_use]
pub fn ucpd1fc(&mut self) -> UCPD1FC_W<FCR1_SPEC, 27> {
UCPD1FC_W::new(self)
}
#[doc = "Bit 28 - VREFBUFFC"]
#[inline(always)]
#[must_use]
pub fn vrefbuffc(&mut self) -> VREFBUFFC_W<FCR1_SPEC, 28> {
VREFBUFFC_W::new(self)
}
#[doc = "Bit 29 - COMPFC"]
#[inline(always)]
#[must_use]
pub fn compfc(&mut self) -> COMPFC_W<FCR1_SPEC, 29> {
COMPFC_W::new(self)
}
#[doc = "Bit 30 - TIM1FC"]
#[inline(always)]
#[must_use]
pub fn tim1fc(&mut self) -> TIM1FC_W<FCR1_SPEC, 30> {
TIM1FC_W::new(self)
}
#[doc = "Bit 31 - SPI1FC"]
#[inline(always)]
#[must_use]
pub fn spi1fc(&mut self) -> SPI1FC_W<FCR1_SPEC, 31> {
SPI1FC_W::new(self)
}
#[doc = "Writes raw bits to the register."]
#[inline(always)]
// Safety: the caller must ensure the raw value is valid for this register
// (contract inherited from the svd2rust generic register API).
pub unsafe fn bits(&mut self, bits: u32) -> &mut Self {
self.bits = bits;
self
}
}
#[doc = "TZIC interrupt clear register 1\n\nYou can [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero) this register using [`fcr1::W`](W). See [API](https://docs.rs/svd2rust/#read--modify--write-api)."]
pub struct FCR1_SPEC;
// Marker type tying the generic register machinery to FCR1: a 32-bit register.
impl crate::RegisterSpec for FCR1_SPEC {
type Ux = u32;
}
#[doc = "`write(|w| ..)` method takes [`fcr1::W`](W) writer structure"]
impl crate::Writable for FCR1_SPEC {
// No bits require a fixed 0/1 to be written during read-modify-write.
const ZERO_TO_MODIFY_FIELDS_BITMAP: Self::Ux = 0;
const ONE_TO_MODIFY_FIELDS_BITMAP: Self::Ux = 0;
}
#[doc = "`reset()` method sets FCR1 to value 0"]
impl crate::Resettable for FCR1_SPEC {
const RESET_VALUE: Self::Ux = 0;
}
|
use chore::*;
#[test]
fn general() -> Result<()> {
    // Table-driven end-to-end test for `chore::run`. The simulated clock and
    // the three-line task fixture are identical in every case, so they are
    // hoisted here instead of being repeated six times.
    let now = chrono::NaiveDate::from_ymd(2001, 2, 3).and_hms(4, 5, 6);
    let tasks = concat!(
        "(M) 2001-02-03 @home +chore add tests\n",
        "add task due:2002-03-04T05:06:07\n",
        "x 2001-02-03 (H) 2001-01-02 @work issue:123\n",
    );
    for (config, expect) in &[
        // Filter alias: "completed" expands to "+done", selecting only task 3.
        (
            Config {
                now,
                args: vec!["completed".to_owned(), " ".to_owned()],
                tasks: Some(tasks.to_string()),
                filter_aliases: vec![File {
                    name: "completed".to_string(),
                    content: "+done".to_string(),
                }],
                ..Default::default()
            },
            Output::JustPrint {
                stdout: concat!("3 x 2001-02-03 (H) 2001-01-02 @work issue:123\n",).to_string(),
            },
        ),
        // Command alias: "done" expands to "modify +done end:today"; the task
        // matching pri:M is completed and an undo entry is written.
        (
            Config {
                now,
                args: vec!["pri:M".to_owned(), "done".to_owned()],
                tasks: Some(tasks.to_string()),
                command_aliases: vec![File {
                    name: "done".to_string(),
                    content: "modify +done end:today".to_string(),
                }],
                ..Default::default()
            },
            Output::WriteFiles {
                stdout: concat!(
                    "DEL (M) 2001-02-03 @home +chore add tests\n",
                    "ADD x 2001-02-03 (M) 2001-02-03 @home +chore add tests\n",
                )
                .to_string(),
                confirm: false,
                tasks: concat!(
                    "add task due:2002-03-04T05:06:07\n",
                    "x 2001-02-03 (H) 2001-01-02 @work issue:123\n",
                    "x 2001-02-03 (M) 2001-02-03 @home +chore add tests\n",
                )
                .to_string(),
                undo: concat!(
                    "---\n",
                    "DEL (M) 2001-02-03 @home +chore add tests\n",
                    "ADD x 2001-02-03 (M) 2001-02-03 @home +chore add tests\n",
                )
                .to_string(),
            },
        ),
        // Modification alias: "reopen" expands to "-+done -end:", reopening the
        // completed task.
        (
            Config {
                now,
                args: vec!["+done".to_owned(), "modify".to_owned(), "reopen".to_owned()],
                tasks: Some(tasks.to_string()),
                modification_aliases: vec![File {
                    name: "reopen".to_string(),
                    content: "-+done -end:".to_string(),
                }],
                ..Default::default()
            },
            Output::WriteFiles {
                stdout: concat!(
                    "DEL x 2001-02-03 (H) 2001-01-02 @work issue:123\n",
                    "ADD (H) 2001-01-02 @work issue:123\n",
                )
                .to_string(),
                confirm: false,
                tasks: concat!(
                    "(H) 2001-01-02 @work issue:123\n",
                    "(M) 2001-02-03 @home +chore add tests\n",
                    "add task due:2002-03-04T05:06:07\n",
                )
                .to_string(),
                undo: concat!(
                    "---\n",
                    "DEL x 2001-02-03 (H) 2001-01-02 @work issue:123\n",
                    "ADD (H) 2001-01-02 @work issue:123\n",
                )
                .to_string(),
            },
        ),
        // No args: the default filter ("pending" -> "-+done") hides done tasks.
        (
            Config {
                now,
                args: vec![],
                tasks: Some(tasks.to_string()),
                default_filters: vec![File {
                    name: "pending".to_string(),
                    content: "-+done".to_string(),
                }],
                ..Default::default()
            },
            Output::JustPrint {
                stdout: concat!(
                    "1 (M) 2001-02-03 @home +chore add tests\n",
                    "2 add task due:2002-03-04T05:06:07\n",
                )
                .to_string(),
            },
        ),
        // Whitespace-only args interleaved with two negative filters; only the
        // task matching neither pri:M nor +done remains.
        (
            Config {
                now,
                args: vec![
                    " ".to_owned(),
                    "-pri:M".to_owned(),
                    " ".to_owned(),
                    "-+done".to_owned(),
                    " ".to_owned(),
                ],
                tasks: Some(tasks.to_string()),
                ..Default::default()
            },
            Output::JustPrint {
                stdout: concat!("2 add task due:2002-03-04T05:06:07\n",).to_string(),
            },
        ),
        // A "/.../" text filter with an explicit modify; the (unused) "done"
        // command alias must not interfere.
        (
            Config {
                now,
                args: vec![
                    "/add tests/".to_owned(),
                    "modify".to_owned(),
                    "pri:Z +done".to_owned(),
                ],
                tasks: Some(tasks.to_string()),
                command_aliases: vec![File {
                    name: "done".to_string(),
                    content: "modify +done end:today".to_string(),
                }],
                ..Default::default()
            },
            Output::WriteFiles {
                stdout: concat!(
                    "DEL (M) 2001-02-03 @home +chore add tests\n",
                    "ADD x (Z) 2001-02-03 @home +chore add tests\n",
                )
                .to_string(),
                confirm: false,
                tasks: concat!(
                    "add task due:2002-03-04T05:06:07\n",
                    "x (Z) 2001-02-03 @home +chore add tests\n",
                    "x 2001-02-03 (H) 2001-01-02 @work issue:123\n",
                )
                .to_string(),
                undo: concat!(
                    "---\n",
                    "DEL (M) 2001-02-03 @home +chore add tests\n",
                    "ADD x (Z) 2001-02-03 @home +chore add tests\n",
                )
                .to_string(),
            },
        ),
    ] {
        let actual = chore::run(config.clone())?;
        assert_eq!(actual, *expect);
    }
    Ok(())
}
|
use serde::{Deserialize, Serialize};
/// Serializable context wrapper; round-trips via serde.
#[derive(Serialize, Deserialize, Debug, PartialEq)]
pub struct Context {
// Optional extension payload; serialized as absent when `None`.
ext: Option<ContextExt>,
}
/// Extension payload for [`Context`]; currently carries no fields.
#[derive(Serialize, Deserialize, Debug, PartialEq)]
pub struct ContextExt {}
|
use crate::types::Vec3;
/// A simple container describing a force that is to be applied.
pub struct Force {
    /// The force vector itself.
    pub force: Vec3,
    /// Where the force acts, expressed in world coordinates.
    pub position: Vec3,
}
impl Force {
/// Creates a new instance by consuming the given vectors.
pub fn new(force : Vec3, position : Vec3) -> Force {
Force { force, position }
}
}
|
/// Empty marker struct; the sibling `s00xx_*` modules implement their
/// solutions on it (presumably LeetCode-style — confirm against those modules).
pub struct Solution {}
mod s0013_roman_to_integer;
mod s0014_longest_common_prefix;
mod s0015_3_sum; |
extern crate dotenv;
use crate::graphql::utils::generate_uuid_from_str;
use crate::graphql::Context;
use crate::models::user::User;
use chrono::*;
use uuid::Uuid;
// GraphQL resolvers for the `User` model. Plain `//` comments are used on
// purpose: `///` doc comments would be picked up by juniper as field
// descriptions and change the exposed schema.
#[juniper::graphql_object(description = "A user", name = "User", Context = Context)]
impl User {
fn id(&self) -> i32 {
self.id
}
// Parses the stored string UUID; presumably None when the stored value is
// not a valid UUID — confirm in graphql::utils::generate_uuid_from_str.
fn uuid(&self) -> Option<Uuid> {
generate_uuid_from_str(self.uuid.as_str())
}
fn email(&self) -> String {
self.email.to_string()
}
// Timestamps are stored as naive values and exposed as UTC.
fn created_at(&self) -> DateTime<Utc> {
DateTime::<Utc>::from_utc(self.created_at, Utc)
}
fn updated_at(&self) -> DateTime<Utc> {
DateTime::<Utc>::from_utc(self.updated_at, Utc)
}
fn deleted(&self) -> bool {
self.deleted
}
}
/// Result of decrypting a JWT: the bearer string paired with the user it
/// authenticates; returned to the client on login.
pub struct Token {
// The raw bearer token string, if one was issued.
pub bearer: Option<String>,
// The authenticated user this token belongs to.
pub user: User,
}
// GraphQL resolvers for `Token`. Plain `//` comments on purpose: doc comments
// would become juniper field descriptions.
#[juniper::graphql_object(description = "The token object with user information", Context = Context)]
impl Token {
    fn bearer(&self) -> Option<String> {
        // Fix: the previous `Some(self.bearer.as_ref().expect("").to_string())`
        // panicked (with an empty message) whenever `bearer` was `None`, even
        // though the resolver's return type is `Option<String>`. Cloning the
        // field returns `None` gracefully instead.
        self.bearer.clone()
    }
    fn user(&self) -> &User {
        &self.user
    }
}
|
use crate::bond::deposit_farm_share;
use crate::contract::{handle, init, query};
use crate::mock_querier::{mock_dependencies, WasmMockQuerier};
use crate::state::{pool_info_read, pool_info_store, read_config};
use pylon_token::gov::Cw20HookMsg as PylonGovCw20HookMsg;
use pylon_token::gov::HandleMsg as PylonGovHandleMsg;
use pylon_token::staking::HandleMsg as PylonStakingHandleMsg;
use cosmwasm_std::testing::{mock_env, MockApi, MockStorage, MOCK_CONTRACT_ADDR};
use cosmwasm_std::{
from_binary, to_binary, Api, Coin, CosmosMsg, Decimal, Extern, HumanAddr, Uint128, WasmMsg,
};
use cw20::{Cw20HandleMsg, Cw20ReceiveMsg};
use schemars::JsonSchema;
use serde::{Deserialize, Serialize};
use spectrum_protocol::pylon_farm::{
ConfigInfo, Cw20HookMsg, HandleMsg, PoolItem, PoolsResponse, QueryMsg, StateInfo,
};
use spectrum_protocol::gov::{Cw20HookMsg as GovCw20HookMsg, HandleMsg as GovHandleMsg};
use std::fmt::Debug;
use terraswap::asset::{Asset, AssetInfo, PairInfo};
use terraswap::pair::{Cw20HookMsg as TerraswapCw20HookMsg, HandleMsg as TerraswapHandleMsg};
// Mock bech32-less addresses used throughout these tests.
// Spectrum protocol side:
const SPEC_GOV: &str = "spec_gov";
const SPEC_PLATFORM: &str = "spec_platform";
const SPEC_TOKEN: &str = "spec_token";
const SPEC_LP: &str = "spec_lp";
const SPEC_POOL: &str = "spec_pool";
// Pylon (MINE) side:
const MINE_GOV: &str = "mine_gov";
const MINE_TOKEN: &str = "mine_token";
const MINE_STAKING: &str = "mine_staking";
const MINE_LP: &str = "mine_lp";
const MINE_POOL: &str = "mine_pool";
// Infrastructure and test actors:
const TERRA_SWAP: &str = "terra_swap";
const TEST_CREATOR: &str = "creator";
const TEST_CONTROLLER: &str = "controller";
// Deliberately unregistered token/LP used to exercise failure paths:
const FAIL_TOKEN: &str = "fail_token";
const FAIL_LP: &str = "fail_lp";
const USER1: &str = "user1";
const USER2: &str = "user2";
// Local mirror of the contract's reward_info query response, used to
// deserialize query results in these tests.
#[derive(Serialize, Deserialize, Clone, Debug, PartialEq, JsonSchema)]
pub struct RewardInfoResponse {
pub staker_addr: HumanAddr,
pub reward_infos: Vec<RewardInfoResponseItem>,
}
// Per-asset reward entry within RewardInfoResponse. Field semantics follow the
// pylon_farm contract's reward accounting (shares vs. amounts, auto- vs.
// stake-compounded portions).
#[derive(Serialize, Deserialize, Clone, Debug, PartialEq, JsonSchema)]
pub struct RewardInfoResponseItem {
pub asset_token: HumanAddr,
pub farm_share_index: Decimal,
pub auto_spec_share_index: Decimal,
pub stake_spec_share_index: Decimal,
pub bond_amount: Uint128,
pub auto_bond_amount: Uint128,
pub stake_bond_amount: Uint128,
pub farm_share: Uint128,
pub spec_share: Uint128,
pub auto_bond_share: Uint128,
pub stake_bond_share: Uint128,
pub pending_farm_reward: Uint128,
pub pending_spec_reward: Uint128,
pub accum_spec_share: Uint128,
pub locked_spec_share: Uint128,
pub locked_spec_reward: Uint128,
}
#[test]
fn test() {
// Single entry point: the scenario helpers below share one mock chain state
// and are intentionally run in this order (later steps depend on state set
// up by earlier ones).
let mut deps = mock_dependencies(20, &[]);
deps.querier.with_balance_percent(100);
// Register the MINE/UST and SPEC/UST terraswap pairs with the mock querier.
deps.querier.with_terraswap_pairs(&[
(
&"uusdmine_token".to_string(),
&PairInfo {
asset_infos: [
AssetInfo::Token {
contract_addr: HumanAddr::from(MINE_TOKEN),
},
AssetInfo::NativeToken {
denom: "uusd".to_string(),
},
],
contract_addr: HumanAddr::from(MINE_POOL),
liquidity_token: HumanAddr::from(MINE_LP),
},
),
(
&"uusdspec_token".to_string(),
&PairInfo {
asset_infos: [
AssetInfo::Token {
contract_addr: HumanAddr::from(SPEC_TOKEN),
},
AssetInfo::NativeToken {
denom: "uusd".to_string(),
},
],
contract_addr: HumanAddr::from(SPEC_POOL),
liquidity_token: HumanAddr::from(SPEC_LP),
},
),
]);
// 1% tax on uusd transfers, capped at 1.5 UST.
deps.querier.with_tax(
Decimal::percent(1),
&[(&"uusd".to_string(), &Uint128(1500000u128))],
);
let _ = test_config(&mut deps);
test_register_asset(&mut deps);
test_compound_unauthorized(&mut deps);
test_compound_zero(&mut deps);
test_compound_mine_from_allowance(&mut deps);
test_bond(&mut deps);
test_compound_mine(&mut deps);
test_compound_mine_with_fees(&mut deps);
}
// Initializes the contract, verifies config/state queries, and checks that
// update_config is owner-gated. Returns the (updated) config for later steps.
fn test_config(deps: &mut Extern<MockStorage, MockApi, WasmMockQuerier>) -> ConfigInfo {
// test init & read config & read state
let env = mock_env(TEST_CREATOR, &[]);
let mut config = ConfigInfo {
owner: HumanAddr::from(TEST_CREATOR),
spectrum_gov: HumanAddr::from(SPEC_GOV),
spectrum_token: HumanAddr::from(SPEC_TOKEN),
pylon_gov: HumanAddr::from(MINE_GOV),
pylon_token: HumanAddr::from(MINE_TOKEN),
pylon_staking: HumanAddr::from(MINE_STAKING),
terraswap_factory: HumanAddr::from(TERRA_SWAP),
platform: Some(HumanAddr::from(SPEC_PLATFORM)),
controller: Some(HumanAddr::from(TEST_CONTROLLER)),
base_denom: "uusd".to_string(),
community_fee: Decimal::zero(),
platform_fee: Decimal::zero(),
controller_fee: Decimal::zero(),
deposit_fee: Decimal::zero(),
lock_start: 0u64,
lock_end: 0u64,
};
// success init
let res = init(deps, env.clone(), config.clone());
assert!(res.is_ok());
// read config
let msg = QueryMsg::config {};
let res: ConfigInfo = from_binary(&query(deps, msg).unwrap()).unwrap();
assert_eq!(res, config.clone());
// read state: freshly initialized contract has all counters at zero
let msg = QueryMsg::state {};
let res: StateInfo = from_binary(&query(deps, msg).unwrap()).unwrap();
assert_eq!(
res,
StateInfo {
previous_spec_share: Uint128::zero(),
total_farm_share: Uint128::zero(),
total_weight: 0u32,
spec_share_index: Decimal::zero(),
}
);
// alter config, validate owner: SPEC_GOV is not yet owner, so this must fail
let env = mock_env(SPEC_GOV, &[]);
let msg = HandleMsg::update_config {
owner: Some(HumanAddr::from(SPEC_GOV)),
platform: None,
controller: None,
community_fee: None,
platform_fee: None,
controller_fee: None,
deposit_fee: None,
lock_start: None,
lock_end: None,
};
let res = handle(deps, env.clone(), msg.clone());
assert!(res.is_err());
// success: the current owner (creator) hands ownership to SPEC_GOV
let env = mock_env(TEST_CREATOR, &[]);
let res = handle(deps, env.clone(), msg);
assert!(res.is_ok());
let msg = QueryMsg::config {};
let res: ConfigInfo = from_binary(&query(deps, msg).unwrap()).unwrap();
config.owner = HumanAddr::from(SPEC_GOV);
assert_eq!(res, config.clone());
config
}
// Verifies asset registration: owner-only access, pool bookkeeping, and that
// registering a second asset is rejected (this farm supports a single asset).
fn test_register_asset(deps: &mut Extern<MockStorage, MockApi, WasmMockQuerier>) {
// no permission: creator is no longer the owner after test_config
let env = mock_env(TEST_CREATOR, &[]);
let msg = HandleMsg::register_asset {
asset_token: HumanAddr::from(MINE_TOKEN),
staking_token: HumanAddr::from(MINE_LP),
weight: 1u32,
auto_compound: true,
};
let res = handle(deps, env.clone(), msg.clone());
assert!(res.is_err());
// success
let env = mock_env(SPEC_GOV, &[]);
let res = handle(deps, env.clone(), msg);
assert!(res.is_ok());
// query pool info: new pool starts with zeroed shares/indices
let msg = QueryMsg::pools {};
let res: PoolsResponse = from_binary(&query(deps, msg).unwrap()).unwrap();
assert_eq!(
res,
PoolsResponse {
pools: vec![PoolItem {
asset_token: HumanAddr::from(MINE_TOKEN),
staking_token: HumanAddr::from(MINE_LP),
weight: 1u32,
auto_compound: true,
farm_share: Uint128::zero(),
state_spec_share_index: Decimal::zero(),
stake_spec_share_index: Decimal::zero(),
auto_spec_share_index: Decimal::zero(),
farm_share_index: Decimal::zero(),
total_stake_bond_amount: Uint128::zero(),
total_stake_bond_share: Uint128::zero(),
total_auto_bond_share: Uint128::zero(),
reinvest_allowance: Uint128::zero(),
}]
}
);
// test register fail: a second asset must be rejected
let msg = HandleMsg::register_asset {
asset_token: HumanAddr::from(FAIL_TOKEN),
staking_token: HumanAddr::from(FAIL_LP),
weight: 2u32,
auto_compound: true,
};
let res = handle(deps, env.clone(), msg);
assert!(res.is_err());
// read state: total weight reflects only the successful registration
let msg = QueryMsg::state {};
let res: StateInfo = from_binary(&query(deps, msg).unwrap()).unwrap();
assert_eq!(res.total_weight, 1u32);
}
// compound is controller-only: a call from the contract creator must be rejected.
fn test_compound_unauthorized(deps: &mut Extern<MockStorage, MockApi, WasmMockQuerier>) {
    let res = handle(deps, mock_env(TEST_CREATOR, &[]), HandleMsg::compound {});
    assert!(res.is_err());
}
// Compounding with nothing pending: the controller call succeeds and emits the
// full withdraw/allowance/provide/stake message sequence, all with zero amounts.
fn test_compound_zero(deps: &mut Extern<MockStorage, MockApi, WasmMockQuerier>) {
// reinvest zero
let env = mock_env(TEST_CONTROLLER, &[]);
let msg = HandleMsg::compound {};
let res = handle(deps, env.clone(), msg.clone()).unwrap();
assert_eq!(
res.messages,
vec![
// 1. claim pending MINE from pylon staking
CosmosMsg::Wasm(WasmMsg::Execute {
contract_addr: HumanAddr::from(MINE_STAKING),
send: vec![],
msg: to_binary(&PylonStakingHandleMsg::Withdraw {}).unwrap(),
}),
// 2. allow the pair contract to pull MINE (zero here)
CosmosMsg::Wasm(WasmMsg::Execute {
contract_addr: HumanAddr::from(MINE_TOKEN),
msg: to_binary(&Cw20HandleMsg::IncreaseAllowance {
spender: HumanAddr::from(MINE_POOL),
amount: Uint128::zero(),
expires: None,
})
.unwrap(),
send: vec![],
}),
// 3. provide (zero) liquidity to the MINE/UST pair
CosmosMsg::Wasm(WasmMsg::Execute {
contract_addr: HumanAddr::from(MINE_POOL),
msg: to_binary(&TerraswapHandleMsg::ProvideLiquidity {
assets: [
Asset {
info: AssetInfo::Token {
contract_addr: HumanAddr::from(MINE_TOKEN),
},
amount: Uint128::zero(),
},
Asset {
info: AssetInfo::NativeToken {
denom: "uusd".to_string(),
},
amount: Uint128::zero(),
},
],
slippage_tolerance: None,
})
.unwrap(),
send: vec![Coin {
denom: "uusd".to_string(),
amount: Uint128::zero(),
}],
}),
// 4. self-call to stake the received LP tokens
CosmosMsg::Wasm(WasmMsg::Execute {
contract_addr: env.contract.address,
msg: to_binary(&HandleMsg::stake {
asset_token: HumanAddr::from(MINE_TOKEN),
})
.unwrap(),
send: vec![],
}),
]
);
}
// Compounding with a pre-seeded reinvest allowance of 100 MINE: half is swapped
// to UST and the rest paired into liquidity; the expected amounts reflect the
// mock querier's 1% tax model (50_000_000 - 1% ≈ 48_867_757 per side, leaving
// 1_132_243 as the new allowance) — values taken from the mock's math.
fn test_compound_mine_from_allowance(deps: &mut Extern<MockStorage, MockApi, WasmMockQuerier>) {
let env = mock_env(TEST_CONTROLLER, &[]);
let asset_token_raw = deps
.api
.canonical_address(&HumanAddr::from(MINE_TOKEN))
.unwrap();
// seed the pool's reinvest allowance directly in storage
let mut pool_info = pool_info_read(&deps.storage)
.load(asset_token_raw.as_slice())
.unwrap();
pool_info.reinvest_allowance = Uint128::from(100_000_000u128);
pool_info_store(&mut deps.storage)
.save(asset_token_raw.as_slice(), &pool_info)
.unwrap();
let msg = HandleMsg::compound {};
let res = handle(deps, env.clone(), msg.clone()).unwrap();
// leftover after swap + provide becomes the next allowance
let pool_info = pool_info_read(&deps.storage)
.load(asset_token_raw.as_slice())
.unwrap();
assert_eq!(Uint128::from(1_132_243u128), pool_info.reinvest_allowance);
assert_eq!(
res.messages,
vec![
CosmosMsg::Wasm(WasmMsg::Execute {
contract_addr: HumanAddr::from(MINE_STAKING),
send: vec![],
msg: to_binary(&PylonStakingHandleMsg::Withdraw {}).unwrap(),
}), //ok
// swap half of the allowance to UST via the pair contract
CosmosMsg::Wasm(WasmMsg::Execute {
contract_addr: HumanAddr::from(MINE_TOKEN),
msg: to_binary(&Cw20HandleMsg::Send {
contract: HumanAddr::from(MINE_POOL),
amount: Uint128::from(50_000_000u128),
msg: Some(
to_binary(&TerraswapCw20HookMsg::Swap {
max_spread: None,
belief_price: None,
to: None,
})
.unwrap()
),
})
.unwrap(),
send: vec![],
}),
CosmosMsg::Wasm(WasmMsg::Execute {
contract_addr: HumanAddr::from(MINE_TOKEN),
msg: to_binary(&Cw20HandleMsg::IncreaseAllowance {
spender: HumanAddr::from(MINE_POOL),
amount: Uint128::from(48_867_757u128),
expires: None
})
.unwrap(),
send: vec![],
}),
CosmosMsg::Wasm(WasmMsg::Execute {
contract_addr: HumanAddr::from(MINE_POOL),
msg: to_binary(&TerraswapHandleMsg::ProvideLiquidity {
assets: [
Asset {
info: AssetInfo::Token {
contract_addr: HumanAddr::from(MINE_TOKEN),
},
amount: Uint128::from(48_867_757u128),
},
Asset {
info: AssetInfo::NativeToken {
denom: "uusd".to_string(),
},
amount: Uint128::from(48_867_757u128),
},
],
slippage_tolerance: None,
})
.unwrap(),
send: vec![Coin {
denom: "uusd".to_string(),
amount: Uint128::from(48_867_757u128),
}],
}),
CosmosMsg::Wasm(WasmMsg::Execute {
contract_addr: env.contract.address,
msg: to_binary(&HandleMsg::stake {
asset_token: HumanAddr::from(MINE_TOKEN),
})
.unwrap(),
send: vec![],
}),
]
);
}
// Exercises the full bond/unbond/withdraw lifecycle for two users, checking
// reward share accounting after each step. The inline block comment midway
// documents the share math the final assertions rely on.
fn test_bond(deps: &mut Extern<MockStorage, MockApi, WasmMockQuerier>) {
// bond err: only the staking (LP) token contract may invoke the bond hook
let env = mock_env(TEST_CREATOR, &[]);
let msg = HandleMsg::receive(Cw20ReceiveMsg {
sender: HumanAddr::from(USER1),
amount: Uint128::from(10000u128),
msg: Some(
to_binary(&Cw20HookMsg::bond {
staker_addr: None,
asset_token: HumanAddr::from(MINE_TOKEN),
compound_rate: Some(Decimal::percent(60)),
})
.unwrap(),
),
});
let res = handle(deps, env.clone(), msg.clone());
assert!(res.is_err());
// bond success user1 1000 MINE-LP
let env = mock_env(MINE_LP, &[]);
let res = handle(deps, env.clone(), msg);
assert!(res.is_ok());
// simulate the contract earning 500 farm share for the pool
let config = read_config(&deps.storage).unwrap();
let mut pool_info = pool_info_read(&deps.storage)
.load(config.pylon_token.as_slice())
.unwrap();
deposit_farm_share(deps, &mut pool_info, &config, Uint128::from(500u128)).unwrap();
pool_info_store(&mut deps.storage)
.save(config.pylon_token.as_slice(), &pool_info)
.unwrap();
// mock external balances: staked LP, MINE gov stake, SPEC gov stake
deps.querier.with_token_balances(&[
(
&HumanAddr::from(MINE_STAKING),
&[(
&HumanAddr::from(MOCK_CONTRACT_ADDR),
&Uint128::from(10000u128),
)],
),
(
&HumanAddr::from(MINE_GOV),
&[(
&HumanAddr::from(MOCK_CONTRACT_ADDR),
&Uint128::from(1000u128),
)],
),
(
&HumanAddr::from(SPEC_GOV),
&[(
&HumanAddr::from(MOCK_CONTRACT_ADDR),
&Uint128::from(2700u128),
)],
),
]);
// query balance for user1: 60% auto-compound split (6000 auto / 4000 stake)
let msg = QueryMsg::reward_info {
staker_addr: HumanAddr::from(USER1),
height: 0u64,
};
let res: RewardInfoResponse = from_binary(&query(deps, msg).unwrap()).unwrap();
assert_eq!(
res.reward_infos,
vec![RewardInfoResponseItem {
asset_token: HumanAddr::from(MINE_TOKEN),
pending_farm_reward: Uint128::from(1000u128),
pending_spec_reward: Uint128::from(2700u128),
bond_amount: Uint128::from(10000u128),
auto_bond_amount: Uint128::from(6000u128),
stake_bond_amount: Uint128::from(4000u128),
accum_spec_share: Uint128::from(2700u128),
farm_share_index: Decimal::zero(),
auto_spec_share_index: Decimal::zero(),
stake_spec_share_index: Decimal::zero(),
farm_share: Uint128::from(500u128),
spec_share: Uint128::from(2700u128),
auto_bond_share: Uint128::from(6000u128),
stake_bond_share: Uint128::from(4000u128),
locked_spec_share: Uint128::zero(),
locked_spec_reward: Uint128::zero(),
},]
);
// unbond 3000 MINE-LP
let env = mock_env(USER1, &[]);
let msg = HandleMsg::unbond {
asset_token: HumanAddr::from(MINE_TOKEN),
amount: Uint128::from(3000u128),
};
let res = handle(deps, env.clone(), msg);
assert!(res.is_ok());
// unbond emits: unstake from pylon, then transfer LP back to the user
assert_eq!(
res.unwrap().messages,
[
CosmosMsg::Wasm(WasmMsg::Execute {
contract_addr: HumanAddr::from(MINE_STAKING),
send: vec![],
msg: to_binary(&PylonStakingHandleMsg::Unbond {
amount: Uint128::from(3000u128),
})
.unwrap(),
}),
CosmosMsg::Wasm(WasmMsg::Execute {
contract_addr: HumanAddr::from(MINE_LP),
send: vec![],
msg: to_binary(&Cw20HandleMsg::Transfer {
recipient: HumanAddr::from(USER1),
amount: Uint128::from(3000u128),
})
.unwrap(),
}),
]
);
// withdraw rewards: both SPEC and MINE rewards are pulled and forwarded
let msg = HandleMsg::withdraw { asset_token: None };
let res = handle(deps, env.clone(), msg);
assert!(res.is_ok());
assert_eq!(
res.unwrap().messages,
vec![
CosmosMsg::Wasm(WasmMsg::Execute {
contract_addr: HumanAddr::from(SPEC_GOV),
send: vec![],
msg: to_binary(&GovHandleMsg::withdraw {
amount: Some(Uint128::from(2700u128)),
})
.unwrap(),
}),
CosmosMsg::Wasm(WasmMsg::Execute {
contract_addr: HumanAddr::from(SPEC_TOKEN),
send: vec![],
msg: to_binary(&Cw20HandleMsg::Transfer {
recipient: HumanAddr::from(USER1),
amount: Uint128::from(2700u128),
})
.unwrap(),
}),
CosmosMsg::Wasm(WasmMsg::Execute {
contract_addr: HumanAddr::from(MINE_GOV),
send: vec![],
msg: to_binary(&PylonGovHandleMsg::WithdrawVotingTokens {
amount: Some(Uint128::from(1000u128)),
})
.unwrap(),
}),
CosmosMsg::Wasm(WasmMsg::Execute {
contract_addr: HumanAddr::from(MINE_TOKEN),
send: vec![],
msg: to_binary(&Cw20HandleMsg::Transfer {
recipient: HumanAddr::from(USER1),
amount: Uint128::from(1000u128),
})
.unwrap(),
}),
]
);
// after withdraw: gov balances drop to zero, staked LP reduced by the unbond
deps.querier.with_token_balances(&[
(
&HumanAddr::from(MINE_STAKING),
&[(
&HumanAddr::from(MOCK_CONTRACT_ADDR),
&Uint128::from(7000u128),
)],
),
(
&HumanAddr::from(MINE_GOV),
&[(&HumanAddr::from(MOCK_CONTRACT_ADDR), &Uint128::from(0u128))],
),
(
&HumanAddr::from(SPEC_GOV),
&[(&HumanAddr::from(MOCK_CONTRACT_ADDR), &Uint128::from(0u128))],
),
]);
// query balance for user2: has not bonded yet, so no entries
let msg = QueryMsg::reward_info {
staker_addr: HumanAddr::from(USER2),
height: 0u64,
};
let res: RewardInfoResponse = from_binary(&query(deps, msg).unwrap()).unwrap();
assert_eq!(res.reward_infos, vec![]);
// query balance for user1
let msg = QueryMsg::reward_info {
staker_addr: HumanAddr::from(USER1),
height: 0u64,
};
let res: RewardInfoResponse = from_binary(&query(deps, msg).unwrap()).unwrap();
assert_eq!(
res.reward_infos,
vec![RewardInfoResponseItem {
asset_token: HumanAddr::from(MINE_TOKEN),
pending_farm_reward: Uint128::from(0u128),
pending_spec_reward: Uint128::from(0u128),
bond_amount: Uint128::from(7000u128),
auto_bond_amount: Uint128::from(4200u128),
stake_bond_amount: Uint128::from(2800u128),
accum_spec_share: Uint128::from(2700u128),
farm_share_index: Decimal::from_ratio(125u128, 1000u128),
auto_spec_share_index: Decimal::from_ratio(270u128, 1000u128),
stake_spec_share_index: Decimal::from_ratio(270u128, 1000u128),
farm_share: Uint128::from(0u128),
spec_share: Uint128::from(0u128),
auto_bond_share: Uint128::from(4200u128),
stake_bond_share: Uint128::from(2800u128),
locked_spec_share: Uint128::zero(),
locked_spec_reward: Uint128::zero(),
},]
);
// bond user2 5000 MINE-LP auto-stake (compound_rate None => all staked)
let env = mock_env(MINE_LP, &[]);
let msg = HandleMsg::receive(Cw20ReceiveMsg {
sender: HumanAddr::from(USER2),
amount: Uint128::from(5000u128),
msg: Some(
to_binary(&Cw20HookMsg::bond {
staker_addr: None,
asset_token: HumanAddr::from(MINE_TOKEN),
compound_rate: None,
})
.unwrap(),
),
});
let res = handle(deps, env.clone(), msg);
assert!(res.is_ok());
let mut pool_info = pool_info_read(&deps.storage)
.load(config.pylon_token.as_slice())
.unwrap();
deposit_farm_share(deps, &mut pool_info, &config, Uint128::from(10000u128)).unwrap();
pool_info_store(&mut deps.storage)
.save(config.pylon_token.as_slice(), &pool_info)
.unwrap();
deps.querier.with_token_balances(&[
(
&HumanAddr::from(MINE_STAKING),
&[(
&HumanAddr::from(MOCK_CONTRACT_ADDR),
&Uint128::from(12000u128),
)],
),
(
&HumanAddr::from(MINE_GOV),
&[(
&HumanAddr::from(MOCK_CONTRACT_ADDR),
&Uint128::from(5000u128),
)],
),
(
&HumanAddr::from(SPEC_GOV),
&[(
&HumanAddr::from(MOCK_CONTRACT_ADDR),
&Uint128::from(1000u128),
)],
),
]);
/*
USER1 7000 (auto 4200, stake 2800)
USER2 5000 (auto 0, stake 5000)
Total lp 12000
Total farm share 7800
Farm share +10000
USER1 Farm share = 28/78 * 10000 = 3589
USER2 Farm share = 50/78 * 10000 = 6410
Farm reward 5000
USER1 Farm reward = 28/78 * 5000 = 1794
USER2 Farm reward = 50/78 * 5000 = 3205
SPEC reward +1000
USER1 SPEC reward ~ 582
USER2 SPEC reward ~ 416
*/
// query balance for user1
let msg = QueryMsg::reward_info {
staker_addr: HumanAddr::from(USER1),
height: 0u64,
};
let res: RewardInfoResponse = from_binary(&query(deps, msg).unwrap()).unwrap();
assert_eq!(
res.reward_infos,
vec![RewardInfoResponseItem {
asset_token: HumanAddr::from(MINE_TOKEN),
pending_farm_reward: Uint128::from(1794u128),
pending_spec_reward: Uint128::from(582u128),
bond_amount: Uint128::from(7000u128),
auto_bond_amount: Uint128::from(4200u128),
stake_bond_amount: Uint128::from(2800u128),
accum_spec_share: Uint128::from(3282u128),
farm_share_index: Decimal::from_ratio(125u128, 1000u128),
auto_spec_share_index: Decimal::from_ratio(270u128, 1000u128),
stake_spec_share_index: Decimal::from_ratio(270u128, 1000u128),
farm_share: Uint128::from(3589u128),
spec_share: Uint128::from(582u128),
auto_bond_share: Uint128::from(4200u128),
stake_bond_share: Uint128::from(2800u128),
locked_spec_share: Uint128::zero(),
locked_spec_reward: Uint128::zero(),
},]
);
// query balance for user2
let msg = QueryMsg::reward_info {
staker_addr: HumanAddr::from(USER2),
height: 0u64,
};
let res: RewardInfoResponse = from_binary(&query(deps, msg).unwrap()).unwrap();
assert_eq!(
res.reward_infos,
vec![RewardInfoResponseItem {
asset_token: HumanAddr::from(MINE_TOKEN),
pending_farm_reward: Uint128::from(3205u128),
pending_spec_reward: Uint128::from(416u128),
bond_amount: Uint128::from(5000u128),
auto_bond_amount: Uint128::from(0u128),
stake_bond_amount: Uint128::from(5000u128),
accum_spec_share: Uint128::from(416u128),
farm_share_index: Decimal::from_ratio(125u128, 1000u128),
auto_spec_share_index: Decimal::from_ratio(270u128, 1000u128),
stake_spec_share_index: Decimal::from_ratio(270u128, 1000u128),
farm_share: Uint128::from(6410u128),
spec_share: Uint128::from(416u128),
auto_bond_share: Uint128::from(0u128),
stake_bond_share: Uint128::from(5000u128),
locked_spec_share: Uint128::zero(),
locked_spec_reward: Uint128::zero(),
},]
);
}
/// Compound with zero protocol fees: all pending MINE rewards are split
/// between re-staking to MINE gov (the "stake" portion) and a
/// swap + provide-liquidity round trip (the "auto" portion).
/// The inline calculation block below derives every asserted number.
fn test_compound_mine(deps: &mut Extern<MockStorage, MockApi, WasmMockQuerier>) {
    let env = mock_env(TEST_CONTROLLER, &[]);
    let asset_token_raw = deps
        .api
        .canonical_address(&HumanAddr::from(MINE_TOKEN))
        .unwrap();
    // Reset the pool's reinvest allowance to zero so the assertion below
    // checks only the remainder produced by this compound round.
    let mut pool_info = pool_info_read(&deps.storage)
        .load(asset_token_raw.as_slice())
        .unwrap();
    pool_info.reinvest_allowance = Uint128::from(0u128);
    pool_info_store(&mut deps.storage)
        .save(asset_token_raw.as_slice(), &pool_info)
        .unwrap();
    /*
    pending rewards 12000 MINE
    USER1 7000 (auto 4200, stake 2800)
    USER2 5000 (auto 0, stake 5000)
    total 12000
    auto 4200 / 12000 * 12000 = 4200
    stake 7800 / 12000 * 12000 = 7800
    swap amount 2100 MINE -> 2073 UST
    provide UST = 2052
    provide MINE = 2052
    remaining = 48
    */
    let msg = HandleMsg::compound {};
    let res = handle(deps, env.clone(), msg.clone()).unwrap();
    let pool_info = pool_info_read(&deps.storage)
        .load(asset_token_raw.as_slice())
        .unwrap();
    // 48 MINE could not be provided as liquidity and is carried forward.
    assert_eq!(Uint128::from(48u128), pool_info.reinvest_allowance);
    assert_eq!(
        res.messages,
        vec![
            // 1) claim all pending MINE rewards from the staking contract
            CosmosMsg::Wasm(WasmMsg::Execute {
                contract_addr: HumanAddr::from(MINE_STAKING),
                send: vec![],
                msg: to_binary(&PylonStakingHandleMsg::Withdraw {}).unwrap(),
            }),
            // 2) swap half of the auto portion (2100 MINE) to UST on the pair
            CosmosMsg::Wasm(WasmMsg::Execute {
                contract_addr: HumanAddr::from(MINE_TOKEN),
                msg: to_binary(&Cw20HandleMsg::Send {
                    contract: HumanAddr::from(MINE_POOL),
                    amount: Uint128::from(2100u128),
                    msg: Some(
                        to_binary(&TerraswapCw20HookMsg::Swap {
                            max_spread: None,
                            belief_price: None,
                            to: None,
                        })
                        .unwrap()
                    ),
                })
                .unwrap(),
                send: vec![],
            }),
            // 3) stake the stake portion (7800 MINE) in MINE governance
            CosmosMsg::Wasm(WasmMsg::Execute {
                contract_addr: HumanAddr::from(MINE_TOKEN),
                send: vec![],
                msg: to_binary(&Cw20HandleMsg::Send {
                    contract: HumanAddr::from(MINE_GOV),
                    amount: Uint128::from(7800u128),
                    msg: Some(to_binary(&PylonGovCw20HookMsg::StakeVotingTokens {}).unwrap()),
                })
                .unwrap(),
            }),
            // 4) allow the pair contract to pull the 2052 MINE being provided
            CosmosMsg::Wasm(WasmMsg::Execute {
                contract_addr: HumanAddr::from(MINE_TOKEN),
                msg: to_binary(&Cw20HandleMsg::IncreaseAllowance {
                    spender: HumanAddr::from(MINE_POOL),
                    amount: Uint128::from(2052u128),
                    expires: None
                })
                .unwrap(),
                send: vec![],
            }),
            // 5) provide 2052 MINE + 2052 UST as liquidity
            CosmosMsg::Wasm(WasmMsg::Execute {
                contract_addr: HumanAddr::from(MINE_POOL),
                msg: to_binary(&TerraswapHandleMsg::ProvideLiquidity {
                    assets: [
                        Asset {
                            info: AssetInfo::Token {
                                contract_addr: HumanAddr::from(MINE_TOKEN),
                            },
                            amount: Uint128::from(2052u128),
                        },
                        Asset {
                            info: AssetInfo::NativeToken {
                                denom: "uusd".to_string(),
                            },
                            amount: Uint128::from(2052u128),
                        },
                    ],
                    slippage_tolerance: None,
                })
                .unwrap(),
                send: vec![Coin {
                    denom: "uusd".to_string(),
                    amount: Uint128::from(2052u128),
                }],
            }),
            // 6) re-enter the contract to stake the received LP tokens
            CosmosMsg::Wasm(WasmMsg::Execute {
                contract_addr: env.contract.address,
                msg: to_binary(&HandleMsg::stake {
                    asset_token: HumanAddr::from(MINE_TOKEN),
                })
                .unwrap(),
                send: vec![],
            }),
        ]
    );
    // Simulate post-compound balances so the reward queries below reflect
    // the new state of the staking, MINE-gov and SPEC-gov contracts.
    deps.querier.with_token_balances(&[
        (
            &HumanAddr::from(MINE_STAKING),
            &[(
                &HumanAddr::from(MOCK_CONTRACT_ADDR),
                &Uint128::from(12100u128),
            )],
        ),
        (
            &HumanAddr::from(MINE_GOV),
            &[(
                &HumanAddr::from(MOCK_CONTRACT_ADDR),
                &Uint128::from(12800u128),
            )],
        ),
        (
            &HumanAddr::from(SPEC_GOV),
            &[(
                &HumanAddr::from(MOCK_CONTRACT_ADDR),
                &Uint128::from(1000u128),
            )],
        ),
    ]);
    // query balance for user1
    let msg = QueryMsg::reward_info {
        staker_addr: HumanAddr::from(USER1),
        height: 0u64,
    };
    let res: RewardInfoResponse = from_binary(&query(deps, msg).unwrap()).unwrap();
    assert_eq!(
        res.reward_infos,
        vec![RewardInfoResponseItem {
            asset_token: HumanAddr::from(MINE_TOKEN),
            pending_farm_reward: Uint128::from(4594u128),
            pending_spec_reward: Uint128::from(586u128),
            bond_amount: Uint128::from(7100u128),
            auto_bond_amount: Uint128::from(4300u128),
            stake_bond_amount: Uint128::from(2800u128),
            accum_spec_share: Uint128::from(3286u128),
            farm_share_index: Decimal::from_ratio(125u128, 1000u128),
            auto_spec_share_index: Decimal::from_ratio(270u128, 1000u128),
            stake_spec_share_index: Decimal::from_ratio(270u128, 1000u128),
            farm_share: Uint128::from(9189u128),
            spec_share: Uint128::from(586u128),
            auto_bond_share: Uint128::from(4200u128),
            stake_bond_share: Uint128::from(2800u128),
            locked_spec_share: Uint128::zero(),
            locked_spec_reward: Uint128::zero(),
        },]
    );
    // query balance for user2
    let msg = QueryMsg::reward_info {
        staker_addr: HumanAddr::from(USER2),
        height: 0u64,
    };
    let res: RewardInfoResponse = from_binary(&query(deps, msg).unwrap()).unwrap();
    assert_eq!(
        res.reward_infos,
        vec![RewardInfoResponseItem {
            asset_token: HumanAddr::from(MINE_TOKEN),
            pending_farm_reward: Uint128::from(8205u128),
            pending_spec_reward: Uint128::from(413u128),
            bond_amount: Uint128::from(5000u128),
            auto_bond_amount: Uint128::from(0u128),
            stake_bond_amount: Uint128::from(5000u128),
            accum_spec_share: Uint128::from(413u128),
            farm_share_index: Decimal::from_ratio(125u128, 1000u128),
            auto_spec_share_index: Decimal::from_ratio(270u128, 1000u128),
            stake_spec_share_index: Decimal::from_ratio(270u128, 1000u128),
            farm_share: Uint128::from(16410u128),
            spec_share: Uint128::from(413u128),
            auto_bond_share: Uint128::from(0u128),
            stake_bond_share: Uint128::from(5000u128),
            locked_spec_share: Uint128::zero(),
            locked_spec_reward: Uint128::zero(),
        },]
    );
}
/// Compound with community/platform/controller fees enabled (3% + 1% + 1%).
/// The fee portion of pending rewards is swapped MINE -> UST -> SPEC and
/// distributed; the remainder is compounded as in `test_compound_mine`.
/// See the inline calculation block for every asserted number.
fn test_compound_mine_with_fees(deps: &mut Extern<MockStorage, MockApi, WasmMockQuerier>) {
    // update fees
    let env = mock_env(SPEC_GOV, &[]);
    let msg = HandleMsg::update_config {
        owner: Some(HumanAddr::from(SPEC_GOV)),
        platform: None,
        controller: None,
        community_fee: Some(Decimal::percent(3u64)),
        platform_fee: Some(Decimal::percent(1u64)),
        controller_fee: Some(Decimal::percent(1u64)),
        deposit_fee: None,
        lock_start: None,
        lock_end: None,
    };
    let res = handle(deps, env.clone(), msg.clone());
    assert!(res.is_ok());
    let env = mock_env(TEST_CONTROLLER, &[]);
    let asset_token_raw = deps
        .api
        .canonical_address(&HumanAddr::from(MINE_TOKEN))
        .unwrap();
    // Reset the reinvest allowance so the assertion below only sees the
    // remainder produced by this compound round.
    let mut pool_info = pool_info_read(&deps.storage)
        .load(asset_token_raw.as_slice())
        .unwrap();
    pool_info.reinvest_allowance = Uint128::from(0u128);
    pool_info_store(&mut deps.storage)
        .save(asset_token_raw.as_slice(), &pool_info)
        .unwrap();
    /*
    pending rewards 12100 MINE
    USER1 7100 (auto 4300, stake 2800)
    USER2 5000 (auto 0, stake 5000)
    total 12100
    total fee = 605
    remaining = 11495
    auto 4300 / 12100 * 11495 = 4085
    stake 7800 / 12100 * 11495 = 7410
    swap amount 2042 MINE -> 2016 UST
    provide UST = 1996
    provide MINE = 1996
    remaining = 46
    fee swap amount 605 MINE -> 591 UST -> 590 SPEC
    community fee = 363 / 605 * 590 = 354
    platform fee = 121 / 605 * 590 = 118
    controller fee = 121 / 605 * 590 = 118
    total swap amount 2647 MINE
    */
    let msg = HandleMsg::compound {};
    let res = handle(deps, env.clone(), msg.clone()).unwrap();
    let pool_info = pool_info_read(&deps.storage)
        .load(asset_token_raw.as_slice())
        .unwrap();
    // 46 MINE could not be provided as liquidity and is carried forward.
    assert_eq!(Uint128::from(46u128), pool_info.reinvest_allowance);
    assert_eq!(
        res.messages,
        vec![
            // 1) claim all pending MINE rewards
            CosmosMsg::Wasm(WasmMsg::Execute {
                contract_addr: HumanAddr::from(MINE_STAKING),
                send: vec![],
                msg: to_binary(&PylonStakingHandleMsg::Withdraw {}).unwrap(),
            }),
            // 2) swap auto-half + fee portion (2042 + 605 = 2647 MINE) to UST
            CosmosMsg::Wasm(WasmMsg::Execute {
                contract_addr: HumanAddr::from(MINE_TOKEN),
                msg: to_binary(&Cw20HandleMsg::Send {
                    contract: HumanAddr::from(MINE_POOL),
                    amount: Uint128::from(2647u128),
                    msg: Some(
                        to_binary(&TerraswapCw20HookMsg::Swap {
                            max_spread: None,
                            belief_price: None,
                            to: None,
                        })
                        .unwrap()
                    ),
                })
                .unwrap(),
                send: vec![],
            }),
            // 3) swap the fee share (591 UST) to SPEC
            CosmosMsg::Wasm(WasmMsg::Execute {
                contract_addr: HumanAddr::from(SPEC_POOL),
                msg: to_binary(&TerraswapHandleMsg::Swap {
                    offer_asset: Asset {
                        info: AssetInfo::NativeToken {
                            denom: "uusd".to_string(),
                        },
                        amount: Uint128::from(591u128),
                    },
                    max_spread: None,
                    belief_price: None,
                    to: None,
                })
                .unwrap(),
                send: vec![Coin {
                    denom: "uusd".to_string(),
                    amount: Uint128::from(591u128),
                }],
            }),
            // 4) trigger SPEC gov mint before distributing fees
            CosmosMsg::Wasm(WasmMsg::Execute {
                contract_addr: HumanAddr::from(SPEC_GOV),
                msg: to_binary(&GovHandleMsg::mint {}).unwrap(),
                send: vec![],
            }),
            // 5) community fee: transfer 354 SPEC to governance
            CosmosMsg::Wasm(WasmMsg::Execute {
                contract_addr: HumanAddr::from(SPEC_TOKEN),
                msg: to_binary(&Cw20HandleMsg::Transfer {
                    recipient: HumanAddr::from(SPEC_GOV),
                    amount: Uint128::from(354u128),
                })
                .unwrap(),
                send: vec![],
            }),
            // 6) platform fee: stake 118 SPEC on behalf of the platform
            CosmosMsg::Wasm(WasmMsg::Execute {
                contract_addr: HumanAddr::from(SPEC_TOKEN),
                msg: to_binary(&Cw20HandleMsg::Send {
                    contract: HumanAddr::from(SPEC_GOV),
                    amount: Uint128::from(118u128),
                    msg: Some(
                        to_binary(&GovCw20HookMsg::stake_tokens {
                            staker_addr: Some(HumanAddr::from(SPEC_PLATFORM)),
                        })
                        .unwrap()
                    ),
                })
                .unwrap(),
                send: vec![],
            }),
            // 7) controller fee: stake 118 SPEC on behalf of the controller
            CosmosMsg::Wasm(WasmMsg::Execute {
                contract_addr: HumanAddr::from(SPEC_TOKEN),
                msg: to_binary(&Cw20HandleMsg::Send {
                    contract: HumanAddr::from(SPEC_GOV),
                    amount: Uint128::from(118u128),
                    msg: Some(
                        to_binary(&GovCw20HookMsg::stake_tokens {
                            staker_addr: Some(HumanAddr::from(TEST_CONTROLLER)),
                        })
                        .unwrap()
                    ),
                })
                .unwrap(),
                send: vec![],
            }),
            // 8) stake the (post-fee) stake portion 7410 MINE in MINE gov
            CosmosMsg::Wasm(WasmMsg::Execute {
                contract_addr: HumanAddr::from(MINE_TOKEN),
                send: vec![],
                msg: to_binary(&Cw20HandleMsg::Send {
                    contract: HumanAddr::from(MINE_GOV),
                    amount: Uint128::from(7410u128),
                    msg: Some(to_binary(&PylonGovCw20HookMsg::StakeVotingTokens {}).unwrap()),
                })
                .unwrap(),
            }),
            // 9) allow the pair contract to pull 1996 MINE
            CosmosMsg::Wasm(WasmMsg::Execute {
                contract_addr: HumanAddr::from(MINE_TOKEN),
                msg: to_binary(&Cw20HandleMsg::IncreaseAllowance {
                    spender: HumanAddr::from(MINE_POOL),
                    amount: Uint128::from(1996u128),
                    expires: None
                })
                .unwrap(),
                send: vec![],
            }),
            // 10) provide 1996 MINE + 1996 UST as liquidity
            CosmosMsg::Wasm(WasmMsg::Execute {
                contract_addr: HumanAddr::from(MINE_POOL),
                msg: to_binary(&TerraswapHandleMsg::ProvideLiquidity {
                    assets: [
                        Asset {
                            info: AssetInfo::Token {
                                contract_addr: HumanAddr::from(MINE_TOKEN),
                            },
                            amount: Uint128::from(1996u128),
                        },
                        Asset {
                            info: AssetInfo::NativeToken {
                                denom: "uusd".to_string(),
                            },
                            amount: Uint128::from(1996u128),
                        },
                    ],
                    slippage_tolerance: None,
                })
                .unwrap(),
                send: vec![Coin {
                    denom: "uusd".to_string(),
                    amount: Uint128::from(1996u128),
                }],
            }),
            // 11) re-enter the contract to stake the received LP tokens
            CosmosMsg::Wasm(WasmMsg::Execute {
                contract_addr: env.contract.address,
                msg: to_binary(&HandleMsg::stake {
                    asset_token: HumanAddr::from(MINE_TOKEN),
                })
                .unwrap(),
                send: vec![],
            }),
        ]
    );
}
#[macro_use]
extern crate afl;
/// AFL fuzzing entry point: feeds fuzzer-generated byte buffers to the parser.
fn main() {
    // `fuzz!` (from the afl crate) repeatedly invokes this closure with
    // instrumented inputs until a crash/hang is found.
    fuzz!(|data: &[u8]| {
        // Only valid UTF-8 inputs are parsed; the result is discarded —
        // the goal is to surface panics, not to validate output.
        if let Ok(input) = std::str::from_utf8(data) {
            let _ = parser::parse(&input);
        }
    });
}
|
// Copyright 2018 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::sync::atomic::{AtomicBool, Ordering};
use libc;
// One-shot guard so the SIGINT handler is installed at most once.
static INITIALIZED: AtomicBool = AtomicBool::new(false);
// Latched by the signal handler; polled by `received_sigint`.
static RECEIVED_SIGINT: AtomicBool = AtomicBool::new(false);
/// SIGINT handler: records that the signal arrived.
/// An atomic store is async-signal-safe, so this body is sound to run in
/// signal context.
extern "C" fn sigint_handler(_arg: libc::c_int) {
    RECEIVED_SIGINT.store(true, Ordering::Relaxed);
}
// C-compatible signal-handler function type, mirroring libc's `sighandler_t`.
#[allow(non_camel_case_types)]
type sighandler_t = extern "C" fn(libc::c_int);
extern "C" {
    // libc `signal(2)`: install `handler` for `signum`; returns the
    // previously installed handler.
    fn signal(signum: libc::c_int, handler: sighandler_t) -> sighandler_t;
}
/// Returns true once a SIGINT has been received since `init()` was called.
/// Note: the flag is never cleared, so this latches permanently.
pub fn received_sigint() -> bool {
    RECEIVED_SIGINT.load(Ordering::Relaxed)
}
/// Install the SIGINT handler. Idempotent: only the first caller registers.
pub fn init() {
    // `swap` makes the check-and-set atomic, so two concurrent callers
    // cannot both register the handler.
    if !INITIALIZED.swap(true, Ordering::AcqRel) {
        // The previous handler returned by signal() is intentionally
        // discarded; `sigint_handler` only does an async-signal-safe store.
        unsafe { signal(libc::SIGINT, sigint_handler) };
    }
}
|
extern crate soup;
extern crate url;
use soup::*;
use std::vec::*;
use url::*;
use crate::ingestion_engine::Word;
use crate::ingestion_engine;
/// A crawled web page, ready for serialization and ingestion.
#[derive(Debug, PartialEq, Serialize, Deserialize)]
pub struct Page {
    // URL of the page that linked here.
    last_linker: String,
    // Contents of the <title> tag.
    title: String,
    // Concatenated <h1>/<h2> heading text (see `soup_page_formatter`).
    about: String,
    // This page's own URL.
    url: String,
    // Tokenized words extracted from the page body.
    words: Vec<Word>
}
/// Extract absolute hyperlinks from a parsed page.
///
/// Skips nodes whose rendered form contains `:href` (template-ish nodes)
/// or `mailto`, drops fragment links (containing `#`), and resolves
/// relative hrefs against `base_url`.
///
/// # Panics
/// Panics if `base_url` cannot be parsed while a relative link needs
/// resolving, or if joining a relative link fails (same as the original).
pub fn soup_to_links(soup: &Soup, base_url: &str) -> Vec<String> {
    let mut links = Vec::new();
    for link in soup.tag("a").find_all() {
        let rendered = link.display();
        if rendered.contains(":href") || rendered.contains("mailto") {
            continue;
        }
        // `if let` replaces the original `is_none()` check + `expect`.
        if let Some(href) = link.get("href") {
            // Ignore in-page fragment links.
            if href.contains('#') {
                continue;
            }
            // Bug fix: the original used `contains("http://")`, which
            // misclassified relative links whose query string embeds a URL
            // (e.g. "/redirect?to=http://x"). `starts_with` is the correct
            // absolute-URL test.
            if href.starts_with("http://") || href.starts_with("https://") {
                links.push(href);
            } else {
                // Resolve a relative link against the base URL.
                let base = Url::parse(base_url).expect("invalid base URL");
                links.push(base.join(&href).unwrap().to_string());
            }
        }
    }
    links
}
/// Build a [`Page`] record from a parsed document.
///
/// `last_page` is the URL of the page that linked here; `page_url` is this
/// page's own URL. The "about" blurb is the concatenated text of every
/// <h1> and <h2> heading.
///
/// # Panics
/// Panics if the document has no <title> tag (same as the original).
pub fn soup_page_formatter(page: &Soup, last_page: String, page_url: &String) -> Page {
    let title = page.tag("title").find().unwrap().text();
    // Idiomatic initialization replaces the original
    // `let mut body: String; body = "".to_string();` pair; `.text()`
    // already returns a String, so the redundant `.to_string()` is gone.
    let mut body = String::new();
    for heading in page.tag("h1").find_all() {
        body.push_str(&heading.text());
    }
    for heading in page.tag("h2").find_all() {
        body.push_str(&heading.text());
    }
    Page {
        last_linker: last_page,
        title,
        about: body,
        url: page_url.to_string(),
        words: ingestion_engine::words_from_soup(page).words
    }
}
// Tests for the soup-based page formatter and link extractor.
#[cfg(test)]
mod test{
    use crate::web_page_format::{soup_page_formatter, soup_to_links};
    use soup::Soup;
    /// The formatter should capture the title and the <h1>/<h2> text.
    #[test]
    fn test_soup_page_formatter(){
        let html=r#"
        <html><head><title>The Dormouse's story</title></head>
        <body>
        <p class="title"><b>The Dormouse's story</b></p>
        <h1>The Dormouse's story</h1>
        <p class="story">Once upon a time there were three little sisters; and their names were
        <a href="http://example.com/elsie" class="sister" id="link1">Elsie</a>,
        <a href="http://example.com/lacie" class="sister" id="link2">Lacie</a> and
        <a href="http://example.com/tillie" class="sister" id="link3">Tillie</a>;
        and they lived at the bottom of a well.</p>
        <p class="story">...</p>
        "#;
        let soup= Soup::new(html);
        // NOTE(review): the third argument is the page URL, not a title —
        // the literal "Test title" is misleading but harmless here.
        let page= soup_page_formatter(&soup, "test.com".to_string(), &"Test title".to_string());
        assert_eq!(page.last_linker,"test.com");
        assert_eq!(page.title,"The Dormouse's story");
        assert_eq!(page.about.contains("The Dormouse's story"),true);
        assert_eq!(page.about.contains("This does not appear"),false);
    }
    /// Link extraction should resolve relative links against the base URL
    /// and return only absolute http(s) links.
    #[test]
    fn test_soup_to_links(){
        let html=r#"
        <html><head><title>The Dormouse's story</title></head>
        <body>
        <p class="title"><b>The Dormouse's story</b></p>
        <h1>The Dormouse's story</h1>
        <p class="story">Once upon a time there were three little sisters; and their names were
        <a href="/news" class="sister" id="link1">Elsie</a>,
        <a href="http://example.com/elsie" class="sister" id="link1">Elsie</a>,
        <a href="http://example.com/lacie" class="sister" id="link2">Lacie</a> and
        <a href="http://example.com/tillie" class="sister" id="link3">Tillie</a>;
        and they lived at the bottom of a well.</p>
        <p class="story">...</p>
        "#;
        let soup=Soup::new(html);
        let links=soup_to_links(&soup, &"http://example.com".to_string());
        assert_eq!(links.contains(&"http://example.com/elsie".to_string()),true);
        // The relative "/news" link must come back resolved, not raw.
        assert_eq!(links.contains(&"http://example.com/news".to_string()),true);
        assert_eq!(links.contains(&"/news".to_string()),false);
        for link in links{
            assert_eq!(link.contains("http"),true)
        }
    }
}
|
#![cfg(target_os = "android")]
#![allow(non_snake_case)]
// pub unsafe extern fn Java_com_parallel_android_ParallelEngine_cancel(_env: JNIEnv, _: JObject) {}
//
// pub unsafe extern fn Java_com_parallel_android_ParallelEngine_isCanceled(_env: JNIEnv, _: JObject) -> jboolean {}
//
// pub unsafe extern fn Java_com_parallel_android_ParallelEngine_isExecuted(_env: JNIEnv, _: JObject) -> jboolean {}
//
// pub unsafe extern fn Java_com_parallel_android_ParallelEngine_nativeEnqueue(_env: JNIEnv, _: JObject) {}
use std::sync::Mutex;
use jni::JNIEnv;
use jni::objects::{JObject, JString, JValue};
use jni::sys::jlong;
use parallel_net::engine::{Engine, new_engine};
use hyper::body::Body;
use hyper::Error;
use hyper::http::response::Parts;
/// JNI entry point: creates the engine and returns it to Java as an opaque
/// `jlong` pointer. Java must later pass the value back to `..._drop` to
/// free it, or the engine leaks.
#[no_mangle]
pub unsafe extern "C" fn Java_com_parallelnet_android_DemoFragment_initParallel(env: JNIEnv, jni_obj: JObject) -> jlong {
    // Route Rust `log` output to Android logcat under the "parallel_net" tag.
    // NOTE(review): `unwrap` panics if the logger is initialized twice —
    // confirm the Java side calls this only once per process.
    android_log::init("parallel_net").unwrap();
    let engine = new_engine();
    // Leak the boxed Mutex<Engine> and hand its address to Java; ownership
    // is reclaimed in `..._drop` via Box::from_raw.
    let container = Box::into_raw(Box::new(Mutex::new(engine))) as jlong;
    container
}
/// JNI entry point: reclaims and drops the engine previously returned by
/// `initParallel`. Passing an invalid or already-freed pointer is undefined
/// behavior.
#[no_mangle]
pub unsafe extern "C" fn Java_com_parallelnet_android_DemoFragment_drop(env: JNIEnv, jni_obj: JObject, engine: jlong) {
    // Rebuilding the Box from the raw pointer and letting it fall out of
    // scope frees the engine.
    let _drop = Box::from_raw(engine as *mut Mutex<Engine>);
}
/// JNI entry point: issues a request through the engine behind the `jlong`
/// handle. The handle must come from `initParallel` and still be live.
#[no_mangle]
pub unsafe extern "C" fn Java_com_parallelnet_android_DemoFragment_req(env: JNIEnv, jni_obj: JObject, engine: jlong) {
    let engine = engine as *const Mutex<Engine>;
    let lock = (*engine).lock().unwrap();
    // Success path logs the response body; NOTE(review): the error callback
    // silently swallows `err` — consider logging it so failures show up in
    // logcat.
    lock.request(|parts: Parts, body_string: String| {
        log::debug!("{}", body_string);
    }, |err: Error| {});
}
/*
* Datadog API V1 Collection
*
* Collection of all Datadog Public endpoints.
*
* The version of the OpenAPI document: 1.0
* Contact: support@datadoghq.com
* Generated by: https://openapi-generator.tech
*/
/// SloBulkDeleteResponse : The bulk partial delete service level objective object endpoint response. This endpoint operates on multiple service level objective objects, so it may be partially successful. In such cases, the \"data\" and \"error\" fields in this response indicate which deletions succeeded and failed.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SloBulkDeleteResponse {
    // Per the struct-level docs, identifies which deletions succeeded;
    // omitted from the serialized JSON when absent.
    #[serde(rename = "data", skip_serializing_if = "Option::is_none")]
    pub data: Option<Box<crate::models::SloBulkDeleteResponseData>>,
    /// Array of errors object returned.
    #[serde(rename = "errors", skip_serializing_if = "Option::is_none")]
    pub errors: Option<Vec<crate::models::SloBulkDeleteError>>,
}
impl SloBulkDeleteResponse {
/// The bulk partial delete service level objective object endpoint response. This endpoint operates on multiple service level objective objects, so it may be partially successful. In such cases, the \"data\" and \"error\" fields in this response indicate which deletions succeeded and failed.
pub fn new() -> SloBulkDeleteResponse {
SloBulkDeleteResponse {
data: None,
errors: None,
}
}
}
|
// svd2rust-generated, read-only accessor for the ETH_MTLTxQ1ESR register.
#[doc = "Register `ETH_MTLTxQ1ESR` reader"]
pub type R = crate::R<ETH_MTLTX_Q1ESR_SPEC>;
#[doc = "Field `ABS` reader - ABS"]
pub type ABS_R = crate::FieldReader<u32>;
impl R {
    #[doc = "Bits 0:23 - ABS"]
    #[inline(always)]
    pub fn abs(&self) -> ABS_R {
        // Mask keeps the low 24 bits of the register value.
        ABS_R::new(self.bits & 0x00ff_ffff)
    }
}
#[doc = "Tx queue x ETS status Register\n\nYou can [`read`](crate::generic::Reg::read) this register and get [`eth_mtltx_q1esr::R`](R). See [API](https://docs.rs/svd2rust/#read--modify--write-api)."]
pub struct ETH_MTLTX_Q1ESR_SPEC;
impl crate::RegisterSpec for ETH_MTLTX_Q1ESR_SPEC {
    // The register is 32 bits wide.
    type Ux = u32;
}
// Read-only: only `Readable` is implemented, no `Writable`.
#[doc = "`read()` method returns [`eth_mtltx_q1esr::R`](R) reader structure"]
impl crate::Readable for ETH_MTLTX_Q1ESR_SPEC {}
#[doc = "`reset()` method sets ETH_MTLTxQ1ESR to value 0"]
impl crate::Resettable for ETH_MTLTX_Q1ESR_SPEC {
    const RESET_VALUE: Self::Ux = 0;
}
|
// svd2rust-generated read/write accessor for the LUT1005H register
// (graphic MMU LUT entry 1005, high word).
#[doc = "Register `LUT1005H` reader"]
pub type R = crate::R<LUT1005H_SPEC>;
#[doc = "Register `LUT1005H` writer"]
pub type W = crate::W<LUT1005H_SPEC>;
#[doc = "Field `LO` reader - Line offset"]
pub type LO_R = crate::FieldReader<u32>;
#[doc = "Field `LO` writer - Line offset"]
pub type LO_W<'a, REG, const O: u8> = crate::FieldWriter<'a, REG, 18, O, u32>;
impl R {
    #[doc = "Bits 4:21 - Line offset"]
    #[inline(always)]
    pub fn lo(&self) -> LO_R {
        // Shift out the 4 low bits, then mask the 18-bit field.
        LO_R::new((self.bits >> 4) & 0x0003_ffff)
    }
}
impl W {
    #[doc = "Bits 4:21 - Line offset"]
    #[inline(always)]
    #[must_use]
    pub fn lo(&mut self) -> LO_W<LUT1005H_SPEC, 4> {
        // Field writer anchored at bit offset 4.
        LO_W::new(self)
    }
    #[doc = "Writes raw bits to the register."]
    #[inline(always)]
    pub unsafe fn bits(&mut self, bits: u32) -> &mut Self {
        self.bits = bits;
        self
    }
}
#[doc = "Graphic MMU LUT entry 1005 high\n\nYou can [`read`](crate::generic::Reg::read) this register and get [`lut1005h::R`](R). You can [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero) this register using [`lut1005h::W`](W). You can also [`modify`](crate::generic::Reg::modify) this register. See [API](https://docs.rs/svd2rust/#read--modify--write-api)."]
pub struct LUT1005H_SPEC;
impl crate::RegisterSpec for LUT1005H_SPEC {
    // The register is 32 bits wide.
    type Ux = u32;
}
#[doc = "`read()` method returns [`lut1005h::R`](R) reader structure"]
impl crate::Readable for LUT1005H_SPEC {}
#[doc = "`write(|w| ..)` method takes [`lut1005h::W`](W) writer structure"]
impl crate::Writable for LUT1005H_SPEC {
    // No write-1-to-clear / write-0-to-clear bits in this register.
    const ZERO_TO_MODIFY_FIELDS_BITMAP: Self::Ux = 0;
    const ONE_TO_MODIFY_FIELDS_BITMAP: Self::Ux = 0;
}
#[doc = "`reset()` method sets LUT1005H to value 0"]
impl crate::Resettable for LUT1005H_SPEC {
    const RESET_VALUE: Self::Ux = 0;
}
|
// 2019-01-01
// On veut modifier une variable sans en prendre l'ownership.
fn main() {
    // Create a string `s` that is MUTABLE.
    let mut s = String::from("Hello");
    // Call the change() function, handing it a MUTABLE REFERENCE
    // (mutable borrow) created with `&mut s`.
    change(&mut s);
    // A variable may be mutably borrowed once...
    let s1 = &mut s;
    println!("{}", s1);
    // ...but not twice! This code would fail to compile:
    // let s2 = &mut s;
    // println!("{}", s1);
    // with the following error:
    // cannot borrow `s` as mutable more than once at a time
}
// change() appends a fixed suffix to a string the caller still owns.
// Accepting `&mut String` lets the function mutate the value in place
// without taking ownership, so the caller can keep using it afterwards.
// No return value is needed: the mutation happens through the reference.
fn change(texte: &mut String) {
    // `+=` on a String appends a &str in place (equivalent to push_str).
    *texte += ", world!";
}
|
use alloc::string::String;
use alloc::vec::Vec;
use crate::Client;
use chain::names::AccountName;
use rpc_codegen::Fetch;
use serde::{Deserialize, Serialize};
/// Request parameters for the `v1/chain/get_abi` RPC endpoint; the `Fetch`
/// derive wires the HTTP path, method and response type.
#[derive(Fetch, Debug, Clone, Serialize)]
#[api(path="v1/chain/get_abi", http_method="POST", returns="GetAbi")]
pub struct GetAbiParams {
    account_name: AccountName,
}
/// Convenience constructor for `GetAbiParams`.
pub const fn get_abi(account_name: AccountName) -> GetAbiParams {
    GetAbiParams { account_name }
}
/// Response payload of `v1/chain/get_abi`.
#[derive(Serialize, Deserialize, Debug)]
pub struct GetAbi {
    pub account_name: AccountName,
    pub abi: Abi,
}
/// A contract's ABI definition as returned by the chain.
#[derive(Serialize, Deserialize, Debug, PartialEq)]
pub struct Abi {
    pub version: String,
    pub types: Vec<Type>,
    pub structs: Vec<Struct>,
    pub actions: Vec<Action>,
    pub tables: Vec<Table>,
    pub ricardian_clauses: Vec<RicardianClause>,
    pub error_messages: Vec<ErrorMessage>,
    pub abi_extensions: Vec<AbiExtension>,
    // TODO variants: Vec<Variant>,
}
/// A type alias declared by the ABI (`new_type_name` -> `type`).
#[derive(Serialize, Deserialize, Debug, PartialEq)]
pub struct Type {
    pub new_type_name: String,
    // `type` is a Rust keyword, hence the rename.
    #[serde(rename = "type")]
    pub type_: String,
}
/// A struct declared by the ABI, possibly extending a `base` struct.
#[derive(Serialize, Deserialize, Debug, PartialEq)]
pub struct Struct {
    pub name: String,
    pub base: String,
    pub fields: Vec<Field>,
}
/// A single named, typed field of an ABI struct.
#[derive(Serialize, Deserialize, Debug, PartialEq)]
pub struct Field {
    pub name: String,
    #[serde(rename = "type")]
    pub type_: String,
}
/// A contract action and its associated ricardian contract text.
#[derive(Serialize, Deserialize, Debug, PartialEq)]
pub struct Action {
    pub name: String,
    #[serde(rename = "type")]
    pub type_: String,
    pub ricardian_contract: String,
}
/// A contract table description (index type, key layout, row type).
#[derive(Serialize, Deserialize, Debug, PartialEq)]
pub struct Table {
    pub name: String,
    pub index_type: String,
    pub key_names: Vec<String>,
    pub key_types: Vec<String>,
    #[serde(rename = "type")]
    pub type_: String,
}
/// A ricardian clause attached to the contract.
#[derive(Serialize, Deserialize, Debug, PartialEq)]
pub struct RicardianClause {
    pub id: String,
    pub body: String,
}
/// A custom error code/message pair declared by the contract.
#[derive(Serialize, Deserialize, Debug, PartialEq)]
pub struct ErrorMessage {
    pub error_code: u64,
    pub error_msg: String,
}
/// An opaque ABI extension entry (numeric tag plus raw data).
#[derive(Serialize, Deserialize, Debug, PartialEq)]
pub struct AbiExtension {
    #[serde(rename = "type")]
    pub type_: u16,
    pub data: String,
}
// Network-backed integration tests for `get_abi` (require the
// "use-hyper" feature and a reachable public EOS node).
#[cfg(feature = "use-hyper")]
#[cfg(test)]
mod test {
    use super::*;
    use crate::HyperClient;
    use std::str::FromStr;
    /// Happy path: fetching the ABI of a well-known account succeeds.
    #[test]
    fn get_abi_should_work() {
        let node: &'static str = "https://eos.greymass.com/";
        let hyper_client = HyperClient::new(node);
        let account_name: AccountName = AccountName::from_str("eosio.token").unwrap();
        let response = get_abi(account_name).fetch(&hyper_client);
        assert!(response.is_ok());
    }
    /// Fetching the ABI of a non-existent account must surface an EOS error.
    #[test]
    fn get_abi_by_non_exist_account() {
        let node: &'static str = "https://eos.greymass.com/";
        let hyper_client = HyperClient::new(node);
        // eosio.token1 is an invalid account
        let account_name: AccountName = AccountName::from_str("eosio.token1").unwrap();
        let response = get_abi(account_name).fetch(&hyper_client);
        if let Err(crate::Error::EosError { ref eos_err }) = response {
            assert_eq!(eos_err.code, 500);
            assert_eq!(eos_err.message, "Internal Service Error");
        } else {
            // Bug fix: the original branch was `assert!(true)`, which made
            // the test pass vacuously whenever no EosError was returned.
            panic!("expected Error::EosError for a non-existent account");
        }
    }
}
|
#![allow(unused)]
pub mod fmc;
|
use serde::{Deserialize, Serialize};
/// Debug/development tuning knobs, deserialized from a config file.
/// `deny_unknown_fields` makes typos in the file a hard error.
#[derive(Debug, Deserialize, Serialize, Default)]
#[serde(default)]
#[serde(deny_unknown_fields)]
pub struct DebugConfig {
    /// An array of values that 'time_scale' can have.
    /// Debug controls will allow switching between these values,
    /// to slow time down or speed it up.
    /// NOTE(review): `increase_speed`/`decrease_speed` assume this list is
    /// sorted ascending — confirm the config files keep it sorted.
    pub time_scale_presets: Vec<f32>,
    /// How fast the clock is ticking. A value of 1.0 means time is
    /// behaving normally, higher values mean time is sped up and
    /// 0.0 means time is frozen.
    pub time_scale: f32,
    /// The max speed of the player in meters per second.
    /// NOTE(review): `MovementConfig` declares the same field — confirm
    /// which copy is actually read; this one may be a leftover.
    pub player_speed: f32,
    /// Number of seconds to leave between frames when rewinding time.
    pub seconds_per_rewind_frame: f32,
    /// Enable this when debugging, to save time when rapidly iterating.
    /// It saves you from having to navigate the menu every time you start the game.
    /// If true, the game will open in the editor state.
    /// If false, it will open on the main menu.
    pub skip_straight_to_editor: bool,
    /// Whether or not to display debug frames indicating the player's discrete position.
    pub display_debug_frames: bool,
}
impl DebugConfig {
    /// Step the time scale up to the next-larger preset.
    ///
    /// Returns `(old_scale, new_scale)`. If no preset is larger than the
    /// current scale, nothing changes and both tuple members are equal.
    /// Assumes `time_scale_presets` is sorted ascending.
    pub fn increase_speed(&mut self) -> (f32, f32) {
        let previous = self.time_scale;
        match self
            .time_scale_presets
            .iter()
            .copied()
            .find(|&preset| preset > previous)
        {
            Some(faster) => {
                self.time_scale = faster;
                (previous, faster)
            }
            None => (previous, previous),
        }
    }
    /// Step the time scale down to the next-smaller preset.
    ///
    /// Returns `(old_scale, new_scale)`. If no preset is smaller than the
    /// current scale, nothing changes and both tuple members are equal.
    /// Assumes `time_scale_presets` is sorted ascending (searched in
    /// reverse to find the largest preset below the current scale).
    pub fn decrease_speed(&mut self) -> (f32, f32) {
        let previous = self.time_scale;
        match self
            .time_scale_presets
            .iter()
            .rev()
            .copied()
            .find(|&preset| preset < previous)
        {
            Some(slower) => {
                self.time_scale = slower;
                (previous, slower)
            }
            None => (previous, previous),
        }
    }
}
/// Tuning parameters for player and map-cursor movement timing,
/// deserialized from a config file.
#[derive(Debug, Deserialize, Serialize, Default)]
#[serde(default)]
#[serde(deny_unknown_fields)]
pub struct MovementConfig {
    /// The max speed of the player in meters per second.
    pub player_speed: f32,
    /// How many seconds can pass between starting your jump and starting to move sideways for it to
    /// still register. If you start moving sideways later than that, it will not work and the
    /// character will simply jump straight up into the air instead.
    pub jump_allowance: f32,
    /// How many seconds must pass after turning around whilst standing still before the character
    /// starts walking. This gives the player a bit of time to let go of the walking controls if
    /// they just want to turn around, but not want to start walking.
    pub turn_allowance: f32,
    /// When the player first starts pressing down a movement key (e.g. RIGHT), how many seconds
    /// does it take between moving the first step and moving the second step? The first step is
    /// taken instantly, the second step takes a while. This prevents a single key press registering
    /// as more than one step.
    pub map_cursor_move_high_cooldown: f32,
    /// When the player is holding down a movement key (e.g. RIGHT), how many seconds between two
    /// steps? The first step takes longer, that's what the high cooldown is for. Each subsequent
    /// step takes much shorter.
    pub map_cursor_move_low_cooldown: f32,
}
/// Volume settings for music and sound effects, deserialized from a
/// config file.
#[derive(Debug, Deserialize, Serialize, Default)]
#[serde(default)]
#[serde(deny_unknown_fields)]
pub struct AudioConfig {
    /// What volume the music should be played at. If this value is None, the music will not be
    /// played at all.
    /// The volume should be a value in the range [0.0, 1.0].
    pub music_volume: Option<f32>,
    /// What volume the sound effects should be played at. If this value is None, the sound
    /// effects will not be played at all. (Fixed copy-pasted "music" wording.)
    /// The volume should be a value in the range [0.0, 1.0].
    pub sound_effects_volume: Option<f32>,
}
|
//! s3du: A tool for informing you of the used space in AWS S3 buckets.
#![forbid(unsafe_code)]
#![deny(missing_docs)]
#![allow(clippy::redundant_field_names)]
use anyhow::Result;
use std::str::FromStr;
use tracing::{
debug,
info,
};
/// Command line parsing.
mod cli;
/// Common types and traits.
mod common;
use common::{
BucketSizer,
ClientConfig,
ClientMode,
HumanSize,
Region,
SizeUnit,
};
#[cfg(feature = "s3")]
use common::ObjectVersions;
/// `CloudWatch` Client.
#[cfg(feature = "cloudwatch")]
mod cloudwatch;
/// S3 Client.
#[cfg(feature = "s3")]
mod s3;
/// `Client` struct wraps a `Box<dyn BucketSizer>`.
struct Client(Box<dyn BucketSizer>);
/// `Client` implementation.
impl Client {
    /// Return the appropriate AWS client with the given `ClientConfig`.
    ///
    /// The concrete backend (CloudWatch or S3) is selected by
    /// `config.mode`; each arm is feature-gated, so only compiled-in
    /// backends are available.
    async fn new(config: ClientConfig) -> Self {
        let mode = &config.mode;
        let region = &config.region;
        info!("Client in region {} for mode {:?}", region.name(), mode);
        let client: Box<dyn BucketSizer> = match mode {
            #[cfg(feature = "cloudwatch")]
            ClientMode::CloudWatch => {
                let client = cloudwatch::Client::new(config);
                Box::new(client.await)
            },
            #[cfg(feature = "s3")]
            ClientMode::S3 => {
                let client = s3::Client::new(config);
                Box::new(client.await)
            },
        };
        Client(client)
    }
    /// Perform the actual get and output of the bucket sizes.
    ///
    /// Prints one `size<TAB>bucket` line per bucket, then a du(1)-style
    /// total line; sizes are rendered according to `unit`.
    async fn du(&self, unit: SizeUnit) -> Result<()> {
        // List all of our buckets
        let buckets = self.0.buckets().await?;
        debug!("du: Got buckets: {:?}", buckets);
        // Track total size of all buckets.
        let mut total_size: u64 = 0;
        // For each bucket name, get the size
        for bucket in buckets {
            let size = self.0.bucket_size(&bucket).await?;
            total_size += size;
            // Shadowing converts the raw byte count to its display form.
            let size = size.humansize(&unit);
            println!("{size}\t{bucket}", bucket=bucket.name);
        }
        let total_size = total_size.humansize(&unit);
        // Display the total size the same way du(1) would, the total size
        // followed by a `.`.
        println!("{total_size}\t.");
        Ok(())
    }
}
/// Entry point: parses the CLI, builds the `ClientConfig`, and runs `du`.
#[tokio::main]
async fn main() -> Result<()> {
    tracing_subscriber::fmt::init();
    // Parse the CLI
    let matches = cli::parse_args();
    // Get the bucket name, if any.
    let bucket_name = matches.get_one::<String>("BUCKET").cloned();
    // Get the client mode
    let mode: ClientMode = {
        let mode = matches.get_one::<String>("MODE")
            .expect("client mode");
        ClientMode::from_str(mode.as_str())
            .expect("client mode")
    };
    // Get the unit size to display
    let unit: SizeUnit = {
        let unit = matches.get_one::<String>("UNIT")
            .expect("size unit");
        SizeUnit::from_str(unit.as_str())
            .expect("size unit")
    };
    // Here we get the region, if a custom endpoint is set, that is used,
    // otherwise we get the regular region.
    // Unwraps on values here should be fine, as they're checked when the CLI
    // is validated.
    #[cfg(feature = "s3")]
    let region = if matches.contains_id("ENDPOINT") {
        if mode == ClientMode::S3 {
            let region = matches.get_one::<String>("REGION").unwrap();
            Region::new().set_region(region)
        }
        else {
            // A custom endpoint only makes sense for the S3 backend.
            eprintln!("Error: Endpoint supplied but client mode is not S3");
            ::std::process::exit(1);
        }
    }
    else {
        let region = matches.get_one::<String>("REGION").unwrap();
        Region::new().set_region(region)
    };
    // Endpoint selection isn't supported for CloudWatch, so we can drop it if
    // we're compiled without the S3 feature.
    #[cfg(all(feature = "cloudwatch", not(feature = "s3")))]
    let region = {
        let region = matches.get_one::<String>("REGION").unwrap();
        Region::new().set_region(region)
    };
    // This warning will trigger if compiled without the "s3" feature. We're
    // aware, allow it.
    #[allow(unused_mut)]
    let mut config = ClientConfig {
        bucket_name: bucket_name,
        mode: mode,
        region: region,
        ..Default::default()
    };
    // If have s3 mode available we also need to pull in the ObjectVersions
    // from the command line.
    #[cfg(feature = "s3")]
    {
        if config.mode == ClientMode::S3 {
            // This should be safe, we validated this in the CLI parser.
            let versions = matches.get_one::<String>("OBJECT_VERSIONS").unwrap();
            // This should be safe, due to validation of the above.
            let versions = ObjectVersions::from_str(versions).unwrap();
            config.object_versions = versions;
            // Set the endpoint
            config.endpoint = matches.get_one::<String>("ENDPOINT").cloned();
        }
    }
    // The region here will come from CLI args in the future
    let client = Client::new(config).await;
    client.du(unit).await
}
|
use super::{parse, Builder, Compiler, GenericResult};
use std::time::Instant;
use std::{fs::File, io::Read};
/// Load and execute a file in the VM.
///
/// Loads `path` as module "main", builds it with LLVM, then runs the
/// generated entry point. With the "debug" feature enabled the call is
/// additionally timed and the duration printed.
pub fn exec_file<'a>(compiler: &mut Compiler<'a>, path: &str) -> GenericResult<()> {
    load_file(compiler, path, "main")?;
    let mut builder = Builder::new(&compiler.llvm);
    builder.build(&compiler.data, &mut compiler.llvm.types);
    // Entry symbol naming convention is "<module>-<fn>", hence "main-main".
    let f = builder.get_function("main-main")?;
    if cfg!(feature = "debug") {
        let before = Instant::now();
        f();
        println!("function duration: {}", before.elapsed().as_secs_f64());
    } else {
        f();
    }
    Ok(())
}
/// Load a source file into the VM under `module_name`.
///
/// Reads the file at `path` and parses it. Compiling the parsed input as a
/// module is still a TODO, so the parse result is currently dropped; the
/// binding is underscore-prefixed to document that and silence the
/// unused-variable warning (`compiler` and `module_name` are likewise
/// unused until module compilation lands).
pub fn load_file<'a>(
    compiler: &mut Compiler<'a>,
    path: &str,
    module_name: &str,
) -> GenericResult<()> {
    let mut file = File::open(path)?;
    let mut input = String::new();
    file.read_to_string(&mut input)?;
    let _inp = parse(&input);
    // TODO: fix to compile a module
    // Ok(compile_module(compiler, module_name, &_inp)?)
    Ok(())
}
|
#[doc = "Register `DFSDM_HWCFGR` reader"]
pub type R = crate::R<DFSDM_HWCFGR_SPEC>;
#[doc = "Field `NBT` reader - NBT"]
pub type NBT_R = crate::FieldReader;
#[doc = "Field `NBF` reader - NBF"]
pub type NBF_R = crate::FieldReader;
impl R {
    /// Bits 0:7 - NBT
    #[inline(always)]
    pub fn nbt(&self) -> NBT_R {
        // The truncating cast keeps exactly the low 8 bits (bits 0..=7),
        // equivalent to masking with 0xff first.
        NBT_R::new(self.bits as u8)
    }
    /// Bits 8:15 - NBF
    #[inline(always)]
    pub fn nbf(&self) -> NBF_R {
        // Shift the field down to bit 0, then truncate to its 8-bit width.
        NBF_R::new((self.bits >> 8) as u8)
    }
}
#[doc = "This register specifies the hardware configuration of DFSDM peripheral.\n\nYou can [`read`](crate::generic::Reg::read) this register and get [`dfsdm_hwcfgr::R`](R). See [API](https://docs.rs/svd2rust/#read--modify--write-api)."]
pub struct DFSDM_HWCFGR_SPEC;
impl crate::RegisterSpec for DFSDM_HWCFGR_SPEC {
type Ux = u32;
}
#[doc = "`read()` method returns [`dfsdm_hwcfgr::R`](R) reader structure"]
impl crate::Readable for DFSDM_HWCFGR_SPEC {}
#[doc = "`reset()` method sets DFSDM_HWCFGR to value 0x0608"]
impl crate::Resettable for DFSDM_HWCFGR_SPEC {
const RESET_VALUE: Self::Ux = 0x0608;
}
|
#[doc = "Register `AHBSCR` reader"]
pub type R = crate::R<AHBSCR_SPEC>;
#[doc = "Register `AHBSCR` writer"]
pub type W = crate::W<AHBSCR_SPEC>;
#[doc = "Field `CTL` reader - CTL"]
pub type CTL_R = crate::FieldReader;
#[doc = "Field `CTL` writer - CTL"]
pub type CTL_W<'a, REG, const O: u8> = crate::FieldWriter<'a, REG, 2, O>;
#[doc = "Field `TPRI` reader - TPRI"]
pub type TPRI_R = crate::FieldReader<u16>;
#[doc = "Field `TPRI` writer - TPRI"]
pub type TPRI_W<'a, REG, const O: u8> = crate::FieldWriter<'a, REG, 9, O, u16>;
#[doc = "Field `INITCOUNT` reader - INITCOUNT"]
pub type INITCOUNT_R = crate::FieldReader;
#[doc = "Field `INITCOUNT` writer - INITCOUNT"]
pub type INITCOUNT_W<'a, REG, const O: u8> = crate::FieldWriter<'a, REG, 5, O>;
impl R {
    /// Bits 0:1 - CTL
    #[inline(always)]
    pub fn ctl(&self) -> CTL_R {
        // 2-bit field at bit 0.
        let raw = self.bits & 0b11;
        CTL_R::new(raw as u8)
    }
    /// Bits 2:10 - TPRI
    #[inline(always)]
    pub fn tpri(&self) -> TPRI_R {
        // 9-bit field starting at bit 2.
        let raw = (self.bits >> 2) & 0x1ff;
        TPRI_R::new(raw as u16)
    }
    /// Bits 11:15 - INITCOUNT
    #[inline(always)]
    pub fn initcount(&self) -> INITCOUNT_R {
        // 5-bit field starting at bit 11.
        let raw = (self.bits >> 11) & 0x1f;
        INITCOUNT_R::new(raw as u8)
    }
}
impl W {
    #[doc = "Bits 0:1 - CTL"]
    #[inline(always)]
    #[must_use]
    pub fn ctl(&mut self) -> CTL_W<AHBSCR_SPEC, 0> {
        CTL_W::new(self)
    }
    #[doc = "Bits 2:10 - TPRI"]
    #[inline(always)]
    #[must_use]
    pub fn tpri(&mut self) -> TPRI_W<AHBSCR_SPEC, 2> {
        TPRI_W::new(self)
    }
    #[doc = "Bits 11:15 - INITCOUNT"]
    #[inline(always)]
    #[must_use]
    pub fn initcount(&mut self) -> INITCOUNT_W<AHBSCR_SPEC, 11> {
        INITCOUNT_W::new(self)
    }
    #[doc = "Writes raw bits to the register."]
    #[inline(always)]
    // SAFETY contract (svd2rust convention): the caller must ensure `bits`
    // is a valid value for this register; no field masking is applied here.
    pub unsafe fn bits(&mut self, bits: u32) -> &mut Self {
        self.bits = bits;
        self
    }
}
#[doc = "AHB Slave Control register\n\nYou can [`read`](crate::generic::Reg::read) this register and get [`ahbscr::R`](R). You can [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero) this register using [`ahbscr::W`](W). You can also [`modify`](crate::generic::Reg::modify) this register. See [API](https://docs.rs/svd2rust/#read--modify--write-api)."]
pub struct AHBSCR_SPEC;
impl crate::RegisterSpec for AHBSCR_SPEC {
type Ux = u32;
}
#[doc = "`read()` method returns [`ahbscr::R`](R) reader structure"]
impl crate::Readable for AHBSCR_SPEC {}
#[doc = "`write(|w| ..)` method takes [`ahbscr::W`](W) writer structure"]
impl crate::Writable for AHBSCR_SPEC {
const ZERO_TO_MODIFY_FIELDS_BITMAP: Self::Ux = 0;
const ONE_TO_MODIFY_FIELDS_BITMAP: Self::Ux = 0;
}
#[doc = "`reset()` method sets AHBSCR to value 0"]
impl crate::Resettable for AHBSCR_SPEC {
const RESET_VALUE: Self::Ux = 0;
}
|
use failure::{err_msg, format_err, Error};
use std::error::Error as OtherError;
use std::sync::RwLock;
use gtk::*;
use ui::image_container::render_image;
use image::Image as MyImage;
use super::dialogs::open_dialog::OpenDialog;
pub fn open (headerbar: &HeaderBar,
image_container: &Image,
current_file: &RwLock<Option<MyImage>>,
) -> Result<(), Error> {
let open_dialog = OpenDialog::new({
match current_file.try_read() {
Ok(guard) => match * guard {
Some(ref image) => image.get_dir(),
None => None
},
Err(error) => return Err(format_err!("{}", error.description()))
}
});
if let Some(file_path) = open_dialog.run() {
match MyImage::open(&file_path) {
Ok(mut image) => {
headerbar.set_title(file_path.to_str());
render_image(&image_container, &image);
*current_file.write().unwrap() = Some(image);
},
Err(error) => return Err(err_msg(error))
}
}
Ok(())
} |
// The Rust community thinks about tests in terms of
// two main categories: unit tests and integration
// tests. Unit tests are small and more focused,
// testing one module in isolation at a time, and
// can test private interfaces. Integration tests
// are entirely external to your library and use
// your code in the same way any other external code
// would, using only the public interface and potentially
// exercising multiple modules per test.
// 1. Unit Tests
/// The #[cfg(test)] annotation on the tests module
/// tells Rust to compile and run the test code only
/// when you run cargo test, not when you run cargo build.
/// This saves compile time when you only want to build the
/// library and saves space in the resulting compiled artifact
/// because the tests are not included
/// However, because unit tests go in the same files as the code,
/// you’ll use #[cfg(test)] to specify that they shouldn’t be included
/// in the compiled result.
// 2. Integration Tests
/// In Rust, integration tests are entirely external to your library.
/// Cargo treats the tests directory specially and compiles files in
/// this directory only when we run cargo test
/// We can still run a particular integration test function by specifying
/// the test function’s name as an argument to cargo test. To run all the
/// tests in a particular integration test file, use the --test argument
/// of cargo test followed by the name of the file:
/// `cargo test --test integration_test` |
use std::io::{Result, Write};
use oops::Oops;
use select::document::Document;
use select::predicate::Class;
use stdinix::stdinix;
use ureq;
/// Read URLs line-by-line from stdin; for each, fetch the page and print
/// the absolute Rightmove URL of every non-empty `propertyCard-link` href,
/// flushing stdout after each line.
fn main() -> Result<()> {
    stdinix(|buf| {
        // Fetch the page body for this input line.
        let response = ureq::get(&buf[..]).call().oops("Failed to get URI")?;
        let body = response.into_string()?;
        let document = Document::from(&body[..]);
        // Emit each usable property link; bail out on the first I/O error,
        // matching the original collect::<Result<_>> short-circuiting.
        for node in document.find(Class("propertyCard-link")) {
            if let Some(href) = node.attr("href").filter(|s| !s.trim().is_empty()) {
                println!("https://www.rightmove.co.uk{}", href);
                std::io::stdout().flush()?;
            }
        }
        Ok(())
    })
}
|
import os::getcwd;
import os_fs;
// FFI hook into the runtime: asks the native side whether a path names a
// directory (pre-1.0 Rust `native mod` syntax).
native "rust" mod rustrt {
    fn rust_file_is_dir(path: str::sbuf) -> int;
}
// Return the platform path separator as a one-character string.
fn path_sep() -> str { ret str::from_char(os_fs::path_sep); }
// Paths are plain strings in this era of the library.
type path = str;
// Return everything before the last path separator, or "." when `p` has no
// separator at all. Falls back to the alternate separator (presumably the
// '\\' vs '/' pair on Windows — confirm in os_fs) before giving up.
fn dirname(p: path) -> path {
    let i: int = str::rindex(p, os_fs::path_sep as u8);
    if i == -1 {
        i = str::rindex(p, os_fs::alt_path_sep as u8);
        if i == -1 { ret "."; }
    }
    ret str::substr(p, 0u, i as uint);
}
// Return the final component of `p` (everything after the last separator).
// Returns `p` unchanged when it contains no separator, or when the
// separator is the last byte.
fn basename(p: path) -> path {
    let i: int = str::rindex(p, os_fs::path_sep as u8);
    if i == -1 {
        i = str::rindex(p, os_fs::alt_path_sep as u8);
        if i == -1 { ret p; }
    }
    let len = str::byte_len(p);
    // NOTE(review): `as` binds tighter than `+`, so both uses below parse
    // as `i + (1 as uint)`; `(i + 1) as uint` was probably intended —
    // confirm against the era's precedence rules.
    if i + 1 as uint >= len { ret p; }
    ret str::slice(p, i + 1 as uint, len);
}
// FIXME: Need some typestate to avoid bounds check when len(pre) == 0
// Join two path fragments, inserting a separator unless `pre` already ends
// with one. NOTE(review): indexes `pre[len - 1u]`, so an empty `pre` fails
// the bounds check — see the FIXME above.
fn connect(pre: path, post: path) -> path {
    let len = str::byte_len(pre);
    ret if pre[len - 1u] == os_fs::path_sep as u8 {
        // Trailing '/'?
        pre + post
    } else { pre + path_sep() + post };
}
// True when `p` names a directory, as reported by the native runtime call.
fn file_is_dir(p: path) -> bool {
    ret str::as_buf(p, {|buf| rustrt::rust_file_is_dir(buf) != 0 });
}
// List the entries of directory `p` as full paths, skipping the "." and
// ".." pseudo-entries. A separator is appended to `p` first if it does not
// already end with one, so concatenation below yields valid paths.
fn list_dir(p: path) -> [str] {
    let p = p;
    let pl = str::byte_len(p);
    if pl == 0u || p[pl - 1u] as char != os_fs::path_sep { p += path_sep(); }
    let full_paths: [str] = [];
    for filename: str in os_fs::list_dir(p) {
        if !str::eq(filename, ".") {
            if !str::eq(filename, "..") { full_paths += [p + filename]; }
        }
    }
    ret full_paths;
}
// Forward to the platform-specific absolute-path test.
fn path_is_absolute(p: path) -> bool { ret os_fs::path_is_absolute(p); }
// FIXME: under Windows, we should prepend the current drive letter to paths
// that start with a slash.
// Make `p` absolute by joining it onto the current working directory when
// it is relative.
fn make_absolute(p: path) -> path {
    if path_is_absolute(p) { ret p; } else { ret connect(getcwd(), p); }
}
// Local Variables:
// mode: rust;
// fill-column: 78;
// indent-tabs-mode: nil
// c-basic-offset: 4
// buffer-file-coding-system: utf-8-unix
// compile-command: "make -k -C $RBUILD 2>&1 | sed -e 's/\\/x\\//x:\\//g'";
// End:
|
extern "C" {
pub fn doublemunlock(
addr: *const ::std::os::raw::c_void,
size: usize,
) -> ::std::os::raw::c_int;
pub fn doublemap(size: usize) -> *mut ::std::os::raw::c_void;
pub fn pagesize() -> ::std::os::raw::c_int;
}
#[cfg(test)]
mod tests {
    use crate::{doublemap, doublemunlock, pagesize};
    use std::slice;
    use std::mem;
    // Sanity-check the reported page size.
    // NOTE(review): hard-codes 4096; this fails on platforms with larger
    // pages (e.g. 16 KiB) — consider asserting a power of two instead.
    #[test]
    fn page_size() {
        let psize = unsafe { pagesize() as usize} ;
        assert_eq!(psize, 4096)
    }
    // Map one page and release it again, checking both calls succeed.
    #[test]
    fn allocate() {
        let size = 4096;
        let ptr = unsafe {doublemap(size) };
        assert_eq!(ptr.is_null(), false);
        let ret = unsafe { doublemunlock(ptr, size) };
        assert_eq!(ret, 0);
    }
    // Verify the double mapping: a write through the first half is visible
    // at the same offset in the mirrored second half.
    #[test]
    fn slice() {
        let size = 4096;
        let ptr = unsafe { doublemap(size) as *mut std::os::raw::c_int };
        assert_eq!(ptr.is_null(), false);
        let len = size / mem::size_of::<i32>();
        // len is the number of _elements_
        // The slice spans both mappings, hence 2*len elements.
        let slice = unsafe { slice::from_raw_parts_mut(ptr, 2*len) };
        let i = 3;
        slice[i] = 12000;
        assert_eq!(slice[i], slice[i+len]);
        let ret = unsafe {
            doublemunlock(ptr as *const std::os::raw::c_void, size)
        };
        assert_eq!(ret, 0);
    }
}
|
extern crate proc_macro;
extern crate proc_macro2;
extern crate quote;
extern crate syn;
use quote::quote;
use proc_macro::TokenStream;
use proc_macro_hack::proc_macro_hack;
use syn::{Expr, ExprArray, ExprLit, ExprUnary, Lit, parse, UnOp};
#[proc_macro_hack]
/// Expand `shape!([..])` into a `&[Option<..>]` slice expression.
///
/// Each element of the bracketed list becomes one entry:
/// * integer literal `n`            -> `Some(n)`
/// * negative integer literal       -> `None` (PyTorch-style "infer this dim")
/// * other unary-negated expression -> `Some(expr..(expr + 1))`
/// * any other expression           -> `Some(elem)`
///
/// Range syntax is currently disabled — see the commented-out arm below.
pub fn shape(input: TokenStream) -> TokenStream {
    // The whole input must parse as an array expression `[..]`.
    let arr_expr: ExprArray = match parse::<Expr>(input).expect("Fail to parse shape as array expr") {
        Expr::Array(arr) => arr,
        _ => panic!("Support only [NUMERIC, NUMERIC..NUMERIC] format")
    };
    let mut ranges = Vec::with_capacity(arr_expr.elems.len());
    for elem in arr_expr.elems {
        let as_range = match elem {
            // Expr::Range(range) => {
            //     if range.from.is_none() && range.to.is_none() {
            //         // This is Rust syntax that return full size slice
            //         // We shall keep this semantic with tensor
            //         quote! {
            //             None
            //         }
            //     } else {
            //         let from = match range.from {
            //             Some(f) => quote!{#f},
            //             None => quote!{0}
            //         };
            //         let to = match range.to {
            //             Some(t) => Some(t),
            //             None => panic!("Missing max bound. The end of range is required if start of range is specified")
            //         };
            //         let to = match range.limits {
            //             RangeLimits::HalfOpen(_) => {
            //                 quote! {#to}
            //             },
            //             RangeLimits::Closed(_) => {
            //                 quote! {(#to + 1)}
            //             }
            //         };
            //         quote! {
            //             Some(#from..#to)
            //         }
            //     }
            // },
            Expr::Lit(ExprLit {lit: Lit::Int(int), ..}) => {
                quote! {
                    Some(#int)
                }
            },
            Expr::Unary(ExprUnary {op: UnOp::Neg(_), expr, ..}) => {
                match *expr {
                    Expr::Lit(ExprLit {lit: Lit::Int(_), ..}) => {
                        // it's some negative number.
                        // In PyTorch negative size in shape mean
                        // take all the rest remaining element
                        quote! {
                            None
                        }
                    },
                    _ => {
                        // it might be a positive if expression return negative num
                        quote! {
                            Some(#expr..(#expr + 1))
                        }
                    }
                }
            }
            _ => {
                quote! {
                    Some(#elem)
                }
            }
        };
        ranges.push(as_range);
    }
    // Splice every entry into a single slice literal.
    let proper_shape = quote! {
        &[#(#ranges),*]
    };
    proper_shape.into()
}
use syn::{parse_quote, parse_quote_spanned};
use super::{
FlowProperties, FlowPropertyVal, OperatorCategory, OperatorConstraints, WriteContextArgs,
RANGE_0, RANGE_1,
};
use crate::graph::{OpInstGenerics, OperatorInstance};
/// > 2 input streams of type <(K, V1)> and <(K, V2)>, 1 output stream of type <(K, (V1, V2))>
///
/// This operator is equivalent to `join` except that the LHS and RHS are collected into multisets rather than sets before joining.
///
/// If you want
/// duplicates eliminated from the inputs, use the [`join`](#join) operator.
///
/// For example:
/// ```hydroflow
/// lhs = source_iter([("a", 0), ("a", 0)]) -> tee();
/// rhs = source_iter([("a", "hydro")]) -> tee();
///
/// lhs -> [0]multiset_join;
/// rhs -> [1]multiset_join;
/// multiset_join = join_multiset() -> assert_eq([("a", (0, "hydro")), ("a", (0, "hydro"))]);
///
/// lhs -> [0]set_join;
/// rhs -> [1]set_join;
/// set_join = join() -> assert_eq([("a", (0, "hydro"))]);
/// ```
pub const JOIN_MULTISET: OperatorConstraints = OperatorConstraints {
    name: "join_multiset",
    categories: &[OperatorCategory::MultiIn],
    // Exactly two inputs (LHS port 0, RHS port 1) and one output.
    hard_range_inn: &(2..=2),
    soft_range_inn: &(2..=2),
    hard_range_out: RANGE_1,
    soft_range_out: RANGE_1,
    num_args: 0,
    // Up to one persistence lifetime per input.
    persistence_args: &(0..=2),
    type_args: RANGE_0,
    is_external_input: false,
    ports_inn: Some(|| super::PortListSpec::Fixed(parse_quote! { 0, 1 })),
    ports_out: None,
    properties: FlowProperties {
        deterministic: FlowPropertyVal::Preserve,
        monotonic: FlowPropertyVal::Preserve,
        inconsistency_tainted: false,
    },
    input_delaytype_fn: |_| None,
    // Delegates to the regular JOIN codegen, but with the join-state type
    // argument forced to the half-multiset implementation.
    write_fn: |wc @ &WriteContextArgs {
                   root,
                   op_span,
                   op_inst: op_inst @ OperatorInstance { .. },
                   ..
               },
               diagnostics| {
        let join_type = parse_quote_spanned! {op_span=> // Uses `lat_type.span()`!
            #root::compiled::pull::HalfMultisetJoinState
        };
        // Clone the context, overriding only the generic type argument.
        let wc = WriteContextArgs {
            op_inst: &OperatorInstance {
                generics: OpInstGenerics {
                    type_args: vec![join_type],
                    ..wc.op_inst.generics.clone()
                },
                ..op_inst.clone()
            },
            ..wc.clone()
        };
        (super::join::JOIN.write_fn)(&wc, diagnostics)
    },
};
|
use super::{u256mod, ModulusTrait};
// Subtraction
impl<M: ModulusTrait> std::ops::Sub for &u256mod<M> {
    type Output = u256mod<M>;
    /// Modular subtraction: (self - other) mod M.
    /// Assumes both operands are already reduced (< M), so adding the
    /// modulus once before subtracting is enough to avoid underflow and
    /// leaves the result in [0, M).
    fn sub(self, other: &u256mod<M>) -> u256mod<M> {
        let result_value = if self.value >= other.value {
            &self.value - &other.value
        } else {
            // Borrow case: lift into [M, 2M) first so subtraction
            // cannot underflow.
            M::modulus() + &self.value - &other.value
        };
        return u256mod { value: result_value, this_is_stupid_why: std::marker::PhantomData };
    }
}
impl<M: ModulusTrait> std::ops::SubAssign<&u256mod<M>> for u256mod<M> {
    /// In-place modular subtraction by reference.
    fn sub_assign(&mut self, other: &u256mod<M>) {
        // Reborrow `*self` instead of moving out of it: `*self - other`
        // attempts to move the value out from behind `&mut self`, which
        // fails to borrow-check when u256mod is not Copy. `&*self - other`
        // is identical in result and works either way.
        *self = &*self - other;
    }
}
// Negation
impl<M: ModulusTrait> std::ops::Neg for &u256mod<M> {
    type Output = u256mod<M>;
    /// Additive inverse modulo M: `-x == 0 - x (mod M)`.
    fn neg(self) -> u256mod<M> {
        u256mod::zero() - self
    }
}
// Owned/borrowed argument combinations, all forwarding to the
// reference-reference implementation above.
impl<M: ModulusTrait> std::ops::Sub for u256mod<M> {
    type Output = u256mod<M>;
    fn sub(self, other: u256mod<M>) -> u256mod<M> {
        &self - &other
    }
}
impl<M: ModulusTrait> std::ops::Sub<&u256mod<M>> for u256mod<M> {
    type Output = u256mod<M>;
    fn sub(self, other: &u256mod<M>) -> u256mod<M> {
        &self - other
    }
}
impl<M: ModulusTrait> std::ops::Sub<u256mod<M>> for &u256mod<M> {
    type Output = u256mod<M>;
    fn sub(self, other: u256mod<M>) -> u256mod<M> {
        self - &other
    }
}
impl<M: ModulusTrait> std::ops::SubAssign for u256mod<M> {
    /// In-place modular subtraction by value.
    fn sub_assign(&mut self, other: u256mod<M>) {
        // Reborrow both operands: `*self - other` attempts to move the
        // value out from behind `&mut self`, which fails to borrow-check
        // when u256mod is not Copy. `&*self - &other` is identical in
        // result and works either way.
        *self = &*self - &other;
    }
}
impl<M: ModulusTrait> std::ops::Neg for u256mod<M> {
type Output = u256mod<M>;
fn neg(self) -> u256mod<M> {
return -&self;
}
} |
extern crate rand;
use rand::Rng;
/// Classic number-guessing game: picks a secret number in [1, 100], then
/// keeps reading guesses from stdin until the player finds it, reporting
/// "lower"/"higher" (in Russian) after each attempt.
fn main() {
    use std::cmp::Ordering;
    // Same bytes as the original print!+println! pair: "Give number: \n".
    println!("Give number: ");
    let secret_number = rand::thread_rng().gen_range(1,101);
    loop {
        let mut guess = String::new();
        std::io::stdin()
            .read_line(&mut guess)
            .expect("Cannot read from stdin");
        // Re-prompt on anything that is not a valid integer.
        let guess: i32 = match guess.trim().parse() {
            Ok(number) => number,
            Err(_) => {
                println!("Введите число!");
                continue;
            }
        };
        match guess.cmp(&secret_number) {
            Ordering::Less => println!("{} Меньше чем нужное число", guess ),
            Ordering::Greater => println!("{} Является больше чем нужное число", guess),
            Ordering::Equal => {
                println!("Вы угадали");
                break;
            }
        }
    }
}
|
use futures_util::{AsyncReadExt, AsyncWriteExt};
use hreq_h1::buf_reader::BufIo;
use hreq_h1::Error;
mod common;
/// A POST with a `content-length` body is delivered to the server handler,
/// which answers with an empty, connection-closing 200.
#[async_std::test]
async fn server_request_with_body_clen() -> Result<(), Error> {
    // Server side: assert the request line, path and body, then respond.
    let conn = common::run_server(|parts, body, respond, _| async move {
        assert_eq!(parts.method, "POST");
        assert_eq!(parts.uri.path(), "/path");
        assert_eq!(&body.unwrap(), b"OK\n");
        let res = http::Response::builder()
            .header("content-length", "0")
            .header("connection", "close")
            .body(())
            .unwrap();
        respond.send_response(res, false).await.unwrap();
        Ok(false)
    })
    .await?;
    // Client side: hand-written HTTP/1.1 request with a 3-byte body.
    let tcp = conn.connect().await?;
    let mut brd = BufIo::with_capacity(8192, tcp);
    brd.write_all(b"POST /path HTTP/1.1\r\ncontent-length: 3\r\n\r\nOK\n")
        .await?;
    let head = common::test_read_header(&mut brd).await?;
    assert_eq!(
        head,
        "HTTP/1.1 200 OK\r\ncontent-length: 0\r\nconnection: close\r\n\r\n"
    );
    // The server closed the connection, so the next read (if it succeeds)
    // must be EOF. Idiom fix: `if let Ok(..)` instead of `.ok()` + Some.
    let mut buf = [0_u8; 1];
    if let Ok(read) = brd.read(&mut buf).await {
        assert_eq!(read, 0);
    }
    Ok(())
}
/// Same as the content-length test, but the request body arrives
/// `transfer-encoding: chunked`.
#[async_std::test]
async fn server_request_with_body_chunked() -> Result<(), Error> {
    // Server side: assert the request and dechunked body, then respond.
    let conn = common::run_server(|parts, body, respond, _| async move {
        assert_eq!(parts.method, "POST");
        assert_eq!(parts.uri.path(), "/path");
        assert_eq!(&body.unwrap(), b"OK\n");
        let res = http::Response::builder()
            .header("content-length", "0")
            .header("connection", "close")
            .body(())
            .unwrap();
        respond.send_response(res, false).await.unwrap();
        Ok(false)
    })
    .await?;
    // Client side: one 3-byte chunk followed by the zero-length terminator.
    let tcp = conn.connect().await?;
    let mut brd = BufIo::with_capacity(8192, tcp);
    brd.write_all(
        b"POST /path HTTP/1.1\r\ntransfer-encoding: chunked\r\n\r\n3\r\nOK\n\r\n0\r\n\r\n",
    )
    .await?;
    let head = common::test_read_header(&mut brd).await?;
    assert_eq!(
        head,
        "HTTP/1.1 200 OK\r\ncontent-length: 0\r\nconnection: close\r\n\r\n"
    );
    // Connection must be at EOF. Idiom fix: `if let Ok(..)` over `.ok()`.
    let mut buf = [0_u8; 1];
    if let Ok(read) = brd.read(&mut buf).await {
        assert_eq!(read, 0);
    }
    Ok(())
}
/// The server drops the incoming request body unread and responds anyway;
/// the client must still receive a complete chunked response.
#[async_std::test]
async fn server_request_with_body_dropped() -> Result<(), Error> {
    common::setup_logger();
    use async_std::net::TcpListener;
    use common::Connector;
    let l = TcpListener::bind("127.0.0.1:0").await?;
    let p = l.local_addr().unwrap().port();
    let addr = format!("127.0.0.1:{}", p);
    // Hand-rolled server task (not common::run_server) so we can drop the
    // body receiver explicitly.
    async_std::task::spawn(async move {
        let (tcp, _) = l.accept().await.expect("Accept incoming");
        let mut conn = hreq_h1::server::handshake(tcp);
        let (req, respond) = conn.accept().await.unwrap().expect("Handshaken");
        let (_, recv_body) = req.into_parts();
        // this is what we're testing, dropping the recv_body, ignoring the incoming
        // request body and then send a response anyway.
        drop(recv_body);
        let mut send_body = respond
            .send_response(
                http::Response::builder()
                    .header("transfer-encoding", "chunked")
                    .body(())
                    .unwrap(),
                false,
            )
            .await
            .expect("send_response");
        // End the response body immediately (empty, final chunk).
        send_body.send_data(&[], true).await.unwrap();
    });
    let conn = Connector(addr);
    let tcp = conn.connect().await?;
    let mut brd = BufIo::with_capacity(8192, tcp);
    brd.write_all(b"POST /path HTTP/1.1\r\ncontent-length: 0\r\n\r\n")
        .await?;
    let head = common::test_read_header(&mut brd).await?;
    assert_eq!(
        head,
        "HTTP/1.1 200 OK\r\ntransfer-encoding: chunked\r\n\r\n"
    );
    // The body is exactly the chunked terminator.
    let mut buf = [0_u8; 5];
    brd.read(&mut buf).await?;
    assert_eq!(&buf, b"0\r\n\r\n");
    // Then EOF. Idiom fix: `if let Ok(..)` over `.ok()`.
    let mut buf = [0_u8; 1];
    if let Ok(read) = brd.read(&mut buf).await {
        assert_eq!(read, 0);
    }
    Ok(())
}
|
extern crate clap;
extern crate toml;
extern crate serde;
extern crate execute;
use clap::
{
Arg,
App,
SubCommand,
AppSettings
};
use serde::
{
Serialize,
Deserialize
};
use std::fmt;
use crate::system::
{
System,
SystemError,
ReadWriteError
};
use crate::system::util::
{
read_file_to_string,
write_str_to_file,
ReadFileToStringError,
};
use crate::system::real::RealSystem;
use crate::printer::StandardPrinter;
use crate::downloader::
{
download_string
};
mod blob;
mod build;
mod cache;
mod directory;
mod memory;
mod history;
mod packet;
mod printer;
mod rule;
mod server;
mod system;
mod ticket;
mod work;
mod downloader;
// One recorded `ruler build` invocation — the rules files and optional
// target it ran with — so that `ruler again` can replay it.
#[derive(Serialize, Deserialize, PartialEq, Debug)]
struct BuildInvocation
{
    rules: Option<Vec<String>>,
    target: Option<String>,
}
// Persistent configuration stored as TOML in <dir>/config.toml.
// `again` holds the most recent build invocation, if any.
#[derive(Serialize, Deserialize, PartialEq, Debug)]
struct Config
{
    again: Option<BuildInvocation>,
}
impl Config
{
    /// An empty configuration: no build invocation recorded yet.
    fn new() -> Config
    {
        Config { again: None }
    }
}
// Everything that can go wrong while reading or writing the config file.
pub enum ConfigError
{
    FailedToCreateDirectory(SystemError),
    FailedToCreateConfigFile(ReadWriteError),
    FailedToReadConfigFile(ReadFileToStringError),
    // Config file exists but is not valid TOML.
    TomlDeError(toml::de::Error),
    // Config object could not be serialized back to TOML.
    TomlSerError(toml::ser::Error),
}
impl fmt::Display for ConfigError
{
    /// Human-readable description of each config error variant.
    fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result
    {
        match self
        {
            ConfigError::FailedToCreateDirectory(error) =>
                write!(formatter, "Failed to create directory: {}", error),
            ConfigError::FailedToCreateConfigFile(error) =>
                write!(formatter, "Failed to create config file: {}", error),
            // Fixed copy-paste message: this variant is about reading the
            // config file, not creating the cache directory.
            ConfigError::FailedToReadConfigFile(error) =>
                write!(formatter, "Failed to read config file: {}", error),
            ConfigError::TomlDeError(error) =>
                write!(formatter, "Config file opened, but failed to parse as toml: {}", error),
            ConfigError::TomlSerError(error) =>
                write!(formatter, "Config failed to encode as toml: {}", error),
        }
    }
}
/* From the given .ruler directory read the config file and parse as toml to obtain a
Config object. If the file does not exist yet, a default config is written out and
returned. If any part of that fails, forward the appropriate error. */
fn read_config<SystemType : System>
(
    system : &mut SystemType,
    directory : &str
)
->
Result<Config, ConfigError>
{
    // Make sure the .ruler directory exists before touching the config file.
    if ! system.is_dir(directory)
    {
        match system.create_dir(directory)
        {
            Ok(_) => {},
            Err(error) => return Err(ConfigError::FailedToCreateDirectory(error)),
        }
    }
    let config_path = format!("{}/config.toml", directory);
    if system.is_file(&config_path)
    {
        // Existing config: read and parse it.
        match read_file_to_string(system, &config_path)
        {
            Ok(content_string) =>
            {
                return
                match toml::from_str(&content_string)
                {
                    Ok(config) => Ok(config),
                    Err(error) => Err(ConfigError::TomlDeError(error)),
                }
            },
            Err(error) => return Err(ConfigError::FailedToReadConfigFile(error)),
        }
    }
    else
    {
        // No config yet: write out a default one and return it.
        let default_config = Config::new();
        match toml::to_string(&default_config)
        {
            Ok(config_toml) =>
                match write_str_to_file(system, &config_path, &config_toml)
                {
                    Ok(_) => Ok(default_config),
                    Err(error) => Err(ConfigError::FailedToCreateConfigFile(error)),
                },
            Err(error) => Err(ConfigError::TomlSerError(error)),
        }
    }
}
/* In the given directory, write the config object to toml file. If any part of that
goes wrong, error. */
fn write_config<SystemType : System>
(
    system : &mut SystemType,
    directory : &str,
    config : &Config
)
->
Result<(), ConfigError>
{
    // Create the directory first so the file write below cannot fail on a
    // missing parent.
    if ! system.is_dir(directory)
    {
        match system.create_dir(directory)
        {
            Ok(_) => {},
            Err(error) => return Err(ConfigError::FailedToCreateDirectory(error)),
        }
    }
    let config_path = format!("{}/config.toml", directory);
    // Serialize, then persist.
    match toml::to_string(config)
    {
        Ok(config_toml) =>
            match write_str_to_file(system, &config_path, &config_toml)
            {
                Ok(_) => Ok(()),
                Err(error) => Err(ConfigError::FailedToCreateConfigFile(error)),
            },
        Err(error) => Err(ConfigError::TomlSerError(error)),
    }
}
/* Entry point: defines the Ruler CLI (clap) and dispatches to the handlers
for the download / again / clean / build / serve / hash / nodes subcommands.
Multi-line help strings below are kept verbatim. */
fn main()
{
let big_matches = App::new("Ruler")
.version("0.1.6")
.author("Peterson Trethewey <peterson@2-complex.com>")
.about("
Ruler is a tool for managing a dependence graph of files. It works with a
.rules file. A .rules file contains newline-separated blocks called 'rules'.
Each rule looks like this:
path/to/target/file1
path/to/target/file2
:
path/to/source/file1
path/to/source/file2
path/to/source/file3
:
command
--option1=value1
--option2=value2
:
The command-line invocation is meant to update the target files using the
source files as input.
Ruler maintains a history of file-hashes to determine whether a target needs to
update. When you type a build command such as:
ruler build
Ruler checks the current state of the targets against the hashes it has on
file, determines which ones need to update and in what order, and runs the
commands for those rules.
")
.subcommand(
SubCommand::with_name("clean")
.about("Removes all targets")
.help("
Removes all files and directories specificed as targets in the rules file.
If a target is specified, removes all that targets ancestors.
Note: clean does not delete the files, it moves them to a cache so they can be
recovered later if needed.
")
.arg(Arg::with_name("target")
.help("
The path to the clean-target. The clean command removes all files listed as
targets in rules that the clean-target depends on. If no clean-target is
specified, the clean command removes all files listed as targets in any rule.
")
.required(false)
.index(1)
)
.arg(Arg::with_name("rules")
.short("r")
.long("rules")
.value_name("rules")
.multiple(true)
.help("Path to a custom rules file (default: build.rules)")
.takes_value(true))
)
.subcommand(
SubCommand::with_name("build")
.about("Builds the given target or all targets")
.help("
Builds the given target. If no build-target is specified, builds all targets.
The target must be a file listed in the target section of the current rules
file. The rules file is either a file in the current working directory called
\"build.rules\" or it can be specificed using --rules=<path>
")
.arg(Arg::with_name("target")
.help("
Path to a specific build-target to build. Ruler will only build this target,
and its ancestors, as needed.")
.required(false)
.index(1)
)
.arg(Arg::with_name("rules")
.short("r")
.long("rules")
.value_name("rules")
.multiple(true)
.help("Path to a custom rules file (default: build.rules)")
.takes_value(true))
)
.subcommand(
SubCommand::with_name("hash")
.about("Outputs the hash of a file")
.help("
Takes a path to a file, returns the url-safe-base64-encoded sha256 of the file.
")
.arg(Arg::with_name("path")
.help("
Path to any file.
")
.required(true)
.index(1)
)
)
.setting(AppSettings::ArgRequiredElseHelp)
.subcommand(
SubCommand::with_name("nodes")
.about("Displays info on current build-nodes along with their
current rule-hash")
.help("
Reads the rules files the same way as when invoking ruler build, except instead
of running the build process, prints information about each node. This command
is read only. It is useful for troubleshooting and understanding how ruler
works.
")
)
.setting(AppSettings::ArgRequiredElseHelp)
.subcommand(
SubCommand::with_name("again")
.about("Repeats the most recent build command")
.help("
Repeats the most recent `ruler build` invocation. To get started, type
`ruler build`. The next time you run `ruler again`, it will repeat that
`ruler build` with the same options.
")
.arg(Arg::with_name("target")
.help("
Path to a specific build-target to build. Ruler will only build this target,
and its ancestors, as needed.")
.required(false)
.index(1)
)
.arg(Arg::with_name("rules")
.short("r")
.long("rules")
.value_name("rules")
.multiple(true)
.help("Path to a custom rules file (default: build.rules)")
.takes_value(true))
)
.subcommand(
SubCommand::with_name("download")
.about("Repeats the most recent build command")
.help("Downloads!")
.arg(Arg::with_name("url")
.help("")
.required(true)
.index(1)
)
)
.subcommand(
SubCommand::with_name("serve")
.about("Starts a server to provide other instances of ruler on the
network access to the files in the cache.")
.help("Starts a server to provide other instances of ruler on the
network access to the files in the cache.")
.arg(Arg::with_name("address")
.short("a")
.long("address")
.value_name("address")
.help("The address upon which to serve")
.takes_value(true))
)
.setting(AppSettings::ArgRequiredElseHelp)
.get_matches();
// `ruler download <url>`: fetch a URL and print its contents.
if let Some(matches) = big_matches.subcommand_matches("download")
{
let url =
match matches.value_of("url")
{
Some(value) => value,
None => "apple.com",
};
match download_string(url)
{
Ok(s) => println!("contents: {}", s),
Err(error) => println!("error: {}", error),
}
}
// `ruler again`: replay the build invocation recorded in the config.
if let Some(matches) = big_matches.subcommand_matches("again")
{
// NOTE(review): no "directory" arg is declared on any subcommand above,
// so this presumably always falls through to ".ruler" — confirm.
let directory =
match matches.value_of("directory")
{
Some(value) => value,
None => ".ruler",
};
let mut system = RealSystem::new();
match read_config(&mut system, &directory)
{
Ok(config) =>
match config.again
{
Some(again) =>
{
let rules =
match again.rules
{
Some(value) => value.clone(),
None => vec!["build.rules".to_string()],
};
let target =
match again.target
{
Some(value) => Some(value.to_string()),
None => None,
};
let mut printer = StandardPrinter::new();
match build::build(
system,
directory,
rules,
None,
target,
&mut printer)
{
Ok(()) => {},
Err(error) => eprintln!("{}", error),
}
}
None =>
{
println!("Repeats the most recent `ruler build` invocation. To get started, type `ruler build`.
The next time you run `ruler again`, it will repeat that `ruler build` with the same options.");
},
}
Err(config_error) => println!("Error reading config: {}", config_error),
}
}
// `ruler clean [target]`: move built targets into the cache.
if let Some(matches) = big_matches.subcommand_matches("clean")
{
let directory =
match matches.value_of("directory")
{
Some(value) => value,
None => ".ruler",
};
let rulefiles =
match matches.values_of("rules")
{
Some(values) => values.map(|s| s.to_string()).collect(),
None => vec!("build.rules".to_string()),
};
let target =
match matches.value_of("target")
{
Some(value) => Some(value.to_string()),
None => None,
};
match build::clean(
RealSystem::new(), directory, rulefiles, target)
{
Ok(()) => {},
Err(error) => eprintln!("{}", error),
}
}
// `ruler build [target]`: record the invocation in the config (for
// `again`), then run the build.
if let Some(matches) = big_matches.subcommand_matches("build")
{
let rulefiles =
match matches.values_of("rules")
{
Some(values) =>
{
values.map(|s| s.to_string()).collect()
},
None =>
{
vec!("build.rules".to_string())
},
};
for f in rulefiles.iter()
{
println!("{}", f);
}
let directory =
match matches.value_of("directory")
{
Some(value) => value,
None => ".ruler",
};
let target =
match matches.value_of("target")
{
Some(value) => Some(value.to_string()),
None => None,
};
let config = Config
{
again : Some(
BuildInvocation
{
target : target.clone(),
rules : Some(rulefiles.clone()),
}
)
};
let mut system = RealSystem::new();
let mut printer = StandardPrinter::new();
match write_config(&mut system, &directory, &config)
{
Ok(()) =>
{
match build::build(
system,
directory,
rulefiles,
None,
target,
&mut printer)
{
Ok(()) => {},
Err(error) => eprintln!("{}", error),
}
},
Err(error) =>
{
println!("Error writing config file: {}", error);
}
}
}
// `ruler serve`: expose the cache to other ruler instances.
if let Some(matches) = big_matches.subcommand_matches("serve")
{
let directory =
match matches.value_of("directory")
{
Some(value) => value,
None => ".ruler",
};
match server::serve(
RealSystem::new(),
directory)
{
Ok(()) => {},
Err(error) => eprintln!("{}", error),
}
}
// `ruler hash <path>`: print the file's content hash.
if let Some(matches) = big_matches.subcommand_matches("hash")
{
match matches.value_of("path")
{
Some(path) =>
{
let system = RealSystem::new();
match blob::get_file_ticket_from_path(&system, &path)
{
Ok(Some(file_ticket)) =>
{
println!("{}", file_ticket);
},
Ok(None) => eprintln!("File not found: {}", path),
Err(error) => eprintln!("{}", error),
}
},
None =>
{
eprintln!("Internal error");
}
}
}
// `ruler nodes`: read-only dump of the build graph.
if let Some(matches) = big_matches.subcommand_matches("nodes")
{
let rulefiles =
match matches.values_of("rules")
{
Some(values) =>
{
values.map(|s| s.to_string()).collect()
},
None =>
{
vec!("build.rules".to_string())
},
};
let target =
match matches.value_of("target")
{
Some(value) => Some(value.to_string()),
None => None,
};
let system = RealSystem::new();
match build::get_nodes(
&system,
rulefiles,
target)
{
Ok(nodes) =>
{
for node in nodes.iter()
{
print!("{}", node);
}
},
Err(error) => eprintln!("{}", error),
}
}
}
|
// Copyright 2021 Contributors to the Parsec project.
// SPDX-License-Identifier: Apache-2.0
//! Session types
use crate::context::Pkcs11;
use crate::error::Result;
use crate::mechanism::Mechanism;
use crate::object::{Attribute, AttributeInfo, AttributeType, ObjectHandle};
use cryptoki_sys::*;
use log::error;
use std::collections::HashMap;
use std::fmt::Formatter;
use std::marker::PhantomData;
mod decryption;
mod encryption;
mod key_management;
mod object_management;
mod random;
mod session_info;
mod session_management;
mod signing_macing;
mod slot_token_management;
pub use session_info::{SessionInfo, SessionState};
/// Type that identifies a session
///
/// It will automatically get closed (and logout) on drop.
/// Session does not implement Sync to prevent the same Session instance to be used from multiple
/// threads. A Session needs to be created in its own thread or to be passed by ownership to
/// another thread.
#[derive(Debug)]
pub struct Session {
    // Raw PKCS#11 session handle as returned by the underlying library.
    handle: CK_SESSION_HANDLE,
    // The PKCS#11 context this session was opened from.
    client: Pkcs11,
    // This is not used but to prevent Session to automatically implement Send and Sync
    _guard: PhantomData<*mut u32>,
}
impl std::fmt::Display for Session {
    // Renders the session as its decimal handle value.
    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
        std::fmt::Display::fmt(&self.handle, f)
    }
}
impl std::fmt::LowerHex for Session {
    // Renders the handle as zero-padded 8-digit lowercase hex.
    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
        write!(f, "{handle:08x}", handle = self.handle)
    }
}
impl std::fmt::UpperHex for Session {
    // Renders the handle as zero-padded 8-digit uppercase hex.
    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
        write!(f, "{handle:08X}", handle = self.handle)
    }
}
// Session does not implement Sync to prevent the same Session instance to be used from multiple
// threads.
// SAFETY: a Session only holds a raw handle plus a Pkcs11 client; moving it to another
// thread is sound as long as it is never used from two threads at once, which the
// deliberately missing Sync impl (via the `*mut u32` PhantomData guard) rules out.
unsafe impl Send for Session {}
impl Session {
pub(crate) fn new(handle: CK_SESSION_HANDLE, client: Pkcs11) -> Self {
Session {
handle,
client,
_guard: PhantomData,
}
}
}
impl Session {
    /// Initialize the normal user's pin for a token
    pub fn init_pin(&self, pin: &str) -> Result<()> {
        slot_token_management::init_pin(self, pin)
    }
    /// Changes the PIN of either the currently logged in user or of the `CKU_USER` if no user is
    /// logged in.
    pub fn set_pin(&self, old_pin: &str, new_pin: &str) -> Result<()> {
        slot_token_management::set_pin(self, old_pin, new_pin)
    }
    /// Close a session
    /// This will be called on drop as well.
    // Consuming `self` is enough: the actual close happens in the Drop impl.
    pub fn close(self) {}
    /// Log a session in.
    ///
    /// # Arguments
    ///
    /// * `user_type` - The type of user to log in as
    /// * `pin` - The PIN to use, or `None` if you wish to use the protected authentication path
    ///
    /// _NOTE: By passing `None` into `login`, you must ensure that the
    /// [CKF_PROTECTED_AUTHENTICATION_PATH] flag is set in the `TokenFlags`._
    pub fn login(&self, user_type: UserType, pin: Option<&str>) -> Result<()> {
        session_management::login(self, user_type, pin)
    }
    /// Logs a session in using a slice of raw bytes as a PIN. Some dongle drivers allow
    /// non UTF-8 characters in the PIN and as a result, we aren't guaranteed that we can
    /// pass in a UTF-8 string to login. Therefore, it's useful to be able to pass in raw bytes
    /// rather than convert a UTF-8 string to bytes.
    ///
    /// # Arguments
    ///
    /// * `user_type` - The type of user to log in as
    /// * `pin` - The PIN to use
    ///
    /// _NOTE: Unlike [`Session::login`], the PIN is mandatory here, so the protected
    /// authentication path cannot be selected through this method._
    pub fn login_with_raw(&self, user_type: UserType, pin: &[u8]) -> Result<()> {
        session_management::login_with_raw(self, user_type, pin)
    }
    /// Log a session out
    pub fn logout(&self) -> Result<()> {
        session_management::logout(self)
    }
    /// Returns the information about a session
    pub fn get_session_info(&self) -> Result<SessionInfo> {
        session_management::get_session_info(self)
    }
    /// Search for session objects matching a template
    pub fn find_objects(&self, template: &[Attribute]) -> Result<Vec<ObjectHandle>> {
        object_management::find_objects(self, template)
    }
    /// Create a new object
    pub fn create_object(&self, template: &[Attribute]) -> Result<ObjectHandle> {
        object_management::create_object(self, template)
    }
    /// Destroy an object
    pub fn destroy_object(&self, object: ObjectHandle) -> Result<()> {
        object_management::destroy_object(self, object)
    }
    /// Get the attribute info of an object: if the attribute is present and its size.
    ///
    /// # Arguments
    ///
    /// * `object` - The [ObjectHandle] used to reference the object
    /// * `attributes` - The list of attributes to get the information of
    ///
    /// # Returns
    ///
    /// This function will return a Vector of [AttributeInfo] enums that will either contain
    /// the size of the requested attribute, [AttributeInfo::TypeInvalid] if the attribute is not a
    /// valid type for the object, or [AttributeInfo::Sensitive] if the requested attribute is
    /// sensitive and will not be returned to the user.
    ///
    /// The list of returned attributes is 1-to-1 matched with the provided vector of attribute
    /// types. If you wish, you may create a hash table simply by:
    ///
    /// ```no_run
    /// use cryptoki::context::Pkcs11;
    /// use cryptoki::context::CInitializeArgs;
    /// use cryptoki::object::AttributeType;
    /// use cryptoki::session::UserType;
    /// use std::collections::HashMap;
    /// use std::env;
    ///
    /// let mut pkcs11 = Pkcs11::new(
    ///     env::var("PKCS11_SOFTHSM2_MODULE")
    ///         .unwrap_or_else(|_| "/usr/local/lib/softhsm/libsofthsm2.so".to_string()),
    /// )
    /// .unwrap();
    ///
    /// pkcs11.initialize(CInitializeArgs::OsThreads).unwrap();
    /// let slot = pkcs11.get_slots_with_token().unwrap().remove(0);
    ///
    /// let session = pkcs11.open_ro_session(slot).unwrap();
    /// session.login(UserType::User, Some("fedcba"));
    ///
    /// let empty_attrib= vec![];
    /// if let Some(object) = session.find_objects(&empty_attrib).unwrap().get(0) {
    ///     let attribute_types = vec![
    ///         AttributeType::Token,
    ///         AttributeType::Private,
    ///         AttributeType::Modulus,
    ///         AttributeType::KeyType,
    ///         AttributeType::Verify,];
    ///
    ///     let attribute_info = session.get_attribute_info(*object, &attribute_types).unwrap();
    ///
    ///     let hash = attribute_types
    ///         .iter()
    ///         .zip(attribute_info.iter())
    ///         .collect::<HashMap<_, _>>();
    /// }
    /// ```
    ///
    /// Alternatively, you can call [Session::get_attribute_info_map], found below.
    pub fn get_attribute_info(
        &self,
        object: ObjectHandle,
        attributes: &[AttributeType],
    ) -> Result<Vec<AttributeInfo>> {
        object_management::get_attribute_info(self, object, attributes)
    }
    /// Get the attribute info of an object: if the attribute is present and its size.
    ///
    /// # Arguments
    ///
    /// * `object` - The [ObjectHandle] used to reference the object
    /// * `attributes` - The list of attributes to get the information of
    ///
    /// # Returns
    ///
    /// This function will return a HashMap of [AttributeType] and [AttributeInfo] enums that will
    /// either contain the size of the requested attribute, [AttributeInfo::TypeInvalid] if the
    /// attribute is not a valid type for the object, or [AttributeInfo::Sensitive] if the requested
    /// attribute is sensitive and will not be returned to the user.
    pub fn get_attribute_info_map(
        &self,
        object: ObjectHandle,
        attributes: Vec<AttributeType>,
    ) -> Result<HashMap<AttributeType, AttributeInfo>> {
        object_management::get_attribute_info_map(self, object, attributes)
    }
    /// Get the attributes values of an object.
    /// Ignore the unavailable one. One has to call the get_attribute_info method to check which
    /// ones are unavailable.
    pub fn get_attributes(
        &self,
        object: ObjectHandle,
        attributes: &[AttributeType],
    ) -> Result<Vec<Attribute>> {
        object_management::get_attributes(self, object, attributes)
    }
    /// Single-part encryption operation
    pub fn encrypt(
        &self,
        mechanism: &Mechanism,
        key: ObjectHandle,
        data: &[u8],
    ) -> Result<Vec<u8>> {
        encryption::encrypt(self, mechanism, key, data)
    }
    /// Single-part decryption operation
    pub fn decrypt(
        &self,
        mechanism: &Mechanism,
        key: ObjectHandle,
        encrypted_data: &[u8],
    ) -> Result<Vec<u8>> {
        decryption::decrypt(self, mechanism, key, encrypted_data)
    }
    /// Sign data in single-part
    pub fn sign(&self, mechanism: &Mechanism, key: ObjectHandle, data: &[u8]) -> Result<Vec<u8>> {
        signing_macing::sign(self, mechanism, key, data)
    }
    /// Verify data in single-part
    pub fn verify(
        &self,
        mechanism: &Mechanism,
        key: ObjectHandle,
        data: &[u8],
        signature: &[u8],
    ) -> Result<()> {
        signing_macing::verify(self, mechanism, key, data, signature)
    }
    /// Generate a secret key
    pub fn generate_key(
        &self,
        mechanism: &Mechanism,
        template: &[Attribute],
    ) -> Result<ObjectHandle> {
        key_management::generate_key(self, mechanism, template)
    }
    /// Generate a public/private key pair
    pub fn generate_key_pair(
        &self,
        mechanism: &Mechanism,
        pub_key_template: &[Attribute],
        priv_key_template: &[Attribute],
    ) -> Result<(ObjectHandle, ObjectHandle)> {
        key_management::generate_key_pair(self, mechanism, pub_key_template, priv_key_template)
    }
    /// Derives a key from a base key
    pub fn derive_key(
        &self,
        mechanism: &Mechanism,
        base_key: ObjectHandle,
        template: &[Attribute],
    ) -> Result<ObjectHandle> {
        key_management::derive_key(self, mechanism, base_key, template)
    }
    /// Wrap key
    pub fn wrap_key(
        &self,
        mechanism: &Mechanism,
        wrapping_key: ObjectHandle,
        key: ObjectHandle,
    ) -> Result<Vec<u8>> {
        key_management::wrap_key(self, mechanism, wrapping_key, key)
    }
    /// Unwrap previously wrapped key
    pub fn unwrap_key(
        &self,
        mechanism: &Mechanism,
        unwrapping_key: ObjectHandle,
        wrapped_key: &[u8],
        template: &[Attribute],
    ) -> Result<ObjectHandle> {
        key_management::unwrap_key(self, mechanism, unwrapping_key, wrapped_key, template)
    }
    /// Generates a random number and sticks it in a slice
    ///
    /// # Arguments
    ///
    /// * `random_slice` - The slice to stick the random data into.  The length of the slice represents
    /// the number of bytes to obtain from the RBG
    pub fn generate_random_slice(&self, random_data: &mut [u8]) -> Result<()> {
        random::generate_random_slice(self, random_data)
    }
    /// Generates random data and returns it as a Vec<u8>.  The length of the returned Vector will
    /// be the amount of random requested, which is `random_len`.
    pub fn generate_random_vec(&self, random_len: u32) -> Result<Vec<u8>> {
        random::generate_random_vec(self, random_len)
    }
    /// Seeds the RNG
    pub fn seed_random(&self, seed: &[u8]) -> Result<()> {
        random::seed_random(self, seed)
    }
    /// Returns the raw PKCS#11 session handle backing this session.
    pub(crate) fn handle(&self) -> CK_SESSION_HANDLE {
        self.handle
    }
    /// Returns the PKCS#11 context this session belongs to.
    pub(crate) fn client(&self) -> &Pkcs11 {
        &self.client
    }
}
impl Drop for Session {
fn drop(&mut self) {
if let Err(e) = session_management::close_private(self) {
error!("Failed to close session: {}", e);
}
}
}
/// Types of PKCS11 users
#[derive(Copy, Clone, Debug)]
pub enum UserType {
    /// Security Officer (maps to `CKU_SO`)
    So,
    /// User (maps to `CKU_USER`)
    User,
    /// Context Specific (maps to `CKU_CONTEXT_SPECIFIC`)
    ContextSpecific,
}
impl From<UserType> for CK_USER_TYPE {
fn from(user_type: UserType) -> CK_USER_TYPE {
match user_type {
UserType::So => CKU_SO,
UserType::User => CKU_USER,
UserType::ContextSpecific => CKU_CONTEXT_SPECIFIC,
}
}
}
|
// Copyright (c) Facebook, Inc. and its affiliates.
//
// This source code is licensed under the MIT license found in the
// LICENSE file in the root directory of this source tree.
use crate::{errors::RandomCoinError, Hasher};
use core::{convert::TryInto, marker::PhantomData};
use math::{FieldElement, StarkField};
use utils::collections::Vec;
// RANDOM COIN
// ================================================================================================
/// Pseudo-random element generator for finite fields.
///
/// A random coin can be used to draw elements uniformly at random from the specified base field
/// (which is specified via the `B` type parameter) or from any extension of the base field.
///
/// Internally we use a cryptographic hash function (which is specified via the `H` type parameter),
/// to draw elements from the field. The coin works roughly as follows:
/// - The internal state of the coin consists of a `seed` and a `counter`. At instantiation
///   time, the `seed` is set to a hash of the provided bytes, and the `counter` is set to 0.
/// - To draw the next element, we increment the `counter` and compute hash(`seed` || `counter`).
///   If the resulting value is a valid field element, we return the result; otherwise we try
///   again until a valid element is found or the number of allowed tries is exceeded.
/// - We can also re-seed the coin with a new value. During the reseeding procedure, the
///   seed is set to hash(`old_seed` || `new_seed`), and the counter is reset to 0.
///
/// # Examples
/// ```
/// # use winter_crypto::{RandomCoin, hashers::Blake3_256};
/// # use math::fields::f128::BaseElement;
/// // instantiate a random coin using BLAKE3 as the hash function
/// let mut coin = RandomCoin::<BaseElement, Blake3_256<BaseElement>>::new(&[1, 2, 3, 4]);
///
/// // should draw different elements each time
/// let e1 = coin.draw::<BaseElement>().unwrap();
/// let e2 = coin.draw::<BaseElement>().unwrap();
/// assert_ne!(e1, e2);
///
/// let e3 = coin.draw::<BaseElement>().unwrap();
/// assert_ne!(e1, e3);
/// assert_ne!(e2, e3);
///
/// // should draw same elements for the same seed
/// let mut coin1 = RandomCoin::<BaseElement, Blake3_256<BaseElement>>::new(&[1, 2, 3, 4]);
/// let mut coin2 = RandomCoin::<BaseElement, Blake3_256<BaseElement>>::new(&[1, 2, 3, 4]);
/// let e1 = coin1.draw::<BaseElement>().unwrap();
/// let e2 = coin2.draw::<BaseElement>().unwrap();
/// assert_eq!(e1, e2);
///
/// // should draw different elements based on seed
/// let mut coin1 = RandomCoin::<BaseElement, Blake3_256<BaseElement>>::new(&[1, 2, 3, 4]);
/// let mut coin2 = RandomCoin::<BaseElement, Blake3_256<BaseElement>>::new(&[2, 3, 4, 5]);
/// let e1 = coin1.draw::<BaseElement>().unwrap();
/// let e2 = coin2.draw::<BaseElement>().unwrap();
/// assert_ne!(e1, e2);
/// ```
pub struct RandomCoin<B, H>
where
    B: StarkField,
    H: Hasher,
{
    /// Current seed digest; replaced on every reseed.
    seed: H::Digest,
    /// Number of draws since the last (re)seed; hashed together with the seed.
    counter: u64,
    _base_field: PhantomData<B>,
}
impl<B: StarkField, H: Hasher> RandomCoin<B, H> {
    // CONSTRUCTOR
    // --------------------------------------------------------------------------------------------
    /// Returns a new random coin instantiated with the provided `seed`.
    pub fn new(seed: &[u8]) -> Self {
        // Hash the raw bytes once so the internal seed always has H's digest size.
        let seed = H::hash(seed);
        RandomCoin {
            seed,
            counter: 0,
            _base_field: PhantomData,
        }
    }
    // RESEEDING
    // --------------------------------------------------------------------------------------------
    /// Reseeds the coin with the specified data by setting the new seed to hash(`seed` || `data`).
    ///
    /// # Examples
    /// ```
    /// # use winter_crypto::{RandomCoin, Hasher, hashers::Blake3_256};
    /// # use math::fields::f128::BaseElement;
    /// let mut coin1 = RandomCoin::<BaseElement, Blake3_256<BaseElement>>::new(&[1, 2, 3, 4]);
    /// let mut coin2 = RandomCoin::<BaseElement, Blake3_256<BaseElement>>::new(&[1, 2, 3, 4]);
    ///
    /// // should draw the same element from both coins
    /// let e1 = coin1.draw::<BaseElement>().unwrap();
    /// let e2 = coin2.draw::<BaseElement>().unwrap();
    /// assert_eq!(e1, e2);
    ///
    /// // after reseeding should draw different elements
    /// coin2.reseed(Blake3_256::<BaseElement>::hash(&[2, 3, 4, 5]));
    /// let e1 = coin1.draw::<BaseElement>().unwrap();
    /// let e2 = coin2.draw::<BaseElement>().unwrap();
    /// assert_ne!(e1, e2);
    /// ```
    pub fn reseed(&mut self, data: H::Digest) {
        self.seed = H::merge(&[self.seed, data]);
        self.counter = 0;
    }
    /// Reseeds the coin with the specified value by setting the new seed to hash(`seed` ||
    /// `value`).
    ///
    /// # Examples
    /// ```
    /// # use winter_crypto::{RandomCoin, Hasher, hashers::Blake3_256};
    /// # use math::fields::f128::BaseElement;
    /// let mut coin1 = RandomCoin::<BaseElement, Blake3_256<BaseElement>>::new(&[1, 2, 3, 4]);
    /// let mut coin2 = RandomCoin::<BaseElement, Blake3_256<BaseElement>>::new(&[1, 2, 3, 4]);
    ///
    /// // should draw the same element from both coins
    /// let e1 = coin1.draw::<BaseElement>().unwrap();
    /// let e2 = coin2.draw::<BaseElement>().unwrap();
    /// assert_eq!(e1, e2);
    ///
    /// // after reseeding should draw different elements
    /// coin2.reseed_with_int(42);
    /// let e1 = coin1.draw::<BaseElement>().unwrap();
    /// let e2 = coin2.draw::<BaseElement>().unwrap();
    /// assert_ne!(e1, e2);
    /// ```
    pub fn reseed_with_int(&mut self, value: u64) {
        self.seed = H::merge_with_int(self.seed, value);
        self.counter = 0;
    }
    // PUBLIC ACCESSORS
    // --------------------------------------------------------------------------------------------
    /// Returns the number of leading zeros in the seed if it is interpreted as an integer in
    /// big-endian byte order.
    ///
    /// # Examples
    /// ```
    /// # use winter_crypto::{RandomCoin, hashers::Blake3_256};
    /// # use math::fields::f128::BaseElement;
    /// let mut coin = RandomCoin::<BaseElement, Blake3_256<BaseElement>>::new(&[1, 2, 3, 4]);
    ///
    /// let mut value = 0;
    /// while coin.check_leading_zeros(value) < 2 {
    ///     value += 1;
    /// }
    ///
    /// coin.reseed_with_int(value);
    /// assert!(coin.leading_zeros() >= 2);
    /// ```
    pub fn leading_zeros(&self) -> u32 {
        let bytes = self.seed.as_ref();
        // NOTE(review): the doc speaks of big-endian leading zeros, but the implementation
        // counts trailing zeros of the little-endian head of the digest. This matches
        // `check_leading_zeros` below, so the two stay consistent with each other —
        // confirm the intended bit/byte order before changing either.
        let seed_head = u64::from_le_bytes(bytes[..8].try_into().unwrap());
        seed_head.trailing_zeros()
    }
    /// Computes hash(`seed` || `value`) and returns the number of leading zeros in the resulting
    /// value if it is interpreted as an integer in big-endian byte order.
    pub fn check_leading_zeros(&self, value: u64) -> u32 {
        // Same counting convention as `leading_zeros` — see the note there.
        let new_seed = H::merge_with_int(self.seed, value);
        let bytes = new_seed.as_ref();
        let seed_head = u64::from_le_bytes(bytes[..8].try_into().unwrap());
        seed_head.trailing_zeros()
    }
    // DRAW METHODS
    // --------------------------------------------------------------------------------------------
    /// Returns the next pseudo-random field element.
    ///
    /// # Errors
    /// Returns an error if a valid field element could not be generated after 100 calls to the
    /// PRNG.
    pub fn draw<E>(&mut self) -> Result<E, RandomCoinError>
    where
        E: FieldElement<BaseField = B>,
    {
        // Try at most 100 times: this matches both the documented bound above and the
        // count reported in the error below (the loop previously ran 200 iterations,
        // disagreeing with both).
        for _ in 0..100 {
            // get the next pseudo-random value and take the first ELEMENT_BYTES from it
            let value = self.next();
            let bytes = &value.as_ref()[..E::ELEMENT_BYTES as usize];
            // check if the bytes can be converted into a valid field element; if they can,
            // return; otherwise try again
            if let Some(element) = E::from_random_bytes(bytes) {
                return Ok(element);
            }
        }
        Err(RandomCoinError::FailedToDrawFieldElement(100))
    }
    /// Returns the next pair of pseudo-random field elements.
    ///
    /// # Errors
    /// Returns an error if any of the field elements could not be generated after 100 calls to
    /// the PRNG;
    pub fn draw_pair<E>(&mut self) -> Result<(E, E), RandomCoinError>
    where
        E: FieldElement<BaseField = B>,
    {
        Ok((self.draw()?, self.draw()?))
    }
    /// Returns the next triplet of pseudo-random field elements.
    ///
    /// # Errors
    /// Returns an error if any of the field elements could not be generated after 100 calls to
    /// the PRNG;
    pub fn draw_triple<E>(&mut self) -> Result<(E, E, E), RandomCoinError>
    where
        E: FieldElement<BaseField = B>,
    {
        Ok((self.draw()?, self.draw()?, self.draw()?))
    }
    /// Returns a vector of unique integers selected from the range [0, domain_size).
    ///
    /// # Errors
    /// Returns an error if the specified number of unique integers could not be generated
    /// after 1000 calls to the PRNG.
    ///
    /// # Panics
    /// Panics if:
    /// - `domain_size` is not a power of two.
    /// - `num_values` is greater than or equal to `domain_size`.
    ///
    /// # Examples
    /// ```
    /// # use std::collections::HashSet;
    /// # use winter_crypto::{RandomCoin, hashers::Blake3_256};
    /// # use math::fields::f128::BaseElement;
    /// let mut coin = RandomCoin::<BaseElement, Blake3_256<BaseElement>>::new(&[1, 2, 3, 4]);
    ///
    /// let num_values = 20;
    /// let domain_size = 64;
    /// let values = coin.draw_integers(num_values, domain_size).unwrap();
    ///
    /// assert_eq!(num_values, values.len());
    ///
    /// let mut value_set = HashSet::new();
    /// for value in values {
    ///     assert!(value < domain_size);
    ///     assert!(value_set.insert(value));
    /// }
    /// ```
    pub fn draw_integers(
        &mut self,
        num_values: usize,
        domain_size: usize,
    ) -> Result<Vec<usize>, RandomCoinError> {
        assert!(
            domain_size.is_power_of_two(),
            "domain size must be a power of two"
        );
        assert!(
            num_values < domain_size,
            "number of values must be smaller than domain size"
        );
        // determine how many bits are needed to represent valid values in the domain
        let v_mask = (domain_size - 1) as u64;
        // draw values from PRNG until we get as many unique values as specified by num_queries
        let mut values = Vec::with_capacity(num_values);
        for _ in 0..1000 {
            // get the next pseudo-random value and read the first 8 bytes from it
            let bytes: [u8; 8] = self.next().as_ref()[..8].try_into().unwrap();
            // convert to integer and limit the integer to the number of bits which can fit
            // into the specified domain
            let value = (u64::from_le_bytes(bytes) & v_mask) as usize;
            if values.contains(&value) {
                continue;
            }
            values.push(value);
            if values.len() == num_values {
                break;
            }
        }
        if values.len() < num_values {
            return Err(RandomCoinError::FailedToDrawIntegers(
                num_values,
                values.len(),
                1000,
            ));
        }
        Ok(values)
    }
    // HELPER METHODS
    // --------------------------------------------------------------------------------------------
    /// Updates the state by incrementing the counter and returns hash(seed || counter)
    fn next(&mut self) -> H::Digest {
        self.counter += 1;
        H::merge_with_int(self.seed, self.counter)
    }
}
|
use std::collections::HashMap;
use ncurses::*;
// Registers an ncurses color pair under `$name`: increments the pair index `$i`,
// calls `init_pair($i, $foreground, $background)`, and records the `$name -> $i`
// mapping in `$map`. A macro (rather than a function) so it can mutate the
// caller's local counter and map bindings in place.
macro_rules! register_color {
    ($name: expr, $foreground: expr, $background: expr, $map: expr, $i: expr) => {
        {
            $i += 1;
            init_pair($i, $foreground, $background);
            $map.insert($name, $i);
        }
    };
}
lazy_static! {
    // Named ncurses color pairs, registered once on first access.
    // Convention: the name is FOREGROUND_ON_BACKGROUND.
    pub static ref CPAIRS: HashMap<&'static str, i16> = {
        let mut m = HashMap::new();
        let mut i = 0;
        register_color!("BLACK_ON_WHITE", COLOR_BLACK, COLOR_WHITE, m, i);
        register_color!("WHITE_ON_BLACK", COLOR_WHITE, COLOR_BLACK, m, i);
        // Fixed: this pair previously registered (COLOR_BLACK, COLOR_BLUE), which
        // duplicated BLACK_ON_BLUE; blue foreground on a black background is intended.
        register_color!("BLUE_ON_BLACK", COLOR_BLUE, COLOR_BLACK, m, i);
        register_color!("BLACK_ON_BLUE", COLOR_BLACK, COLOR_BLUE, m, i);
        m
    };
}
|
/// Parsed representation of a URI; borrows string slices from the parsed input.
#[derive(Debug, PartialEq, Eq)]
pub struct URI<'a> {
    scheme: Scheme,
    // Optional userinfo (`user[:password]@`) portion.
    authority: Option<Authority<'a>>,
    host: Host,
    port: Option<u16>,
    // Path segments — presumably split on `/`; confirm against the (unseen) path parser.
    path: Option<Vec<&'a str>>,
    query: Option<QueryParams<'a>>,
    fragment: Option<&'a str>,
}
/// Supported URI schemes.
#[derive(Debug, PartialEq, Eq)]
pub enum Scheme {
    /// `http://`
    HTTP,
    /// `https://`
    HTTPS,
}
/// Userinfo component of a URI: `(user, optional password)`.
pub type Authority<'a> = (&'a str, Option<&'a str>);
/// Host part of a URI: either a name or an IPv4 address.
#[derive(Debug, PartialEq, Eq)]
pub enum Host {
    HOST(String),
    IP([u8; 4]),
}
/// A single query `key`/`value` pair.
pub type QueryParam<'a> = (&'a str, &'a str);
/// All query pairs of a URI.
pub type QueryParams<'a> = Vec<QueryParam<'a>>;
impl From<&str> for Scheme {
    // Case-insensitive conversion; panics on anything but the two known schemes.
    fn from(s: &str) -> Self {
        let lowered = s.to_lowercase();
        if lowered == "http://" {
            Scheme::HTTP
        } else if lowered == "https://" {
            Scheme::HTTPS
        } else {
            unimplemented!("unexpected scheme")
        }
    }
}
use nom::IResult;
use nom::branch::alt;
use nom::bytes::complete::{tag_no_case, tag};
use nom::error::{context, VerboseError};
use nom::sequence::{terminated, separated_pair};
use nom::character::complete::alphanumeric1;
use nom::combinator::opt;
type Res<T, U> = IResult<T, U, VerboseError<T>>;
fn scheme(input: &str) -> Res<&str, Scheme> {
context(
"scheme",
alt((tag_no_case("HTTP://"), tag_no_case("HTTPS://"))),
)(input)
.map(|(remain, res)| (remain, res.into()))
}
// Parses the userinfo component `user[:password]@` at the start of `input`.
//
// NOTE(review): the `:` separator and the password are parsed independently
// (`opt(tag(":"))`, `opt(alphanumeric1)`), so inputs like "user:@host" are also
// accepted as (user, None) — confirm this is intended.
fn authority(input: &str) -> Res<&str, (&str, Option<&str>)> {
    context(
        "authority",
        terminated(
            separated_pair(alphanumeric1, opt(tag(":")), opt(alphanumeric1)),
            tag("@"),
        ),
    )(input)
}
#[cfg(test)]
mod tests {
    use super::*;
    use nom::{
        error::{ErrorKind, VerboseError, VerboseErrorKind},
        Err as NomError,
    };
    #[test]
    fn test_scheme() {
        // Known schemes parse case-insensitively; the remainder is returned untouched.
        assert_eq!(scheme("https://yay"), Ok(("yay", Scheme::HTTPS)));
        assert_eq!(scheme("http://yay"), Ok(("yay", Scheme::HTTP)));
        // Unknown schemes surface nom's full error chain, ending in the "scheme" context.
        assert_eq!(
            scheme("file://yay"),
            Err(NomError::Error(VerboseError {
                errors: vec![
                    ("file://yay", VerboseErrorKind::Nom(ErrorKind::Tag)),
                    ("file://yay", VerboseErrorKind::Nom(ErrorKind::Alt)),
                    ("file://yay", VerboseErrorKind::Context("scheme")),
                ]
            }))
        );
    }
    #[test]
    fn test_authority() {
        // user:password@host
        assert_eq!(
            authority("username:password@example.org"),
            Ok(("example.org", ("username", Some("password"))))
        );
        // the password is optional
        assert_eq!(
            authority("username@example.org"),
            Ok(("example.org", ("username", None)))
        );
        // without "@", parsing fails once tag("@") is attempted at ".org"
        assert_eq!(
            authority("example.org"),
            Err(NomError::Error(VerboseError {
                errors: vec![
                    (".org", VerboseErrorKind::Nom(ErrorKind::Tag)),
                    ("example.org", VerboseErrorKind::Context("authority")),
                ]
            }))
        );
    }
}
|
#![allow(dead_code)]
extern crate graphics;
extern crate opengl_graphics;
extern crate piston_window;
extern crate piston;
extern crate rustc_serialize;
#[cfg(feature = "include_sdl2")]
extern crate sdl2_window;
#[cfg(feature = "include_glfw")]
extern crate glfw_window;
#[cfg(feature = "include_glutin")]
extern crate glutin_window;
#[macro_use]
mod macros;
mod characters;
mod engine;
mod locations;
mod items;
mod networking;
mod pokemon;
mod sprites;
use engine::game::Game;
use opengl_graphics::GlGraphics;
use piston::event_loop::*;
#[cfg(feature = "include_glfw")]
use glfw_window::GlfwWindow as Window;
#[cfg(feature = "include_glutin")]
use glutin_window::GlutinWindow as Window;
/// State of up to 255 other connected players (slots are initialized to `-1`).
///
/// Each entry holds 3 `i32` values for one player:
///
/// - 0: x coordinate of the player;
/// - 1: y coordinate of the player;
/// - 2: the direction that the player is facing, where each value means:
///     - 0: south
///     - 1: west
///     - 2: north
///     - 3: east
static mut OTHER_PLAYERS: [[i32; 3]; 255] = [[-1i32; 3]; 255];
/// Address the client uses locally — presumably where it listens for server
/// data; confirm against the `networking` module.
static CLIENT_IP: &'static str = "127.0.0.1:31202";
/// The address of the server which sends data, such as about the other players
/// currently connected.
static SERVER_IP: &'static str = "127.0.0.1:31101";
fn main() {
    // Start the TcpListener to listen for data from the server.
    networking::start_server();
    // Window / GL setup, then the game object with loaded player and sprites.
    let opengl = engine::window::OPENGL_VERSION;
    let mut window = engine::window::make_window();
    let mut game: Game = Game::new(characters::player::load(), sprites::all());
    let gl = &mut GlGraphics::new(opengl);
    // Main event loop: forward every window event to the game.
    let mut events = window.events();
    while let Some(event) = events.next(&mut window) {
        game.run(event, gl);
    }
}
|
mod data;
mod endpoints;
mod service;
pub use data::*;
pub use endpoints::*;
pub use service::*;
|
// Copyright (c) The Starcoin Core Contributors
// SPDX-License-Identifier: Apache-2.0
use sgstorage::generate_random_channel_store;
#[test]
fn test_channel_store_startup() {
    // A freshly generated channel store must report no startup info yet.
    let store = generate_random_channel_store();
    let info = store.get_startup_info();
    assert!(info.is_ok());
    assert!(info.unwrap().is_none());
}
|
#![allow(dead_code)]
mod args;
mod backend;
mod config;
mod net;
mod track;
mod util;
mod web;
mod sim;
mod modules;
mod tui;
mod logger;
use std::io;
use crate::{
args::{Args, Command},
modules::Module,
util::future::abortable::{Aborted, abortable},
};
fn main() -> ! {
let exit_code = match run() {
Ok(_) => 0,
Err(error) => {
eprintln!("Error: {}", error);
1
}
};
std::process::exit(exit_code)
}
fn run() -> anyhow::Result<()> {
let command = args
::parse(
std::env::args_os()
)
.map_err(anyhow::Error::new)?;
match command {
Command::Help(msg) | Command::Version(msg) => {
println!("{}", msg);
Ok(())
},
Command::Download(args) => download(args),
}
}
fn download(args: Args) -> anyhow::Result<()> {
let logger = logger::setup(args.log_level);
let cfg = match config::load() {
Err(error) if error.is_not_found() => {
eprintln!(
concat!(
"The config file was not found.",
" A template was placed, please edit it with your keys."
)
);
std::process::exit(1)
},
other => other,
}?;
let mut track = args.track;
let google_cfg = config::read(&cfg)?;
let beatport_cfg = config::read(&cfg)?;
let bandcamp_cfg = config::read(&cfg)?;
let slider_cfg = config::read(&cfg)?;
let zippy_cfg = config::read(&cfg)?;
let music2k_cfg = config::read(&cfg)?;
log::debug!("google cfg: {:#?}", google_cfg);
log::debug!("beatport cfg: {:#?}", beatport_cfg);
log::debug!("bandcamp cfg: {:#?}", bandcamp_cfg);
log::debug!("slider cfg: {:#?}", slider_cfg);
log::debug!("zippy cfg: {:#?}", slider_cfg);
log::debug!("music2k cfg: {:#?}", slider_cfg);
let google = modules::google::Module::new(google_cfg);
let beatport = modules::beatport::Module::new(beatport_cfg);
let bandcamp = modules::bandcamp::Module::new(bandcamp_cfg);
let slider = modules::slider::Module::new(slider_cfg);
let zippy = modules::zippy::Module::new(zippy_cfg);
let music2k = modules::music2k::Module::new(music2k_cfg);
let terminal = tui::terminal()?;
let input = tui::StdinReader::new();
let (beatport_widget, beatport_reporter) = modules::beatport::tui::Widget::new();
let (bandcamp_widget, bandcamp_reporter) = modules::bandcamp::tui::Widget::new();
let (slider_widget, slider_reporter) = modules::slider::tui::Widget::new();
let (zippy_widget, zippy_reporter) = modules::zippy::tui::Widget::new();
let (music2k_widget, music2k_reporter) = modules::music2k::tui::Widget::new();
let backend = backend::Backend {
metasources: args.metasources,
tracksources: args.tracksources,
google,
beatport,
beatport_reporter,
bandcamp,
bandcamp_reporter,
slider,
slider_reporter,
zippy,
zippy_reporter,
music2k,
music2k_reporter,
};
let (backend_fut, backend_abort) = abortable(
backend.run(&mut track)
);
let window = tui::main::Window {
aborter: backend_abort,
terminal,
input,
beatport_widget,
bandcamp_widget,
slider_widget,
zippy_widget,
music2k_widget,
};
let ui_handle = window.run();
let result = futures::executor::block_on(backend_fut);
ui_handle
.join()
.expect("ui thread failed to join")?;
let result = match result {
Ok(result) => result,
Err(Aborted) => {
log::info!("slizzy aborted");
Ok(())
}
};
logger.dump(
io::stdout().lock()
)?;
result
}
|
use serde::de;
use serde::ser;
use serde::{Deserialize, Serialize};
use std::borrow::Cow;
use std::cmp::{Eq, PartialEq};
use std::fmt::{self, Formatter};
use std::slice;
/// A message as sent by an user.
///
/// Internally stores either a single chunk or a list of chunks (see `Contents`).
#[derive(Clone, Debug)]
pub struct Message<'a>(Contents<'a>);
impl<'a> Message<'a> {
    /// Creates a new message consisting of specified chunks.
    pub fn new(chunks: impl Into<Cow<'a, [Chunk<'a>]>>) -> Self {
        Message(Contents::Variable(chunks.into()))
    }
    /// Creates an empty message.
    pub fn empty() -> Self {
        // A borrowed empty slice: no allocation needed.
        Self(Contents::Variable(Cow::Borrowed(&[])))
    }
    /// Creates a message consisting of one unstyled chunk.
    pub fn plain(contents: impl Into<Cow<'a, str>>) -> Self {
        let chunk = Chunk {
            contents: contents.into(),
            style: Style::default(),
        };
        Self(Contents::Single(chunk))
    }
    /// Returns the chunks of this message.
    pub fn chunks(&self) -> &[Chunk<'a>] {
        match &self.0 {
            Contents::Variable(chunks) => &**chunks,
            // A single chunk is exposed as a one-element slice.
            Contents::Single(chunk) => slice::from_ref(chunk),
        }
    }
    /// Returns the text of this message.
    ///
    /// If the message consists of multiple chunks, they will be concatenated in a [`String`].
    ///
    /// Otherwise, a reference to the first (and only) chunk's contents is returned.
    pub fn text(&self) -> Cow<'_, str> {
        match self.chunks() {
            [] => Cow::Borrowed(""),
            [only] => Cow::Borrowed(only.contents.as_ref()),
            many => Cow::Owned(
                many.iter()
                    .map(|chunk| chunk.contents.as_ref())
                    .collect::<String>(),
            ),
        }
    }
}
impl<'a> Eq for Message<'a> {}
impl<'a, 'b> PartialEq<Message<'a>> for Message<'b> {
    // Two messages are equal when their chunk sequences are equal, regardless of
    // the internal Single/Variable representation.
    fn eq(&self, other: &Message<'a>) -> bool {
        self.chunks().eq(other.chunks())
    }
}
// Implement Serialize and Deserialize manually due to our single value optimization.
impl<'a> ser::Serialize for Message<'a> {
    // A message always serializes as its chunk array, regardless of the internal
    // Single/Variable representation.
    fn serialize<S: ser::Serializer>(&self, serializer: S) -> Result<S::Ok, S::Error> {
        self.chunks().serialize(serializer)
    }
}
impl<'de, 'a> de::Deserialize<'de> for Message<'a> {
    fn deserialize<D: de::Deserializer<'de>>(deserializer: D) -> Result<Self, D::Error> {
        // Visitor that rebuilds the Single/Variable representation from a sequence:
        // zero chunks -> empty Variable; exactly one -> Single (no Vec allocation);
        // two or more -> Variable backed by a Vec.
        struct Visitor<'a> {
            _phantom: &'a (),
        }
        impl<'de, 'a> de::Visitor<'de> for Visitor<'a> {
            type Value = Message<'a>;
            fn expecting(&self, f: &mut Formatter) -> fmt::Result {
                write!(f, "an array of chunks")
            }
            fn visit_seq<S: de::SeqAccess<'de>>(self, mut seq: S) -> Result<Self::Value, S::Error> {
                let contents = match seq.next_element()? {
                    Some(first) => match seq.next_element()? {
                        Some(second) => {
                            // At least two chunks: fall back to the Vec-backed form.
                            let mut buffer = vec![first, second];
                            while let Some(element) = seq.next_element()? {
                                buffer.push(element);
                            }
                            Contents::Variable(buffer.into())
                        }
                        None => Contents::Single(first),
                    },
                    None => Contents::Variable(Vec::new().into()),
                };
                Ok(Message(contents))
            }
        }
        deserializer.deserialize_seq(Visitor { _phantom: &() })
    }
}
// Most messages will contain just one unstyled chunk, so it makes sense to avoid
// a heap allocation that comes with having a single-element Vec.
#[derive(Clone, Debug, Serialize, Deserialize)]
enum Contents<'a> {
    // Exactly one chunk, stored inline.
    Single(Chunk<'a>),
    // Any number of chunks; may borrow or own the backing slice.
    Variable(Cow<'a, [Chunk<'a>]>),
}
/// A chunk of text with some style applied to it.
#[derive(Serialize, Deserialize, Clone, Debug, Eq, PartialEq)]
pub struct Chunk<'a> {
    /// Text contents of this chunk. Cow lets a chunk borrow from the source
    /// buffer or own its string after deserialization.
    pub contents: Cow<'a, str>,
    /// Chunk style.
    pub style: Style,
}
/// Style of a message chunk.
// `Default` yields the unstyled case: not bold, not italic, no color.
#[derive(Serialize, Deserialize, Default, Clone, Copy, Debug, Eq, PartialEq)]
pub struct Style {
    /// Bold text.
    pub bold: bool,
    /// Italic text.
    pub italic: bool,
    /// RGB color, if any.
    pub color: Option<(u8, u8, u8)>,
}
|
use conveyor::ConveyorError;
use std::error::Error;
use std::fmt;
use std::result::Result;
use std::path::PathBuf;
pub type CrawlResult<T> = Result<T, CrawlError>;
/// Classifies the failure wrapped by a `CrawlError`.
#[derive(Debug)]
pub enum CrawlErrorKind {
    /// Failure with no further classification.
    Unknown,
    /// Error reported by the conveyor subsystem, captured as message text.
    Conveyor(String),
    /// Arbitrary boxed error from another source.
    Error(Box<dyn Error + Send + Sync>),
    /// A requested resource was not found; the payload names it.
    NotFound(String),
    /// Underlying I/O failure, preserved as the original error value.
    Io(std::io::Error),
    /// A description file at this path was rejected as invalid.
    InvalidDescriptionFile(PathBuf)
}
/// Error type for crawl operations; a thin wrapper that keeps the
/// classification private while implementing `std::error::Error`.
#[derive(Debug)]
pub struct CrawlError {
    kind: CrawlErrorKind,
}
impl CrawlError {
pub fn new(kind: CrawlErrorKind) -> CrawlError {
CrawlError { kind }
}
}
impl fmt::Display for CrawlError {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "CrawlError<")?;
match &self.kind {
CrawlErrorKind::Conveyor(s) => write!(f, "Conveyor({})", s),
CrawlErrorKind::NotFound(s) => write!(f, "NotFound({})", s),
_ => write!(f, "Unknown"),
}?;
write!(f, ">")
}
}
// Marker impl: the Debug derive and Display impl above satisfy `Error`.
impl Error for CrawlError {}
impl From<CrawlErrorKind> for CrawlError {
fn from(error: CrawlErrorKind) -> CrawlError {
CrawlError::new(error)
}
}
/// Conveyor failures are captured by message text only, so this conversion
/// does not need to keep the original `ConveyorError` value alive.
impl From<ConveyorError> for CrawlError {
    fn from(source: ConveyorError) -> CrawlError {
        let kind = CrawlErrorKind::Conveyor(source.to_string());
        Self::new(kind)
    }
}
impl From<std::io::Error> for CrawlError {
fn from(error: std::io::Error) -> CrawlError {
CrawlError::new(CrawlErrorKind::Io(error))
}
}
impl From<CrawlError> for ConveyorError {
fn from(error: CrawlError) -> ConveyorError {
ConveyorError::new(error)
}
} |
use std::convert::TryInto;
use crate::*;
// Maybe is should be part of Asset ?
const ACCURACCY: u8 = 8;
const ORACLE_OFFSET: u8 = 4;
// Switch to u128? Reduce decimals for tokens ?
// At least rust will error during overflows checkmate Solidity
// USD prices have 8 decimal places
/// Ensures the price feeds backing assets `index_a` and `index_b` are fresh.
///
/// An asset whose `feed_address` equals the default pubkey has no external
/// feed and is always considered up to date. Any other asset must have been
/// updated within `max_delay` slots of `slot`, otherwise
/// `ErrorCode::OutdatedOracle` is returned.
///
/// Changes from the previous revision: snake_case parameter names (silences
/// the non_snake_case warning), `&[Asset]` instead of `&Vec<Asset>` (callers
/// passing `&vec` still work via deref coercion), and the duplicated
/// per-asset logic is factored into one helper.
pub fn check_feed_update(
    assets: &[Asset],
    index_a: usize,
    index_b: usize,
    max_delay: u32,
    slot: u64,
) -> Result<()> {
    check_single_feed(&assets[index_a], "checkA", max_delay, slot)?;
    check_single_feed(&assets[index_b], "checkB", max_delay, slot)
}

/// Freshness check for one asset; `label` preserves the original log tags
/// ("checkA"/"checkB"), so log output is unchanged.
fn check_single_feed(asset: &Asset, label: &str, max_delay: u32, slot: u64) -> Result<()> {
    if !asset.feed_address.eq(&Pubkey::default()) {
        msg!("{} {}", label, slot);
        if (asset.last_update + max_delay as u64) < slot {
            return Err(ErrorCode::OutdatedOracle.into());
        }
    }
    Ok(())
}
/// Sums the USD value of all asset supplies, normalized to `ACCURACCY`
/// decimal places.
///
/// A feed-backed asset (non-default `feed_address`) whose `last_update` is
/// more than `max_delay` slots behind `slot` aborts with `OutdatedOracle`;
/// feed-less assets are never considered stale. Accumulation happens in u128
/// to avoid intermediate overflow; the final total is assumed to fit in u64
/// (the cast truncates otherwise).
///
/// Fixes the empty-`if`/`else` anti-pattern of the previous revision by
/// inverting the condition, and takes `&[Asset]` (deref-coercion compatible
/// with the old `&Vec<Asset>`).
pub fn calculate_debt(assets: &[Asset], slot: u64, max_delay: u32) -> Result<u64> {
    let mut debt = 0u128;
    for asset in assets.iter() {
        if (asset.last_update + max_delay as u64) < slot {
            msg!("last update {}", asset.last_update);
            msg!("slot {}", slot);
            // Only assets with a real external feed can be outdated.
            if !asset.feed_address.eq(&Pubkey::default()) {
                return Err(ErrorCode::OutdatedOracle.into());
            }
        }
        // price has ORACLE_OFFSET decimals and supply has `decimals`; dividing
        // by 10^(decimals + ORACLE_OFFSET - ACCURACCY) leaves ACCURACCY decimals.
        debt += (asset.price as u128 * asset.supply as u128)
            / 10u128.pow(
                (asset.decimals + ORACLE_OFFSET - ACCURACCY)
                    .try_into()
                    .unwrap(),
            );
    }
    Ok(debt as u64)
}
// debt need to be up to date
/// Converts a user's share count into USD debt: `debt * shares / debt_shares`.
/// `debt` must already be up to date; returns 0 when no shares exist at all
/// (avoids division by zero).
pub fn calculate_user_debt_in_usd(user_account: &UserAccount, debt: u64, debt_shares: u64) -> u64 {
    if debt_shares == 0 {
        return 0;
    }
    // Widen to u128 so the product of two u64 values cannot overflow.
    let user_debt = (debt as u128 * user_account.shares as u128) / (debt_shares as u128);
    user_debt as u64
}
/// Upper bound on a user's debt in USD, derived from collateral value.
///
/// `price * collateral` is scaled down to `ACCURACCY` decimal places, then
/// divided by the collateralization level in percent (e.g. 500 means the
/// user may borrow up to 1/5 of collateral value).
///
/// # Panics
/// Panics if the result does not fit in `u64`.
pub fn calculate_max_user_debt_in_usd(
    collateral_asset: &Asset,
    collateralization_level: u32,
    user_account: &UserAccount,
) -> u64 {
    // u8 -> u32 widening is infallible, so `.into()` replaces the previous
    // `try_into().unwrap()` (consistent with calculate_amount_mint_in_usd).
    let user_max_debt = collateral_asset.price as u128 * user_account.collateral as u128
        / 10u128.pow((collateral_asset.decimals + ORACLE_OFFSET - ACCURACCY).into());
    (user_max_debt * 100 / collateralization_level as u128)
        .try_into()
        .unwrap()
}
/// USD amount the user may still withdraw: the unused debt headroom scaled
/// back up by the collateralization level (inverse of the max-debt formula).
pub fn calculate_max_withdraw_in_usd(
    max_user_debt_in_usd: &u64,
    user_debt_in_usd: &u64,
    collateralization_level: &u32,
) -> u64 {
    // checked_sub returning None reproduces the original "max < debt => 0" guard.
    match max_user_debt_in_usd.checked_sub(*user_debt_in_usd) {
        None => 0,
        Some(headroom) => headroom * *collateralization_level as u64 / 100,
    }
}
/// USD value of minting `amount` of `mint_asset`, at ACCURACCY decimals.
pub fn calculate_amount_mint_in_usd(mint_asset: &Asset, amount: u64) -> u64 {
    let scale = 10u128.pow((mint_asset.decimals + ORACLE_OFFSET - ACCURACCY).into());
    let usd_value = mint_asset.price as u128 * amount as u128 / scale;
    usd_value as u64
}
/// Debt shares to mint for `minted_amount_usd` of newly minted debt.
/// The very first mint bootstraps the pool with 10^8 shares; later mints
/// receive shares proportional to their fraction of total debt.
pub fn calculate_new_shares(shares: &u64, debt: &u64, minted_amount_usd: &u64) -> u64 {
    if *shares == 0u64 {
        10u64.pow(8)
    } else {
        let proportional = *shares as u128 * *minted_amount_usd as u128 / *debt as u128;
        proportional as u64
    }
}
/// Shares to burn when repaying `amount` tokens of `asset` against
/// `user_debt` USD: shares are burned in proportion to the USD value repaid.
pub fn calculate_burned_shares(
    asset: &Asset,
    user_debt: &u64,
    user_shares: &u64,
    amount: &u64,
) -> u64 {
    let scale = 10u128.pow((asset.decimals + ORACLE_OFFSET - ACCURACCY).into());
    let burn_amount_in_usd = asset.price as u128 * *amount as u128 / scale;
    (burn_amount_in_usd * *user_shares as u128 / *user_debt as u128) as u64
}
/// Largest token amount whose USD value covers `user_debt`
/// (debt divided by price, expressed in the asset's own decimals).
pub fn calculate_max_burned_in_token(asset: &Asset, user_debt: &u64) -> u64 {
    let numerator = *user_debt as u128 * 10u128.pow(ORACLE_OFFSET.into());
    (numerator / asset.price as u128) as u64
}
/// Output amount for swapping `amount` of `asset_in` into `asset_for`,
/// minus the basis-point `fee` (30 => 0.3%).
/// Both assets are assumed to use the same number of decimals (TODO noted
/// in the original — confirm before relying on mixed-decimal swaps).
pub fn calculate_swap_out_amount(
    asset_in: &Asset,
    asset_for: &Asset,
    amount: &u64,
    fee: &u8, // in range from 0-99 | 30/10000 => 0.3% fee
) -> u64 {
    let gross = asset_in.price as u128 * *amount as u128 / asset_for.price as u128;
    let fee_part = gross * *fee as u128 / 10000;
    (gross - fee_part) as u64
}
// Unit tests for the debt/collateral math helpers above. Expected values
// rely on `Asset::default()` for unset fields — TODO confirm its defaults
// before editing the assertions.
#[cfg(test)]
mod tests {
    use std::ops::Div;
    use super::*;
    #[test]
    fn test_calculate_debt_success() {
        let slot = 100;
        let accuracy = 8;
        // Neither asset has a feed address, so staleness never triggers.
        let asset_1 = Asset {
            // oracle offset set as 4
            price: 10 * 10u64.pow(ORACLE_OFFSET.into()),
            supply: 100 * 10u64.pow(8),
            last_update: slot - 10,
            decimals: 8,
            ..Default::default()
        };
        // debt 1000
        let asset_2 = Asset {
            price: 12 * 10u64.pow(ORACLE_OFFSET.into()),
            supply: 200 * 10u64.pow(8),
            last_update: 100,
            decimals: 8,
            ..Default::default()
        };
        // debt 2400
        let assets: Vec<Asset> = vec![asset_1, asset_2];
        let result = calculate_debt(&assets, slot, 100);
        match result {
            Ok(debt) => assert_eq!(debt, 3400 * 10u64.pow(accuracy)),
            Err(_) => assert!(false, "Shouldn't check"),
        }
    }
    #[test]
    fn test_calculate_debt_error() {
        let slot = 100;
        // asset_1 has a real feed address and max_delay is 0, so the
        // staleness check must fail.
        let asset_1 = Asset {
            price: 10 * 10u64.pow(ORACLE_OFFSET.into()),
            supply: 100 * 10u64.pow(8),
            last_update: slot - 10,
            decimals: 8,
            feed_address: Pubkey::new_unique(),
            ..Default::default()
        };
        // debt 1000
        let asset_2 = Asset {
            price: 12 * 10u64.pow(ORACLE_OFFSET.into()),
            supply: 200 * 10u64.pow(8),
            last_update: 100,
            decimals: 8,
            ..Default::default()
        };
        // debt 2400
        let assets: Vec<Asset> = vec![asset_1, asset_2];
        let result = calculate_debt(&assets, slot, 0);
        // println!("{:?}", result);
        assert!(result.is_err());
    }
    #[test]
    fn test_calculate_user_debt_in_usd() {
        let debt = 1000;
        let debt_shares = 1000;
        // one share = one debt
        let user_account = UserAccount {
            collateral: 100,
            shares: 10,
            owner: Pubkey::default(),
        };
        let user_debt = calculate_user_debt_in_usd(&user_account, debt, debt_shares);
        assert_eq!(user_debt, debt * user_account.shares / debt_shares);
        // Zero shares
        let user_account_zero_shares = UserAccount {
            collateral: 100,
            shares: 0,
            owner: Pubkey::default(),
        };
        let user_debt_zero_shares =
            calculate_user_debt_in_usd(&user_account_zero_shares, debt, debt_shares);
        assert_eq!(user_debt_zero_shares, 0);
    }
    #[test]
    fn test_calculate_max_user_debt_in_usd() {
        let collateralization_level = 500;
        // one share = one debt
        let user_account = UserAccount {
            collateral: 10 * 10u64.pow(8),
            shares: 10,
            owner: Pubkey::default(),
        };
        let collateral_asset = Asset {
            price: 12 * 10u64.pow(ORACLE_OFFSET.into()),
            last_update: 100,
            decimals: 8,
            ..Default::default()
        };
        // 10 tokens per 12 $ each => 120
        // collateralization_level 1/5 means 120*1/5 => 24 * decimals
        let user_max_debt = calculate_max_user_debt_in_usd(
            &collateral_asset,
            collateralization_level,
            &user_account,
        );
        assert_eq!(user_max_debt, 24 * 10u64.pow(8));
    }
    #[test]
    fn test_calculate_amount_mint_in_usd() {
        let amount = 10 * 10u64.pow(8);
        let mint_asset = Asset {
            price: 12 * 10u64.pow(ORACLE_OFFSET.into()),
            last_update: 100,
            decimals: 8,
            ..Default::default()
        };
        // 10 tokens per 12 $ each => 120 * decimals
        let amount_mint_in_usd = calculate_amount_mint_in_usd(&mint_asset, amount);
        assert_eq!(amount_mint_in_usd, 120 * 10u64.pow(8));
    }
    #[test]
    fn test_calculate_new_shares_initial() {
        // First-ever mint bootstraps the pool with 10^8 shares.
        let shares = 0;
        let new_debt = 100;
        let minted_amount_usd = 100;
        let new_shares_initial = calculate_new_shares(&shares, &new_debt, &minted_amount_usd);
        assert_eq!(new_shares_initial, 10u64.pow(8));
    }
    #[test]
    fn test_calculate_new_shares_next() {
        // Minting an amount equal to existing debt doubles-or-proportions
        // the share supply accordingly.
        let shares = 10u64.pow(8);
        let debt = 5 * 10u64.pow(8);
        let minted_amount_usd = 5 * 10u64.pow(8);
        let new_shares_initial = calculate_new_shares(&shares, &debt, &minted_amount_usd);
        assert_eq!(new_shares_initial, 10u64.pow(8));
        let shares = 10u64.pow(8);
        let debt = 15 * 10u64.pow(8);
        let minted_amount_usd = 5 * 10u64.pow(8);
        let new_shares_initial = calculate_new_shares(&shares, &debt, &minted_amount_usd);
        assert_eq!(new_shares_initial, 10u64.pow(8) / 3);
    }
    #[test]
    fn test_calculate_max_withdraw_in_usd() {
        // Headroom of 10 at 500% collateralization => 50 withdrawable.
        let max_user_debt_in_usd = 20;
        let user_debt_in_usd = 10;
        let collateralization_level = 500;
        let max_withdraw_in_usd = calculate_max_withdraw_in_usd(
            &max_user_debt_in_usd,
            &user_debt_in_usd,
            &collateralization_level,
        );
        assert_eq!(max_withdraw_in_usd, 50);
    }
    #[test]
    fn test_calculate_burned_shares() {
        let user_debt_in_usd = 100 * 10u64.pow(ACCURACCY.into());
        let asset = Asset {
            price: 1 * 10u64.pow(ORACLE_OFFSET.into()),
            last_update: 100,
            decimals: 8,
            ..Default::default()
        };
        let user_shares = 10u64.pow(8u32);
        let amount = 50 * 10u64.pow(asset.decimals as u32);
        // each token cost 1 usd we burn 50% so we should burn 50% shares
        let burned_shares =
            calculate_burned_shares(&asset, &user_debt_in_usd, &user_shares, &amount);
        assert_eq!(burned_shares, user_shares.div(2u64));
    }
    #[test]
    fn test_calculate_max_burned_in_token() {
        let user_debt_in_usd = 100 * 10u64.pow(ACCURACCY.into());
        let asset = Asset {
            price: 2 * 10u64.pow(ORACLE_OFFSET.into()),
            last_update: 100,
            decimals: 8,
            ..Default::default()
        };
        // Our debt = 100 usd each token cost 2 so we burn 50 tokens
        let amount_to_burn = calculate_max_burned_in_token(&asset, &user_debt_in_usd);
        assert_eq!(amount_to_burn, 50 * 10u64.pow(asset.decimals.into()));
    }
    #[test]
    fn test_calculate_swap_out_amount() {
        let amount_in = 1000 * 10u64.pow(ACCURACCY.into());
        let fee = 30u8;
        let asset_in = Asset {
            price: 1 * 10u64.pow(ORACLE_OFFSET.into()),
            last_update: 100,
            decimals: 8,
            ..Default::default()
        };
        let asset_for = Asset {
            price: 1 * 10u64.pow(ORACLE_OFFSET.into()),
            last_update: 100,
            decimals: 8,
            ..Default::default()
        };
        let asset_for_2 = Asset {
            price: 2 * 10u64.pow(ORACLE_OFFSET.into()),
            last_update: 100,
            decimals: 8,
            ..Default::default()
        };
        // Test on tokens with same price
        let amount = calculate_swap_out_amount(&asset_in, &asset_for, &amount_in, &fee);
        assert_eq!(amount, 997 * 10u64.pow(ACCURACCY.into()));
        // Test on tokens with different price
        let amount = calculate_swap_out_amount(&asset_in, &asset_for_2, &amount_in, &fee);
        assert_eq!(amount, 4985 * 10u64.pow(ACCURACCY.into()) / 10);
    }
}
|
use registry_pol::v1::RegistryValueType;
#[test]
fn reg_none() {
    // Round-trip: the numeric discriminant must parse back to the variant.
    let raw = RegistryValueType::REG_NONE as u32;
    assert_eq!(RegistryValueType::parse(raw), Some(RegistryValueType::REG_NONE));
}
#[test]
fn reg_sz() {
    // Round-trip: the numeric discriminant must parse back to the variant.
    let raw = RegistryValueType::REG_SZ as u32;
    assert_eq!(RegistryValueType::parse(raw), Some(RegistryValueType::REG_SZ));
}
#[test]
fn reg_expand_sz() {
    // Round-trip: the numeric discriminant must parse back to the variant.
    let raw = RegistryValueType::REG_EXPAND_SZ as u32;
    assert_eq!(RegistryValueType::parse(raw), Some(RegistryValueType::REG_EXPAND_SZ));
}
#[test]
fn reg_binary() {
    // Round-trip: the numeric discriminant must parse back to the variant.
    let raw = RegistryValueType::REG_BINARY as u32;
    assert_eq!(RegistryValueType::parse(raw), Some(RegistryValueType::REG_BINARY));
}
#[test]
fn reg_dword() {
    // Round-trip: the numeric discriminant must parse back to the variant.
    let raw = RegistryValueType::REG_DWORD as u32;
    assert_eq!(RegistryValueType::parse(raw), Some(RegistryValueType::REG_DWORD));
}
#[test]
// Renamed from `reg_dword_alt`: the test exercises REG_DWORD_BIG_ENDIAN,
// and the old name obscured which variant a failure refers to. Test names
// are not called by other code, so the rename is safe.
fn reg_dword_big_endian() {
    let raw = RegistryValueType::REG_DWORD_BIG_ENDIAN as u32;
    assert_eq!(RegistryValueType::parse(raw), Some(RegistryValueType::REG_DWORD_BIG_ENDIAN));
}
#[test]
fn reg_link() {
    // Round-trip: the numeric discriminant must parse back to the variant.
    let raw = RegistryValueType::REG_LINK as u32;
    assert_eq!(RegistryValueType::parse(raw), Some(RegistryValueType::REG_LINK));
}
#[test]
fn reg_multi_sz() {
    // Round-trip: the numeric discriminant must parse back to the variant.
    let raw = RegistryValueType::REG_MULTI_SZ as u32;
    assert_eq!(RegistryValueType::parse(raw), Some(RegistryValueType::REG_MULTI_SZ));
}
#[test]
fn reg_qword() {
    // Round-trip: the numeric discriminant must parse back to the variant.
    let raw = RegistryValueType::REG_QWORD as u32;
    assert_eq!(RegistryValueType::parse(raw), Some(RegistryValueType::REG_QWORD));
}
#[test]
fn invalid() {
    // Same value set as the original two loops (8..11 and 12..0xFFFF),
    // expressed as one chained iterator. Value 11 is deliberately skipped —
    // presumably it is a valid discriminant (REG_QWORD); confirm against
    // the enum definition before extending the ranges.
    for raw in (8..11).chain(12..0xFFFF) {
        assert_eq!(RegistryValueType::parse(raw), None);
    }
}
|
// Copyright (c) The Starcoin Core Contributors
// SPDX-License-Identifier: Apache-2.0
use crate::bus::SysBus;
use actix::prelude::*;
use anyhow::Result;
use futures::{
channel::{mpsc, oneshot},
FutureExt,
};
use std::fmt::Debug;
use std::marker::PhantomData;
pub mod bus;
/// Bus message asking the bus to deliver every future broadcast of `M`
/// to the given recipient.
#[derive(Debug, Message)]
#[rtype(result = "()")]
pub struct Subscription<M: 'static>
where
    M: Message + Send + Clone + Debug,
    M::Result: Send,
{
    /// Actor address that will receive broadcast `M`s.
    pub recipient: Recipient<M>,
}
/// Bus request for an unbounded receiver of future broadcasts of `M`.
/// Carries no data; the message type is encoded in the generic parameter.
#[derive(Debug, Default)]
pub struct Channel<M: 'static>
where
    M: Message + Send + Clone + Debug,
    M::Result: Send,
{
    // Zero-sized marker tying this request to the message type.
    m: PhantomData<M>,
}
impl<M> Channel<M>
where
M: Message + Send + Clone + Debug,
M::Result: Send,
{
pub fn new() -> Self {
Self {
m: Default::default(),
}
}
}
// Asking the bus for a channel yields the unbounded receiver end
// (wrapped in Result to surface bus-side failures).
impl<M> Message for Channel<M>
where
    M: Message + Send + Clone + Debug,
    M::Result: Send,
{
    type Result = Result<mpsc::UnboundedReceiver<M>>;
}
/// Bus request for a oneshot receiver that yields only the next broadcast
/// of `M`. Carries no data beyond the type parameter.
#[derive(Debug, Default)]
pub struct Oneshot<M: 'static>
where
    M: Message + Send + Clone + Debug,
    M::Result: Send,
{
    // Zero-sized marker tying this request to the message type.
    m: PhantomData<M>,
}
impl<M> Oneshot<M>
where
M: Message + Send + Clone + Debug,
M::Result: Send,
{
pub fn new() -> Self {
Self {
m: Default::default(),
}
}
}
// Asking the bus for a oneshot yields the receiving end for a single `M`.
impl<M> Message for Oneshot<M>
where
    M: Message + Send + Clone + Debug,
    M::Result: Send,
{
    type Result = Result<oneshot::Receiver<M>>;
}
/// Bus message that fans `msg` out to all current subscribers of `M`.
#[derive(Debug, Message)]
#[rtype(result = "()")]
pub struct Broadcast<M: 'static>
where
    M: Message + Send + Clone + Debug,
    M::Result: Send,
{
    /// Payload delivered to each subscriber (hence the Clone bound on M).
    pub msg: M,
}
/// High-level async facade over the bus actor.
#[async_trait::async_trait]
pub trait Bus {
    /// Registers `recipient` for all future broadcasts of `M`.
    async fn subscribe<M: 'static>(self, recipient: Recipient<M>) -> Result<()>
    where
        M: Message + Send + Clone + Debug,
        M::Result: Send;
    /// Opens a stream that receives every subsequent broadcast of `M`.
    async fn channel<M: 'static>(self) -> Result<mpsc::UnboundedReceiver<M>>
    where
        M: Message + Send + Clone + Debug,
        M::Result: Send;
    /// Waits for the next single broadcast of `M`.
    async fn oneshot<M: 'static>(self) -> Result<M>
    where
        M: Message + Send + Clone + Debug,
        M::Result: Send;
    /// Broadcasts `msg` to all current subscribers of `M`.
    async fn broadcast<M: 'static>(self, msg: M) -> Result<()>
    where
        M: Message + Send + Clone + Debug,
        M::Result: Send;
}
/// Actor that owns the `SysBus` state and serializes all access to it
/// through its mailbox.
pub struct BusActor {
    bus: SysBus,
}
impl BusActor {
    /// Starts a bus actor backed by an empty `SysBus` and returns its address.
    pub fn launch() -> Addr<BusActor> {
        BusActor { bus: SysBus::new() }.start()
    }
}
// Plain actor with the default single-threaded context type.
impl Actor for BusActor {
    type Context = Context<Self>;
}
impl<M: 'static> Handler<Subscription<M>> for BusActor
where
    M: Message + Send + Clone + Debug,
    M::Result: Send,
{
    type Result = ();
    /// Registers the recipient with the underlying `SysBus`.
    fn handle(&mut self, msg: Subscription<M>, _ctx: &mut Self::Context) -> Self::Result {
        self.bus.subscribe(msg.recipient)
    }
}
impl<M: 'static> Handler<Channel<M>> for BusActor
where
    M: Message + Send + Clone + Debug,
    M::Result: Send,
{
    type Result = Result<mpsc::UnboundedReceiver<M>>;
    /// Asks the `SysBus` for a new channel and hands back the receiving end.
    fn handle(&mut self, _msg: Channel<M>, _ctx: &mut Self::Context) -> Self::Result {
        Ok(self.bus.channel())
    }
}
impl<M: 'static> Handler<Oneshot<M>> for BusActor
where
    M: Message + Send + Clone + Debug,
    M::Result: Send,
{
    type Result = Result<oneshot::Receiver<M>>;
    /// Asks the `SysBus` for a oneshot receiver for the next `M`.
    fn handle(&mut self, _msg: Oneshot<M>, _ctx: &mut Self::Context) -> Self::Result {
        Ok(self.bus.oneshot())
    }
}
impl<M: 'static> Handler<Broadcast<M>> for BusActor
where
    M: Message + Send + Clone + Debug,
    M::Result: Send,
{
    type Result = ();
    /// Forwards the payload to every subscriber via the `SysBus`.
    fn handle(&mut self, msg: Broadcast<M>, _ctx: &mut Self::Context) -> Self::Result {
        self.bus.broadcast(msg.msg)
    }
}
// Client-side implementation: each trait method wraps one actor message
// round-trip to the BusActor.
#[async_trait::async_trait]
impl Bus for Addr<BusActor> {
    async fn subscribe<M: 'static>(self, recipient: Recipient<M>) -> Result<()>
    where
        M: Message + Send + Clone + Debug,
        M::Result: Send,
    {
        // Only mailbox delivery can fail here; the handler itself returns ().
        self.send(Subscription { recipient })
            .await
            .map_err(|e| e.into())
    }
    async fn channel<M: 'static>(self) -> Result<mpsc::UnboundedReceiver<M>>
    where
        M: Message + Send + Clone + Debug,
        M::Result: Send,
    {
        // `send(..).await` yields the handler's own Result wrapped in a
        // mailbox Result; the trailing `?` unwraps the mailbox layer.
        self.send(Channel::<M>::new())
            .await
            .map_err(Into::<anyhow::Error>::into)?
    }
    async fn oneshot<M: 'static>(self) -> Result<M>
    where
        M: Message + Send + Clone + Debug,
        M::Result: Send,
    {
        // Two awaits: the bus first replies with a oneshot receiver, which
        // is then awaited for the next broadcast of M.
        self.send(Oneshot::<M>::new())
            .then(|result| async {
                match result {
                    Ok(receiver) => receiver?.await.map_err(Into::<anyhow::Error>::into),
                    Err(err) => Err(Into::<anyhow::Error>::into(err)),
                }
            })
            .await
    }
    async fn broadcast<M: 'static>(self, msg: M) -> Result<()>
    where
        M: Message + Send + Clone + Debug,
        M::Result: Send,
    {
        self.send(Broadcast { msg }).await.map_err(|e| e.into())
    }
}
// Integration-style tests for the bus actor. The 100 ms delays give the
// actor system time to deliver broadcasts before counters are read, so
// these tests are timing-sensitive by construction.
#[cfg(test)]
mod tests {
    use super::*;
    use actix::clock::delay_for;
    use futures::executor::block_on;
    use futures::StreamExt;
    use starcoin_logger::prelude::*;
    use std::thread::sleep;
    use std::time::Duration;
    #[derive(Debug, Message, Clone)]
    #[rtype(result = "()")]
    struct MyMessage {}
    #[derive(Debug, Message, Clone)]
    #[rtype(result = "u64")]
    struct GetCounterMessage {}
    #[derive(Debug, Message, Clone)]
    #[rtype(result = "()")]
    struct DoBroadcast {}
    #[derive(Debug, Message, Clone)]
    #[rtype(result = "Result<()>")]
    struct DoBroadcast2 {}
    // Test actor: counts received MyMessages and can trigger broadcasts.
    struct MyActor {
        counter: u64,
        bus: Addr<BusActor>,
    }
    impl Actor for MyActor {
        type Context = Context<Self>;
    }
    impl Handler<MyMessage> for MyActor {
        type Result = ();
        fn handle(&mut self, msg: MyMessage, _ctx: &mut Self::Context) {
            info!("handle MyMessage: {:?}", msg);
            self.counter += 1;
        }
    }
    impl Handler<GetCounterMessage> for MyActor {
        type Result = u64;
        fn handle(&mut self, _msg: GetCounterMessage, _ctx: &mut Self::Context) -> Self::Result {
            info!("handle GetCounterMessage: {:?}", self.counter);
            self.counter
        }
    }
    impl Handler<DoBroadcast> for MyActor {
        type Result = ();
        // Broadcasts from inside a handler using the ActorFuture machinery.
        fn handle(&mut self, _msg: DoBroadcast, ctx: &mut Self::Context) {
            info!("handle DoBroadcast");
            self.bus
                .send(Broadcast { msg: MyMessage {} })
                .into_actor(self)
                //need convert act to static ActorFuture and call wait.
                .then(|_result, act, _ctx| async {}.into_actor(act))
                .wait(ctx);
        }
    }
    impl Handler<DoBroadcast2> for MyActor {
        type Result = ResponseActFuture<Self, Result<()>>;
        // Same broadcast, but via the async Bus trait wrapped as an ActorFuture.
        fn handle(&mut self, _msg: DoBroadcast2, _ctx: &mut Self::Context) -> Self::Result {
            let f = self.bus.clone().broadcast(MyMessage {});
            let f = actix::fut::wrap_future::<_, Self>(f);
            Box::pin(f)
        }
    }
    #[stest::test]
    async fn test_bus_actor() {
        let bus_actor = BusActor::launch();
        let actor = MyActor {
            counter: 0,
            bus: bus_actor.clone(),
        };
        let addr = actor.start();
        let recipient = addr.clone().recipient::<MyMessage>();
        bus_actor.send(Subscription { recipient }).await.unwrap();
        bus_actor
            .send(Broadcast { msg: MyMessage {} })
            .await
            .unwrap();
        // Give delivery time to complete before reading the counter.
        delay_for(Duration::from_millis(100)).await;
        let counter = addr.send(GetCounterMessage {}).await.unwrap();
        assert_eq!(counter, 1);
    }
    #[stest::test]
    async fn test_bus_actor_send_message_in_handle() {
        let bus_actor = BusActor::launch();
        let actor = MyActor {
            counter: 0,
            bus: bus_actor.clone(),
        };
        let addr = actor.start();
        let recipient = addr.clone().recipient::<MyMessage>();
        bus_actor.send(Subscription { recipient }).await.unwrap();
        addr.send(DoBroadcast {}).await.unwrap();
        delay_for(Duration::from_millis(100)).await;
        let counter = addr.send(GetCounterMessage {}).await.unwrap();
        assert_eq!(counter, 1);
    }
    #[stest::test]
    async fn test_bus_actor_async_trait() {
        let bus_actor = BusActor::launch();
        let actor = MyActor {
            counter: 0,
            bus: bus_actor.clone(),
        };
        let addr = actor.start();
        let recipient = addr.clone().recipient::<MyMessage>();
        bus_actor.subscribe(recipient).await.unwrap();
        addr.send(DoBroadcast2 {}).await.unwrap().unwrap();
        delay_for(Duration::from_millis(100)).await;
        let counter = addr.send(GetCounterMessage {}).await.unwrap();
        assert_eq!(counter, 1);
    }
    #[stest::test]
    async fn test_onshot() {
        let bus_actor = BusActor::launch();
        let bus_actor2 = bus_actor.clone();
        // Background arbiter broadcasts repeatedly so the oneshot always
        // has something to receive.
        let arbiter = Arbiter::new();
        arbiter.exec_fn(move || loop {
            let result =
                block_on(async { bus_actor2.clone().broadcast(MyMessage {}).await.is_ok() });
            debug!("broadcast result: {}", result);
            sleep(Duration::from_millis(50));
        });
        let msg = bus_actor.clone().oneshot::<MyMessage>().await;
        assert!(msg.is_ok());
        let msg = bus_actor.clone().oneshot::<MyMessage>().await;
        assert!(msg.is_ok());
    }
    #[stest::test]
    async fn test_channel() {
        let bus_actor = BusActor::launch();
        let bus_actor2 = bus_actor.clone();
        // Background arbiter broadcasts repeatedly to feed the channels.
        let arbiter = Arbiter::new();
        arbiter.exec_fn(move || loop {
            let result =
                block_on(async { bus_actor2.clone().broadcast(MyMessage {}).await.is_ok() });
            debug!("broadcast result: {}", result);
            sleep(Duration::from_millis(50));
        });
        let result = bus_actor.clone().channel::<MyMessage>().await;
        assert!(result.is_ok());
        let receiver = result.unwrap();
        let msgs: Vec<MyMessage> = receiver.take(3).collect().await;
        assert_eq!(3, msgs.len());
        let receiver2 = bus_actor.clone().channel::<MyMessage>().await.unwrap();
        let msgs: Vec<MyMessage> = receiver2.take(3).collect().await;
        assert_eq!(3, msgs.len());
    }
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.