text stringlengths 8 4.13M |
|---|
use std::{collections::HashMap, path::PathBuf, sync::Arc, time::Duration};
use futures::TryStreamExt;
use semver::Version;
use tokio::sync::{mpsc, RwLock};
use crate::{
bson::{doc, Document},
client::options::ClientOptions,
concern::{Acknowledgment, WriteConcern},
gridfs::GridFsBucket,
options::{
CollectionOptions,
CreateCollectionOptions,
FindOptions,
ReadConcern,
ReadPreference,
SelectionCriteria,
},
runtime,
sdam::{TopologyDescription, MIN_HEARTBEAT_FREQUENCY},
test::{
log_uncaptured,
spec::unified_runner::{
entity::EventList,
matcher::events_match,
test_file::{ExpectedEventType, TestFile},
},
update_options_for_testing,
util::FailPointGuard,
EventHandler,
TestClient,
CLIENT_OPTIONS,
DEFAULT_URI,
LOAD_BALANCED_MULTIPLE_URI,
LOAD_BALANCED_SINGLE_URI,
SERVERLESS,
SERVER_API,
},
Collection,
Database,
};
use super::{
entity::ThreadEntity,
file_level_log,
merge_uri_options,
test_file::ThreadMessage,
ClientEntity,
CollectionData,
Entity,
SessionEntity,
TestCursor,
TestFileEntity,
};
#[cfg(feature = "in-use-encryption-unstable")]
use crate::test::KmsProviderList;
#[cfg(feature = "tracing-unstable")]
use crate::test::{
spec::unified_runner::matcher::tracing_events_match,
util::max_verbosity_levels_for_test_case,
DEFAULT_GLOBAL_TRACING_HANDLER,
};
/// Unified-format operations this runner does not implement; any test case
/// containing one of these operations is skipped entirely.
const SKIPPED_OPERATIONS: &[&str] = &[
    "bulkWrite",
    "count",
    "listCollectionObjects",
    "listDatabaseObjects",
    "mapReduce",
    "watch",
    "rewrapManyDataKey",
];
/// Lowest unified test format schema version this runner accepts.
static MIN_SPEC_VERSION: Version = Version::new(1, 0, 0);
/// Highest unified test format schema version this runner accepts.
static MAX_SPEC_VERSION: Version = Version::new(1, 16, 0);
/// Maps entity ids declared in a test file to the entities created for them.
pub(crate) type EntityMap = HashMap<String, Entity>;
/// Orchestrates execution of unified-format spec test files.
///
/// Cloning is cheap: the entity map and fail point guards are shared behind
/// `Arc<RwLock<_>>`, so clones observe the same state (used for thread entities).
#[derive(Clone)]
pub(crate) struct TestRunner {
    /// Client used for setup/verification, separate from the clients under test.
    pub(crate) internal_client: TestClient,
    /// Entities created for the current test, keyed by their declared id.
    pub(crate) entities: Arc<RwLock<EntityMap>>,
    /// Guards for configured fail points; presumably disable the fail point on
    /// drop — cleared after each test case (see `run_test`).
    pub(crate) fail_point_guards: Arc<RwLock<Vec<FailPointGuard>>>,
}
impl TestRunner {
pub(crate) async fn new() -> Self {
Self {
internal_client: TestClient::new().await,
entities: Default::default(),
fail_point_guards: Default::default(),
}
}
    /// Builds a runner whose internal client is parsed from `connection_string`.
    ///
    /// The sync wrappers expose a blocking `ClientOptions::parse`, while the
    /// default async API returns a future — hence the cfg-gated duplication.
    pub(crate) async fn new_with_connection_string(connection_string: &str) -> Self {
        #[cfg(all(not(feature = "sync"), not(feature = "tokio-sync")))]
        let options = ClientOptions::parse(connection_string).await.unwrap();
        #[cfg(any(feature = "sync", feature = "tokio-sync"))]
        let options = ClientOptions::parse(connection_string).unwrap();
        Self {
            internal_client: TestClient::with_options(Some(options)).await,
            entities: Arc::new(RwLock::new(EntityMap::new())),
            fail_point_guards: Arc::new(RwLock::new(Vec::new())),
        }
    }
    /// Executes every test case in `test_file`.
    ///
    /// Skips the whole file when the schema version is out of range or no
    /// file-level `runOnRequirements` is satisfiable; skips individual cases for
    /// manual skips, `skipReason`, unsupported operations, case-level
    /// requirements, or a non-matching `TEST_DESCRIPTION` env filter. For each
    /// case that runs: seeds initial data, rebuilds the entity map, executes the
    /// operations, verifies expected events (and, under `tracing-unstable`,
    /// expected log messages), clears fail points, and checks outcome data.
    pub(crate) async fn run_test(
        &self,
        test_file: TestFile,
        path: impl Into<Option<PathBuf>>,
        skipped_tests: Option<&Vec<&str>>,
    ) {
        // Refuse files this runner has not been updated to understand.
        let schema_version = &test_file.schema_version;
        assert!(
            schema_version >= &MIN_SPEC_VERSION && schema_version <= &MAX_SPEC_VERSION,
            "Test runner not compatible with schema version {}",
            schema_version
        );
        let test_description = match path.into() {
            Some(path) => format!("{} ({:?})", &test_file.description, path),
            None => test_file.description.clone(),
        };
        // File-level requirements: at least one must be satisfied to proceed.
        if let Some(ref requirements) = test_file.run_on_requirements {
            let mut can_run_on = false;
            let mut run_on_errors = vec![];
            for requirement in requirements {
                match requirement.can_run_on(&self.internal_client).await {
                    Ok(()) => can_run_on = true,
                    Err(e) => run_on_errors.push(e),
                }
            }
            if !can_run_on {
                file_level_log(format!(
                    "Skipping {}: client topology not compatible with test ({})",
                    test_description,
                    run_on_errors.join(","),
                ));
                return;
            }
        }
        file_level_log(format!("Running tests from {}", test_description));
        for test_case in &test_file.tests {
            // Optional env filter: run only cases whose description matches.
            if let Ok(description) = std::env::var("TEST_DESCRIPTION") {
                if !test_case
                    .description
                    .to_lowercase()
                    .contains(&description.to_lowercase())
                {
                    continue;
                }
            }
            if let Some(skipped_tests) = skipped_tests {
                if skipped_tests.contains(&test_case.description.as_str()) {
                    log_uncaptured(format!(
                        "Skipping test case {}: test skipped manually",
                        &test_case.description
                    ));
                    continue;
                }
            }
            if let Some(skip_reason) = &test_case.skip_reason {
                log_uncaptured(format!(
                    "Skipping test case {:?}: {}",
                    &test_case.description, skip_reason
                ));
                continue;
            }
            // Cases exercising operations this runner does not support are skipped.
            if let Some(op) = test_case
                .operations
                .iter()
                .find(|op| SKIPPED_OPERATIONS.contains(&op.name.as_str()))
                .map(|op| op.name.as_str())
            {
                log_uncaptured(format!(
                    "Skipping test case {:?}: unsupported operation {}",
                    &test_case.description, op
                ));
                continue;
            }
            // Case-level requirements, same semantics as the file-level check.
            if let Some(ref requirements) = test_case.run_on_requirements {
                let mut can_run_on = false;
                let mut run_on_errors = vec![];
                for requirement in requirements {
                    match requirement.can_run_on(&self.internal_client).await {
                        Ok(()) => can_run_on = true,
                        Err(e) => run_on_errors.push(e),
                    }
                }
                if !can_run_on {
                    log_uncaptured(format!(
                        "Skipping test case {:?}: client topology not compatible with test ({})",
                        &test_case.description,
                        run_on_errors.join(","),
                    ));
                    continue;
                }
            }
            log_uncaptured(format!("Executing {:?}", &test_case.description));
            if let Some(ref initial_data) = test_file.initial_data {
                for data in initial_data {
                    self.insert_initial_data(data).await;
                }
            }
            // Fresh entity map per test case.
            self.entities.write().await.clear();
            if let Some(ref create_entities) = test_file.create_entities {
                self.populate_entity_map(create_entities, &test_case.description)
                    .await;
            }
            // Subscribe to tracing events at the maximum verbosity any client in
            // this case observes; the guard restores levels when dropped.
            #[cfg(feature = "tracing-unstable")]
            let (mut tracing_subscriber, _levels_guard) = {
                let tracing_levels =
                    max_verbosity_levels_for_test_case(&test_file.create_entities, test_case);
                let guard = DEFAULT_GLOBAL_TRACING_HANDLER.set_levels(tracing_levels);
                let subscriber = DEFAULT_GLOBAL_TRACING_HANDLER.subscribe();
                (subscriber, guard)
            };
            for operation in &test_case.operations {
                self.sync_workers().await;
                operation.execute(self, &test_case.description).await;
                // This test (in src/test/spec/json/sessions/server-support.json) runs two
                // operations with implicit sessions in sequence and then checks to see if they
                // used the same lsid. We delay for one second to ensure that the
                // implicit session used in the first operation is returned to the pool before
                // the second operation is executed.
                if test_case.description == "Server supports implicit sessions" {
                    runtime::delay_for(Duration::from_secs(1)).await;
                }
            }
            // Verify expected events per client entity.
            if let Some(ref events) = test_case.expect_events {
                for expected in events {
                    let entities = self.entities.read().await;
                    let entity = entities.get(&expected.client).unwrap();
                    let client = entity.as_client();
                    // Flush in-flight work so all events have been emitted.
                    client.sync_workers().await;
                    let event_type = expected.event_type.unwrap_or(ExpectedEventType::Command);
                    let actual_events: Vec<_> =
                        client.get_filtered_events(event_type).into_iter().collect();
                    let expected_events = &expected.events;
                    if expected.ignore_extra_events.unwrap_or(false) {
                        assert!(
                            actual_events.len() >= expected_events.len(),
                            "[{}] actual:\n{:#?}\nexpected:\n{:#?}",
                            test_case.description,
                            actual_events,
                            expected_events
                        )
                    } else {
                        assert_eq!(
                            actual_events.len(),
                            expected_events.len(),
                            "[{}] actual:\n{:#?}\nexpected:\n{:#?}",
                            test_case.description,
                            actual_events,
                            expected_events
                        )
                    }
                    // Events must match pairwise, in order.
                    for (actual, expected) in actual_events.iter().zip(expected_events) {
                        if let Err(e) = events_match(actual, expected, Some(&entities)) {
                            panic!(
                                "event mismatch: expected = {:#?}, actual = {:#?}\nmismatch \
                                 detail: {}",
                                expected, actual, e,
                            );
                        }
                    }
                }
            }
            // Verify expected tracing/log messages, filtered per client by
            // topology id and any `ignoreMessages` patterns.
            #[cfg(feature = "tracing-unstable")]
            if let Some(ref expected_messages) = test_case.expect_log_messages {
                self.sync_workers().await;
                let all_tracing_events = tracing_subscriber
                    .collect_events(Duration::from_millis(1000), |_| true)
                    .await;
                for expectation in expected_messages {
                    let client_topology_id = self.get_client(&expectation.client).await.topology_id;
                    let client_actual_events: Vec<_> = all_tracing_events
                        .iter()
                        .filter(|e| {
                            if e.topology_id() != client_topology_id.to_hex() {
                                return false;
                            }
                            if let Some(ref ignored_messages) = expectation.ignore_messages {
                                for ignored_message in ignored_messages {
                                    if tracing_events_match(e, ignored_message).is_ok() {
                                        return false;
                                    }
                                }
                            }
                            true
                        })
                        .collect();
                    let expected_events = &expectation.messages;
                    if expectation.ignore_extra_messages != Some(true) {
                        assert_eq!(
                            client_actual_events.len(),
                            expected_events.len(),
                            "Actual tracing event count should match expected. Expected events = \
                             {:#?}, actual events = {:#?}",
                            expected_events,
                            client_actual_events,
                        );
                    }
                    for (actual, expected) in client_actual_events.iter().zip(expected_events) {
                        if let Err(e) = tracing_events_match(actual, expected) {
                            panic!(
                                "tracing event mismatch: expected = {:#?}, actual = \
                                 {:#?}\nmismatch detail: {}",
                                expected, actual, e,
                            );
                        }
                    }
                }
            }
            // Drop all fail point guards before checking outcome data.
            self.fail_point_guards.write().await.clear();
            // Verify `outcome`: read each collection back (primary, local read
            // concern, sorted by _id) and compare with the expected documents.
            if let Some(ref outcome) = test_case.outcome {
                for expected_data in outcome {
                    let db_name = &expected_data.database_name;
                    let coll_name = &expected_data.collection_name;
                    let selection_criteria =
                        SelectionCriteria::ReadPreference(ReadPreference::Primary);
                    let read_concern = ReadConcern::local();
                    let options = CollectionOptions::builder()
                        .selection_criteria(selection_criteria)
                        .read_concern(read_concern)
                        .build();
                    let collection = self
                        .internal_client
                        .get_coll_with_options(db_name, coll_name, options);
                    let options = FindOptions::builder().sort(doc! { "_id": 1 }).build();
                    let actual_data: Vec<Document> = collection
                        .find(doc! {}, options)
                        .await
                        .unwrap()
                        .try_collect()
                        .await
                        .unwrap();
                    assert_eq!(expected_data.documents, actual_data);
                }
            }
        }
    }
pub(crate) async fn insert_initial_data(&self, data: &CollectionData) {
let write_concern = WriteConcern::builder().w(Acknowledgment::Majority).build();
if !data.documents.is_empty() {
let collection_options = CollectionOptions::builder()
.write_concern(write_concern)
.build();
let coll = self
.internal_client
.init_db_and_coll_with_options(
&data.database_name,
&data.collection_name,
collection_options,
)
.await;
coll.insert_many(data.documents.clone(), None)
.await
.unwrap();
} else {
let collection_options = CreateCollectionOptions::builder()
.write_concern(write_concern)
.build();
self.internal_client
.create_fresh_collection(
&data.database_name,
&data.collection_name,
collection_options,
)
.await;
}
}
    /// Creates every entity declared in the test file's `createEntities` list
    /// and registers it in the entity map, panicking on duplicate ids.
    ///
    /// `description` is used only to annotate panic/error messages.
    pub(crate) async fn populate_entity_map(
        &self,
        create_entities: &[TestFileEntity],
        description: impl AsRef<str>,
    ) {
        for entity in create_entities {
            let (id, entity) = match entity {
                TestFileEntity::Client(client) => {
                    // `storeEventsAsEntities` registers extra event-list entities
                    // that accumulate the named events for this client.
                    if let Some(store_events_as_entities) = &client.store_events_as_entities {
                        for store_events_as_entity in store_events_as_entities {
                            let event_list = EventList {
                                client_id: client.id.clone(),
                                event_names: store_events_as_entity.events.clone(),
                            };
                            self.insert_entity(&store_events_as_entity.id, event_list)
                                .await;
                        }
                    }
                    let id = client.id.clone();
                    let observe_events = client.observe_events.clone();
                    let ignore_command_names = client.ignore_command_monitoring_events.clone();
                    let observe_sensitive_commands =
                        client.observe_sensitive_commands.unwrap_or(false);
                    let server_api = client.server_api.clone().or_else(|| SERVER_API.clone());
                    // Pick the base URI: in load-balanced mode, choose the
                    // single- or multi-mongos load balancer URI.
                    let given_uri = if CLIENT_OPTIONS.get().await.load_balanced.unwrap_or(false) {
                        // for serverless testing, ignore use_multiple_mongoses.
                        if client.use_multiple_mongoses() && !*SERVERLESS {
                            LOAD_BALANCED_MULTIPLE_URI.as_ref().expect(
                                "Test requires URI for load balancer fronting multiple servers",
                            )
                        } else {
                            LOAD_BALANCED_SINGLE_URI.as_ref().expect(
                                "Test requires URI for load balancer fronting single server",
                            )
                        }
                    } else {
                        &DEFAULT_URI
                    };
                    let uri = merge_uri_options(
                        given_uri,
                        client.uri_options.as_ref(),
                        client.use_multiple_mongoses(),
                    );
                    let mut options =
                        ClientOptions::parse_uri(&uri, None)
                            .await
                            .unwrap_or_else(|e| {
                                panic!(
                                    "[{}] invalid client URI: {}, error: {}",
                                    description.as_ref(),
                                    uri,
                                    e
                                )
                            });
                    update_options_for_testing(&mut options);
                    // One handler receives command, CMAP, and SDAM events.
                    let handler = Arc::new(EventHandler::new());
                    options.command_event_handler = Some(handler.clone());
                    options.cmap_event_handler = Some(handler.clone());
                    options.sdam_event_handler = Some(handler.clone());
                    options.server_api = server_api;
                    if client.use_multiple_mongoses() && TestClient::new().await.is_sharded() {
                        assert!(
                            options.hosts.len() > 1,
                            "[{}]: Test requires multiple mongos hosts",
                            description.as_ref()
                        );
                    }
                    // In order to speed up the tests where a failpoint is used, the test runner
                    // MAY specify a reduced value for `heartbeatFrequencyMS` and
                    // `minHeartbeatFrequencyMS`. Test runners MUST NOT do so
                    // for any client that specifies `heartbeatFrequencyMS` in its
                    // `uriOptions`.
                    if options.heartbeat_freq.is_none() {
                        options.test_options_mut().min_heartbeat_freq =
                            Some(Duration::from_millis(50));
                        options.heartbeat_freq = Some(MIN_HEARTBEAT_FREQUENCY);
                    }
                    // if we're observing log messages, we need to set the entity ID on the test
                    // options so that it can be emitted in tracing events and
                    // used to filter events by client for test assertions.
                    #[cfg(feature = "tracing-unstable")]
                    if client.observe_log_messages.is_some() {
                        // some tests require that an untruncated command/reply is attached to
                        // events so it can be parsed as JSON, but on certain topologies some of
                        // the tests produce replies with extJSON longer than 1000 characters.
                        // to accommodate this, we use a higher default length for unified tests.
                        options.tracing_max_document_length_bytes = Some(10000);
                    }
                    (
                        id,
                        Entity::Client(ClientEntity::new(
                            options,
                            handler,
                            observe_events,
                            ignore_command_names,
                            observe_sensitive_commands,
                        )),
                    )
                }
                TestFileEntity::Database(database) => {
                    let id = database.id.clone();
                    let client = self.get_client(&database.client).await;
                    let database = if let Some(ref options) = database.database_options {
                        let options = options.as_database_options();
                        client.database_with_options(&database.database_name, options)
                    } else {
                        client.database(&database.database_name)
                    };
                    (id, database.into())
                }
                TestFileEntity::Collection(collection) => {
                    let id = collection.id.clone();
                    let database = self.get_database(&collection.database).await;
                    let collection = if let Some(ref options) = collection.collection_options {
                        let options = options.as_collection_options();
                        database.collection_with_options(&collection.collection_name, options)
                    } else {
                        database.collection(&collection.collection_name)
                    };
                    (id, collection.into())
                }
                TestFileEntity::Session(session) => {
                    let id = session.id.clone();
                    let client = self.get_client(&session.client).await;
                    let client_session = client
                        .start_session(session.session_options.clone())
                        .await
                        .unwrap();
                    (id, Entity::Session(SessionEntity::new(client_session)))
                }
                TestFileEntity::Bucket(bucket) => {
                    let id = bucket.id.clone();
                    let database = self.get_database(&bucket.database).await;
                    (
                        id,
                        Entity::Bucket(database.gridfs_bucket(bucket.bucket_options.clone())),
                    )
                }
                TestFileEntity::Thread(thread) => {
                    // A thread entity is a background task driven by a channel of
                    // `ThreadMessage`s; it clones this runner to execute operations.
                    let (sender, mut receiver) = mpsc::unbounded_channel::<ThreadMessage>();
                    let runner = self.clone();
                    let d = description.as_ref().to_string();
                    runtime::execute(async move {
                        while let Some(msg) = receiver.recv().await {
                            match msg {
                                ThreadMessage::ExecuteOperation(op) => {
                                    op.execute(&runner, d.as_str()).await;
                                }
                                ThreadMessage::Stop(sender) => {
                                    // This returns an error if the waitForThread operation stopped
                                    // listening (e.g. due to timeout). The waitForThread operation
                                    // will handle reporting that error, so we can ignore it here.
                                    let _ = sender.send(());
                                    break;
                                }
                            }
                        }
                    });
                    (thread.id.clone(), Entity::Thread(ThreadEntity { sender }))
                }
                #[cfg(feature = "in-use-encryption-unstable")]
                TestFileEntity::ClientEncryption(client_enc) => {
                    let id = client_enc.id.clone();
                    let opts = &client_enc.client_encryption_opts;
                    let kv_client = self
                        .get_client(&opts.key_vault_client)
                        .await
                        .client()
                        .unwrap()
                        .clone();
                    let kms_providers: HashMap<mongocrypt::ctx::KmsProvider, Document> =
                        bson::from_document(opts.kms_providers.clone()).unwrap();
                    // Replace `$$placeholder` values with real credentials from the env.
                    let kms_providers = fill_kms_placeholders(kms_providers);
                    let client_enc = crate::client_encryption::ClientEncryption::new(
                        kv_client,
                        opts.key_vault_namespace.clone(),
                        kms_providers,
                    )
                    .unwrap();
                    (id, Entity::ClientEncryption(Arc::new(client_enc)))
                }
            };
            self.insert_entity(&id, entity).await;
        }
    }
pub(crate) async fn insert_entity(&self, id: impl AsRef<str>, entity: impl Into<Entity>) {
if self
.entities
.write()
.await
.insert(id.as_ref().to_string(), entity.into())
.is_some()
{
panic!(
"Entity with id {} already present in entity map",
id.as_ref()
);
}
}
pub(crate) async fn sync_workers(&self) {
self.internal_client.sync_workers().await;
let entities = self.entities.read().await;
for entity in entities.values() {
if let Entity::Client(client) = entity {
client.sync_workers().await;
}
}
}
pub(crate) async fn get_client(&self, id: &str) -> ClientEntity {
self.entities
.read()
.await
.get(id)
.unwrap()
.as_client()
.clone()
}
pub(crate) async fn get_database(&self, id: &str) -> Database {
self.entities
.read()
.await
.get(id)
.unwrap()
.as_database()
.clone()
}
pub(crate) async fn get_collection(&self, id: &str) -> Collection<Document> {
self.entities
.read()
.await
.get(id)
.unwrap()
.as_collection()
.clone()
}
pub(crate) async fn get_bucket(&self, id: &str) -> GridFsBucket {
self.entities
.read()
.await
.get(id)
.unwrap()
.as_bucket_entity()
.clone()
}
pub(crate) async fn get_thread(&self, id: &str) -> ThreadEntity {
self.entities
.read()
.await
.get(id)
.unwrap()
.as_thread()
.clone()
}
pub(crate) async fn get_topology_description(
&self,
id: impl AsRef<str>,
) -> TopologyDescription {
self.entities
.read()
.await
.get(id.as_ref())
.unwrap()
.as_topology_description()
.clone()
}
#[cfg(feature = "in-use-encryption-unstable")]
pub(crate) async fn get_client_encryption(
&self,
id: impl AsRef<str>,
) -> Arc<crate::client_encryption::ClientEncryption> {
self.entities
.read()
.await
.get(id.as_ref())
.unwrap()
.as_client_encryption()
.clone()
}
/// Removes the cursor with the given ID from the entity map. This method passes ownership of
/// the cursor to the caller so that a mutable reference to a ClientSession can be accessed from
/// the entity map simultaneously. Once the caller is finished with the cursor, it MUST be
/// returned to the test runner via the return_cursor method below.
pub(crate) async fn take_cursor(&self, id: impl AsRef<str>) -> TestCursor {
self.entities
.write()
.await
.remove(id.as_ref())
.unwrap()
.into_cursor()
}
/// Returns the given cursor to the entity map. This method must be called after take_cursor.
pub(crate) async fn return_cursor(&self, id: impl AsRef<str>, cursor: TestCursor) {
self.entities
.write()
.await
.insert(id.as_ref().into(), Entity::Cursor(cursor));
}
}
/// Replaces every `{"$$placeholder": 1}` value in the given KMS provider
/// configs with the matching real credential from `KMS_PROVIDERS_MAP`, and
/// attaches each provider's TLS options. Panics when a placeholder has no
/// corresponding configured value.
#[cfg(feature = "in-use-encryption-unstable")]
fn fill_kms_placeholders(
    kms_providers: HashMap<mongocrypt::ctx::KmsProvider, Document>,
) -> KmsProviderList {
    use crate::test::KMS_PROVIDERS_MAP;
    let placeholder = bson::Bson::Document(doc! { "$$placeholder": 1 });
    kms_providers
        .into_iter()
        .map(|(provider, mut config)| {
            for (key, value) in config.iter_mut() {
                if *value == placeholder {
                    *value = KMS_PROVIDERS_MAP
                        .get(&provider)
                        .unwrap_or_else(|| panic!("missing config for {:?}", provider))
                        .0
                        .get(key)
                        .unwrap_or_else(|| {
                            panic!("provider config {:?} missing key {:?}", provider, key)
                        })
                        .clone();
                }
            }
            let tls = KMS_PROVIDERS_MAP
                .get(&provider)
                .and_then(|(_, t)| t.clone());
            (provider, config, tls)
        })
        .collect()
}
|
use std::fs;
use std::process::Command;
use std::sync::Once;
/// Runs `cargo build` exactly once per test process so that compiled
/// artifacts exist before compile tests execute; panics if the build fails.
pub fn setup() {
    static BUILD: Once = Once::new();
    BUILD.call_once(|| {
        let ok = Command::new("cargo")
            .arg("build")
            .status()
            .expect("failed to build")
            .success();
        assert!(ok);
    });
}
/// Compiles `code` (prefixed with a `no_panic` prelude) to assembly in a
/// temporary directory and reports whether the `no_panic` crate's
/// "detected panic in function" marker appears in the emitted asm.
pub fn contains_panic(name: &str, code: &str) -> bool {
    let tempdir = tempfile::tempdir().unwrap();
    let prelude = stringify! {
        use no_panic::no_panic;
    };
    let rs = tempdir.path().join(format!("{}.rs", name));
    fs::write(&rs, format!("{}{}", prelude, code)).unwrap();
    // Link against the locally built no_panic dylib produced by `setup()`.
    let extern_spec = format!(
        "no_panic=target/debug/{}no_panic.{}",
        std::env::consts::DLL_PREFIX,
        std::env::consts::DLL_EXTENSION,
    );
    let status = Command::new("rustc")
        .args(["--crate-name", name])
        .arg(&rs)
        .args(["--edition=2018", "-C", "opt-level=3", "--emit=asm", "--out-dir"])
        .arg(tempdir.path())
        .args(["--extern", extern_spec.as_str()])
        .status()
        .expect("failed to execute rustc");
    assert!(status.success());
    let asm_path = tempdir.path().join(format!("{}.s", name));
    let asm = fs::read_to_string(asm_path).unwrap();
    asm.contains("detected panic in function")
}
/// Generates one `#[test]` per `mod $name { ... }` arm asserting that the
/// contained code compiles WITHOUT the `no_panic` "detected panic" marker,
/// i.e. the optimizer proved the code panic-free.
macro_rules! assert_no_panic {
    ($(mod $name:ident { $($content:tt)* })*) => {
        mod no_panic {
            use crate::compiletest;
            $(
                #[test]
                fn $name() {
                    compiletest::setup();
                    let name = stringify!($name);
                    let content = stringify!($($content)*);
                    assert!(!compiletest::contains_panic(name, content));
                }
            )*
        }
    };
}
/// Counterpart to `assert_no_panic!`: each generated `#[test]` asserts that the
/// contained code DOES trip the `no_panic` marker (a potential panic remains).
macro_rules! assert_link_error {
    ($(mod $name:ident { $($content:tt)* })*) => {
        mod link_error {
            use crate::compiletest;
            $(
                #[test]
                fn $name() {
                    compiletest::setup();
                    let name = stringify!($name);
                    let content = stringify!($($content)*);
                    assert!(compiletest::contains_panic(name, content));
                }
            )*
        }
    };
}
|
use anyhow;
use k8s_openapi::apiextensions_apiserver::pkg::apis::apiextensions::v1::CustomResourceDefinition;
use kube_derive::CustomResource;
use rweb::openapi::Entity;
use rweb::openapi::Schema;
use rweb::Schema;
use serde::{Deserialize, Serialize};
/// Spec of the `CertIssuer` custom resource (group `certmaster.kuberails.com`,
/// version `v1`); the `CustomResource` derive generates the `CertIssuer` type.
#[derive(CustomResource, Serialize, Deserialize, Clone, Debug, Schema)]
#[kube(group = "certmaster.kuberails.com", version = "v1")]
#[serde(rename_all = "camelCase")]
pub struct CertIssuerSpec {
    /// Domain to issue a certificate for.
    pub domain_name: String,
    /// DNS provider used for the DNS-01 style validation — TODO confirm challenge type.
    pub dns_provider: DnsProvider,
    /// Optional name of the secret the certificate is stored in.
    pub secret_name: Option<String>,
    /// Target namespaces; defaults to `["default"]` when omitted.
    #[serde(default = "default_namespace")]
    pub namespaces: Vec<String>,
}
/// Serde default for `CertIssuerSpec::namespaces`: just the `default` namespace.
fn default_namespace() -> Vec<String> {
    vec![String::from("default")]
}
/// Supported DNS providers, serialized with an internal `provider` tag
/// (lowercased variant name) alongside the credential fields.
#[derive(Serialize, Deserialize, Clone, Debug, Schema)]
#[serde(tag = "provider")]
#[serde(rename_all = "lowercase")]
pub enum DnsProvider {
    DigitalOcean(BasicAuth),
    Cloudflare(BasicAuth),
    Route53(Route53),
}
/// Key/secret credential pair used by DigitalOcean and Cloudflare providers.
#[derive(Serialize, Deserialize, Clone, Debug, Schema)]
#[serde(rename_all = "camelCase")]
pub struct BasicAuth {
    key: String,
    secret_key: String,
}
/// AWS Route53 credentials and region; profile and hosted zone are optional.
#[derive(Serialize, Deserialize, Clone, Debug, Schema)]
#[serde(rename_all = "camelCase")]
pub struct Route53 {
    access_key: String,
    secret_access_key: String,
    region: String,
    profile: Option<String>,
    hosted_zone_id: Option<String>,
}
/// Demo entry point: builds a sample spec, then prints the generated CRD, the
/// rweb OpenAPI schema, and the sample spec as YAML.
fn main() -> anyhow::Result<()> {
    let spec = CertIssuerSpec {
        domain_name: "kuberails.com".to_string(),
        dns_provider: DnsProvider::DigitalOcean(BasicAuth {
            key: "key".to_string(),
            secret_key: "secretKey".to_string(),
        }),
        secret_name: Some("secret+name".to_string()),
        namespaces: vec!["default".to_string()],
    };
    // `CertIssuer` is generated by the `CustomResource` derive on `CertIssuerSpec`.
    let crd = CertIssuer::crd();
    let schema = <CertIssuerSpec as Entity>::describe();
    println!("CRD: \n{}\n", serde_yaml::to_string(&crd)?);
    println!("\nSCHEMA: \n{}\n", serde_yaml::to_string(&schema)?);
    println!("\nSPEC: \n{}\n", serde_yaml::to_string(&spec)?);
    Ok(())
}
|
use std::collections::HashMap;
use std::convert::TryFrom;
use std::ops::{Deref, DerefMut};
use crate::Element;
/// Maps to `HashMap<String, Element>`
///
/// Newtype wrapper providing struct-oriented helpers over the inner map.
#[derive(Debug)]
pub struct Struct(pub HashMap<String, Element>);
impl From<Struct> for Element {
fn from(s: Struct) -> Self {
Element::Struct(s.0)
}
}
// Deref to the inner map so read-only `HashMap` methods are available directly.
impl Deref for Struct {
    type Target = HashMap<String, Element>;
    fn deref(&self) -> &Self::Target {
        &self.0
    }
}
// Mutable counterpart of the `Deref` impl above.
impl DerefMut for Struct {
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.0
    }
}
impl Struct {
    /// Create a new empty struct.
    pub fn new() -> Self {
        Self(HashMap::new())
    }
    /// Insert an item into the struct.
    ///
    /// Returns the previous element stored under `key`, if any.
    ///
    /// This function uses the `Into<Element>` trait.
    /// ```
    /// use tycho::collections::Struct;
    /// use tycho::Element;
    /// let mut s = Struct::new();
    /// s.insert("a", "foo");
    /// ```
    pub fn insert<V: Into<Element>>(&mut self, key: &str, value: V) -> Option<Element> {
        self.0.insert(key.to_string(), value.into())
    }
    /// Get an item from the struct.
    pub fn get(&self, key: &str) -> Option<&Element> {
        self.0.get(key)
    }
    /// Remove an item from the struct.
    pub fn remove(&mut self, key: &str) -> Option<Element> {
        self.0.remove(key)
    }
    /// Get a value from a struct with a given type.
    ///
    /// Returns `None` when the key is absent or the conversion fails.
    ///
    /// ```
    /// use tycho::collections::Struct;
    /// use tycho::{Element, Value, Number};
    /// let mut s = Struct::new();
    ///
    /// // Insert
    /// s.insert("foo", 420i32);
    ///
    /// // Retrieve
    /// assert_eq!(s.get("foo"), Some(&Element::Value(Value::Number(Number::Signed32(420)))));
    /// //assert_eq!(s.value("foo"), Some(&420i32))
    /// ```
    ///
    #[doc(hidden)]
    // Bound fixed from `From<&Element>` to `TryFrom<&Element>`: with a `From`
    // bound the blanket `TryFrom` impl is infallible, so the failure path could
    // never trigger and fallible conversions could not be used at all. Every
    // `From` impl still satisfies `TryFrom`, so this is backward compatible.
    pub fn value<'x, V: TryFrom<&'x Element>>(&'x self, key: &str) -> Option<V> {
        V::try_from(self.0.get(key)?).ok()
    }
}
impl From<HashMap<String, Element>> for Struct {
fn from(v: HashMap<String, Element>) -> Self {
Self(v)
}
}
impl TryFrom<Element> for Struct {
    type Error = ();
    /// Succeeds only for the `Element::Struct` variant; all others yield `Err(())`.
    fn try_from(value: Element) -> Result<Self, Self::Error> {
        match value {
            Element::Struct(map) => Ok(Struct(map)),
            _ => Err(()),
        }
    }
}
|
#[cfg(test)]
#[path = "../../tests/unit/validation/vehicles_test.rs"]
mod vehicles_test;
use super::*;
use crate::parse_time;
use crate::utils::combine_error_results;
use crate::validation::common::get_time_windows;
use hashbrown::HashSet;
use std::cmp::Ordering;
use std::ops::Deref;
use vrp_core::models::common::TimeWindow;
use vrp_core::utils::compare_floats;
/// Checks that fleet has no vehicle with duplicate type ids.
/// Checks that fleet has no vehicle with duplicate type ids.
fn check_e1300_no_vehicle_types_with_duplicate_type_ids(ctx: &ValidationContext) -> Result<(), FormatError> {
    match get_duplicates(ctx.vehicles().map(|vehicle| &vehicle.type_id)) {
        None => Ok(()),
        Some(ids) => Err(FormatError::new(
            "E1300".to_string(),
            "duplicated vehicle type ids".to_string(),
            format!("remove duplicated vehicle type ids: {}", ids.join(", ")),
        )),
    }
}
/// Checks that fleet has no vehicle with duplicate ids.
/// Checks that fleet has no vehicle with duplicate ids.
fn check_e1301_no_vehicle_types_with_duplicate_ids(ctx: &ValidationContext) -> Result<(), FormatError> {
    let all_vehicle_ids = ctx.vehicles().flat_map(|vehicle| vehicle.vehicle_ids.iter());
    match get_duplicates(all_vehicle_ids) {
        None => Ok(()),
        Some(ids) => Err(FormatError::new(
            "E1301".to_string(),
            "duplicated vehicle ids".to_string(),
            format!("remove duplicated vehicle ids: {}", ids.join(", ")),
        )),
    }
}
/// Checks that vehicle shift time is correct.
fn check_e1302_vehicle_shift_time(ctx: &ValidationContext) -> Result<(), FormatError> {
    // Collect type ids of vehicles whose shift start/end raw time pairs are invalid.
    let type_ids = ctx
        .vehicles()
        .filter_map(|vehicle| {
            // For each shift, pair its earliest start with its latest end; a
            // shift without an end uses the start as a degenerate interval.
            let tws = vehicle
                .shifts
                .iter()
                .map(|shift| {
                    vec![
                        shift.start.earliest.clone(),
                        shift.end.as_ref().map_or_else(|| shift.start.earliest.clone(), |end| end.latest.clone()),
                    ]
                })
                .collect::<Vec<_>>();
            if check_raw_time_windows(&tws, false) {
                None
            } else {
                Some(vehicle.type_id.to_string())
            }
        })
        .collect::<Vec<_>>();
    if type_ids.is_empty() {
        Ok(())
    } else {
        Err(FormatError::new(
            "E1302".to_string(),
            "invalid start or end times in vehicle shift".to_string(),
            format!(
                "ensure that start and end time conform shift time rules, vehicle type ids: {}",
                type_ids.join(", ")
            ),
        ))
    }
}
/// Checks that break time window is correct.
fn check_e1303_vehicle_breaks_time_is_correct(ctx: &ValidationContext) -> Result<(), FormatError> {
    let type_ids = get_invalid_type_ids(
        ctx,
        Box::new(|_, shift, shift_time| {
            shift
                .breaks
                .as_ref()
                .map(|breaks| {
                    // Only time-window breaks are validated; other break kinds
                    // (e.g. offset-based) are filtered out here.
                    let tws = breaks
                        .iter()
                        .filter_map(|b| match &b.time {
                            VehicleBreakTime::TimeWindow(tw) => Some(get_time_window_from_vec(tw)),
                            _ => None,
                        })
                        .collect::<Vec<_>>();
                    // Break windows must not intersect each other (flag = false).
                    check_shift_time_windows(shift_time, tws, false)
                })
                // A shift with no breaks is trivially valid.
                .unwrap_or(true)
        }),
    );
    if type_ids.is_empty() {
        Ok(())
    } else {
        Err(FormatError::new(
            "E1303".to_string(),
            "invalid break time windows in vehicle shift".to_string(),
            format!("ensure that break conform rules, vehicle type ids: '{}'", type_ids.join(", ")),
        ))
    }
}
/// Checks that reload time windows are correct.
///
/// A shift without reloads — or reloads without time windows — is valid; any
/// present reload windows must satisfy the shift-window rules (intersection
/// check skipped, unlike breaks).
fn check_e1304_vehicle_reload_time_is_correct(ctx: &ValidationContext) -> Result<(), FormatError> {
    let type_ids = get_invalid_type_ids(
        ctx,
        Box::new(|_, shift, shift_time| {
            shift
                .reloads
                .as_ref()
                .map(|reloads| {
                    // `flat_map` replaces the former `map(..).flatten()` chain
                    // (clippy::map_flatten) — behavior is identical.
                    let tws = reloads
                        .iter()
                        .filter_map(|reload| reload.times.as_ref())
                        .flat_map(|tws| get_time_windows(tws))
                        .collect::<Vec<_>>();
                    check_shift_time_windows(shift_time, tws, true)
                })
                .unwrap_or(true)
        }),
    );
    if type_ids.is_empty() {
        Ok(())
    } else {
        Err(FormatError::new(
            "E1304".to_string(),
            "invalid reload time windows in vehicle shift".to_string(),
            format!("ensure that reload conform rules, vehicle type ids: '{}'", type_ids.join(", ")),
        ))
    }
}
/// Checks that vehicle area restrictions are valid.
fn check_e1305_vehicle_limit_area_is_correct(ctx: &ValidationContext) -> Result<(), FormatError> {
    // A vehicle is invalid when it declares `allowed_areas` that is either empty
    // or contains an area whose outer shape has fewer than three coordinates
    // (not a polygon). Vehicles without limits/areas are fine.
    let type_ids = ctx
        .vehicles()
        .filter(|vehicle| {
            vehicle
                .limits
                .as_ref()
                .and_then(|l| l.allowed_areas.as_ref())
                .map_or(false, |areas| areas.is_empty() || areas.iter().any(|area| area.outer_shape.len() < 3))
        })
        .map(|vehicle| vehicle.type_id.to_string())
        .collect::<Vec<_>>();
    if type_ids.is_empty() {
        Ok(())
    } else {
        Err(FormatError::new(
            "E1305".to_string(),
            "invalid allowed area definition in vehicle limits".to_string(),
            format!(
                "ensure that areas list is not empty and each area has at least three coordinates, \
                 vehicle type ids: '{}'",
                type_ids.join(", ")
            ),
        ))
    }
}
/// Checks that vehicle dispatch definitions are valid: limit time windows are
/// ordered and inside the shift window, limit maximums sum to the number of
/// vehicle ids, and dispatch locations are unique.
fn check_e1306_vehicle_dispatch_is_correct(ctx: &ValidationContext) -> Result<(), FormatError> {
    let type_ids = get_invalid_type_ids(
        ctx,
        Box::new(move |vehicle, shift, shift_time| {
            shift.dispatch.as_ref().map_or(true, |dispatch| {
                // Each limit's [start, end] must be ordered and both endpoints
                // must fall within the shift time window (when one exists).
                let has_valid_tw = dispatch.iter().flat_map(|dispatch| dispatch.limits.iter()).all(|limit| {
                    let start = parse_time(&limit.start);
                    let end = parse_time(&limit.end);
                    compare_floats(start, end) != Ordering::Greater
                        && shift_time.as_ref().map_or(true, |tw| {
                            TimeWindow::new(start, start).intersects(tw) && TimeWindow::new(end, end).intersects(tw)
                        })
                });
                // The per-dispatch limit maxima must account for every vehicle id.
                let has_valid_max = dispatch.iter().all(|dispatch| {
                    dispatch.limits.iter().map(|limit| limit.max).sum::<usize>() == vehicle.vehicle_ids.len()
                });
                // Dispatch locations must be pairwise distinct.
                has_valid_tw
                    && has_valid_max
                    && dispatch.iter().map(|dispatch| dispatch.location.clone()).collect::<HashSet<_>>().len()
                        == dispatch.len()
            })
        }),
    );
    if type_ids.is_empty() {
        Ok(())
    } else {
        Err(FormatError::new(
            "E1306".to_string(),
            "invalid dispatch in vehicle shift".to_string(),
            format!(
                "ensure that all dispatch have proper dispatch parameters and unique locations. Vehicle type ids: '{}'",
                type_ids.join(", ")
            ),
        ))
    }
}
/// Checks that at least one of a vehicle's time/distance costs is non-zero.
/// (Previous doc comment about "area restrictions" was a copy-paste error.)
fn check_e1307_vehicle_has_no_zero_costs(ctx: &ValidationContext) -> Result<(), FormatError> {
    let type_ids = ctx
        .vehicles()
        .filter(|vehicle| {
            compare_floats(vehicle.costs.time, 0.) == Ordering::Equal
                && compare_floats(vehicle.costs.distance, 0.) == Ordering::Equal
        })
        .map(|vehicle| vehicle.type_id.to_string())
        .collect::<Vec<_>>();
    if type_ids.is_empty() {
        Ok(())
    } else {
        Err(FormatError::new(
            "E1307".to_string(),
            // NOTE(review): message says "duration" but the checked field is
            // `costs.distance` — confirm intended wording before changing the
            // user-facing string.
            "time and duration costs are zeros".to_string(),
            format!(
                "ensure that either time or distance cost is non-zero, \
                 vehicle type ids: '{}'",
                type_ids.join(", ")
            ),
        ))
    }
}
fn get_invalid_type_ids(
ctx: &ValidationContext,
check_shift: Box<dyn Fn(&VehicleType, &VehicleShift, Option<TimeWindow>) -> bool>,
) -> Vec<String> {
ctx.vehicles()
.filter_map(|vehicle| {
let all_correct =
vehicle.shifts.iter().all(|shift| check_shift.deref()(vehicle, shift, get_shift_time_window(shift)));
if all_correct {
None
} else {
Some(vehicle.type_id.clone())
}
})
.collect::<Vec<_>>()
}
/// Validates a set of shift-related time windows: trivially true when empty;
/// otherwise the windows must pass `check_time_windows` and, when a shift
/// window is given, every window must intersect it.
fn check_shift_time_windows(
    shift_time: Option<TimeWindow>,
    tws: Vec<Option<TimeWindow>>,
    skip_intersection_check: bool,
) -> bool {
    tws.is_empty()
        || (check_time_windows(&tws, skip_intersection_check)
            && shift_time
                .as_ref()
                // NOTE(review): `tw.unwrap()` assumes `check_time_windows`
                // rejects any `None` entries before we get here — confirm.
                .map_or(true, |shift_time| tws.into_iter().map(|tw| tw.unwrap()).all(|tw| tw.intersects(shift_time))))
}
/// Derives the time window of a shift from its earliest start and latest end.
/// An open-ended shift (no `end`) uses a far-future sentinel date so the
/// window effectively extends indefinitely.
fn get_shift_time_window(shift: &VehicleShift) -> Option<TimeWindow> {
    get_time_window(
        &shift.start.earliest,
        &shift.end.clone().map_or_else(|| "2200-07-04T00:00:00Z".to_string(), |end| end.latest),
    )
}
/// Validates vehicles from the fleet.
///
/// Runs every E13xx rule and aggregates all failures into a single error list
/// via `combine_error_results`.
pub fn validate_vehicles(ctx: &ValidationContext) -> Result<(), Vec<FormatError>> {
    combine_error_results(&[
        check_e1300_no_vehicle_types_with_duplicate_type_ids(ctx),
        check_e1301_no_vehicle_types_with_duplicate_ids(ctx),
        check_e1302_vehicle_shift_time(ctx),
        check_e1303_vehicle_breaks_time_is_correct(ctx),
        check_e1304_vehicle_reload_time_is_correct(ctx),
        check_e1305_vehicle_limit_area_is_correct(ctx),
        check_e1306_vehicle_dispatch_is_correct(ctx),
        check_e1307_vehicle_has_no_zero_costs(ctx),
    ])
}
|
use serde::{Deserialize, Serialize};
use structopt::StructOpt;
use persist_core::error::Error;
use persist_core::protocol::PruneRequest;
use crate::daemon;
use crate::format;
// Command-line options for the `prune` command.
// NOTE: the `///` comment on the field below doubles as the structopt help
// text shown to users, so it is deliberately left untouched.
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize, StructOpt)]
pub struct Opts {
    /// Also prune file of stopped, but still managed, processes.
    #[structopt(long)]
    pub stopped: bool,
}
/// Handles the `prune` command: asks the daemon to prune files and prints a
/// success line per pruned file (or a "nothing to prune" notice).
pub async fn handle(opts: Opts) -> Result<(), Error> {
    let mut daemon = daemon::connect().await?;
    let response = daemon
        .prune(PruneRequest { stopped: opts.stopped })
        .await?;
    if response.pruned_files.is_empty() {
        format::success("nothing to prune");
    } else {
        for pruned in response.pruned_files {
            format::success(format!("'{}' successfully pruned", pruned));
        }
    }
    Ok(())
}
|
//! The RFC 2389 Options (`OPTS`) command
//
// The OPTS (options) command allows a user-PI to specify the desired
// behavior of a server-FTP process when another FTP command (the target
// command) is later issued. The exact behavior, and syntax, will vary
// with the target command indicated, and will be specified with the
// definition of that command. Where no OPTS behavior is defined for a
// particular command there are no options available for that command.
use crate::{
auth::UserDetail,
server::controlchan::{
error::ControlChanError,
handler::{CommandContext, CommandHandler},
Reply, ReplyCode,
},
storage::{Metadata, StorageBackend},
};
use async_trait::async_trait;
/// The parameters that can be given to the `OPTS` command, specifying the option the client wants
/// to set.
#[derive(Debug, PartialEq, Eq, Clone)]
pub enum Opt {
    /// The client wants us to enable UTF-8 encoding for file paths and such.
    /// `on` indicates whether UTF-8 should be switched on or off.
    Utf8 { on: bool },
}
/// Handler for the `OPTS` control-channel command; holds the parsed option.
#[derive(Debug)]
pub struct Opts {
    // The option the client asked us to set.
    option: Opt,
}
impl Opts {
pub fn new(option: Opt) -> Self {
Opts { option }
}
}
#[async_trait]
impl<Storage, User> CommandHandler<Storage, User> for Opts
where
    User: UserDetail + 'static,
    Storage: StorageBackend<User> + 'static,
    Storage::Metadata: Metadata,
{
    /// Replies to `OPTS`: UTF-8 is always on, so enabling it is a no-op
    /// success and disabling it is not supported.
    #[tracing_attributes::instrument]
    async fn handle(&self, _args: CommandContext<Storage, User>) -> Result<Reply, ControlChanError> {
        // `Opt` currently has a single variant, so this pattern is irrefutable.
        let Opt::Utf8 { on } = &self.option;
        let reply = if *on {
            Reply::new(ReplyCode::CommandOkay, "Always in UTF-8 mode.")
        } else {
            Reply::new(ReplyCode::CommandNotImplementedForParameter, "Non UTF-8 mode not supported")
        };
        Ok(reply)
    }
}
|
extern crate stava;
#[macro_use]
extern crate clap;
#[macro_use]
extern crate include_dir;
use clap::{Arg, Command};
use include_dir::Dir;
use stava::{Stava, StavaResult};
use std::collections::HashMap;
use std::ffi::OsStr;
use std::fs;
use std::path::Path;
use std::process::exit;
const OPT_NAME_WORD: &str = "WORD";
const OPT_NAME_FILES: &str = "FILES";
const FLAG_INC_DEFAULT_WORDS: &str = "flag_inc_default_words";
const FLAG_RETURN_EXIT_CODE: &str = "flag_return_exit_code";
const FLAG_ONLY_EXIT_CODE: &str = "flag_only_exit_code";
const ASSETS_DIR: Dir = include_dir!("src/assets");
/// Entry point: parses CLI arguments, builds the word model from the given
/// files (and/or the bundled default word list), corrects the input word and
/// reports the result via stdout and/or the process exit code.
fn main() {
    let matches = Command::new("stava")
        .version(crate_version!())
        .author(crate_authors!())
        .about(crate_description!())
        .arg(
            Arg::new(OPT_NAME_WORD)
                .help("Word to correct")
                .required(true)
                .index(1),
        )
        .arg(
            Arg::new(OPT_NAME_FILES)
                .help("Files to learn words from")
                .takes_value(true)
                .multiple_values(true)
                .required(false)
                .validator_os(exists_on_filesystem)
                .index(2),
        )
        .arg(
            Arg::new(FLAG_INC_DEFAULT_WORDS)
                .help("Include default set of words (default: false)")
                .short('d')
                .long("default"),
        )
        .arg(
            Arg::new(FLAG_RETURN_EXIT_CODE)
                .help("Exit with 1 if word is not spelled correctly, otherwise 0 (default: false)")
                .short('e')
                .long("exit-code"),
        )
        .arg(
            Arg::new(FLAG_ONLY_EXIT_CODE)
                .help("Only return exit code and not corrected word (default: false)")
                .short('o')
                .long("exit-code-only"),
        )
        .get_matches();

    let mut stava = Stava { words_w_count: HashMap::new() };

    match matches.values_of(OPT_NAME_FILES) {
        Some(files) => {
            // The default word list is only mixed in on explicit request
            // when the user supplies their own files.
            if matches.is_present(FLAG_INC_DEFAULT_WORDS) {
                stava.learn(get_default_words());
            }
            for path in files.map(Path::new) {
                let contents = fs::read_to_string(path)
                    .unwrap_or_else(|_| panic!("Could not read the file: {}", path.display()));
                stava.learn(contents.as_str());
            }
        }
        // No files provided by user - use default word file
        None => stava.learn(get_default_words()),
    }

    let result = stava.correct(matches.value_of(OPT_NAME_WORD).unwrap());
    if matches.is_present(FLAG_ONLY_EXIT_CODE) {
        exit_with_code(result)
    } else {
        println!("{}", result.word);
        if matches.is_present(FLAG_RETURN_EXIT_CODE) {
            exit_with_code(result)
        }
    }
}
/// Terminates the process: exit status 1 signals that the input word was
/// misspelled (a correction was made), 0 that it was already correct.
fn exit_with_code(result: StavaResult) -> ! {
    let status = if result.was_corrected { 1 } else { 0 };
    exit(status)
}
/// Returns the bundled default word list (`src/assets/words.txt`, embedded at
/// compile time via `include_dir`).
///
/// # Panics
/// Panics if the embedded file is missing or not valid UTF-8 — both indicate
/// a broken build rather than a runtime condition.
fn get_default_words() -> &'static str {
    // The panic message is static, so `expect` is the idiomatic (and
    // clippy-clean) replacement for `unwrap_or_else(|| panic!(...))`;
    // the message printed on failure is identical.
    ASSETS_DIR
        .get_file("words.txt")
        .and_then(|file| file.contents_utf8())
        .expect("Could not get default words")
}
/// clap validator: accepts the argument when it names an existing filesystem
/// path, otherwise returns the error message shown to the user.
fn exists_on_filesystem(path: &OsStr) -> Result<(), String> {
    // `Some(path).map(Path::new).map(Path::exists).unwrap_or(false)` was a
    // roundabout way of spelling this direct check.
    if Path::new(path).exists() {
        Ok(())
    } else {
        Err(format!("File not found [{:?}]", path))
    }
}
|
//! RPC wrapper for `/status` endpoint
use crate::{jsonrpc, node_info::NodeInfo};
use serde::{de::Error as DeError, Deserialize, Deserializer, Serialize, Serializer};
/// Request the status of the node.
///
/// Parameterless request; its endpoint path is provided by the
/// `jsonrpc::Request` impl.
#[derive(Default)]
pub struct Status;
impl jsonrpc::Request for Status {
    // Responses to this request deserialize into `StatusResponse`.
    type Response = StatusResponse;

    /// Endpoint path this request is sent to.
    fn path(&self) -> gaunt::Path {
        "/status".into()
    }
}
/// Status responses
///
/// Top-level payload returned by the `/status` endpoint.
#[derive(Clone, Debug, Deserialize, Serialize)]
pub struct StatusResponse {
    /// Node information
    pub node_info: NodeInfo,
    /// Sync information
    pub sync_info: SyncInfo,
    /// Validator information
    pub validator_info: ValidatorInfo,
}

impl jsonrpc::Response for StatusResponse {}
/// Sync information
///
/// Block-synchronization state of the node as reported by `/status`.
#[derive(Clone, Debug, Deserialize, Serialize)]
pub struct SyncInfo {
    /// Latest block hash
    pub latest_block_hash: tendermint::Hash,
    /// Latest app hash
    pub latest_app_hash: tendermint::Hash,
    /// Latest block height
    pub latest_block_height: tendermint::block::Height,
    /// Latest block time
    pub latest_block_time: tendermint::Timestamp,
    /// Are we catching up?
    pub catching_up: bool,
}
/// Validator information
///
/// Identity and voting power of this node's validator, as reported by `/status`.
#[derive(Clone, Debug, Deserialize, Serialize)]
pub struct ValidatorInfo {
    /// Validator account address
    pub address: tendermint::account::Id,
    /// Validator public key
    pub pub_key: tendermint::PublicKey,
    /// Validator voting power
    pub voting_power: VotingPower,
}
/// Voting power
///
/// Newtype over `u64`; note the hand-written `Serialize`/`Deserialize` impls
/// in this module encode it as a decimal string on the wire.
#[derive(Copy, Clone, Debug, Eq, PartialEq, PartialOrd, Ord)]
pub struct VotingPower(u64);
impl VotingPower {
    /// Get the current voting power as a raw `u64`.
    pub fn value(self) -> u64 {
        self.0
    }
}
impl From<VotingPower> for u64 {
fn from(power: VotingPower) -> u64 {
power.0
}
}
impl<'de> Deserialize<'de> for VotingPower {
    /// Parses voting power from its wire form, a decimal string.
    fn deserialize<D: Deserializer<'de>>(deserializer: D) -> Result<Self, D::Error> {
        let raw = String::deserialize(deserializer)?;
        let value = raw
            .parse()
            .map_err(|e| D::Error::custom(format!("{}", e)))?;
        Ok(VotingPower(value))
    }
}
impl Serialize for VotingPower {
    /// Encodes voting power as a decimal string, matching the RPC wire format.
    fn serialize<S: Serializer>(&self, serializer: S) -> Result<S::Ok, S::Error> {
        let as_string = self.0.to_string();
        as_string.serialize(serializer)
    }
}
|
#![cfg_attr(not(feature = "force_macros11_mode"),
feature(rustc_attrs, rustc_private,
rustc_macro_internals, plugin))]
#![cfg_attr(not(feature = "force_macros11_mode"),
plugin(namedarg_hack))]
#![feature(rustc_macro, rustc_macro_lib)]
// for testing: #![feature(rustc_macro_internals, rustc_private)]
extern crate rustc_macro;
mod namedarg;
#[cfg(feature = "force_macros11_mode")]
include!("macros11.rs");
#[cfg(not(feature = "force_macros11_mode"))]
include!("nightly.rs");
|
//
// All datetimes in chore are implicitly durations over the provided resolution.
// For example, "2001-02-03" describes 2001-02-03T00:00:00 to 2001-02-03T23:59:59.
//
use chrono::{Datelike, Timelike};
/// A datetime plus an implicit resolution: the value denotes the whole span
/// `[start, start + duration)` rather than a single instant (see the module
/// header comment).
#[derive(Clone, Debug, PartialEq)]
pub struct Date {
    // Inclusive beginning of the span.
    start: chrono::NaiveDateTime,
    // Resolution / length of the span; see `Duration`.
    duration: self::Duration,
}
/// Calendar-aware duration units. `Years` carries `i32` to match chrono's
/// year type; all other variants carry `i64`.
#[derive(Clone, Debug, PartialEq)]
pub enum Duration {
    Seconds(i64),
    Minutes(i64),
    Hours(i64),
    Days(i64),
    Months(i64),
    Years(i32),
}
use Duration::*;
impl Date {
    /// Wraps a chrono datetime as a second-resolution `Date`, discarding
    /// sub-second precision.
    pub fn from_chrono(date: &chrono::NaiveDateTime) -> Date {
        Date {
            start: date.with_nanosecond(0).unwrap(),
            duration: Seconds(1),
        }
    }

    /// Parses `str` as an absolute date first, falling back to a date
    /// relative to the reference date `cf`.
    pub fn new(str: &str, cf: &Date) -> Option<Date> {
        Date::from_abs(str).or_else(|| Date::from_rel(str, cf))
    }

    /// Parses an absolute date prefix of the form
    /// `YYYY[-MM[-DD[THH[:MM[:SS]]]]]`; the resolution of the returned `Date`
    /// matches the last field supplied. Any trailing content fails the parse.
    pub fn from_abs(str: &str) -> Option<Date> {
        // Year is exactly the first four characters.
        let (year, str) = (str.get(0..4)?.parse().ok()?, str.get(4..)?);
        if str.is_empty() {
            return Date::from_fields(year, 1, 1, 0, 0, 0, Years(1));
        }
        let (month, str) = Date::parse_field(str, '-')?;
        if str.is_empty() {
            return Date::from_fields(year, month, 1, 0, 0, 0, Months(1));
        }
        let (day, str) = Date::parse_field(str, '-')?;
        if str.is_empty() {
            return Date::from_fields(year, month, day, 0, 0, 0, Days(1));
        }
        let (hour, str) = Date::parse_field(str, 'T')?;
        if str.is_empty() {
            return Date::from_fields(year, month, day, hour, 0, 0, Hours(1));
        }
        let (minute, str) = Date::parse_field(str, ':')?;
        if str.is_empty() {
            return Date::from_fields(year, month, day, hour, minute, 0, Minutes(1));
        }
        let (second, str) = Date::parse_field(str, ':')?;
        if str.is_empty() {
            return Date::from_fields(year, month, day, hour, minute, second, Seconds(1));
        }
        // Trailing characters after the seconds field: not a valid date.
        None
    }

    /// Parses a relative date expression against `cf`. Formats are tried in
    /// this order: `HH:MM:SS`, `HH:MM`, bare hour, weekday name, ordinal day
    /// of month, month name, signed unit offset (`2h`, `-1w`, ...), then
    /// named dates (`today`, `tomorrow`, ...).
    pub fn from_rel(str: &str, cf: &Date) -> Option<Date> {
        Date::from_relative_hms(str, cf)
            .or_else(|| Date::from_relative_hm(str, cf))
            .or_else(|| Date::from_relative_hour(str, cf))
            .or_else(|| Date::from_relative_weekday(str, cf))
            .or_else(|| Date::from_relative_day_of_month(str, cf))
            .or_else(|| Date::from_relative_month(str, cf))
            .or_else(|| Date::from_relative_offset(str, cf))
            .or_else(|| Date::from_relative_named(str, cf))
    }

    /// `HH:MM:SS`: the next occurrence of that wall-clock time, at second
    /// resolution (a time equal to `cf`'s is kept as-is; see `next_time`).
    fn from_relative_hms(str: &str, cf: &Date) -> Option<Date> {
        let target = chrono::NaiveTime::parse_from_str(str, "%H:%M:%S").ok()?;
        let date = Date {
            start: cf.start,
            duration: Seconds(1),
        };
        Some(Date::next_time(date, &target))
    }

    /// `HH:MM`: the next occurrence of that wall-clock minute, at minute
    /// resolution.
    fn from_relative_hm(str: &str, cf: &Date) -> Option<Date> {
        let target = chrono::NaiveTime::parse_from_str(str, "%H:%M").ok()?;
        let date = Date {
            start: cf.start,
            duration: Minutes(1),
        };
        Some(Date::next_time(date, &target))
    }

    /// Bare hour (e.g. `"14"`): the next occurrence of that hour, at hour
    /// resolution.
    fn from_relative_hour(str: &str, cf: &Date) -> Option<Date> {
        let hour: u32 = str.parse().ok()?;
        let target = chrono::NaiveTime::from_hms_opt(hour, 0, 0)?;
        let date = Date {
            start: cf.start,
            duration: Hours(1),
        };
        Some(Date::next_time(date, &target))
    }

    /// Weekday name (`"mon"`, `"Friday"`): the next such weekday strictly
    /// after `cf`'s day; the same weekday as `cf` lands a full week ahead.
    fn from_relative_weekday(str: &str, cf: &Date) -> Option<Date> {
        let weekday = str.parse::<chrono::Weekday>().ok()?;
        let current_weekday: i64 = cf.start.weekday().number_from_monday().into();
        let target_weekday: i64 = weekday.number_from_monday().into();
        // Anchor at midnight of the reference day, day resolution.
        let date = Date {
            start: cf.start.with_second(0)?.with_minute(0)?.with_hour(0)?,
            duration: Days(1),
        };
        if target_weekday > current_weekday {
            Some(date + Days(target_weekday - current_weekday))
        } else {
            Some(date + Days(7 + target_weekday - current_weekday))
        }
    }

    /// Ordinal day of month (`"1st"`, `"23rd"`): the next date carrying that
    /// day number, wrapping into a later month when today's day number has
    /// already been reached or the day does not exist in the target month.
    fn from_relative_day_of_month(str: &str, cf: &Date) -> Option<Date> {
        let target_day = str
            .strip_suffix("st")
            .or_else(|| str.strip_suffix("nd"))
            .or_else(|| str.strip_suffix("rd"))
            .or_else(|| str.strip_suffix("th"))?
            .parse::<i64>()
            .ok()
            .filter(|&day| (1..=31).contains(&day))?;
        let current_day = cf.start.day() as i64;
        let mut date = Date {
            start: cf.start.with_second(0)?.with_minute(0)?.with_hour(0)?,
            duration: Days(1),
        };
        let day_delta = Days(target_day - current_day);
        // Target day already passed (or is today): move into the next month.
        if cf.start.day() as i64 >= target_day {
            date += Months(1);
        }
        // Target day missing from that month (e.g. Feb 30th): skip one more.
        // NOTE(review): `+= Months(1)` clamps the day-of-month (see the Add
        // impl), which interacts with `day_delta` computed from the original
        // day; the commented-out "30th"/"31th" cases in the tests below hint
        // this path may be off for long target days — confirm.
        if (&date + &day_delta).start.month() != date.start.month() {
            date += Months(1);
        }
        Some(date + day_delta)
    }

    /// Month name (`"mar"`, `"January"`): the next such month strictly after
    /// the current one; the current month itself lands one year ahead.
    fn from_relative_month(str: &str, cf: &Date) -> Option<Date> {
        let target_month = str.parse::<chrono::Month>().ok()?.number_from_month() as i64;
        let current_month = cf.start.month() as i64;
        // Anchor at midnight on the 1st of the reference month, month resolution.
        let date = Date {
            start: cf
                .start
                .with_second(0)?
                .with_minute(0)?
                .with_hour(0)?
                .with_day(1)?,
            duration: Months(1),
        };
        if target_month > current_month {
            Some(date + Months(target_month - current_month))
        } else {
            Some(date + Years(1) + Months(target_month - current_month))
        }
    }

    /// Signed offset with a unit suffix applied to `cf`: `s`econds,
    /// `m`inutes, `h`ours, `d`ays, `W` (working days, skipping weekends),
    /// `w`eeks, `M`onths, `y`ears — e.g. `"-1d"`, `"3W"`, `"10y"`.
    fn from_relative_offset(str: &str, cf: &Date) -> Option<Date> {
        let offset = str
            .strip_suffix('s')
            .and_then(|str| str.parse::<i64>().ok())
            .map(Seconds)
            .or_else(|| {
                str.strip_suffix('m')
                    .and_then(|str| str.parse::<i64>().ok())
                    .map(Minutes)
            })
            .or_else(|| {
                str.strip_suffix('h')
                    .and_then(|str| str.parse::<i64>().ok())
                    .map(Hours)
            })
            .or_else(|| {
                str.strip_suffix('d')
                    .and_then(|str| str.parse::<i64>().ok())
                    .map(Days)
            })
            .or_else(|| {
                str.strip_suffix('W')
                    .and_then(|str| str.parse::<i64>().ok())
                    // Handle first sub-week section which goes through a
                    // weekend directly.
                    //
                    // Each arm maps (requested working days, current weekday)
                    // to (remaining working days, calendar-day adjustment),
                    // resolving the first partial week by table lookup.
                    .map(|dur| match (dur, cf.start.weekday()) {
                        (i64::MIN..=-1, chrono::Weekday::Sun) => (dur + 1, -2),
                        (0, chrono::Weekday::Sun) => (dur, 0),
                        (1..=i64::MAX, chrono::Weekday::Sun) => (dur - 1, 1),
                        (i64::MIN..=-1, chrono::Weekday::Mon) => (dur + 1, -3),
                        (0..=i64::MAX, chrono::Weekday::Mon) => (dur, 0),
                        (i64::MIN..=-2, chrono::Weekday::Tue) => (dur + 2, -4),
                        (-1..=3, chrono::Weekday::Tue) => (dur, 0),
                        (4..=i64::MAX, chrono::Weekday::Tue) => (dur - 4, 6),
                        (i64::MIN..=-3, chrono::Weekday::Wed) => (dur + 3, -5),
                        (-2..=2, chrono::Weekday::Wed) => (dur, 0),
                        (3..=i64::MAX, chrono::Weekday::Wed) => (dur - 3, 5),
                        (i64::MIN..=-4, chrono::Weekday::Thu) => (dur + 4, -6),
                        (-3..=1, chrono::Weekday::Thu) => (dur, 0),
                        (2..=i64::MAX, chrono::Weekday::Thu) => (dur - 2, 4),
                        (i64::MIN..=-5, chrono::Weekday::Fri) => (dur + 5, -7),
                        (-4..=0, chrono::Weekday::Fri) => (dur, 0),
                        (1..=i64::MAX, chrono::Weekday::Fri) => (dur - 1, 3),
                        (i64::MIN..=-6, chrono::Weekday::Sat) => (dur + 6, -8),
                        (-5, chrono::Weekday::Sat) => (dur + 5, -5),
                        (-4..=0, chrono::Weekday::Sat) => (dur, 0),
                        (1..=i64::MAX, chrono::Weekday::Sat) => (dur - 1, 2),
                    })
                    // Do rest by converting every five weekdays to seven days.
                    .map(|(dur, adj)| Days((dur / 5 * 7) + (dur % 5) + adj))
            })
            .or_else(|| {
                str.strip_suffix('w')
                    .and_then(|str| str.parse::<i64>().ok())
                    .map(|dur| Days(dur * 7))
            })
            .or_else(|| {
                str.strip_suffix('M')
                    .and_then(|str| str.parse::<i64>().ok())
                    .map(Months)
            })
            .or_else(|| {
                str.strip_suffix('y')
                    .and_then(|str| str.parse::<i32>().ok())
                    .map(Years)
            })?;
        Some(cf + offset)
    }

    /// Named relative dates: `today`, `tomorrow`/`tom`, `yesterday`/`yes`
    /// (midnight-anchored at day resolution) and `now` (returns `cf` as-is).
    fn from_relative_named(str: &str, cf: &Date) -> Option<Date> {
        let offset = if str.eq("today") {
            Duration::Days(0)
        } else if str.eq("tomorrow") || str.eq("tom") {
            Duration::Days(1)
        } else if str.eq("yesterday") || str.eq("yes") {
            Duration::Days(-1)
        } else if str.eq("now") {
            // `now` keeps cf's full resolution, not day resolution.
            return Some(cf.clone());
        } else {
            return None;
        };
        Some(
            Date {
                start: cf.start.with_second(0)?.with_minute(0)?.with_hour(0)?,
                duration: Duration::Days(1),
            } + offset,
        )
    }

    /// Consumes the separator `sep` followed by exactly two characters
    /// parsed as a number; returns the value and the remaining input.
    fn parse_field(str: &str, sep: char) -> Option<(u32, &str)> {
        if !str.starts_with(sep) {
            None
        } else if let (Some(val), Some(str)) =
            (str.get(1..3).and_then(|s| s.parse().ok()), str.get(3..))
        {
            Some((val, str))
        } else {
            None
        }
    }

    /// Builds a `Date` from calendar fields, failing on impossible dates
    /// (month 13, Feb 30th, second 60, ...).
    fn from_fields(
        year: i32,
        month: u32,
        day: u32,
        hour: u32,
        minute: u32,
        second: u32,
        duration: Duration,
    ) -> Option<Date> {
        Some(Date {
            start: chrono::NaiveDate::from_ymd_opt(year, month, day)?
                .and_hms_opt(hour, minute, second)?,
            duration,
        })
    }

    /// Advances `date` to the next instant whose wall-clock time equals
    /// `target` (a time already equal to `date`'s is kept). Works field by
    /// field from seconds upward, carrying one unit of the next larger field
    /// whenever a target field lies in the past.
    fn next_time(mut date: Date, target: &chrono::NaiveTime) -> Date {
        if target.second() < date.start.second() {
            date += Minutes(1);
        };
        date += Seconds(target.second() as i64 - date.start.second() as i64);
        if target.minute() < date.start.minute() {
            date += Hours(1);
        };
        date += Minutes(target.minute() as i64 - date.start.minute() as i64);
        if target.hour() < date.start.hour() {
            date += Days(1);
        };
        date += Hours(target.hour() as i64 - date.start.hour() as i64);
        date
    }

    /// True when `self`'s whole span lies inside `other`'s span.
    pub fn within(&self, other: &Date) -> bool {
        other.start <= self.start && other.end() >= self.end()
    }

    /// True when `self`'s span ends no later than `other` begins.
    pub fn before(&self, other: &Date) -> bool {
        self.end() <= other.start
    }

    /// True when `self` begins no earlier than `other`'s span ends.
    pub fn after(&self, other: &Date) -> bool {
        other.end() <= self.start
    }

    /// Exclusive end of the span: `start` advanced by one `duration` unit.
    fn end(&self) -> chrono::NaiveDateTime {
        (self + &self.duration).start
    }
}
impl std::fmt::Display for Date {
    /// Renders the date truncated to the resolution implied by `duration`:
    /// `Years` prints `YYYY`, `Months` `YYYY-MM`, and so on down to the full
    /// `YYYY-MM-DDTHH:MM:SS` form for `Seconds`. Output is identical to the
    /// per-variant format strings this used to spell out.
    fn fmt(&self, fmt: &mut std::fmt::Formatter) -> std::fmt::Result {
        let s = &self.start;
        write!(fmt, "{:04}", s.year())?;
        if let Years(_) = self.duration {
            return Ok(());
        }
        write!(fmt, "-{:02}", s.month())?;
        if let Months(_) = self.duration {
            return Ok(());
        }
        write!(fmt, "-{:02}", s.day())?;
        if let Days(_) = self.duration {
            return Ok(());
        }
        write!(fmt, "T{:02}", s.hour())?;
        if let Hours(_) = self.duration {
            return Ok(());
        }
        write!(fmt, ":{:02}", s.minute())?;
        if let Minutes(_) = self.duration {
            return Ok(());
        }
        write!(fmt, ":{:02}", s.second())
    }
}
impl std::ops::Add<&Duration> for &Date {
    type Output = Date;

    /// Adds a duration to the date's start, preserving its resolution.
    /// Seconds through days map directly onto fixed-length chrono durations;
    /// months and years need calendar-aware clamping (see the arms below).
    fn add(self, other: &Duration) -> Date {
        let start = match *other {
            Seconds(dur) => self.start + chrono::Duration::seconds(dur),
            Minutes(dur) => self.start + chrono::Duration::minutes(dur),
            Hours(dur) => self.start + chrono::Duration::hours(dur),
            Days(dur) => self.start + chrono::Duration::days(dur),
            // chrono does not provide month duration because of variable month length.
            // Shorten to fit.
            Months(dur) => {
                let months_from_epoch =
                    (self.start.year() * 12) + (self.start.month0() as i32) + dur as i32;
                // NOTE(review): a negative `months_from_epoch` (dates before
                // year 0 or a huge negative offset) makes `% 12` negative and
                // the `as u32` cast below feeds `with_month0` an out-of-range
                // month, panicking at `.unwrap()` — confirm inputs stay CE.
                let new_year = months_from_epoch / 12;
                let new_month0 = (months_from_epoch % 12) as u32;
                let new_day = self.start.day();
                // Step via the 1st so each `with_*` below is always valid.
                let new_date = self
                    .start
                    .with_day(1)
                    .unwrap() // e.g. January 32nd; can't happen with 1st
                    .with_month0(new_month0)
                    .unwrap() // e.g. 13th month; can't happen with above `% 12`
                    .with_year(new_year)
                    .unwrap(); // e.g. leap day within non-leap year; can't happen with 1st
                // Most extreme input is 31st but maximum day for the given month is 28th. Try
                // reducing target date to fit up to 31-28 = 3 times.
                new_date
                    .with_day(new_day) // 31st
                    .or_else(|| new_date.with_day(new_day - 1)) // 30th
                    .or_else(|| new_date.with_day(new_day - 2)) // 29th
                    .or_else(|| new_date.with_day(new_day - 3)) // 28th
                    .unwrap()
            }
            // chrono does not provide year duration because of variable year length, i.e. leap
            // years day. Shorten to fit.
            Years(dur) => {
                let target_year = self.start.year() + dur;
                // Feb 29th in a non-leap target year: fall back one day (Feb 28th).
                self.start
                    .with_year(target_year)
                    .or_else(|| (self.start - chrono::Duration::days(1)).with_year(target_year))
                    .unwrap()
            }
        };
        Date {
            start,
            duration: self.duration.clone(),
        }
    }
}
impl std::ops::Add<Duration> for Date {
    type Output = Date;

    /// Consuming addition; forwards to the `&Date + &Duration` implementation.
    fn add(self, other: Duration) -> Date {
        let (lhs, rhs) = (&self, &other);
        lhs + rhs
    }
}
impl std::ops::Add<Duration> for &Date {
    type Output = Date;

    /// Mixed-ownership addition; borrows the duration and delegates.
    fn add(self, other: Duration) -> Date {
        let rhs = &other;
        self + rhs
    }
}
impl std::ops::AddAssign<Duration> for Date {
    /// In-place addition via the borrowing `Add` implementation.
    fn add_assign(&mut self, other: Duration) {
        let advanced = &*self + &other;
        *self = advanced;
    }
}
#[cfg(test)]
mod tests {
use super::*;
fn test_now() -> Date {
Date::from_fields(2001, 2, 3, 4, 5, 6, Seconds(1)).unwrap()
}
#[test]
fn from_abs() {
for (input, expect) in &[
// happy path
(
"2001-02-03T04:05:06",
Date::from_fields(2001, 2, 3, 4, 5, 6, Seconds(1)),
),
(
"2001-02-03T04:05",
Date::from_fields(2001, 2, 3, 4, 5, 0, Minutes(1)),
),
(
"2001-02-03T04",
Date::from_fields(2001, 2, 3, 4, 0, 0, Hours(1)),
),
(
"2001-02-03",
Date::from_fields(2001, 2, 3, 0, 0, 0, Days(1)),
),
("2001-02", Date::from_fields(2001, 2, 1, 0, 0, 0, Months(1))),
("2001", Date::from_fields(2001, 1, 1, 0, 0, 0, Years(1))),
// unhappy path: non-num fields
("2001-02-03T04:05:xx", None),
("2001-02-03T04:05x", None),
("2001-02-03T04:xx", None),
("2001-02-03Txx", None),
("2001-02-xx", None),
("2001-xx", None),
// unhappy path: invalid dates
("2001-01-32T00:00:00", None),
("2001-02-03T04:05:60", None),
("2000-02-30", None),
("2001-02-29", None),
// unhappy path: bad field sizes
("2001-02-03T04:05:6", None),
("2001-02-03T04:5:06", None),
("2001-02-03T4:05:06", None),
("2001-02-3T04:05:06", None),
("2001-2-03T04:05:06", None),
("201-02-03T04:05:06", None),
// unhappy path: bad field separators
("2001-02-03t04:05:06", None),
("2001:02-03T04:05:06", None),
("2001-02:03T04:05:06", None),
("2001-02-03:04:05:06", None),
("2001T02-03T04:05:06", None),
("2001-02T03T04:05:06", None),
("2001-02-03T04T05:06", None),
("2001-02-03T04:05T06", None),
("2001-02-03-04:05:06", None),
("2001-02-03T04-05:06", None),
("2001-02-03T04:05-06", None),
// unhappy path: trailing content
("2001-02-03T04:05:06 ", None),
// unhappy path: relative dates
("tomorrow", None),
("2h", None),
// unhappy path: not a date
("this is not a date", None),
("", None),
] {
let abs_input = Date::from_abs(input);
assert_eq!(abs_input, *expect);
if expect.is_some() {
let any_input = Date::new(input, &test_now());
assert_eq!(any_input, *expect);
}
}
}
#[test]
fn from_relative_hms() {
for (input, expect) in &[
// wrap day
("00:00:00", Date::from_abs("2001-02-04T00:00:00")),
("01:02:03", Date::from_abs("2001-02-04T01:02:03")),
("04:05:05", Date::from_abs("2001-02-04T04:05:05")),
("04:04:06", Date::from_abs("2001-02-04T04:04:06")),
("03:05:06", Date::from_abs("2001-02-04T03:05:06")),
// same as current time, retain time
("04:05:06", Date::from_abs("2001-02-03T04:05:06")),
// same day
("04:05:07", Date::from_abs("2001-02-03T04:05:07")),
("04:06:06", Date::from_abs("2001-02-03T04:06:06")),
("05:05:06", Date::from_abs("2001-02-03T05:05:06")),
// allow short field sizes
("4:05:06", Date::from_abs("2001-02-03T04:05:06")),
("04:5:06", Date::from_abs("2001-02-03T04:05:06")),
("04:05:6", Date::from_abs("2001-02-03T04:05:06")),
// unhappy path, bad field sizes
("004:05:06", None),
("04:005:06", None),
("04:05:006", None),
("04::05:06", None),
("04::5:06", None),
// unhappy path: impossible times
("04:05:61", None),
("04:60:06", None),
("24:25:06", None),
] {
let rel_input = Date::from_rel(input, &test_now());
assert_eq!(rel_input, *expect);
if expect.is_some() {
let any_input = Date::new(input, &test_now());
assert_eq!(any_input, *expect);
}
}
}
#[test]
fn from_relative_hm() {
for (input, expect) in &[
// wrap day
("00:00", Date::from_abs("2001-02-04T00:00")),
("01:02", Date::from_abs("2001-02-04T01:02")),
("04:04", Date::from_abs("2001-02-04T04:04")),
("03:05", Date::from_abs("2001-02-04T03:05")),
("04:05", Date::from_abs("2001-02-04T04:05")),
// retain day
("04:06", Date::from_abs("2001-02-03T04:06")),
("05:05", Date::from_abs("2001-02-03T05:05")),
// allow short field sizes
("1:2", Date::from_abs("2001-02-04T01:02")),
// unhappy path
("004:05", None),
("04:005", None),
("04::05", None),
("04::5", None),
] {
let rel_input = Date::from_rel(input, &test_now());
assert_eq!(rel_input, *expect);
if expect.is_some() {
let any_input = Date::new(input, &test_now());
assert_eq!(any_input, *expect);
}
}
}
#[test]
fn from_relative_hour() {
for (input, expect) in &[
// wrap day
("00", Date::from_abs("2001-02-04T00")),
// ("01", Date::from_abs("2001-02-04T01")),
// ("04", Date::from_abs("2001-02-04T04")),
// ("03", Date::from_abs("2001-02-04T03")),
// // retain day
// ("05", Date::from_abs("2001-02-03T05")),
// // allow short field sizes
// ("1", Date::from_abs("2001-02-04T01")),
// // unhappy path
// ("004:", None),
// (":005", None),
// ("4::", None),
// (":4:", None),
] {
let rel_input = Date::from_rel(input, &test_now());
assert_eq!(rel_input, *expect);
if expect.is_some() {
let any_input = Date::new(input, &test_now());
assert_eq!(any_input, *expect);
}
}
}
#[test]
fn from_relative_weekday() {
for (input, expect) in &[
("sun", Date::from_abs("2001-02-04")),
("Sun", Date::from_abs("2001-02-04")),
("sunday", Date::from_abs("2001-02-04")),
("Sunday", Date::from_abs("2001-02-04")),
("mon", Date::from_abs("2001-02-05")),
("Mon", Date::from_abs("2001-02-05")),
("monday", Date::from_abs("2001-02-05")),
("Monday", Date::from_abs("2001-02-05")),
("tue", Date::from_abs("2001-02-06")),
("Tue", Date::from_abs("2001-02-06")),
("tuesday", Date::from_abs("2001-02-06")),
("Tuesday", Date::from_abs("2001-02-06")),
("wed", Date::from_abs("2001-02-07")),
("Wed", Date::from_abs("2001-02-07")),
("wednesday", Date::from_abs("2001-02-07")),
("Wednesday", Date::from_abs("2001-02-07")),
("thu", Date::from_abs("2001-02-08")),
("Thu", Date::from_abs("2001-02-08")),
("thursday", Date::from_abs("2001-02-08")),
("Thursday", Date::from_abs("2001-02-08")),
("fri", Date::from_abs("2001-02-09")),
("Fri", Date::from_abs("2001-02-09")),
("friday", Date::from_abs("2001-02-09")),
("Friday", Date::from_abs("2001-02-09")),
("sat", Date::from_abs("2001-02-10")),
("Sat", Date::from_abs("2001-02-10")),
// same as current day, bump to next week
("saturday", Date::from_abs("2001-02-10")),
("Saturday", Date::from_abs("2001-02-10")),
// unhappy path
("", None),
("s", None),
("satu", None),
("stu", None),
("saturdya", None),
("saturdayx", None),
("satxurday", None),
] {
let rel_input = Date::from_rel(input, &test_now());
assert_eq!(rel_input, *expect);
if expect.is_some() {
let any_input = Date::new(input, &test_now());
assert_eq!(any_input, *expect);
}
}
}
#[test]
fn from_relative_day_of_month() {
// input is relative to 2001-02-03T04:05:06
for (input, expect) in &[
// wrap month
("1st", Date::from_abs("2001-03-01")),
("2nd", Date::from_abs("2001-03-02")),
("3rd", Date::from_abs("2001-03-03")),
// retain month
("4th", Date::from_abs("2001-02-04")),
("5th", Date::from_abs("2001-02-05")),
("6th", Date::from_abs("2001-02-06")),
("7th", Date::from_abs("2001-02-07")),
("8th", Date::from_abs("2001-02-08")),
("9th", Date::from_abs("2001-02-09")),
("10th", Date::from_abs("2001-02-10")),
("11th", Date::from_abs("2001-02-11")),
("12th", Date::from_abs("2001-02-12")),
("13th", Date::from_abs("2001-02-13")),
("14th", Date::from_abs("2001-02-14")),
("15th", Date::from_abs("2001-02-15")),
("16th", Date::from_abs("2001-02-16")),
("17th", Date::from_abs("2001-02-17")),
("18th", Date::from_abs("2001-02-18")),
("19th", Date::from_abs("2001-02-19")),
("20th", Date::from_abs("2001-02-20")),
("21st", Date::from_abs("2001-02-21")),
("22nd", Date::from_abs("2001-02-22")),
("23rd", Date::from_abs("2001-02-23")),
("24th", Date::from_abs("2001-02-24")),
("25th", Date::from_abs("2001-02-25")),
("26th", Date::from_abs("2001-02-26")),
("27th", Date::from_abs("2001-02-27")),
("28th", Date::from_abs("2001-02-28")),
// day does not fit in current month, wrap
("29th", Date::from_abs("2001-03-29")),
// ("30th", Date::from_abs("2001-03-30")),
// ("31th", Date::from_abs("2001-03-31")),
// unhappy path
("0th", None),
("32nd", None),
("x1st", None),
] {
let rel_input = Date::from_rel(input, &test_now());
assert_eq!(rel_input, *expect);
if expect.is_some() {
let any_input = Date::new(input, &test_now());
assert_eq!(any_input, *expect);
}
}
}
#[test]
fn from_relative_month() {
for (input, expect) in &[
// wrap year
("jan", Date::from_abs("2002-01")),
("Jan", Date::from_abs("2002-01")),
("january", Date::from_abs("2002-01")),
("January", Date::from_abs("2002-01")),
("feb", Date::from_abs("2002-02")),
("Feb", Date::from_abs("2002-02")),
// same as current month, bump to next year
("february", Date::from_abs("2002-02")),
("February", Date::from_abs("2002-02")),
// retain year
("mar", Date::from_abs("2001-03")),
("Mar", Date::from_abs("2001-03")),
("march", Date::from_abs("2001-03")),
("March", Date::from_abs("2001-03")),
("apr", Date::from_abs("2001-04")),
("Apr", Date::from_abs("2001-04")),
("april", Date::from_abs("2001-04")),
("April", Date::from_abs("2001-04")),
("may", Date::from_abs("2001-05")),
("May", Date::from_abs("2001-05")),
("jun", Date::from_abs("2001-06")),
("Jun", Date::from_abs("2001-06")),
("june", Date::from_abs("2001-06")),
("June", Date::from_abs("2001-06")),
("jul", Date::from_abs("2001-07")),
("Jul", Date::from_abs("2001-07")),
("july", Date::from_abs("2001-07")),
("July", Date::from_abs("2001-07")),
("aug", Date::from_abs("2001-08")),
("Aug", Date::from_abs("2001-08")),
("august", Date::from_abs("2001-08")),
("August", Date::from_abs("2001-08")),
("sep", Date::from_abs("2001-09")),
("Sep", Date::from_abs("2001-09")),
("september", Date::from_abs("2001-09")),
("September", Date::from_abs("2001-09")),
("oct", Date::from_abs("2001-10")),
("Oct", Date::from_abs("2001-10")),
("october", Date::from_abs("2001-10")),
("October", Date::from_abs("2001-10")),
("nov", Date::from_abs("2001-11")),
("Nov", Date::from_abs("2001-11")),
("november", Date::from_abs("2001-11")),
("November", Date::from_abs("2001-11")),
("dec", Date::from_abs("2001-12")),
("Dec", Date::from_abs("2001-12")),
("december", Date::from_abs("2001-12")),
("December", Date::from_abs("2001-12")),
// unhappy path
("janu", None),
("xjan", None),
] {
let rel_input = Date::from_rel(input, &test_now());
assert_eq!(rel_input, *expect);
if expect.is_some() {
let any_input = Date::new(input, &test_now());
assert_eq!(any_input, *expect);
}
}
}
#[test]
fn from_relative_offset() {
for (cf, input, expect) in &[
(Date::from_abs("2001-02-03T04:05:06"), "-1s", Date::from_abs("2001-02-03T04:05:05")),
(Date::from_abs("2001-02-03T04:05:06"), "0s", Date::from_abs("2001-02-03T04:05:06")),
(Date::from_abs("2001-02-03T04:05:06"), "1s", Date::from_abs("2001-02-03T04:05:07")),
(Date::from_abs("2001-02-03T04:05:06"), "59s", Date::from_abs("2001-02-03T04:06:05")),
(Date::from_abs("2001-02-03T04:05:06"), "-1m", Date::from_abs("2001-02-03T04:04:06")),
(Date::from_abs("2001-02-03T04:05:06"), "0m", Date::from_abs("2001-02-03T04:05:06")),
(Date::from_abs("2001-02-03T04:05:06"), "1m", Date::from_abs("2001-02-03T04:06:06")),
(Date::from_abs("2001-02-03T04:05:06"), "59m", Date::from_abs("2001-02-03T05:04:06")),
(Date::from_abs("2001-02-03T04:05:06"), "-1h", Date::from_abs("2001-02-03T03:05:06")),
(Date::from_abs("2001-02-03T04:05:06"), "0h", Date::from_abs("2001-02-03T04:05:06")),
(Date::from_abs("2001-02-03T04:05:06"), "1h", Date::from_abs("2001-02-03T05:05:06")),
(Date::from_abs("2001-02-03T04:05:06"), "23h", Date::from_abs("2001-02-04T03:05:06")),
(Date::from_abs("2001-02-03T04:05:06"), "-1d", Date::from_abs("2001-02-02T04:05:06")),
(Date::from_abs("2001-02-03T04:05:06"), "0d", Date::from_abs("2001-02-03T04:05:06")),
(Date::from_abs("2001-02-03T04:05:06"), "1d", Date::from_abs("2001-02-04T04:05:06")),
(Date::from_abs("2001-02-03T04:05:06"), "27d", Date::from_abs("2001-03-02T04:05:06")),
(Date::from_abs("2001-02-03T04:05:06"), "-1w", Date::from_abs("2001-01-27T04:05:06")),
(Date::from_abs("2001-02-03T04:05:06"), "0w", Date::from_abs("2001-02-03T04:05:06")),
(Date::from_abs("2001-02-03T04:05:06"), "1w", Date::from_abs("2001-02-10T04:05:06")),
(Date::from_abs("2001-02-03T04:05:06"), "52w", Date::from_abs("2002-02-02T04:05:06")),
(Date::from_abs("2001-02-03T04:05:06"), "-1M", Date::from_abs("2001-01-03T04:05:06")),
(Date::from_abs("2001-02-03T04:05:06"), "0M", Date::from_abs("2001-02-03T04:05:06")),
(Date::from_abs("2001-02-03T04:05:06"), "1M", Date::from_abs("2001-03-03T04:05:06")),
(Date::from_abs("2001-02-03T04:05:06"), "11M", Date::from_abs("2002-01-03T04:05:06")),
(Date::from_abs("2001-02-03T04:05:06"), "-1y", Date::from_abs("2000-02-03T04:05:06")),
(Date::from_abs("2001-02-03T04:05:06"), "0y", Date::from_abs("2001-02-03T04:05:06")),
(Date::from_abs("2001-02-03T04:05:06"), "1y", Date::from_abs("2002-02-03T04:05:06")),
(Date::from_abs("2001-02-03T04:05:06"), "10y", Date::from_abs("2011-02-03T04:05:06")),
// Relative weekday from a Sunday
(Date::from_abs("2001-02-04T04:05:06"), "-11W", Date::from_abs("2001-01-19T04:05:06")),
(Date::from_abs("2001-02-04T04:05:06"), "-10W", Date::from_abs("2001-01-22T04:05:06")),
(Date::from_abs("2001-02-04T04:05:06"), "-6W", Date::from_abs("2001-01-26T04:05:06")),
(Date::from_abs("2001-02-04T04:05:06"), "-5W", Date::from_abs("2001-01-29T04:05:06")),
(Date::from_abs("2001-02-04T04:05:06"), "-4W", Date::from_abs("2001-01-30T04:05:06")),
(Date::from_abs("2001-02-04T04:05:06"), "-3W", Date::from_abs("2001-01-31T04:05:06")),
(Date::from_abs("2001-02-04T04:05:06"), "-2W", Date::from_abs("2001-02-01T04:05:06")),
(Date::from_abs("2001-02-04T04:05:06"), "-1W", Date::from_abs("2001-02-02T04:05:06")),
(Date::from_abs("2001-02-04T04:05:06"), "0W", Date::from_abs("2001-02-04T04:05:06")),
(Date::from_abs("2001-02-04T04:05:06"), "1W", Date::from_abs("2001-02-05T04:05:06")),
(Date::from_abs("2001-02-04T04:05:06"), "2W", Date::from_abs("2001-02-06T04:05:06")),
(Date::from_abs("2001-02-04T04:05:06"), "3W", Date::from_abs("2001-02-07T04:05:06")),
(Date::from_abs("2001-02-04T04:05:06"), "4W", Date::from_abs("2001-02-08T04:05:06")),
(Date::from_abs("2001-02-04T04:05:06"), "5W", Date::from_abs("2001-02-09T04:05:06")),
(Date::from_abs("2001-02-04T04:05:06"), "6W", Date::from_abs("2001-02-12T04:05:06")),
(Date::from_abs("2001-02-04T04:05:06"), "10W", Date::from_abs("2001-02-16T04:05:06")),
(Date::from_abs("2001-02-04T04:05:06"), "11W", Date::from_abs("2001-02-19T04:05:06")),
// Relative weekday from a Monday
(Date::from_abs("2001-02-05T04:05:06"), "-11W", Date::from_abs("2001-01-19T04:05:06")),
(Date::from_abs("2001-02-05T04:05:06"), "-10W", Date::from_abs("2001-01-22T04:05:06")),
(Date::from_abs("2001-02-05T04:05:06"), "-6W", Date::from_abs("2001-01-26T04:05:06")),
(Date::from_abs("2001-02-05T04:05:06"), "-5W", Date::from_abs("2001-01-29T04:05:06")),
(Date::from_abs("2001-02-05T04:05:06"), "-4W", Date::from_abs("2001-01-30T04:05:06")),
(Date::from_abs("2001-02-05T04:05:06"), "-3W", Date::from_abs("2001-01-31T04:05:06")),
(Date::from_abs("2001-02-05T04:05:06"), "-2W", Date::from_abs("2001-02-01T04:05:06")),
(Date::from_abs("2001-02-05T04:05:06"), "-1W", Date::from_abs("2001-02-02T04:05:06")),
(Date::from_abs("2001-02-05T04:05:06"), "0W", Date::from_abs("2001-02-05T04:05:06")),
(Date::from_abs("2001-02-05T04:05:06"), "1W", Date::from_abs("2001-02-06T04:05:06")),
(Date::from_abs("2001-02-05T04:05:06"), "2W", Date::from_abs("2001-02-07T04:05:06")),
(Date::from_abs("2001-02-05T04:05:06"), "3W", Date::from_abs("2001-02-08T04:05:06")),
(Date::from_abs("2001-02-05T04:05:06"), "4W", Date::from_abs("2001-02-09T04:05:06")),
(Date::from_abs("2001-02-05T04:05:06"), "5W", Date::from_abs("2001-02-12T04:05:06")),
(Date::from_abs("2001-02-05T04:05:06"), "6W", Date::from_abs("2001-02-13T04:05:06")),
(Date::from_abs("2001-02-05T04:05:06"), "10W", Date::from_abs("2001-02-19T04:05:06")),
(Date::from_abs("2001-02-05T04:05:06"), "11W", Date::from_abs("2001-02-20T04:05:06")),
// Relative weekday from a Tuesday
(Date::from_abs("2001-02-06T04:05:06"), "-11W", Date::from_abs("2001-01-22T04:05:06")),
(Date::from_abs("2001-02-06T04:05:06"), "-10W", Date::from_abs("2001-01-23T04:05:06")),
(Date::from_abs("2001-02-06T04:05:06"), "-6W", Date::from_abs("2001-01-29T04:05:06")),
(Date::from_abs("2001-02-06T04:05:06"), "-5W", Date::from_abs("2001-01-30T04:05:06")),
(Date::from_abs("2001-02-06T04:05:06"), "-4W", Date::from_abs("2001-01-31T04:05:06")),
(Date::from_abs("2001-02-06T04:05:06"), "-3W", Date::from_abs("2001-02-01T04:05:06")),
(Date::from_abs("2001-02-06T04:05:06"), "-2W", Date::from_abs("2001-02-02T04:05:06")),
(Date::from_abs("2001-02-06T04:05:06"), "-1W", Date::from_abs("2001-02-05T04:05:06")),
(Date::from_abs("2001-02-06T04:05:06"), "0W", Date::from_abs("2001-02-06T04:05:06")),
(Date::from_abs("2001-02-06T04:05:06"), "1W", Date::from_abs("2001-02-07T04:05:06")),
(Date::from_abs("2001-02-06T04:05:06"), "2W", Date::from_abs("2001-02-08T04:05:06")),
(Date::from_abs("2001-02-06T04:05:06"), "3W", Date::from_abs("2001-02-09T04:05:06")),
(Date::from_abs("2001-02-06T04:05:06"), "4W", Date::from_abs("2001-02-12T04:05:06")),
(Date::from_abs("2001-02-06T04:05:06"), "5W", Date::from_abs("2001-02-13T04:05:06")),
(Date::from_abs("2001-02-06T04:05:06"), "6W", Date::from_abs("2001-02-14T04:05:06")),
(Date::from_abs("2001-02-06T04:05:06"), "10W", Date::from_abs("2001-02-20T04:05:06")),
(Date::from_abs("2001-02-06T04:05:06"), "11W", Date::from_abs("2001-02-21T04:05:06")),
// Relative weekday from a Wednesday
(Date::from_abs("2001-02-07T04:05:06"), "-11W", Date::from_abs("2001-01-23T04:05:06")),
(Date::from_abs("2001-02-07T04:05:06"), "-10W", Date::from_abs("2001-01-24T04:05:06")),
(Date::from_abs("2001-02-07T04:05:06"), "-6W", Date::from_abs("2001-01-30T04:05:06")),
(Date::from_abs("2001-02-07T04:05:06"), "-5W", Date::from_abs("2001-01-31T04:05:06")),
(Date::from_abs("2001-02-07T04:05:06"), "-4W", Date::from_abs("2001-02-01T04:05:06")),
(Date::from_abs("2001-02-07T04:05:06"), "-3W", Date::from_abs("2001-02-02T04:05:06")),
(Date::from_abs("2001-02-07T04:05:06"), "-2W", Date::from_abs("2001-02-05T04:05:06")),
(Date::from_abs("2001-02-07T04:05:06"), "-1W", Date::from_abs("2001-02-06T04:05:06")),
(Date::from_abs("2001-02-07T04:05:06"), "0W", Date::from_abs("2001-02-07T04:05:06")),
(Date::from_abs("2001-02-07T04:05:06"), "1W", Date::from_abs("2001-02-08T04:05:06")),
(Date::from_abs("2001-02-07T04:05:06"), "2W", Date::from_abs("2001-02-09T04:05:06")),
(Date::from_abs("2001-02-07T04:05:06"), "3W", Date::from_abs("2001-02-12T04:05:06")),
(Date::from_abs("2001-02-07T04:05:06"), "4W", Date::from_abs("2001-02-13T04:05:06")),
(Date::from_abs("2001-02-07T04:05:06"), "5W", Date::from_abs("2001-02-14T04:05:06")),
(Date::from_abs("2001-02-07T04:05:06"), "6W", Date::from_abs("2001-02-15T04:05:06")),
(Date::from_abs("2001-02-07T04:05:06"), "10W", Date::from_abs("2001-02-21T04:05:06")),
(Date::from_abs("2001-02-07T04:05:06"), "11W", Date::from_abs("2001-02-22T04:05:06")),
// Relative weekday from a Thursday
(Date::from_abs("2001-02-08T04:05:06"), "-11W", Date::from_abs("2001-01-24T04:05:06")),
(Date::from_abs("2001-02-08T04:05:06"), "-10W", Date::from_abs("2001-01-25T04:05:06")),
(Date::from_abs("2001-02-08T04:05:06"), "-6W", Date::from_abs("2001-01-31T04:05:06")),
(Date::from_abs("2001-02-08T04:05:06"), "-5W", Date::from_abs("2001-02-01T04:05:06")),
(Date::from_abs("2001-02-08T04:05:06"), "-4W", Date::from_abs("2001-02-02T04:05:06")),
(Date::from_abs("2001-02-08T04:05:06"), "-3W", Date::from_abs("2001-02-05T04:05:06")),
(Date::from_abs("2001-02-08T04:05:06"), "-2W", Date::from_abs("2001-02-06T04:05:06")),
(Date::from_abs("2001-02-08T04:05:06"), "-1W", Date::from_abs("2001-02-07T04:05:06")),
(Date::from_abs("2001-02-08T04:05:06"), "0W", Date::from_abs("2001-02-08T04:05:06")),
(Date::from_abs("2001-02-08T04:05:06"), "1W", Date::from_abs("2001-02-09T04:05:06")),
(Date::from_abs("2001-02-08T04:05:06"), "2W", Date::from_abs("2001-02-12T04:05:06")),
(Date::from_abs("2001-02-08T04:05:06"), "3W", Date::from_abs("2001-02-13T04:05:06")),
(Date::from_abs("2001-02-08T04:05:06"), "4W", Date::from_abs("2001-02-14T04:05:06")),
(Date::from_abs("2001-02-08T04:05:06"), "5W", Date::from_abs("2001-02-15T04:05:06")),
(Date::from_abs("2001-02-08T04:05:06"), "6W", Date::from_abs("2001-02-16T04:05:06")),
(Date::from_abs("2001-02-08T04:05:06"), "10W", Date::from_abs("2001-02-22T04:05:06")),
(Date::from_abs("2001-02-08T04:05:06"), "11W", Date::from_abs("2001-02-23T04:05:06")),
// Relative weekday from a Friday
(Date::from_abs("2001-02-09T04:05:06"), "-11W", Date::from_abs("2001-01-25T04:05:06")),
(Date::from_abs("2001-02-09T04:05:06"), "-10W", Date::from_abs("2001-01-26T04:05:06")),
(Date::from_abs("2001-02-09T04:05:06"), "-6W", Date::from_abs("2001-02-01T04:05:06")),
(Date::from_abs("2001-02-09T04:05:06"), "-5W", Date::from_abs("2001-02-02T04:05:06")),
(Date::from_abs("2001-02-09T04:05:06"), "-4W", Date::from_abs("2001-02-05T04:05:06")),
(Date::from_abs("2001-02-09T04:05:06"), "-3W", Date::from_abs("2001-02-06T04:05:06")),
(Date::from_abs("2001-02-09T04:05:06"), "-2W", Date::from_abs("2001-02-07T04:05:06")),
(Date::from_abs("2001-02-09T04:05:06"), "-1W", Date::from_abs("2001-02-08T04:05:06")),
(Date::from_abs("2001-02-09T04:05:06"), "0W", Date::from_abs("2001-02-09T04:05:06")),
(Date::from_abs("2001-02-09T04:05:06"), "1W", Date::from_abs("2001-02-12T04:05:06")),
(Date::from_abs("2001-02-09T04:05:06"), "2W", Date::from_abs("2001-02-13T04:05:06")),
(Date::from_abs("2001-02-09T04:05:06"), "3W", Date::from_abs("2001-02-14T04:05:06")),
(Date::from_abs("2001-02-09T04:05:06"), "4W", Date::from_abs("2001-02-15T04:05:06")),
(Date::from_abs("2001-02-09T04:05:06"), "5W", Date::from_abs("2001-02-16T04:05:06")),
(Date::from_abs("2001-02-09T04:05:06"), "6W", Date::from_abs("2001-02-19T04:05:06")),
(Date::from_abs("2001-02-09T04:05:06"), "10W", Date::from_abs("2001-02-23T04:05:06")),
(Date::from_abs("2001-02-09T04:05:06"), "11W", Date::from_abs("2001-02-26T04:05:06")),
// Relative weekday from a Saturday
(Date::from_abs("2001-02-10T04:05:06"), "-11W", Date::from_abs("2001-01-26T04:05:06")),
(Date::from_abs("2001-02-10T04:05:06"), "-10W", Date::from_abs("2001-01-29T04:05:06")),
(Date::from_abs("2001-02-10T04:05:06"), "-6W", Date::from_abs("2001-02-02T04:05:06")),
(Date::from_abs("2001-02-10T04:05:06"), "-5W", Date::from_abs("2001-02-05T04:05:06")),
(Date::from_abs("2001-02-10T04:05:06"), "-4W", Date::from_abs("2001-02-06T04:05:06")),
(Date::from_abs("2001-02-10T04:05:06"), "-3W", Date::from_abs("2001-02-07T04:05:06")),
(Date::from_abs("2001-02-10T04:05:06"), "-2W", Date::from_abs("2001-02-08T04:05:06")),
(Date::from_abs("2001-02-10T04:05:06"), "-1W", Date::from_abs("2001-02-09T04:05:06")),
(Date::from_abs("2001-02-10T04:05:06"), "0W", Date::from_abs("2001-02-10T04:05:06")),
(Date::from_abs("2001-02-10T04:05:06"), "1W", Date::from_abs("2001-02-12T04:05:06")),
(Date::from_abs("2001-02-10T04:05:06"), "2W", Date::from_abs("2001-02-13T04:05:06")),
(Date::from_abs("2001-02-10T04:05:06"), "3W", Date::from_abs("2001-02-14T04:05:06")),
(Date::from_abs("2001-02-10T04:05:06"), "4W", Date::from_abs("2001-02-15T04:05:06")),
(Date::from_abs("2001-02-10T04:05:06"), "5W", Date::from_abs("2001-02-16T04:05:06")),
(Date::from_abs("2001-02-10T04:05:06"), "6W", Date::from_abs("2001-02-19T04:05:06")),
(Date::from_abs("2001-02-10T04:05:06"), "10W", Date::from_abs("2001-02-23T04:05:06")),
(Date::from_abs("2001-02-10T04:05:06"), "11W", Date::from_abs("2001-02-26T04:05:06")),
// unhappy path
(Date::from_abs("2001-02-03T04:05:06"), "1x", None),
] {
let rel_input = Date::from_rel(input, cf.as_ref().unwrap());
assert_eq!(rel_input, *expect);
if expect.is_some() {
let any_input = Date::new(input, cf.as_ref().unwrap());
assert_eq!(any_input, *expect);
}
}
}
#[test]
fn from_relative_named() {
    // Named relative dates (and their accepted abbreviations) resolve
    // against the fixed test clock; unrecognized names yield `None`.
    let cases = [
        ("today", Date::from_abs("2001-02-03")),
        ("tomorrow", Date::from_abs("2001-02-04")),
        ("tom", Date::from_abs("2001-02-04")),
        ("yesterday", Date::from_abs("2001-02-02")),
        ("yes", Date::from_abs("2001-02-02")),
        ("now", Date::from_abs("2001-02-03T04:05:06")),
        // unhappy path: an unknown name must not parse
        ("then", None),
    ];
    for (name, want) in &cases {
        // The relative-only parser must handle every named date...
        assert_eq!(Date::from_rel(name, &test_now()), *want);
        // ...and the general entry point must agree whenever parsing
        // succeeds.
        if want.is_some() {
            assert_eq!(Date::new(name, &test_now()), *want);
        }
    }
}
// `Date + Duration` for every `Duration` variant: negative, zero, and
// carrying amounts (e.g. 59 seconds, 23 hours), plus month-length and
// leap-day clamping (2000-02-29 + 1 year -> 2001-02-28).
#[test]
fn add() {
    for (start, dur, expect) in &[
        ("2001-02-03T04:05:06", Seconds(-59), "2001-02-03T04:04:07"),
        ("2001-02-03T04:05:06", Seconds(-1), "2001-02-03T04:05:05"),
        ("2001-02-03T04:05:06", Seconds(0), "2001-02-03T04:05:06"),
        ("2001-02-03T04:05:06", Seconds(1), "2001-02-03T04:05:07"),
        ("2001-02-03T04:05:06", Seconds(59), "2001-02-03T04:06:05"),
        ("2001-02-03T04:05:06", Seconds(119), "2001-02-03T04:07:05"),
        ("2001-02-03T04:05:06", Minutes(-59), "2001-02-03T03:06:06"),
        ("2001-02-03T04:05:06", Minutes(-1), "2001-02-03T04:04:06"),
        ("2001-02-03T04:05:06", Minutes(0), "2001-02-03T04:05:06"),
        ("2001-02-03T04:05:06", Minutes(1), "2001-02-03T04:06:06"),
        ("2001-02-03T04:05:06", Minutes(59), "2001-02-03T05:04:06"),
        ("2001-02-03T04:05:06", Minutes(119), "2001-02-03T06:04:06"),
        ("2001-02-03T04:05:06", Hours(-23), "2001-02-02T05:05:06"),
        ("2001-02-03T04:05:06", Hours(-1), "2001-02-03T03:05:06"),
        ("2001-02-03T04:05:06", Hours(0), "2001-02-03T04:05:06"),
        ("2001-02-03T04:05:06", Hours(1), "2001-02-03T05:05:06"),
        ("2001-02-03T04:05:06", Hours(23), "2001-02-04T03:05:06"),
        ("2001-02-03T04:05:06", Hours(47), "2001-02-05T03:05:06"),
        ("2001-02-03T04:05:06", Days(-30), "2001-01-04T04:05:06"),
        ("2001-02-03T04:05:06", Days(-1), "2001-02-02T04:05:06"),
        ("2001-02-03T04:05:06", Days(0), "2001-02-03T04:05:06"),
        ("2001-02-03T04:05:06", Days(1), "2001-02-04T04:05:06"),
        ("2001-02-03T04:05:06", Days(27), "2001-03-02T04:05:06"),
        ("2001-02-03T04:05:06", Days(58), "2001-04-02T04:05:06"),
        ("2001-02-03T04:05:06", Days(7), "2001-02-10T04:05:06"),
        ("2001-02-03T04:05:06", Months(-11), "2000-03-03T04:05:06"),
        ("2001-02-03T04:05:06", Months(-1), "2001-01-03T04:05:06"),
        ("2001-02-03T04:05:06", Months(0), "2001-02-03T04:05:06"),
        ("2001-02-03T04:05:06", Months(1), "2001-03-03T04:05:06"),
        ("2001-02-03T04:05:06", Months(11), "2002-01-03T04:05:06"),
        ("2001-02-03T04:05:06", Months(23), "2003-01-03T04:05:06"),
        ("2000-01-01T00:00:00", Months(24), "2002-01-01T00:00:00"),
        ("2000-02-29T00:00:00", Months(12), "2001-02-28T00:00:00"),
        ("2001-02-03T04:05:06", Years(-1), "2000-02-03T04:05:06"),
        ("2000-02-29T04:05:06", Years(-1), "1999-02-28T04:05:06"),
        ("2001-02-03T04:05:06", Years(0), "2001-02-03T04:05:06"),
        ("2000-02-29T04:05:06", Years(1), "2001-02-28T04:05:06"),
        ("2000-02-29T04:05:06", Years(2), "2002-02-28T04:05:06"),
        ("2001-02-03T04:05:06", Years(20), "2021-02-03T04:05:06"),
    ] {
        // Fixed test data: parsing these absolute dates cannot fail.
        let start = Date::from_abs(start).unwrap();
        let expect = Date::from_abs(expect).unwrap();
        assert_eq!(&start + dur, expect);
    }
}
// `Date::within`: a date counts as inside another when it falls inside
// the other's span at the other's (possibly coarser) resolution; at
// equal resolution only an exact match qualifies. Coarser dates are
// never "within" finer ones.
// NOTE(review): despite the test name `on_or_within`, the method under
// test here is `within` — confirm the intended method name.
#[test]
fn on_or_within() {
    for (inside, outside, expect) in &[
        ("2001-02-03T04:05:06", "2001-02-03T04:05:06", true),
        ("2001-02-03T04:05:06", "2001-02-03T04:05:07", false),
        ("2001-02-03T04:05:06", "2001-02-03T04:05:05", false),
        ("2001-02-03T04:05:00", "2001-02-03T04:05", true),
        ("2001-02-03T04:05:30", "2001-02-03T04:05", true),
        ("2001-02-03T04:05", "2001-02-03T04:05:00", false),
        ("2001-02-03T04:05:00", "2001-02-03T04", true),
        ("2001-02-03T04:05:30", "2001-02-03T04", true),
        ("2001-02-03T04", "2001-02-03T04:05:00", false),
        ("2001-02-03T04:05", "2001-02-03T04", true),
        ("2001-02-03T04", "2001-02-03T04:05", false),
        ("2001-02-03T04:05", "2001-02-03", true),
        ("2001-02-03", "2001-02-03T04:05", false),
        ("2001-02-03", "2001-02", true),
        ("2001-02", "2001-02-03", false),
        ("2001-02-03", "2001", true),
        ("2001", "2001-02-03", false),
    ] {
        let inside = Date::from_abs(inside).unwrap();
        let outside = Date::from_abs(outside).unwrap();
        assert_eq!(inside.within(&outside), *expect);
    }
}
// `Date::before`: strict ordering at every field, including
// mixed-resolution comparisons (the trailing cases) where a date that
// overlaps the other's span is not strictly before it.
#[test]
fn before() {
    for (before, after, expect) in &[
        ("2001-02-03T04:05:06", "2001-02-03T04:05:07", true),
        ("2001-02-03T04:05:06", "2001-02-03T04:05:06", false),
        ("2001-02-03T04:05:06", "2001-02-03T04:05:05", false),
        ("2001-02-03T04:05:06", "2001-02-03T04:06:06", true),
        ("2001-02-03T04:06:06", "2001-02-03T04:05:05", false),
        ("2001-02-03T04:05:06", "2001-02-03T05:05:06", true),
        ("2001-02-03T04:06:06", "2001-02-03T03:05:06", false),
        ("2001-02-03T04:05:06", "2001-02-04T04:05:06", true),
        ("2001-02-03T04:06:06", "2001-02-02T04:05:06", false),
        ("2001-02-03T04:05:06", "2001-03-03T04:05:06", true),
        ("2001-02-03T04:06:06", "2001-01-03T04:05:06", false),
        ("2001-02-03T04:05:06", "2002-02-03T04:05:06", true),
        ("2001-02-03T04:06:06", "2000-02-03T04:05:06", false),
        ("2001-02-03T04:05", "2001-02-03T04:06", true),
        ("2001-02-03T04:05", "2001-02-03T04:05", false),
        ("2001-02-03T04:05", "2001-02-03T04:04", false),
        ("2001-02-03T04:05", "2001-02-03T05:05", true),
        ("2001-02-03T04:05", "2001-02-03T03:04", false),
        ("2001-02-03T04:05", "2001-02-04T04:05", true),
        ("2001-02-03T04:05", "2001-02-02T04:04", false),
        ("2001-02-03T04:05", "2001-03-03T04:05", true),
        ("2001-02-03T04:05", "2001-01-03T04:05", false),
        ("2001-02-03T04:05", "2002-02-03T04:05", true),
        ("2001-02-03T04:05", "2000-02-03T04:05", false),
        ("2001-02-03", "2001-02-04", true),
        ("2001-02-03", "2001-02-03", false),
        ("2001-02-03", "2001-02-02", false),
        ("2001-02-03T04:05:06", "2001-02-03T04:06", true),
        ("2001-02-03T04:05", "2001-02-03T04:05:06", false),
        ("2001-02-03T04:05:06", "2001-02-03T04:05", false),
        ("2001-02-03T04:05", "2001-02-03T05", true),
        ("2001-02-03T04", "2001-02-03T04:05:06", false),
        ("2001-02-03T04:05", "2001-02-03T04", false),
        ("2001-02-03T04", "2001-02-04", true),
        ("2001-02-03", "2001-02-03T04:05", false),
        ("2001-02-03T04", "2001-02-03", false),
    ] {
        let before = Date::from_abs(before).unwrap();
        let after = Date::from_abs(after).unwrap();
        assert_eq!(before.before(&after), *expect);
    }
}
// `Date::after`: mirror of the `before` cases with operands swapped,
// covering each field plus mixed-resolution comparisons.
#[test]
fn after() {
    for (after, before, expect) in &[
        ("2001-02-03T04:05:07", "2001-02-03T04:05:06", true),
        ("2001-02-03T04:05:06", "2001-02-03T04:05:06", false),
        ("2001-02-03T04:05:05", "2001-02-03T04:05:06", false),
        ("2001-02-03T04:06:06", "2001-02-03T04:05:06", true),
        ("2001-02-03T04:05:05", "2001-02-03T04:06:06", false),
        ("2001-02-03T05:05:06", "2001-02-03T04:05:06", true),
        ("2001-02-03T03:05:06", "2001-02-03T04:06:06", false),
        ("2001-02-04T04:05:06", "2001-02-03T04:05:06", true),
        ("2001-02-02T04:05:06", "2001-02-03T04:06:06", false),
        ("2001-03-03T04:05:06", "2001-02-03T04:05:06", true),
        ("2001-01-03T04:05:06", "2001-02-03T04:06:06", false),
        ("2002-02-03T04:05:06", "2001-02-03T04:05:06", true),
        ("2000-02-03T04:05:06", "2001-02-03T04:06:06", false),
        ("2001-02-03T04:06", "2001-02-03T04:05", true),
        ("2001-02-03T04:05", "2001-02-03T04:05", false),
        ("2001-02-03T04:04", "2001-02-03T04:05", false),
        ("2001-02-03T05:05", "2001-02-03T04:05", true),
        ("2001-02-03T03:04", "2001-02-03T04:05", false),
        ("2001-02-04T04:05", "2001-02-03T04:05", true),
        ("2001-02-02T04:04", "2001-02-03T04:05", false),
        ("2001-03-03T04:05", "2001-02-03T04:05", true),
        ("2001-01-03T04:05", "2001-02-03T04:05", false),
        ("2002-02-03T04:05", "2001-02-03T04:05", true),
        ("2000-02-03T04:05", "2001-02-03T04:05", false),
        ("2001-02-04", "2001-02-03", true),
        ("2001-02-03", "2001-02-03", false),
        ("2001-02-02", "2001-02-03", false),
        ("2001-02-03T04:06", "2001-02-03T04:05:06", true),
        ("2001-02-03T04:05:06", "2001-02-03T04:05", false),
        ("2001-02-03T04:05", "2001-02-03T04:05:06", false),
        ("2001-02-03T05", "2001-02-03T04:05", true),
        ("2001-02-03T04:05:06", "2001-02-03T04", false),
        ("2001-02-03T04", "2001-02-03T04:05", false),
        ("2001-02-04", "2001-02-03T04", true),
        ("2001-02-03T04:05", "2001-02-03", false),
        ("2001-02-03", "2001-02-03T04", false),
    ] {
        let after = Date::from_abs(after).unwrap();
        let before = Date::from_abs(before).unwrap();
        assert_eq!(after.after(&before), *expect);
    }
}
#[test]
fn fmt() {
    // Display output stops at the date's resolution (the `Duration`
    // granularity): fields finer than the resolution are not printed.
    let cases = [
        (
            Date::from_fields(2001, 2, 3, 4, 5, 6, Duration::Seconds(1)).unwrap(),
            "2001-02-03T04:05:06",
        ),
        (
            Date::from_fields(2001, 2, 3, 4, 5, 0, Duration::Minutes(1)).unwrap(),
            "2001-02-03T04:05",
        ),
        (
            // seconds are set but sit below minute resolution,
            // so they must be dropped from the rendering
            Date::from_fields(2001, 2, 3, 4, 5, 6, Duration::Minutes(1)).unwrap(),
            "2001-02-03T04:05",
        ),
        (
            Date::from_fields(2001, 2, 3, 4, 0, 0, Duration::Hours(1)).unwrap(),
            "2001-02-03T04",
        ),
        (
            Date::from_fields(2001, 2, 3, 0, 0, 0, Duration::Days(1)).unwrap(),
            "2001-02-03",
        ),
        (
            Date::from_fields(2001, 2, 3, 0, 0, 0, Duration::Months(1)).unwrap(),
            "2001-02",
        ),
        (
            Date::from_fields(2001, 2, 3, 0, 0, 0, Duration::Years(1)).unwrap(),
            "2001",
        ),
    ];
    for (date, rendered) in &cases {
        assert_eq!(date.to_string(), *rendered);
    }
}
}
|
use std::path::Path;
use ignore::{self, DirEntry};
use log;
/// A configuration for describing how subjects should be built.
///
/// The manual `impl Default` set `strip_dot_prefix` to `false`, which is
/// exactly what `#[derive(Default)]` produces for a `bool`, so the derive
/// replaces it.
#[derive(Clone, Debug, Default)]
struct Config {
    /// When true, a leading `./` is stripped from subject paths.
    strip_dot_prefix: bool,
}
/// A builder for constructing things to search over.
#[derive(Clone, Debug)]
pub struct SubjectBuilder {
    // All builder state lives in the shared `Config`; the builder itself
    // is cheap to clone.
    config: Config,
}
impl SubjectBuilder {
    /// Return a new subject builder with a default configuration.
    pub fn new() -> SubjectBuilder {
        SubjectBuilder { config: Config::default() }
    }

    /// Create a new subject from a possibly missing directory entry.
    ///
    /// If the directory entry isn't present, then the corresponding error is
    /// logged if messages have been configured. Otherwise, if the subject is
    /// deemed searchable, then it is returned.
    pub fn build_from_result(
        &self,
        result: Result<DirEntry, ignore::Error>,
    ) -> Option<Subject> {
        // Guard-style: peel off the error case first, then delegate.
        let dent = match result {
            Ok(dent) => dent,
            Err(err) => {
                err_message!("{}", err);
                return None;
            }
        };
        self.build(dent)
    }

    /// Create a new subject using this builder's configuration.
    ///
    /// If a subject could not be created or should otherwise not be searched,
    /// then this returns `None` after emitting any relevant log messages.
    pub fn build(&self, dent: DirEntry) -> Option<Subject> {
        let subject = Subject {
            dent,
            strip_dot_prefix: self.config.strip_dot_prefix,
        };
        if let Some(ignore_err) = subject.dent.error() {
            ignore_message!("{}", ignore_err);
        }
        // Stdin is searched unconditionally.
        if subject.dent.is_stdin() {
            return Some(subject);
        }
        // Depth 0 means the entry was named explicitly by the end user
        // (or via a shell glob), so search it whenever it is anything
        // file-like (e.g., a symlink) rather than a directory.
        if subject.dent.depth() == 0 && !subject.is_dir() {
            return Some(subject);
        }
        // Past this point only genuine files qualify. Symlinks are
        // omitted here; if following symlinks was enabled, the directory
        // traversal already resolved them.
        if subject.is_file() {
            return Some(subject);
        }
        // Nothing matched. Debug-log the rejection, but stay quiet for
        // directories, since rejecting those is routine and would be
        // noisy.
        if !subject.is_dir() {
            log::debug!(
                "ignoring {}: failed to pass subject filter: \
                 file type: {:?}, metadata: {:?}",
                subject.dent.path().display(),
                subject.dent.file_type(),
                subject.dent.metadata()
            );
        }
        None
    }

    /// When enabled, if the subject's file path starts with `./` then it is
    /// stripped.
    ///
    /// This is useful when implicitly searching the current working directory.
    pub fn strip_dot_prefix(&mut self, yes: bool) -> &mut SubjectBuilder {
        self.config.strip_dot_prefix = yes;
        self
    }
}
/// A subject is a thing we want to search. Generally, a subject is either a
/// file or stdin.
#[derive(Clone, Debug)]
pub struct Subject {
    // The underlying directory entry produced by the `ignore` traversal.
    dent: DirEntry,
    // Copied from the builder config at construction time; see
    // `SubjectBuilder::strip_dot_prefix`.
    strip_dot_prefix: bool,
}
impl Subject {
    /// Return the file path corresponding to this subject.
    ///
    /// If this subject corresponds to stdin, then a special `<stdin>` path
    /// is returned instead.
    pub fn path(&self) -> &Path {
        let raw = self.dent.path();
        if !self.strip_dot_prefix {
            return raw;
        }
        // `strip_prefix` only succeeds when the path actually starts
        // with `./`; otherwise fall back to the untouched path.
        raw.strip_prefix("./").unwrap_or(raw)
    }

    /// Returns true if and only if this entry corresponds to stdin.
    pub fn is_stdin(&self) -> bool {
        self.dent.is_stdin()
    }

    /// Returns true if and only if this subject points to a directory after
    /// following symbolic links.
    fn is_dir(&self) -> bool {
        match self.dent.file_type() {
            None => false,
            Some(ft) if ft.is_dir() => true,
            // A symlink needs to be followed to decide whether its
            // target is a directory.
            Some(_) => self.dent.path_is_symlink() && self.dent.path().is_dir(),
        }
    }

    /// Returns true if and only if this subject points to a file.
    fn is_file(&self) -> bool {
        match self.dent.file_type() {
            Some(ft) => ft.is_file(),
            None => false,
        }
    }
}
|
use std::collections::HashMap;
/// Per-row horizontal bounds, keyed by row index `y`.
///
/// Each entry maps a row to its `(lower, upper)` bound pair.
#[derive(Clone, Debug)]
pub struct Bounds {
    // row index -> (lower bound, upper bound)
    bd: HashMap<usize, (usize, usize)>,
}

impl Bounds {
    /// Create a bounds map seeded with a single row `y` and its
    /// `(lower, upper)` pair.
    pub fn new(y: usize, (lb, hb): (usize, usize)) -> Self {
        let mut bd = HashMap::new();
        bd.insert(y, (lb, hb));
        Self { bd }
    }

    /// Lower bound for row `y`.
    ///
    /// # Panics
    /// Panics if `y` has no entry.
    pub fn lower_bound(&self, y: usize) -> usize {
        self.bd[&y].0
    }

    /// Upper bound for row `y`.
    ///
    /// # Panics
    /// Panics if `y` has no entry.
    pub fn upper_bound(&self, y: usize) -> usize {
        self.bd[&y].1
    }

    /// Mutable access to the `(lower, upper)` pair for row `y`, if present.
    pub fn get_mut(&mut self, y: usize) -> Option<&mut (usize, usize)> {
        self.bd.get_mut(&y)
    }

    /// Insert (or replace) the bounds for row `y`.
    pub fn insert(&mut self, y: usize, (lb, hb): (usize, usize)) {
        self.bd.insert(y, (lb, hb));
    }

    /// Whether row `y` has a bounds entry.
    ///
    /// Takes `&self` (the original needlessly required `&mut self`);
    /// relaxing the receiver is backward compatible for all callers.
    pub fn contains(&self, y: usize) -> bool {
        self.bd.contains_key(&y)
    }

    /// Drop rows `0..n`, then renumber every surviving row down by one.
    ///
    /// NOTE(review): surviving keys shift by exactly 1 regardless of `n`,
    /// so for `n > 1` a gap of row indices remains, and for `n == 0` a
    /// surviving key `0` would underflow in `k - 1` — confirm callers
    /// only use `n >= 1`. Behavior is preserved from the original.
    pub fn shift_keys_left(&mut self, n: usize) {
        // Equivalent to removing each key in `0..n` one at a time.
        self.bd.retain(|&k, _| k >= n);
        let shifted: HashMap<usize, (usize, usize)> =
            self.bd.iter().map(|(&k, &v)| (k - 1, v)).collect();
        self.bd = shifted;
    }
}
|
#![cfg_attr(test, deny(warnings))]
//#![deny(missing_docs)]
//! # httpparse
//!
//! A chunks-based, asynchronous HTTP parser.
//!
extern crate hyper;
use std::default::Default;
use hyper::header::Headers;
use hyper::status::StatusCode as Status;
use self::Next::{Continue, Break};
/// Flow-control signal returned by a [`Chunks`] callback: `Continue` to
/// keep receiving chunks, `Break` to stop the feed.
pub enum Next { Continue, Break }
/// A source of byte chunks consumed one chunk at a time.
///
/// The callback receives the current chunk plus the remaining source and
/// signals via [`Next`] whether to keep pulling. (The unnamed closure
/// parameter is pre-2018 trait syntax, kept as-is.)
pub trait Chunks {
    fn chunk<F>(self, F) where F: for<'a> FnOnce(&'a [u8], Self) -> Next + 'static;
}
/// An incremental, continuation-passing parser over byte chunks.
///
/// `update` consumes one chunk and reports, via [`ParserResult`], whether
/// to hand off to a successor parser (`Next`), finish (`Out`/`Error`), or
/// keep feeding the same parser (`Continue`). The default `parse` drives
/// that state machine over a [`Chunks`] source.
pub trait Parser: Default + Sized + 'static {
    // Successor parser; must share the same final output and error types.
    type Next: Parser<Error=Self::Error, Out=Self::Out>;
    type Error;
    type Out;
    // Consume one chunk. (Unnamed parameter is pre-2018 trait syntax.)
    fn update(self, &[u8]) -> ParserResult<Self>;
    fn parse<C, F>(self, src: C, cb: F)
        where F: FnOnce(Result<Self::Out, Self::Error>) + 'static,
              C: Chunks {
        src.chunk(move |chunk, rest| {
            // Terminal arms invoke the callback and return `Break` from
            // the closure early; the recursive arms re-enter `parse` on
            // the remaining source and then fall through to `Continue`.
            match self.update(chunk) {
                ParserResult::Next(next) => next.parse(rest, cb),
                ParserResult::Error(err) => { cb(Err(err)); return Next::Break },
                ParserResult::Out(val) => { cb(Ok(val)); return Next::Break },
                ParserResult::Continue(slf) => slf.parse(rest, cb)
            };
            Next::Continue
        })
    }
}
/// Outcome of one [`Parser::update`] step.
pub enum ParserResult<P: Parser> {
    /// Hand the remaining input to the successor parser.
    Next(P::Next),
    /// Parsing failed; terminal.
    Error(P::Error),
    /// Parsing produced a value; terminal.
    Out(P::Out),
    /// Same parser needs more input.
    Continue(P)
}
|
use std::io::{BufReader, Read};
use thiserror::Error;
use super::{lookup::LookupRef, value::Value};
/// A type that can be decoded from a buffered binary stream.
///
/// `lookup` carries an optional string-lookup table; the primitive impls
/// visible here ignore it, while composite readers pass it through.
pub trait CelesteIo: Sized {
    type Error;
    fn read<R: Read>(
        reader: &mut BufReader<R>,
        lookup: Option<LookupRef<'_>>,
    ) -> Result<Self, Self::Error>;
}
impl CelesteIo for bool {
    type Error = std::io::Error;

    /// Decode a single byte; any nonzero value reads as `true`.
    fn read<R: Read>(
        reader: &mut BufReader<R>,
        _lookup: Option<LookupRef<'_>>,
    ) -> Result<Self, std::io::Error> {
        let mut byte = [0u8; 1];
        reader.read_exact(&mut byte)?;
        Ok(byte[0] != 0)
    }
}
// Generates a `CelesteIo` impl for a fixed-size primitive: read exactly
// `$size` bytes and decode them little-endian via `from_le_bytes`.
macro_rules! impl_value_type_prim {
    ( $(( $size:literal, $x:ty )),* ) => {
        $(
            impl CelesteIo for $x {
                type Error = std::io::Error;
                fn read<R: Read>(
                    reader: &mut BufReader<R>,
                    _lookup: Option<LookupRef<'_>>,
                ) -> Result<Self, std::io::Error> {
                    let mut buf = [0u8; $size];
                    reader.read_exact(&mut buf)?;
                    Ok(<$x>::from_le_bytes(buf))
                }
            }
        )*
    };
}
// Each pair is (byte width, primitive type); widths must match the
// type's `from_le_bytes` array length.
impl_value_type_prim!((1, u8), (2, i16), (2, u16), (4, i32), (4, u32), (4, f32));
/// A string byte-length decoded from a 7-bit variable-length integer.
pub struct StringLength(pub usize);
impl CelesteIo for StringLength {
    type Error = std::io::Error;
    fn read<R: Read>(
        reader: &mut BufReader<R>,
        lookup: Option<LookupRef<'_>>,
    ) -> Result<Self, std::io::Error> {
        // LEB128-style decode: each byte contributes its low 7 bits,
        // least-significant group first; the high bit set means another
        // byte follows.
        let mut result = 0usize;
        let mut bit_offset = 0usize;
        loop {
            let byte = u8::read(reader, lookup)?;
            result += ((byte & 0b0111_1111) as usize) << bit_offset;
            bit_offset += 7;
            if byte & 0b1000_0000 == 0 {
                return Ok(StringLength(result));
            }
        }
    }
}
/// Failure modes when decoding a string from the binary stream: either
/// the underlying read failed or the bytes were not valid UTF-8.
#[derive(Error, Debug)]
pub enum StringReadError {
    #[error("failed to read string bytes")]
    Io(#[from] std::io::Error),
    #[error("failed to decode string as utf")]
    Utf(#[from] std::string::FromUtf8Error),
}
// Newtype wrappers distinguishing the two on-disk string encodings; both
// unwrap into a plain `Value::String`.
/// A string stored as a varint length followed by raw bytes.
pub struct NonRleString(pub String);
/// A string stored run-length encoded as (count, byte) pairs.
pub struct RleString(pub String);
impl From<NonRleString> for Value {
    fn from(x: NonRleString) -> Self {
        Value::String(x.0)
    }
}
impl From<RleString> for Value {
    fn from(x: RleString) -> Self {
        Value::String(x.0)
    }
}
impl CelesteIo for NonRleString {
    type Error = StringReadError;

    /// Read a length-prefixed, non-run-length-encoded UTF-8 string.
    ///
    /// The byte count comes from a leading [`StringLength`] varint; the
    /// payload is then read with one bulk `read_exact` instead of the
    /// original per-byte `u8::read` loop — same bytes, same
    /// `UnexpectedEof` failure on truncated input, but without a trait
    /// round trip per byte.
    fn read<R: Read>(
        reader: &mut BufReader<R>,
        lookup: Option<LookupRef<'_>>,
    ) -> Result<Self, Self::Error> {
        let size = StringLength::read(reader, lookup)?.0;
        let mut bytes = vec![0u8; size];
        reader.read_exact(&mut bytes)?;
        Ok(NonRleString(String::from_utf8(bytes)?))
    }
}
impl CelesteIo for RleString {
    type Error = StringReadError;
    // Run-length-encoded string: a u16 encoded-length prefix, then
    // `size / 2` (run count, byte) pairs, each expanded into `count`
    // copies of the byte before UTF-8 validation.
    // NOTE(review): with an odd `size` the final encoded byte is never
    // consumed (`0..size / 2` truncates) — confirm the format guarantees
    // an even length.
    fn read<R: Read>(
        reader: &mut BufReader<R>,
        lookup: Option<LookupRef<'_>>,
    ) -> Result<Self, Self::Error> {
        let size = u16::read(reader, lookup)? as usize;
        let mut bytes = Vec::new();
        for _ in 0..size / 2 {
            let times = u8::read(reader, lookup)?;
            let byte = u8::read(reader, lookup)?;
            for _ in 0..times {
                bytes.push(byte);
            }
        }
        Ok(RleString(String::from_utf8(bytes)?))
    }
}
|
// svd2rust-style generated reader/writer type aliases for the DBG_CR
// register and its two single-bit fields (see the register doc below,
// which links to the svd2rust API docs).
#[doc = "Register `DBG_CR` reader"]
pub type R = crate::R<DBG_CR_SPEC>;
#[doc = "Register `DBG_CR` writer"]
pub type W = crate::W<DBG_CR_SPEC>;
#[doc = "Field `DBG_STOP` reader - Debug Stop mode"]
pub type DBG_STOP_R = crate::BitReader;
#[doc = "Field `DBG_STOP` writer - Debug Stop mode"]
pub type DBG_STOP_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `DBG_STANDBY` reader - Debug Standby and Shutdown modes"]
pub type DBG_STANDBY_R = crate::BitReader;
#[doc = "Field `DBG_STANDBY` writer - Debug Standby and Shutdown modes"]
pub type DBG_STANDBY_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
// Read proxies: each getter extracts one bit from the cached register
// value (`self.bits`) and wraps it in a `BitReader`.
impl R {
    #[doc = "Bit 1 - Debug Stop mode"]
    #[inline(always)]
    pub fn dbg_stop(&self) -> DBG_STOP_R {
        DBG_STOP_R::new(((self.bits >> 1) & 1) != 0)
    }
    #[doc = "Bit 2 - Debug Standby and Shutdown modes"]
    #[inline(always)]
    pub fn dbg_standby(&self) -> DBG_STANDBY_R {
        DBG_STANDBY_R::new(((self.bits >> 2) & 1) != 0)
    }
}
// Write proxies: each method returns a `BitWriter` targeting the field's
// bit offset (encoded in the const generic parameter).
impl W {
    #[doc = "Bit 1 - Debug Stop mode"]
    #[inline(always)]
    #[must_use]
    pub fn dbg_stop(&mut self) -> DBG_STOP_W<DBG_CR_SPEC, 1> {
        DBG_STOP_W::new(self)
    }
    #[doc = "Bit 2 - Debug Standby and Shutdown modes"]
    #[inline(always)]
    #[must_use]
    pub fn dbg_standby(&mut self) -> DBG_STANDBY_W<DBG_CR_SPEC, 2> {
        DBG_STANDBY_W::new(self)
    }
    #[doc = "Writes raw bits to the register."]
    #[inline(always)]
    // `unsafe` per svd2rust convention: a raw whole-register write
    // bypasses the typed per-field writers, so the caller is responsible
    // for supplying a valid bit pattern.
    pub unsafe fn bits(&mut self, bits: u32) -> &mut Self {
        self.bits = bits;
        self
    }
}
#[doc = "DBG configuration register\n\nYou can [`read`](crate::generic::Reg::read) this register and get [`dbg_cr::R`](R). You can [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero) this register using [`dbg_cr::W`](W). You can also [`modify`](crate::generic::Reg::modify) this register. See [API](https://docs.rs/svd2rust/#read--modify--write-api)."]
// Zero-sized register marker; the trait impls below describe the
// register's width, access modes, and reset value.
pub struct DBG_CR_SPEC;
// 32-bit register.
impl crate::RegisterSpec for DBG_CR_SPEC {
    type Ux = u32;
}
#[doc = "`read()` method returns [`dbg_cr::R`](R) reader structure"]
impl crate::Readable for DBG_CR_SPEC {}
#[doc = "`write(|w| ..)` method takes [`dbg_cr::W`](W) writer structure"]
impl crate::Writable for DBG_CR_SPEC {
    // No write-0-to-clear / write-1-to-clear fields in this register.
    const ZERO_TO_MODIFY_FIELDS_BITMAP: Self::Ux = 0;
    const ONE_TO_MODIFY_FIELDS_BITMAP: Self::Ux = 0;
}
#[doc = "`reset()` method sets DBG_CR to value 0"]
impl crate::Resettable for DBG_CR_SPEC {
    const RESET_VALUE: Self::Ux = 0;
}
|
// svd2rust-generated register/field type aliases for FIR0 (DSI Host force
// interrupt register 0): 16 force-acknowledge-error flags and 5 force-PHY-error flags.
#[doc = "Register `FIR0` reader"]
pub type R = crate::R<FIR0_SPEC>;
#[doc = "Register `FIR0` writer"]
pub type W = crate::W<FIR0_SPEC>;
#[doc = "Field `FAE0` reader - Force acknowledge error 0"]
pub type FAE0_R = crate::BitReader;
#[doc = "Field `FAE0` writer - Force acknowledge error 0"]
pub type FAE0_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `FAE1` reader - Force acknowledge error 1"]
pub type FAE1_R = crate::BitReader;
#[doc = "Field `FAE1` writer - Force acknowledge error 1"]
pub type FAE1_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `FAE2` reader - Force acknowledge error 2"]
pub type FAE2_R = crate::BitReader;
#[doc = "Field `FAE2` writer - Force acknowledge error 2"]
pub type FAE2_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `FAE3` reader - Force acknowledge error 3"]
pub type FAE3_R = crate::BitReader;
#[doc = "Field `FAE3` writer - Force acknowledge error 3"]
pub type FAE3_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `FAE4` reader - Force acknowledge error 4"]
pub type FAE4_R = crate::BitReader;
#[doc = "Field `FAE4` writer - Force acknowledge error 4"]
pub type FAE4_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `FAE5` reader - Force acknowledge error 5"]
pub type FAE5_R = crate::BitReader;
#[doc = "Field `FAE5` writer - Force acknowledge error 5"]
pub type FAE5_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `FAE6` reader - Force acknowledge error 6"]
pub type FAE6_R = crate::BitReader;
#[doc = "Field `FAE6` writer - Force acknowledge error 6"]
pub type FAE6_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `FAE7` reader - Force acknowledge error 7"]
pub type FAE7_R = crate::BitReader;
#[doc = "Field `FAE7` writer - Force acknowledge error 7"]
pub type FAE7_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `FAE8` reader - Force acknowledge error 8"]
pub type FAE8_R = crate::BitReader;
#[doc = "Field `FAE8` writer - Force acknowledge error 8"]
pub type FAE8_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `FAE9` reader - Force acknowledge error 9"]
pub type FAE9_R = crate::BitReader;
#[doc = "Field `FAE9` writer - Force acknowledge error 9"]
pub type FAE9_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `FAE10` reader - Force acknowledge error 10"]
pub type FAE10_R = crate::BitReader;
#[doc = "Field `FAE10` writer - Force acknowledge error 10"]
pub type FAE10_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `FAE11` reader - Force acknowledge error 11"]
pub type FAE11_R = crate::BitReader;
#[doc = "Field `FAE11` writer - Force acknowledge error 11"]
pub type FAE11_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `FAE12` reader - Force acknowledge error 12"]
pub type FAE12_R = crate::BitReader;
#[doc = "Field `FAE12` writer - Force acknowledge error 12"]
pub type FAE12_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `FAE13` reader - Force acknowledge error 13"]
pub type FAE13_R = crate::BitReader;
#[doc = "Field `FAE13` writer - Force acknowledge error 13"]
pub type FAE13_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `FAE14` reader - Force acknowledge error 14"]
pub type FAE14_R = crate::BitReader;
#[doc = "Field `FAE14` writer - Force acknowledge error 14"]
pub type FAE14_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `FAE15` reader - Force acknowledge error 15"]
pub type FAE15_R = crate::BitReader;
#[doc = "Field `FAE15` writer - Force acknowledge error 15"]
pub type FAE15_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `FPE0` reader - Force PHY error 0"]
pub type FPE0_R = crate::BitReader;
#[doc = "Field `FPE0` writer - Force PHY error 0"]
pub type FPE0_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `FPE1` reader - Force PHY error 1"]
pub type FPE1_R = crate::BitReader;
#[doc = "Field `FPE1` writer - Force PHY error 1"]
pub type FPE1_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `FPE2` reader - Force PHY error 2"]
pub type FPE2_R = crate::BitReader;
#[doc = "Field `FPE2` writer - Force PHY error 2"]
pub type FPE2_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `FPE3` reader - Force PHY error 3"]
pub type FPE3_R = crate::BitReader;
#[doc = "Field `FPE3` writer - Force PHY error 3"]
pub type FPE3_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `FPE4` reader - Force PHY error 4"]
pub type FPE4_R = crate::BitReader;
#[doc = "Field `FPE4` writer - Force PHY error 4"]
pub type FPE4_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
impl R {
    // Each reader tests one bit of the raw FIR0 value. The mask-test form
    // used below is equivalent to the generated shift-then-mask form.
    #[doc = "Bit 0 - Force acknowledge error 0"]
    #[inline(always)]
    pub fn fae0(&self) -> FAE0_R {
        FAE0_R::new((self.bits & (1 << 0)) != 0)
    }
    #[doc = "Bit 1 - Force acknowledge error 1"]
    #[inline(always)]
    pub fn fae1(&self) -> FAE1_R {
        FAE1_R::new((self.bits & (1 << 1)) != 0)
    }
    #[doc = "Bit 2 - Force acknowledge error 2"]
    #[inline(always)]
    pub fn fae2(&self) -> FAE2_R {
        FAE2_R::new((self.bits & (1 << 2)) != 0)
    }
    #[doc = "Bit 3 - Force acknowledge error 3"]
    #[inline(always)]
    pub fn fae3(&self) -> FAE3_R {
        FAE3_R::new((self.bits & (1 << 3)) != 0)
    }
    #[doc = "Bit 4 - Force acknowledge error 4"]
    #[inline(always)]
    pub fn fae4(&self) -> FAE4_R {
        FAE4_R::new((self.bits & (1 << 4)) != 0)
    }
    #[doc = "Bit 5 - Force acknowledge error 5"]
    #[inline(always)]
    pub fn fae5(&self) -> FAE5_R {
        FAE5_R::new((self.bits & (1 << 5)) != 0)
    }
    #[doc = "Bit 6 - Force acknowledge error 6"]
    #[inline(always)]
    pub fn fae6(&self) -> FAE6_R {
        FAE6_R::new((self.bits & (1 << 6)) != 0)
    }
    #[doc = "Bit 7 - Force acknowledge error 7"]
    #[inline(always)]
    pub fn fae7(&self) -> FAE7_R {
        FAE7_R::new((self.bits & (1 << 7)) != 0)
    }
    #[doc = "Bit 8 - Force acknowledge error 8"]
    #[inline(always)]
    pub fn fae8(&self) -> FAE8_R {
        FAE8_R::new((self.bits & (1 << 8)) != 0)
    }
    #[doc = "Bit 9 - Force acknowledge error 9"]
    #[inline(always)]
    pub fn fae9(&self) -> FAE9_R {
        FAE9_R::new((self.bits & (1 << 9)) != 0)
    }
    #[doc = "Bit 10 - Force acknowledge error 10"]
    #[inline(always)]
    pub fn fae10(&self) -> FAE10_R {
        FAE10_R::new((self.bits & (1 << 10)) != 0)
    }
    #[doc = "Bit 11 - Force acknowledge error 11"]
    #[inline(always)]
    pub fn fae11(&self) -> FAE11_R {
        FAE11_R::new((self.bits & (1 << 11)) != 0)
    }
    #[doc = "Bit 12 - Force acknowledge error 12"]
    #[inline(always)]
    pub fn fae12(&self) -> FAE12_R {
        FAE12_R::new((self.bits & (1 << 12)) != 0)
    }
    #[doc = "Bit 13 - Force acknowledge error 13"]
    #[inline(always)]
    pub fn fae13(&self) -> FAE13_R {
        FAE13_R::new((self.bits & (1 << 13)) != 0)
    }
    #[doc = "Bit 14 - Force acknowledge error 14"]
    #[inline(always)]
    pub fn fae14(&self) -> FAE14_R {
        FAE14_R::new((self.bits & (1 << 14)) != 0)
    }
    #[doc = "Bit 15 - Force acknowledge error 15"]
    #[inline(always)]
    pub fn fae15(&self) -> FAE15_R {
        FAE15_R::new((self.bits & (1 << 15)) != 0)
    }
    #[doc = "Bit 16 - Force PHY error 0"]
    #[inline(always)]
    pub fn fpe0(&self) -> FPE0_R {
        FPE0_R::new((self.bits & (1 << 16)) != 0)
    }
    #[doc = "Bit 17 - Force PHY error 1"]
    #[inline(always)]
    pub fn fpe1(&self) -> FPE1_R {
        FPE1_R::new((self.bits & (1 << 17)) != 0)
    }
    #[doc = "Bit 18 - Force PHY error 2"]
    #[inline(always)]
    pub fn fpe2(&self) -> FPE2_R {
        FPE2_R::new((self.bits & (1 << 18)) != 0)
    }
    #[doc = "Bit 19 - Force PHY error 3"]
    #[inline(always)]
    pub fn fpe3(&self) -> FPE3_R {
        FPE3_R::new((self.bits & (1 << 19)) != 0)
    }
    #[doc = "Bit 20 - Force PHY error 4"]
    #[inline(always)]
    pub fn fpe4(&self) -> FPE4_R {
        FPE4_R::new((self.bits & (1 << 20)) != 0)
    }
}
impl W {
    // Writer proxies: each method returns a one-bit writer positioned at the
    // corresponding field offset within FIR0.
    #[doc = "Bit 0 - Force acknowledge error 0"]
    #[inline(always)]
    #[must_use]
    pub fn fae0(&mut self) -> FAE0_W<FIR0_SPEC, 0> {
        FAE0_W::new(self)
    }
    #[doc = "Bit 1 - Force acknowledge error 1"]
    #[inline(always)]
    #[must_use]
    pub fn fae1(&mut self) -> FAE1_W<FIR0_SPEC, 1> {
        FAE1_W::new(self)
    }
    #[doc = "Bit 2 - Force acknowledge error 2"]
    #[inline(always)]
    #[must_use]
    pub fn fae2(&mut self) -> FAE2_W<FIR0_SPEC, 2> {
        FAE2_W::new(self)
    }
    #[doc = "Bit 3 - Force acknowledge error 3"]
    #[inline(always)]
    #[must_use]
    pub fn fae3(&mut self) -> FAE3_W<FIR0_SPEC, 3> {
        FAE3_W::new(self)
    }
    #[doc = "Bit 4 - Force acknowledge error 4"]
    #[inline(always)]
    #[must_use]
    pub fn fae4(&mut self) -> FAE4_W<FIR0_SPEC, 4> {
        FAE4_W::new(self)
    }
    #[doc = "Bit 5 - Force acknowledge error 5"]
    #[inline(always)]
    #[must_use]
    pub fn fae5(&mut self) -> FAE5_W<FIR0_SPEC, 5> {
        FAE5_W::new(self)
    }
    #[doc = "Bit 6 - Force acknowledge error 6"]
    #[inline(always)]
    #[must_use]
    pub fn fae6(&mut self) -> FAE6_W<FIR0_SPEC, 6> {
        FAE6_W::new(self)
    }
    #[doc = "Bit 7 - Force acknowledge error 7"]
    #[inline(always)]
    #[must_use]
    pub fn fae7(&mut self) -> FAE7_W<FIR0_SPEC, 7> {
        FAE7_W::new(self)
    }
    #[doc = "Bit 8 - Force acknowledge error 8"]
    #[inline(always)]
    #[must_use]
    pub fn fae8(&mut self) -> FAE8_W<FIR0_SPEC, 8> {
        FAE8_W::new(self)
    }
    #[doc = "Bit 9 - Force acknowledge error 9"]
    #[inline(always)]
    #[must_use]
    pub fn fae9(&mut self) -> FAE9_W<FIR0_SPEC, 9> {
        FAE9_W::new(self)
    }
    #[doc = "Bit 10 - Force acknowledge error 10"]
    #[inline(always)]
    #[must_use]
    pub fn fae10(&mut self) -> FAE10_W<FIR0_SPEC, 10> {
        FAE10_W::new(self)
    }
    #[doc = "Bit 11 - Force acknowledge error 11"]
    #[inline(always)]
    #[must_use]
    pub fn fae11(&mut self) -> FAE11_W<FIR0_SPEC, 11> {
        FAE11_W::new(self)
    }
    #[doc = "Bit 12 - Force acknowledge error 12"]
    #[inline(always)]
    #[must_use]
    pub fn fae12(&mut self) -> FAE12_W<FIR0_SPEC, 12> {
        FAE12_W::new(self)
    }
    #[doc = "Bit 13 - Force acknowledge error 13"]
    #[inline(always)]
    #[must_use]
    pub fn fae13(&mut self) -> FAE13_W<FIR0_SPEC, 13> {
        FAE13_W::new(self)
    }
    #[doc = "Bit 14 - Force acknowledge error 14"]
    #[inline(always)]
    #[must_use]
    pub fn fae14(&mut self) -> FAE14_W<FIR0_SPEC, 14> {
        FAE14_W::new(self)
    }
    #[doc = "Bit 15 - Force acknowledge error 15"]
    #[inline(always)]
    #[must_use]
    pub fn fae15(&mut self) -> FAE15_W<FIR0_SPEC, 15> {
        FAE15_W::new(self)
    }
    #[doc = "Bit 16 - Force PHY error 0"]
    #[inline(always)]
    #[must_use]
    pub fn fpe0(&mut self) -> FPE0_W<FIR0_SPEC, 16> {
        FPE0_W::new(self)
    }
    #[doc = "Bit 17 - Force PHY error 1"]
    #[inline(always)]
    #[must_use]
    pub fn fpe1(&mut self) -> FPE1_W<FIR0_SPEC, 17> {
        FPE1_W::new(self)
    }
    #[doc = "Bit 18 - Force PHY error 2"]
    #[inline(always)]
    #[must_use]
    pub fn fpe2(&mut self) -> FPE2_W<FIR0_SPEC, 18> {
        FPE2_W::new(self)
    }
    #[doc = "Bit 19 - Force PHY error 3"]
    #[inline(always)]
    #[must_use]
    pub fn fpe3(&mut self) -> FPE3_W<FIR0_SPEC, 19> {
        FPE3_W::new(self)
    }
    #[doc = "Bit 20 - Force PHY error 4"]
    #[inline(always)]
    #[must_use]
    pub fn fpe4(&mut self) -> FPE4_W<FIR0_SPEC, 20> {
        FPE4_W::new(self)
    }
    #[doc = "Writes raw bits to the register."]
    #[inline(always)]
    // SAFETY contract: the caller must ensure the raw value is valid for this
    // register; no field-level validation happens here.
    pub unsafe fn bits(&mut self, bits: u32) -> &mut Self {
        self.bits = bits;
        self
    }
}
#[doc = "DSI Host force interrupt register 0\n\nYou can [`read`](crate::generic::Reg::read) this register and get [`fir0::R`](R). You can [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero) this register using [`fir0::W`](W). You can also [`modify`](crate::generic::Reg::modify) this register. See [API](https://docs.rs/svd2rust/#read--modify--write-api)."]
// Marker type tying the generic register machinery to the 32-bit FIR0 register.
pub struct FIR0_SPEC;
impl crate::RegisterSpec for FIR0_SPEC {
    type Ux = u32;
}
#[doc = "`read()` method returns [`fir0::R`](R) reader structure"]
impl crate::Readable for FIR0_SPEC {}
#[doc = "`write(|w| ..)` method takes [`fir0::W`](W) writer structure"]
impl crate::Writable for FIR0_SPEC {
    const ZERO_TO_MODIFY_FIELDS_BITMAP: Self::Ux = 0;
    const ONE_TO_MODIFY_FIELDS_BITMAP: Self::Ux = 0;
}
#[doc = "`reset()` method sets FIR0 to value 0"]
impl crate::Resettable for FIR0_SPEC {
    const RESET_VALUE: Self::Ux = 0;
}
|
mod helpers;
use jsonprima;
// Test suite for parsing the JSON literal `true`. Each `test!` invocation is
// (test name, input string, expected diagnostics); a diagnostic is a tuple of
// (error code, start, end). NOTE(review): offsets appear to be character
// indices into the input — confirm against the `helpers` module's `test!` macro.
// Truncated literal: "t"
test!(test_1, "t", vec![("E104", 0, 1)]);
test!(test_2, " t", vec![("E104", 1, 2)]);
test!(test_3, "t ", vec![("E105", 0, 2)]);
test!(test_4, " t ", vec![("E105", 1, 3)]);
test!(test_5, " t😋", vec![("E105", 1, 3)]);
// Truncated literal: "tr"
test!(test_6, "tr", vec![("E104", 0, 2)]);
test!(test_7, " tr", vec![("E104", 1, 3)]);
test!(test_8, "tr ", vec![("E105", 0, 3)]);
test!(test_9, " tr ", vec![("E105", 1, 4)]);
test!(test_10, " tr😋", vec![("E105", 1, 4)]);
// Truncated literal: "tru"
test!(test_11, "tru", vec![("E104", 0, 3)]);
test!(test_12, " tru", vec![("E104", 1, 4)]);
test!(test_13, "tru ", vec![("E105", 0, 4)]);
test!(test_14, " tru ", vec![("E105", 1, 5)]);
test!(test_15, " tru😋", vec![("E105", 1, 5)]);
// Valid documents: "true" with surrounding whitespace
test!(test_16, "true", vec![]);
test!(test_17, " \n\t\rtrue", vec![]);
test!(test_18, "true \n\t\r", vec![]);
test!(test_19, " \n\t\rtrue \n\t\r", vec![]);
// Ignore byte order mark.
test!(test_20, "\u{feff}true", vec![]);
// Trailing second `true` literal
test!(test_21, "truetrue", vec![("E103", 4, 5)]);
test!(test_22, "true true", vec![("E103", 5, 6)]);
test!(test_23, " true true", vec![("E103", 6, 7)]);
test!(test_24, " \n\r true \ttrue", vec![("E103", 10, 11)]);
// `true` followed by an arbitrary token
test!(test_33, "truew", vec![("E106", 4, 5)]);
test!(test_34, "true (", vec![("E106", 5, 6)]);
test!(test_35, " true ***", vec![("E106", 6, 7)]);
test!(test_36, " \n\r true \tq", vec![("E106", 10, 11)]);
test!(test_37, " true😋", vec![("E106", 5, 6)]);
// `true` followed by `false`
test!(test_38, "truefalse", vec![("E107", 4, 5)]);
test!(test_39, "true false", vec![("E107", 5, 6)]);
test!(test_40, " true false", vec![("E107", 6, 7)]);
test!(test_41, " \n\r true \tfalse", vec![("E107", 10, 11)]);
// `true` followed by `null`
test!(test_43, "truenull", vec![("E108", 4, 5)]);
test!(test_44, "true null", vec![("E108", 5, 6)]);
test!(test_45, " true null", vec![("E108", 6, 7)]);
test!(test_46, " \n\r true \r\nnull", vec![("E108", 11, 12)]);
// `true` followed by a number
test!(test_49, "true0", vec![("E109", 4, 5)]);
test!(test_50, "true 0", vec![("E109", 5, 6)]);
test!(test_51, " true 0", vec![("E109", 6, 7)]);
test!(test_52, " \n\r true \t0", vec![("E109", 10, 11)]);
// `true` followed by a string
test!(test_53, "true\"\"", vec![("E114", 4, 5)]);
test!(test_54, "true \"\"", vec![("E114", 5, 6)]);
test!(test_55, " true \"\"", vec![("E114", 6, 7)]);
test!(test_56, " \n\r true \t\"\"", vec![("E114", 10, 11)]);
// `true` followed by begin-array
test!(test_57, "true[", vec![("E125", 4, 5)]);
test!(test_58, "true [", vec![("E125", 5, 6)]);
test!(test_59, " true [", vec![("E125", 6, 7)]);
test!(test_60, " \n\r true \t[", vec![("E125", 10, 11)]);
// `true` followed by end-array
test!(test_61, "true]", vec![("E126", 4, 5)]);
test!(test_62, "true ]", vec![("E126", 5, 6)]);
test!(test_63, " true ]", vec![("E126", 6, 7)]);
test!(test_64, " \n\r true \t]", vec![("E126", 10, 11)]);
// `true` followed by value-separator
test!(test_65, "true,", vec![("E124", 4, 5)]);
test!(test_66, "true ,", vec![("E124", 5, 6)]);
test!(test_67, " true ,", vec![("E124", 6, 7)]);
test!(test_68, " \n\r true \t,", vec![("E124", 10, 11)]);
// `true` followed by begin-object
test!(test_69, "true{", vec![("E130", 4, 5)]);
test!(test_70, "true {", vec![("E130", 5, 6)]);
test!(test_71, " true {", vec![("E130", 6, 7)]);
test!(test_72, " \n\r true \t{", vec![("E130", 10, 11)]);
// `true` followed by end-object
test!(test_73, "true}", vec![("E131", 4, 5)]);
test!(test_74, "true }", vec![("E131", 5, 6)]);
test!(test_75, " true }", vec![("E131", 6, 7)]);
test!(test_76, " \n\r true \t}", vec![("E131", 10, 11)]);
// `true` followed by name-separator
test!(test_77, "true:", vec![("E136", 4, 5)]);
test!(test_78, "true :", vec![("E136", 5, 6)]);
test!(test_79, " true :", vec![("E136", 6, 7)]);
test!(test_80, " \n\r true \t:", vec![("E136", 10, 11)]);
|
use libc;
use nix;
pub fn all() -> Result<Vec<String>, nix::Error> {
nix::net::if_::if_nametoindex
nix::sys::ioctl!();
} |
/// Error Correction Code
///
/// Parity of the low 32 bits of `x`: returns 1 when an odd number of those
/// bits are set, 0 otherwise. The original XOR-fold (shifts 1,2,4,8,16)
/// only ever folds bits 0..=31 into bit 0, so truncating to `u32` and
/// counting set bits is behaviorally identical for every `i64` input.
pub fn hamming_parity(x: i64) -> i64 {
    ((x as u32).count_ones() & 1) as i64
}
/* Computes the six parity check bits for the
"information" bits given in the 32-bit word u. The
check bits are p[5:0]. On sending, an overall parity
bit will be prepended to p (by another process).
Bit Checks these bits of u
p[0] 0, 1, 3, 5, ..., 31 (0 and the odd positions).
p[1] 0, 2-3, 6-7, ..., 30-31 (0 and positions xxx1x).
p[2] 0, 4-7, 12-15, 20-23, 28-31 (0 and posns xx1xx).
p[3] 0, 8-15, 24-31 (0 and positions x1xxx).
p[4] 0, 16-31 (0 and positions 1xxxx).
p[5] 1-31 */
// The XOR-folds below are order-dependent; each ti value is reused to build
// the next check bit, so statements must not be reordered.
pub fn hamming_checkbits(mut u: i64) -> i64 {
    // First calculate p[5:0] ignoring u[0].
    let mut p0 = u ^ (u >> 2);
    p0 = p0 ^ (p0 >> 4);
    p0 = p0 ^ (p0 >> 8);
    p0 = p0 ^ (p0 >> 16); // p0 is in posn 1.
    let t1 = u ^ (u >> 1);
    let mut p1 = t1 ^ (t1 >> 4);
    p1 = p1 ^ (p1 >> 8);
    p1 = p1 ^ (p1 >> 16); // p1 is in posn 2.
    let t2 = t1 ^ (t1 >> 2);
    let mut p2 = t2 ^ (t2 >> 8);
    p2 = p2 ^ (p2 >> 16); // p2 is in posn 4.
    let t3 = t2 ^ (t2 >> 4);
    let p3 = t3 ^ (t3 >> 16); // p3 is in posn 8.
    let p4 = t3 ^ (t3 >> 8); // p4 is in posn 16.
    let p5 = p4 ^ (p4 >> 16); // p5 is in posn 0.
    // Gather the six scattered check bits into p[5:0].
    let mut p = ((p0 >> 1) & 1) | ((p1 >> 1) & 2) | ((p2 >> 2) & 4) |
        ((p3 >> 5) & 8) | ((p4 >> 12) & 16) | ((p5 & 1) << 5);
    p = p ^ (-(u & 1) & 0x3F); // Now account for u[0].
    return p;
}
/* This function looks at the received seven check
bits and 32 information bits (pr and ur), and
determines how many errors occurred (under the
presumption that it must be 0, 1, or 2). It returns
with 0, 1, or 2, meaning that no errors, one error, or
two errors occurred. It corrects the information word
received (ur) if there was one error in it. */
/// Returns the detected error count (0, 1, or 2) and fixes `*ur` in place
/// when exactly one information bit was flipped.
pub fn hamming_correct(pr: i64, ur: &mut i64) -> i64 {
    let po = hamming_parity(pr ^ *ur); // Compute overall parity
                                       // of the received data.
    let p = hamming_checkbits(*ur); // Calculate check bits
                                    // for the received info.
    // Syndrome (exclusive of overall parity bit). Was needlessly `mut`
    // in the original — it is never reassigned.
    let syn = p ^ (pr & 0x3F);
    if po == 0 {
        // Overall parity is consistent: either no error, or two errors
        // (which this code cannot correct).
        if syn == 0 { return 0; }
        else { return 2; }
    }
    // One error occurred.
    // If syn has zero or one bits set, the error is in the check bits or the
    // overall parity bit, so the information word needs no correction.
    if ((syn - 1) & syn) == 0
    { return 1; }
    // One error, and syn bits 5:0 tell where it is in ur.
    let b = syn - 31 - (syn >> 5); // Map syn to range 0 to 31.
    // if (syn == 0x1f) b = 0; // (These two lines equiv.
    // else b = syn & 0x1f; // to the one line above.)
    *ur = *ur ^ (1 << b); // Correct the bit.
    return 1;
}
pub fn hamming_perturb(p: &mut i64, u: &mut i64) -> i64 {
    /* This generates all the possible 39-bit quantities with 0, 1, or 2
    bits set, and alters the corresponding 0, 1, or 2 bits of p and u,
    treating them as a concatenation of p and u (39 bits long).
    The error bit words are generated in the order (illustrated for a
    5-bit quantitity):
    00011, 00101, 01001, 10001, 00001, 00110, 01010, 10010, 00010,
    01100, 10100, 00100, 11000, 01000, 10000, 00000. */
    // NOTE(review): this looks like a port of code where x and y persisted
    // across calls (e.g. C statics). Here they are re-initialized to 1 and 2
    // on every call, so errorBits is always 3, num is always 2, and both the
    // num-classification branches below and the x/y-advancing logic at the
    // bottom are dead. Confirm intent: either state must be threaded through
    // the caller, or this function can be simplified to a fixed 2-bit flip.
    let mask: i64 = (1 << 39) - 1;
    let mut x = 1;
    let mut y = 2;
    let errorBits;
    let num;
    errorBits = x | y;
    if errorBits == 0 { num = 0; } // Set num = number
    else if x == 0 || y == 0
    { num = 1; } // of 1-bits in
    else { num = 2; } // errorBits.
    *u = *u ^ errorBits; // Apply the
    *p = *p ^ (errorBits >> 32); // error bits.
    if y != 0 { y = (y << 1) & mask; } else {
        x = (x << 1) & mask;
        y = (x << 1) & mask;
    }
    return num;
}
// Smoke test for the parity helper. The dual cfg_attr selects between the
// standard `#[test]` harness and a custom `#[test_case]` harness depending on
// target architecture — NOTE(review): presumably for a bare-metal custom test
// framework; confirm which targets are expected to use which harness.
#[cfg_attr(not(target_arch = "x86_64"),test_case)]
#[cfg_attr(not(target_arch = "riscv64"),test)]
fn test_hamming() {
    // A single set bit has odd parity.
    assert_eq!(hamming_parity(1), 1);
}
use core::position::{Size, Pos, HasSize, HasPosition};
use core::cellbuffer::CellAccessor;
use std::boxed::Box;
use std::collections::HashMap;
use ui::core::{
Layout,
Alignable,
HorizontalAlign,
VerticalAlign,
Widget,
Frame,
Button,
Painter,
ButtonResult
};
/// Hold buttons and align them horizontally for drawing within a
/// [Dialog](struct.Dialog.html)
///
/// # Examples
///
/// ```
/// use rustty::ui::core::{HorizontalAlign, VerticalAlign, ButtonResult, Widget, Button};
/// use rustty::ui::{Dialog, StdButton, HorizontalLayout};
///
/// let mut maindlg = Dialog::new(60, 10);
///
/// let b1 = StdButton::new("Quit", 'q', ButtonResult::Ok);
/// let b2 = StdButton::new("Foo!", 'f', ButtonResult::Custom(1));
/// let b3 = StdButton::new("Bar!", 'b', ButtonResult::Custom(2));
///
/// let buttons = vec![b1, b2, b3].into_iter().map(Box::new);
/// let buttons = buttons.map(|x| x as Box<Button>).collect();
///
/// let mut hlayout = HorizontalLayout::from_vec(buttons, 1);
/// hlayout.pack(&maindlg, HorizontalAlign::Middle, VerticalAlign::Bottom, (0,1));
///
/// maindlg.add_layout(hlayout);
/// ```
///
pub struct HorizontalLayout {
    // Backing surface the buttons are drawn into (1 cell tall).
    frame: Frame,
    // Horizontal gap, in cells, inserted between adjacent buttons.
    inner_margin: usize,
    // Top-left position from which buttons are laid out left-to-right.
    origin: Pos,
    // The owned buttons, in left-to-right display order.
    widgets: Vec<Box<Button>>
}
impl HorizontalLayout {
    /// Construct a `HorizontalLayout` object from a vector of boxed objects that implement
    /// [Button](core/button/trait.Button.html). The current API for this function will
    /// change *very* soon
    ///
    /// # Examples
    ///
    /// ```
    /// use rustty::ui::core::{ButtonResult, Button};
    /// use rustty::ui::{StdButton, HorizontalLayout};
    ///
    /// let b1 = StdButton::new("Quit", 'q', ButtonResult::Ok);
    /// let b2 = StdButton::new("Foo!", 'f', ButtonResult::Custom(1));
    /// let b3 = StdButton::new("Bar!", 'b', ButtonResult::Custom(2));
    ///
    /// let v = vec![b1, b2, b3].into_iter().map(Box::new).map(|x| x as Box<Button>).collect();
    /// let mut hlayout = HorizontalLayout::from_vec(v, 1);
    /// ```
    ///
    pub fn from_vec(widgets: Vec<Box<Button>>, inner_margin: usize) -> HorizontalLayout {
        // The original called widgets.first().unwrap() and computed
        // widgets.len() - 1, which panics / underflows on an empty vector.
        // An empty layout is now well-defined: zero width, origin (0, 0).
        let first_origin = widgets.first()
            .map(|w| w.frame().origin())
            .unwrap_or((0, 0));
        let total_width = widgets.iter().fold(0, |acc, item| acc + item.frame().size().0);
        // One inner margin between each adjacent pair of buttons.
        let width = total_width + inner_margin * widgets.len().saturating_sub(1);
        HorizontalLayout {
            frame: Frame::new(width, 1),
            inner_margin: inner_margin,
            origin: first_origin,
            widgets: widgets
        }
    }
}
impl Widget for HorizontalLayout {
    /// Blits this layout's frame onto the parent drawing surface.
    fn draw(&mut self, parent: &mut CellAccessor) {
        self.frame.draw_into(parent);
    }
    /// Aligns the layout's frame within `parent` using the given horizontal
    /// and vertical alignment plus an (x, y) margin.
    fn pack(&mut self, parent: &HasSize, halign: HorizontalAlign, valign: VerticalAlign,
        margin: (usize, usize)) {
        self.frame.align(parent, halign, valign, margin);
    }
    /// Draws a box border around the layout's own frame.
    fn draw_box(&mut self) {
        self.frame.draw_box();
    }
    /// Resizes the backing frame.
    fn resize(&mut self, new_size: Size) {
        self.frame.resize(new_size);
    }
    /// Immutable access to the backing frame.
    fn frame(&self) -> &Frame {
        &self.frame
    }
    /// Mutable access to the backing frame.
    fn frame_mut(&mut self) -> &mut Frame {
        &mut self.frame
    }
}
impl Layout for HorizontalLayout {
    /// Positions the buttons left-to-right from the stored origin, separated
    /// by `inner_margin`, then draws each into this layout's frame.
    fn align_elems(&mut self) {
        let (x, y) = self.origin;
        let mut current_x = x;
        for widget in self.widgets.iter_mut() {
            widget.frame_mut().set_origin((current_x, y));
            // Advance past this button plus the inter-button gap.
            current_x += widget.frame_mut().size().0 + self.inner_margin;
        }
        for w in self.widgets.iter() {
            w.frame().draw_into(&mut self.frame);
        }
    }
    /// Registers every button's accelerator key with its result in `map`.
    fn forward_keys(&mut self, map: &mut HashMap<char, ButtonResult>) {
        for w in self.widgets.iter() {
            map.insert(w.accel(), w.result());
        }
    }
}
|
use std::error::Error;
use std::fs::File;
use std::io::BufReader;
use std::io::prelude::*;
use std::path::Path;
/// Advent of Code driver. Earlier day/question entry points are kept
/// commented out; currently runs day 2 question 2.
fn main() {
    //Day 1 Input
    //let lines: Vec<String> = get_input("./src/input1.txt");
    //Day 1, Q1
    //fuel_tally(&lines);
    //Day 1, Q2
    //fuel_fuel_tally(&lines)
    //Day 2 Input
    let test_mode = false;
    // Either the first line of the real puzzle input, or the worked example
    // program from the problem statement.
    let lines = match test_mode {
        true => String::from("1,9,10,3,2,3,11,0,99,30,40,50"),
        false => get_input("./src/input2.txt")[0].clone(),
    };
    //Day 2, Q1
    //intcode_solver(&lines);
    //Day 2, Q2
    noun_verb_finder(&lines);
}
///Read the file at `filepath` and return its lines as owned strings.
///
///@param filepath - string representing path to an input file
///
///Panics if the file cannot be opened or a line cannot be read. The
///deprecated `Error::description()` call was replaced with the error's
///`Display` impl, which includes the OS error message.
fn get_input(filepath : &str) -> Vec<String>
{
    let path = Path::new(filepath);
    // Open the path in read-only mode.
    let file = File::open(path)
        .unwrap_or_else(|why| panic!("couldn't open file {}: {}", filepath, why));
    // Buffered, line-by-line read; collect directly into the result vector.
    BufReader::new(file)
        .lines()
        .map(|l| l.expect("Could not parse line"))
        .collect()
}
///Sum the naive fuel requirement (mass / 3, floored, minus 2) over each
///mass line in `data` (AoC 2019 day 1, part 1).
///
///Now returns the total as well (backward compatible: existing callers can
///keep ignoring it); the per-line and total printouts are preserved.
fn fuel_tally(data : &[String]) -> f64
{
    let mut total: f64 = 0.0;
    for l in data {
        // Each line is one module's mass; AoC input is trusted, so a parse
        // failure aborts.
        let mass = l.parse::<f64>().unwrap();
        let fuel: f64 = (mass/3.0).floor()-2.0;
        println!("Mass:{} \t-> Fuel:{}",&mass, &fuel);
        total += fuel;
    }
    println!("**Total is: {}**",total);
    total
}
///Sum the iterative "fuel for the fuel" requirement over each mass line in
///`data` (AoC 2019 day 1, part 2), delegating per-mass work to `calc_fuel`.
///
///Now returns the total as well (backward compatible: existing callers can
///keep ignoring it); the per-line and total printouts are preserved.
fn fuel_fuel_tally(data : &[String]) -> f64
{
    let mut total: f64 = 0.0;
    for l in data {
        // Each line is one module's mass; AoC input is trusted, so a parse
        // failure aborts.
        let mass = l.parse::<f64>().unwrap();
        let fuel: f64 = calc_fuel(mass);
        println!("Mass:{} \t-> Fuel:{}",&mass, &fuel);
        total += fuel;
    }
    println!("**Total is: {}**",total);
    total
}
///Fuel needed to launch `mass`, including fuel for the fuel itself
///(AoC 2019 day 1 part 2): apply mass/3 (floored) minus 2 recursively until
///the requirement is non-positive. A non-positive base requirement is still
///returned (matching the original), but never recursed on.
///
///The stray per-step debug `println!` in the original was removed.
fn calc_fuel(mass : f64) -> f64
{
    let fuel: f64 = (mass/3.0).floor()-2.0;
    if fuel > 0.0 {
        // Add the fuel needed to carry the fuel, ignoring negative remainders.
        fuel + calc_fuel(fuel).max(0.0)
    } else {
        fuel
    }
}
//Day 2 Part 1 - intcode computer
/// Runs an Intcode program to completion and returns the final memory image.
/// Opcode 1 adds, opcode 2 multiplies (both use two positional operands and a
/// positional destination); opcode 99 halts; anything else panics.
fn intcode_solver(mut intcode : Vec<i64>) -> Vec<i64>
{
    let mut pc: usize = 0;
    while intcode[pc] != 99 {
        // Decode the opcode first so an invalid instruction panics with the
        // original message before any operand is dereferenced.
        let value = match intcode[pc] {
            1 => intcode[intcode[pc + 1] as usize] + intcode[intcode[pc + 2] as usize],
            2 => intcode[intcode[pc + 1] as usize] * intcode[intcode[pc + 2] as usize],
            _ => panic!("Bad input, release smoke"),
        };
        // Third operand is the destination address.
        let dst = intcode[pc + 3] as usize;
        intcode[dst] = value;
        // Every day-2 instruction is four cells wide.
        pc += 4;
    }
    intcode
}
//Iterate through possible inputs until desired solution is found, answer is printed
fn noun_verb_finder(data : &str)
{
let mut stop = false;
let intcode : Vec<i64> = data.split(',').map(|i| i.parse::<i64>().expect("Invalid integer?")).collect();
for n in 0..(intcode.len()-1) as i64
{
for v in 0..(intcode.len()-1) as i64
{
let mut intcode : Vec<i64> = data.split(',').map(|i| i.parse::<i64>().expect("Invalid integer?")).collect();
intcode[1] = n;
intcode[2] = v;
//Without clone, value borrowed after move error
let result : i64 = intcode_solver(intcode.clone())[0];
if result == 19690720
{
println!("Eureka! {}",n*100+v);
stop = true;
break;
}
}
if stop
{
break;
}
}
} |
use crate::errors::*;
use std::fs;
/// A trait for mutable stores. This is abstracted away so that users can implement a non-filesystem mutable store, which is useful
/// for read-only filesystem environments, as on many modern hosting providers. See the book for further details on this subject.
#[async_trait::async_trait]
pub trait MutableStore: Clone {
    /// Reads data from the named asset.
    /// Returns an error if the asset does not exist or cannot be read.
    async fn read(&self, name: &str) -> Result<String, StoreError>;
    /// Writes data to the named asset. This will create a new asset if one doesn't exist already.
    async fn write(&self, name: &str, content: &str) -> Result<(), StoreError>;
}
/// The default mutable store, which simply uses the filesystem. This is suitable for development and production environments with
/// writable filesystems (in which it's advised), but this is of course not usable on production read-only filesystems, and another
/// implementation of `MutableStore` should be preferred.
///
/// Note: the `.write()` methods on this implementation will create any missing parent directories automatically.
#[derive(Clone)]
pub struct FsMutableStore {
    // Root directory for all assets; asset paths are built as `{root_path}/{name}`.
    root_path: String,
}
impl FsMutableStore {
    /// Creates a new filesystem configuration manager. You should provide a path like `/dist/mutable` here. Make sure that this is
    /// not the same path as the immutable store, as this will cause potentially problematic overlap between the two systems.
    pub fn new(root_path: String) -> Self {
        Self { root_path }
    }
}
#[async_trait::async_trait]
impl MutableStore for FsMutableStore {
    /// Reads the asset at `{root_path}/{name}` into a string, mapping a
    /// missing file to `StoreError::NotFound` and any other I/O failure to
    /// `StoreError::ReadFailed`.
    async fn read(&self, name: &str) -> Result<String, StoreError> {
        let asset_path = format!("{}/{}", self.root_path, name);
        // Read directly and classify the error afterwards. The previous
        // metadata-then-read sequence issued two syscalls and was racy: the
        // file could disappear between the existence check and the read.
        match fs::read_to_string(&asset_path) {
            Ok(contents) => Ok(contents),
            Err(err) if err.kind() == std::io::ErrorKind::NotFound => {
                Err(StoreError::NotFound { name: asset_path })
            }
            Err(err) => Err(StoreError::ReadFailed {
                name: asset_path,
                source: err.into(),
            }),
        }
    }
    // This creates a directory structure as necessary
    async fn write(&self, name: &str, content: &str) -> Result<(), StoreError> {
        let asset_path = format!("{}/{}", self.root_path, name);
        // Everything up to the final path component is a directory that may
        // need creating first.
        let mut dir_tree: Vec<&str> = asset_path.split('/').collect();
        dir_tree.pop();
        fs::create_dir_all(dir_tree.join("/")).map_err(|err| StoreError::WriteFailed {
            name: asset_path.clone(),
            source: err.into(),
        })?;
        fs::write(&asset_path, content).map_err(|err| StoreError::WriteFailed {
            name: asset_path,
            source: err.into(),
        })
    }
}
|
#[macro_use]
extern crate iron;
extern crate router;
extern crate mount;
extern crate params;
extern crate iron_sessionstorage;
extern crate iron_json_response as ijr;
#[macro_use]
extern crate serde_derive;
#[macro_use]
extern crate diesel;
#[macro_use]
extern crate diesel_codegen;
extern crate r2d2;
extern crate r2d2_diesel;
extern crate config;
extern crate time;
extern crate base64;
use diesel::pg::PgConnection;
use ijr::JsonResponseMiddleware;
use iron::prelude::*;
use iron_sessionstorage::SessionStorage;
use iron_sessionstorage::backends::SignedCookieBackend;
use r2d2::Pool;
use r2d2_diesel::ConnectionManager;
use server_config::Config;
mod routes;
mod server_config;
mod schema;
mod models;
mod repo;
mod middleware;
mod auth;
/// Entry point: load `application.yaml` and boot the HTTP server, reporting
/// any configuration error on stdout.
fn main() {
    match Config::load("application.yaml") {
        Err(e) => {
            println!("Error while starting server:");
            println!("{}", e);
        }
        Ok(cfg) => start_server(&cfg),
    }
}
/// Builds the middleware chain (database pool injection, cookie-backed
/// sessions, JSON responses, cookie-deletion, optional CORS) and starts the
/// iron server on the configured host and port.
fn start_server(config: &Config) {
    let url = format!("{}:{}", config.host, config.port);
    // Postgres connection pool shared across requests via middleware.
    let db_config = r2d2::Config::default();
    let db_manager = ConnectionManager::<PgConnection>::new(config.db_url.as_str());
    let db_pool = Pool::new(db_config, db_manager).expect("Failed to connect to database");
    let mount = routes::create();
    let mut chain = Chain::new(mount);
    // Pipeline: DB handle is linked before handlers, sessions wrap around
    // them, and JSON/cookie middleware run after.
    chain.link_before(middleware::DatabaseMiddleware::new(db_pool));
    chain.link_around(SessionStorage::new(SignedCookieBackend::new(config.secret.clone())));
    chain.link_after(JsonResponseMiddleware {});
    chain.link_after(middleware::DeleteCookieMiddleware {});
    // CORS is only enabled when a domain is configured.
    if let Some(ref domain) = config.cors {
        chain.link_after(middleware::CorsMiddleware::new(domain));
    }
    let start_status = Iron::new(chain).http(&url);
    match start_status {
        Ok(_) => {
            println!("Server started on {}", &url);
        }
        Err(e) => {
            println!("Error: {}", e);
        }
    }
}
|
/// A simple record type used by the ownership / borrowing demos below.
#[derive(Debug)]
struct Person {
    name: String,
    age: i32,
}
// Demo: a reference must not outlive the value it borrows.
fn main1() {
    // Declare the variable x (a reference, not yet initialized)
    let x: &Person;
    // Start an inner block
    {
        // Allocate storage for the variable a
        let a = Person {
            name: String::from("masuda"),
            age: 50,
        };
        // Make x refer to a
        x = &a ;
        // Leave the block (a is dropped here)
    }
    // x cannot be used here: it would reference dropped data
    // println!("x is {:?}", x );
}
// Demo: moving ownership out of an inner block keeps the value alive.
fn main2() {
    // Declare the variable x (an owned value, not yet initialized)
    let x: Person;
    // Start an inner block
    {
        // Allocate storage for the variable a
        let a = Person {
            name: String::from("masuda"),
            age: 50,
        };
        x = a ;
    };
    // x received ownership, so the value can still be used here
    println!("x is {:?}", x );
}
// Demo: receive ownership of the value returned by new_person.
fn main3() {
    let a = new_person( "masuda", 50 ) ;
    println!("a is {:?}", a );
}
/*
fn new_person( name: &str, age: i32 ) -> &Person {
let p = Person {
name: String::from(name),
age: age,
};
// 確保したメモリの参照を返そうと試みるが、
// コンパイルすると &Person のライフタイム指定がないとの
// エラーがでるのでコンパイルができない。
&p
}
*/
/// Build a `Person` and hand ownership of it to the caller — returning the
/// value itself (not a reference) is what makes this compile.
fn new_person( name: &str, age: i32 ) -> Person {
    Person {
        name: name.to_string(),
        age,
    }
}
/*
fn main() {
let mut a = Person {
name: String::from("masuda"),
age: 50,
};
println!("a is {:?}", a );
// 可変で参照する
let mut x = &mut a ;
let mut y = &mut a ;
x.age = 0;
y.name = String::from("kato");
println!("a is {:?}", a );
println!("x is {:?}", x );
println!("y is {:?}", y );
}
*/
// Demo: two mutable borrows are fine when their uses do not overlap.
fn main4() {
    let mut a = Person {
        name: String::from("masuda"),
        age: 50,
    };
    println!("a is {:?}", a );
    // Borrow mutably (this borrow ends before y is created)
    let mut x = &mut a ;
    x.age = 0;
    println!("x is {:?}", x );
    let mut y = &mut a ;
    y.name = String::from("kato");
    println!("y is {:?}", y );
    println!("a is {:?}", a );
}
/*
fn main() {
let mut a = Person {
name: String::from("masuda"),
age: 50,
};
println!("a is {:?}", a );
// 可変で参照する
let mut x = &mut a ;
println!("x is {:?}", x );
x.name = String::from("kato");
x.age = 0;
// 借用する順序に注意する
println!("a is {:?}", a );
println!("x is {:?}", x );
}
*/
/// Demonstrates borrow ordering: the owner `a` is only read again after
/// the mutable borrow `x` has had its last use.
fn main() {
    let mut a = Person {
        name: String::from("masuda"),
        age: 50,
    };
    println!("a is {:?}", a );
    // Mutably borrow `a`.
    let mut x = &mut a ;
    println!("x is {:?}", x );
    x.name = String::from("kato");
    x.age = 0;
    // Note the ordering: use the borrow `x` first, then the owner `a`.
    println!("x is {:?}", x );
    println!("a is {:?}", a );
}
|
use crate::{R, FontError};
use nom::{
number::complete::{be_u16, be_u32},
};
use svg_dom::{Svg, Item};
use std::collections::HashMap;
use std::sync::Arc;
use std::fmt;
/// A single glyph from an OpenType `SVG ` table: the parsed SVG document
/// plus the element within it (looked up as `glyph{N}`) that draws it.
#[derive(Clone)]
pub struct SvgGlyph {
    pub svg: Arc<Svg>,   // shared, parsed SVG document
    pub item: Arc<Item>, // the `glyph{N}` element inside `svg`
}
impl fmt::Debug for SvgGlyph {
    /// Glyph contents are not useful to dump; emit a fixed marker instead.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        f.write_str("SVG")
    }
}
/// Parsed `SVG ` font table: maps a glyph id to its SVG glyph entry.
#[derive(Clone)]
pub struct SvgTable {
    pub glyphs: HashMap<u16, SvgGlyph>
}
/// Parses the top-level `SVG ` table header and hands off to the
/// document-list parser.
///
/// Header layout: u16 version, u32 offset to the SVG document list,
/// u32 reserved. Unused bindings are underscore-prefixed to silence
/// warnings.
pub fn parse_svg(data: &[u8]) -> Result<SvgTable, FontError> {
    // NOTE(review): the version field is not validated — confirm whether
    // anything other than version 0 should be rejected.
    let (i, _version) = be_u16(data)?;
    let (i, document_list_offset) = be_u32(i)?;
    let (_i, _reserved) = be_u32(i)?;
    read_document_list(slice!(data, document_list_offset as usize ..))
}
fn read_document_list(input: &[u8]) -> Result<SvgTable, FontError> {
let mut glyphs = HashMap::new();
let (mut data, num_entries) = be_u16(input)?;
for _ in 0 .. num_entries {
let (i, start_gid) = be_u16(data)?;
let (i, end_gid) = be_u16(i)?;
let (i, data_offset) = be_u32(i)?;
let (i, data_len) = be_u32(i)?;
data = i;
let svg_data = slice!(input, data_offset as usize .. data_offset as usize + data_len as usize);
// std::fs::write(format!("/tmp/font/{}.svg", start_gid), svg_data);
let svg = match Svg::from_data(svg_data) {
Ok(svg) => Arc::new(svg),
Err(e) => {
error!("SVG error: {:?}", e)
}
};
for gid in start_gid ..= end_gid {
let glyph_id = format!("glyph{}", gid);
match svg.get_item(&glyph_id) {
Some(item) => {
glyphs.insert(gid, SvgGlyph {
svg: svg.clone(),
item: item.clone()
});
}
None => {
warn!("missing SVG glyph: {}", glyph_id);
}
}
}
}
Ok(SvgTable { glyphs })
}
|
use crate::main;
pub mod frame_allocator;
#[macro_use]
pub mod interrupts;
pub mod intrinsics;
pub mod gdt;
pub mod multiboot;
pub mod paging;
pub mod pic;
pub mod stacks;
pub mod syscall;
pub mod tss;
/// Virtual base of the kernel mapping; `kstart` subtracts it to recover
/// physical addresses from linked (virtual) symbols.
pub const KERNEL_BASE: usize = 0xffffffff80000000;
use self::multiboot::MultibootTags;
use self::frame_allocator::{frame_alloc, get_fallocator};
/// Kernel entry point, invoked by the boot stub with the multiboot tag
/// list left by the boot loader.
///
/// Order matters: the frame allocator must learn which regions to
/// protect *before* any frame is handed out, and paging comes up before
/// the interrupt/GDT/TSS/syscall machinery.
///
/// # Safety
/// Must be called exactly once during boot with a valid multiboot tag
/// reference; it initializes global hardware and allocator state.
#[no_mangle]
pub unsafe extern fn kstart(multiboot_tags: &MultibootTags) {
    assert_minimum_cpuid();
    let multiboot_info = multiboot_tags.parse();
    // protect some memory regions from frame allocator
    let elf_sections = multiboot_info.elf_sections.unwrap();
    // NOTE(review): only image_end() is rebased by KERNEL_BASE — confirm
    // image_start() is already a physical address.
    let (k_begin, k_end) = (elf_sections.image_start(), elf_sections.image_end() - KERNEL_BASE);
    let (m_begin, m_end) = (multiboot_tags.start(), multiboot_tags.end());
    let protected_regions = [
        (k_begin, k_end), // kernel image
        (m_begin, m_end), // multiboot data
    ];
    let mmap = multiboot_info.mem_map.unwrap();
    frame_allocator::initialize(mmap, protected_regions);
    // Boot diagnostics.
    println!("boot loader: {}", &multiboot_info.boot_loader_name.unwrap_or("none"));
    println!("cmd line: {}", &multiboot_info.cmd_line.unwrap_or("none"));
    println!("");
    println!("protected memory regions");
    println!(" kernel: ({:#x}, {:#x}) size {} KiB", k_begin, k_end, (k_end - k_begin) / 1024);
    println!(" multiboot: ({:#x}, {:#x}) size {} KiB", m_begin, m_end, (m_end - m_begin) / 1024);
    println!("first free page 0x{:x}", frame_alloc().addr());
    let free_pages = get_fallocator().free_pages();
    println!("free pages {} ({} MiB)", free_pages, free_pages / 256);
    let _ = paging::initialize();
    // set up interrupt handlers
    interrupts::initialize();
    pic::initialize();
    gdt::initialize();
    tss::initialize();
    syscall::initialize();
    main::kmain();
}
/// Saved CPU register state. `repr(packed)` keeps the layout exactly as
/// declared, with no compiler-inserted padding.
///
/// NOTE(review): the field order presumably mirrors assembly save/restore
/// code elsewhere — confirm before reordering anything here.
#[repr(packed)]
pub struct Registers {
    pub rax: u64,
    pub rbx: u64,
    pub rcx: u64,
    pub rdx: u64,
    pub rsi: u64,
    pub rdi: u64,
    pub rbp: u64,
    pub r8: u64,
    pub r9: u64,
    pub r10: u64,
    pub r11: u64,
    pub r12: u64,
    pub r13: u64,
    pub r14: u64,
    pub r15: u64,
    pub cs: u16,
    pub ss: u16,
    pub ds: u16,
    pub es: u16,
    pub fs: u16,
    pub gs: u16,
    _pad: u32, // 12 bytes of selectors would otherwise unalign the following
    pub rip: u64,
    pub rflags: u64,
    pub rsp: u64,
}
impl Registers {
    /// Register state for entering user mode at `rip` with stack `rsp`:
    /// user code/data selectors from the GDT, rflags 0x200 (bit 9, IF —
    /// interrupts enabled), and every general-purpose register cleared.
    pub fn default_user(rip: usize, rsp: usize) -> Self {
        use self::gdt::{USR_CODE_OFFSET, USR_DATA_OFFSET};
        Registers {
            // General-purpose registers all start zeroed.
            rax: 0, rbx: 0, rcx: 0, rdx: 0, rsi: 0, rdi: 0, rbp: 0,
            r8: 0, r9: 0, r10: 0, r11: 0, r12: 0, r13: 0, r14: 0, r15: 0,
            // Code/stack segments point at the user-mode GDT entries.
            cs: USR_CODE_OFFSET as u16,
            ss: USR_DATA_OFFSET as u16,
            // Remaining data selectors are null.
            ds: 0, es: 0, fs: 0, gs: 0,
            _pad: 0,
            // Entry point and stack supplied by the caller.
            rip: rip as u64,
            rsp: rsp as u64,
            rflags: 0x200, // TODO standardize rflags
        }
    }
}
/// Panics unless the CPU reports every feature this kernel depends on,
/// then logs the detected vendor, family, and model.
fn assert_minimum_cpuid() {
    let cpuid = intrinsics::get_cpuid();
    assert!(cpuid.supported, "minimum processor requirements unmet");
    // presumably the rest of these requirements could be eliminated with extra work
    assert!(cpuid.pse()); // page size extensions
    assert!(cpuid.pae()); // physical address extension
    assert!(cpuid.page1gb()); // 1 GiB pages
    assert!(cpuid.msr()); // model-specific registers
    assert!(cpuid.apic());
    assert!(cpuid.syscall());
    assert!(cpuid.rdpid() || cpuid.rdtscp()); // read processor id
    println!("running on {} (family {:02x}, model {:02x})", cpuid.vendor_id().unwrap_or("unknown"),
        cpuid.effective_family().unwrap(),
        cpuid.effective_model().unwrap(),
    );
}
|
use std::{fs::File, io::Read};
use crate::code::*;
use crate::symbol_table::SymbolTable;
/// A parsed Hack assembly C-instruction of the form `dest=comp;jump`,
/// where the `dest=` and `;jump` parts are optional.
#[derive(Debug)]
pub struct CCommand {
    dest: Option<String>, // destination mnemonic, present iff `=` appeared
    comp: String,         // computation mnemonic (always present)
    jump: Option<String>, // jump mnemonic, present iff `;` appeared
}
impl CCommand {
    /// Parses `dest=comp;jump`; both `dest=` and `;jump` are optional.
    /// Only the first field after `;` (and after `=`) is kept; any
    /// further `;`/`=` separated fields are ignored.
    fn new(s: &str) -> Self {
        let mut semi_fields = s.split(';');
        let dest_comp = semi_fields.next().unwrap_or("");
        let jump = semi_fields.next().map(str::to_string);
        let mut eq_fields = dest_comp.split('=');
        let first = eq_fields.next().unwrap_or("");
        match eq_fields.next() {
            // `dest=comp` form.
            Some(comp) => CCommand {
                dest: Some(first.to_string()),
                comp: comp.to_string(),
                jump,
            },
            // Bare `comp` form.
            None => CCommand {
                dest: None,
                comp: first.to_string(),
                jump,
            },
        }
    }
}
/// One line of a Hack assembly source file, classified by command type.
#[derive(Debug)]
pub enum Line {
    ACommand(String),   // `@value` — address instruction (number or symbol)
    CCommand(CCommand), // `dest=comp;jump` — compute instruction
    LCommand(String),   // `(LABEL)` — label pseudo-command
    NotCommand,         // blank or comment-only line
}
/// All parsed lines of an assembly source file.
#[derive(Debug)]
pub struct Lines {
    line_number: usize, // NOTE(review): set to 0 at construction, never read here
    pub lines: Vec<Line>,
}
impl Lines {
    /// Reads and classifies every line of the assembly file at `path`.
    ///
    /// Comments (`//…`) are stripped, whitespace removed, and each line
    /// is classified as an A-, C-, or L-command (`NotCommand` if empty).
    ///
    /// # Panics
    /// Panics if the file cannot be opened or read.
    pub fn new(path: &str) -> Self {
        let mut file = File::open(path).expect("File not found!");
        let mut strings = String::new();
        file.read_to_string(&mut strings).expect("Something went wrong reading the file!");
        let mut lines = Vec::new();
        for line in strings.split('\n') {
            // Drop any trailing `//` comment and surrounding whitespace.
            let line = line.split("//").next().unwrap_or("").trim();
            if line.is_empty() {
                lines.push(Line::NotCommand);
            } else if line.starts_with('@') {
                let mut line = line.to_string();
                line.retain(|c| c != '@' && c != ' ');
                lines.push(Line::ACommand(line));
            } else if line.starts_with('(') {
                let mut line = line.to_string();
                line.retain(|c| c != '(' && c != ')' && c != ' ');
                lines.push(Line::LCommand(line));
            } else {
                let mut line = line.to_string();
                line.retain(|c| c != ' ');
                lines.push(Line::CCommand(CCommand::new(&line)));
            }
        }
        Lines { line_number: 0, lines }
    }

    /// Translates every parsed line into its 16-bit binary string form.
    /// A-commands resolve non-numeric values through `symbol_table`;
    /// L-commands and blank lines emit nothing.
    pub fn to_binary(&self, symbol_table: &SymbolTable) -> Vec<String> {
        let mut binaries = Vec::new();
        for line in &self.lines {
            match line {
                Line::ACommand(s) => {
                    // Numeric literal, or a symbol resolved via the table.
                    if let Ok(value) = s.parse::<usize>() {
                        binaries.push(format!("0{:015b}", value));
                    } else {
                        let value = symbol_table.get_address(&s);
                        binaries.push(format!("0{:015b}", value));
                    }
                },
                Line::CCommand(c) => {
                    let dest = dest_to_binary(&c.dest);
                    let comp = comp_to_binary(&c.comp);
                    let jump = jump_to_binary(&c.jump);
                    binaries.push(format!("111{}{}{}", comp, dest, jump));
                },
                // Labels and blank lines produce no machine code.
                Line::LCommand(_) => {}
                Line::NotCommand => {}
            }
        }
        binaries
    }
}
|
pub mod component;
|
use std::path::PathBuf;
use structopt::StructOpt;
/// Command-line interface definition, parsed by structopt. (The explicit
/// `about` attribute takes precedence over this doc comment in `--help`.)
#[derive(Debug, StructOpt)]
#[structopt(about = "GUI viewer for `nix store --query --tree` output.")]
pub struct Opts {
    /// PATH in /nix/store to view references of
    #[structopt(name = "PATH", parse(from_os_str))]
    pub nix_store_path: PathBuf,
}
impl Opts {
    /// Parses options from `std::env::args`; structopt prints usage and
    /// exits the process on invalid input.
    pub fn parse_from_args() -> Self {
        Opts::from_args()
    }
}
|
use std::str;
// Seven-segment bit masks for the digits 0-9, one bit per segment;
// bits are tested by `LCD::test` with a shifted mask.
static TABLE: [uint, ..10] = [
    0b1110111,
    0b0100100,
    0b1011101,
    0b1101101,
    0b0101110,
    0b1101011,
    0b1111011,
    0b0100101,
    0b1111111,
    0b1101111
];
static H_SYM: &'static str = "-"; // horizontal segment
static V_SYM: &'static str = "|"; // vertical segment
static S_SYM: &'static str = " "; // blank / spacing
// NOTE(review): this file is written in a pre-1.0 Rust dialect (`uint`,
// `~str`, `do`/`for` closure loops) and will not build with modern rustc.
struct LCD {
    numbers: ~[uint], // digits to render
    segments: uint    // horizontal width of each digit
}
impl LCD {
    // Parses each character of `input` as a decimal digit; characters
    // that fail to parse become 0.
    fn new(input: ~str, segments: uint) -> LCD {
        let mut numbers = ~[];
        for input.iter().advance |c| {
            let option = std::int::from_str(str::from_char(c));
            let n : uint = match option {
                None => 0,
                Some(y) => { y }
            } as uint;
            numbers.push(n);
        }
        LCD { numbers: numbers, segments: segments }
    }
    // True when bit `position` of the digit's segment mask is lit.
    fn test(&self, number: uint, position: int) -> bool {
        return (TABLE[number] & (0b0000001 << position)) > 0;
    }
    // Renders the digits over 5 text rows: even rows draw horizontal
    // bars; odd rows repeat `segments` times and draw vertical bars.
    // `offset` advances through the 7 segment bits as rows progress.
    fn draw(&self) {
        let mut offset = 0;
        for std::int::range(0, 5) |row| {
            if (row % 2 == 0) {
                for self.numbers.iter().advance |&number| {
                    let symbol = if (self.test(number, row + offset)) { H_SYM } else { S_SYM };
                    let mut symbols = ~"";
                    for self.segments.times {
                        symbols = symbols + symbol;
                    }
                    print(S_SYM + symbols + S_SYM + S_SYM);
                }
                print("\n");
            } else {
                for self.segments.times {
                    for self.numbers.iter().advance |&number| {
                        for std::int::range(0, 2) |column| {
                            let symbol = if (self.test(number, row + column + offset)) { V_SYM } else { S_SYM };
                            let mut spaces = ~"";
                            let spaces_count = if (column == 0) { self.segments } else { 1 };
                            for spaces_count.times {
                                spaces = spaces + S_SYM;
                            }
                            print(symbol + spaces);
                        }
                    }
                    print("\n");
                }
                offset = offset + 1;
            }
        }
    }
}
// Entry point: `LCD [-s SEGMENTS] NUMBER` — renders NUMBER as a
// simulated seven-segment display, SEGMENTS wide per digit (default 2).
fn main() {
    let args = std::os::args();
    if (args.len() > 1) {
        let mut input: ~str;
        let mut segments: uint = 2;
        // Optional `-s N` flag sets the per-digit width.
        if (args[1] == ~"-s") {
            match std::int::from_str(args[2]) {
                Some(n) => { segments = n as uint },
                None => {}
            };
            input = args[3];
        } else {
            input = args[1];
        }
        let lcd = LCD::new(input, segments);
        lcd.draw();
    } else {
        println("Usage: LCD [-s SEGMENTS] NUMBER");
    }
}
|
extern crate core;
extern crate rk3399_tools;
/// Handle for the PMU Cortex-M0 coprocessor; behaviour lives in the `M0` impl.
pub struct PerilpM0 { }
/// Control interface for the Cortex-M0 coprocessor: configure its boot
/// remap address and security settings, then power it on.
pub trait M0 {
    /// Configures secure-mode control and remaps the M0 boot address to `start`.
    fn setup(&mut self, pmusgrf: &rk3399_tools::PMUSGRF,
        pmucru: &rk3399_tools::PMUCRU, start: u32);
    /// Enables the M0's clocks and releases its resets.
    fn on(&mut self, pmucru: &rk3399_tools::PMUCRU);
}
// WMSK_BIT(x) => BIT(x + 16) => 1 << (x + 16)
// BIT_WITH_WMASK(x) => BIT(x) | WMSK_BIT(x) => (1 << x) | (1 << (x + 16))
// BITS_WITH_WMASK(x, y, z) ->
impl M0 for PerilpM0 {
    /// Puts the PMU M0 into secure mode and remaps its flash/boot
    /// address to `start` via the PMU SGRF remap registers (mid 16 bits
    /// then high 4 bits of the address).
    fn setup(&mut self, pmusgrf: &rk3399_tools::PMUSGRF,
        pmucru: &rk3399_tools::PMUCRU, start: u32) {
        // put PMU M0 into secure mode
        pmusgrf.pmu_con0.write(|w| unsafe { w.
            sgrf_pmu_cm0_mst_ctrl().clear_bit().
            write_mask().bits(1 << 7)
        });
        // m0_init also puts secure master for perilp
        // but there's something fishy going on:
        // docs say sgrf_con_perim0_secure_ctrl is [13] for PERILP
        // code does [12] for PMU
        // secure master table has:
        // [12] - perlip
        // [13] - pmu
        //
        // maybe they're flipped around?
        // but then why are there duplicate secure settings?
        //
        // let's go with the code for now...
        // sets to 0
        pmusgrf.soc_con6.write(|w| unsafe { w.
            write_enable().bits(1 << 12)
        });
        // middle 16 bits
        pmusgrf.pmu_con3.write(|w| unsafe { w.
            pmu_remap_flash_rom_mid().bits((start >> 12) as u16).
            write_mask().bits(0xffff)
        });
        // high 4 bits
        pmusgrf.pmu_con7.write(|w| unsafe { w.
            pmu_remap_flash_rom_high().bits((start >> 28) as u8).
            write_mask().bits(0xf)
        });
        // writes 0x2 to this?
        // m0_init also disables clk_center1 but probably a bug
        // but surely we just want to set first bit to 1?
        pmucru.pmucru_gatedis_con0.modify(|_, w| w.
            clk_pmum0_gating_dis().clear_bit().
            clk_center1_gating_dis().set_bit() // FIXME: do we need this?
        );
        // FIXME: do we actually need this? find out what it does!
        // write_volatile::<u32>(PMUCRU_CLKGATE_CON(28), 1 << (16 + 5));
    }
    /// Powers the M0 on: ungates its four clocks, releases hreset, waits
    /// briefly, then releases poreset.
    fn on (&mut self, pmucru: &rk3399_tools::PMUCRU) {
        // enable clocks
        pmucru.pmucru_clkgate_con2.write(|w| w.
            fclk_cm0s_en().clear_bit().
            sclk_cm0s_en().clear_bit().
            hclk_cm0s_en().clear_bit().
            dclk_cm0s_en().clear_bit()
        );
        // pull hresetn_cm0s_pmu high
        pmucru.pmucru_softrst_con0.write(|w| unsafe { w.
            hresetn_cm0s_pmu_req().clear_bit().
            write_mask().bits(1 << 2)
        });
        // sleep for 5 usecs?
        // NOTE(review): uncalibrated busy-wait — actual delay depends on
        // CPU clock and optimization; confirm it is long enough.
        for _ in 1..99999 {
            unsafe { asm!("nop"); }
        }
        // now pull poresetn_cm0s_pmu high
        pmucru.pmucru_softrst_con0.write(|w| unsafe { w.
            poresetn_cm0s_pmu_req().clear_bit().
            write_mask().bits(1 << 5)
        });
    }
}
use clumsy::fs::inmem::InMemFileSystem;
use clumsy::fs::mac::MacOSFileSystem;
use clumsy::fs::FileSystem;
use clumsy::object::GitObject;
use clumsy::*;
use std::io;
use libflate::zlib::{Decoder, Encoder};
use std::fs::File;
use std::io::prelude::*;
/// CLI entry point: dispatches the `cat-file`, `hash-object`, `add`,
/// `commit`, `switch`, and `log` subcommands.
///
/// Missing positional arguments now yield an `InvalidInput` error
/// instead of a panic (the previous `.unwrap()` calls).
fn main() -> io::Result<()> {
    let args: Vec<String> = std::env::args().collect();
    // Fetch a required positional argument or fail gracefully.
    let arg = |i: usize| -> io::Result<String> {
        args.get(i)
            .cloned()
            .ok_or_else(|| io::Error::new(io::ErrorKind::InvalidInput, "missing argument"))
    };
    let fs = MacOSFileSystem::init()?;
    let mut git = Git::new(fs);
    match arg(1)?.as_str() {
        "cat-file" => {
            let obj = cat_file_p(arg(2)?)?;
            println!("{}", obj);
            Ok(())
        }
        "hash-object" => {
            let blob = hash_object(arg(2)?)?;
            println!("{}", hex::encode(blob.calc_hash()));
            Ok(())
        }
        "add" => {
            let path = arg(2)?;
            let bytes = git.file_system.read(path.clone())?;
            add(&mut git, path, &bytes)
        }
        "commit" => commit(&mut git, arg(2)?),
        "switch" => switch(&mut git, arg(2)?),
        "log" => {
            let obj = log(&mut git)?;
            obj.iter().for_each(|x| println!("{}", x));
            Ok(())
        }
        // Unknown subcommands are silently ignored (original behaviour).
        _ => Ok(()),
    }
}
pub fn cat_file_p(hash: String) -> io::Result<GitObject> {
let (sub_dir, file) = hash.split_at(2);
let path = format!(".git/objects/{}/{}", sub_dir, file);
let mut file = File::open(path)?;
let mut buf = Vec::new();
file.read_to_end(&mut buf)?;
let mut d = Decoder::new(&buf[..])?;
let mut buf = Vec::new();
d.read_to_end(&mut buf)?;
GitObject::new(&buf).ok_or(io::Error::from(io::ErrorKind::InvalidData))
}
pub fn hash_object(path: String) -> io::Result<object::blob::Blob> {
let mut file = File::open(path)?;
let mut buf = Vec::new();
file.read_to_end(&mut buf)?;
object::blob::Blob::from(&buf).ok_or(io::Error::from(io::ErrorKind::InvalidInput))
}
/// Implements `git add` for one file: writes the blob object, then
/// records the file in the index.
pub fn add<F: FileSystem>(git: &mut Git<F>, file_name: String, bytes: &[u8]) -> io::Result<()> {
    // git hash-object -w path
    let blob = git.hash_object(&bytes).map(GitObject::Blob)?;
    git.write_object(&blob)?;
    // git update-index --add --cacheinfo <mode> <hash> <name>
    let index = git.read_index().and_then(|x| git.ls_files_stage(&x))?;
    let index = git.update_index(index, &blob.calc_hash(), file_name)?;
    git.write_index(&index)?;
    // NOTE(review): debug trace left in — consider removing.
    println!("write_index");
    Ok(())
}
/// Implements `git commit`: writes the current index as a tree, creates
/// a commit object pointing at it, and advances the current branch ref.
///
/// NOTE(review): the author name and email are hard-coded.
fn commit<F: FileSystem>(git: &mut Git<F>, message: String) -> io::Result<()> {
    // git write-tree
    let tree = git.write_tree().map(GitObject::Tree)?;
    git.write_object(&tree)?;
    let tree_hash = tree.calc_hash();
    // echo message | git commit-tree <hash>
    let commit = git
        .commit_tree(
            "uzimaru0000".to_string(),
            "shuji365630@gmail.com".to_string(),
            hex::encode(tree_hash),
            message,
        )
        .map(GitObject::Commit)?;
    git.write_object(&commit)?;
    // git update-ref refs/heads/master <hash>
    git.update_ref(git.head_ref()?, &commit.calc_hash())?;
    Ok(())
}
/// Implements `git log`: walks the commit chain starting at HEAD,
/// following each commit's parent until none remains.
fn log<F: FileSystem>(git: &mut Git<F>) -> io::Result<Vec<GitObject>> {
    // Resolve HEAD -> branch ref -> commit object.
    let commit = git
        .head_ref()
        .and_then(|x| git.read_ref(x))
        .and_then(|x| git.read_object(x))
        .and_then(|x| git.cat_file_p(&x))?;
    // `scan` carries the next commit as state: each step yields the
    // current commit and loads its parent; iteration stops when the
    // state becomes `None` (no parent, or the parent failed to load).
    Ok((0..)
        .scan(Some(commit), |st, _| {
            let next = match st {
                Some(GitObject::Commit(commit)) => {
                    if let Some(parent) = &commit.parent {
                        git
                            .read_object(parent.clone())
                            .and_then(|x| git.cat_file_p(&x))
                            .ok()
                    } else {
                        None
                    }
                }
                _ => None,
            };
            let curr = st.clone();
            *st = next;
            curr
        })
        .collect::<Vec<_>>())
}
/// Switches to `branch`: resets the index to the branch's commit,
/// applies the resulting diff to the working tree, and points
/// `.git/HEAD` at the new branch.
fn switch<F: FileSystem>(git: &mut Git<F>, branch: String) -> io::Result<()> {
    let commit_hash = git.read_ref(format!("refs/heads/{}", branch))?;
    let diff = git.reset_index(commit_hash.clone())?;
    git.diff_apply(diff)?;
    // Load the target commit so we can rebuild the index from its tree.
    let commit = git
        .read_object(commit_hash)
        .and_then(|x| git.cat_file_p(&x))
        .and_then(|x| match x {
            GitObject::Commit(commit) => Ok(commit),
            _ => Err(io::Error::from(io::ErrorKind::InvalidData)),
        })?;
    let idx = git.tree2index(commit.tree)?;
    git.file_system.write(
        ".git/HEAD".to_string(),
        format!("ref: refs/heads/{}", branch).as_bytes(),
    )?;
    git.write_index(&idx)?;
    Ok(())
}
|
//! testing closure with a linear offset
use anyhow::Result;
use approx::assert_abs_diff_eq;
use ndarray::{array, Array1, Array2};
use ndarray_glm::{Linear, ModelBuilder};
#[test]
/// Check that the result is the same in linear regression when subtracting
/// offsets from the y values as it is when adding linear offsets to the model.
fn lin_off_0() -> Result<()> {
    let y_data: Array1<f64> = array![0.6, 0.3, 0.5, 0.1];
    let offsets: Array1<f64> = array![0.1, -0.1, 0.2, 0.0];
    let x_data: Array2<f64> = array![[1.2, 0.7], [2.1, 0.8], [1.5, 0.6], [1.6, 0.3]];
    // Fit with explicit linear offsets.
    let lin_model = ModelBuilder::<Linear>::data(y_data.view(), x_data.view())
        .linear_offset(offsets.clone())
        .build()?;
    let lin_fit = lin_model.fit()?;
    // Fit the same data with the offsets subtracted from y instead.
    let y_offset = y_data - offsets;
    let lin_model_off = ModelBuilder::<Linear>::data(y_offset.view(), x_data.view()).build()?;
    let lin_fit_off = lin_model_off.fit()?;
    // Ensure that the two methods give consistent results.
    // (Leftover dbg! traces removed; assertion output suffices on failure.)
    assert_abs_diff_eq!(
        lin_fit.result,
        lin_fit_off.result,
        epsilon = 16.0 * f64::EPSILON
    );
    Ok(())
}
#[test]
// Ensure that the linear offset term adjusts all values sanely.
// TODO: similar test for all types of regression, to ensure they are using
// linear_predictor() properly.
fn lin_off_1() -> Result<()> {
    let data_x = array![
        [-0.23, 2.1, 0.7],
        [1.2, 4.5, 1.3],
        [0.42, 1.8, 0.97],
        [0.4, 3.2, -0.3]
    ];
    let data_y = array![1.23, 0.91, 2.34, 0.62];
    // Baseline fit without any offset.
    let model = ModelBuilder::<Linear>::data(data_y.view(), data_x.view()).build()?;
    let fit = model.fit()?;
    let result = fit.result;
    // a constant linear offset to add for easy checking
    let lin_off = 1.832;
    let lin_offsets = array![lin_off, lin_off, lin_off, lin_off];
    let model_off = ModelBuilder::<Linear>::data(data_y.view(), data_x.view())
        .linear_offset(lin_offsets)
        .build()?;
    // (Leftover dbg!(off_fit.n_iter) trace removed.)
    let off_fit = model_off.fit()?;
    let off_result = off_fit.result;
    // A constant offset should shift only the intercept term by lin_off.
    let mut compensated_offset_result = off_result;
    compensated_offset_result[0] += lin_off;
    assert_abs_diff_eq!(
        result,
        compensated_offset_result,
        epsilon = 32. * f64::EPSILON
    );
    Ok(())
}
|
use serde::{Deserialize, Serialize};
//;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
// RDF
//;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
//
/// An RDF list cell: `rdf:first` holds the head, `rdf:rest` the tail.
#[derive(Debug,Serialize, Deserialize,Clone,Hash)]
pub struct RDFList {
    //#[serde(rename = "rdf:type")]
    //pub rdf_type: Option<Vec<Object>>,
    #[serde(rename = "rdf:first")]
    pub rdf_first: Vec<Object>,
    #[serde(rename = "rdf:rest")]
    pub rdf_rest: Vec<Object>,
}
/// An `owl:members` node (e.g. on `owl:AllDifferent`/`owl:AllDisjointClasses`).
#[derive(Debug,Serialize, Deserialize,Clone,Hash)]
pub struct Members {
    #[serde(rename = "rdf:type")]//TODO: the type is *not* optional for 'owl:members'?
    pub rdf_type: Option<Vec<Object>>,
    #[serde(rename = "owl:members")]
    pub members: Vec<Object>,
}
/// An `owl:distinctMembers` node (pairwise-distinct individuals).
#[derive(Debug,Serialize, Deserialize,Clone,Hash)]
pub struct DistinctMembers {
    #[serde(rename = "rdf:type")]
    pub rdf_type: Option<Vec<Object>>,
    #[serde(rename = "owl:distinctMembers")]
    pub distinct_members: Vec<Object>,
}
//;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
// Restrictions
//;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
/// `owl:someValuesFrom` restriction (existential: some value of the property is in the class).
#[derive(Debug,Serialize, Deserialize,Clone,Hash)]
pub struct SomeValuesFrom {
    #[serde(rename = "rdf:type")]
    pub rdf_type: Option<Vec<Object>>,
    #[serde(rename = "owl:onProperty")]
    pub owl_on_property: Vec<Object>,
    #[serde(rename = "owl:someValuesFrom")]
    pub owl_some_values_from: Vec<Object>,
}
/// `owl:allValuesFrom` restriction (universal: all values of the property are in the class).
#[derive(Debug,Serialize, Deserialize,Clone,Hash)]
pub struct AllValuesFrom {
    #[serde(rename = "rdf:type")]
    pub rdf_type: Option<Vec<Object>>,
    #[serde(rename = "owl:onProperty")]
    pub owl_on_property: Vec<Object>,
    #[serde(rename = "owl:allValuesFrom")]
    pub owl_all_values_from: Vec<Object>,
}
/// `owl:hasValue` restriction (the property has a specific value).
#[derive(Debug,Serialize, Deserialize,Clone,Hash)]
pub struct HasValue {
    #[serde(rename = "rdf:type")]
    pub rdf_type: Option<Vec<Object>>,
    #[serde(rename = "owl:onProperty")]
    pub owl_on_property: Vec<Object>,
    #[serde(rename = "owl:hasValue")]
    pub owl_has_value: Vec<Object>,
}
/// `owl:minCardinality` restriction (unqualified lower bound).
#[derive(Debug,Serialize, Deserialize,Clone,Hash)]
pub struct MinCardinality {
    #[serde(rename = "rdf:type")]
    pub rdf_type: Option<Vec<Object>>,
    #[serde(rename = "owl:onProperty")]
    pub owl_on_property: Vec<Object>,
    #[serde(rename = "owl:minCardinality")]
    pub owl_min_cardinality: Vec<Object>,
}
/// `owl:minQualifiedCardinality` restriction qualified by `owl:onClass`.
#[derive(Debug,Serialize, Deserialize,Clone,Hash)]
pub struct MinObjectQualifiedCardinality {
    #[serde(rename = "rdf:type")]
    pub rdf_type: Option<Vec<Object>>,
    #[serde(rename = "owl:onProperty")]
    pub owl_on_property: Vec<Object>,
    #[serde(rename = "owl:minQualifiedCardinality")]
    pub owl_min_qualified_cardinality: Vec<Object>,
    #[serde(rename = "owl:onClass")]
    pub owl_on_class: Vec<Object>,
}
/// `owl:minQualifiedCardinality` restriction qualified by `owl:onDataRange`.
#[derive(Debug,Serialize, Deserialize,Clone,Hash)]
pub struct MinDataQualifiedCardinality {
    #[serde(rename = "rdf:type")]
    pub rdf_type: Option<Vec<Object>>,
    #[serde(rename = "owl:onProperty")]
    pub owl_on_property: Vec<Object>,
    #[serde(rename = "owl:minQualifiedCardinality")]
    pub owl_min_qualified_cardinality: Vec<Object>,
    #[serde(rename = "owl:onDataRange")]
    pub owl_on_datarange: Vec<Object>,
}
/// `owl:maxCardinality` restriction (unqualified upper bound).
#[derive(Debug,Serialize, Deserialize,Clone,Hash)]
pub struct MaxCardinality {
    #[serde(rename = "rdf:type")]
    pub rdf_type: Option<Vec<Object>>,
    #[serde(rename = "owl:onProperty")]
    pub owl_on_property: Vec<Object>,
    #[serde(rename = "owl:maxCardinality")]
    pub owl_max_cardinality: Vec<Object>,
}
/// `owl:maxQualifiedCardinality` restriction qualified by `owl:onClass`.
#[derive(Debug,Serialize, Deserialize,Clone,Hash)]
pub struct MaxObjectQualifiedCardinality {
    #[serde(rename = "rdf:type")]
    pub rdf_type: Option<Vec<Object>>,
    #[serde(rename = "owl:onProperty")]
    pub owl_on_property: Vec<Object>,
    #[serde(rename = "owl:maxQualifiedCardinality")]
    pub owl_max_qualified_cardinality: Vec<Object>,
    #[serde(rename = "owl:onClass")]
    pub owl_on_class: Vec<Object>,
}
/// `owl:maxQualifiedCardinality` restriction qualified by `owl:onDataRange`.
#[derive(Debug,Serialize, Deserialize,Clone,Hash)]
pub struct MaxDataQualifiedCardinality {
    #[serde(rename = "rdf:type")]
    pub rdf_type: Option<Vec<Object>>,
    #[serde(rename = "owl:onProperty")]
    pub owl_on_property: Vec<Object>,
    #[serde(rename = "owl:maxQualifiedCardinality")]
    pub owl_max_qualified_cardinality: Vec<Object>,
    #[serde(rename = "owl:onDataRange")]
    pub owl_on_datarange: Vec<Object>,
}
/// `owl:cardinality` restriction (unqualified exact count).
#[derive(Debug,Serialize, Deserialize,Clone,Hash)]
pub struct ExactCardinality {
    #[serde(rename = "rdf:type")]
    pub rdf_type: Option<Vec<Object>>,
    #[serde(rename = "owl:onProperty")]
    pub owl_on_property: Vec<Object>,
    #[serde(rename = "owl:cardinality")]
    pub owl_cardinality: Vec<Object>,
}
/// `owl:qualifiedCardinality` restriction qualified by `owl:onClass`.
#[derive(Debug,Serialize, Deserialize,Clone,Hash)]
pub struct ExactObjectQualifiedCardinality {
    #[serde(rename = "rdf:type")]
    pub rdf_type: Option<Vec<Object>>,
    #[serde(rename = "owl:onProperty")]
    pub owl_on_property: Vec<Object>,
    #[serde(rename = "owl:qualifiedCardinality")]
    pub owl_qualified_cardinality: Vec<Object>,
    #[serde(rename = "owl:onClass")]
    pub owl_on_class: Vec<Object>,
}
/// `owl:qualifiedCardinality` restriction qualified by `owl:onDataRange`.
#[derive(Debug,Serialize, Deserialize,Clone,Hash)]
pub struct ExactDataQualifiedCardinality {
    #[serde(rename = "rdf:type")]
    pub rdf_type: Option<Vec<Object>>,
    #[serde(rename = "owl:onProperty")]
    pub owl_on_property: Vec<Object>,
    #[serde(rename = "owl:qualifiedCardinality")]
    pub owl_qualified_cardinality: Vec<Object>,
    #[serde(rename = "owl:onDataRange")]
    pub owl_on_datarange: Vec<Object>,
}
/// `owl:hasSelf` restriction (the property relates an individual to itself).
#[derive(Debug,Serialize, Deserialize,Clone,Hash)]
pub struct HasSelf {
    #[serde(rename = "rdf:type")]
    pub rdf_type: Option<Vec<Object>>,
    #[serde(rename = "owl:onProperty")]
    pub owl_on_property: Vec<Object>,
    #[serde(rename = "owl:hasSelf")]
    pub owl_has_self: Vec<Object>,
}
//;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
// OWL propositional connectives
//;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
/// `owl:intersectionOf` class expression (conjunction).
#[derive(Debug,Serialize, Deserialize,Clone,Hash)]
pub struct IntersectionOf {
    #[serde(rename = "rdf:type")]
    pub rdf_type: Option<Vec<Object>>,
    #[serde(rename = "owl:intersectionOf")]
    pub owl_intersection_of: Vec<Object>,
}
/// `owl:unionOf` class expression (disjunction).
#[derive(Debug,Serialize, Deserialize,Clone,Hash)]
pub struct UnionOf {
    #[serde(rename = "rdf:type")]
    pub rdf_type: Option<Vec<Object>>,
    #[serde(rename = "owl:unionOf")]
    pub owl_union_of: Vec<Object>,
}
/// `owl:oneOf` class expression (enumeration of individuals/values).
#[derive(Debug,Serialize, Deserialize,Clone,Hash)]
pub struct OneOf {
    #[serde(rename = "rdf:type")]
    pub rdf_type: Option<Vec<Object>>,
    #[serde(rename = "owl:oneOf")]
    pub owl_one_of: Vec<Object>,
}
/// `owl:complementOf` class expression (negation).
#[derive(Debug,Serialize, Deserialize,Clone,Hash)]
pub struct ComplementOf {
    #[serde(rename = "rdf:type")]
    pub rdf_type: Option<Vec<Object>>,
    #[serde(rename = "owl:complementOf")]
    pub owl_complement_of: Vec<Object>,
}
/// Negative object-property assertion: source is NOT related to target by the property.
#[derive(Debug,Serialize, Deserialize,Clone,Hash)]
pub struct NegativeObjectPropertyAssertion {
    #[serde(rename = "rdf:type")]
    pub rdf_type: Option<Vec<Object>>,
    #[serde(rename = "owl:sourceIndividual")]
    pub source_individual: Vec<Object>,
    #[serde(rename = "owl:assertionProperty")]
    pub assertion_property: Vec<Object>,
    #[serde(rename = "owl:targetIndividual")]
    pub target_individual: Vec<Object>,
}
/// Negative data-property assertion: source does NOT have the target value.
#[derive(Debug,Serialize, Deserialize,Clone,Hash)]
pub struct NegativeDataPropertyAssertion {
    #[serde(rename = "rdf:type")]
    pub rdf_type: Option<Vec<Object>>,
    #[serde(rename = "owl:sourceIndividual")]
    pub source_individual: Vec<Object>,
    #[serde(rename = "owl:assertionProperty")]
    pub assertion_property: Vec<Object>,
    #[serde(rename = "owl:targetValue")]
    pub target_value: Vec<Object>,
}
//;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
// OWL Object Properties
//;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
/// `owl:inverseOf` property expression.
#[derive(Debug,Serialize, Deserialize,Clone,Hash)]
pub struct InverseOf {
    #[serde(rename = "owl:inverseOf")]
    pub owl_inverse_of: Vec<Object>,
}
/// A single RDF object value: the OWL payload, its datatype (as a
/// string), and optional metadata.
#[derive(Debug,Serialize, Deserialize,Clone,Hash)]
pub struct Object {
    pub object: OWL,
    pub datatype: String,
    pub meta: Option<String>,
    //pub annotation: String,
}
/// Any OWL object-position value: a named entity (plain string) or one
/// of the anonymous structures defined above. `untagged` makes serde
/// pick the first variant whose shape matches the input.
#[derive(Debug,Serialize, Deserialize,Clone,Hash)]
#[serde(untagged)]
pub enum OWL {
    Named(String),
    //Number(i64), //TODO type numbers for cardinality restrictions ?
    SomeValuesFrom(SomeValuesFrom),
    AllValuesFrom(AllValuesFrom),
    HasValue(HasValue),
    HasSelf(HasSelf),
    MinCardinality(MinCardinality),
    MaxCardinality(MaxCardinality),
    ExactCardinality(ExactCardinality),
    MinObjectQualifiedCardinality(MinObjectQualifiedCardinality),
    MaxObjectQualifiedCardinality(MaxObjectQualifiedCardinality),
    ExactObjectQualifiedCardinality(ExactObjectQualifiedCardinality),
    MinDataQualifiedCardinality(MinDataQualifiedCardinality),
    MaxDataQualifiedCardinality(MaxDataQualifiedCardinality),
    ExactDataQualifiedCardinality(ExactDataQualifiedCardinality),
    IntersectionOf(IntersectionOf),
    UnionOf(UnionOf),
    OneOf(OneOf),
    ComplementOf(ComplementOf),
    InverseOf(InverseOf),
    RDFList(RDFList),
    Members(Members),
    DistinctMembers(DistinctMembers),
    NegativeObjectPropertyAssertion(NegativeObjectPropertyAssertion),
    NegativeDataPropertyAssertion(NegativeDataPropertyAssertion),
}
|
/// A spore-producing organ.
pub struct Sporangium {}
/// A reproductive spore produced by a `Sporangium`.
pub struct Spore {}
/// A single cell (module-private).
struct Cell {}
/// Produces one `Spore` from the given `Sporangium`.
///
/// Fix: `Spore` is declared with braces (`pub struct Spore {}`), so it
/// must be constructed as `Spore {}` — the bare path `Spore` does not
/// name a value and failed to compile.
pub fn produce_spore(factory: &mut Sporangium) -> Spore {
    Spore {}
}
/// Recombines genetic material within a cell (stub).
fn recombine(parent: &mut Cell) {}
use advent_libs::input_helpers;
fn main() {
println!("Advent of Code 2020 - Day 3");
println!("---------------------------");
// Read in puzzle input
let mut input = input_helpers::read_puzzle_input_to_string(3);
// Strip out the carriage returns (on Windows)
input.retain(|c| c != '\r');
// Parse to vector of strings on newline
let input_vec: Vec<String> = input_helpers::split_string_to_vector(&input, "\n");
// Represent map as 2D vector of bools. True is tree, false is open
let map_width = input_vec[0].len();
let map_height = input_vec.len();
let mut map = vec![vec![false; map_width]; map_height];
// Populate map
for (row, line) in input_vec.iter().enumerate() {
for (col, character) in line.chars().enumerate() {
match character {
'.' => map[row][col] = false,
'#' => map[row][col] = true,
_ => panic!("Character encountered that is neither # nor ."),
}
}
}
// Uncomment for debug view of data sets
//println!("{:?}", input_vec);
//println!("{:?}", map);
// Anonymous function (closure) to use for calculating trajectory
fn calculate_trajectory(
map: &Vec<Vec<bool>>,
map_height: usize,
map_width: usize,
horz_delta: usize,
vert_delta: usize,
) -> usize {
let mut tree_count: usize = 0;
let mut row: usize = 0;
let mut col: usize = 0;
while row < map_height {
if map[row][col] == true {
tree_count += 1;
}
row += vert_delta;
col += horz_delta;
// Rollover
if col >= map_width {
col -= map_width;
}
}
tree_count
};
// ------------------------------------
// Calculate part 1 - Trees encountered with right:3 down:1 trajectory
println!(
"Trees encountered on initial trajectory: {}",
calculate_trajectory(&map, map_height, map_width, 3, 1)
);
// Calculate part 2
let multiplied_trees = calculate_trajectory(&map, map_height, map_width, 1, 1)
* calculate_trajectory(&map, map_height, map_width, 3, 1)
* calculate_trajectory(&map, map_height, map_width, 5, 1)
* calculate_trajectory(&map, map_height, map_width, 7, 1)
* calculate_trajectory(&map, map_height, map_width, 1, 2);
println!(
"Trees encountered on multiplied trajectory: {}",
multiplied_trees
);
}
|
#[doc = "Register `DOUTR18` reader"]
pub type R = crate::R<DOUTR18_SPEC>;
#[doc = "Register `DOUTR18` writer"]
pub type W = crate::W<DOUTR18_SPEC>;
#[doc = "Field `DOUT18` reader - Output data sent to MDIO Master during read frames"]
pub type DOUT18_R = crate::FieldReader<u16>;
#[doc = "Field `DOUT18` writer - Output data sent to MDIO Master during read frames"]
pub type DOUT18_W<'a, REG, const O: u8> = crate::FieldWriter<'a, REG, 16, O, u16>;
impl R {
    #[doc = "Bits 0:15 - Output data sent to MDIO Master during read frames"]
    #[inline(always)]
    pub fn dout18(&self) -> DOUT18_R {
        // Extract the low 16 bits of the 32-bit register value.
        DOUT18_R::new((self.bits & 0xffff) as u16)
    }
}
impl W {
    #[doc = "Bits 0:15 - Output data sent to MDIO Master during read frames"]
    #[inline(always)]
    #[must_use]
    pub fn dout18(&mut self) -> DOUT18_W<DOUTR18_SPEC, 0> {
        DOUT18_W::new(self)
    }
    #[doc = "Writes raw bits to the register."]
    #[inline(always)]
    pub unsafe fn bits(&mut self, bits: u32) -> &mut Self {
        // Caller is responsible for the validity of the raw value.
        self.bits = bits;
        self
    }
}
#[doc = "MDIOS output data register 18\n\nYou can [`read`](crate::generic::Reg::read) this register and get [`doutr18::R`](R). You can [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero) this register using [`doutr18::W`](W). You can also [`modify`](crate::generic::Reg::modify) this register. See [API](https://docs.rs/svd2rust/#read--modify--write-api)."]
pub struct DOUTR18_SPEC;
impl crate::RegisterSpec for DOUTR18_SPEC {
    // 32-bit underlying register width.
    type Ux = u32;
}
#[doc = "`read()` method returns [`doutr18::R`](R) reader structure"]
impl crate::Readable for DOUTR18_SPEC {}
#[doc = "`write(|w| ..)` method takes [`doutr18::W`](W) writer structure"]
impl crate::Writable for DOUTR18_SPEC {
    // No write-1-to-clear / write-0-to-clear fields in this register.
    const ZERO_TO_MODIFY_FIELDS_BITMAP: Self::Ux = 0;
    const ONE_TO_MODIFY_FIELDS_BITMAP: Self::Ux = 0;
}
#[doc = "`reset()` method sets DOUTR18 to value 0"]
impl crate::Resettable for DOUTR18_SPEC {
    const RESET_VALUE: Self::Ux = 0;
}
|
use super::Transition;
use super::super::Frame;
use super::StateData;
/// A state in the application's state machine. Every hook defaults to a
/// no-op, and `update` returns `Transition::None` by default.
///
/// NOTE(review): the `shadow_*` variants are presumably invoked on
/// states that are not currently topmost — confirm against the driver.
pub trait State: Send + StateClone {
    fn draw(&self, _frame: &mut Frame, _state_data: &StateData) {
    }
    fn shadow_draw(&self, _frame: &mut Frame, _state_data: &StateData) {
    }
    /// Advances the state; the returned `Transition` tells the driver
    /// whether to switch, push, pop, or stay.
    fn update(&mut self, _state_data: &StateData) -> Transition {
        Transition::None
    }
    fn shadow_update(&mut self, _state_data: &StateData) {
    }
}
/// Object-safe cloning support for boxed states (the "clone box"
/// pattern — `Clone` itself is not object-safe).
pub trait StateClone {
    fn clone_box(&self) -> Box<dyn State>;
}
/// Blanket impl: any cloneable, `'static` `State` can be cloned into a
/// fresh boxed trait object.
impl<T> StateClone for T
    where T: State + Clone + 'static
{
    fn clone_box(&self) -> Box<dyn State> {
        Box::new(self.clone())
    }
}
/// Makes `Box<dyn State>` itself cloneable by delegating to `clone_box`.
impl Clone for Box<dyn State> {
    fn clone(&self) -> Self {
        self.clone_box()
    }
}
|
use crate::{reflector::ObjectRef, watcher::Error};
use core::{
pin::Pin,
task::{Context, Poll},
};
use futures::{ready, Stream};
use kube_client::Resource;
use pin_project::pin_project;
use std::{collections::HashMap, hash::Hash};
#[allow(clippy::pedantic)]
#[pin_project]
/// Stream returned by the [`predicate_filter`](super::WatchStreamExt::predicate_filter) method.
#[must_use = "streams do nothing unless polled"]
pub struct PredicateFilter<St, K: Resource, Func> {
    #[pin]
    stream: St,
    /// Maps an object to a hash of the fields of interest; `None` means
    /// the object is always passed through (see `poll_next`).
    predicate: Func,
    /// Last predicate hash emitted per object; unchanged hashes are
    /// filtered out.
    cache: HashMap<ObjectRef<K>, u64>,
}
impl<St, K, F> PredicateFilter<St, K, F>
where
    St: Stream<Item = Result<K, Error>>,
    K: Resource,
    F: Fn(&K) -> Option<u64> + 'static,
{
    /// Builds a filter around `stream` that suppresses events whose
    /// `predicate` hash is unchanged since the last emission.
    pub(super) fn new(stream: St, predicate: F) -> Self {
        let cache = HashMap::new();
        Self {
            stream,
            predicate,
            cache,
        }
    }
}
impl<St, K, F> Stream for PredicateFilter<St, K, F>
where
    St: Stream<Item = Result<K, Error>>,
    K: Resource,
    K::DynamicType: Default + Eq + Hash,
    F: Fn(&K) -> Option<u64> + 'static,
{
    type Item = Result<K, Error>;
    /// Polls the inner stream, emitting an object only when its predicate
    /// hash changed since the last emission. Errors always pass through,
    /// and objects whose predicate returns `None` are always emitted.
    fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
        let mut me = self.project();
        Poll::Ready(loop {
            break match ready!(me.stream.as_mut().poll_next(cx)) {
                Some(Ok(obj)) => {
                    if let Some(val) = (me.predicate)(&obj) {
                        let key = ObjectRef::from_obj(&obj);
                        // Single map operation instead of get + get_mut/insert:
                        // `insert` returns the previous value, so "changed" is
                        // just a comparison (a fresh key counts as changed).
                        let changed = me.cache.insert(key, val) != Some(val);
                        if changed {
                            Some(Ok(obj))
                        } else {
                            continue;
                        }
                    } else {
                        // if we can't evaluate predicate, always emit K
                        Some(Ok(obj))
                    }
                }
                Some(Err(err)) => Some(Err(err)),
                None => return Poll::Ready(None),
            };
        })
    }
}
/// Predicate functions for [`WatchStreamExt::predicate_filter`](crate::WatchStreamExt::predicate_filter)
///
/// Each helper hashes one commonly compared property of an object; the
/// filter compares successive hashes to decide whether a watch event
/// should be passed along or suppressed.
///
/// Functional rewrite of the [controller-runtime/predicate module](https://github.com/kubernetes-sigs/controller-runtime/blob/main/pkg/predicate/predicate.go).
pub mod predicates {
    use kube_client::{Resource, ResourceExt};
    use std::{
        collections::hash_map::DefaultHasher,
        hash::{Hash, Hasher},
    };

    /// Stable hash of any `Hash` value via the std `DefaultHasher`.
    fn hash<T: Hash>(value: &T) -> u64 {
        let mut state = DefaultHasher::new();
        value.hash(&mut state);
        state.finish()
    }

    /// Hash the generation of a Resource K
    pub fn generation<K: Resource>(obj: &K) -> Option<u64> {
        obj.meta().generation.as_ref().map(hash)
    }

    /// Hash the resource version of a Resource K
    pub fn resource_version<K: Resource>(obj: &K) -> Option<u64> {
        obj.meta().resource_version.as_ref().map(hash)
    }

    /// Hash the labels of a Resource K
    pub fn labels<K: Resource>(obj: &K) -> Option<u64> {
        let labels = obj.labels();
        Some(hash(labels))
    }

    /// Hash the annotations of a Resource K
    pub fn annotations<K: Resource>(obj: &K) -> Option<u64> {
        let annotations = obj.annotations();
        Some(hash(annotations))
    }
}
#[cfg(test)]
pub(crate) mod tests {
    use std::task::Poll;
    use super::{predicates, Error, PredicateFilter};
    use futures::{pin_mut, poll, stream, FutureExt, StreamExt};
    use kube_client::Resource;
    use serde_json::json;
    #[tokio::test]
    async fn predicate_filtering_hides_equal_predicate_values() {
        use k8s_openapi::api::core::v1::Pod;
        // Minimal Pod fixture; only metadata.generation varies per event.
        let mkobj = |gen: i32| {
            let p: Pod = serde_json::from_value(json!({
                "apiVersion": "v1",
                "kind": "Pod",
                "metadata": {
                    "name": "blog",
                    "generation": Some(gen),
                },
                "spec": {
                    "containers": [{
                        "name": "blog",
                        "image": "clux/blog:0.1.0"
                    }],
                }
            }))
            .unwrap();
            p
        };
        // Two consecutive generation=1 events: the second must be suppressed.
        let data = stream::iter([
            Ok(mkobj(1)),
            Err(Error::TooManyObjects),
            Ok(mkobj(1)),
            Ok(mkobj(2)),
        ]);
        let rx = PredicateFilter::new(data, predicates::generation);
        pin_mut!(rx);
        // mkobj(1) passed through
        let first = rx.next().now_or_never().unwrap().unwrap().unwrap();
        assert_eq!(first.meta().generation, Some(1));
        // Error passed through
        assert!(matches!(
            poll!(rx.next()),
            Poll::Ready(Some(Err(Error::TooManyObjects)))
        ));
        // (no repeat mkobj(1) - same generation)
        // mkobj(2) next
        let second = rx.next().now_or_never().unwrap().unwrap().unwrap();
        assert_eq!(second.meta().generation, Some(2));
        assert!(matches!(poll!(rx.next()), Poll::Ready(None)));
    }
}
|
use futures::sync::mpsc::{unbounded, UnboundedSender};
use futures::Stream;
use netlink_packet_core::NetlinkMessage;
use std::fmt::Debug;
use crate::errors::{Error, ErrorKind};
use crate::Request;
use netlink_sys::SocketAddr;
/// A handle to pass requests to a [`Connection`](struct.Connection.html).
#[derive(Clone, Debug)]
pub struct ConnectionHandle<T>
where
    T: Debug + Clone + Eq + PartialEq,
{
    /// Sending half of the channel used to forward requests to the connection.
    requests_tx: UnboundedSender<Request<T>>,
}
impl<T> ConnectionHandle<T>
where
    T: Debug + Clone + Eq + PartialEq,
{
    /// Wraps the sending half of a connection's request channel.
    pub(crate) fn new(requests_tx: UnboundedSender<Request<T>>) -> Self {
        Self { requests_tx }
    }
    /// Send a new request and get the response as a stream of messages. Note that some messages
    /// are not part of the response stream:
    ///
    /// - **acknowledgements**: when an acknowledgement is received, the stream is closed
    /// - **end of dump messages**: similarly, upon receiving an "end of dump" message, the stream is
    ///   closed
    pub fn request(
        &mut self,
        message: NetlinkMessage<T>,
        destination: SocketAddr,
    ) -> impl Stream<Item = NetlinkMessage<T>, Error = Error<T>> {
        let (tx, rx) = unbounded::<NetlinkMessage<T>>();
        debug!("handle: forwarding new request to connection");
        // A send failure is deliberately ignored here: returning a Result
        // would make this method's signature ugly. If the receiver has been
        // dropped, the request (and the tx channel it contains) is dropped
        // when this function returns, and the caller observes the failure
        // through rx.poll() instead.
        let _ = self.requests_tx.unbounded_send(Request::from((tx, message, destination)));
        rx.map_err(|()| {
            error!("could not forward new request to connection: the connection is closed");
            ErrorKind::ConnectionClosed.into()
        })
    }
    /// Fire-and-forget variant of [`request`](#method.request): forwards the
    /// message without retaining a receiver for any response.
    pub fn notify(
        &mut self,
        message: NetlinkMessage<T>,
        destination: SocketAddr,
    ) -> Result<(), Error<T>> {
        let (tx, _rx) = unbounded::<NetlinkMessage<T>>();
        debug!("handle: forwarding new request to connection");
        self.requests_tx
            .unbounded_send(Request::from((tx, message, destination)))
            .map_err(|_| ErrorKind::ConnectionClosed.into())
    }
}
|
use std::cell::RefCell;
use std::collections::HashMap;
use std::rc::Rc;
use crate::execute::{VMError, VMResult};
use crate::value::Value;
/// A lexically scoped variable environment: local bindings plus an
/// optional link to the enclosing (outer) scope.
#[derive(Debug, Clone)]
pub struct Environment {
    /// Outer scope consulted when a name is not bound locally
    /// (`None` for the global/top-level scope).
    enclosing: Option<Rc<RefCell<Environment>>>,
    /// Variables defined directly in this scope.
    bindings: HashMap<String, Value>,
}
impl Environment {
    /// Creates a top-level environment with no enclosing scope.
    pub fn new() -> Self {
        Environment {
            enclosing: None,
            bindings: HashMap::new(),
        }
    }
    /// Creates a nested scope whose lookups fall back to `env`.
    pub fn new_enclosing(env: Rc<RefCell<Environment>>) -> Self {
        Environment {
            enclosing: Some(env),
            bindings: HashMap::new(),
        }
    }
    /// Declares (or redeclares) `name` in this scope.
    pub fn define(&mut self, name: String, value: Value) {
        self.bindings.insert(name, value);
    }
    /// Assigns to an existing variable, searching enclosing scopes outward.
    ///
    /// Returns the assigned value, or an error if `name` is not bound in
    /// any reachable scope.
    pub fn set(&mut self, name: String, value: Value) -> VMResult {
        // Single lookup via get_mut instead of contains_key + insert.
        if let Some(slot) = self.bindings.get_mut(&name) {
            *slot = value.clone();
            return Ok(value);
        }
        if let Some(ref env) = self.enclosing {
            return env.borrow_mut().set(name, value);
        }
        Err(VMError::Message(format!("no such variable '{}'", name)))
    }
    /// Reads a variable, searching enclosing scopes outward.
    pub fn get(&self, name: &str) -> VMResult {
        if let Some(val) = self.bindings.get(name) {
            return Ok(val.clone());
        }
        if let Some(ref env) = self.enclosing {
            return env.borrow().get(name);
        }
        Err(VMError::Message(format!("no such variable '{}'", name)))
    }
}
|
#![allow(dead_code)]
use iced_wgpu::{wgpu, wgpu::vertex_attr_array};
use rand::{rngs::SmallRng, Rng, SeedableRng};
#[repr(C)]
#[derive(Copy, Clone, Debug, Default)]
/// Per-particle data uploaded verbatim (as raw bytes) to the GPU buffer.
///
/// `#[repr(C)]` keeps the field layout stable for that upload; it is
/// assumed to match the shader-side struct — confirm against the particle
/// shader's definition.
pub struct ParticleAttributes {
    /// World-space position (written from a transformed Vec4, truncated).
    position: glam::Vec3,
    radius: f32,
    /// RGBA color.
    color: glam::Vec4,
}
/// A set of particles stored in one GPU buffer, plus the bind group that
/// exposes that buffer to the particle shader.
pub struct ParticleSystem {
    /// Number of `ParticleAttributes` entries the buffer was sized for.
    particle_count: usize,
    particle_buffer: wgpu::Buffer,
    particle_system_bind_group: wgpu::BindGroup,
}
impl ParticleSystem {
    /// Creates the GPU-side particle buffer and its bind group.
    ///
    /// The buffer holds `particle_count` `ParticleAttributes` entries and is
    /// usable as a storage buffer, copy destination, and vertex buffer.
    pub fn new(device: &wgpu::Device, particle_renderer: &ParticleRenderer, particle_count: usize) -> Self {
        let particle_buffer = device.create_buffer(&wgpu::BufferDescriptor {
            label: None,
            size: (std::mem::size_of::<ParticleAttributes>()*particle_count) as wgpu::BufferAddress,
            usage: wgpu::BufferUsage::STORAGE|wgpu::BufferUsage::COPY_DST|wgpu::BufferUsage::VERTEX,
            mapped_at_creation: false,
        });
        let particle_system_bind_group = device.create_bind_group(&bind_group_descriptor!(
            &particle_renderer.particle_system_bind_group_layout,
            0 => Buffer(particle_buffer.slice(..)),
        ));
        Self {
            particle_count,
            particle_buffer,
            particle_system_bind_group,
        }
    }
    /// Fills the particle buffer with one flattened disc-shaped cloud per
    /// matrix in `matrices`.
    ///
    /// Uses a fixed PRNG seed so cloud shapes are deterministic across runs.
    /// Any remainder particles (`particle_count % matrices.len()`) keep their
    /// default (zeroed) attributes.
    pub fn generate_clouds(&self, queue: &wgpu::Queue, matrices: &[glam::Mat4]) {
        // Fail fast with a clear message instead of a bare divide-by-zero panic.
        assert!(!matrices.is_empty(), "generate_clouds requires at least one cloud matrix");
        let mut prng = SmallRng::from_seed([0; 16]);
        let mut particle_attributes = vec![ParticleAttributes::default(); self.particle_count];
        let particles_per_cloud = self.particle_count/matrices.len();
        for (j, matrix) in matrices.iter().enumerate() {
            for i in 0..particles_per_cloud {
                let angle = prng.gen::<f32>()*std::f32::consts::PI*2.0;
                let dist = prng.gen::<f32>()*1.1;
                let particle = &mut particle_attributes[j*particles_per_cloud+i];
                particle.position = (*matrix*glam::Vec4::new(angle.cos()*dist, 2.0+(prng.gen::<f32>()-0.5).max(0.0), angle.sin()*dist*0.5, 1.0)).truncate().into();
                particle.radius = 0.3;
                particle.color = glam::Vec4::new(1.0, 1.0, 1.0, 0.0);
            }
        }
        // SAFETY: `ParticleAttributes` is #[repr(C)] and the Vec holds exactly
        // `particle_count` fully initialized elements, so the byte view covers
        // valid, live memory of the computed length for the duration of the call.
        let data = unsafe {
            std::slice::from_raw_parts(
                particle_attributes.as_ptr() as *const u8,
                std::mem::size_of::<ParticleAttributes>()*self.particle_count,
            )
        };
        queue.write_buffer(&self.particle_buffer, 0, data);
    }
}
/// Render pipeline plus bind-group layouts for drawing particle systems.
pub struct ParticleRenderer {
    surface_pipeline: wgpu::RenderPipeline,
    surface_bind_group_layout: wgpu::BindGroupLayout,
    surface_bind_group: wgpu::BindGroup,
    /// Layout shared by every `ParticleSystem`'s storage-buffer bind group.
    particle_system_bind_group_layout: wgpu::BindGroupLayout,
}
impl ParticleRenderer {
    /// Builds the particle render pipeline and its bind-group layouts.
    ///
    /// `bind_group_descriptor!` and `surface_pass_pipeline_descriptor!` are
    /// project-local macros defined elsewhere in this crate.
    pub fn new(device: &wgpu::Device, renderer: &crate::renderer::Renderer, asset_pack: &crate::assets::AssetPack) -> Self {
        // Empty layout/bind group: the surface pass currently has no
        // per-renderer resources, but the slot keeps group indices stable.
        let surface_bind_group_layout = device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
            label: None,
            entries: &[],
        });
        let surface_bind_group = device.create_bind_group(&bind_group_descriptor!(
            &surface_bind_group_layout,
        ));
        // Read-only storage buffer visible to the vertex stage; one
        // ParticleAttributes struct is the minimum binding size.
        let particle_system_bind_group_layout = device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
            label: None,
            entries: &[
                wgpu::BindGroupLayoutEntry {
                    binding: 0,
                    visibility: wgpu::ShaderStage::VERTEX,
                    ty: wgpu::BindingType::StorageBuffer {
                        dynamic: false,
                        min_binding_size: wgpu::BufferSize::new(std::mem::size_of::<ParticleAttributes>() as u64),
                        readonly: true,
                    },
                    count: None,
                },
            ],
        });
        // Layout order fixes the shader's set indices: 0 = surface,
        // 1 = camera uniforms, 2 = particle system.
        let surface_pipeline_layout =
            device.create_pipeline_layout(&wgpu::PipelineLayoutDescriptor {
                label: None,
                push_constant_ranges: &[],
                bind_group_layouts: &[&surface_bind_group_layout, &renderer.bind_group_layouts.camera_uniforms_bind_group_layout, &particle_system_bind_group_layout],
            });
        let surface_pipeline = device.create_render_pipeline(&surface_pass_pipeline_descriptor!(
            asset_pack,
            surface_pipeline_layout,
            "assets/shader_modules/particle_vert",
            "assets/shader_modules/particle_frag",
            None,
            TriangleStrip,
            wgpu::VertexStateDescriptor {
                index_format: wgpu::IndexFormat::Uint16,
                vertex_buffers: &[
                    instance_attributes_vertex_buffer_descriptor!(0),
                    instance_attributes_vertex_buffer_descriptor!(4),
                    instance_attributes_vertex_buffer_descriptor!(8),
                ],
            }
        ));
        Self {
            surface_pipeline,
            surface_bind_group_layout,
            surface_bind_group,
            particle_system_bind_group_layout,
        }
    }
    /// Records draw commands for one particle system.
    ///
    /// Draws `particle_count * 4` vertices (presumably one quad-strip per
    /// particle expanded in the vertex shader — confirm with the shader).
    /// Bind group 1 (camera uniforms) is not set here; it is assumed to be
    /// bound by the caller before this is invoked — TODO confirm.
    pub fn render_surface<'a>(&'a self, render_pass: &mut wgpu::RenderPass<'a>, particle_system: &'a ParticleSystem, instances_indices: std::ops::Range<u32>) {
        render_pass.set_pipeline(&self.surface_pipeline);
        render_pass.set_bind_group(0, &self.surface_bind_group, &[]);
        render_pass.set_bind_group(2, &particle_system.particle_system_bind_group, &[]);
        render_pass.draw(0..(particle_system.particle_count*4) as u32, instances_indices);
    }
}
|
use std::{
io,
borrow::Cow,
os::unix::ffi::{OsStrExt, OsStringExt},
path::Path,
pin::Pin,
task::{Context, Poll},
ffi::{OsStr, OsString},
};
use lazy_static::lazy_static;
use derive_more::From;
use thiserror::Error;
use serde::de::DeserializeOwned;
use regex::bytes::Regex;
use futures::AsyncReadExt;
pub use isahc::http::{
StatusCode,
header::{HeaderMap, HeaderName, HeaderValue}
};
use super::headers;
/// Thin wrapper around an isahc HTTP response with helpers for common
/// headers and body decoding.
#[derive(Debug)]
pub struct Response(
    pub(super) isahc::http::Response<isahc::Body>
);
/// Opaque error type: wraps an `anyhow::Error` and displays its message.
#[derive(Debug, From, Error)]
#[error("{0}")]
pub struct Error(anyhow::Error);
impl Response {
pub fn status(&self) -> StatusCode {
self.0.status()
}
pub fn headers(&self) -> &HeaderMap {
self.0.headers()
}
pub fn content_length(&self) -> Option<usize> {
let content_length = self
.headers()
.get(headers::CONTENT_LENGTH)?;
log::debug!("content length: {:#?}", content_length);
let total_size = content_length
.to_str()
.ok()?
.parse()
.ok()?;
Some(total_size)
}
pub fn filename(&self) -> Option<Cow<Path>> {
lazy_static! {
static ref FILENAME_REGEX: Regex = Regex
::new(r#"attachment; filename\*?=(UTF-8''|")?(?P<filename>[^"]*)"?"#)
.expect("invalid filename regex");
}
let content_disposition = self
.headers()
.get(headers::CONTENT_DISPOSITION)?;
log::debug!("Content-Disposition header: {:#?}", content_disposition);
let filename = FILENAME_REGEX
.captures(
content_disposition.as_bytes()
)?
.name("filename")?
.as_bytes();
if filename.is_empty() {
None
}
else {
let filename_decoded = match percent_encoding::percent_decode(filename).into() {
Cow::Borrowed(filename_decoded) => Cow::Borrowed(
Path::new(
OsStr::from_bytes(filename_decoded)
)
),
Cow::Owned(filename_decoded) => Cow::Owned(
OsString::from_vec(filename_decoded).into()
),
};
Some(filename_decoded)
}
}
pub fn mime(&self) -> Option<&str> {
let content_type = self
.headers()
.get(headers::CONTENT_TYPE)?;
log::debug!("Content-Type header: {:#?}", content_type);
let mime = content_type
.as_bytes()
.splitn(
2,
|&c| c == b';'
)
.next()
.expect("splitn should yield at least once");
std::str
::from_utf8(mime)
.ok()
}
pub async fn body_bytes(&mut self, body: &mut Vec<u8>) -> Result<usize, Error> {
self.0
.body_mut()
.read_to_end(body)
.await
.map_err(
|error| Error(
error.into()
)
)
}
pub async fn body_json<T: DeserializeOwned>(&mut self) -> Result<T, Error> {
let mut body = Vec::with_capacity(1024);
self.body_bytes(&mut body)
.await
.map_err(Into::<anyhow::Error>::into)?;
log::trace!(
"response json payload: {}",
String::from_utf8_lossy(&body)
);
serde_json
::from_slice(&body)
.map_err(Into::into)
.map_err(Error)
}
pub async fn body_string(&mut self) -> Result<Box<str>, Error> {
let mut string = String::with_capacity(8);
self.0
.body_mut()
.read_to_string(&mut string)
.await
.map_err(Into::<anyhow::Error>::into)?;
Ok(
string.into_boxed_str()
)
}
}
impl futures::AsyncRead for Response {
    /// Delegates reads straight to the underlying isahc response body.
    fn poll_read(
        mut self: Pin<&mut Self>,
        cx: &mut Context,
        buf: &mut [u8]
    ) -> Poll<Result<usize, io::Error>> {
        Pin::new(self.0.body_mut()).poll_read(cx, buf)
    }
}
|
// This file is part of Substrate.
// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd.
// SPDX-License-Identifier: Apache-2.0
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! # Phragmén Election Module.
//!
//! An election module based on sequential phragmen.
//!
//! ### Term and Round
//!
//! The election happens in _rounds_: every `N` blocks, all previous members are retired and a new
//! set is elected (which may or may not have an intersection with the previous set). Each round
//! lasts for some number of blocks defined by `TermDuration` storage item. The words _term_ and
//! _round_ can be used interchangeably in this context.
//!
//! `TermDuration` might change during a round. This can shorten or extend the length of the round.
//! The next election round's block number is never stored but rather always checked on the fly.
//! Based on the current block number and `TermDuration`, the condition `BlockNumber % TermDuration
//! == 0` being satisfied will always trigger a new election round.
//!
//! ### Voting
//!
//! Voters can vote for any set of the candidates by providing a list of account ids. Invalid votes
//! (voting for non-candidates) are ignored during election. Yet, a voter _might_ vote for a future
//! candidate. Voters reserve a bond as they vote. Each vote defines a `value`. This amount is
//! locked from the account of the voter and indicates the weight of the vote. Voters can update
//! their votes at any time by calling `vote()` again. This keeps the bond untouched but can
//! optionally change the locked `value`. After a round, votes are kept and might still be valid for
//! further rounds. A voter is responsible for calling `remove_voter` once they are done to have
//! their bond back and remove the lock.
//!
//! Voters also report other voters as being defunct to earn their bond. A voter is defunct once all
//! of the candidates that they have voted for are neither a valid candidate anymore nor a member.
//! Upon reporting, if the target voter is actually defunct, the reporter will be rewarded by the
//! voting bond of the target. The target will lose their bond and get removed. If the target is not
//! defunct, the reporter is slashed and removed. To prevent being reported, voters should manually
//! submit a `remove_voter()` as soon as they are in the defunct state.
//!
//! ### Candidacy and Members
//!
//! Candidates also reserve a bond as they submit candidacy. A candidate cannot take their candidacy
//! back. A candidate can end up in one of the below situations:
//! - **Winner**: A winner is kept as a _member_. They must still have a bond in reserve and they
//! are automatically counted as a candidate for the next election.
//! - **Runner-up**: Runners-up are the best candidates immediately after the winners. The number
//! of runners_up to keep is configurable. Runners-up are used, in order that they are elected,
//! as replacements when a candidate is kicked by `[remove_member]`, or when an active member
//! renounces their candidacy. Runners are automatically counted as a candidate for the next
//! election.
//! - **Loser**: Any candidate who is not a winner is left as a loser. A loser might be an
//!   _outgoing member or runner-up_, meaning that they are an active member who failed to keep
//!   their spot. An outgoing member or runner-up will always lose their bond.
//!
//! ##### Renouncing candidacy.
//!
//! All candidates, elected or not, can renounce their candidacy. A call to
//! [`Module::renounce_candidacy`] will always cause the candidacy bond to be refunded.
//!
//! Note that with the members being the default candidates for the next round and votes persisting
//! in storage, the election system is entirely stable given no further input. This means that if
//! the system has a particular set of candidates `C` and voters `V` that lead to a set of members
//! `M` being elected, as long as `V` and `C` don't remove their candidacy and votes, `M` will keep
//! being re-elected at the end of each round.
//!
//! ### Module Information
//!
//! - [`election_sp_phragmen::Trait`](./trait.Trait.html)
//! - [`Call`](./enum.Call.html)
//! - [`Module`](./struct.Module.html)
#![cfg_attr(not(feature = "std"), no_std)]
use codec::{Decode, Encode};
use frame_support::{
decl_error, decl_event, decl_module, decl_storage,
dispatch::{DispatchResultWithPostInfo, WithPostDispatchInfo},
ensure,
storage::{IterableStorageMap, StorageMap},
traits::{
BalanceStatus, ChangeMembers, Contains, ContainsLengthBound, Currency, Get,
InitializeMembers, LockIdentifier, LockableCurrency, OnUnbalanced, ReservableCurrency,
WithdrawReason, WithdrawReasons,
},
weights::Weight,
};
use frame_system::{ensure_root, ensure_signed};
use sp_npos_elections::{ElectionResult, ExtendedBalance, VoteWeight};
use sp_runtime::{
traits::{Convert, Saturating, StaticLookup, Zero},
DispatchError, Perbill, RuntimeDebug,
};
use sp_std::prelude::*;
mod benchmarking;
mod default_weights;
/// The maximum votes allowed per voter.
pub const MAXIMUM_VOTE: usize = 16;
/// Shorthand for the balance type of the pallet's configured `Currency`.
type BalanceOf<T> =
    <<T as Trait>::Currency as Currency<<T as frame_system::Trait>::AccountId>>::Balance;
/// Shorthand for the negative imbalance (slash/burn) type of the
/// configured `Currency`.
type NegativeImbalanceOf<T> =
    <<T as Trait>::Currency as Currency<<T as frame_system::Trait>::AccountId>>::NegativeImbalance;
/// An indication that the renouncing account currently has which of the below roles.
#[derive(Encode, Decode, Clone, PartialEq, RuntimeDebug)]
pub enum Renouncing {
    /// A member is renouncing.
    Member,
    /// A runner-up is renouncing.
    RunnerUp,
    /// A candidate is renouncing, while the given total number of candidates exists.
    /// (The count is compact-encoded and used for weight calculation —
    /// see `WeightInfo::renounce_candidacy_candidate`.)
    Candidate(#[codec(compact)] u32),
}
/// Information needed to prove the defunct-ness of a voter.
#[derive(Encode, Decode, Clone, PartialEq, RuntimeDebug)]
pub struct DefunctVoter<AccountId> {
    /// The account of the voter being challenged as defunct.
    pub who: AccountId,
    /// The number of votes that `who` has placed.
    #[codec(compact)]
    pub vote_count: u32,
    /// The number of current active candidates.
    #[codec(compact)]
    pub candidate_count: u32,
}
/// Weight functions for this pallet's dispatchables.
///
/// By convention `c` is a candidate count and `v` a vote count
/// (presumably matching the parameters in `mod benchmarking` — confirm there).
pub trait WeightInfo {
    fn vote(v: u32) -> Weight;
    fn vote_update(v: u32) -> Weight;
    fn remove_voter() -> Weight;
    fn report_defunct_voter_correct(c: u32, v: u32) -> Weight;
    fn report_defunct_voter_incorrect(c: u32, v: u32) -> Weight;
    fn submit_candidacy(c: u32) -> Weight;
    fn renounce_candidacy_candidate(c: u32) -> Weight;
    fn renounce_candidacy_members() -> Weight;
    fn renounce_candidacy_runners_up() -> Weight;
    fn remove_member_with_replacement() -> Weight;
    fn remove_member_wrong_refund() -> Weight;
}
/// Configuration trait of the elections-phragmen module.
pub trait Trait: frame_system::Trait {
    /// The overarching event type.
    type Event: From<Event<Self>> + Into<<Self as frame_system::Trait>::Event>;
    /// Identifier for the elections-phragmen pallet's lock
    type ModuleId: Get<LockIdentifier>;
    /// The currency that people are electing with.
    type Currency: LockableCurrency<Self::AccountId, Moment = Self::BlockNumber>
        + ReservableCurrency<Self::AccountId>;
    /// What to do when the members change.
    type ChangeMembers: ChangeMembers<Self::AccountId>;
    /// What to do with genesis members
    type InitializeMembers: InitializeMembers<Self::AccountId>;
    /// Convert a balance into a number used for election calculation.
    /// This must fit into a `u64` but is allowed to be sensibly lossy.
    type CurrencyToVote: Convert<BalanceOf<Self>, VoteWeight>
        + Convert<ExtendedBalance, BalanceOf<Self>>;
    /// How much should be locked up in order to submit one's candidacy.
    type CandidacyBond: Get<BalanceOf<Self>>;
    /// How much should be locked up in order to be able to submit votes.
    type VotingBond: Get<BalanceOf<Self>>;
    /// Handler for the unbalanced reduction when a candidate has lost (and is not a runner-up)
    type LoserCandidate: OnUnbalanced<NegativeImbalanceOf<Self>>;
    /// Handler for the unbalanced reduction when a reporter has submitted a bad defunct report.
    type BadReport: OnUnbalanced<NegativeImbalanceOf<Self>>;
    /// Handler for the unbalanced reduction when a member has been kicked.
    type KickedMember: OnUnbalanced<NegativeImbalanceOf<Self>>;
    /// Number of members to elect.
    type DesiredMembers: Get<u32>;
    /// Number of runners_up to keep.
    type DesiredRunnersUp: Get<u32>;
    /// How long each seat is kept. This defines the next block number at which an election
    /// round will happen. If set to zero, no elections are ever triggered and the module will
    /// be in passive mode.
    type TermDuration: Get<Self::BlockNumber>;
    /// Weight information for extrinsics in this pallet.
    type WeightInfo: WeightInfo;
}
decl_storage! {
    trait Store for Module<T: Trait> as PhragmenElection {
        // ---- State
        /// The current elected membership. Sorted based on account id.
        pub Members get(fn members): Vec<(T::AccountId, BalanceOf<T>)>;
        /// The current runners_up. Sorted based on low to high merit (worse to best).
        pub RunnersUp get(fn runners_up): Vec<(T::AccountId, BalanceOf<T>)>;
        /// The total number of vote rounds that have happened, excluding the upcoming one.
        pub ElectionRounds get(fn election_rounds): u32 = Zero::zero();
        /// Votes and locked stake of a particular voter.
        ///
        /// TWOX-NOTE: SAFE as `AccountId` is a crypto hash
        pub Voting get(fn voting): map hasher(twox_64_concat) T::AccountId => (BalanceOf<T>, Vec<T::AccountId>);
        /// The present candidate list. Sorted based on account-id. A current member or runner-up
        /// can never enter this vector and is always implicitly assumed to be a candidate.
        pub Candidates get(fn candidates): Vec<T::AccountId>;
    } add_extra_genesis {
        config(members): Vec<(T::AccountId, BalanceOf<T>)>;
        build(|config: &GenesisConfig<T>| {
            let members = config.members.iter().map(|(ref member, ref stake)| {
                // make sure they have enough stake
                assert!(
                    T::Currency::free_balance(member) >= *stake,
                    "Genesis member does not have enough stake",
                );
                // reserve candidacy bond and set as members.
                T::Currency::reserve(&member, T::CandidacyBond::get())
                    .expect("Genesis member does not have enough balance to be a candidate");
                // Note: all members will only vote for themselves, hence they must be given exactly
                // their own stake as total backing. Any sane election should behave as such.
                // Nonetheless, stakes will be updated for term 1 onwards according to the election.
                Members::<T>::mutate(|members| {
                    // Keep the members vector sorted by account id; a duplicate
                    // is a genesis-config bug, so fail loudly.
                    match members.binary_search_by(|(a, _b)| a.cmp(member)) {
                        Ok(_) => panic!("Duplicate member in elections phragmen genesis: {}", member),
                        Err(pos) => members.insert(pos, (member.clone(), *stake)),
                    }
                });
                // set self-votes to make persistent.
                <Module<T>>::vote(
                    T::Origin::from(Some(member.clone()).into()),
                    vec![member.clone()],
                    *stake,
                ).expect("Genesis member could not vote.");
                member.clone()
            }).collect::<Vec<T::AccountId>>();
            // report genesis members to upstream, if any.
            T::InitializeMembers::initialize_members(&members);
        })
    }
}
decl_error! {
    // Dispatch errors for the elections-phragmen module. The doc comment on
    // each variant doubles as its on-chain metadata description.
    pub enum Error for Module<T: Trait> {
        /// Cannot vote when no candidates or members exist.
        UnableToVote,
        /// Must vote for at least one candidate.
        NoVotes,
        /// Cannot vote more than candidates.
        TooManyVotes,
        /// Cannot vote more than maximum allowed.
        MaximumVotesExceeded,
        /// Cannot vote with stake less than minimum balance.
        LowBalance,
        /// Voter can not pay voting bond.
        UnableToPayBond,
        /// Must be a voter.
        MustBeVoter,
        /// Cannot report self.
        ReportSelf,
        /// Duplicated candidate submission.
        DuplicatedCandidate,
        /// Member cannot re-submit candidacy.
        MemberSubmit,
        /// Runner cannot re-submit candidacy.
        RunnerSubmit,
        /// Candidate does not have enough funds.
        InsufficientCandidateFunds,
        /// Not a member.
        NotMember,
        /// The provided count of number of candidates is incorrect.
        InvalidCandidateCount,
        /// The provided count of number of votes is incorrect.
        InvalidVoteCount,
        /// The renouncing origin presented a wrong `Renouncing` parameter.
        InvalidRenouncing,
        /// Prediction regarding replacement after member removal is wrong.
        InvalidReplacement,
    }
}
decl_module! {
pub struct Module<T: Trait> for enum Call where origin: T::Origin {
type Error = Error<T>;
fn deposit_event() = default;
const CandidacyBond: BalanceOf<T> = T::CandidacyBond::get();
const VotingBond: BalanceOf<T> = T::VotingBond::get();
const DesiredMembers: u32 = T::DesiredMembers::get();
const DesiredRunnersUp: u32 = T::DesiredRunnersUp::get();
const TermDuration: T::BlockNumber = T::TermDuration::get();
const ModuleId: LockIdentifier = T::ModuleId::get();
/// Vote for a set of candidates for the upcoming round of election. This can be called to
/// set the initial votes, or update already existing votes.
///
/// Upon initial voting, `value` units of `who`'s balance is locked and a bond amount is
/// reserved.
///
/// The `votes` should:
/// - not be empty.
/// - be less than the number of possible candidates. Note that all current members and
/// runners-up are also automatically candidates for the next round.
///
/// It is the responsibility of the caller to not place all of their balance into the lock
/// and keep some for further transactions.
///
/// # <weight>
/// Base weight: 47.93 µs
/// State reads:
/// - Candidates.len() + Members.len() + RunnersUp.len()
/// - Voting (is_voter)
/// - Lock
/// - [AccountBalance(who) (unreserve + total_balance)]
/// State writes:
/// - Voting
/// - Lock
/// - [AccountBalance(who) (unreserve -- only when creating a new voter)]
/// # </weight>
#[weight = T::WeightInfo::vote(votes.len() as u32)]
fn vote(
origin,
votes: Vec<T::AccountId>,
#[compact] value: BalanceOf<T>,
) {
let who = ensure_signed(origin)?;
ensure!(votes.len() <= MAXIMUM_VOTE, Error::<T>::MaximumVotesExceeded);
ensure!(!votes.is_empty(), Error::<T>::NoVotes);
let candidates_count = <Candidates<T>>::decode_len().unwrap_or(0);
let members_count = <Members<T>>::decode_len().unwrap_or(0);
let runners_up_count = <RunnersUp<T>>::decode_len().unwrap_or(0);
// addition is valid: candidates, members and runners-up will never overlap.
let allowed_votes = candidates_count + members_count + runners_up_count;
ensure!(!allowed_votes.is_zero(), Error::<T>::UnableToVote);
ensure!(votes.len() <= allowed_votes, Error::<T>::TooManyVotes);
ensure!(value > T::Currency::minimum_balance(), Error::<T>::LowBalance);
// first time voter. Reserve bond.
if !Self::is_voter(&who) {
T::Currency::reserve(&who, T::VotingBond::get())
.map_err(|_| Error::<T>::UnableToPayBond)?;
}
// Amount to be locked up.
let locked_balance = value.min(T::Currency::total_balance(&who));
// lock
T::Currency::set_lock(
T::ModuleId::get(),
&who,
locked_balance,
WithdrawReasons::except(WithdrawReason::TransactionPayment),
);
Voting::<T>::insert(&who, (locked_balance, votes));
}
/// Remove `origin` as a voter. This removes the lock and returns the bond.
///
/// # <weight>
/// Base weight: 36.8 µs
/// All state access is from do_remove_voter.
/// State reads:
/// - Voting
/// - [AccountData(who)]
/// State writes:
/// - Voting
/// - Locks
/// - [AccountData(who)]
/// # </weight>
#[weight = T::WeightInfo::remove_voter()]
fn remove_voter(origin) {
let who = ensure_signed(origin)?;
ensure!(Self::is_voter(&who), Error::<T>::MustBeVoter);
Self::do_remove_voter(&who, true);
}
/// Report `target` for being a defunct voter. In case of a valid report, the reporter is
/// rewarded by the bond amount of `target`. Otherwise, the reporter itself is removed and
/// their bond is slashed.
///
/// A defunct voter is defined to be:
/// - a voter whose current submitted votes are all invalid. i.e. all of them are no
///   longer a candidate nor an active member or a runner-up.
///
/// The origin must provide the number of current candidates and votes of the reported target
/// for the purpose of accurate weight calculation.
///
/// # <weight>
/// No Base weight based on min square analysis.
/// Complexity of candidate_count: 1.755 µs
/// Complexity of vote_count: 18.51 µs
/// State reads:
/// 	- Voting(reporter)
/// 	- Candidate.len()
/// 	- Voting(Target)
/// 	- Candidates, Members, RunnersUp (is_defunct_voter)
/// State writes:
/// 	- Lock(reporter || target)
/// 	- [AccountBalance(reporter)] + AccountBalance(target)
/// 	- Voting(reporter || target)
/// Note: the db access is heavier in the case where the report is correct.
/// # </weight>
#[weight = T::WeightInfo::report_defunct_voter_correct(
    defunct.candidate_count,
    defunct.vote_count,
)]
fn report_defunct_voter(
    origin,
    defunct: DefunctVoter<<T::Lookup as StaticLookup>::Source>,
) -> DispatchResultWithPostInfo {
    let reporter = ensure_signed(origin)?;
    let target = T::Lookup::lookup(defunct.who)?;
    // self-reporting is forbidden, and only current voters may report.
    ensure!(reporter != target, Error::<T>::ReportSelf);
    ensure!(Self::is_voter(&reporter), Error::<T>::MustBeVoter);
    let DefunctVoter { candidate_count, vote_count, .. } = defunct;
    // `candidate_count` is a caller-supplied weight hint; it must be an upper bound on
    // the actual number of candidates or the call is rejected.
    ensure!(
        <Candidates<T>>::decode_len().unwrap_or(0) as u32 <= candidate_count,
        Error::<T>::InvalidCandidateCount,
    );
    let (_, votes) = <Voting<T>>::get(&target);
    // indirect way to ensure target is a voter. We could call into `::contains()`, but it
    // would have the same effect with one extra db access. Note that votes cannot be
    // submitted with length 0. Hence, a non-zero length means that the target is a voter.
    ensure!(votes.len() > 0, Error::<T>::MustBeVoter);
    // ensure that the size of votes that need to be searched is correct.
    ensure!(
        votes.len() as u32 <= vote_count,
        Error::<T>::InvalidVoteCount,
    );
    let valid = Self::is_defunct_voter(&votes);
    let maybe_refund = if valid {
        // reporter will get the voting bond of the target
        T::Currency::repatriate_reserved(&target, &reporter, T::VotingBond::get(), BalanceStatus::Free)?;
        // remove the target. They are defunct.
        Self::do_remove_voter(&target, false);
        // the dispatch was pre-charged with the "correct report" weight: no refund.
        None
    } else {
        // slash the bond of the reporter.
        let imbalance = T::Currency::slash_reserved(&reporter, T::VotingBond::get()).0;
        T::BadReport::on_unbalanced(imbalance);
        // remove the reporter.
        Self::do_remove_voter(&reporter, false);
        // adjust the charged fee to the "incorrect report" weight.
        Some(T::WeightInfo::report_defunct_voter_incorrect(
            defunct.candidate_count,
            defunct.vote_count,
        ))
    };
    Self::deposit_event(RawEvent::VoterReported(target, reporter, valid));
    Ok(maybe_refund.into())
}
/// Submit oneself for candidacy.
///
/// A candidate will either:
/// - Lose at the end of the term and forfeit their deposit.
/// - Win and become a member. Members will eventually get their stash back.
/// - Become a runner-up. Runners-ups are reserved members in case one gets forcefully
///   removed.
///
/// # <weight>
/// Base weight = 33.33 µs
/// Complexity of candidate_count: 0.375 µs
/// State reads:
/// 	- Candidates
/// 	- Members
/// 	- RunnersUp
/// 	- [AccountBalance(who)]
/// State writes:
/// 	- [AccountBalance(who)]
/// 	- Candidates
/// # </weight>
#[weight = T::WeightInfo::submit_candidacy(*candidate_count)]
fn submit_candidacy(origin, #[compact] candidate_count: u32) {
    let who = ensure_signed(origin)?;
    // `candidate_count` is a weight hint supplied by the caller; it must be an upper
    // bound on the actual number of candidates.
    let actual_count = <Candidates<T>>::decode_len().unwrap_or(0);
    ensure!(
        actual_count as u32 <= candidate_count,
        Error::<T>::InvalidCandidateCount,
    );
    // `is_candidate` returns `Err(insert_index)` when `who` is NOT yet a candidate.
    let is_candidate = Self::is_candidate(&who);
    ensure!(is_candidate.is_err(), Error::<T>::DuplicatedCandidate);
    // assured to be an error, error always contains the index.
    let index = is_candidate.unwrap_err();
    // members and runners-up are implicit candidates for the next round and may not
    // submit again.
    ensure!(!Self::is_member(&who), Error::<T>::MemberSubmit);
    ensure!(!Self::is_runner_up(&who), Error::<T>::RunnerSubmit);
    T::Currency::reserve(&who, T::CandidacyBond::get())
        .map_err(|_| Error::<T>::InsufficientCandidateFunds)?;
    // insert at the binary-search position to keep `Candidates` sorted.
    <Candidates<T>>::mutate(|c| c.insert(index, who));
}
/// Renounce one's intention to be a candidate for the next election round. 3 potential
/// outcomes exist:
/// - `origin` is a candidate and not elected in any set. In this case, the bond is
///   unreserved, returned and origin is removed as a candidate.
/// - `origin` is a current runner-up. In this case, the bond is unreserved, returned and
///   origin is removed as a runner-up.
/// - `origin` is a current member. In this case, the bond is unreserved and origin is
///   removed as a member, consequently not being a candidate for the next round anymore.
///   Similar to [`remove_voter`], if replacement runners exists, they are immediately used.
/// <weight>
/// If a candidate is renouncing:
/// 	Base weight: 17.28 µs
/// 	Complexity of candidate_count: 0.235 µs
/// 	State reads:
/// 		- Candidates
/// 		- [AccountBalance(who) (unreserve)]
/// 	State writes:
/// 		- Candidates
/// 		- [AccountBalance(who) (unreserve)]
/// If member is renouncing:
/// 	Base weight: 46.25 µs
/// 	State reads:
/// 		- Members, RunnersUp (remove_and_replace_member),
/// 		- [AccountData(who) (unreserve)]
/// 	State writes:
/// 		- Members, RunnersUp (remove_and_replace_member),
/// 		- [AccountData(who) (unreserve)]
/// If runner is renouncing:
/// 	Base weight: 46.25 µs
/// 	State reads:
/// 		- RunnersUp (remove_and_replace_member),
/// 		- [AccountData(who) (unreserve)]
/// 	State writes:
/// 		- RunnersUp (remove_and_replace_member),
/// 		- [AccountData(who) (unreserve)]
/// </weight>
#[weight = match *renouncing {
    Renouncing::Candidate(count) => T::WeightInfo::renounce_candidacy_candidate(count),
    Renouncing::Member => T::WeightInfo::renounce_candidacy_members(),
    Renouncing::RunnerUp => T::WeightInfo::renounce_candidacy_runners_up(),
}]
fn renounce_candidacy(origin, renouncing: Renouncing) {
    let who = ensure_signed(origin)?;
    match renouncing {
        Renouncing::Member => {
            // returns NoMember error in case of error.
            let _ = Self::remove_and_replace_member(&who)?;
            T::Currency::unreserve(&who, T::CandidacyBond::get());
            Self::deposit_event(RawEvent::MemberRenounced(who));
        },
        Renouncing::RunnerUp => {
            // runners-up are not sorted by account id, hence the linear scan.
            let mut runners_up_with_stake = Self::runners_up();
            if let Some(index) = runners_up_with_stake
                .iter()
                .position(|(ref r, ref _s)| r == &who)
            {
                runners_up_with_stake.remove(index);
                // unreserve the bond
                T::Currency::unreserve(&who, T::CandidacyBond::get());
                // update storage.
                <RunnersUp<T>>::put(runners_up_with_stake);
            } else {
                Err(Error::<T>::InvalidRenouncing)?;
            }
        }
        Renouncing::Candidate(count) => {
            let mut candidates = Self::candidates();
            // `count` is a weight hint: it must be at least the actual candidate count.
            ensure!(count >= candidates.len() as u32, Error::<T>::InvalidRenouncing);
            if let Some(index) = candidates.iter().position(|x| *x == who) {
                candidates.remove(index);
                // unreserve the bond
                T::Currency::unreserve(&who, T::CandidacyBond::get());
                // update storage.
                <Candidates<T>>::put(candidates);
            } else {
                Err(Error::<T>::InvalidRenouncing)?;
            }
        }
    };
}
/// Remove a particular member from the set. This is effective immediately and the bond of
/// the outgoing member is slashed.
///
/// If a runner-up is available, then the best runner-up will be removed and replaces the
/// outgoing member. Otherwise, a new phragmen election is started.
///
/// Note that this does not affect the designated block number of the next election.
///
/// # <weight>
/// If we have a replacement:
/// 	- Base weight: 50.93 µs
/// 	- State reads:
/// 		- RunnersUp.len()
/// 		- Members, RunnersUp (remove_and_replace_member)
/// 	- State writes:
/// 		- Members, RunnersUp (remove_and_replace_member)
/// Else, since this is a root call and will go into phragmen, we assume full block for now.
/// # </weight>
#[weight = if *has_replacement {
    T::WeightInfo::remove_member_with_replacement()
} else {
    T::MaximumBlockWeight::get()
}]
fn remove_member(
    origin,
    who: <T::Lookup as StaticLookup>::Source,
    has_replacement: bool,
) -> DispatchResultWithPostInfo {
    ensure_root(origin)?;
    let who = T::Lookup::lookup(who)?;
    // `has_replacement` is the caller's prediction, used only to pick the pre-charged
    // weight. A wrong prediction aborts with a (mostly refunded) error.
    let will_have_replacement = <RunnersUp<T>>::decode_len().unwrap_or(0) > 0;
    if will_have_replacement != has_replacement {
        // In both cases, we will change more weight than needed. Refund and abort.
        return Err(Error::<T>::InvalidReplacement.with_weight(
            // refund. The weight value comes from a benchmark which is special to this.
            // 5.751 µs
            T::WeightInfo::remove_member_wrong_refund()
        ));
    } // else, prediction was correct.
    Self::remove_and_replace_member(&who).map(|had_replacement| {
        let (imbalance, _) = T::Currency::slash_reserved(&who, T::CandidacyBond::get());
        T::KickedMember::on_unbalanced(imbalance);
        Self::deposit_event(RawEvent::MemberKicked(who.clone()));
        if !had_replacement {
            // if we end up here, we will charge a full block weight.
            Self::do_phragmen();
        }
        // no refund needed.
        None.into()
    }).map_err(|e| e.into())
}
/// What to do at the end of each block. Checks if an election needs to happen or not.
///
/// Delegates to `end_block`, which runs phragmen when `block_number` is a multiple of
/// the (non-zero) term duration and returns the consumed weight.
fn on_initialize(n: T::BlockNumber) -> Weight {
    // returns the correct weight.
    Self::end_block(n)
}
}
}
decl_event!(
    pub enum Event<T> where
        Balance = BalanceOf<T>,
        <T as frame_system::Trait>::AccountId,
    {
        /// A new term with \[new_members\]. This indicates that enough candidates existed to run the
        /// election, not that enough have been elected. The inner value must be examined for
        /// this purpose. A `NewTerm(\[\])` indicates that some candidates got their bond slashed and
        /// none were elected, whilst `EmptyTerm` means that no candidates existed to begin with.
        NewTerm(Vec<(AccountId, Balance)>),
        /// No (or not enough) candidates existed for this round. This is different from
        /// `NewTerm(\[\])`. See the description of `NewTerm`.
        EmptyTerm,
        /// Internal error happened while trying to perform election.
        ElectionError,
        /// A \[member\] has been removed. This should always be followed by either `NewTerm` or
        /// `EmptyTerm`.
        MemberKicked(AccountId),
        /// A \[member\] has renounced their candidacy.
        MemberRenounced(AccountId),
        /// A voter was reported with the report being successful or not.
        /// \[voter, reporter, success\]
        VoterReported(AccountId, AccountId, bool),
    }
);
impl<T: Trait> Module<T> {
    /// Attempts to remove a member `who`. If a runner-up exists, it is used as the replacement
    /// and `Ok(true)` is returned.
    ///
    /// Otherwise, `Ok(false)` is returned to signal the caller.
    ///
    /// If a replacement exists, `Members` and `RunnersUp` storage is updated, where the first
    /// element of `RunnersUp` is used as the replacement and `Ok(true)` is returned. Else,
    /// `Ok(false)` is returned with no storage updated.
    ///
    /// Note that this function _will_ call into `T::ChangeMembers` in case any change happens
    /// (`Ok(true)`).
    ///
    /// If replacement exists, this will read and write from/into both `Members` and `RunnersUp`.
    fn remove_and_replace_member(who: &T::AccountId) -> Result<bool, DispatchError> {
        let mut members_with_stake = Self::members();
        if let Ok(index) = members_with_stake.binary_search_by(|(ref m, ref _s)| m.cmp(who)) {
            members_with_stake.remove(index);
            // pop the last (best — see ordering note in `do_phragmen`) runner-up, if any.
            let next_up = <RunnersUp<T>>::mutate(|runners_up| runners_up.pop());
            let maybe_replacement = next_up.and_then(|(replacement, stake)| {
                // only insert the replacement if it is not already a member; `err()` is
                // the sorted insert position.
                members_with_stake
                    .binary_search_by(|(ref m, ref _s)| m.cmp(&replacement))
                    .err()
                    .map(|index| {
                        members_with_stake.insert(index, (replacement.clone(), stake));
                        replacement
                    })
            });
            <Members<T>>::put(&members_with_stake);
            let members = members_with_stake.into_iter().map(|m| m.0).collect::<Vec<_>>();
            let result = Ok(maybe_replacement.is_some());
            let old = [who.clone()];
            match maybe_replacement {
                Some(new) => T::ChangeMembers::change_members_sorted(&[new], &old, &members),
                None => T::ChangeMembers::change_members_sorted(&[], &old, &members),
            }
            result
        } else {
            Err(Error::<T>::NotMember)?
        }
    }

    /// Check if `who` is a candidate. It returns the insert index if the element does not exist
    /// as an error.
    ///
    /// O(LogN) given N candidates.
    fn is_candidate(who: &T::AccountId) -> Result<(), usize> {
        Self::candidates().binary_search(who).map(|_| ())
    }

    /// Check if `who` is a voter. It may or may not be a _current_ one.
    ///
    /// State: O(1).
    fn is_voter(who: &T::AccountId) -> bool {
        Voting::<T>::contains_key(who)
    }

    /// Check if `who` is currently an active member.
    ///
    /// O(LogN) given N members. Since members are limited, O(1).
    fn is_member(who: &T::AccountId) -> bool {
        Self::members().binary_search_by(|(a, _b)| a.cmp(who)).is_ok()
    }

    /// Check if `who` is currently an active runner-up.
    ///
    /// O(N) given N runners-up (runners-up are not sorted by account id, so a linear scan
    /// is required). Since runners-up are limited, effectively O(1).
    fn is_runner_up(who: &T::AccountId) -> bool {
        // idiomatic form of `.position(..).is_some()`.
        Self::runners_up().iter().any(|(a, _b)| a == who)
    }

    /// Returns number of desired members.
    fn desired_members() -> u32 {
        T::DesiredMembers::get()
    }

    /// Returns number of desired runners up.
    fn desired_runners_up() -> u32 {
        T::DesiredRunnersUp::get()
    }

    /// Returns the term duration
    fn term_duration() -> T::BlockNumber {
        T::TermDuration::get()
    }

    /// Get the members' account ids.
    fn members_ids() -> Vec<T::AccountId> {
        Self::members().into_iter().map(|(m, _)| m).collect::<Vec<T::AccountId>>()
    }

    /// The runners-up account ids.
    fn runners_up_ids() -> Vec<T::AccountId> {
        Self::runners_up().into_iter().map(|(r, _)| r).collect::<Vec<T::AccountId>>()
    }

    /// Check if `votes` will correspond to a defunct voter. As no origin is part of the inputs,
    /// this function does not check the origin at all.
    ///
    /// O(NLogM) with M candidates and `who` having voted for `N` of them.
    /// Reads Members, RunnersUp, Candidates and Voting(who) from database.
    fn is_defunct_voter(votes: &[T::AccountId]) -> bool {
        votes.iter().all(|v| {
            // idiomatic form of `!(..).is_ok()`.
            !Self::is_member(v) && !Self::is_runner_up(v) && Self::is_candidate(v).is_err()
        })
    }

    /// Remove a certain someone as a voter.
    ///
    /// This will always clean the storage associated with the voter, and remove the balance
    /// lock. Optionally, it would also return the reserved voting bond if indicated by
    /// `unreserve`.
    ///
    /// DB access: Voting and Lock are always written to; if `unreserve`, one extra read and
    /// write is added.
    fn do_remove_voter(who: &T::AccountId, unreserve: bool) {
        // remove storage and lock.
        Voting::<T>::remove(who);
        T::Currency::remove_lock(T::ModuleId::get(), who);
        if unreserve {
            T::Currency::unreserve(who, T::VotingBond::get());
        }
    }

    /// Check there's nothing to do this block.
    ///
    /// Runs phragmen election and cleans all the previous candidate state. The voter state is NOT
    /// cleaned and voters must themselves submit a transaction to retract.
    fn end_block(block_number: T::BlockNumber) -> Weight {
        if !Self::term_duration().is_zero() {
            if (block_number % Self::term_duration()).is_zero() {
                Self::do_phragmen();
                return T::MaximumBlockWeight::get()
            }
        }
        0
    }

    /// Run the phragmen election with all required side processes and state updates, if election
    /// succeeds. Else, it will emit an `ElectionError` event.
    ///
    /// Calls the appropriate [`ChangeMembers`] function variant internally.
    ///
    /// Reads: O(C + V*E) where C = candidates, V voters and E votes per voter exits.
    /// Writes: O(M + R) with M desired members and R runners_up.
    fn do_phragmen() {
        let desired_seats = Self::desired_members() as usize;
        let desired_runners_up = Self::desired_runners_up() as usize;
        let num_to_elect = desired_runners_up + desired_seats;
        let mut candidates = Self::candidates();
        // candidates who explicitly called `submit_candidacy`. Only these folks are at risk of
        // losing their bond.
        let exposed_candidates = candidates.clone();
        // current members are always a candidate for the next round as well.
        // this is guaranteed to not create any duplicates.
        candidates.append(&mut Self::members_ids());
        // previous runners_up are also always candidates for the next round.
        candidates.append(&mut Self::runners_up_ids());
        if candidates.len().is_zero() {
            Self::deposit_event(RawEvent::EmptyTerm);
            return
        }
        // helper closures to deal with balance/stake.
        let to_votes = |b: BalanceOf<T>| -> VoteWeight {
            <T::CurrencyToVote as Convert<BalanceOf<T>, VoteWeight>>::convert(b)
        };
        let to_balance = |e: ExtendedBalance| -> BalanceOf<T> {
            <T::CurrencyToVote as Convert<ExtendedBalance, BalanceOf<T>>>::convert(e)
        };
        // used for prime election.
        let voters_and_stakes = Voting::<T>::iter()
            .map(|(voter, (stake, votes))| (voter, stake, votes))
            .collect::<Vec<_>>();
        // used for phragmen.
        let voters_and_votes = voters_and_stakes
            .iter()
            .cloned()
            .map(|(voter, stake, votes)| (voter, to_votes(stake), votes))
            .collect::<Vec<_>>();
        let _ = sp_npos_elections::seq_phragmen::<T::AccountId, Perbill>(
            num_to_elect,
            candidates,
            voters_and_votes.clone(),
            None,
        )
        .map(|ElectionResult { winners, assignments: _ }| {
            let old_members_ids =
                <Members<T>>::take().into_iter().map(|(m, _)| m).collect::<Vec<T::AccountId>>();
            let old_runners_up_ids =
                <RunnersUp<T>>::take().into_iter().map(|(r, _)| r).collect::<Vec<T::AccountId>>();
            // filter out those who end up with no backing stake.
            let new_set_with_stake = winners
                .into_iter()
                .filter_map(|(m, b)| if b.is_zero() { None } else { Some((m, to_balance(b))) })
                .collect::<Vec<(T::AccountId, BalanceOf<T>)>>();
            // OPTIMISATION NOTE: we could bail out here if `new_set.len() == 0`. There isn't much
            // left to do. Yet, re-arranging the code would require duplicating the slashing of
            // exposed candidates, cleaning any previous members, and so on. For now, in favour of
            // readability and veracity, we keep it simple.
            // split new set into winners and runners up.
            let split_point = desired_seats.min(new_set_with_stake.len());
            let mut new_members = (&new_set_with_stake[..split_point]).to_vec();
            // save the runners up as-is. They are sorted based on desirability.
            // save the members, sorted based on account id.
            new_members.sort_by(|i, j| i.0.cmp(&j.0));
            // Now we select a prime member using a [Borda count](https://en.wikipedia.org/wiki/Borda_count).
            // We weigh everyone's vote for that new member by a multiplier based on the order
            // of the votes. i.e. the first person a voter votes for gets a 16x multiplier,
            // the next person gets a 15x multiplier, an so on... (assuming `MAXIMUM_VOTE` = 16)
            let mut prime_votes: Vec<_> =
                new_members.iter().map(|c| (&c.0, BalanceOf::<T>::zero())).collect();
            for (_, stake, votes) in voters_and_stakes.into_iter() {
                for (vote_multiplier, who) in votes
                    .iter()
                    .enumerate()
                    .map(|(vote_position, who)| ((MAXIMUM_VOTE - vote_position) as u32, who))
                {
                    if let Ok(i) = prime_votes.binary_search_by_key(&who, |k| k.0) {
                        prime_votes[i].1 = prime_votes[i]
                            .1
                            .saturating_add(stake.saturating_mul(vote_multiplier.into()));
                    }
                }
            }
            // We then select the new member with the highest weighted stake. In the case of
            // a tie, the last person in the list with the tied score is selected. This is
            // the person with the "highest" account id based on the sort above.
            let prime = prime_votes.into_iter().max_by_key(|x| x.1).map(|x| x.0.clone());
            // new_members_ids is sorted by account id.
            let new_members_ids =
                new_members.iter().map(|(m, _)| m.clone()).collect::<Vec<T::AccountId>>();
            let new_runners_up = &new_set_with_stake[split_point..]
                .iter()
                .cloned()
                .rev()
                .collect::<Vec<(T::AccountId, BalanceOf<T>)>>();
            // new_runners_up remains sorted by desirability.
            let new_runners_up_ids =
                new_runners_up.iter().map(|(r, _)| r.clone()).collect::<Vec<T::AccountId>>();
            // report member changes. We compute diff because we need the outgoing list.
            let (incoming, outgoing) =
                T::ChangeMembers::compute_members_diff(&new_members_ids, &old_members_ids);
            T::ChangeMembers::change_members_sorted(&incoming, &outgoing, &new_members_ids);
            T::ChangeMembers::set_prime(prime);
            // outgoing candidates lose their bond.
            let mut to_burn_bond = outgoing.to_vec();
            // compute the outgoing of runners up as well and append them to the `to_burn_bond`
            {
                let (_, outgoing) = T::ChangeMembers::compute_members_diff(
                    &new_runners_up_ids,
                    &old_runners_up_ids,
                );
                to_burn_bond.extend(outgoing);
            }
            // Burn loser bond. members list is sorted. O(NLogM) (N candidates, M members)
            // runner up list is not sorted. O(K*N) given K runner ups. Overall: O(NLogM + N*K)
            // both the member and runner counts are bounded.
            exposed_candidates.into_iter().for_each(|c| {
                // any candidate who is not a member and not a runner up.
                if new_members.binary_search_by_key(&c, |(m, _)| m.clone()).is_err() &&
                    !new_runners_up_ids.contains(&c)
                {
                    let (imbalance, _) = T::Currency::slash_reserved(&c, T::CandidacyBond::get());
                    T::LoserCandidate::on_unbalanced(imbalance);
                }
            });
            // Burn outgoing bonds
            to_burn_bond.into_iter().for_each(|x| {
                let (imbalance, _) = T::Currency::slash_reserved(&x, T::CandidacyBond::get());
                T::LoserCandidate::on_unbalanced(imbalance);
            });
            <Members<T>>::put(&new_members);
            <RunnersUp<T>>::put(new_runners_up);
            // `new_members` is no longer needed past this point; move it into the event
            // instead of the former redundant `clone().to_vec()`.
            Self::deposit_event(RawEvent::NewTerm(new_members));
            // clean candidates.
            <Candidates<T>>::kill();
            ElectionRounds::mutate(|v| *v += 1);
        })
        .map_err(|e| {
            frame_support::debug::error!("elections-phragmen: failed to run election [{:?}].", e);
            Self::deposit_event(RawEvent::ElectionError);
        });
    }
}
impl<T: Trait> Contains<T::AccountId> for Module<T> {
    /// Membership test: delegates to the sorted `Members` storage via `is_member`.
    fn contains(who: &T::AccountId) -> bool {
        Self::is_member(who)
    }

    /// The full, sorted member set (account ids only).
    fn sorted_members() -> Vec<T::AccountId> {
        Self::members_ids()
    }

    // A special function to populate members in this pallet for passing Origin
    // checks in runtime benchmarking.
    #[cfg(feature = "runtime-benchmarks")]
    fn add(who: &T::AccountId) {
        Members::<T>::mutate(|members| {
            // only insert when absent; `Err` from the binary search carries the sorted
            // insert position, so `Members` stays ordered by account id.
            if let Err(pos) = members.binary_search_by(|(a, _b)| a.cmp(who)) {
                members.insert(pos, (who.clone(), BalanceOf::<T>::default()));
            }
        })
    }
}
impl<T: Trait> ContainsLengthBound for Module<T> {
    // The member set may be empty, so the lower bound is zero.
    fn min_len() -> usize {
        0
    }
    /// Implementation uses a parameter type so calling is cost-free.
    // NOTE: bounds only the member set reported by `Contains`; runners-up are not counted.
    fn max_len() -> usize {
        Self::desired_members() as usize
    }
}
#[cfg(test)]
mod tests {
use super::*;
use crate as elections_phragmen;
use frame_support::{
assert_err_with_weight, assert_noop, assert_ok, parameter_types, weights::Weight,
};
use sp_core::H256;
use sp_runtime::{
testing::Header,
traits::{BlakeTwo256, Block as BlockT, IdentityLookup},
BuildStorage, DispatchResult, Perbill,
};
use std::cell::RefCell;
use substrate_test_utils::assert_eq_uvec;
// Constants for the mock `frame_system` configuration below.
parameter_types! {
    pub const BlockHashCount: u64 = 250;
    pub const MaximumBlockWeight: Weight = 1024;
    pub const MaximumBlockLength: u32 = 2 * 1024;
    pub const AvailableBlockRatio: Perbill = Perbill::one();
}
// Standard mock `frame_system` configuration: u64 account ids and block numbers,
// identity lookup, balances-backed account data.
impl frame_system::Trait for Test {
    type BaseCallFilter = ();
    type Origin = Origin;
    type Index = u64;
    type BlockNumber = u64;
    type Call = Call;
    type Hash = H256;
    type Hashing = BlakeTwo256;
    type AccountId = u64;
    type Lookup = IdentityLookup<Self::AccountId>;
    type Header = Header;
    type Event = Event;
    type BlockHashCount = BlockHashCount;
    type MaximumBlockWeight = MaximumBlockWeight;
    type DbWeight = ();
    type BlockExecutionWeight = ();
    type ExtrinsicBaseWeight = ();
    type MaximumExtrinsicWeight = MaximumBlockWeight;
    type MaximumBlockLength = MaximumBlockLength;
    type AvailableBlockRatio = AvailableBlockRatio;
    type Version = ();
    type PalletInfo = ();
    type AccountData = pallet_balances::AccountData<u64>;
    type OnNewAccount = ();
    type OnKilledAccount = ();
    type SystemWeightInfo = ();
}
parameter_types! {
    pub const ExistentialDeposit: u64 = 1;
}
// Mock balances pallet: plain u64 balances stored in the system account store.
impl pallet_balances::Trait for Test {
    type Balance = u64;
    type Event = Event;
    type DustRemoval = ();
    type ExistentialDeposit = ExistentialDeposit;
    type AccountStore = frame_system::Module<Test>;
    type MaxLocks = ();
    type WeightInfo = ();
}
parameter_types! {
    pub const CandidacyBond: u64 = 3;
}
// Mutable pallet parameters, kept in thread-locals so individual tests (via `ExtBuilder`)
// can override them; each is exposed through a zero-sized `Get` implementor below.
thread_local! {
    static VOTING_BOND: RefCell<u64> = RefCell::new(2);
    static DESIRED_MEMBERS: RefCell<u32> = RefCell::new(2);
    static DESIRED_RUNNERS_UP: RefCell<u32> = RefCell::new(2);
    static TERM_DURATION: RefCell<u64> = RefCell::new(5);
}
// `Get` adapters reading the thread-local values above.
pub struct VotingBond;
impl Get<u64> for VotingBond {
    fn get() -> u64 {
        VOTING_BOND.with(|v| *v.borrow())
    }
}
pub struct DesiredMembers;
impl Get<u32> for DesiredMembers {
    fn get() -> u32 {
        DESIRED_MEMBERS.with(|v| *v.borrow())
    }
}
pub struct DesiredRunnersUp;
impl Get<u32> for DesiredRunnersUp {
    fn get() -> u32 {
        DESIRED_RUNNERS_UP.with(|v| *v.borrow())
    }
}
pub struct TermDuration;
impl Get<u64> for TermDuration {
    fn get() -> u64 {
        TERM_DURATION.with(|v| *v.borrow())
    }
}
// Shadow state mirroring what the pallet reported through `ChangeMembers`, so tests can
// assert the callbacks were made consistently.
thread_local! {
    pub static MEMBERS: RefCell<Vec<u64>> = RefCell::new(vec![]);
    pub static PRIME: RefCell<Option<u64>> = RefCell::new(None);
}
// `ChangeMembers` receiver that validates the pallet's invariants on every callback:
// all three slices sorted, incoming/outgoing disjoint, and old + incoming == new + outgoing.
pub struct TestChangeMembers;
impl ChangeMembers<u64> for TestChangeMembers {
    fn change_members_sorted(incoming: &[u64], outgoing: &[u64], new: &[u64]) {
        // new, incoming, outgoing must be sorted.
        let mut new_sorted = new.to_vec();
        new_sorted.sort();
        assert_eq!(new, &new_sorted[..]);
        let mut incoming_sorted = incoming.to_vec();
        incoming_sorted.sort();
        assert_eq!(incoming, &incoming_sorted[..]);
        let mut outgoing_sorted = outgoing.to_vec();
        outgoing_sorted.sort();
        assert_eq!(outgoing, &outgoing_sorted[..]);
        // incoming and outgoing must be disjoint
        for x in incoming.iter() {
            assert!(outgoing.binary_search(x).is_err());
        }
        // multiset equality: old ∪ incoming == new ∪ outgoing.
        let mut old_plus_incoming = MEMBERS.with(|m| m.borrow().to_vec());
        old_plus_incoming.extend_from_slice(incoming);
        old_plus_incoming.sort();
        let mut new_plus_outgoing = new.to_vec();
        new_plus_outgoing.extend_from_slice(outgoing);
        new_plus_outgoing.sort();
        assert_eq!(old_plus_incoming, new_plus_outgoing, "change members call is incorrect!");
        MEMBERS.with(|m| *m.borrow_mut() = new.to_vec());
        // any membership change resets the prime until `set_prime` is called again.
        PRIME.with(|p| *p.borrow_mut() = None);
    }
    fn set_prime(who: Option<u64>) {
        PRIME.with(|p| *p.borrow_mut() = who);
    }
}
/// Simple structure that exposes how u64 currency can be represented as... u64.
pub struct CurrencyToVoteHandler;
impl Convert<u64, u64> for CurrencyToVoteHandler {
    fn convert(x: u64) -> u64 {
        x
    }
}
impl Convert<u128, u64> for CurrencyToVoteHandler {
    // NOTE: truncating cast — acceptable in tests where balances stay within u64.
    fn convert(x: u128) -> u64 {
        x as u64
    }
}
parameter_types! {
    pub const ElectionsPhragmenModuleId: LockIdentifier = *b"phrelect";
}
// The pallet-under-test configuration, wired to the mocks above.
impl Trait for Test {
    type ModuleId = ElectionsPhragmenModuleId;
    type Event = Event;
    type Currency = Balances;
    type CurrencyToVote = CurrencyToVoteHandler;
    type ChangeMembers = TestChangeMembers;
    type InitializeMembers = ();
    type CandidacyBond = CandidacyBond;
    type VotingBond = VotingBond;
    type TermDuration = TermDuration;
    type DesiredMembers = DesiredMembers;
    type DesiredRunnersUp = DesiredRunnersUp;
    type LoserCandidate = ();
    type KickedMember = ();
    type BadReport = ();
    type WeightInfo = ();
}
pub type Block = sp_runtime::generic::Block<Header, UncheckedExtrinsic>;
pub type UncheckedExtrinsic = sp_runtime::generic::UncheckedExtrinsic<u32, u64, Call, ()>;
// Assemble the mock runtime from system, balances and the pallet under test.
frame_support::construct_runtime!(
    pub enum Test where
        Block = Block,
        NodeBlock = Block,
        UncheckedExtrinsic = UncheckedExtrinsic
    {
        System: frame_system::{Module, Call, Event<T>},
        Balances: pallet_balances::{Module, Call, Event<T>, Config<T>},
        Elections: elections_phragmen::{Module, Call, Event<T>, Config<T>},
    }
);
// Builder for test externalities: collects pallet parameters and genesis state,
// then materialises them in `build_and_execute`.
pub struct ExtBuilder {
    genesis_members: Vec<(u64, u64)>, // (account, stake) pairs seeded as initial members
    balance_factor: u64,              // multiplier applied to all genesis balances
    voter_bond: u64,
    term_duration: u64,
    desired_runners_up: u32,
    desired_members: u32,
}
impl Default for ExtBuilder {
    fn default() -> Self {
        Self {
            genesis_members: vec![],
            balance_factor: 1,
            voter_bond: 2,
            term_duration: 5,
            desired_runners_up: 0,
            desired_members: 2,
        }
    }
}
impl ExtBuilder {
    // -- chained setters ------------------------------------------------------
    pub fn voter_bond(mut self, fee: u64) -> Self {
        self.voter_bond = fee;
        self
    }
    pub fn desired_runners_up(mut self, count: u32) -> Self {
        self.desired_runners_up = count;
        self
    }
    pub fn term_duration(mut self, duration: u64) -> Self {
        self.term_duration = duration;
        self
    }
    pub fn genesis_members(mut self, members: Vec<(u64, u64)>) -> Self {
        self.genesis_members = members;
        self
    }
    #[cfg(feature = "runtime-benchmarks")]
    pub fn desired_members(mut self, count: u32) -> Self {
        self.desired_members = count;
        self
    }
    pub fn balance_factor(mut self, factor: u64) -> Self {
        self.balance_factor = factor;
        self
    }
    // Push the collected parameters into the thread-local `Get` backings and the
    // `MEMBERS` shadow list.
    fn set_constants(&self) {
        VOTING_BOND.with(|v| *v.borrow_mut() = self.voter_bond);
        TERM_DURATION.with(|v| *v.borrow_mut() = self.term_duration);
        DESIRED_RUNNERS_UP.with(|v| *v.borrow_mut() = self.desired_runners_up);
        DESIRED_MEMBERS.with(|m| *m.borrow_mut() = self.desired_members);
        MEMBERS.with(|m| {
            *m.borrow_mut() =
                self.genesis_members.iter().map(|(m, _)| m.clone()).collect::<Vec<_>>()
        });
    }
    // Build genesis storage, run `pre_conditions`, the test closure, and `post_conditions`
    // (the latter two sanity-check the pallet's sortedness/disjointness invariants).
    pub fn build_and_execute(self, test: impl FnOnce() -> ()) {
        self.set_constants();
        let mut ext: sp_io::TestExternalities = GenesisConfig {
            pallet_balances: Some(pallet_balances::GenesisConfig::<Test> {
                balances: vec![
                    (1, 10 * self.balance_factor),
                    (2, 20 * self.balance_factor),
                    (3, 30 * self.balance_factor),
                    (4, 40 * self.balance_factor),
                    (5, 50 * self.balance_factor),
                    (6, 60 * self.balance_factor),
                ],
            }),
            elections_phragmen: Some(elections_phragmen::GenesisConfig::<Test> {
                members: self.genesis_members,
            }),
        }
        .build_storage()
        .unwrap()
        .into();
        ext.execute_with(pre_conditions);
        ext.execute_with(test);
        ext.execute_with(post_conditions)
    }
}
/// Every account that currently has a `Voting` record (iteration order of the map).
fn all_voters() -> Vec<u64> {
    let mut voters = Vec::new();
    for (voter, _record) in Voting::<Test>::iter() {
        voters.push(voter);
    }
    voters
}
// (free, reserved) balance pair of `who`.
fn balances(who: &u64) -> (u64, u64) {
    (Balances::free_balance(who), Balances::reserved_balance(who))
}
// Returns the amount of this pallet's balance lock on `who`.
// NOTE(test helper): indexes the first lock and asserts it belongs to this pallet;
// panics if `who` has no locks at all — intentional in tests.
fn has_lock(who: &u64) -> u64 {
    let lock = Balances::locks(who)[0].clone();
    assert_eq!(lock.id, ElectionsPhragmenModuleId::get());
    lock.amount
}
/// Whether the two slices share at least one element (empty slices never intersect).
fn intersects<T: PartialEq>(a: &[T], b: &[T]) -> bool {
    for item in a {
        if b.contains(item) {
            return true;
        }
    }
    false
}
// invariant: `Members` storage is sorted by account id.
fn ensure_members_sorted() {
    let mut members = Elections::members().clone();
    members.sort();
    assert_eq!(Elections::members(), members);
}
// invariant: `Candidates` storage is sorted by account id.
fn ensure_candidates_sorted() {
    let mut candidates = Elections::candidates().clone();
    candidates.sort();
    assert_eq!(Elections::candidates(), candidates);
}
// The stake component of `who`'s `Voting` record.
fn locked_stake_of(who: &u64) -> u64 {
    Voting::<Test>::get(who).0
}
fn ensure_members_has_approval_stake() {
    // we filter members that have no approval state. This means that even if we have more
    // seats than candidates, we will never ever choose a member with no votes.
    assert!(Elections::members()
        .iter()
        .chain(Elections::runners_up().iter())
        .all(|(_, s)| *s != u64::zero()));
}
fn ensure_member_candidates_runners_up_disjoint() {
    // members, candidates and runners-up must always be disjoint sets.
    assert!(!intersects(&Elections::members_ids(), &Elections::candidates()));
    assert!(!intersects(&Elections::members_ids(), &Elections::runners_up_ids()));
    assert!(!intersects(&Elections::candidates(), &Elections::runners_up_ids()));
}
// Run before every test body: set a starting block and check sortedness invariants.
fn pre_conditions() {
    System::set_block_number(1);
    ensure_members_sorted();
    ensure_candidates_sorted();
}
// Run after every test body: the pallet's invariants must still hold.
fn post_conditions() {
    ensure_members_sorted();
    ensure_candidates_sorted();
    ensure_member_candidates_runners_up_disjoint();
    ensure_members_has_approval_stake();
}
// Submit candidacy with the correct `candidate_count` weight hint computed from storage.
fn submit_candidacy(origin: Origin) -> DispatchResult {
    Elections::submit_candidacy(origin, Elections::candidates().len() as u32)
}
fn vote(origin: Origin, votes: Vec<u64>, stake: u64) -> DispatchResult {
    // historical note: helper function was created in a period of time in which the API of vote
    // call was changing. Currently it is a wrapper for the original call and does not do much.
    // Nonetheless, totally harmless.
    ensure_signed(origin.clone()).expect("vote origin must be signed");
    Elections::vote(origin, votes, stake)
}
// The vote list of `who`'s `Voting` record.
fn votes_of(who: &u64) -> Vec<u64> {
    Voting::<Test>::get(who).1
}
// Build a `DefunctVoter` report for `who` with accurate weight-hint counts from storage.
fn defunct_for(who: u64) -> DefunctVoter<u64> {
    DefunctVoter {
        who,
        candidate_count: Elections::candidates().len() as u32,
        vote_count: votes_of(&who).len() as u32,
    }
}
// Default builder parameters are reflected in pallet getters and storage starts empty.
#[test]
fn params_should_work() {
    ExtBuilder::default().build_and_execute(|| {
        assert_eq!(Elections::desired_members(), 2);
        assert_eq!(Elections::term_duration(), 5);
        assert_eq!(Elections::election_rounds(), 0);
        assert!(Elections::members().is_empty());
        assert!(Elections::runners_up().is_empty());
        assert!(Elections::candidates().is_empty());
        assert_eq!(<Candidates<Test>>::decode_len(), None);
        assert!(Elections::is_candidate(&1).is_err());
        assert!(all_voters().is_empty());
        assert!(votes_of(&1).is_empty());
    });
}
// Genesis members are seeded with self-votes and survive the first election.
#[test]
fn genesis_members_should_work() {
    ExtBuilder::default().genesis_members(vec![(1, 10), (2, 20)]).build_and_execute(|| {
        System::set_block_number(1);
        assert_eq!(Elections::members(), vec![(1, 10), (2, 20)]);
        assert_eq!(Elections::voting(1), (10, vec![1]));
        assert_eq!(Elections::voting(2), (20, vec![2]));
        // they will persist since they have self vote.
        System::set_block_number(5);
        Elections::end_block(System::block_number());
        assert_eq!(Elections::members_ids(), vec![1, 2]);
    })
}
// Genesis members given unsorted end up sorted in storage.
#[test]
fn genesis_members_unsorted_should_work() {
    ExtBuilder::default().genesis_members(vec![(2, 20), (1, 10)]).build_and_execute(|| {
        System::set_block_number(1);
        assert_eq!(Elections::members(), vec![(1, 10), (2, 20)]);
        assert_eq!(Elections::voting(1), (10, vec![1]));
        assert_eq!(Elections::voting(2), (20, vec![2]));
        // they will persist since they have self vote.
        System::set_block_number(5);
        Elections::end_block(System::block_number());
        assert_eq!(Elections::members_ids(), vec![1, 2]);
    })
}
// Genesis build must panic when a member's stake exceeds their balance.
#[test]
#[should_panic = "Genesis member does not have enough stake"]
fn genesis_members_cannot_over_stake_0() {
    // 10 cannot lock 20 as their stake and extra genesis will panic.
    ExtBuilder::default().genesis_members(vec![(1, 20), (2, 20)]).build_and_execute(|| {});
}
// Genesis build must panic when the voting bond cannot be reserved from a member's balance.
#[test]
#[should_panic]
fn genesis_members_cannot_over_stake_1() {
    // 10 cannot reserve 20 as voting bond and extra genesis will panic.
    ExtBuilder::default()
        .voter_bond(20)
        .genesis_members(vec![(1, 10), (2, 20)])
        .build_and_execute(|| {});
}
// Duplicate genesis members are rejected at genesis build time.
#[test]
#[should_panic = "Duplicate member in elections phragmen genesis: 2"]
fn genesis_members_cannot_be_duplicate() {
    ExtBuilder::default()
        .genesis_members(vec![(1, 10), (2, 10), (2, 10)])
        .build_and_execute(|| {});
}
// A zero term duration disables elections entirely: `end_block` is a no-op.
#[test]
fn term_duration_zero_is_passive() {
    ExtBuilder::default().term_duration(0).build_and_execute(|| {
        assert_eq!(Elections::term_duration(), 0);
        assert_eq!(Elections::desired_members(), 2);
        assert_eq!(Elections::election_rounds(), 0);
        assert!(Elections::members_ids().is_empty());
        assert!(Elections::runners_up().is_empty());
        assert!(Elections::candidates().is_empty());
        System::set_block_number(5);
        Elections::end_block(System::block_number());
        assert!(Elections::members_ids().is_empty());
        assert!(Elections::runners_up().is_empty());
        assert!(Elections::candidates().is_empty());
    });
}
#[test]
// Submitting candidacy reserves the candidacy bond (3 here, judging by the
// balance deltas) and appends the account to the sorted candidate list.
fn simple_candidate_submission_should_work() {
ExtBuilder::default().build_and_execute(|| {
assert_eq!(Elections::candidates(), Vec::<u64>::new());
assert!(Elections::is_candidate(&1).is_err());
assert!(Elections::is_candidate(&2).is_err());
// balances(who) yields (free, reserved).
assert_eq!(balances(&1), (10, 0));
assert_ok!(submit_candidacy(Origin::signed(1)));
// 3 moved from free to reserved: the candidacy bond.
assert_eq!(balances(&1), (7, 3));
assert_eq!(Elections::candidates(), vec![1]);
assert!(Elections::is_candidate(&1).is_ok());
assert!(Elections::is_candidate(&2).is_err());
assert_eq!(balances(&2), (20, 0));
assert_ok!(submit_candidacy(Origin::signed(2)));
assert_eq!(balances(&2), (17, 3));
assert_eq!(Elections::candidates(), vec![1, 2]);
assert!(Elections::is_candidate(&1).is_ok());
assert!(Elections::is_candidate(&2).is_ok());
});
}
#[test]
// Candidates that receive no votes are cleared out by the election and do
// not become members or runners-up.
fn simple_candidate_submission_with_no_votes_should_work() {
ExtBuilder::default().build_and_execute(|| {
assert_eq!(Elections::candidates(), Vec::<u64>::new());
assert_ok!(submit_candidacy(Origin::signed(1)));
assert_ok!(submit_candidacy(Origin::signed(2)));
assert!(Elections::is_candidate(&1).is_ok());
assert!(Elections::is_candidate(&2).is_ok());
assert_eq!(Elections::candidates(), vec![1, 2]);
assert!(Elections::members_ids().is_empty());
assert!(Elections::runners_up().is_empty());
System::set_block_number(5);
Elections::end_block(System::block_number());
// after the election the unvoted candidates are gone entirely.
assert!(Elections::is_candidate(&1).is_err());
assert!(Elections::is_candidate(&2).is_err());
assert!(Elections::candidates().is_empty());
assert!(Elections::members_ids().is_empty());
assert!(Elections::runners_up().is_empty());
});
}
#[test]
// An account that is already a candidate cannot submit candidacy again.
fn dupe_candidate_submission_should_not_work() {
ExtBuilder::default().build_and_execute(|| {
assert_eq!(Elections::candidates(), Vec::<u64>::new());
assert_ok!(submit_candidacy(Origin::signed(1)));
assert_eq!(Elections::candidates(), vec![1]);
assert_noop!(submit_candidacy(Origin::signed(1)), Error::<Test>::DuplicatedCandidate,);
});
}
#[test]
// A sitting member cannot re-submit candidacy (fails with `MemberSubmit`).
fn member_candidacy_submission_should_not_work() {
// critically important to make sure that outgoing candidates and losers are not mixed up.
ExtBuilder::default().build_and_execute(|| {
assert_ok!(submit_candidacy(Origin::signed(5)));
assert_ok!(vote(Origin::signed(2), vec![5], 20));
System::set_block_number(5);
Elections::end_block(System::block_number());
// 5 is now a member, no longer a plain candidate.
assert_eq!(Elections::members_ids(), vec![5]);
assert!(Elections::runners_up().is_empty());
assert!(Elections::candidates().is_empty());
assert_noop!(submit_candidacy(Origin::signed(5)), Error::<Test>::MemberSubmit,);
});
}
#[test]
// A current runner-up cannot re-submit candidacy (fails with `RunnerSubmit`).
fn runner_candidate_submission_should_not_work() {
ExtBuilder::default().desired_runners_up(2).build_and_execute(|| {
assert_ok!(submit_candidacy(Origin::signed(5)));
assert_ok!(submit_candidacy(Origin::signed(4)));
assert_ok!(submit_candidacy(Origin::signed(3)));
assert_ok!(vote(Origin::signed(2), vec![5, 4], 20));
assert_ok!(vote(Origin::signed(1), vec![3], 10));
System::set_block_number(5);
Elections::end_block(System::block_number());
// 4 and 5 win the two seats; 3 becomes the sole runner-up.
assert_eq!(Elections::members_ids(), vec![4, 5]);
assert_eq!(Elections::runners_up_ids(), vec![3]);
assert_noop!(submit_candidacy(Origin::signed(3)), Error::<Test>::RunnerSubmit,);
});
}
#[test]
// An account that cannot afford the candidacy bond is rejected.
fn poor_candidate_submission_should_not_work() {
ExtBuilder::default().build_and_execute(|| {
assert_eq!(Elections::candidates(), Vec::<u64>::new());
// account 7 presumably has insufficient funds in this test genesis.
assert_noop!(
submit_candidacy(Origin::signed(7)),
Error::<Test>::InsufficientCandidateFunds,
);
});
}
#[test]
// Voting reserves the voter bond (2) and locks the full voted stake (20).
fn simple_voting_should_work() {
ExtBuilder::default().build_and_execute(|| {
assert_eq!(Elections::candidates(), Vec::<u64>::new());
assert_eq!(balances(&2), (20, 0));
assert_ok!(submit_candidacy(Origin::signed(5)));
assert_ok!(vote(Origin::signed(2), vec![5], 20));
// (free, reserved): 2 reserved as voter bond.
assert_eq!(balances(&2), (18, 2));
// the voted stake is locked in full.
assert_eq!(has_lock(&2), 20);
});
}
#[test]
// A voter may lock less than their total balance; the lock matches the
// stake they chose (12), not their full balance.
fn can_vote_with_custom_stake() {
ExtBuilder::default().build_and_execute(|| {
assert_eq!(Elections::candidates(), Vec::<u64>::new());
assert_eq!(balances(&2), (20, 0));
assert_ok!(submit_candidacy(Origin::signed(5)));
assert_ok!(vote(Origin::signed(2), vec![5], 12));
assert_eq!(balances(&2), (18, 2));
assert_eq!(has_lock(&2), 12);
});
}
#[test]
// Re-voting replaces the previous vote: the lock is adjusted to the new
// stake while the voter bond stays reserved only once.
fn can_update_votes_and_stake() {
ExtBuilder::default().build_and_execute(|| {
assert_eq!(balances(&2), (20, 0));
assert_ok!(submit_candidacy(Origin::signed(5)));
assert_ok!(submit_candidacy(Origin::signed(4)));
assert_ok!(vote(Origin::signed(2), vec![5], 20));
assert_eq!(balances(&2), (18, 2));
assert_eq!(has_lock(&2), 20);
assert_eq!(locked_stake_of(&2), 20);
// can update; different stake; different lock and reserve.
assert_ok!(vote(Origin::signed(2), vec![5, 4], 15));
// bond unchanged (still 2 reserved); lock shrinks to the new stake.
assert_eq!(balances(&2), (18, 2));
assert_eq!(has_lock(&2), 15);
assert_eq!(locked_stake_of(&2), 15);
});
}
#[test]
// An empty vote list is rejected with `NoVotes`.
fn cannot_vote_for_no_candidate() {
ExtBuilder::default().build_and_execute(|| {
assert_noop!(vote(Origin::signed(2), vec![], 20), Error::<Test>::NoVotes,);
});
}
#[test]
// Votes for sitting members are valid even when the candidate list is empty.
fn can_vote_for_old_members_even_when_no_new_candidates() {
ExtBuilder::default().build_and_execute(|| {
assert_ok!(submit_candidacy(Origin::signed(5)));
assert_ok!(submit_candidacy(Origin::signed(4)));
assert_ok!(vote(Origin::signed(2), vec![4, 5], 20));
System::set_block_number(5);
Elections::end_block(System::block_number());
assert_eq!(Elections::members_ids(), vec![4, 5]);
assert!(Elections::candidates().is_empty());
// 4 and 5 are members now, not candidates — voting for them still works.
assert_ok!(vote(Origin::signed(3), vec![4, 5], 10));
});
}
#[test]
// After an election the prime member is chosen; here it ends up being 4
// (the member backed by the most voters — TODO confirm exact tie-break rule
// against the pallet implementation).
fn prime_works() {
ExtBuilder::default().build_and_execute(|| {
assert_ok!(submit_candidacy(Origin::signed(3)));
assert_ok!(submit_candidacy(Origin::signed(4)));
assert_ok!(submit_candidacy(Origin::signed(5)));
assert_ok!(vote(Origin::signed(1), vec![4, 3], 10));
assert_ok!(vote(Origin::signed(2), vec![4], 20));
assert_ok!(vote(Origin::signed(3), vec![3], 30));
assert_ok!(vote(Origin::signed(4), vec![4], 40));
assert_ok!(vote(Origin::signed(5), vec![5], 50));
System::set_block_number(5);
Elections::end_block(System::block_number());
assert_eq!(Elections::members_ids(), vec![4, 5]);
assert!(Elections::candidates().is_empty());
assert_ok!(vote(Origin::signed(3), vec![4, 5], 10));
// PRIME is the test-harness mirror of the prime-member callback.
assert_eq!(PRIME.with(|p| *p.borrow()), Some(4));
});
}
#[test]
// If a candidate renounces before the election, votes for them no longer
// influence the prime selection: with 4 gone, the prime becomes 5.
fn prime_votes_for_exiting_members_are_removed() {
ExtBuilder::default().build_and_execute(|| {
assert_ok!(submit_candidacy(Origin::signed(3)));
assert_ok!(submit_candidacy(Origin::signed(4)));
assert_ok!(submit_candidacy(Origin::signed(5)));
assert_ok!(vote(Origin::signed(1), vec![4, 3], 10));
assert_ok!(vote(Origin::signed(2), vec![4], 20));
assert_ok!(vote(Origin::signed(3), vec![3], 30));
assert_ok!(vote(Origin::signed(4), vec![4], 40));
assert_ok!(vote(Origin::signed(5), vec![5], 50));
// 4 withdraws; `Candidate(3)` is the declared candidate-count hint.
assert_ok!(Elections::renounce_candidacy(Origin::signed(4), Renouncing::Candidate(3)));
System::set_block_number(5);
Elections::end_block(System::block_number());
assert_eq!(Elections::members_ids(), vec![3, 5]);
assert!(Elections::candidates().is_empty());
assert_eq!(PRIME.with(|p| *p.borrow()), Some(5));
});
}
#[test]
// The allowed vote length is bounded by the current total of
// candidates + members + runners-up, not by who the votes name.
fn cannot_vote_for_more_than_candidates_and_members_and_runners() {
ExtBuilder::default().desired_runners_up(1).balance_factor(10).build_and_execute(|| {
// when we have only candidates
assert_ok!(submit_candidacy(Origin::signed(5)));
assert_ok!(submit_candidacy(Origin::signed(4)));
assert_ok!(submit_candidacy(Origin::signed(3)));
// 3 candidates exist, so a 4-target vote is too long.
assert_noop!(
// content of the vote is irrelevant.
vote(Origin::signed(1), vec![9, 99, 999, 9999], 5),
Error::<Test>::TooManyVotes,
);
assert_ok!(vote(Origin::signed(3), vec![3], 30));
assert_ok!(vote(Origin::signed(4), vec![4], 40));
assert_ok!(vote(Origin::signed(5), vec![5], 50));
System::set_block_number(5);
Elections::end_block(System::block_number());
// now we have 2 members, 1 runner-up, and 1 new candidate
assert_ok!(submit_candidacy(Origin::signed(2)));
// 4 targets now fit (2 + 1 + 1 = 4); 5 targets do not.
assert_ok!(vote(Origin::signed(1), vec![9, 99, 999, 9999], 5));
assert_noop!(
vote(Origin::signed(1), vec![9, 99, 999, 9_999, 99_999], 5),
Error::<Test>::TooManyVotes,
);
});
}
#[test]
// A vote whose stake is below the minimum (presumably the existential
// deposit, per the test name) is rejected with `LowBalance`.
fn cannot_vote_for_less_than_ed() {
ExtBuilder::default().build_and_execute(|| {
assert_ok!(submit_candidacy(Origin::signed(5)));
assert_ok!(submit_candidacy(Origin::signed(4)));
assert_noop!(vote(Origin::signed(2), vec![4], 1), Error::<Test>::LowBalance,);
})
}
#[test]
// Declaring a stake above one's balance is accepted but capped: the
// effective locked stake is the account's real balance (20, not 30).
fn can_vote_for_more_than_total_balance_but_moot() {
ExtBuilder::default().build_and_execute(|| {
assert_ok!(submit_candidacy(Origin::signed(5)));
assert_ok!(submit_candidacy(Origin::signed(4)));
assert_ok!(vote(Origin::signed(2), vec![4, 5], 30));
// you can lie but won't get away with it.
assert_eq!(locked_stake_of(&2), 20);
assert_eq!(has_lock(&2), 20);
});
}
#[test]
// `remove_voter` clears the caller's votes, releases the lock, and refunds
// the voter bond, leaving other voters untouched.
fn remove_voter_should_work() {
ExtBuilder::default().voter_bond(8).build_and_execute(|| {
assert_ok!(submit_candidacy(Origin::signed(5)));
assert_ok!(vote(Origin::signed(2), vec![5], 20));
assert_ok!(vote(Origin::signed(3), vec![5], 30));
assert_eq_uvec!(all_voters(), vec![2, 3]);
assert_eq!(locked_stake_of(&2), 20);
assert_eq!(locked_stake_of(&3), 30);
assert_eq!(votes_of(&2), vec![5]);
assert_eq!(votes_of(&3), vec![5]);
assert_ok!(Elections::remove_voter(Origin::signed(2)));
// 3's vote remains; 2 is fully reset: no votes, no stake, no reserve, no lock.
assert_eq_uvec!(all_voters(), vec![3]);
assert!(votes_of(&2).is_empty());
assert_eq!(locked_stake_of(&2), 0);
assert_eq!(balances(&2), (20, 0));
assert_eq!(Balances::locks(&2).len(), 0);
});
}
#[test]
// `remove_voter` from an account that never voted fails with `MustBeVoter`.
fn non_voter_remove_should_not_work() {
ExtBuilder::default().build_and_execute(|| {
assert_noop!(Elections::remove_voter(Origin::signed(3)), Error::<Test>::MustBeVoter);
});
}
#[test]
// Removing one's vote twice fails the second time, since the first call
// already erased the voter record.
fn dupe_remove_should_fail() {
ExtBuilder::default().build_and_execute(|| {
assert_ok!(submit_candidacy(Origin::signed(5)));
assert_ok!(vote(Origin::signed(2), vec![5], 20));
assert_ok!(Elections::remove_voter(Origin::signed(2)));
assert!(all_voters().is_empty());
assert_noop!(Elections::remove_voter(Origin::signed(2)), Error::<Test>::MustBeVoter);
});
}
#[test]
// A vote removed before the election does not count: without 4's self-vote,
// the two seats go to 3 and 5.
fn removed_voter_should_not_be_counted() {
ExtBuilder::default().build_and_execute(|| {
assert_ok!(submit_candidacy(Origin::signed(5)));
assert_ok!(submit_candidacy(Origin::signed(4)));
assert_ok!(submit_candidacy(Origin::signed(3)));
assert_ok!(vote(Origin::signed(5), vec![5], 50));
assert_ok!(vote(Origin::signed(4), vec![4], 40));
assert_ok!(vote(Origin::signed(3), vec![3], 30));
// 4 withdraws their vote before the term ends.
assert_ok!(Elections::remove_voter(Origin::signed(4)));
System::set_block_number(5);
Elections::end_block(System::block_number());
assert_eq!(Elections::members_ids(), vec![3, 5]);
});
}
#[test]
// Only an existing voter may report another voter as defunct.
fn reporter_must_be_voter() {
ExtBuilder::default().build_and_execute(|| {
assert_noop!(
Elections::report_defunct_voter(Origin::signed(1), defunct_for(2)),
Error::<Test>::MustBeVoter,
);
});
}
#[test]
// The `DefunctVoter` hints (candidate_count, vote_count) must be at least
// the true counts; underestimating either is rejected.
fn reporter_must_provide_lengths() {
ExtBuilder::default().build_and_execute(|| {
assert_ok!(submit_candidacy(Origin::signed(5)));
assert_ok!(submit_candidacy(Origin::signed(4)));
assert_ok!(submit_candidacy(Origin::signed(3)));
// both are defunct.
assert_ok!(vote(Origin::signed(5), vec![99, 999, 9999], 50));
assert_ok!(vote(Origin::signed(4), vec![999], 40));
// 3 candidates! incorrect candidate length.
assert_noop!(
Elections::report_defunct_voter(
Origin::signed(4),
DefunctVoter { who: 5, candidate_count: 2, vote_count: 3 }
),
Error::<Test>::InvalidCandidateCount,
);
// 3 votes! incorrect vote length
assert_noop!(
Elections::report_defunct_voter(
Origin::signed(4),
DefunctVoter { who: 5, candidate_count: 3, vote_count: 2 }
),
Error::<Test>::InvalidVoteCount,
);
// correct.
assert_ok!(Elections::report_defunct_voter(
Origin::signed(4),
DefunctVoter { who: 5, candidate_count: 3, vote_count: 3 }
));
});
}
#[test]
// Overestimating the count hints in a defunct-voter report is acceptable;
// only underestimation is penalized (see `reporter_must_provide_lengths`).
fn reporter_can_overestimate_length() {
ExtBuilder::default().build_and_execute(|| {
assert_ok!(submit_candidacy(Origin::signed(5)));
assert_ok!(submit_candidacy(Origin::signed(4)));
// both are defunct.
assert_ok!(vote(Origin::signed(5), vec![99], 50));
assert_ok!(vote(Origin::signed(4), vec![999], 40));
// 2 candidates! overestimation is okay.
assert_ok!(Elections::report_defunct_voter(Origin::signed(4), defunct_for(5)));
});
}
#[test]
// `is_defunct_voter` is true exactly when none of the voter's targets is a
// current member, runner-up, or candidate.
fn can_detect_defunct_voter() {
ExtBuilder::default().desired_runners_up(2).build_and_execute(|| {
assert_ok!(submit_candidacy(Origin::signed(4)));
assert_ok!(submit_candidacy(Origin::signed(5)));
assert_ok!(submit_candidacy(Origin::signed(6)));
assert_ok!(vote(Origin::signed(5), vec![5], 50));
assert_ok!(vote(Origin::signed(4), vec![4], 40));
assert_ok!(vote(Origin::signed(2), vec![4, 5], 20));
assert_ok!(vote(Origin::signed(6), vec![6], 30));
// will be soon a defunct voter.
assert_ok!(vote(Origin::signed(3), vec![3], 30));
System::set_block_number(5);
Elections::end_block(System::block_number());
assert_eq!(Elections::members_ids(), vec![4, 5]);
assert_eq!(Elections::runners_up_ids(), vec![6]);
assert!(Elections::candidates().is_empty());
// all of them have a member or runner-up that they voted for.
assert_eq!(Elections::is_defunct_voter(&votes_of(&5)), false);
assert_eq!(Elections::is_defunct_voter(&votes_of(&4)), false);
assert_eq!(Elections::is_defunct_voter(&votes_of(&2)), false);
assert_eq!(Elections::is_defunct_voter(&votes_of(&6)), false);
// defunct
assert_eq!(Elections::is_defunct_voter(&votes_of(&3)), true);
assert_ok!(submit_candidacy(Origin::signed(1)));
assert_ok!(vote(Origin::signed(1), vec![1], 10));
// has a candidate voted for.
assert_eq!(Elections::is_defunct_voter(&votes_of(&1)), false);
});
}
#[test]
// A valid defunct-voter report removes the reported voter's bond (2) and
// credits it to the reporter's free balance; a `VoterReported(.., true)`
// event is emitted.
fn report_voter_should_work_and_earn_reward() {
ExtBuilder::default().build_and_execute(|| {
assert_ok!(submit_candidacy(Origin::signed(5)));
assert_ok!(submit_candidacy(Origin::signed(4)));
assert_ok!(vote(Origin::signed(5), vec![5], 50));
assert_ok!(vote(Origin::signed(4), vec![4], 40));
assert_ok!(vote(Origin::signed(2), vec![4, 5], 20));
// will be soon a defunct voter.
assert_ok!(vote(Origin::signed(3), vec![3], 30));
System::set_block_number(5);
Elections::end_block(System::block_number());
assert_eq!(Elections::members_ids(), vec![4, 5]);
assert!(Elections::candidates().is_empty());
assert_eq!(balances(&3), (28, 2));
assert_eq!(balances(&5), (45, 5));
assert_ok!(Elections::report_defunct_voter(Origin::signed(5), defunct_for(3)));
// `true` in the event marks the report as valid.
assert!(System::events().iter().any(|event| {
event.event == Event::elections_phragmen(RawEvent::VoterReported(3, 5, true))
}));
// 3 loses the 2 reserved as bond; 5's free balance grows by 2.
assert_eq!(balances(&3), (28, 0));
assert_eq!(balances(&5), (47, 5));
});
}
#[test]
// Reporting a voter who is NOT defunct slashes the reporter instead: the
// reported account is untouched and the reporter's reserve drops by 2.
fn report_voter_should_slash_when_bad_report() {
ExtBuilder::default().build_and_execute(|| {
assert_ok!(submit_candidacy(Origin::signed(5)));
assert_ok!(submit_candidacy(Origin::signed(4)));
assert_ok!(vote(Origin::signed(5), vec![5], 50));
assert_ok!(vote(Origin::signed(4), vec![4], 40));
System::set_block_number(5);
Elections::end_block(System::block_number());
assert_eq!(Elections::members_ids(), vec![4, 5]);
assert!(Elections::candidates().is_empty());
assert_eq!(balances(&4), (35, 5));
assert_eq!(balances(&5), (45, 5));
// 4 is a member with a self vote — not defunct, so this report is bad.
assert_ok!(Elections::report_defunct_voter(Origin::signed(5), defunct_for(4)));
// `false` in the event marks the report as invalid.
assert!(System::events().iter().any(|event| {
event.event == Event::elections_phragmen(RawEvent::VoterReported(4, 5, false))
}));
assert_eq!(balances(&4), (35, 5));
assert_eq!(balances(&5), (45, 3));
});
}
#[test]
// A full first round: candidates and votes go in, `end_block` elects the
// two highest-backed accounts, clears the candidate list, and bumps the
// round counter; voter records survive the round.
fn simple_voting_rounds_should_work() {
ExtBuilder::default().build_and_execute(|| {
assert_ok!(submit_candidacy(Origin::signed(5)));
assert_ok!(submit_candidacy(Origin::signed(4)));
assert_ok!(submit_candidacy(Origin::signed(3)));
assert_ok!(vote(Origin::signed(2), vec![5], 20));
assert_ok!(vote(Origin::signed(4), vec![4], 15));
assert_ok!(vote(Origin::signed(3), vec![3], 30));
assert_eq_uvec!(all_voters(), vec![2, 3, 4]);
assert_eq!(votes_of(&2), vec![5]);
assert_eq!(votes_of(&3), vec![3]);
assert_eq!(votes_of(&4), vec![4]);
assert_eq!(Elections::candidates(), vec![3, 4, 5]);
assert_eq!(<Candidates<Test>>::decode_len().unwrap(), 3);
assert_eq!(Elections::election_rounds(), 0);
System::set_block_number(5);
Elections::end_block(System::block_number());
// 3 (backing 30) and 5 (backing 20) beat 4 (backing 15).
assert_eq!(Elections::members(), vec![(3, 30), (5, 20)]);
assert!(Elections::runners_up().is_empty());
// voters remain registered even after the round.
assert_eq_uvec!(all_voters(), vec![2, 3, 4]);
assert!(Elections::candidates().is_empty());
// candidate storage is removed entirely, hence `decode_len` is None.
assert_eq!(<Candidates<Test>>::decode_len(), None);
assert_eq!(Elections::election_rounds(), 1);
});
}
#[test]
// An election with no candidates and no voters emits `EmptyTerm`.
fn empty_term() {
ExtBuilder::default().build_and_execute(|| {
// no candidates, no nothing.
System::set_block_number(5);
Elections::end_block(System::block_number());
assert_eq!(
System::events().iter().last().unwrap().event,
Event::elections_phragmen(RawEvent::EmptyTerm),
)
})
}
#[test]
// When every member's backing disappears before the next round, all of
// them are outgoing: a `NewTerm` with an empty member set is emitted and
// the outgoing members' bonds are slashed.
fn all_outgoing() {
ExtBuilder::default().build_and_execute(|| {
assert_ok!(submit_candidacy(Origin::signed(5)));
assert_ok!(submit_candidacy(Origin::signed(4)));
assert_ok!(vote(Origin::signed(5), vec![5], 50));
assert_ok!(vote(Origin::signed(4), vec![4], 40));
System::set_block_number(5);
Elections::end_block(System::block_number());
assert_eq!(
System::events().iter().last().unwrap().event,
Event::elections_phragmen(RawEvent::NewTerm(vec![(4, 40), (5, 50)])),
);
assert_eq!(Elections::members(), vec![(4, 40), (5, 50)]);
assert_eq!(Elections::runners_up(), vec![]);
// both members withdraw their self-votes.
assert_ok!(Elections::remove_voter(Origin::signed(5)));
assert_ok!(Elections::remove_voter(Origin::signed(4)));
System::set_block_number(10);
Elections::end_block(System::block_number());
assert_eq!(
System::events().iter().last().unwrap().event,
Event::elections_phragmen(RawEvent::NewTerm(vec![])),
);
// outgoing have lost their bond.
assert_eq!(balances(&4), (37, 0));
assert_eq!(balances(&5), (47, 0));
});
}
#[test]
// A vote for a non-candidate is inert in the current round, but becomes
// effective in a later round once that target actually submits candidacy.
fn defunct_voter_will_be_counted() {
ExtBuilder::default().build_and_execute(|| {
assert_ok!(submit_candidacy(Origin::signed(5)));
// This guy's vote is pointless for this round.
assert_ok!(vote(Origin::signed(3), vec![4], 30));
assert_ok!(vote(Origin::signed(5), vec![5], 50));
System::set_block_number(5);
Elections::end_block(System::block_number());
assert_eq!(Elections::members(), vec![(5, 50)]);
assert_eq!(Elections::election_rounds(), 1);
// but now it has a valid target.
assert_ok!(submit_candidacy(Origin::signed(4)));
System::set_block_number(10);
Elections::end_block(System::block_number());
// candidate 4 is affected by an old vote.
assert_eq!(Elections::members(), vec![(4, 30), (5, 50)]);
assert_eq!(Elections::election_rounds(), 2);
assert_eq_uvec!(all_voters(), vec![3, 5]);
});
}
#[test]
// With four viable candidates but `desired_members == 2`, only the two
// highest-backed (4 and 5) win seats.
fn only_desired_seats_are_chosen() {
ExtBuilder::default().build_and_execute(|| {
assert_ok!(submit_candidacy(Origin::signed(5)));
assert_ok!(submit_candidacy(Origin::signed(4)));
assert_ok!(submit_candidacy(Origin::signed(3)));
assert_ok!(submit_candidacy(Origin::signed(2)));
assert_ok!(vote(Origin::signed(2), vec![2], 20));
assert_ok!(vote(Origin::signed(3), vec![3], 30));
assert_ok!(vote(Origin::signed(4), vec![4], 40));
assert_ok!(vote(Origin::signed(5), vec![5], 50));
System::set_block_number(5);
Elections::end_block(System::block_number());
assert_eq!(Elections::election_rounds(), 1);
assert_eq!(Elections::members_ids(), vec![4, 5]);
});
}
#[test]
// Submitting candidacy alone does not imply a vote: with no explicit
// voters at all, the election produces an empty `NewTerm`.
fn phragmen_should_not_self_vote() {
ExtBuilder::default().build_and_execute(|| {
assert_ok!(submit_candidacy(Origin::signed(5)));
assert_ok!(submit_candidacy(Origin::signed(4)));
System::set_block_number(5);
Elections::end_block(System::block_number());
assert!(Elections::candidates().is_empty());
assert_eq!(Elections::election_rounds(), 1);
assert!(Elections::members_ids().is_empty());
assert_eq!(
System::events().iter().last().unwrap().event,
Event::elections_phragmen(RawEvent::NewTerm(vec![])),
)
});
}
#[test]
// Non-winning candidates with backing become runners-up; members are
// sorted by id, runners-up by backing (least first), and runners-up keep
// their candidacy bond reserved.
fn runners_up_should_be_kept() {
ExtBuilder::default().desired_runners_up(2).build_and_execute(|| {
assert_ok!(submit_candidacy(Origin::signed(5)));
assert_ok!(submit_candidacy(Origin::signed(4)));
assert_ok!(submit_candidacy(Origin::signed(3)));
assert_ok!(submit_candidacy(Origin::signed(2)));
// note: votes are cross-wise — each voter backs a different candidate.
assert_ok!(vote(Origin::signed(2), vec![3], 20));
assert_ok!(vote(Origin::signed(3), vec![2], 30));
assert_ok!(vote(Origin::signed(4), vec![5], 40));
assert_ok!(vote(Origin::signed(5), vec![4], 50));
System::set_block_number(5);
Elections::end_block(System::block_number());
// sorted based on account id.
assert_eq!(Elections::members_ids(), vec![4, 5]);
// sorted based on merit (least -> most)
assert_eq!(Elections::runners_up_ids(), vec![3, 2]);
// runner ups are still locked.
assert_eq!(balances(&4), (35, 5));
assert_eq!(balances(&5), (45, 5));
assert_eq!(balances(&3), (25, 5));
});
}
#[test]
// Runners-up automatically compete in the next round: after 5's backing
// drops to 15, the old runners-up 2 and 3 take over, with 3 promoted.
fn runners_up_should_be_next_candidates() {
ExtBuilder::default().desired_runners_up(2).build_and_execute(|| {
assert_ok!(submit_candidacy(Origin::signed(5)));
assert_ok!(submit_candidacy(Origin::signed(4)));
assert_ok!(submit_candidacy(Origin::signed(3)));
assert_ok!(submit_candidacy(Origin::signed(2)));
assert_ok!(vote(Origin::signed(2), vec![2], 20));
assert_ok!(vote(Origin::signed(3), vec![3], 30));
assert_ok!(vote(Origin::signed(4), vec![4], 40));
assert_ok!(vote(Origin::signed(5), vec![5], 50));
System::set_block_number(5);
Elections::end_block(System::block_number());
assert_eq!(Elections::members(), vec![(4, 40), (5, 50)]);
assert_eq!(Elections::runners_up(), vec![(2, 20), (3, 30)]);
// 5 lowers their own backing to 15 — below both runners-up.
assert_ok!(vote(Origin::signed(5), vec![5], 15));
System::set_block_number(10);
Elections::end_block(System::block_number());
assert_eq!(Elections::members(), vec![(3, 30), (4, 40)]);
assert_eq!(Elections::runners_up(), vec![(5, 15), (2, 20)]);
});
}
#[test]
// A runner-up that is displaced in a later round loses part of their bond:
// 2's reserved balance drops from 5 to 2 once 3 replaces them.
fn runners_up_lose_bond_once_outgoing() {
ExtBuilder::default().desired_runners_up(1).build_and_execute(|| {
assert_ok!(submit_candidacy(Origin::signed(5)));
assert_ok!(submit_candidacy(Origin::signed(4)));
assert_ok!(submit_candidacy(Origin::signed(2)));
assert_ok!(vote(Origin::signed(2), vec![2], 20));
assert_ok!(vote(Origin::signed(4), vec![4], 40));
assert_ok!(vote(Origin::signed(5), vec![5], 50));
System::set_block_number(5);
Elections::end_block(System::block_number());
assert_eq!(Elections::members_ids(), vec![4, 5]);
assert_eq!(Elections::runners_up_ids(), vec![2]);
assert_eq!(balances(&2), (15, 5));
// 3 enters with more backing (30 > 20) and displaces runner-up 2.
assert_ok!(submit_candidacy(Origin::signed(3)));
assert_ok!(vote(Origin::signed(3), vec![3], 30));
System::set_block_number(10);
Elections::end_block(System::block_number());
assert_eq!(Elections::runners_up_ids(), vec![3]);
assert_eq!(balances(&2), (15, 2));
});
}
#[test]
// Tracks one account's (free, reserved) balance through the full lifecycle:
// candidacy bond (3), voter bond (2), vote refund on `remove_voter`, and
// finally the candidacy bond slashed when they drop out as a member.
fn members_lose_bond_once_outgoing() {
ExtBuilder::default().build_and_execute(|| {
assert_eq!(balances(&5), (50, 0));
assert_ok!(submit_candidacy(Origin::signed(5)));
assert_eq!(balances(&5), (47, 3));
assert_ok!(vote(Origin::signed(5), vec![5], 50));
assert_eq!(balances(&5), (45, 5));
System::set_block_number(5);
Elections::end_block(System::block_number());
assert_eq!(Elections::members_ids(), vec![5]);
// removing the vote refunds the voter bond (reserved 5 -> 3).
assert_ok!(Elections::remove_voter(Origin::signed(5)));
assert_eq!(balances(&5), (47, 3));
System::set_block_number(10);
Elections::end_block(System::block_number());
// with no backing left, 5 is outgoing and the candidacy bond is slashed.
assert!(Elections::members_ids().is_empty());
assert_eq!(balances(&5), (47, 0));
});
}
#[test]
// A candidate who loses the election forfeits the candidacy bond, while
// the winner keeps theirs reserved.
fn losers_will_lose_the_bond() {
ExtBuilder::default().build_and_execute(|| {
assert_ok!(submit_candidacy(Origin::signed(5)));
assert_ok!(submit_candidacy(Origin::signed(3)));
// only 5 gets any backing; 3 has none.
assert_ok!(vote(Origin::signed(4), vec![5], 40));
assert_eq!(balances(&5), (47, 3));
assert_eq!(balances(&3), (27, 3));
System::set_block_number(5);
Elections::end_block(System::block_number());
assert_eq!(Elections::members_ids(), vec![5]);
// winner
assert_eq!(balances(&5), (47, 3));
// loser
assert_eq!(balances(&3), (27, 0));
});
}
#[test]
// Sitting members implicitly compete in the next round even though they
// are absent from the candidate list; 5 keeps the seat while 4, whose
// backing was withdrawn, is replaced by 3.
fn current_members_are_always_next_candidate() {
ExtBuilder::default().build_and_execute(|| {
assert_ok!(submit_candidacy(Origin::signed(5)));
assert_ok!(submit_candidacy(Origin::signed(4)));
assert_ok!(vote(Origin::signed(4), vec![4], 40));
assert_ok!(vote(Origin::signed(5), vec![5], 50));
System::set_block_number(5);
Elections::end_block(System::block_number());
assert_eq!(Elections::members_ids(), vec![4, 5]);
assert_eq!(Elections::election_rounds(), 1);
assert_ok!(submit_candidacy(Origin::signed(2)));
assert_ok!(vote(Origin::signed(2), vec![2], 20));
assert_ok!(submit_candidacy(Origin::signed(3)));
assert_ok!(vote(Origin::signed(3), vec![3], 30));
// 4's self-vote is withdrawn, leaving them without backing.
assert_ok!(Elections::remove_voter(Origin::signed(4)));
// 5 will persist as candidates despite not being in the list.
assert_eq!(Elections::candidates(), vec![2, 3]);
System::set_block_number(10);
Elections::end_block(System::block_number());
// 4 removed; 5 and 3 are the new best.
assert_eq!(Elections::members_ids(), vec![3, 5]);
});
}
#[test]
fn election_state_is_uninterrupted() {
// what I mean by uninterrupted:
// given no input or stimulants the same members are re-elected.
ExtBuilder::default().desired_runners_up(2).build_and_execute(|| {
assert_ok!(submit_candidacy(Origin::signed(5)));
assert_ok!(submit_candidacy(Origin::signed(4)));
assert_ok!(submit_candidacy(Origin::signed(3)));
assert_ok!(submit_candidacy(Origin::signed(2)));
assert_ok!(vote(Origin::signed(5), vec![5], 50));
assert_ok!(vote(Origin::signed(4), vec![4], 40));
assert_ok!(vote(Origin::signed(3), vec![3], 30));
assert_ok!(vote(Origin::signed(2), vec![2], 20));
// asserts that the election outcome at block `b` matches the fixed point:
// same members, same runners-up, empty candidates, and one round per 5 blocks.
let check_at_block = |b: u32| {
System::set_block_number(b.into());
Elections::end_block(System::block_number());
// we keep re-electing the same folks.
assert_eq!(Elections::members(), vec![(4, 40), (5, 50)]);
assert_eq!(Elections::runners_up(), vec![(2, 20), (3, 30)]);
// no new candidates but old members and runners-up are always added.
assert!(Elections::candidates().is_empty());
assert_eq!(Elections::election_rounds(), b / 5);
assert_eq_uvec!(all_voters(), vec![2, 3, 4, 5]);
};
// this state will always persist when no further input is given.
check_at_block(5);
check_at_block(10);
check_at_block(15);
check_at_block(20);
});
}
#[test]
// Root-removing a member slashes them and immediately triggers a new
// election round to refill the seat.
fn remove_members_triggers_election() {
ExtBuilder::default().build_and_execute(|| {
assert_ok!(submit_candidacy(Origin::signed(5)));
assert_ok!(submit_candidacy(Origin::signed(4)));
assert_ok!(vote(Origin::signed(4), vec![4], 40));
assert_ok!(vote(Origin::signed(5), vec![5], 50));
System::set_block_number(5);
Elections::end_block(System::block_number());
assert_eq!(Elections::members_ids(), vec![4, 5]);
assert_eq!(Elections::election_rounds(), 1);
// a new candidate
assert_ok!(submit_candidacy(Origin::signed(3)));
assert_ok!(vote(Origin::signed(3), vec![3], 30));
// `false` = caller claims there is no runner-up replacement available.
assert_ok!(Elections::remove_member(Origin::root(), 4, false));
assert_eq!(balances(&4), (35, 2)); // slashed
assert_eq!(Elections::election_rounds(), 2); // new election round
assert_eq!(Elections::members_ids(), vec![3, 5]); // new members
});
}
#[test]
// `remove_member`'s boolean hint must match whether a runner-up replacement
// actually exists; a wrong hint errors with `InvalidReplacement`, and the
// weight assertion checks that the failing path does not charge a full block.
fn remove_member_should_indicate_replacement() {
// first case: no runner-up exists but the hint says one does.
ExtBuilder::default().build_and_execute(|| {
assert_ok!(submit_candidacy(Origin::signed(5)));
assert_ok!(submit_candidacy(Origin::signed(4)));
assert_ok!(vote(Origin::signed(4), vec![4], 40));
assert_ok!(vote(Origin::signed(5), vec![5], 50));
System::set_block_number(5);
Elections::end_block(System::block_number());
assert_eq!(Elections::members_ids(), vec![4, 5]);
// no replacement yet.
assert_err_with_weight!(
Elections::remove_member(Origin::root(), 4, true),
Error::<Test>::InvalidReplacement,
Some(33777000), /* only thing that matters for now is that it is NOT the full
* block. */
);
});
// second case: a runner-up exists but the hint says none does.
ExtBuilder::default().desired_runners_up(1).build_and_execute(|| {
assert_ok!(submit_candidacy(Origin::signed(5)));
assert_ok!(submit_candidacy(Origin::signed(4)));
assert_ok!(submit_candidacy(Origin::signed(3)));
assert_ok!(vote(Origin::signed(3), vec![3], 30));
assert_ok!(vote(Origin::signed(4), vec![4], 40));
assert_ok!(vote(Origin::signed(5), vec![5], 50));
System::set_block_number(5);
Elections::end_block(System::block_number());
assert_eq!(Elections::members_ids(), vec![4, 5]);
assert_eq!(Elections::runners_up_ids(), vec![3]);
// there is a replacement! and this one needs a weight refund.
assert_err_with_weight!(
Elections::remove_member(Origin::root(), 4, false),
Error::<Test>::InvalidReplacement,
Some(33777000) // only thing that matters for now is that it is NOT the full block.
);
});
}
#[test]
// If every voter withdraws and nobody re-submits candidacy, the next
// round leaves all seats empty (but still counts as a round).
fn seats_should_be_released_when_no_vote() {
ExtBuilder::default().build_and_execute(|| {
assert_ok!(submit_candidacy(Origin::signed(5)));
assert_ok!(submit_candidacy(Origin::signed(4)));
assert_ok!(submit_candidacy(Origin::signed(3)));
assert_ok!(vote(Origin::signed(2), vec![3], 20));
assert_ok!(vote(Origin::signed(3), vec![3], 30));
assert_ok!(vote(Origin::signed(4), vec![4], 40));
assert_ok!(vote(Origin::signed(5), vec![5], 50));
assert_eq!(<Candidates<Test>>::decode_len().unwrap(), 3);
assert_eq!(Elections::election_rounds(), 0);
System::set_block_number(5);
Elections::end_block(System::block_number());
// 3 has 50 total backing (20 + 30), beating 4's 40.
assert_eq!(Elections::members_ids(), vec![3, 5]);
assert_eq!(Elections::election_rounds(), 1);
// every single voter walks away.
assert_ok!(Elections::remove_voter(Origin::signed(2)));
assert_ok!(Elections::remove_voter(Origin::signed(3)));
assert_ok!(Elections::remove_voter(Origin::signed(4)));
assert_ok!(Elections::remove_voter(Origin::signed(5)));
// meanwhile, no one cares to become a candidate again.
System::set_block_number(10);
Elections::end_block(System::block_number());
assert!(Elections::members_ids().is_empty());
assert_eq!(Elections::election_rounds(), 2);
});
}
#[test]
// Exercises a second round with incoming winners, a surviving member, an
// outgoing member, and outright losers — checking bonds, slashes, and the
// `NewTerm` event of the first round.
fn incoming_outgoing_are_reported() {
ExtBuilder::default().build_and_execute(|| {
assert_ok!(submit_candidacy(Origin::signed(4)));
assert_ok!(submit_candidacy(Origin::signed(5)));
assert_ok!(vote(Origin::signed(4), vec![4], 40));
assert_ok!(vote(Origin::signed(5), vec![5], 50));
System::set_block_number(5);
Elections::end_block(System::block_number());
assert_eq!(Elections::members_ids(), vec![4, 5]);
assert_ok!(submit_candidacy(Origin::signed(1)));
assert_ok!(submit_candidacy(Origin::signed(2)));
assert_ok!(submit_candidacy(Origin::signed(3)));
// 5 will change their vote and becomes an `outgoing`
assert_ok!(vote(Origin::signed(5), vec![4], 8));
// 4 will stay in the set
assert_ok!(vote(Origin::signed(4), vec![4], 40));
// 3 will become a winner
assert_ok!(vote(Origin::signed(3), vec![3], 30));
// these two are losers.
assert_ok!(vote(Origin::signed(2), vec![2], 20));
assert_ok!(vote(Origin::signed(1), vec![1], 10));
System::set_block_number(10);
Elections::end_block(System::block_number());
// 3, 4 are new members, must still be bonded, nothing slashed.
// 4's backing is 48: their own 40 plus 5's redirected 8.
assert_eq!(Elections::members(), vec![(3, 30), (4, 48)]);
assert_eq!(balances(&3), (25, 5));
assert_eq!(balances(&4), (35, 5));
// 1 is a loser, slashed by 3.
assert_eq!(balances(&1), (5, 2));
// 5 is an outgoing loser. will also get slashed.
assert_eq!(balances(&5), (45, 2));
// the first round's NewTerm event is still in the event log.
assert!(System::events().iter().any(|event| {
event.event == Event::elections_phragmen(RawEvent::NewTerm(vec![(4, 40), (5, 50)]))
}));
})
}
#[test]
// A vote naming a non-existent account (10) is simply ignored by the
// election; the valid candidates win as usual.
fn invalid_votes_are_moot() {
ExtBuilder::default().build_and_execute(|| {
assert_ok!(submit_candidacy(Origin::signed(4)));
assert_ok!(submit_candidacy(Origin::signed(3)));
assert_ok!(vote(Origin::signed(3), vec![3], 30));
assert_ok!(vote(Origin::signed(4), vec![4], 40));
// 10 is not a candidate; this vote contributes nothing.
assert_ok!(vote(Origin::signed(5), vec![10], 50));
System::set_block_number(5);
Elections::end_block(System::block_number());
assert_eq_uvec!(Elections::members_ids(), vec![3, 4]);
assert_eq!(Elections::election_rounds(), 1);
});
}
#[test]
// Checks the two different orderings side by side: members are stored
// sorted by account id, runners-up sorted by backing stake (ascending).
fn members_are_sorted_based_on_id_runners_on_merit() {
ExtBuilder::default().desired_runners_up(2).build_and_execute(|| {
assert_ok!(submit_candidacy(Origin::signed(5)));
assert_ok!(submit_candidacy(Origin::signed(4)));
assert_ok!(submit_candidacy(Origin::signed(3)));
assert_ok!(submit_candidacy(Origin::signed(2)));
// cross-wise backing: 3 gets 20, 2 gets 30, 5 gets 40, 4 gets 50.
assert_ok!(vote(Origin::signed(2), vec![3], 20));
assert_ok!(vote(Origin::signed(3), vec![2], 30));
assert_ok!(vote(Origin::signed(4), vec![5], 40));
assert_ok!(vote(Origin::signed(5), vec![4], 50));
System::set_block_number(5);
Elections::end_block(System::block_number());
// id: low -> high.
assert_eq!(Elections::members(), vec![(4, 50), (5, 40)]);
// merit: low -> high.
assert_eq!(Elections::runners_up(), vec![(3, 20), (2, 30)]);
});
}
#[test]
fn candidates_are_sorted() {
	// The candidate list stays sorted by id across submissions and renouncements.
	ExtBuilder::default().build_and_execute(|| {
		assert_ok!(submit_candidacy(Origin::signed(5)));
		assert_ok!(submit_candidacy(Origin::signed(3)));
		assert_eq!(Elections::candidates(), vec![3, 5]);
		assert_ok!(submit_candidacy(Origin::signed(2)));
		assert_ok!(submit_candidacy(Origin::signed(4)));
		assert_ok!(Elections::renounce_candidacy(Origin::signed(3), Renouncing::Candidate(4)));
		assert_eq!(Elections::candidates(), vec![2, 4, 5]);
	})
}
#[test]
fn runner_up_replacement_maintains_members_order() {
	// Removing a member promotes a runner-up, and the member set remains
	// sorted by id afterwards.
	ExtBuilder::default().desired_runners_up(2).build_and_execute(|| {
		assert_ok!(submit_candidacy(Origin::signed(5)));
		assert_ok!(submit_candidacy(Origin::signed(4)));
		assert_ok!(submit_candidacy(Origin::signed(2)));
		assert_ok!(vote(Origin::signed(2), vec![5], 20));
		assert_ok!(vote(Origin::signed(4), vec![4], 40));
		assert_ok!(vote(Origin::signed(5), vec![2], 50));
		System::set_block_number(5);
		Elections::end_block(System::block_number());
		assert_eq!(Elections::members_ids(), vec![2, 4]);
		assert_ok!(Elections::remove_member(Origin::root(), 2, true));
		assert_eq!(Elections::members_ids(), vec![4, 5]);
	});
}
#[test]
fn can_renounce_candidacy_member_with_runners_bond_is_refunded() {
	// A member renouncing gets their candidacy bond back and is replaced by
	// the best runner-up.
	ExtBuilder::default().desired_runners_up(2).build_and_execute(|| {
		assert_ok!(submit_candidacy(Origin::signed(5)));
		assert_ok!(submit_candidacy(Origin::signed(4)));
		assert_ok!(submit_candidacy(Origin::signed(3)));
		assert_ok!(submit_candidacy(Origin::signed(2)));
		assert_ok!(vote(Origin::signed(5), vec![5], 50));
		assert_ok!(vote(Origin::signed(4), vec![4], 40));
		assert_ok!(vote(Origin::signed(3), vec![3], 30));
		assert_ok!(vote(Origin::signed(2), vec![2], 20));
		System::set_block_number(5);
		Elections::end_block(System::block_number());
		assert_eq!(Elections::members_ids(), vec![4, 5]);
		assert_eq!(Elections::runners_up_ids(), vec![2, 3]);
		assert_ok!(Elections::renounce_candidacy(Origin::signed(4), Renouncing::Member));
		assert_eq!(balances(&4), (38, 2)); // 2 is voting bond.
		assert_eq!(Elections::members_ids(), vec![3, 5]);
		assert_eq!(Elections::runners_up_ids(), vec![2]);
	})
}
#[test]
fn can_renounce_candidacy_member_without_runners_bond_is_refunded() {
	// With no runners-up available, a renouncing member leaves an empty seat
	// (no replacement) but still gets the bond refunded.
	ExtBuilder::default().desired_runners_up(2).build_and_execute(|| {
		assert_ok!(submit_candidacy(Origin::signed(5)));
		assert_ok!(submit_candidacy(Origin::signed(4)));
		assert_ok!(vote(Origin::signed(5), vec![5], 50));
		assert_ok!(vote(Origin::signed(4), vec![4], 40));
		System::set_block_number(5);
		Elections::end_block(System::block_number());
		assert_eq!(Elections::members_ids(), vec![4, 5]);
		assert!(Elections::runners_up_ids().is_empty());
		assert_ok!(Elections::renounce_candidacy(Origin::signed(4), Renouncing::Member));
		assert_eq!(balances(&4), (38, 2)); // 2 is voting bond.
		// no replacement
		assert_eq!(Elections::members_ids(), vec![5]);
		assert!(Elections::runners_up_ids().is_empty());
	})
}
#[test]
fn can_renounce_candidacy_runner() {
	// A runner-up can renounce; bond is refunded and the member set is untouched.
	ExtBuilder::default().desired_runners_up(2).build_and_execute(|| {
		assert_ok!(submit_candidacy(Origin::signed(5)));
		assert_ok!(submit_candidacy(Origin::signed(4)));
		assert_ok!(submit_candidacy(Origin::signed(3)));
		assert_ok!(submit_candidacy(Origin::signed(2)));
		assert_ok!(vote(Origin::signed(5), vec![4], 50));
		assert_ok!(vote(Origin::signed(4), vec![5], 40));
		assert_ok!(vote(Origin::signed(3), vec![3], 30));
		assert_ok!(vote(Origin::signed(2), vec![2], 20));
		System::set_block_number(5);
		Elections::end_block(System::block_number());
		assert_eq!(Elections::members_ids(), vec![4, 5]);
		assert_eq!(Elections::runners_up_ids(), vec![2, 3]);
		assert_ok!(Elections::renounce_candidacy(Origin::signed(3), Renouncing::RunnerUp));
		assert_eq!(balances(&3), (28, 2)); // 2 is voting bond.
		assert_eq!(Elections::members_ids(), vec![4, 5]);
		assert_eq!(Elections::runners_up_ids(), vec![2]);
	})
}
#[test]
fn runner_up_replacement_works_when_out_of_order() {
	// Renouncing a runner-up that is not the last in the list must remove the
	// correct entry without disturbing members or the other runner-up.
	ExtBuilder::default().desired_runners_up(2).build_and_execute(|| {
		assert_ok!(submit_candidacy(Origin::signed(5)));
		assert_ok!(submit_candidacy(Origin::signed(4)));
		assert_ok!(submit_candidacy(Origin::signed(3)));
		assert_ok!(submit_candidacy(Origin::signed(2)));
		assert_ok!(vote(Origin::signed(2), vec![5], 20));
		assert_ok!(vote(Origin::signed(3), vec![3], 30));
		assert_ok!(vote(Origin::signed(4), vec![4], 40));
		assert_ok!(vote(Origin::signed(5), vec![2], 50));
		System::set_block_number(5);
		Elections::end_block(System::block_number());
		assert_eq!(Elections::members_ids(), vec![2, 4]);
		assert_eq!(Elections::runners_up_ids(), vec![5, 3]);
		assert_ok!(Elections::renounce_candidacy(Origin::signed(3), Renouncing::RunnerUp));
		assert_eq!(Elections::members_ids(), vec![2, 4]);
		assert_eq!(Elections::runners_up_ids(), vec![5]);
	});
}
#[test]
fn can_renounce_candidacy_candidate() {
	// A plain candidate can renounce before the election; the candidacy bond
	// (3 here) is returned in full.
	ExtBuilder::default().build_and_execute(|| {
		assert_ok!(submit_candidacy(Origin::signed(5)));
		assert_eq!(balances(&5), (47, 3));
		assert_eq!(Elections::candidates(), vec![5]);
		assert_ok!(Elections::renounce_candidacy(Origin::signed(5), Renouncing::Candidate(1)));
		assert_eq!(balances(&5), (50, 0));
		assert!(Elections::candidates().is_empty());
	})
}
#[test]
fn wrong_renounce_candidacy_should_fail() {
	// Renouncing anything while being neither candidate, member, nor runner-up
	// must fail with the appropriate error for each variant.
	ExtBuilder::default().build_and_execute(|| {
		assert_noop!(
			Elections::renounce_candidacy(Origin::signed(5), Renouncing::Candidate(0)),
			Error::<Test>::InvalidRenouncing,
		);
		assert_noop!(
			Elections::renounce_candidacy(Origin::signed(5), Renouncing::Member),
			Error::<Test>::NotMember,
		);
		assert_noop!(
			Elections::renounce_candidacy(Origin::signed(5), Renouncing::RunnerUp),
			Error::<Test>::InvalidRenouncing,
		);
	})
}
#[test]
fn non_member_renounce_member_should_fail() {
	// A runner-up (3) trying to renounce as a Member must be rejected.
	ExtBuilder::default().desired_runners_up(1).build_and_execute(|| {
		assert_ok!(submit_candidacy(Origin::signed(5)));
		assert_ok!(submit_candidacy(Origin::signed(4)));
		assert_ok!(submit_candidacy(Origin::signed(3)));
		assert_ok!(vote(Origin::signed(5), vec![5], 50));
		assert_ok!(vote(Origin::signed(4), vec![4], 40));
		assert_ok!(vote(Origin::signed(3), vec![3], 30));
		System::set_block_number(5);
		Elections::end_block(System::block_number());
		assert_eq!(Elections::members_ids(), vec![4, 5]);
		assert_eq!(Elections::runners_up_ids(), vec![3]);
		assert_noop!(
			Elections::renounce_candidacy(Origin::signed(3), Renouncing::Member),
			Error::<Test>::NotMember,
		);
	})
}
#[test]
fn non_runner_up_renounce_runner_up_should_fail() {
	// A member (4) trying to renounce as a RunnerUp must be rejected.
	ExtBuilder::default().desired_runners_up(1).build_and_execute(|| {
		assert_ok!(submit_candidacy(Origin::signed(5)));
		assert_ok!(submit_candidacy(Origin::signed(4)));
		assert_ok!(submit_candidacy(Origin::signed(3)));
		assert_ok!(vote(Origin::signed(5), vec![5], 50));
		assert_ok!(vote(Origin::signed(4), vec![4], 40));
		assert_ok!(vote(Origin::signed(3), vec![3], 30));
		System::set_block_number(5);
		Elections::end_block(System::block_number());
		assert_eq!(Elections::members_ids(), vec![4, 5]);
		assert_eq!(Elections::runners_up_ids(), vec![3]);
		assert_noop!(
			Elections::renounce_candidacy(Origin::signed(4), Renouncing::RunnerUp),
			Error::<Test>::InvalidRenouncing,
		);
	})
}
#[test]
fn wrong_candidate_count_renounce_should_fail() {
	// The candidate-count hint passed to Renouncing::Candidate must be at
	// least the actual count (3 here); an underestimate (2) fails.
	ExtBuilder::default().build_and_execute(|| {
		assert_ok!(submit_candidacy(Origin::signed(5)));
		assert_ok!(submit_candidacy(Origin::signed(4)));
		assert_ok!(submit_candidacy(Origin::signed(3)));
		assert_noop!(
			Elections::renounce_candidacy(Origin::signed(4), Renouncing::Candidate(2)),
			Error::<Test>::InvalidRenouncing,
		);
		assert_ok!(Elections::renounce_candidacy(Origin::signed(4), Renouncing::Candidate(3)));
	})
}
#[test]
fn renounce_candidacy_count_can_overestimate() {
	// Overestimating the candidate-count hint (4 vs. actual 3) is allowed.
	ExtBuilder::default().build_and_execute(|| {
		assert_ok!(submit_candidacy(Origin::signed(5)));
		assert_ok!(submit_candidacy(Origin::signed(4)));
		assert_ok!(submit_candidacy(Origin::signed(3)));
		// while we have only 3 candidates.
		assert_ok!(Elections::renounce_candidacy(Origin::signed(4), Renouncing::Candidate(4)));
	})
}
#[test]
fn behavior_with_dupe_candidate() {
	// The candidate list is force-set with a duplicate entry (1 twice); the
	// election must still complete and produce a duplicate-free result.
	ExtBuilder::default().desired_runners_up(2).build_and_execute(|| {
		<Candidates<Test>>::put(vec![1, 1, 2, 3, 4]);
		assert_ok!(vote(Origin::signed(5), vec![1], 50));
		assert_ok!(vote(Origin::signed(4), vec![4], 40));
		assert_ok!(vote(Origin::signed(3), vec![3], 30));
		assert_ok!(vote(Origin::signed(2), vec![2], 20));
		System::set_block_number(5);
		Elections::end_block(System::block_number());
		assert_eq!(Elections::members_ids(), vec![1, 4]);
		assert_eq!(Elections::runners_up_ids(), vec![2, 3]);
		assert!(Elections::candidates().is_empty());
	})
}
}
|
pub mod graph_fns;
pub mod validate_fns;
pub use self::graph_fns::*;
|
// https://www.codewars.com/kata/52761ee4cffbc69732000738
/// Decide the outcome of a battle between the forces of Good and Evil.
///
/// `good` is a space-separated list of 6 unit counts (worth 1, 2, 3, 3, 4
/// and 10 points respectively); `evil` is a space-separated list of 7 unit
/// counts (worth 1, 2, 2, 2, 3, 5 and 10 points). The side with the higher
/// weighted total wins; equal totals are a draw.
///
/// # Panics
/// Panics if any count fails to parse as a `u32` (same as the original
/// implementation). Extra counts beyond the known unit worths are ignored
/// instead of panicking with an out-of-bounds index.
fn good_vs_evil(good: &str, evil: &str) -> String {
    const GOOD_POINTS: [u32; 6] = [1, 2, 3, 3, 4, 10];
    const EVIL_POINTS: [u32; 7] = [1, 2, 2, 2, 3, 5, 10];
    // Weighted sum of count * worth; `zip` bounds the iteration by the worth
    // table, so an over-long input can no longer panic on indexing.
    fn army_score(counts: &str, worth: &[u32]) -> u32 {
        counts
            .split(' ')
            .zip(worth)
            .map(|(count, w)| count.parse::<u32>().unwrap() * w)
            .sum()
    }
    let good_score = army_score(good, &GOOD_POINTS);
    let evil_score = army_score(evil, &EVIL_POINTS);
    if good_score > evil_score {
        String::from("Battle Result: Good triumphs over Evil")
    } else if evil_score > good_score {
        String::from("Battle Result: Evil eradicates all trace of Good")
    } else {
        String::from("Battle Result: No victor on this battle field")
    }
}
fn main(){
assert_eq!(good_vs_evil("0 0 0 0 0 10", "0 0 0 0 0 0 0"), "Battle Result: Good triumphs over Evil");
assert_eq!(good_vs_evil("0 0 0 0 0 0", "0 0 0 0 0 0 10"), "Battle Result: Evil eradicates all trace of Good");
assert_eq!(good_vs_evil("0 0 0 0 0 10", "0 0 0 0 0 0 10"), "Battle Result: No victor on this battle field");
} |
use super::{Body, Frame};
/// Marker type for a CANCEL frame body; the frame carries no payload fields
/// of its own (stream id and flags live on the enclosing `Frame`).
#[derive(Debug, Eq, PartialEq)]
pub struct Cancel {}
/// Builder for a CANCEL frame: captures the target stream id and the frame
/// flags until `build()` assembles the `Frame`.
pub struct CancelBuilder {
    stream_id: u32,
    flag: u16,
}
impl CancelBuilder {
pub fn build(self) -> Frame {
Frame::new(self.stream_id, Body::Cancel(), self.flag)
}
}
impl Cancel {
    /// Start building a CANCEL frame targeting `stream_id` with `flag`.
    pub fn builder(stream_id: u32, flag: u16) -> CancelBuilder {
        CancelBuilder {
            flag,
            stream_id,
        }
    }
}
|
// This file is part of rdma-core. It is subject to the license terms in the COPYRIGHT file found in the top-level directory of this distribution and at https://raw.githubusercontent.com/lemonrock/rdma-core/master/COPYRIGHT. No part of rdma-core, including this file, may be copied, modified, propagated, or distributed except according to the terms contained in the COPYRIGHT file.
// Copyright © 2016 The developers of rdma-core. See the COPYRIGHT file in the top-level directory of this distribution and at https://raw.githubusercontent.com/lemonrock/rdma-core/master/COPYRIGHT.
/// Work-completion opcodes (`enum ibv_wc_opcode` from libibverbs).
/// FFI mirror of the C ABI: the discriminant values must stay exactly in
/// sync with the C header — do not reorder or renumber.
#[repr(u32)]
#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
pub enum ibv_wc_opcode
{
	IBV_WC_SEND = 0,
	IBV_WC_RDMA_WRITE = 1,
	IBV_WC_RDMA_READ = 2,
	IBV_WC_COMP_SWAP = 3,
	IBV_WC_FETCH_ADD = 4,
	IBV_WC_BIND_MW = 5,
	IBV_WC_LOCAL_INV = 6,
	IBV_WC_TSO = 7,
	// Receive-side completion codes start at 128 (matches the C header's
	// IBV_WC_RECV value — gap from 8..127 is intentional).
	IBV_WC_RECV = 128,
	IBV_WC_RECV_RDMA_WITH_IMM = 129,
	IBV_WC_TM_ADD = 130,
	IBV_WC_TM_DEL = 131,
	IBV_WC_TM_SYNC = 132,
	IBV_WC_TM_RECV = 133,
	IBV_WC_TM_NO_TAG = 134,
}
|
// Copyright 2019 The Grin Developers
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Generic implementation of owner API functions
use uuid::Uuid;
use crate::grin_core::core::hash::Hashed;
use crate::grin_core::core::Transaction;
use crate::grin_util::secp::key::SecretKey;
use crate::grin_util::Mutex;
use crate::api_impl::owner_updater::StatusMessage;
use crate::grin_keychain::{Identifier, Keychain};
use crate::grin_util::secp::key::PublicKey;
use crate::internal::{keys, scan, selection, tx, updater};
use crate::slate::{PaymentInfo, Slate};
use crate::types::{
AcctPathMapping, Context, NodeClient, OutputData, TxLogEntry, WalletBackend, WalletInfo,
};
use crate::{
wallet_lock, InitTxArgs, IssueInvoiceTxArgs, NodeHeightResult, OutputCommitMapping,
PaymentProof, ScannedBlockInfo, TxLogEntryType, WalletInst, WalletLCProvider,
};
use crate::{Error, ErrorKind};
use crate::proof::tx_proof::{pop_proof_for_slate, TxProof};
use ed25519_dalek::PublicKey as DalekPublicKey;
use std::cmp;
use std::fs::File;
use std::io::Write;
use std::sync::mpsc::Sender;
use std::sync::Arc;
const USER_MESSAGE_MAX_LEN: usize = 1000; // We can keep messages as long as we need unless the slate will be too large to operate. 1000 symbols should be enough to keep everybody happy
use crate::proof::crypto;
use crate::proof::proofaddress;
use grin_wallet_util::grin_core::core::Committed;
/// List of accounts mapped in this wallet (label / derivation-path pairs).
pub fn accounts<'a, T: ?Sized, C, K>(w: &mut T) -> Result<Vec<AcctPathMapping>, Error>
where
	T: WalletBackend<'a, C, K>,
	C: NodeClient + 'a,
	K: Keychain + 'a,
{
	// Thin pass-through to the keys module; `&mut *w` reborrows the backend.
	keys::accounts(&mut *w)
}
/// Create a new account derivation path for `label` and return its
/// identifier. `keychain_mask` unlocks the keychain when the wallet is
/// mask-protected.
pub fn create_account_path<'a, T: ?Sized, C, K>(
	w: &mut T,
	keychain_mask: Option<&SecretKey>,
	label: &str,
) -> Result<Identifier, Error>
where
	T: WalletBackend<'a, C, K>,
	C: NodeClient + 'a,
	K: Keychain + 'a,
{
	keys::new_acct_path(&mut *w, keychain_mask, label)
}
/// Set the wallet's active account to the one registered under `label`.
/// Subsequent operations use that account's parent key id.
pub fn set_active_account<'a, T: ?Sized, C, K>(w: &mut T, label: &str) -> Result<(), Error>
where
	T: WalletBackend<'a, C, K>,
	C: NodeClient + 'a,
	K: Keychain + 'a,
{
	w.set_parent_key_id_by_name(label)
}
/// Retrieve the MQS address for the wallet — i.e. the wallet's
/// payment-proof public key.
pub fn get_mqs_address<'a, L, C, K>(
	wallet_inst: Arc<Mutex<Box<dyn WalletInst<'a, L, C, K>>>>,
	keychain_mask: Option<&SecretKey>,
) -> Result<PublicKey, Error>
where
	L: WalletLCProvider<'a, C, K>,
	C: NodeClient + 'a,
	K: Keychain + 'a,
{
	wallet_lock!(wallet_inst, w);
	let keychain = w.keychain(keychain_mask)?;
	// The MQS address is derived directly from the payment-proof pubkey.
	Ok(proofaddress::payment_proof_address_pubkey(&keychain)?)
}
/// Retrieve the TOR / public wallet address as a Dalek public key.
pub fn get_wallet_public_address<'a, L, C, K>(
	wallet_inst: Arc<Mutex<Box<dyn WalletInst<'a, L, C, K>>>>,
	keychain_mask: Option<&SecretKey>,
) -> Result<DalekPublicKey, Error>
where
	L: WalletLCProvider<'a, C, K>,
	C: NodeClient + 'a,
	K: Keychain + 'a,
{
	wallet_lock!(wallet_inst, w);
	let keychain = w.keychain(keychain_mask)?;
	// Derive the proof-address secret first, then its TOR public counterpart.
	let address_secret = proofaddress::payment_proof_address_secret(&keychain, None)?;
	Ok(proofaddress::secret_2_tor_pub(&address_secret)?)
}
/// Refresh outputs/tx states of the wallet. Resync with a blockchain data.
/// Returns `true` when the wallet state was successfully validated against
/// the node; progress is reported through `status_send_channel` if supplied.
pub fn perform_refresh_from_node<'a, L, C, K>(
	wallet_inst: Arc<Mutex<Box<dyn WalletInst<'a, L, C, K>>>>,
	keychain_mask: Option<&SecretKey>,
	status_send_channel: &Option<Sender<StatusMessage>>,
) -> Result<bool, Error>
where
	L: WalletLCProvider<'a, C, K>,
	C: NodeClient + 'a,
	K: Keychain + 'a,
{
	// `wallet_inst` is owned here and not used afterwards, so pass it through
	// directly instead of bumping the Arc refcount with a redundant clone.
	let validated = update_wallet_state(wallet_inst, keychain_mask, status_send_channel)?;
	Ok(validated)
}
/// Retrieve wallet outputs, optionally refreshing from the node first and
/// optionally restricted to the outputs of a single transaction (`tx_id`).
/// Returns `(node_validated, outputs)`.
pub fn retrieve_outputs<'a, L, C, K>(
	wallet_inst: Arc<Mutex<Box<dyn WalletInst<'a, L, C, K>>>>,
	keychain_mask: Option<&SecretKey>,
	status_send_channel: &Option<Sender<StatusMessage>>,
	include_spent: bool,
	refresh_from_node: bool,
	tx_id: Option<u32>,
) -> Result<(bool, Vec<OutputCommitMapping>), Error>
where
	L: WalletLCProvider<'a, C, K>,
	C: NodeClient + 'a,
	K: Keychain + 'a,
{
	let validated = if refresh_from_node {
		perform_refresh_from_node(wallet_inst.clone(), keychain_mask, status_send_channel)?
	} else {
		false
	};
	wallet_lock!(wallet_inst, w);
	let parent_key_id = w.parent_key_id();
	// When a tx id was supplied, resolve it to its log entry so that only the
	// outputs belonging to that transaction are reported.
	let tx_entry: Option<TxLogEntry> = match tx_id {
		Some(_) => updater::retrieve_txs(
			&mut **w,
			keychain_mask,
			tx_id,
			None,
			Some(&parent_key_id),
			false,
			None,
			None,
		)?
		.into_iter()
		.next(),
		None => None,
	};
	let outputs = updater::retrieve_outputs(
		&mut **w,
		keychain_mask,
		include_spent,
		tx_entry.as_ref(),
		&parent_key_id,
		None,
		None,
	)?;
	Ok((validated, outputs))
}
/// Retrieve transaction-log entries, optionally refreshing from the node
/// first; filters by `tx_id` and/or `tx_slate_id` when given.
/// Returns `(node_validated, entries)`.
pub fn retrieve_txs<'a, L, C, K>(
	wallet_inst: Arc<Mutex<Box<dyn WalletInst<'a, L, C, K>>>>,
	keychain_mask: Option<&SecretKey>,
	status_send_channel: &Option<Sender<StatusMessage>>,
	refresh_from_node: bool,
	tx_id: Option<u32>,
	tx_slate_id: Option<Uuid>,
) -> Result<(bool, Vec<TxLogEntry>), Error>
where
	L: WalletLCProvider<'a, C, K>,
	C: NodeClient + 'a,
	K: Keychain + 'a,
{
	let validated = if refresh_from_node {
		perform_refresh_from_node(wallet_inst.clone(), keychain_mask, status_send_channel)?
	} else {
		false
	};
	wallet_lock!(wallet_inst, w);
	let parent_key_id = w.parent_key_id();
	let entries = updater::retrieve_txs(
		&mut **w,
		keychain_mask,
		tx_id,
		tx_slate_id,
		Some(&parent_key_id),
		false,
		None,
		None,
	)?;
	Ok((validated, entries))
}
/// Retrieve summary info (balances etc.) for the active account, optionally
/// refreshing from the node first. Returns `(node_validated, info)`.
pub fn retrieve_summary_info<'a, L, C, K>(
	wallet_inst: Arc<Mutex<Box<dyn WalletInst<'a, L, C, K>>>>,
	keychain_mask: Option<&SecretKey>,
	status_send_channel: &Option<Sender<StatusMessage>>,
	refresh_from_node: bool,
	minimum_confirmations: u64,
) -> Result<(bool, WalletInfo), Error>
where
	L: WalletLCProvider<'a, C, K>,
	C: NodeClient + 'a,
	K: Keychain + 'a,
{
	let validated = if refresh_from_node {
		perform_refresh_from_node(wallet_inst.clone(), keychain_mask, status_send_channel)?
	} else {
		false
	};
	wallet_lock!(wallet_inst, w);
	let parent_key_id = w.parent_key_id();
	let wallet_info = updater::retrieve_info(&mut **w, &parent_key_id, minimum_confirmations)?;
	Ok((validated, wallet_info))
}
/// Retrieve the payment proof for a transaction. At least one of `tx_id` or
/// `tx_slate_id` must be supplied to identify the transaction, which must
/// resolve to exactly one log entry carrying a complete proof.
pub fn retrieve_payment_proof<'a, L, C, K>(
	wallet_inst: Arc<Mutex<Box<dyn WalletInst<'a, L, C, K>>>>,
	keychain_mask: Option<&SecretKey>,
	status_send_channel: &Option<Sender<StatusMessage>>,
	refresh_from_node: bool,
	tx_id: Option<u32>,
	tx_slate_id: Option<Uuid>,
) -> Result<PaymentProof, Error>
where
	L: WalletLCProvider<'a, C, K>,
	C: NodeClient + 'a,
	K: Keychain + 'a,
{
	if tx_id.is_none() && tx_slate_id.is_none() {
		return Err(ErrorKind::PaymentProofRetrieval(
			"Transaction ID or Slate UUID must be specified".into(),
		)
		.into());
	}
	// NOTE: a previous version ran an extra update_wallet_state() pass here
	// whose result was discarded; retrieve_txs() below already refreshes from
	// the node when `refresh_from_node` is set, so the redundant double
	// refresh has been removed.
	let (_, tx_entries) = retrieve_txs(
		wallet_inst.clone(),
		keychain_mask,
		status_send_channel,
		refresh_from_node,
		tx_id,
		tx_slate_id,
	)?;
	if tx_entries.len() != 1 {
		return Err(ErrorKind::PaymentProofRetrieval("Transaction doesn't exist".into()).into());
	}
	// Pull out all needed fields, returning an error if they're not present
	let tx = tx_entries[0].clone();
	let proof = match tx.payment_proof {
		Some(p) => p,
		None => {
			return Err(ErrorKind::PaymentProofRetrieval(
				"Transaction does not contain a payment proof".into(),
			)
			.into());
		}
	};
	// Received tx: proven amount is the net credit. Sent tx: net debit minus fee.
	let amount = if tx.amount_credited >= tx.amount_debited {
		tx.amount_credited - tx.amount_debited
	} else {
		let fee = match tx.fee {
			Some(f) => f,
			None => 0,
		};
		tx.amount_debited - tx.amount_credited - fee
	};
	let excess = match tx.kernel_excess {
		Some(e) => e,
		None => {
			return Err(ErrorKind::PaymentProofRetrieval(
				"Transaction does not contain kernel excess".into(),
			)
			.into());
		}
	};
	let r_sig = match proof.receiver_signature {
		Some(e) => e,
		None => {
			return Err(ErrorKind::PaymentProofRetrieval(
				"Proof does not contain receiver signature ".into(),
			)
			.into());
		}
	};
	let s_sig = match proof.sender_signature {
		Some(e) => e,
		None => {
			return Err(ErrorKind::PaymentProofRetrieval(
				"Proof does not contain sender signature ".into(),
			)
			.into());
		}
	};
	Ok(PaymentProof {
		amount,
		excess,
		recipient_address: proof.receiver_address,
		recipient_sig: r_sig,
		sender_address: proof.sender_address,
		sender_sig: s_sig,
	})
}
/// Get the stored tx proof file for a transaction. `id` (the tx log id) is
/// required; the proof is looked up on disk by the transaction's slate uuid.
pub fn get_stored_tx_proof<'a, L, C, K>(
	wallet_inst: Arc<Mutex<Box<dyn WalletInst<'a, L, C, K>>>>,
	id: Option<u32>,
) -> Result<TxProof, Error>
where
	L: WalletLCProvider<'a, C, K>,
	C: NodeClient + 'a,
	K: Keychain + 'a,
{
	// The id is mandatory; Option is kept only for API compatibility.
	let tx_id = match id {
		Some(id) => id,
		None => {
			return Err(
				ErrorKind::PaymentProofRetrieval("Transaction ID must be specified".into()).into(),
			)
		}
	};
	wallet_lock!(wallet_inst, w);
	let parent_key_id = w.parent_key_id();
	let txs: Vec<TxLogEntry> = updater::retrieve_txs(
		&mut **w,
		None,
		Some(tx_id),
		None,
		Some(&parent_key_id),
		false,
		None,
		None,
	)
	.map_err(|e| ErrorKind::StoredTransactionError(format!("{}", e)))?;
	// Exactly one log entry must match the requested id.
	if txs.len() != 1 {
		return Err(ErrorKind::GenericError(format!("Unable to find tx, {}", tx_id)).into());
	}
	let uuid = txs[0].tx_slate_id.ok_or_else(|| {
		ErrorKind::GenericError(format!("Unable to find slateId for txId, {}", tx_id))
	})?;
	let proof = TxProof::get_stored_tx_proof(w.get_data_file_dir(), &uuid.to_string())
		.map_err(|e| ErrorKind::TransactionHasNoProof(format!("{}", e)))?;
	Ok(proof)
}
/// Initiate tx as sender.
/// Caller is responsible for wallet refresh.
///
/// Builds a new slate funded from the source account in `args`, saves the
/// aggsig context (participant id 0) and, when requested, attaches payment
/// proof addresses. With `args.estimate_only`, returns amount/fee estimates
/// without saving any context.
pub fn init_send_tx<'a, T: ?Sized, C, K>(
	w: &mut T,
	keychain_mask: Option<&SecretKey>,
	args: &InitTxArgs,
	use_test_rng: bool,
	routputs: usize, // Number of resulting outputs. Normally it is 1
) -> Result<Slate, Error>
where
	T: WalletBackend<'a, C, K>,
	C: NodeClient + 'a,
	K: Keychain + 'a,
{
	// Resolve the source account: a named account path if given, falling back
	// to the wallet's parent key when the name is absent or unknown.
	let parent_key_id = match &args.src_acct_name {
		Some(d) => {
			let pm = w.get_acct_path(d.clone())?;
			match pm {
				Some(p) => p.path,
				None => w.parent_key_id(),
			}
		}
		None => w.parent_key_id(),
	};
	// Clip any attached message so the slate stays a manageable size.
	let message = match &args.message {
		Some(m) => {
			let mut m = m.clone();
			m.truncate(USER_MESSAGE_MAX_LEN);
			Some(m)
		}
		None => None,
	};
	// Compact (slatepack) model applies when a slatepack recipient is given
	// or the requested slate version is 4 or higher.
	let compact_slate =
		args.slatepack_recipient.is_some() || args.target_slate_version.clone().unwrap_or(0) >= 4;
	let mut slate = tx::new_tx_slate(
		&mut *w,
		args.amount,
		2,
		use_test_rng,
		args.ttl_blocks,
		compact_slate,
	)?;
	// if we just want to estimate, don't save a context, just send the results
	// back
	if let Some(true) = args.estimate_only {
		let (total, fee) = tx::estimate_send_tx(
			&mut *w,
			args.amount,
			&args.min_fee,
			args.minimum_confirmations,
			args.max_outputs as usize,
			args.num_change_outputs as usize,
			args.selection_strategy_is_use_all,
			&parent_key_id,
			&args.outputs,
			routputs,
			args.exclude_change_outputs.unwrap_or(false),
			args.minimum_confirmations_change_outputs,
		)?;
		slate.amount = total;
		slate.fee = fee;
		return Ok(slate);
	}
	// Updating height because it is lookup height for the kernel
	slate.height = w.w2n_client().get_chain_tip()?.0;
	let h = slate.height;
	// Late-lock defers input selection until finalize; it requires the
	// compact-slate model. Otherwise inputs are selected and added now.
	let mut context = if args.late_lock.unwrap_or(false) {
		if !slate.compact_slate {
			return Err(ErrorKind::GenericError(
				"Lock later feature available only with a slatepack (compact slate) model"
					.to_string(),
			)
			.into());
		}
		tx::create_late_lock_context(
			&mut *w,
			keychain_mask,
			&mut slate,
			h,
			&args,
			&parent_key_id,
			use_test_rng,
			0,
		)?
	} else {
		tx::add_inputs_to_slate(
			&mut *w,
			keychain_mask,
			&mut slate,
			&args.min_fee,
			args.minimum_confirmations,
			args.max_outputs as usize,
			args.num_change_outputs as usize,
			args.selection_strategy_is_use_all,
			&parent_key_id,
			0,
			message,
			true,
			use_test_rng,
			&args.outputs,
			routputs,
			args.exclude_change_outputs.unwrap_or(false),
			args.minimum_confirmations_change_outputs,
		)?
	};
	// Payment Proof, add addresses to slate and save address
	// TODO: Note we only use single derivation path for now,
	// probably want to allow sender to specify which one
	// sender_a has to be in MQS format because we need a normal public key to
	// sign; dalek will not work.
	let k = w.keychain(keychain_mask)?;
	let sender_a = proofaddress::payment_proof_address(&k, proofaddress::ProofAddressType::MQS)?;
	if let Some(a) = &args.address {
		if a.eq("file_proof") {
			debug!("doing file proof");
			// In a file proof the same address is used for both
			// sender_address and receiver_address.
			slate.payment_proof = Some(PaymentInfo {
				sender_address: sender_a.clone(),
				receiver_address: sender_a.clone(),
				receiver_signature: None,
			});
			context.payment_proof_derivation_index = Some(proofaddress::get_address_index());
		}
	}
	if let Some(a) = &args.payment_proof_recipient_address {
		slate.payment_proof = Some(PaymentInfo {
			sender_address: sender_a,
			receiver_address: a.clone(),
			receiver_signature: None,
		});
		context.payment_proof_derivation_index = Some(proofaddress::get_address_index());
	} else {
		debug!("There is no payment proof recipient address");
	}
	// mwc713 payment proof support: remember our input/output commitments.
	context.input_commits = slate.tx.inputs_committed();
	for output in slate.tx.outputs() {
		context.output_commits.push(output.commitment());
	}
	// Save the aggsig context in our DB for when we
	// receive the transaction back
	{
		let mut batch = w.batch(keychain_mask)?;
		batch.save_private_context(slate.id.as_bytes(), 0, &context)?;
		batch.commit()?;
	}
	Ok(slate)
}
/// Initiate a transaction as the recipient (invoicing).
///
/// Creates a slate with the invoiced amount, adds this wallet's output to it
/// and stores the aggsig context under participant id 0 (for mwc713
/// compatibility) so the flow can be completed when the payer responds.
pub fn issue_invoice_tx<'a, T: ?Sized, C, K>(
	w: &mut T,
	keychain_mask: Option<&SecretKey>,
	args: &IssueInvoiceTxArgs,
	use_test_rng: bool,
	num_outputs: usize, // Number of outputs for this transaction. Normally it is 1
) -> Result<Slate, Error>
where
	T: WalletBackend<'a, C, K>,
	C: NodeClient + 'a,
	K: Keychain + 'a,
{
	// Resolve the destination account, falling back to the wallet's parent key.
	let parent_key_id = match &args.dest_acct_name {
		Some(d) => {
			let pm = w.get_acct_path(d.clone())?;
			match pm {
				Some(p) => p.path,
				None => w.parent_key_id(),
			}
		}
		None => w.parent_key_id(),
	};
	// Clip any attached message so the slate stays a manageable size.
	let message = match &args.message {
		Some(m) => {
			let mut m = m.clone();
			m.truncate(USER_MESSAGE_MAX_LEN);
			Some(m)
		}
		None => None,
	};
	// Compact (slatepack) model only when a slatepack recipient is given.
	let compact_slate = args.slatepack_recipient.is_some();
	let mut slate = tx::new_tx_slate(&mut *w, args.amount, 2, use_test_rng, None, compact_slate)?;
	let chain_tip = slate.height; // it is fresh slate, height is a tip
	let context = tx::add_output_to_slate(
		&mut *w,
		keychain_mask,
		&mut slate,
		chain_tip,
		args.address.clone(),
		None,
		None,
		&parent_key_id,
		0, // Participant 0 for mwc713 compatibility
		message,
		true,
		use_test_rng,
		num_outputs,
	)?;
	// Save the aggsig context in our DB for when we
	// receive the transaction back
	{
		let mut batch = w.batch(keychain_mask)?;
		// Participant id is 0 for mwc713 compatibility
		batch.save_private_context(slate.id.as_bytes(), 0, &context)?;
		batch.commit()?;
	}
	Ok(slate)
}
/// Receive an invoice tx, essentially adding inputs to whatever
/// output was specified.
/// Caller is responsible for wallet refresh.
///
/// As the payer, funds the invoice slate with our inputs (participant id 1),
/// handles the compact-slate offset adjustment and the self-send case (where
/// this wallet also issued the invoice), and stores the resulting context.
pub fn process_invoice_tx<'a, T: ?Sized, C, K>(
	w: &mut T,
	keychain_mask: Option<&SecretKey>,
	slate: &Slate,
	args: &InitTxArgs,
	use_test_rng: bool,
	refresh_from_node: bool,
) -> Result<Slate, Error>
where
	T: WalletBackend<'a, C, K>,
	C: NodeClient + 'a,
	K: Keychain + 'a,
{
	let mut ret_slate = slate.clone();
	// Reject the slate if its TTL has already expired.
	check_ttl(w, &ret_slate, refresh_from_node)?;
	// Resolve the source account, falling back to the wallet's parent key.
	let parent_key_id = match &args.src_acct_name {
		Some(d) => {
			let pm = w.get_acct_path(d.clone())?;
			match pm {
				Some(p) => p.path,
				None => w.parent_key_id(),
			}
		}
		None => w.parent_key_id(),
	};
	// Don't do this multiple times: if we already sent for this slate id,
	// refuse to process the invoice again.
	let tx = updater::retrieve_txs(
		&mut *w,
		keychain_mask,
		None,
		Some(ret_slate.id),
		Some(&parent_key_id),
		use_test_rng,
		None,
		None,
	)?;
	for t in &tx {
		if t.tx_type == TxLogEntryType::TxSent {
			return Err(ErrorKind::TransactionAlreadyReceived(ret_slate.id.to_string()).into());
		}
	}
	// Clip any attached message so the slate stays a manageable size.
	let message = match &args.message {
		Some(m) => {
			let mut m = m.clone();
			m.truncate(USER_MESSAGE_MAX_LEN);
			Some(m)
		}
		None => None,
	};
	// update slate current height
	ret_slate.height = w.w2n_client().get_chain_tip()?.0;
	// update ttl if desired
	if let Some(b) = &args.ttl_blocks {
		ret_slate.ttl_cutoff_height = Some(ret_slate.height + b);
	}
	// if self sending, make sure to store 'initiator' keys
	let context_res = w.get_private_context(keychain_mask, slate.id.as_bytes(), 0); // See issue_invoice_tx for sender (self)
	let mut context = tx::add_inputs_to_slate(
		&mut *w,
		keychain_mask,
		&mut ret_slate,
		&args.min_fee,
		args.minimum_confirmations,
		args.max_outputs as usize,
		args.num_change_outputs as usize,
		args.selection_strategy_is_use_all,
		&parent_key_id,
		1, // Participant id 1 for mwc713 compatibility
		message,
		false,
		use_test_rng,
		&None,
		1,
		args.exclude_change_outputs.unwrap_or(false),
		args.minimum_confirmations_change_outputs,
	)?;
	if slate.compact_slate {
		let keychain = w.keychain(keychain_mask)?;
		// Add our contribution to the offset
		if context_res.is_ok() {
			// Self sending: don't correct for inputs and outputs
			// here, as we will do it during finalization.
			let mut tmp_context = context.clone();
			tmp_context.input_ids.clear();
			tmp_context.output_ids.clear();
			ret_slate.adjust_offset(&keychain, &mut tmp_context)?;
		} else {
			ret_slate.adjust_offset(&keychain, &mut context)?;
		}
		// needs to be stored as we're removing sig data for return trip. this needs to be present
		// when locking transaction context and updating tx log with excess later
		context.calculated_excess = Some(ret_slate.calc_excess(Some(&keychain))?);
		// if self-sending, merge contexts
		if let Ok(c) = context_res {
			context.initial_sec_key = c.initial_sec_key;
			context.initial_sec_nonce = c.initial_sec_nonce;
			context.fee = c.fee;
			context.amount = c.amount;
			for o in c.output_ids.iter() {
				context.output_ids.push(o.clone());
			}
			for i in c.input_ids.iter() {
				context.input_ids.push(i.clone());
			}
		}
		// Rebuild the slate's tx body from the merged context before returning.
		selection::repopulate_tx(
			&mut *w,
			keychain_mask,
			&mut ret_slate,
			&context,
			false,
			use_test_rng,
		)?;
	}
	// Save the aggsig context in our DB for when we
	// receive the transaction back
	{
		let mut batch = w.batch(keychain_mask)?;
		// Participant id 1 for mwc713 compatibility
		batch.save_private_context(ret_slate.id.as_bytes(), 1, &context)?;
		batch.commit()?;
	}
	Ok(ret_slate)
}
/// Lock the sender's outputs for the given slate, using the private context
/// stored under `participant_id`. For compact slates the transaction body is
/// rebuilt from the context before locking.
pub fn tx_lock_outputs<'a, T: ?Sized, C, K>(
	w: &mut T,
	keychain_mask: Option<&SecretKey>,
	slate: &Slate,
	address: Option<String>,
	participant_id: usize,
	use_test_rng: bool,
) -> Result<(), Error>
where
	T: WalletBackend<'a, C, K>,
	C: NodeClient + 'a,
	K: Keychain + 'a,
{
	let context = w.get_private_context(keychain_mask, slate.id.as_bytes(), participant_id)?;
	let mut working_slate = slate.clone();
	let mut excess_override = None;
	if slate.compact_slate {
		// Compact slates travel without the full tx data, so restore it from
		// the stored context first.
		selection::repopulate_tx(
			&mut *w,
			keychain_mask,
			&mut working_slate,
			&context,
			true,
			use_test_rng,
		)?;
		// purely for invoice workflow, payer needs the excess back temporarily for storage
		if working_slate.participant_data.len() == 1 {
			excess_override = context.calculated_excess;
		}
	}
	let height = w.w2n_client().get_chain_tip()?.0;
	selection::lock_tx_context(
		&mut *w,
		keychain_mask,
		&working_slate,
		height,
		&context,
		address,
		excess_override,
	)
}
/// Finalize slate.
/// Context needed for mwc713 proof of sending funds through mwcmqs.
///
/// Completes and signs the transaction: handles deferred (late-lock) input
/// selection, compact-slate offset adjustment, payment-proof verification and
/// proof storage. Returns the finalized slate together with its context.
pub fn finalize_tx<'a, T: ?Sized, C, K>(
	w: &mut T,
	keychain_mask: Option<&SecretKey>,
	slate: &Slate,
	refresh_from_node: bool,
	use_test_rng: bool,
) -> Result<(Slate, Context), Error>
where
	T: WalletBackend<'a, C, K>,
	C: NodeClient + 'a,
	K: Keychain + 'a,
{
	let mut sl = slate.clone();
	// Refresh the kernel lookup height to the current chain tip.
	sl.height = w.w2n_client().get_chain_tip()?.0;
	check_ttl(w, &sl, refresh_from_node)?;
	let mut context = w.get_private_context(keychain_mask, sl.id.as_bytes(), 0)?;
	let keychain = w.keychain(keychain_mask)?;
	let parent_key_id = context.parent_key_id.clone();
	if let Some(args) = context.late_lock_args.take() {
		// Transaction was late locked, select inputs+change now
		// and insert into original context
		let mut temp_sl = tx::new_tx_slate(
			&mut *w,
			context.amount,
			2,
			false,
			args.ttl_blocks,
			slate.compact_slate,
		)?;
		temp_sl.height = sl.height;
		let temp_context = selection::build_send_tx(
			w,
			&keychain,
			keychain_mask,
			&mut temp_sl,
			&args.min_fee,
			args.minimum_confirmations,
			args.max_outputs as usize,
			args.num_change_outputs as usize,
			args.selection_strategy_is_use_all,
			parent_key_id.clone(),
			0,
			use_test_rng,
			true,
			&args.outputs,
			1,
			args.exclude_change_outputs.unwrap_or(false),
			args.minimum_confirmations_change_outputs,
			args.message,
		)?;
		// Add inputs and outputs to original context
		context.input_ids = temp_context.input_ids;
		context.output_ids = temp_context.output_ids;
		// Store the updated context
		{
			let mut batch = w.batch(keychain_mask)?;
			batch.save_private_context(sl.id.as_bytes(), 0, &context)?;
			batch.commit()?;
		}
		// Now do the actual locking
		tx_lock_outputs(w, keychain_mask, &sl, args.address, 0, use_test_rng)?;
	}
	if slate.compact_slate {
		// Add our contribution to the offset
		sl.adjust_offset(&keychain, &mut context)?;
		// Rebuild the full tx body from the context before completing.
		selection::repopulate_tx(
			&mut *w,
			keychain_mask,
			&mut sl,
			&context,
			true,
			use_test_rng,
		)?;
	}
	tx::complete_tx(&mut *w, keychain_mask, &mut sl, 0, &context)?;
	tx::verify_slate_payment_proof(&mut *w, keychain_mask, &context, &sl)?;
	tx::update_stored_tx(&mut *w, keychain_mask, &context, &sl, false)?;
	tx::update_message(&mut *w, keychain_mask, &sl)?;
	// The private context is no longer needed once the tx is complete.
	{
		let mut batch = w.batch(keychain_mask)?;
		batch.delete_private_context(sl.id.as_bytes(), 0)?;
		batch.commit()?;
	}
	// If Proof available, we can store it at that point
	if let Some(mut proof) = pop_proof_for_slate(&slate.id) {
		proof.amount = context.amount;
		proof.fee = context.fee;
		for input in &context.input_commits {
			proof.inputs.push(input.clone());
		}
		for output in &context.output_commits {
			proof.outputs.push(output.clone());
		}
		proof.store_tx_proof(w.get_data_file_dir(), &slate.id.to_string())?;
	};
	Ok((sl, context))
}
/// Cancel a transaction, identified either by its tx-log id or by its slate
/// uuid. Requires a reachable node: the wallet state is refreshed first so
/// we never cancel a transaction that has actually confirmed.
pub fn cancel_tx<'a, L, C, K>(
    wallet_inst: Arc<Mutex<Box<dyn WalletInst<'a, L, C, K>>>>,
    keychain_mask: Option<&SecretKey>,
    status_send_channel: &Option<Sender<StatusMessage>>,
    tx_id: Option<u32>,
    tx_slate_id: Option<Uuid>,
) -> Result<(), Error>
where
    L: WalletLCProvider<'a, C, K>,
    C: NodeClient + 'a,
    K: Keychain + 'a,
{
    // Refresh from the node before touching anything; bail out if unreachable.
    let node_reachable =
        perform_refresh_from_node(wallet_inst.clone(), keychain_mask, status_send_channel)?;
    if !node_reachable {
        return Err(ErrorKind::TransactionCancellationError(
            "Can't contact running MWC node. Not Cancelling.",
        ))?;
    }
    wallet_lock!(wallet_inst, w);
    let parent_key_id = w.parent_key_id();
    tx::cancel_tx(&mut **w, keychain_mask, &parent_key_id, tx_id, tx_slate_id)
}
/// Fetch the transaction body stored for the given tx-log entry, if any
/// has been saved for it.
pub fn get_stored_tx<'a, T: ?Sized, C, K>(
    w: &T,
    entry: &TxLogEntry,
) -> Result<Option<Transaction>, Error>
where
    T: WalletBackend<'a, C, K>,
    C: NodeClient + 'a,
    K: Keychain + 'a,
{
    // Simple delegation to the backend store.
    let stored = w.get_stored_tx(entry)?;
    Ok(stored)
}
/// Loads a stored transaction from a file
///
/// `file` is the path of a file containing a serialized transaction; the
/// parsed `Transaction` is returned, or an error if it cannot be read.
// NOTE(review): `&String` would normally be `&str`; presumably this mirrors
// the `WalletBackend::load_stored_tx` trait signature — confirm before changing.
pub fn load_stored_tx<'a, T: ?Sized, C, K>(w: &T, file: &String) -> Result<Transaction, Error>
where
    T: WalletBackend<'a, C, K>,
    C: NodeClient + 'a,
    K: Keychain + 'a,
{
    w.load_stored_tx(file)
}
/// Posts a transaction to the chain
/// take a client impl instead of wallet so as not to have to lock the wallet
///
/// * `client` - node client used for the broadcast
/// * `tx` - transaction to post
/// * `fluff` - when true, skip the Dandelion stem phase and broadcast directly
pub fn post_tx<'a, C>(client: &C, tx: &Transaction, fluff: bool) -> Result<(), Error>
where
    C: NodeClient + 'a,
{
    // Log the outcome either way; the error is propagated unchanged.
    match client.post_tx(tx, fluff) {
        Err(e) => {
            error!("api: post_tx: failed with error: {}", e);
            Err(e)
        }
        Ok(()) => {
            debug!(
                "api: post_tx: successfully posted tx: {}, fluff? {}",
                tx.hash(),
                fluff
            );
            Ok(())
        }
    }
}
/// verify slate messages
///
/// Delegates to `Slate::verify_messages`; returns an error if any
/// participant message fails verification.
pub fn verify_slate_messages(slate: &Slate) -> Result<(), Error> {
    slate.verify_messages()
}
/// check repair
/// Accepts a wallet inst instead of a raw wallet so it can
/// lock as little as possible
///
/// Re-scans the chain from `start_height` (or from block 1 when not given)
/// up to the current tip, optionally deleting unconfirmed outputs, then
/// saves the list of scanned block hashes so the next update can detect reorgs.
pub fn scan<'a, L, C, K>(
    wallet_inst: Arc<Mutex<Box<dyn WalletInst<'a, L, C, K>>>>,
    keychain_mask: Option<&SecretKey>,
    start_height: Option<u64>,
    delete_unconfirmed: bool,
    status_send_channel: &Option<Sender<StatusMessage>>,
    do_full_outputs_refresh: bool,
) -> Result<(), Error>
where
    L: WalletLCProvider<'a, C, K>,
    C: NodeClient + 'a,
    K: Keychain + 'a,
{
    // Drop any cached node data; a manual scan must see fresh chain state.
    {
        wallet_lock!(wallet_inst, w);
        w.w2n_client().reset_cache();
    }
    // Checking from what point we should start scanning
    let (tip_height, tip_hash, last_scanned_block, has_reorg) = get_last_detect_last_scanned_block(
        wallet_inst.clone(),
        keychain_mask,
        status_send_channel,
    )?;
    // tip_height == 0 is the sentinel for "node unreachable / not ready".
    if tip_height == 0 {
        return Err(ErrorKind::NodeNotReady)?;
    }
    if has_reorg {
        info!(
            "Wallet update will do full outputs checking because since last update reorg happend"
        );
    }
    debug!(
        "Preparing to update the wallet from height {} to {}",
        last_scanned_block.height, tip_height
    );
    // Explicit start height is capped at the last scanned height; with no
    // argument a full scan from block 1 is performed.
    let start_height = match start_height {
        Some(h) => cmp::min(last_scanned_block.height, h),
        None => 1,
    };
    // First we need to get the hashes for heights... Reason, if block chain will be changed during scan, we will detect that naturally with next wallet_update.
    let mut blocks: Vec<ScannedBlockInfo> =
        vec![ScannedBlockInfo::new(tip_height, tip_hash.clone())];
    {
        wallet_lock!(wallet_inst, w);
        // Collect checkpoint hashes at exponentially growing distances from
        // the tip (tip-4, tip-12, tip-28, ...) down to start_height.
        let mut step = 4;
        while blocks.last().unwrap().height.saturating_sub(step) > start_height {
            let h = blocks.last().unwrap().height.saturating_sub(step);
            let hdr = w.w2n_client().get_header_info(h)?;
            blocks.push(ScannedBlockInfo::new(h, hdr.hash));
            step *= 2;
        }
        // adding last_scanned_block.height not needed
    }
    scan::scan(
        wallet_inst.clone(),
        keychain_mask,
        delete_unconfirmed,
        start_height,
        tip_height,
        status_send_channel,
        true,
        do_full_outputs_refresh,
    )?;
    // Persist the checkpoints only after a successful scan.
    wallet_lock!(wallet_inst, w);
    let mut batch = w.batch(keychain_mask)?;
    batch.save_last_scanned_blocks(start_height, &blocks)?;
    batch.commit()?;
    Ok(())
}
/// Reports the current chain height. Asks the node first; if the node is
/// unreachable, falls back to the highest output height known locally and
/// flags the result with `updated_from_node: false`.
pub fn node_height<'a, L, C, K>(
    wallet_inst: Arc<Mutex<Box<dyn WalletInst<'a, L, C, K>>>>,
    keychain_mask: Option<&SecretKey>,
) -> Result<NodeHeightResult, Error>
where
    L: WalletLCProvider<'a, C, K>,
    C: NodeClient + 'a,
    K: Keychain + 'a,
{
    // Hold the wallet lock only for the duration of the node call.
    let tip = {
        wallet_lock!(wallet_inst, w);
        w.w2n_client().get_chain_tip()
    };
    match tip {
        Ok((height, header_hash, _)) => Ok(NodeHeightResult {
            height,
            header_hash,
            updated_from_node: true,
        }),
        Err(_) => {
            // Node is down: best local estimate is the max known output height.
            let outputs = retrieve_outputs(wallet_inst, keychain_mask, &None, true, false, None)?;
            let height = outputs
                .1
                .iter()
                .map(|m| m.output.height)
                .max()
                .unwrap_or(0);
            Ok(NodeHeightResult {
                height,
                header_hash: "".to_owned(),
                updated_from_node: false,
            })
        }
    }
}
// Write an informational message either into the given file (when present)
// or into the status channel. Both sinks are best-effort: write/send errors
// are deliberately ignored.
fn write_info(
    message: String,
    file: Option<&mut File>,
    status_send_channel: &Sender<StatusMessage>,
) {
    match file {
        Some(file) => {
            // writeln! appends the same "\n" the manual write! produced,
            // but is the idiomatic form.
            let _ = writeln!(file, "{}", message);
        }
        None => {
            let _ = status_send_channel.send(StatusMessage::Info(message));
        }
    };
}
/// Print wallet status into send channel. This data suppose to be used for troubleshouting only
///
/// Dumps every output and every tx-log entry (with its stored slate, when
/// available) either to `file_name` or, when no file is given, to the
/// status channel, one `StatusMessage::Info` per line.
pub fn dump_wallet_data<'a, L, C, K>(
    wallet_inst: Arc<Mutex<Box<dyn WalletInst<'a, L, C, K>>>>,
    status_send_channel: &Sender<StatusMessage>,
    file_name: Option<String>,
) -> Result<(), Error>
where
    L: WalletLCProvider<'a, C, K>,
    C: NodeClient + 'a,
    K: Keychain + 'a,
{
    wallet_lock!(wallet_inst, w);
    // Keep a copy of the name for the final confirmation message below.
    let fn_copy = file_name.clone();
    let mut file: Option<File> = match file_name {
        Some(file_name) => Some(File::create(file_name)?),
        None => None,
    };
    write_info(
        String::from("Wallet Outputs:"),
        file.as_mut(),
        status_send_channel,
    );
    for output in w.iter() {
        write_info(format!("{:?}", output), file.as_mut(), status_send_channel);
    }
    write_info(
        String::from("Wallet Transactions:"),
        file.as_mut(),
        status_send_channel,
    );
    for tx_log in w.tx_log_iter() {
        write_info(format!("{:?}", tx_log), file.as_mut(), status_send_channel);
        // Checking if Slate is available
        if let Some(uuid) = tx_log.tx_slate_id {
            let uuid_str = uuid.to_string();
            match w.get_stored_tx_by_uuid(&uuid_str) {
                Ok(t) => {
                    write_info(
                        format!("   Slate for {}: {:?}", uuid_str, t),
                        file.as_mut(),
                        status_send_channel,
                    );
                }
                Err(_) => write_info(
                    format!("   Slate for {} not found", uuid_str),
                    file.as_mut(),
                    status_send_channel,
                ),
            }
        }
    }
    // When the dump went to a file, tell the user where to find it.
    if let Some(f) = fn_copy {
        let _ = status_send_channel.send(StatusMessage::Info(format!(
            "Wallet dump is stored at  {}",
            f
        )));
    }
    Ok(())
}
// Checking if node head is fine and we can perform the scanning
// Result: (tip_height: u64, tip_hash:String, first_block_to_scan_from: ScannedBlockInfo, is_reorg: bool)
// is_reorg true if new need go back by the chain to perform scanning
// Note: In case of error return tip 0!!!
fn get_last_detect_last_scanned_block<'a, L, C, K>(
    wallet_inst: Arc<Mutex<Box<dyn WalletInst<'a, L, C, K>>>>,
    keychain_mask: Option<&SecretKey>,
    status_send_channel: &Option<Sender<StatusMessage>>,
) -> Result<(u64, String, ScannedBlockInfo, bool), Error>
where
    L: WalletLCProvider<'a, C, K>,
    C: NodeClient + 'a,
    K: Keychain + 'a,
{
    wallet_lock!(wallet_inst, w);
    // Wallet update logic doesn't handle truncating of the blockchain. That happen when node in sync or in reorg-sync
    // In this case better to inform user and do nothing. Sync is useless in any case.
    // Checking if keychain mask correct. Issue that sometimes update_wallet_state doesn't need it and it is a security problem
    let _ = w.batch(keychain_mask)?;
    let (tip_height, tip_hash, _) = match w.w2n_client().get_chain_tip() {
        Ok(t) => t,
        Err(_) => {
            if let Some(ref s) = status_send_channel {
                let _ = s.send(StatusMessage::Warning(
                    "Unable to contact mwc-node".to_owned(),
                ));
            }
            // Tip 0 is the documented "node unreachable" sentinel.
            return Ok((0, String::new(), ScannedBlockInfo::empty(), false));
        }
    };
    let blocks = w.last_scanned_blocks()?;
    // If the server height is less than our confirmed height, don't apply
    // these changes as the chain is syncing, incorrect or forking
    //
    // Precedence note: `&&` binds tighter than `||`, so this skips when the
    // node has no tip, OR when (tip is below our last scanned height AND the
    // tip is outside the known branch-switch range below).
    if tip_height == 0
        || tip_height < blocks.first().map(|b| b.height).unwrap_or(0)
            && !(tip_height >= 694859 && tip_height < 707100)
    // This heights range is matching expected switch from one branch to another.
    {
        if let Some(ref s) = status_send_channel {
            let _ = s.send(StatusMessage::Warning(
                String::from("Wallet Update is skipped, please wait for sync on node to complete or fork to resolve.")
            ));
        }
        return Ok((0, String::new(), ScannedBlockInfo::empty(), false));
    }
    // Walk the stored checkpoints (newest first) and find the most recent one
    // whose hash still matches the chain; scanning restarts from there.
    let mut last_scanned_block = ScannedBlockInfo::empty();
    let head_height = blocks.first().map(|b| b.height).unwrap_or(0);
    for bl in blocks {
        // check if that block is not changed
        if bl.height > tip_height {
            continue; // Possible because of the parch (switch from branches)
        }
        if let Ok(hdr_info) = w.w2n_client().get_header_info(bl.height) {
            if hdr_info.hash == bl.hash {
                last_scanned_block = bl;
                break;
            }
        }
    }
    // If the newest checkpoint didn't match, some scanned blocks were reorged.
    let has_reorg = last_scanned_block.height != head_height;
    Ok((tip_height, tip_hash, last_scanned_block, has_reorg))
}
/// Experimental, wrap the entire definition of how a wallet's state is updated
///
/// Scans from the last verified checkpoint to the node tip, saves new
/// checkpoints, and retries (recursively) if the tip moved during the scan.
/// Returns Ok(true) on a completed, consistent update; Ok(false) if the node
/// was unreachable or the final tip could not be confirmed.
pub fn update_wallet_state<'a, L, C, K>(
    wallet_inst: Arc<Mutex<Box<dyn WalletInst<'a, L, C, K>>>>,
    keychain_mask: Option<&SecretKey>,
    status_send_channel: &Option<Sender<StatusMessage>>,
) -> Result<bool, Error>
where
    L: WalletLCProvider<'a, C, K>,
    C: NodeClient + 'a,
    K: Keychain + 'a,
{
    // Checking from what point we should start scanning
    let (tip_height, tip_hash, last_scanned_block, has_reorg) = get_last_detect_last_scanned_block(
        wallet_inst.clone(),
        keychain_mask,
        status_send_channel,
    )?;
    // Tip 0 is the "node unreachable / not ready" sentinel.
    if tip_height == 0 {
        return Ok(false);
    }
    if has_reorg {
        wallet_lock!(wallet_inst, w);
        w.w2n_client().reset_cache(); // let's reset cach to be safe
        info!(
            "Wallet update will do full outputs checking because since last update reorg happend"
        );
    }
    debug!(
        "Preparing to update the wallet from height {} to {}",
        last_scanned_block.height, tip_height
    );
    if last_scanned_block.height == tip_height {
        debug!("update_wallet_state is skipped because data is already recently updated");
        return Ok(true);
    }
    // Show progress for young chains or when many blocks must be scanned.
    let show_progress =
        tip_height < 1000 || tip_height.saturating_sub(last_scanned_block.height) > 20;
    if last_scanned_block.height == 0 {
        let msg = "This wallet has not been scanned against the current chain. Beginning full scan... (this first scan may take a while, but subsequent scans will be much quicker)".to_string();
        if let Some(ref s) = status_send_channel {
            let _ = s.send(StatusMessage::FullScanWarn(msg));
        }
    }
    // First we need to get the hashes for heights... Reason, if block chain will be changed during scan, we will detect that naturally.
    let mut blocks: Vec<ScannedBlockInfo> =
        vec![ScannedBlockInfo::new(tip_height, tip_hash.clone())];
    {
        wallet_lock!(wallet_inst, w);
        // Collect checkpoint hashes at exponentially growing distances from
        // the tip, down to the last scanned height.
        let mut step = 4;
        while blocks.last().unwrap().height.saturating_sub(step) > last_scanned_block.height {
            let h = blocks.last().unwrap().height.saturating_sub(step);
            let hdr = w.w2n_client().get_header_info(h)?;
            blocks.push(ScannedBlockInfo::new(h, hdr.hash));
            step *= 2;
        }
        // adding last_scanned_block.height not needed
    }
    scan::scan(
        wallet_inst.clone(),
        keychain_mask,
        false,
        last_scanned_block.height,
        tip_height,
        status_send_channel,
        show_progress,
        has_reorg,
    )?;
    // Checking if tip was changed. In this case we need to retry. Retry will be handles naturally optimal
    let mut tip_was_changed = false;
    {
        wallet_lock!(wallet_inst, w);
        if let Ok((after_tip_height, after_tip_hash, _)) = w.w2n_client().get_chain_tip() {
            // Since we are still online, we can save the scan status
            {
                let mut batch = w.batch(keychain_mask)?;
                batch.save_last_scanned_blocks(last_scanned_block.height, &blocks)?;
                batch.commit()?;
            }
            if after_tip_height == tip_height && after_tip_hash == tip_hash {
                return Ok(true);
            } else {
                tip_was_changed = true;
            }
        }
    }
    // Recurse outside the lock scope above so the wallet is not held locked.
    if tip_was_changed {
        // Since head was chaged, we need to update it
        return update_wallet_state(wallet_inst, keychain_mask, &status_send_channel);
    }
    // wasn't be able to confirm the tip. Scan is failed, scan height not updated.
    Ok(false)
}
/// Rejects a slate whose TTL cutoff height has been reached.
///
/// The reference height comes from the node when `refresh_from_node` is set,
/// otherwise from the wallet's last confirmed height. A slate without a
/// TTL cutoff always passes.
pub fn check_ttl<'a, T: ?Sized, C, K>(
    w: &mut T,
    slate: &Slate,
    refresh_from_node: bool,
) -> Result<(), Error>
where
    T: WalletBackend<'a, C, K>,
    C: NodeClient + 'a,
    K: Keychain + 'a,
{
    // Pick the height source: live chain tip or last locally confirmed height.
    let current_height = if refresh_from_node {
        w.w2n_client().get_chain_tip()?.0
    } else {
        w.last_confirmed_height()?
    };
    match slate.ttl_cutoff_height {
        Some(cutoff) if current_height >= cutoff => Err(ErrorKind::TransactionExpired.into()),
        _ => Ok(()),
    }
}
/// Verify/validate arbitrary payment proof
/// Returns (whether this wallet is the sender, whether this wallet is the recipient)
///
/// Checks that the proof's kernel exists on chain, then verifies both the
/// recipient and sender signatures over the canonical payment-proof message,
/// and finally reports whether either address belongs to this wallet.
pub fn verify_payment_proof<'a, L, C, K>(
    wallet_inst: Arc<Mutex<Box<dyn WalletInst<'a, L, C, K>>>>,
    keychain_mask: Option<&SecretKey>,
    proof: &PaymentProof,
) -> Result<(bool, bool), Error>
where
    L: WalletLCProvider<'a, C, K>,
    C: NodeClient + 'a,
    K: Keychain + 'a,
{
    let sender_pubkey = proof.sender_address.clone().public_key;
    let msg = tx::payment_proof_message(proof.amount, &proof.excess, sender_pubkey)?;
    // Grab what we need from the wallet, then release the lock for the
    // (potentially slow) node call below.
    let (client, keychain) = {
        wallet_lock!(wallet_inst, w);
        (w.w2n_client().clone(), w.keychain(keychain_mask)?)
    };
    // Check kernel exists
    match client.get_kernel(&proof.excess, None, None) {
        Err(e) => {
            return Err(ErrorKind::PaymentProof(format!(
                "Error retrieving kernel from chain: {}",
                e
            ))
            .into());
        }
        Ok(None) => {
            return Err(ErrorKind::PaymentProof(format!(
                "Transaction kernel with excess {:?} not found on chain",
                proof.excess
            ))
            .into());
        }
        Ok(Some(_)) => {}
    };
    // Check Sigs
    // Parse both signatures up front: a malformed signature in a
    // user-supplied proof is reported as a verification error rather than
    // panicking (previously `.unwrap()`).
    let recipient_sig = crypto::signature_from_string(&proof.recipient_sig)
        .map_err(|e| ErrorKind::TxProofVerifySignature(format!("{}", e)))?;
    let sender_sig = crypto::signature_from_string(&proof.sender_sig)
        .map_err(|e| ErrorKind::TxProofVerifySignature(format!("{}", e)))?;
    let recipient_pubkey = proof.recipient_address.public_key()?;
    crypto::verify_signature(&msg, &recipient_sig, &recipient_pubkey)
        .map_err(|e| ErrorKind::TxProofVerifySignature(format!("{}", e)))?;
    let sender_pubkey = proof.sender_address.public_key()?;
    crypto::verify_signature(&msg, &sender_sig, &sender_pubkey)
        .map_err(|e| ErrorKind::TxProofVerifySignature(format!("{}", e)))?;
    // Determine whether this wallet owns either side of the proof.
    let my_address_pubkey = proofaddress::payment_proof_address_pubkey(&keychain)?;
    let sender_mine = my_address_pubkey == sender_pubkey;
    let recipient_mine = my_address_pubkey == recipient_pubkey;
    Ok((sender_mine, recipient_mine))
}
/// Self-spend a single specific output back into this wallet
/// (used for output consolidation / "self spend" flows).
// NOTE(review): the function name contains a typo ("putput"), but it is a
// public API symbol, so it is kept for caller compatibility.
pub fn self_spend_particular_putput<'a, L, C, K>(
    wallet_inst: Arc<Mutex<Box<dyn WalletInst<'a, L, C, K>>>>,
    keychain_mask: Option<&SecretKey>,
    output: OutputData,
    address: Option<String>,
    _current_height: u64,
    _minimum_confirmations: u64,
    _seperate_tx: bool,
) -> Result<(), Error>
where
    L: WalletLCProvider<'a, C, K>,
    C: NodeClient + 'a,
    K: Keychain + 'a,
{
    // NOTE(review): `output.commit.unwrap()` panics if the output record has
    // no commitment stored — confirm all callers guarantee it is populated.
    scan::self_spend_particular_output(
        wallet_inst,
        keychain_mask,
        output.value,
        output.commit.unwrap(),
        address,
        _current_height,
        _minimum_confirmations,
    )?;
    Ok(())
}
|
use specs::Join;
/// Updates the in-game help text: while enemy entities remain, it lists each
/// remaining kind with its count; once none remain it shows the
/// "go to portal" message.
pub struct HelpSystem;
impl<'a> ::specs::System<'a> for HelpSystem {
    type SystemData = (
        ::specs::ReadStorage<'a, ::component::Attracted>,
        ::specs::ReadStorage<'a, ::component::Avoider>,
        ::specs::ReadStorage<'a, ::component::Bouncer>,
        ::specs::ReadStorage<'a, ::component::Motionless>,
        ::specs::FetchMut<'a, ::resource::Help>,
        ::specs::Fetch<'a, ::resource::Text>,
    );
    fn run(&mut self, (attracted, avoider, bouncer, motionless, mut help, text): Self::SystemData) {
        // Count the live entities of each kind, paired with the display name.
        let r = [
            (attracted.join().count(), &text.attracted),
            (avoider.join().count(), &text.avoider),
            (bouncer.join().count(), &text.bouncer),
            (motionless.join().count(), &text.motionless),
        ];
        let remaining = r
            .iter()
            .filter(|(count, _)| *count != 0)
            .collect::<Vec<_>>();
        // Idiom fix: `is_empty()` instead of `len() == 0`.
        if remaining.is_empty() {
            help.0 = text.go_to_portal.clone();
        } else {
            help.0 = text.remains.clone();
            for (count, name) in remaining {
                help.0.push_str(&format!("\n   {} - {}", name, count));
            }
        }
    }
}
|
use std::string::String;
use std::io;
use std::io::{Read,Write};
use customer::{CustomerList,CustomerId};
use util::{Creatable,UrlEncodable};
use card::CardList;
use decoder::{StripeDecoder,StripeDecoderError};
use url::{Url,form_urlencoded};
use hyper::client::Request;
use hyper::net::{Fresh,Streaming};
use hyper::method::Method;
use hyper::{Get, Post};
use hyper::header::Authorization;
use hyper::error::Error as HttpError;
use rustc_serialize::Decodable;
/// Stripe secret API key, sent as a Bearer token on every request.
type SecretKey = String;
/// A client connection to the Stripe REST API, bound to one secret key.
pub struct Connection {
    base_url: Url,
    secret_key: SecretKey
}
// Builds a Vec<String> of URL path components from any comma-separated list
// of expressions with a `to_string()` method, e.g. `urlify!("v1", "customers")`.
macro_rules! urlify {
    ($($component:expr),*) => {
        vec![$($component.to_string()),*]
    }
}
/// Any error produced while talking to the Stripe API.
#[derive(Debug)]
pub enum StripeError {
    /// HTTP-level failure (connection, protocol, ...).
    TransportError(HttpError),
    /// The response body could not be decoded into the expected type.
    DecodingError(StripeDecoderError),
    /// I/O failure while reading or writing the request/response body.
    IOError(io::Error),
}
// Conversions into StripeError so `etry!` (and `?`-style propagation) can
// wrap any of the three underlying error types.
impl From<StripeDecoderError> for StripeError {
    fn from(e: StripeDecoderError) -> StripeError {
        StripeError::DecodingError(e)
    }
}
impl From<io::Error> for StripeError {
    fn from(e: io::Error) -> StripeError {
        StripeError::IOError(e)
    }
}
impl From<HttpError> for StripeError {
    fn from(e: HttpError) -> StripeError {
        StripeError::TransportError(e)
    }
}
// Like the old `try!` macro, but converts the error into StripeError via
// `From` before returning it.
macro_rules! etry {
    ($expr:expr) => {
        match $expr {
            Ok(o) => o,
            Err(e) => return Err(StripeError::from(e)),
        }
    }
}
/// Sends the (already started) request, reads the whole response body and
/// decodes it into `T`.
fn process<T: Decodable>(req: Request<Streaming>) -> Result<T, StripeError> {
    let mut response = etry!(req.send());
    let mut body = vec![];
    etry!(response.read_to_end(&mut body));
    Ok(etry!(StripeDecoder::decode(body)))
}
impl Connection {
    /// Creates a connection to the live Stripe endpoint using `secret_key`.
    pub fn new(secret_key: String) -> Connection {
        Connection {
            base_url: Url::parse("https://api.stripe.com").unwrap(),
            secret_key: secret_key
        }
    }
    /// Builds an authenticated, not-yet-started request for the given
    /// method and URL path components.
    fn request(&self, method: Method, mut path: Vec<String>) -> Request<Fresh> {
        let mut url = self.base_url.clone();
        url.path_mut().unwrap().append(&mut path);
        let mut request = Request::new(method, url).unwrap();
        // Stripe authenticates with "Authorization: Bearer <secret_key>".
        let mut auth = "Bearer ".to_string();
        auth.push_str(&self.secret_key[..]);
        request.headers_mut().set(Authorization(auth));
        request
    }
    /// Starts a body-less request and decodes the response into `T`.
    fn fetch<T: Decodable>(req: Request<Fresh>) -> Result<T, StripeError> {
        process(etry!(req.start()))
    }
    /// Creates a new Stripe object by POSTing `object` form-encoded to the
    /// type's collection path.
    pub fn create<T>(&self, object: T::Object) -> Result<T, StripeError>
        where T : Creatable + Decodable {
        let req = self.request(Post, urlify!("v1", T::path()));
        let mut req = etry!(req.start());
        let payload = form_urlencoded::serialize(object.into_iter());
        etry!(req.write_all(payload.as_bytes()));
        process(req)
    }
    /// Lists customers.
    pub fn customers(&self) -> Result<CustomerList, StripeError> {
        Connection::fetch(self.request(Get, urlify!("v1", "customers")))
    }
    /// Lists the cards attached to one customer.
    pub fn cards(&self, customer: CustomerId) -> Result<CardList, StripeError> {
        Connection::fetch(self.request(Get, urlify!("v1", "customers", customer, "cards")))
    }
}
|
use clap::{Arg, App, AppSettings};
use image::FilterType;
use job::{Format, Job, JobBuilder, ResizeType};
use std::env;
use std::path::PathBuf;
/// File extensions accepted as input images.
const SUPPORTED_IMAGES: [&str; 7] = ["jpg", "png", "jpeg", "gif", "bmp", "tif", "tiff"];
/// Formats a resized image may be saved as.
const SUPPORTED_SAVES: [&str; 2] = ["jpg", "png"];
/// Resize filter codes (nearest-neighbor, linear, cubic, gaussian, lanczos3).
const FILTERS: [&str; 5] = ["nn", "lf", "cf", "gf", "l"];
/// Resize mode codes (increase-only, decrease-only, none, increase-or-decrease).
const RESIZE_TYPE: [&str; 4] = ["inc", "dec", "non", "iod"];
/// Parsed command-line input: the resize job configuration plus the list of
/// image files to process.
pub struct Arguments {
    pub job: Job,
    pub images: Vec<PathBuf>,
}
impl Arguments {
    /// Builds Arguments by parsing CLI flags (via clap) and collecting the
    /// image file paths from the raw argument list.
    pub fn new() -> Self {
        Arguments {
            job: Arguments::job_from_clap(),
            images: Arguments::image_paths(),
        }
    }
    /// Defines the clap CLI and converts the matched flags into a `Job`.
    fn job_from_clap() -> Job {
        let matches = App::new("\nImage Resize")
            .version("0.1.0")
            .author("Slaven Kuhinek © 2017")
            .about("\nImage Resize je program za smanjivanje fotografija \
                koji korisiti naredbeni redak za unos parametara za promjenu veličine fotografija.")
            .settings(&[
                AppSettings::WaitOnError,
                AppSettings::ColorAlways,
                AppSettings::AllowExternalSubcommands,
            ])
            .arg(Arg::with_name("width")
                .short("W")
                .long("width")
                .value_name("NUMBER")
                .validator(validate_u32)
                .help("Sets a width of resized image. \
                    Image will be resized proportionaly to this value.")
                .takes_value(true))
            .arg(Arg::with_name("height")
                .short("H")
                .long("height")
                .value_name("NUMBER")
                .validator(validate_u32)
                .help("Sets a height of resized image. \
                    Image will be resized proportionaly to this value.")
                .takes_value(true))
            .arg(Arg::with_name("format")
                .short("f")
                .long("format")
                .value_name("STRING")
                .help("Sets a format in witch resized image will be saved to a disk. \
                    Accepted format are jpg or png.")
                .takes_value(true)
                .possible_values(&SUPPORTED_SAVES))
            .arg(Arg::with_name("filter")
                .short("F")
                .long("filter")
                .value_name("STRING")
                .help("Sets an image resize filter. Filters are: \n\
                    nn - Nearest Neighbor,\n\
                    lf - Linear Filter,\n\
                    cf - Cubic Filter,\n\
                    gf - Gaussian Filter,\n\
                    l - Lanczos3\n")
                .takes_value(true)
                .possible_values(&FILTERS))
            .arg(Arg::with_name("resize")
                .short("r")
                .long("resize")
                .value_name("STRING")
                .help("Sets an option for resize. Options are:\n\
                    inc - Image size will be increased but not decreased,\n\
                    dec - Image size will be decreased but not increased,\n\
                    non - Image size will stay unchanged, \
                    can be used for converting to another image format\n\
                    iod - Image size will be increased or decreased to specified size,\n")
                .takes_value(true)
                .possible_values(&RESIZE_TYPE))
            .arg(Arg::with_name("suffix")
                .short("s")
                .long("suffix")
                .value_name("STRING")
                .help("Sets a suffix that will be added to a file name of resized image.")
                .takes_value(true))
            .arg(Arg::with_name("renameall")
                .short("a")
                .long("renameall")
                .help("Creates renamed copies of files that wasn't resized."))
            .get_matches();
        let mut job = Job::new();
        // Used as a clap validator above; item functions are in scope for the
        // whole enclosing block, so defining it here is legal.
        fn validate_u32(v: String) -> Result<(), String> {
            match v.parse::<u32>() {
                Ok(_) => Ok(()),
                Err(_) => Err(String::from("Value must be a number.")),
            }
        }
        // The validators above guarantee these parses succeed, hence the
        // `unreachable!()` error arms.
        if let Some(width) = matches.value_of("width") {
            match width.parse::<u32>() {
                Ok(w) => job.width = w,
                Err(_) => unreachable!(),
            }
        }
        if let Some(height) = matches.value_of("height") {
            match height.parse::<u32>() {
                Ok(h) => job.height = h,
                Err(_) => unreachable!(),
            }
        }
        // `possible_values` restricts these flags, so unknown strings can't occur.
        if let Some(format) = matches.value_of("format") {
            match format {
                "jpg" => job.format = Format::Jpeg,
                "png" => job.format = Format::Png,
                _ => unreachable!("while matching format in Arguments::job_from_clap()"),
            }
        }
        if let Some(filter) = matches.value_of("filter") {
            match filter {
                "nn" => job.filter = FilterType::Nearest,
                "lf" => job.filter = FilterType::Triangle,
                "cf" => job.filter = FilterType::CatmullRom,
                "gf" => job.filter = FilterType::Gaussian,
                "l" => job.filter = FilterType::Lanczos3,
                _ => unreachable!("while matching filters in Arguments::job_from_clap()"),
            }
        }
        if let Some(resize) = matches.value_of("resize") {
            match resize {
                "inc" => job.resize = ResizeType::Increase,
                "dec" => job.resize = ResizeType::Decrease,
                "non" => job.resize = ResizeType::Neither,
                "iod" => job.resize = ResizeType::Eather,
                _ => unreachable!("while resize type in Arguments::job_from_clap()"),
            }
        }
        if let Some(suffix) = matches.value_of("suffix") {
            job.suffix = suffix.to_string();
        }
        if matches.is_present("renameall") {
            job.rename_all = true;
        }
        job
    }
    /// Builds Arguments from a pre-configured JobBuilder instead of clap.
    pub fn from(builder: JobBuilder) -> Self {
        Arguments {
            job: builder.execute(),
            images: Arguments::image_paths(),
        }
    }
    /// Collects every CLI argument that is an existing file with a supported
    /// image extension.
    // NOTE(review): this scans ALL raw args, so a flag value that happens to
    // be an existing image path would also be collected — confirm intended.
    fn image_paths() -> Vec<PathBuf> {
        let mut images: Vec<PathBuf> = Vec::new();
        for argument in env::args().skip(1) {
            let path = PathBuf::from(argument);
            if path.is_file() {
                // Panics on extension-less files / non-UTF8 names (expect).
                let file_extension = path.extension()
                    .expect("arguments::image_paths()").to_str()
                    .expect("arguments::image_paths()").to_lowercase();
                for item in &SUPPORTED_IMAGES {
                    if item == &file_extension {
                        images.push(path.clone());
                    }
                }
            }
        }
        images
    }
}
use crate::sys;
use piet::{Error, RoundInto};
/// DirectWrite-backed implementation of `piet::Text`, wrapping the
/// DWrite factory used to create formats and layouts.
pub struct Text(pub(crate) sys::dwrite::Factory);
impl piet::Text for Text {
    type Font = Font;
    type FontBuilder = FontBuilder;
    type TextLayout = TextLayout;
    type TextLayoutBuilder = TextLayoutBuilder;
    fn new_font_by_name(
        &mut self,
        name: &str,
        size: f64,
    ) -> Result<Self::FontBuilder, Error> {
        Ok(FontBuilder {
            text_format: self.0.create_text_format(
                name,
                size.round_into(),
                sys::dwrite::FontWeight::Normal,
            ),
        })
    }
    fn new_text_layout(
        &mut self,
        font: &Self::Font,
        text: &str,
    ) -> Result<Self::TextLayoutBuilder, Error> {
        Ok(TextLayoutBuilder {
            // 1e6 x 1e6 stands in for "unbounded" — the piet API supplies
            // no width/height here. TODO: revisit when piet adds them.
            text_layout: self.0.create_text_layout(text, &font.0, 1e6, 1e6),
        })
    }
}
/// DirectWrite-backed implementation of `piet::TextLayout`.
pub struct TextLayout(pub(crate) sys::dwrite::TextLayout);
impl piet::TextLayout for TextLayout {
    // WARNING: not implemented yet — calling this panics.
    fn width(&self) -> f64 {
        unimplemented!()
    }
}
/// A font, represented by an underlying DWrite text format.
pub struct Font(sys::dwrite::TextFormat);
impl piet::Font for Font {}
/// Builder for `Font`; the text format is fully configured up front, so
/// `build` is infallible in practice.
pub struct FontBuilder {
    text_format: sys::dwrite::TextFormat,
}
impl piet::FontBuilder for FontBuilder {
    type Out = Font;
    fn build(self) -> Result<Self::Out, Error> {
        Ok(Font(self.text_format))
    }
}
/// Builder for `TextLayout`; the layout is created eagerly, so `build`
/// simply wraps it.
pub struct TextLayoutBuilder {
    text_layout: sys::dwrite::TextLayout,
}
impl piet::TextLayoutBuilder for TextLayoutBuilder {
    type Out = TextLayout;
    fn build(self) -> Result<Self::Out, Error> {
        Ok(TextLayout(self.text_layout))
    }
}
|
// svd2rust-generated register access code for DDRCTRL_DFIMISC.
#[doc = "Reader of register DDRCTRL_DFIMISC"]
pub type R = crate::R<u32, super::DDRCTRL_DFIMISC>;
#[doc = "Writer for register DDRCTRL_DFIMISC"]
pub type W = crate::W<u32, super::DDRCTRL_DFIMISC>;
#[doc = "Register DDRCTRL_DFIMISC `reset()`'s with value 0x01"]
impl crate::ResetValue for super::DDRCTRL_DFIMISC {
    type Type = u32;
    #[inline(always)]
    fn reset_value() -> Self::Type {
        // Hardware reset value as declared in the SVD description.
        0x01
    }
}
#[doc = "Reader of field `DFI_INIT_COMPLETE_EN`"]
pub type DFI_INIT_COMPLETE_EN_R = crate::R<bool, bool>;
#[doc = "Write proxy for field `DFI_INIT_COMPLETE_EN`"]
pub struct DFI_INIT_COMPLETE_EN_W<'a> {
    w: &'a mut W,
}
// Write proxy: each method stages the field value in `W` and returns it
// for call chaining.
impl<'a> DFI_INIT_COMPLETE_EN_W<'a> {
    #[doc = r"Sets the field bit"]
    #[inline(always)]
    pub fn set_bit(self) -> &'a mut W {
        self.bit(true)
    }
    #[doc = r"Clears the field bit"]
    #[inline(always)]
    pub fn clear_bit(self) -> &'a mut W {
        self.bit(false)
    }
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub fn bit(self, value: bool) -> &'a mut W {
        // Field occupies bit 0: clear it, then OR in the new value.
        self.w.bits = (self.w.bits & !0x01) | ((value as u32) & 0x01);
        self.w
    }
}
#[doc = "Reader of field `CTL_IDLE_EN`"]
pub type CTL_IDLE_EN_R = crate::R<bool, bool>;
#[doc = "Write proxy for field `CTL_IDLE_EN`"]
pub struct CTL_IDLE_EN_W<'a> {
    w: &'a mut W,
}
// Write proxy: each method stages the field value in `W` and returns it
// for call chaining.
impl<'a> CTL_IDLE_EN_W<'a> {
    #[doc = r"Sets the field bit"]
    #[inline(always)]
    pub fn set_bit(self) -> &'a mut W {
        self.bit(true)
    }
    #[doc = r"Clears the field bit"]
    #[inline(always)]
    pub fn clear_bit(self) -> &'a mut W {
        self.bit(false)
    }
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub fn bit(self, value: bool) -> &'a mut W {
        // Field occupies bit 4: clear it, then OR in the new value.
        self.w.bits = (self.w.bits & !(0x01 << 4)) | (((value as u32) & 0x01) << 4);
        self.w
    }
}
#[doc = "Reader of field `DFI_INIT_START`"]
pub type DFI_INIT_START_R = crate::R<bool, bool>;
#[doc = "Write proxy for field `DFI_INIT_START`"]
pub struct DFI_INIT_START_W<'a> {
    w: &'a mut W,
}
// Write proxy: each method stages the field value in `W` and returns it
// for call chaining.
impl<'a> DFI_INIT_START_W<'a> {
    #[doc = r"Sets the field bit"]
    #[inline(always)]
    pub fn set_bit(self) -> &'a mut W {
        self.bit(true)
    }
    #[doc = r"Clears the field bit"]
    #[inline(always)]
    pub fn clear_bit(self) -> &'a mut W {
        self.bit(false)
    }
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub fn bit(self, value: bool) -> &'a mut W {
        // Field occupies bit 5: clear it, then OR in the new value.
        self.w.bits = (self.w.bits & !(0x01 << 5)) | (((value as u32) & 0x01) << 5);
        self.w
    }
}
#[doc = "Reader of field `DFI_FREQUENCY`"]
pub type DFI_FREQUENCY_R = crate::R<u8, u8>;
#[doc = "Write proxy for field `DFI_FREQUENCY`"]
pub struct DFI_FREQUENCY_W<'a> {
    w: &'a mut W,
}
impl<'a> DFI_FREQUENCY_W<'a> {
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    // Unsafe contract (svd2rust convention): caller must keep `value`
    // within the 5-bit field range (<= 0x1f).
    pub unsafe fn bits(self, value: u8) -> &'a mut W {
        // 5-bit field at bits 8..=12: clear, then OR in the masked value.
        self.w.bits = (self.w.bits & !(0x1f << 8)) | (((value as u32) & 0x1f) << 8);
        self.w
    }
}
// Field readers: extract each field from the captured register value.
impl R {
    #[doc = "Bit 0 - DFI_INIT_COMPLETE_EN"]
    #[inline(always)]
    pub fn dfi_init_complete_en(&self) -> DFI_INIT_COMPLETE_EN_R {
        DFI_INIT_COMPLETE_EN_R::new((self.bits & 0x01) != 0)
    }
    #[doc = "Bit 4 - CTL_IDLE_EN"]
    #[inline(always)]
    pub fn ctl_idle_en(&self) -> CTL_IDLE_EN_R {
        CTL_IDLE_EN_R::new(((self.bits >> 4) & 0x01) != 0)
    }
    #[doc = "Bit 5 - DFI_INIT_START"]
    #[inline(always)]
    pub fn dfi_init_start(&self) -> DFI_INIT_START_R {
        DFI_INIT_START_R::new(((self.bits >> 5) & 0x01) != 0)
    }
    #[doc = "Bits 8:12 - DFI_FREQUENCY"]
    #[inline(always)]
    pub fn dfi_frequency(&self) -> DFI_FREQUENCY_R {
        DFI_FREQUENCY_R::new(((self.bits >> 8) & 0x1f) as u8)
    }
}
// Field writers: each returns the matching write proxy over this `W`.
impl W {
    #[doc = "Bit 0 - DFI_INIT_COMPLETE_EN"]
    #[inline(always)]
    pub fn dfi_init_complete_en(&mut self) -> DFI_INIT_COMPLETE_EN_W {
        DFI_INIT_COMPLETE_EN_W { w: self }
    }
    #[doc = "Bit 4 - CTL_IDLE_EN"]
    #[inline(always)]
    pub fn ctl_idle_en(&mut self) -> CTL_IDLE_EN_W {
        CTL_IDLE_EN_W { w: self }
    }
    #[doc = "Bit 5 - DFI_INIT_START"]
    #[inline(always)]
    pub fn dfi_init_start(&mut self) -> DFI_INIT_START_W {
        DFI_INIT_START_W { w: self }
    }
    #[doc = "Bits 8:12 - DFI_FREQUENCY"]
    #[inline(always)]
    pub fn dfi_frequency(&mut self) -> DFI_FREQUENCY_W {
        DFI_FREQUENCY_W { w: self }
    }
}
|
/*
* The default huffman tables are taken from
* section K.3 Typical Huffman tables for 8-bit precision luminance and chrominance
*/
/// Huffman table class as encoded in the DHT segment's Tc field:
/// 0 selects a DC table, 1 an AC table.
#[derive(Copy, Clone, Debug)]
pub enum CodingClass {
    Dc = 0,
    Ac = 1,
}
// BITS list for the luminance DC table (ITU-T T.81, Table K.3):
// number of codes of each code length 1..=16.
static DEFAULT_LUMA_DC_CODE_LENGTHS: [u8; 16] = [
    0x00, 0x01, 0x05, 0x01, 0x01, 0x01, 0x01, 0x01,
    0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
];
// HUFFVAL list for the luminance DC table: DC difference categories 0..=11.
static DEFAULT_LUMA_DC_VALUES: [u8; 12] = [
    0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
    0x08, 0x09, 0x0A, 0x0B,
];
// BITS list for the chrominance DC table (Table K.4).
static DEFAULT_CHROMA_DC_CODE_LENGTHS: [u8; 16] = [
    0x00, 0x03, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
    0x01, 0x01, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00,
];
// HUFFVAL list for the chrominance DC table.
static DEFAULT_CHROMA_DC_VALUES: [u8; 12] = [
    0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
    0x08, 0x09, 0x0A, 0x0B,
];
// BITS list for the luminance AC table (Table K.5).
static DEFAULT_LUMA_AC_CODE_LENGTHS: [u8; 16] = [
    0x00, 0x02, 0x01, 0x03, 0x03, 0x02, 0x04, 0x03,
    0x05, 0x05, 0x04, 0x04, 0x00, 0x00, 0x01, 0x7D,
];
// HUFFVAL list for the luminance AC table: (run, size) symbols.
static DEFAULT_LUMA_AC_VALUES: [u8; 162] = [
    0x01, 0x02, 0x03, 0x00, 0x04, 0x11, 0x05, 0x12,
    0x21, 0x31, 0x41, 0x06, 0x13, 0x51, 0x61, 0x07,
    0x22, 0x71, 0x14, 0x32, 0x81, 0x91, 0xA1, 0x08,
    0x23, 0x42, 0xB1, 0xC1, 0x15, 0x52, 0xD1, 0xF0,
    0x24, 0x33, 0x62, 0x72, 0x82, 0x09, 0x0A, 0x16,
    0x17, 0x18, 0x19, 0x1A, 0x25, 0x26, 0x27, 0x28,
    0x29, 0x2A, 0x34, 0x35, 0x36, 0x37, 0x38, 0x39,
    0x3A, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 0x49,
    0x4A, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58, 0x59,
    0x5A, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, 0x69,
    0x6A, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78, 0x79,
    0x7A, 0x83, 0x84, 0x85, 0x86, 0x87, 0x88, 0x89,
    0x8A, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, 0x98,
    0x99, 0x9A, 0xA2, 0xA3, 0xA4, 0xA5, 0xA6, 0xA7,
    0xA8, 0xA9, 0xAA, 0xB2, 0xB3, 0xB4, 0xB5, 0xB6,
    0xB7, 0xB8, 0xB9, 0xBA, 0xC2, 0xC3, 0xC4, 0xC5,
    0xC6, 0xC7, 0xC8, 0xC9, 0xCA, 0xD2, 0xD3, 0xD4,
    0xD5, 0xD6, 0xD7, 0xD8, 0xD9, 0xDA, 0xE1, 0xE2,
    0xE3, 0xE4, 0xE5, 0xE6, 0xE7, 0xE8, 0xE9, 0xEA,
    0xF1, 0xF2, 0xF3, 0xF4, 0xF5, 0xF6, 0xF7, 0xF8,
    0xF9, 0xFA,
];
// BITS list for the chrominance AC table (Table K.6).
static DEFAULT_CHROMA_AC_CODE_LENGTHS: [u8; 16] = [
    0x00, 0x02, 0x01, 0x02, 0x04, 0x04, 0x03, 0x04,
    0x07, 0x05, 0x04, 0x04, 0x00, 0x01, 0x02, 0x77,
];
// HUFFVAL list for the chrominance AC table: (run, size) symbols.
static DEFAULT_CHROMA_AC_VALUES: [u8; 162] = [
    0x00, 0x01, 0x02, 0x03, 0x11, 0x04, 0x05, 0x21,
    0x31, 0x06, 0x12, 0x41, 0x51, 0x07, 0x61, 0x71,
    0x13, 0x22, 0x32, 0x81, 0x08, 0x14, 0x42, 0x91,
    0xA1, 0xB1, 0xC1, 0x09, 0x23, 0x33, 0x52, 0xF0,
    0x15, 0x62, 0x72, 0xD1, 0x0A, 0x16, 0x24, 0x34,
    0xE1, 0x25, 0xF1, 0x17, 0x18, 0x19, 0x1A, 0x26,
    0x27, 0x28, 0x29, 0x2A, 0x35, 0x36, 0x37, 0x38,
    0x39, 0x3A, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48,
    0x49, 0x4A, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58,
    0x59, 0x5A, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68,
    0x69, 0x6A, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78,
    0x79, 0x7A, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
    0x88, 0x89, 0x8A, 0x92, 0x93, 0x94, 0x95, 0x96,
    0x97, 0x98, 0x99, 0x9A, 0xA2, 0xA3, 0xA4, 0xA5,
    0xA6, 0xA7, 0xA8, 0xA9, 0xAA, 0xB2, 0xB3, 0xB4,
    0xB5, 0xB6, 0xB7, 0xB8, 0xB9, 0xBA, 0xC2, 0xC3,
    0xC4, 0xC5, 0xC6, 0xC7, 0xC8, 0xC9, 0xCA, 0xD2,
    0xD3, 0xD4, 0xD5, 0xD6, 0xD7, 0xD8, 0xD9, 0xDA,
    0xE2, 0xE3, 0xE4, 0xE5, 0xE6, 0xE7, 0xE8, 0xE9,
    0xEA, 0xF2, 0xF3, 0xF4, 0xF5, 0xF6, 0xF7, 0xF8,
    0xF9, 0xFA,
];
/// A JPEG Huffman table: the canonical BITS/HUFFVAL representation plus a
/// per-symbol `(code size, code)` lookup table precomputed for encoding.
pub struct HuffmanTable {
    // Indexed by symbol value; a size of 0 marks a symbol absent from the table.
    lookup_table: [(u8, u16); 256],
    // BITS: number of codes of each length 1..=16.
    length: [u8; 16],
    // HUFFVAL: symbol values in order of increasing code length.
    values: Vec<u8>,
}
impl HuffmanTable {
    /// Builds a table from DHT-style code-length counts (`length`) and symbol
    /// values (`values`), precomputing the per-symbol lookup table.
    pub fn new(length: &[u8; 16], values: &[u8]) -> HuffmanTable {
        HuffmanTable {
            lookup_table: create_lookup_table(length, values),
            length: *length,
            values: values.to_vec(),
        }
    }
    /// Default luminance DC table (Annex K.3).
    pub fn default_luma_dc() -> HuffmanTable {
        Self::new(&DEFAULT_LUMA_DC_CODE_LENGTHS, &DEFAULT_LUMA_DC_VALUES)
    }
    /// Default luminance AC table (Annex K.3).
    pub fn default_luma_ac() -> HuffmanTable {
        Self::new(&DEFAULT_LUMA_AC_CODE_LENGTHS, &DEFAULT_LUMA_AC_VALUES)
    }
    /// Default chrominance DC table (Annex K.3).
    pub fn default_chroma_dc() -> HuffmanTable {
        Self::new(&DEFAULT_CHROMA_DC_CODE_LENGTHS, &DEFAULT_CHROMA_DC_VALUES)
    }
    /// Default chrominance AC table (Annex K.3).
    pub fn default_chroma_ac() -> HuffmanTable {
        Self::new(&DEFAULT_CHROMA_AC_CODE_LENGTHS, &DEFAULT_CHROMA_AC_VALUES)
    }
    /// Generates an optimized huffman table as described in Section K.2
    ///
    /// `freq[v]` is the occurrence count of symbol `v`. Index 256 is the
    /// reserved pseudo-symbol of Figure K.1 (it is skipped when emitting
    /// HUFFVAL below). NOTE(review): K.2 expects callers to pre-set
    /// `freq[256] = 1` so no real symbol gets the all-ones code — confirm at
    /// call sites.
    #[allow(clippy::needless_range_loop)]
    pub fn new_optimized(mut freq: [u32; 257]) -> HuffmanTable {
        // `others[v]` chains symbols merged into the same subtree (-1 ends a
        // chain); `codesize[v]` is the current code length of symbol v.
        let mut others = [-1i32; 257];
        let mut codesize = [0usize; 257];
        // Find Huffman code sizes
        // Figure K.1
        loop {
            let mut v1 = None;
            let mut v1_min = u32::MAX;
            // Find the largest value with the least non zero frequency
            // (`<=` makes ties resolve to the highest index, per K.1).
            for (i, &f) in freq.iter().enumerate() {
                if f > 0 && f <= v1_min {
                    v1_min = f;
                    v1 = Some(i);
                }
            }
            // No nonzero frequencies remain: the code tree is complete.
            let mut v1 = match v1 {
                Some(v) => v,
                None => break
            };
            let mut v2 = None;
            let mut v2_min = u32::MAX;
            // Find the next largest value with the least non zero frequency
            for (i, &f) in freq.iter().enumerate() {
                if f > 0 && f <= v2_min && i != v1 {
                    v2_min = f;
                    v2 = Some(i);
                }
            }
            // Only one symbol left: nothing more to merge.
            let mut v2 = match v2 {
                Some(v) => v,
                None => break
            };
            // Merge the two least-frequent subtrees; every symbol in both
            // chains gets one bit longer.
            freq[v1] += freq[v2];
            freq[v2] = 0;
            codesize[v1] += 1;
            while others[v1] >= 0 {
                v1 = others[v1] as usize;
                codesize[v1] += 1;
            }
            others[v1] = v2 as i32;
            codesize[v2] += 1;
            while others[v2] >= 0 {
                v2 = others[v2] as usize;
                codesize[v2] += 1;
            }
        }
        // Find the number of codes of each size
        // Figure K.2
        let mut bits = [0u8; 33];
        for &size in codesize.iter() {
            if size > 0 {
                bits[size] += 1;
            }
        }
        // Limiting code lengths to 16 bits
        // Figure K.3: move pairs of over-long codes to length i-1 and one
        // shorter code down, preserving the prefix property.
        let mut i = 32;
        while i > 16 {
            while bits[i] > 0 {
                let mut j = i - 2;
                while bits[j] == 0 {
                    j -= 1;
                }
                bits[i] -= 2;
                bits[i - 1] += 1;
                bits[j + 1] += 2;
                bits[j] -= 1;
            }
            i -= 1;
        }
        // Drop the code point reserved by the pseudo-symbol (tail of K.3).
        while bits[i] == 0 {
            i -= 1;
        }
        bits[i] -= 1;
        // Sorting of input values according to code size
        // Figure K.4 — the 0..=255 range deliberately excludes symbol 256.
        let mut huffval = [0u8; 256];
        let mut k = 0;
        for i in 1..=32 {
            for j in 0..=255 {
                if codesize[j] == i {
                    huffval[k] = j as u8;
                    k += 1;
                }
            }
        }
        // Only lengths 1..=16 can remain after the limiting pass above.
        let mut length = [0u8; 16];
        for (i, v) in length.iter_mut().enumerate() {
            *v = bits[i + 1];
        }
        let values = huffval[0..k].to_vec();
        HuffmanTable {
            lookup_table: create_lookup_table(&length, &values),
            length,
            values,
        }
    }
    /// Returns the `(code size in bits, code)` pair for `value`.
    #[inline]
    pub fn get_for_value(&self, value: u8) -> &(u8, u16) {
        let res = &self.lookup_table[value as usize];
        // Size 0 means `value` was never assigned a code in this table.
        debug_assert!(res.0 > 0, "Got zero size code for value: {}", value);
        res
    }
    /// The BITS array: number of codes of each length 1..=16.
    pub fn length(&self) -> &[u8; 16] {
        &self.length
    }
    /// The HUFFVAL array: symbol values ordered by increasing code length.
    pub fn values(&self) -> &[u8] {
        &self.values
    }
}
/// Expands the 16-entry code-length counts into a flat per-symbol size list
/// (Figure C.1): `code_lengths[i]` consecutive entries receive size `i + 1`.
/// Unused trailing entries stay 0.
fn create_sizes(code_lengths: &[u8; 16]) -> [u8; 256] {
    let mut sizes = [0u8; 256];
    let mut filled = 0usize;
    for (idx, &count) in code_lengths.iter().enumerate() {
        let size = (idx + 1) as u8;
        for slot in sizes.iter_mut().skip(filled).take(count as usize) {
            *slot = size;
        }
        filled += count as usize;
    }
    sizes
}
/// Assigns canonical Huffman codes to the (length-sorted) size list
/// (Figure C.2): codes increment within a length and are left-shifted by the
/// length difference whenever the size increases. Stops at the first zero size.
fn create_codes(sizes: &[u8; 256]) -> [u16; 256] {
    let mut codes = [0u16; 256];
    let mut code: u16 = 0;
    let mut prev_size = sizes[0];
    for (i, &size) in sizes.iter().enumerate() {
        if size == 0 {
            break;
        }
        if size != prev_size {
            code <<= (size - prev_size) as usize;
            prev_size = size;
        }
        codes[i] = code;
        code += 1;
    }
    codes
}
// Create the per-symbol lookup table (Figure C.3): symbol `values[i]` maps to
// the i-th canonical (size, code) pair. (The previous comment said "codes",
// copy-pasted from Figure C.2 above.)
fn create_lookup_table(code_length: &[u8; 16], values: &[u8]) -> [(u8, u16); 256] {
    let sizes = create_sizes(code_length);
    let codes = create_codes(&sizes);
    // Entries for absent symbols stay (0, 0); callers detect this via size == 0.
    let mut lookup_table = [(0u8, 0u16); 256];
    for (i, &value) in values.iter().enumerate() {
        lookup_table[value as usize] = (sizes[i], codes[i]);
    }
    lookup_table
} |
use std::{ops::Deref, time::Duration};
use futures::{future::BoxFuture, stream::StreamExt, FutureExt};
use serde::{
de::{self, Deserializer},
Deserialize,
};
use crate::{
bson::{Bson, Deserializer as BsonDeserializer, Document},
bson_util,
error::Result,
options::{FindOptions, Hint, InsertManyOptions, UpdateOptions},
test::util::{CommandEvent, EventClient},
Collection,
};
/// A single operation from a spec-test file, executable against a collection.
pub(super) trait TestOperation {
    /// The command names to monitor as part of this test.
    fn command_names(&self) -> &[&str];
    /// Runs the operation against `collection`, discarding any result value.
    fn execute(&self, collection: Collection<Document>) -> BoxFuture<Result<()>>;
}
/// Type-erased wrapper for any deserialized `TestOperation`.
pub(super) struct AnyTestOperation {
    operation: Box<dyn TestOperation>,
}
impl<'de> Deserialize<'de> for AnyTestOperation {
fn deserialize<D: Deserializer<'de>>(deserializer: D) -> std::result::Result<Self, D::Error> {
#[derive(Deserialize)]
struct OperationDefinition {
name: String,
arguments: Bson,
}
let definition = OperationDefinition::deserialize(deserializer)?;
let boxed_op = match definition.name.as_str() {
"insertOne" => InsertOne::deserialize(BsonDeserializer::new(definition.arguments))
.map(|op| Box::new(op) as Box<dyn TestOperation>),
"insertMany" => InsertMany::deserialize(BsonDeserializer::new(definition.arguments))
.map(|op| Box::new(op) as Box<dyn TestOperation>),
"updateOne" => UpdateOne::deserialize(BsonDeserializer::new(definition.arguments))
.map(|op| Box::new(op) as Box<dyn TestOperation>),
"updateMany" => UpdateMany::deserialize(BsonDeserializer::new(definition.arguments))
.map(|op| Box::new(op) as Box<dyn TestOperation>),
"deleteMany" => DeleteMany::deserialize(BsonDeserializer::new(definition.arguments))
.map(|op| Box::new(op) as Box<dyn TestOperation>),
"deleteOne" => DeleteOne::deserialize(BsonDeserializer::new(definition.arguments))
.map(|op| Box::new(op) as Box<dyn TestOperation>),
"find" => Find::deserialize(BsonDeserializer::new(definition.arguments))
.map(|op| Box::new(op) as Box<dyn TestOperation>),
_ => unimplemented!(),
}
.map_err(|e| de::Error::custom(format!("{}", e)))?;
Ok(AnyTestOperation {
operation: boxed_op,
})
}
}
// Deref to the inner trait object so callers can invoke `TestOperation`
// methods directly on `AnyTestOperation`.
impl Deref for AnyTestOperation {
    type Target = Box<dyn TestOperation>;
    fn deref(&self) -> &Box<dyn TestOperation> {
        &self.operation
    }
}
/// `deleteMany` test operation: removes every document matching `filter`.
#[derive(Deserialize)]
pub(super) struct DeleteMany {
    filter: Document,
}
impl TestOperation for DeleteMany {
    fn command_names(&self) -> &[&str] {
        &["delete"]
    }
    fn execute(&self, collection: Collection<Document>) -> BoxFuture<Result<()>> {
        async move {
            let filter = self.filter.clone();
            collection.delete_many(filter, None).await?;
            Ok(())
        }
        .boxed()
    }
}
/// `deleteOne` test operation: removes the first document matching `filter`.
#[derive(Deserialize)]
pub(super) struct DeleteOne {
    filter: Document,
}
impl TestOperation for DeleteOne {
    fn command_names(&self) -> &[&str] {
        &["delete"]
    }
    fn execute(&self, collection: Collection<Document>) -> BoxFuture<Result<()>> {
        async move {
            let filter = self.filter.clone();
            collection.delete_one(filter, None).await?;
            Ok(())
        }
        .boxed()
    }
}
/// `find` test operation arguments; mirrors the fields of `FindOptions`.
#[derive(Debug, Default, Deserialize)]
#[serde(deny_unknown_fields)]
pub(super) struct Find {
    filter: Option<Document>,
    // `FindOptions` cannot be embedded directly because serde doesn't support combining `flatten`
    // and `deny_unknown_fields`, so its fields are replicated here.
    #[serde(default)]
    sort: Option<Document>,
    #[serde(default)]
    skip: Option<u64>,
    // Deserialized as i64 to accept any test-file integer; cast to u32 when
    // the options are built.
    #[serde(default, rename = "batchSize")]
    batch_size: Option<i64>,
    #[serde(default)]
    limit: Option<i64>,
    #[serde(default)]
    comment: Option<String>,
    #[serde(default)]
    hint: Option<Hint>,
    #[serde(
        rename = "maxTimeMS",
        deserialize_with = "bson_util::deserialize_duration_option_from_u64_millis",
        default
    )]
    max_time: Option<Duration>,
    #[serde(default)]
    min: Option<Document>,
    #[serde(default)]
    max: Option<Document>,
    #[serde(rename = "returnKey", default)]
    return_key: Option<bool>,
    #[serde(rename = "showRecordId", default)]
    show_record_id: Option<bool>,
}
impl TestOperation for Find {
    fn command_names(&self) -> &[&str] {
        // `getMore` is included because `execute` drains the whole cursor.
        &["find", "getMore"]
    }
    /// Runs the find and exhausts the resulting cursor, surfacing any
    /// per-batch error.
    fn execute(&self, collection: Collection<Document>) -> BoxFuture<Result<()>> {
        async move {
            // `FindOptions` is constructed without the use of `..Default::default()` to enforce at
            // compile-time that any new fields added there need to be considered here.
            let options = FindOptions {
                sort: self.sort.clone(),
                skip: self.skip,
                // NOTE(review): `as u32` silently truncates out-of-range
                // values; test inputs are assumed to fit.
                batch_size: self.batch_size.map(|i| i as u32),
                limit: self.limit,
                comment: self.comment.clone(),
                hint: self.hint.clone(),
                max_time: self.max_time,
                min: self.min.clone(),
                max: self.max.clone(),
                return_key: self.return_key,
                show_record_id: self.show_record_id,
                allow_disk_use: None,
                allow_partial_results: None,
                cursor_type: None,
                max_await_time: None,
                max_scan: None,
                no_cursor_timeout: None,
                projection: None,
                read_concern: None,
                selection_criteria: None,
                collation: None,
            };
            let mut cursor = collection.find(self.filter.clone(), options).await?;
            while let Some(result) = cursor.next().await {
                result?;
            }
            Ok(())
        }
        .boxed()
    }
}
/// `insertMany` test operation: inserts `documents` with optional options.
#[derive(Debug, Deserialize)]
pub(super) struct InsertMany {
    documents: Vec<Document>,
    #[serde(default)]
    options: Option<InsertManyOptions>,
}
impl TestOperation for InsertMany {
    fn command_names(&self) -> &[&str] {
        &["insert"]
    }
    fn execute(&self, collection: Collection<Document>) -> BoxFuture<Result<()>> {
        async move {
            let documents = self.documents.clone();
            let options = self.options.clone();
            collection.insert_many(documents, options).await?;
            Ok(())
        }
        .boxed()
    }
}
/// `insertOne` test operation: inserts a single document.
#[derive(Debug, Deserialize)]
pub(super) struct InsertOne {
    document: Document,
}
impl TestOperation for InsertOne {
    fn command_names(&self) -> &[&str] {
        &["insert"]
    }
    fn execute(&self, collection: Collection<Document>) -> BoxFuture<Result<()>> {
        async move {
            let document = self.document.clone();
            collection.insert_one(document, None).await?;
            Ok(())
        }
        .boxed()
    }
}
/// `updateMany` test operation: applies `update` to all documents matching
/// `filter`.
#[derive(Debug, Deserialize)]
pub(super) struct UpdateMany {
    filter: Document,
    update: Document,
}
impl TestOperation for UpdateMany {
    fn command_names(&self) -> &[&str] {
        &["update"]
    }
    fn execute(&self, collection: Collection<Document>) -> BoxFuture<Result<()>> {
        async move {
            let (filter, update) = (self.filter.clone(), self.update.clone());
            collection.update_many(filter, update, None).await?;
            Ok(())
        }
        .boxed()
    }
}
/// `updateOne` test operation: applies `update` to the first document
/// matching `filter`, optionally upserting.
#[derive(Debug, Deserialize)]
pub(super) struct UpdateOne {
    filter: Document,
    update: Document,
    #[serde(default)]
    upsert: Option<bool>,
}
impl TestOperation for UpdateOne {
    fn command_names(&self) -> &[&str] {
        &["update"]
    }
    fn execute(&self, collection: Collection<Document>) -> BoxFuture<Result<()>> {
        async move {
            // Only build explicit options when the test specifies `upsert`.
            let options = self.upsert.map(|upsert| UpdateOptions {
                upsert: Some(upsert),
                ..Default::default()
            });
            collection
                .update_one(self.filter.clone(), self.update.clone(), options)
                .await?;
            Ok(())
        }
        .boxed()
    }
}
impl EventClient {
    /// Executes `operation` against the given namespace and returns the
    /// command events captured for the operation's command names.
    pub(super) async fn run_operation_with_events(
        &self,
        operation: AnyTestOperation,
        database_name: &str,
        collection_name: &str,
    ) -> Vec<CommandEvent> {
        // The outcome is intentionally discarded: these tests assert on the
        // emitted events, not on operation success.
        let _: Result<_> = operation
            .execute(self.database(database_name).collection(collection_name))
            .await;
        self.get_command_events(operation.command_names())
    }
}
|
// Auto-generated register-access types (svd2rust output) for the
// BUFF_CPU_SHOULD_HANDLE register: one boolean reader alias per endpoint
// IN/OUT field. Do not edit by hand; regenerate from the SVD instead.
#[doc = "Reader of register BUFF_CPU_SHOULD_HANDLE"]
pub type R = crate::R<u32, super::BUFF_CPU_SHOULD_HANDLE>;
#[doc = "Reader of field `EP15_OUT`"]
pub type EP15_OUT_R = crate::R<bool, bool>;
#[doc = "Reader of field `EP15_IN`"]
pub type EP15_IN_R = crate::R<bool, bool>;
#[doc = "Reader of field `EP14_OUT`"]
pub type EP14_OUT_R = crate::R<bool, bool>;
#[doc = "Reader of field `EP14_IN`"]
pub type EP14_IN_R = crate::R<bool, bool>;
#[doc = "Reader of field `EP13_OUT`"]
pub type EP13_OUT_R = crate::R<bool, bool>;
#[doc = "Reader of field `EP13_IN`"]
pub type EP13_IN_R = crate::R<bool, bool>;
#[doc = "Reader of field `EP12_OUT`"]
pub type EP12_OUT_R = crate::R<bool, bool>;
#[doc = "Reader of field `EP12_IN`"]
pub type EP12_IN_R = crate::R<bool, bool>;
#[doc = "Reader of field `EP11_OUT`"]
pub type EP11_OUT_R = crate::R<bool, bool>;
#[doc = "Reader of field `EP11_IN`"]
pub type EP11_IN_R = crate::R<bool, bool>;
#[doc = "Reader of field `EP10_OUT`"]
pub type EP10_OUT_R = crate::R<bool, bool>;
#[doc = "Reader of field `EP10_IN`"]
pub type EP10_IN_R = crate::R<bool, bool>;
#[doc = "Reader of field `EP9_OUT`"]
pub type EP9_OUT_R = crate::R<bool, bool>;
#[doc = "Reader of field `EP9_IN`"]
pub type EP9_IN_R = crate::R<bool, bool>;
#[doc = "Reader of field `EP8_OUT`"]
pub type EP8_OUT_R = crate::R<bool, bool>;
#[doc = "Reader of field `EP8_IN`"]
pub type EP8_IN_R = crate::R<bool, bool>;
#[doc = "Reader of field `EP7_OUT`"]
pub type EP7_OUT_R = crate::R<bool, bool>;
#[doc = "Reader of field `EP7_IN`"]
pub type EP7_IN_R = crate::R<bool, bool>;
#[doc = "Reader of field `EP6_OUT`"]
pub type EP6_OUT_R = crate::R<bool, bool>;
#[doc = "Reader of field `EP6_IN`"]
pub type EP6_IN_R = crate::R<bool, bool>;
#[doc = "Reader of field `EP5_OUT`"]
pub type EP5_OUT_R = crate::R<bool, bool>;
#[doc = "Reader of field `EP5_IN`"]
pub type EP5_IN_R = crate::R<bool, bool>;
#[doc = "Reader of field `EP4_OUT`"]
pub type EP4_OUT_R = crate::R<bool, bool>;
#[doc = "Reader of field `EP4_IN`"]
pub type EP4_IN_R = crate::R<bool, bool>;
#[doc = "Reader of field `EP3_OUT`"]
pub type EP3_OUT_R = crate::R<bool, bool>;
#[doc = "Reader of field `EP3_IN`"]
pub type EP3_IN_R = crate::R<bool, bool>;
#[doc = "Reader of field `EP2_OUT`"]
pub type EP2_OUT_R = crate::R<bool, bool>;
#[doc = "Reader of field `EP2_IN`"]
pub type EP2_IN_R = crate::R<bool, bool>;
#[doc = "Reader of field `EP1_OUT`"]
pub type EP1_OUT_R = crate::R<bool, bool>;
#[doc = "Reader of field `EP1_IN`"]
pub type EP1_IN_R = crate::R<bool, bool>;
#[doc = "Reader of field `EP0_OUT`"]
pub type EP0_OUT_R = crate::R<bool, bool>;
#[doc = "Reader of field `EP0_IN`"]
pub type EP0_IN_R = crate::R<bool, bool>;
// Auto-generated (svd2rust) bit accessors: one getter per endpoint field,
// extracting the named bit of the 32-bit register value. Kept byte-identical
// so the file can be regenerated from the SVD without manual merges.
impl R {
    #[doc = "Bit 31"]
    #[inline(always)]
    pub fn ep15_out(&self) -> EP15_OUT_R {
        EP15_OUT_R::new(((self.bits >> 31) & 0x01) != 0)
    }
    #[doc = "Bit 30"]
    #[inline(always)]
    pub fn ep15_in(&self) -> EP15_IN_R {
        EP15_IN_R::new(((self.bits >> 30) & 0x01) != 0)
    }
    #[doc = "Bit 29"]
    #[inline(always)]
    pub fn ep14_out(&self) -> EP14_OUT_R {
        EP14_OUT_R::new(((self.bits >> 29) & 0x01) != 0)
    }
    #[doc = "Bit 28"]
    #[inline(always)]
    pub fn ep14_in(&self) -> EP14_IN_R {
        EP14_IN_R::new(((self.bits >> 28) & 0x01) != 0)
    }
    #[doc = "Bit 27"]
    #[inline(always)]
    pub fn ep13_out(&self) -> EP13_OUT_R {
        EP13_OUT_R::new(((self.bits >> 27) & 0x01) != 0)
    }
    #[doc = "Bit 26"]
    #[inline(always)]
    pub fn ep13_in(&self) -> EP13_IN_R {
        EP13_IN_R::new(((self.bits >> 26) & 0x01) != 0)
    }
    #[doc = "Bit 25"]
    #[inline(always)]
    pub fn ep12_out(&self) -> EP12_OUT_R {
        EP12_OUT_R::new(((self.bits >> 25) & 0x01) != 0)
    }
    #[doc = "Bit 24"]
    #[inline(always)]
    pub fn ep12_in(&self) -> EP12_IN_R {
        EP12_IN_R::new(((self.bits >> 24) & 0x01) != 0)
    }
    #[doc = "Bit 23"]
    #[inline(always)]
    pub fn ep11_out(&self) -> EP11_OUT_R {
        EP11_OUT_R::new(((self.bits >> 23) & 0x01) != 0)
    }
    #[doc = "Bit 22"]
    #[inline(always)]
    pub fn ep11_in(&self) -> EP11_IN_R {
        EP11_IN_R::new(((self.bits >> 22) & 0x01) != 0)
    }
    #[doc = "Bit 21"]
    #[inline(always)]
    pub fn ep10_out(&self) -> EP10_OUT_R {
        EP10_OUT_R::new(((self.bits >> 21) & 0x01) != 0)
    }
    #[doc = "Bit 20"]
    #[inline(always)]
    pub fn ep10_in(&self) -> EP10_IN_R {
        EP10_IN_R::new(((self.bits >> 20) & 0x01) != 0)
    }
    #[doc = "Bit 19"]
    #[inline(always)]
    pub fn ep9_out(&self) -> EP9_OUT_R {
        EP9_OUT_R::new(((self.bits >> 19) & 0x01) != 0)
    }
    #[doc = "Bit 18"]
    #[inline(always)]
    pub fn ep9_in(&self) -> EP9_IN_R {
        EP9_IN_R::new(((self.bits >> 18) & 0x01) != 0)
    }
    #[doc = "Bit 17"]
    #[inline(always)]
    pub fn ep8_out(&self) -> EP8_OUT_R {
        EP8_OUT_R::new(((self.bits >> 17) & 0x01) != 0)
    }
    #[doc = "Bit 16"]
    #[inline(always)]
    pub fn ep8_in(&self) -> EP8_IN_R {
        EP8_IN_R::new(((self.bits >> 16) & 0x01) != 0)
    }
    #[doc = "Bit 15"]
    #[inline(always)]
    pub fn ep7_out(&self) -> EP7_OUT_R {
        EP7_OUT_R::new(((self.bits >> 15) & 0x01) != 0)
    }
    #[doc = "Bit 14"]
    #[inline(always)]
    pub fn ep7_in(&self) -> EP7_IN_R {
        EP7_IN_R::new(((self.bits >> 14) & 0x01) != 0)
    }
    #[doc = "Bit 13"]
    #[inline(always)]
    pub fn ep6_out(&self) -> EP6_OUT_R {
        EP6_OUT_R::new(((self.bits >> 13) & 0x01) != 0)
    }
    #[doc = "Bit 12"]
    #[inline(always)]
    pub fn ep6_in(&self) -> EP6_IN_R {
        EP6_IN_R::new(((self.bits >> 12) & 0x01) != 0)
    }
    #[doc = "Bit 11"]
    #[inline(always)]
    pub fn ep5_out(&self) -> EP5_OUT_R {
        EP5_OUT_R::new(((self.bits >> 11) & 0x01) != 0)
    }
    #[doc = "Bit 10"]
    #[inline(always)]
    pub fn ep5_in(&self) -> EP5_IN_R {
        EP5_IN_R::new(((self.bits >> 10) & 0x01) != 0)
    }
    #[doc = "Bit 9"]
    #[inline(always)]
    pub fn ep4_out(&self) -> EP4_OUT_R {
        EP4_OUT_R::new(((self.bits >> 9) & 0x01) != 0)
    }
    #[doc = "Bit 8"]
    #[inline(always)]
    pub fn ep4_in(&self) -> EP4_IN_R {
        EP4_IN_R::new(((self.bits >> 8) & 0x01) != 0)
    }
    #[doc = "Bit 7"]
    #[inline(always)]
    pub fn ep3_out(&self) -> EP3_OUT_R {
        EP3_OUT_R::new(((self.bits >> 7) & 0x01) != 0)
    }
    #[doc = "Bit 6"]
    #[inline(always)]
    pub fn ep3_in(&self) -> EP3_IN_R {
        EP3_IN_R::new(((self.bits >> 6) & 0x01) != 0)
    }
    #[doc = "Bit 5"]
    #[inline(always)]
    pub fn ep2_out(&self) -> EP2_OUT_R {
        EP2_OUT_R::new(((self.bits >> 5) & 0x01) != 0)
    }
    #[doc = "Bit 4"]
    #[inline(always)]
    pub fn ep2_in(&self) -> EP2_IN_R {
        EP2_IN_R::new(((self.bits >> 4) & 0x01) != 0)
    }
    #[doc = "Bit 3"]
    #[inline(always)]
    pub fn ep1_out(&self) -> EP1_OUT_R {
        EP1_OUT_R::new(((self.bits >> 3) & 0x01) != 0)
    }
    #[doc = "Bit 2"]
    #[inline(always)]
    pub fn ep1_in(&self) -> EP1_IN_R {
        EP1_IN_R::new(((self.bits >> 2) & 0x01) != 0)
    }
    #[doc = "Bit 1"]
    #[inline(always)]
    pub fn ep0_out(&self) -> EP0_OUT_R {
        EP0_OUT_R::new(((self.bits >> 1) & 0x01) != 0)
    }
    #[doc = "Bit 0"]
    #[inline(always)]
    pub fn ep0_in(&self) -> EP0_IN_R {
        EP0_IN_R::new((self.bits & 0x01) != 0)
    }
}
|
#![allow(dead_code)]
/// Computes the area of a `w` × `h` rectangle as a plain signed product
/// (negative inputs simply yield a signed result; overflow follows the usual
/// `i32` multiplication rules).
pub fn area(w: i32, h: i32) -> i32 {
    h * w
}
|
// unihernandez22
// https://codeforces.com/problemset/problem/401/C
// simulation
use std::io;
/// Reads "n m" from stdin and prints a binary string with `n` zeros and `m`
/// ones such that no two zeros are adjacent and no three ones are adjacent,
/// or `-1` when impossible (Codeforces 401C).
fn main() {
    let mut line = String::new();
    io::stdin()
        .read_line(&mut line)
        .unwrap();
    // words[0] = n (zeros to place), words[1] = m (ones to place).
    let words: Vec<i64> =
        line
        .split_whitespace()
        .map(|x| x.parse().unwrap())
        .collect();
    let mut n = words[0];
    let mut m = words[1];
    let mut ans = String::new();
    // While ones outnumber zeros, spend two ones per zero via "110" blocks,
    // keeping ones in runs of at most two.
    while m > n && n > 0 && m > 0 {
        ans.push_str("110");
        n -= 1;
        m -= 2;
    }
    // Pair off the remainder one-for-one; the order ("10" vs "01") decides
    // which digit the leftover suffix below can legally follow.
    if m >= n {
        while n > 0 && m > 0 {
            ans.push_str("10");
            n -= 1;
            m -= 1;
        }
    } else {
        while n > 0 && m > 0 {
            ans.push_str("01");
            n -= 1;
            m -= 1;
        }
    }
    // Leftovers beyond one zero or two ones would force "00" or "111".
    if n > 1 || m > 2 {
        println!("-1");
        return;
    }
    print!("{}", ans);
    // Append the small remainder: up to two '1's, then up to one '0'.
    for _ in 0..m {
        print!("1");
    }
    for _ in 0..n {
        print!("0");
    }
    println!();
}
|
use crate::analytics::create_directory;
use crate::maze::maze_genotype::MazeGenome;
use crate::mcc::agent::mcc_agent::MCCAgent;
use crate::neatns::agent::Agent;
use crate::simulator::{simulate_single_mcc, simulate_single_neatns};
use crate::visualization::maze::visualize_maze;
use crate::visualization::simulation::visualize_agent_path;
use crate::visualization::VisualizationOptions;
/// Renders every maze genome in `mazes` to `<path>/maze_<i>.png`.
///
/// Parameters take `&[MazeGenome]` / `&str` instead of `&Vec<_>` / `&String`;
/// existing call sites keep working via deref coercion.
#[allow(dead_code)]
pub fn visualise_mazes(mazes: &[MazeGenome], path: &str) {
    for (i, maze) in mazes.iter().enumerate() {
        let maze_seed_path = format!("{}/maze_{}.png", path, i);
        visualize_maze(&maze.to_phenotype(), maze_seed_path, false);
    }
}
/// Renders a single maze genome to the image file at `path`.
///
/// Takes `&str` instead of `&String` (callers coerce); the former no-op
/// `format!("{}", path)` is replaced by a direct `to_string`.
#[allow(dead_code)]
pub fn visualise_maze(maze: &MazeGenome, path: &str) {
    visualize_maze(&maze.to_phenotype(), path.to_string(), false);
}
/// For each seed maze with a recorded successful agent, re-simulates that
/// agent and renders its path to `maze_<i>_solution.png` in `folder_path`.
/// Mazes without a matching agent are skipped silently.
#[allow(dead_code)]
pub fn visualise_seeds_agent_path(
    mazes: &[MazeGenome],
    agents: &[Agent],
    folder_path: &str,
) {
    for (i, maze) in mazes.iter().enumerate() {
        // Replace the manual index scan with a direct lookup of the first
        // agent whose id matches the maze's successful agent.
        let solver = maze
            .successful_agent_id
            .and_then(|id| agents.iter().find(|agent| agent.id == id));
        if let Some(agent) = solver {
            let file_name = format!("maze_{}_solution.png", i);
            let maze_phenotype = maze.to_phenotype();
            let simulator_result = simulate_single_neatns(
                agent,
                &maze_phenotype,
                maze.get_solution_path_cell_length(),
                true,
            );
            visualize_agent_path(
                &maze_phenotype,
                &simulator_result,
                VisualizationOptions {
                    file_name,
                    folder_path: folder_path.to_string(),
                    save_all_steps: false,
                },
            );
        }
    }
}
/// For each maze with a recorded successful MCC agent, re-simulates that
/// agent and renders its path to `maze_solution_<i>.png` in `folder_path`.
/// (`folder_path` stays owned to keep the existing call signature.)
#[allow(dead_code)]
pub fn visualise_mazes_with_agent_path(
    mazes: &[MazeGenome],
    agents: &[MCCAgent],
    folder_path: String,
) {
    for (i, maze) in mazes.iter().enumerate() {
        // Single chained lookup replaces the nested is_some/unwrap checks.
        let solver = maze
            .successful_agent_id
            .and_then(|id| agents.iter().find(|agent| agent.id == id));
        if let Some(agent) = solver {
            let file_name = format!("maze_solution_{}.png", i);
            let maze_phenotype = maze.to_phenotype();
            let simulator_result = simulate_single_mcc(
                agent,
                &maze_phenotype,
                maze.get_solution_path_cell_length(),
                true,
            );
            visualize_agent_path(
                &maze_phenotype,
                &simulator_result,
                VisualizationOptions {
                    file_name,
                    folder_path: folder_path.clone(),
                    save_all_steps: false,
                },
            );
        }
    }
}
|
/// Binds each `let` statement in sequence, then binds `$version` to the
/// combined version tag of all bound values.
///
/// The first statement must be literally `let <ident> = 0;`; that identifier
/// receives the combined tag (the `0` is a pattern placeholder and is never
/// evaluated).
#[macro_export]
macro_rules! version_deps {
    (
        let $version:ident = 0;
        $(let $v:ident = $e:expr;)+
    ) => {
        $(let $v = $e;)*
        let $version = storm::version_tag::combine(&[$(storm::Tag::tag(&$v),)*]);
    };
}
|
use std::collections::HashMap;
use bson::Array;
use mongocrypt::ctx::KmsProvider;
use serde::Deserialize;
use crate::{
bson::{Bson, Document},
client::options::TlsOptions,
error::{Error, Result},
Namespace,
};
/// Options related to automatic encryption.
///
/// Automatic encryption is an enterprise only feature that only applies to operations on a
/// collection. Automatic encryption is not supported for operations on a database or view, and
/// operations that are not bypassed will result in error (see [libmongocrypt: Auto Encryption
/// Allow-List](
/// https://github.com/mongodb/specifications/blob/master/source/client-side-encryption/client-side-encryption.rst#libmongocrypt-auto-encryption-allow-list
/// )). To bypass automatic encryption for all operations, set bypassAutoEncryption=true in
/// AutoEncryptionOpts.
#[derive(Debug, Clone, Deserialize)]
#[serde(rename_all = "camelCase", deny_unknown_fields)]
pub(crate) struct AutoEncryptionOptions {
    /// Used for data key queries. Will default to an internal client if not set.
    #[serde(skip)]
    pub(crate) key_vault_client: Option<crate::Client>,
    /// A collection that contains all data keys used for encryption and decryption (aka the key
    /// vault collection). Defaults to `keyvault.datakeys` when absent.
    #[serde(default = "default_key_vault_namespace")]
    pub(crate) key_vault_namespace: Namespace,
    /// Options individual to each KMS provider.
    pub(crate) kms_providers: KmsProviders,
    /// Specify a JSONSchema locally.
    ///
    /// Supplying a `schema_map` provides more security than relying on JSON Schemas obtained from
    /// the server. It protects against a malicious server advertising a false JSON Schema, which
    /// could trick the client into sending unencrypted data that should be encrypted.
    ///
    /// Schemas supplied in the `schema_map` only apply to configuring automatic encryption for
    /// client side encryption. Other validation rules in the JSON schema will not be enforced by
    /// the driver and will result in an error.
    pub(crate) schema_map: Option<HashMap<String, Document>>,
    /// Disable automatic encryption and do not spawn mongocryptd. Defaults to false.
    pub(crate) bypass_auto_encryption: Option<bool>,
    /// Options related to mongocryptd. Individual entries are read via the
    /// typed `extra_option` accessor below.
    pub(crate) extra_options: Option<Document>,
    /// Maps namespace to encrypted fields.
    ///
    /// Supplying an `encrypted_fields_map` provides more security than relying on an
    /// encryptedFields obtained from the server. It protects against a malicious server
    /// advertising a false encryptedFields.
    pub(crate) encrypted_fields_map: Option<HashMap<String, Document>>,
    /// Disable serverside processing of encrypted indexed fields, allowing use of explicit
    /// encryption with queryable encryption.
    pub(crate) bypass_query_analysis: Option<bool>,
    /// Disable loading crypt_shared. Test-only; never deserialized.
    #[cfg(test)]
    #[serde(skip)]
    pub(crate) disable_crypt_shared: Option<bool>,
}
/// Default key vault namespace (`keyvault.datakeys`), used when a
/// configuration omits one.
fn default_key_vault_namespace() -> Namespace {
    Namespace {
        db: String::from("keyvault"),
        coll: String::from("datakeys"),
    }
}
impl AutoEncryptionOptions {
    /// Builds options with the required namespace and KMS providers; every
    /// optional setting starts out unset.
    pub(crate) fn new(key_vault_namespace: Namespace, kms_providers: KmsProviders) -> Self {
        Self {
            key_vault_client: None,
            key_vault_namespace,
            kms_providers,
            schema_map: None,
            bypass_auto_encryption: None,
            extra_options: None,
            encrypted_fields_map: None,
            bypass_query_analysis: None,
            #[cfg(test)]
            disable_crypt_shared: None,
        }
    }
}
/// The configured KMS providers: each provider mapped to its credential
/// document, with optional per-provider TLS settings.
#[derive(Deserialize, Debug, Clone)]
pub(crate) struct KmsProviders {
    // Flattened so the serialized form is `{ "<provider>": { ...creds } }`.
    #[serde(flatten)]
    credentials: HashMap<KmsProvider, Document>,
    // Never part of the serialized form; configured programmatically only.
    #[serde(skip)]
    tls_options: Option<KmsProvidersTlsOptions>,
}
/// Per-provider TLS configuration.
pub(crate) type KmsProvidersTlsOptions = HashMap<KmsProvider, TlsOptions>;
impl KmsProviders {
pub(crate) fn new(
providers: impl IntoIterator<Item = (KmsProvider, bson::Document, Option<TlsOptions>)>,
) -> Result<Self> {
let mut credentials = HashMap::new();
let mut tls_options = None;
for (provider, conf, tls) in providers.into_iter() {
credentials.insert(provider.clone(), conf);
if let Some(tls) = tls {
tls_options
.get_or_insert_with(KmsProvidersTlsOptions::new)
.insert(provider, tls);
}
}
if credentials.is_empty() {
return Err(crate::error::Error::invalid_argument("empty kms_providers"));
}
Ok(Self {
credentials,
tls_options,
})
}
pub(crate) fn credentials_doc(&self) -> Result<Document> {
Ok(bson::to_document(&self.credentials)?)
}
pub(crate) fn tls_options(&self) -> Option<&KmsProvidersTlsOptions> {
self.tls_options.as_ref()
}
pub(crate) fn credentials(&self) -> &HashMap<KmsProvider, Document> {
&self.credentials
}
#[cfg(test)]
pub(crate) fn set(&mut self, provider: KmsProvider, creds: Document, tls: Option<TlsOptions>) {
self.credentials.insert(provider.clone(), creds);
if let Some(tls) = tls {
self.tls_options
.get_or_insert_with(KmsProvidersTlsOptions::new)
.insert(provider, tls);
}
}
#[cfg(test)]
pub(crate) fn clear(&mut self, provider: &KmsProvider) {
self.credentials.remove(provider);
if let Some(tls_opts) = &mut self.tls_options {
tls_opts.remove(provider);
}
}
}
impl AutoEncryptionOptions {
pub(crate) fn extra_option<'a, Opt: ExtraOption<'a>>(
&'a self,
opt: &Opt,
) -> Result<Option<Opt::Output>> {
let key = opt.key();
match self.extra_options.as_ref().and_then(|o| o.get(key)) {
None => Ok(None),
Some(b) => match Opt::as_type(b) {
Some(v) => Ok(Some(v)),
None => Err(Error::invalid_argument(format!(
"unexpected type for extra option {:?}: {:?}",
key, b
))),
},
}
}
}
/// Marker for a typed `extra_options` key: pairs a key name with the BSON
/// accessor that extracts its expected type.
pub(crate) trait ExtraOption<'a> {
    // The Rust-side type the option's BSON value converts to.
    type Output;
    fn key(&self) -> &'static str;
    // Returns `None` when `input` is not of the expected BSON type.
    fn as_type(input: &'a Bson) -> Option<Self::Output>;
}
/// An `extra_options` key whose value must be a BSON string.
pub(crate) struct ExtraOptionStr(&'static str);
impl<'a> ExtraOption<'a> for ExtraOptionStr {
    type Output = &'a str;
    fn key(&self) -> &'static str {
        self.0
    }
    fn as_type(input: &'a Bson) -> Option<&'a str> {
        input.as_str()
    }
}
/// An `extra_options` key whose value must be a BSON boolean.
pub(crate) struct ExtraOptionBool(&'static str);
impl<'a> ExtraOption<'a> for ExtraOptionBool {
    type Output = bool;
    fn key(&self) -> &'static str {
        self.0
    }
    fn as_type(input: &'a Bson) -> Option<bool> {
        input.as_bool()
    }
}
/// An `extra_options` key whose value must be a BSON array.
pub(crate) struct ExtraOptionArray(&'static str);
impl<'a> ExtraOption<'a> for ExtraOptionArray {
    type Output = &'a Array;
    fn key(&self) -> &'static str {
        self.0
    }
    fn as_type(input: &'a Bson) -> Option<&'a Array> {
        input.as_array()
    }
}
// Recognized `extra_options` keys (mongocryptd / crypt_shared configuration),
// each paired with the BSON type it must carry.
pub(crate) const EO_MONGOCRYPTD_URI: ExtraOptionStr = ExtraOptionStr("mongocryptdURI");
pub(crate) const EO_MONGOCRYPTD_BYPASS_SPAWN: ExtraOptionBool =
    ExtraOptionBool("mongocryptdBypassSpawn");
pub(crate) const EO_MONGOCRYPTD_SPAWN_PATH: ExtraOptionStr = ExtraOptionStr("mongocryptdSpawnPath");
pub(crate) const EO_MONGOCRYPTD_SPAWN_ARGS: ExtraOptionArray =
    ExtraOptionArray("mongocryptdSpawnArgs");
pub(crate) const EO_CRYPT_SHARED_LIB_PATH: ExtraOptionStr = ExtraOptionStr("cryptSharedLibPath");
pub(crate) const EO_CRYPT_SHARED_REQUIRED: ExtraOptionBool = ExtraOptionBool("cryptSharedRequired");
|
/**
* This example shows how you can use the mid-level wrapper directly as opposed to the
* high-level wrapper. You may want to do this if you prefer a near to 1:1 mapping
* to the GLFW API (the high level bindings alter it considerably).
*/
extern mod GLFW (name = "glfw");
use glfw = GLFW::ml; // use mid-level bindings
// NOTE(review): this file uses pre-1.0 Rust syntax (`do` expressions,
// `fail!`, `extern mod`) and will not build on any modern compiler; kept
// verbatim as a historical example.
fn main() {
    // Run this task on the main thread. Unlike C or C++, a Rust program
    // automatically starts a new thread, so this line is _essential_ to ensure
    // that the OS is able to update the window and receive events from the user.
    do task::spawn_sched(task::PlatformThread) {
        use core::unstable::finally::Finally;
        // The `glfw::{TRUE, FALSE}` constants are added for convenience. You could also use
        // the `GL_TRUE` or `GL_FALSE` constants from you OpenGL bindings.
        if glfw::init() == glfw::FALSE {
            fail!(~"Failed to initialize GLFW");
        }
        // Using `do (|| { ... }).finally { glfw::terminate() }` allows us to ensure that
        // `glfw::terminate` is called even when failure occurs during runtime
        do (|| {
            glfw::set_error_callback(error_callback);
            let window = glfw::create_window(300, 300, "Hello this is window", ptr::null(), ptr::null());
            if window.is_null() { fail!(~"Failed to initialize GLFW window\n"); }
            glfw::set_key_callback(window, key_callback);
            glfw::make_context_current(window);
            // Event loop: poll until the window is flagged for closing.
            while glfw::window_should_close(window) == glfw::FALSE {
                glfw::poll_events();
            }
        }).finally {
            glfw::terminate(); // terminate glfw on completion
        }
    }
}
// GLFW key callback: request window close when Escape is pressed.
// (Legacy pre-1.0 Rust `extern fn` with raw-pointer window handle.)
extern fn key_callback(window: *glfw::GLFWwindow, key: libc::c_int, action: libc::c_int) {
    if action == glfw::PRESS && key == glfw::KEY_ESCAPE {
        glfw::set_window_should_close(window, glfw::TRUE);
    }
}
// GLFW error callback: print the C error description to stdout.
extern fn error_callback(_error: libc::c_int, description: *libc::c_char) {
    io::println(fmt!("GLFW Error: %s", unsafe { str::raw::from_c_str(description) }));
}
|
mod base64_ciphers;
/// Demo: round-trips a sample string through the base64 encoder/decoder,
/// printing both forms.
fn main() {
    let plain = "abcd";
    let encoded = base64_ciphers::encode(&plain);
    println!("{}", encoded);
    let decoded = base64_ciphers::decode(&encoded).unwrap();
    println!("{}", decoded);
}
|
// TLE
/// Brute-force variant: precomputes where each word occurs, then tests every
/// window start against every permutation of the word order — O(n!) in the
/// number of words, kept only for reference.
pub fn find_substring_1(s: String, words: Vec<String>) -> Vec<i32> {
    use std::collections::HashSet;
    // Guard BEFORE touching `words[0]`: the original computed
    // `words[0].len()` first and panicked on an empty `words` vector.
    if words.is_empty() || words[0].is_empty() {
        return vec![]
    }
    let n = words.len();
    let m = words[0].len();
    let ns = s.len();
    if ns < n * m {
        return vec![]
    }
    // All permutations of 0..n, built recursively by inserting n-1 into every
    // position of each (n-1)-permutation.
    fn perm(n: usize) -> Vec<Vec<usize>> {
        match n {
            0 => unreachable!(),
            1 => vec![vec![0]],
            2 => vec![vec![0, 1], vec![1, 0]],
            n => {
                let permutations = perm(n-1);
                let mut result = Vec::<Vec<usize>>::with_capacity(permutations.len() * n);
                for p in permutations {
                    for j in 0..n {
                        let mut r = Vec::<usize>::with_capacity(n);
                        for i in 0..j {
                            r.push(p[i]);
                        }
                        r.push(n-1);
                        for i in j..n-1 {
                            r.push(p[i]);
                        }
                        result.push(r);
                    }
                }
                result
            }
        }
    }
    let permutations = perm(n);
    // occurance[i] = set of byte offsets where words[i] occurs in s.
    // NOTE(review): byte-offset slicing assumes ASCII input.
    let occurance = {
        let mut table = Vec::<HashSet<usize>>::new();
        for i in 0..n {
            table.push(HashSet::new());
            for j in 0..=ns-m {
                if &s.as_str()[j..j+m] == words[i].as_str() {
                    table[i].insert(j);
                }
            }
        }
        table
    };
    // A window at `l` matches a permutation p if word p[i] occurs at l + i*m
    // for every i. Collected into a set since permutations can overlap.
    let mut result = HashSet::<i32>::new();
    for l in 0..=ns-n*m {
        'permut: for p in &permutations {
            for i in 0..n {
                if !occurance[p[i]].contains(&(l+i*m)) {
                    continue 'permut
                }
            }
            result.insert(l as i32);
        }
    }
    // Order of the returned indices is unspecified (HashSet iteration).
    result.into_iter().collect()
}
/// Returns the starting indices of every substring of `s` that is exactly a
/// concatenation of all `words` (each used once, in any order). All words
/// share the same length `m`; each candidate window of `n * m` bytes is
/// checked word-by-word against a multiset of the expected words.
///
/// NOTE(review): offsets are byte indices — assumes ASCII input, since
/// slicing at arbitrary byte positions panics on multi-byte UTF-8.
pub fn find_substring(s: String, words: Vec<String>) -> Vec<i32> {
    use std::collections::HashMap;
    if words.is_empty() || words[0].is_empty() {
        return vec![]
    }
    let n = words.len();
    let m = words[0].len();
    let ns = s.len();
    if ns < n * m {
        return vec![]
    }
    // Multiset of the expected words (entry API instead of get-then-insert;
    // borrowed keys avoid cloning every word).
    let mut expected = HashMap::<&str, i32>::new();
    for word in &words {
        *expected.entry(word.as_str()).or_insert(0) += 1;
    }
    let mut result = vec![];
    'outer: for i in 0..=ns - n * m {
        // Remaining counts for the current window.
        let mut remaining = expected.clone();
        for l in 0..n {
            // Slice directly — the original allocated a String per segment.
            let segment = &s[i + l * m..i + (l + 1) * m];
            match remaining.get_mut(segment) {
                Some(count) if *count > 0 => *count -= 1,
                // Unknown word or count exhausted: window fails.
                _ => continue 'outer,
            }
        }
        result.push(i as i32);
    }
    // Indices are produced in increasing order by construction.
    result
}
#[test]
fn test_find_substring() {
    // Shorthand for building owned strings in the fixtures below.
    let s = |x: &str| x.to_string();
    assert_eq!(
        find_substring(s("wordgoodgoodgoodbestword"), vec![s("word"), s("good"), s("best"), s("good")]),
        vec![8]
    );
    assert_eq!(
        find_substring(s("barfoothefoobarman"), vec![s("foo"), s("bar")]),
        vec![0, 9]
    );
    assert_eq!(
        find_substring(s("wordgoodgoodgoodbestword"), vec![s("word"), s("good"), s("best"), s("word")]),
        vec![]
    );
}
// NOTE(review): this looks svd2rust-generated — prefer regenerating from
// the SVD over hand-editing.
#[doc = "Reader of register STATUS"]
pub type R = crate::R<u32, super::STATUS>;
#[doc = "Reader of field `EC_BUSY`"]
pub type EC_BUSY_R = crate::R<bool, bool>;
impl R {
    #[doc = "Bit 0 - Indicates whether the externally clocked logic is potentially accessing the EZ memory (this is only possible in EZ mode). This bit can be used by SW to determine whether it is safe to issue a SW access to the EZ memory (without bus wait states (a blocked SW access) or bus errors being generated). Note that the INTR_TX.BLOCKED and INTR_RX.BLOCKED interrupt causes are used to indicate whether a SW access was actually blocked by externally clocked logic."]
    #[inline(always)]
    pub fn ec_busy(&self) -> EC_BUSY_R {
        // Bit 0 of the raw STATUS value, exposed as a bool reader.
        EC_BUSY_R::new((self.bits & 0x01) != 0)
    }
}
|
#[ link(name = "randgen",
vers = "0.1.2",
author = "smadhueagle") ];
#[pkg(id = "randgen", vers = "0.1.2")];
// Additional metadata attributes
#[ desc = "A simple random num generator example in rust"];
#[ license = "MIT" ];
#[crate_type = "bin"];
extern mod rustils;
use rustils::ioutils;
use std::io::WriterUtil;
use std::rand,
std::rand::RngUtil;
// Generation parameters: how many numbers to emit and the value range.
static MAX:uint = 25000000;
static LOW:int = 0;
static HIGH:int = 25000;
// Writes MAX random ints to ./test.txt, one per line, logging progress
// periodically. (Pre-1.0 Rust: `~` pointers, `fmt!`, `extern mod`.)
fn main(){
    let file_path = Path("./test.txt");
    let fwriter = ioutils::fwriter(~file_path);
    //create a random number generator
    let mut randgen = rand::rng();
    // Count down from MAX so `n` doubles as the remaining count.
    let mut n = MAX;
    while(n>0) {
        // presumably [LOW, HIGH) — confirm against the old rand API.
        let rand_num:int = randgen.gen_int_range(LOW,HIGH);
        fwriter.write_int(rand_num);
        fwriter.write_char('\n');
        // NOTE(review): fires when the *remaining* count is a multiple of
        // 10000, and `MAX-n+10000` over-reports by 10000 on the first log
        // line — confirm intent before relying on this output.
        if (n%10000)==0 {
            println(fmt!("Generated: %u Random uints in [%d, %d]", MAX-n+10000, LOW, HIGH));
        }
        n=n-1;
    }
}
|
use std::path::{Path, PathBuf};
use std::ffi::OsString;
/// Maps a source `file` into the `target` directory, prefixing the part
/// of the path below the nearest shared ancestor of `base` with
/// `"<steps>_"`, where `<steps>` is how many parent hops from `base` that
/// ancestor is. Files sharing no ancestor with `base` get an `"x_"`
/// prefix with the full path appended.
pub fn get_obj_path<TP: AsRef<Path>, BP: AsRef<Path>, FP: AsRef<Path>>(target: TP, base: BP, file: FP) -> PathBuf {
    let target = target.as_ref();
    let file = file.as_ref();
    // Walk up from `base` through its ancestors; the number of hops taken
    // becomes the numeric prefix of the emitted object path.
    for (depth, ancestor) in base.as_ref().ancestors().enumerate() {
        if let Ok(rest) = file.strip_prefix(ancestor) {
            let mut name: OsString = format!("{}_", depth).into();
            name.push(rest);
            return target.join(name);
        }
    }
    // No ancestor of `base` prefixes `file` (possible e.g. across Windows
    // drive prefixes): fall back to the "x_" form.
    let mut name = OsString::from("x_");
    name.push(file);
    target.join(name)
}
#[cfg(test)]
mod tests {
    use std::path::Path;
    use super::get_obj_path;
    /// Maps `file` relative to `base` into "/target" and compares the
    /// result against `expected`.
    fn check(base: &str, file: &str, expected: &str) {
        assert_eq!(get_obj_path("/target", base, file), Path::new(expected));
    }
    #[test]
    fn basic() {
        check("/base/dir", "/base/dir/file", "/target/0_file");
    }
    #[test]
    fn child() {
        check("/base/dir", "/base/dir/child/file", "/target/0_child/file");
    }
    #[test]
    fn parent() {
        check("/base/dir", "/base/file", "/target/1_file");
    }
    #[test]
    fn cousin() {
        check("/base/dir", "/base/child/file", "/target/1_child/file");
    }
    #[test]
    fn grand_parent() {
        check("/base/dir", "/file", "/target/2_file");
    }
    #[test]
    fn grand_cousin() {
        check("/base/dir", "/child1/child2/file", "/target/2_child1/child2/file");
    }
}
|
use std::fs::File;
use std::io::prelude::*;
use std::path::Path;
use crate::game::game::*;
/// Writes the game's fitted line as a two-line CSV (`m,b` header plus the
/// rescaled slope and intercept) to `output/save.csv`.
///
/// Panics if the file cannot be created or written.
pub fn export_to_file(game: &Game) {
    // Rescale from normalized coordinates back into value space.
    let b = game.b * game.max_values.y;
    let m = (game.m * game.max_values.y) / game.max_values.x;
    let output = format!("m,b\n{:.10},{:.10}", m, b);
    let path = Path::new("output/save.csv");
    let display = path.display();
    let mut file = File::create(&path)
        .unwrap_or_else(|why| panic!("couldn't create {}: {}", display, why));
    if let Err(why) = file.write_all(output.as_bytes()) {
        panic!("couldn't write to {}: {}", display, why);
    }
    println!("successfully wrote to {}", display);
}
|
use ::{Value, AresResult, AresError};
use super::util::expect_arity;
/// Builtin `(some x)`: wraps its single argument in an option.
pub fn some(args: &[Value]) -> AresResult<Value> {
    try!(expect_arity(args, |l| l == 1, "exactly 1"));
    let inner = Box::new(args[0].clone());
    Ok(Value::Option(Some(inner)))
}
/// Builtin `(none)`: produces the empty option; takes no arguments.
pub fn none(args: &[Value]) -> AresResult<Value> {
    try!(expect_arity(args, |l| l == 0, "exactly 0"));
    let empty: Option<Box<Value>> = None;
    Ok(Value::Option(empty))
}
/// Builtin `(unwrap opt)`: extracts the value from an option, erroring on
/// `none` or on a non-option argument.
pub fn unwrap(args: &[Value]) -> AresResult<Value> {
    try!(expect_arity(args, |l| l == 1, "exactly 1"));
    match args[0] {
        Value::Option(Some(ref res)) => Ok((**res).clone()),
        Value::Option(None) => Err(AresError::UnwrapNone),
        ref other => Err(AresError::UnexpectedType {
            value: other.clone(),
            expected: "Option".into()
        })
    }
}
|
//! Day 3.
//!
//! Counting valid triangles based on the length of each side. A
//! triangle is considered 'valid' if all sides are shorter than the
//! sum of the other two sides.
/// Counts the valid triangles in a flat list of sides.
///
/// Sides are consumed in groups of three. Fix: a trailing partial group
/// (input length not a multiple of 3) is now ignored instead of causing
/// an out-of-bounds panic inside `is_triangular`.
fn count_valid_triangles(numbers: Vec<i32>) -> usize {
    numbers.chunks(3)
        .filter(|sides| sides.len() == 3 && is_triangular(sides))
        .count()
}
/// Get the whitespace-separated numbers from a string.
///
/// Panics if a token is not a valid `i32` — acceptable for trusted puzzle
/// input. (`parse()` replaces the non-idiomatic `from_str_radix(s, 10)`.)
fn get_numbers(triangles: &str) -> Vec<i32> {
    triangles.split_whitespace()
        .map(|s| s.parse().unwrap())
        .collect()
}
/// Returns true if the given sides form a valid triangle: every side is
/// strictly shorter than the sum of the others.
///
/// Generalized from a hard-coded `0..3` index loop to all slice lengths:
/// short slices no longer panic, and the documented "all sides" contract
/// is applied to every element.
pub fn is_triangular(sides: &[i32]) -> bool {
    let total: i32 = sides.iter().sum();
    // total - len is the sum of the *other* sides.
    sides.iter().all(|&len| total - len > len)
}
/// Counts the valid triangles, taking sides from each line.
pub fn count_by_line(triangles: &str) -> usize {
    let sides = get_numbers(triangles);
    count_valid_triangles(sides)
}
/// Counts the valid triangles in the column selected by `offset`.
fn count_at_offset<'a, T>(numbers: T, offset: usize) -> usize
    where T: IntoIterator<Item = &'a (usize, i32)>
{
    // Keep entries whose index falls in this column (index + offset
    // divisible by 3), then drop the index.
    let column: Vec<_> = numbers.into_iter()
        .filter(|&&(i, _)| (i + offset) % 3 == 0)
        .map(|&(_, side)| side)
        .collect();
    count_valid_triangles(column)
}
/// Counts the valid triangles, taking sides from each column.
pub fn count_by_column(triangles: &str) -> usize {
    // Pair each number with its flat index so columns can be selected.
    let indexed: Vec<_> = get_numbers(triangles)
        .into_iter()
        .enumerate()
        .collect();
    let mut total = 0;
    for offset in 0..3 {
        total += count_at_offset(&indexed, offset);
    }
    total
}
#[cfg(test)]
mod test {
    use super::*;
    #[test]
    fn is_triangular_valid_triangles() {
        assert!(is_triangular(&[2, 1, 2]));
    }
    #[test]
    fn is_triangular_with_invalid_triangles() {
        assert!(!is_triangular(&[5, 10, 25]));
    }
    // The fixture string deliberately mixes newlines and a `\` line
    // continuation: get_numbers must tolerate arbitrary whitespace.
    #[test]
    fn vertical_example() {
        const TRIANGLES: &'static str = "101 301 501
102 302 502
103 303 503
201 401 601
202 402 \
602
203 403 603";
        assert_eq!(6, count_by_column(TRIANGLES));
    }
}
|
use crate::{
bson::Document,
event::{
cmap::{CmapEvent, ConnectionCheckoutFailedReason, ConnectionClosedReason},
command::CommandEvent,
},
test::{Event, SdamEvent},
ServerType,
};
use serde::Deserialize;
/// An event assertion from a unified-format spec test. `untagged` means
/// serde selects the variant by whichever nested event shape matches.
#[derive(Debug, Deserialize)]
#[serde(untagged, deny_unknown_fields, rename_all = "camelCase")]
pub(crate) enum ExpectedEvent {
    /// Connection-pool (CMAP) event.
    Cmap(ExpectedCmapEvent),
    /// Command-monitoring event.
    Command(ExpectedCommandEvent),
    /// SDAM event (boxed to keep the enum small).
    Sdam(Box<ExpectedSdamEvent>),
}
/// Expected command-monitoring event. All fields are `Option`s —
/// presumably `None` means "don't assert on this field"; the matcher
/// decides (see `matcher::events_match`).
#[derive(Debug, Deserialize)]
#[serde(deny_unknown_fields)]
pub(crate) enum ExpectedCommandEvent {
    /// Matches a command-started event.
    #[serde(rename = "commandStartedEvent", rename_all = "camelCase")]
    Started {
        command_name: Option<String>,
        database_name: Option<String>,
        command: Option<Document>,
        has_service_id: Option<bool>,
        has_server_connection_id: Option<bool>,
    },
    /// Matches a command-succeeded event.
    #[serde(rename = "commandSucceededEvent", rename_all = "camelCase")]
    Succeeded {
        command_name: Option<String>,
        reply: Option<Document>,
        has_service_id: Option<bool>,
        has_server_connection_id: Option<bool>,
    },
    /// Matches a command-failed event.
    #[serde(rename = "commandFailedEvent", rename_all = "camelCase")]
    Failed {
        command_name: Option<String>,
        has_service_id: Option<bool>,
        has_server_connection_id: Option<bool>,
    },
}
/// Expected connection-pool (CMAP) event. Most variants carry no fields:
/// matching is by event kind alone; `reason`/`has_service_id` fields are
/// optional extra assertions.
#[derive(Debug, Deserialize)]
#[serde(deny_unknown_fields)]
pub(crate) enum ExpectedCmapEvent {
    #[serde(rename = "poolCreatedEvent")]
    PoolCreated {},
    #[serde(rename = "poolReadyEvent")]
    PoolReady {},
    #[serde(rename = "poolClearedEvent", rename_all = "camelCase")]
    PoolCleared { has_service_id: Option<bool> },
    #[serde(rename = "poolClosedEvent")]
    PoolClosed {},
    #[serde(rename = "connectionCreatedEvent")]
    ConnectionCreated {},
    #[serde(rename = "connectionReadyEvent")]
    ConnectionReady {},
    #[serde(rename = "connectionClosedEvent", rename_all = "camelCase")]
    ConnectionClosed {
        reason: Option<ConnectionClosedReason>,
    },
    #[serde(rename = "connectionCheckOutStartedEvent")]
    ConnectionCheckOutStarted {},
    #[serde(rename = "connectionCheckOutFailedEvent", rename_all = "camelCase")]
    ConnectionCheckOutFailed {
        reason: Option<ConnectionCheckoutFailedReason>,
    },
    #[serde(rename = "connectionCheckedOutEvent")]
    ConnectionCheckedOut {},
    #[serde(rename = "connectionCheckedInEvent")]
    ConnectionCheckedIn {},
}
/// Expected SDAM (topology/server-monitoring) event.
#[derive(Debug, Deserialize)]
pub(crate) enum ExpectedSdamEvent {
    #[serde(rename = "serverDescriptionChangedEvent", rename_all = "camelCase")]
    ServerDescriptionChanged {
        // Deserialized for schema completeness but not asserted on here.
        #[allow(unused)]
        previous_description: Option<TestServerDescription>,
        new_description: Option<TestServerDescription>,
    },
    #[serde(rename = "topologyDescriptionChangedEvent")]
    TopologyDescriptionChanged {},
    #[serde(rename = "serverHeartbeatSucceededEvent", rename_all = "camelCase")]
    ServerHeartbeatSucceeded {},
    #[serde(rename = "serverHeartbeatFailedEvent", rename_all = "camelCase")]
    ServerHeartbeatFailed {},
}
/// Subset of a server description asserted on by SDAM event expectations:
/// only the server type is matched.
#[derive(Debug, Deserialize)]
#[serde(rename_all = "camelCase")]
pub(crate) struct TestServerDescription {
    #[serde(rename = "type")]
    pub(crate) server_type: Option<ServerType>,
}
/// An event kind a test declares it wants captured ("observeEvents" in
/// the unified test format); used as a filter via `ObserveEvent::matches`.
#[derive(Copy, Clone, Debug, Deserialize)]
pub(crate) enum ObserveEvent {
    #[serde(rename = "commandStartedEvent")]
    CommandStarted,
    #[serde(rename = "commandSucceededEvent")]
    CommandSucceeded,
    #[serde(rename = "commandFailedEvent")]
    CommandFailed,
    #[serde(rename = "poolCreatedEvent")]
    PoolCreated,
    #[serde(rename = "poolReadyEvent")]
    PoolReady,
    #[serde(rename = "poolClearedEvent")]
    PoolCleared,
    #[serde(rename = "poolClosedEvent")]
    PoolClosed,
    #[serde(rename = "connectionCreatedEvent")]
    ConnectionCreated,
    #[serde(rename = "connectionReadyEvent")]
    ConnectionReady,
    #[serde(rename = "connectionClosedEvent")]
    ConnectionClosed,
    #[serde(rename = "connectionCheckOutStartedEvent")]
    ConnectionCheckOutStarted,
    #[serde(rename = "connectionCheckOutFailedEvent")]
    ConnectionCheckOutFailed,
    #[serde(rename = "connectionCheckedOutEvent")]
    ConnectionCheckedOut,
    #[serde(rename = "connectionCheckedInEvent")]
    ConnectionCheckedIn,
    #[serde(rename = "serverDescriptionChangedEvent")]
    ServerDescriptionChanged,
    #[serde(rename = "topologyDescriptionChangedEvent")]
    TopologyDescriptionChanged,
    #[serde(rename = "serverHeartbeatStartedEvent")]
    ServerHeartbeatStarted,
    #[serde(rename = "serverHeartbeatSucceededEvent")]
    ServerHeartbeatSucceeded,
    #[serde(rename = "serverHeartbeatFailedEvent")]
    ServerHeartbeatFailed,
}
impl ObserveEvent {
    /// Returns true when `event` is of the kind this filter observes.
    ///
    /// Fix: `Self::ServerDescriptionChanged` previously had no arm and
    /// fell through to `false`, so observed serverDescriptionChangedEvents
    /// were silently dropped even though the variant is deserializable.
    pub(crate) fn matches(&self, event: &Event) -> bool {
        #[allow(clippy::match_like_matches_macro)]
        match (self, event) {
            (Self::CommandStarted, Event::Command(CommandEvent::Started(_))) => true,
            (Self::CommandSucceeded, Event::Command(CommandEvent::Succeeded(_))) => true,
            (Self::CommandFailed, Event::Command(CommandEvent::Failed(_))) => true,
            (Self::PoolCreated, Event::Cmap(CmapEvent::PoolCreated(_))) => true,
            (Self::PoolReady, Event::Cmap(CmapEvent::PoolReady(_))) => true,
            (Self::PoolCleared, Event::Cmap(CmapEvent::PoolCleared(_))) => true,
            (Self::PoolClosed, Event::Cmap(CmapEvent::PoolClosed(_))) => true,
            (Self::ConnectionCreated, Event::Cmap(CmapEvent::ConnectionCreated(_))) => true,
            (Self::ConnectionReady, Event::Cmap(CmapEvent::ConnectionReady(_))) => true,
            (Self::ConnectionClosed, Event::Cmap(CmapEvent::ConnectionClosed(_))) => true,
            (
                Self::ConnectionCheckOutStarted,
                Event::Cmap(CmapEvent::ConnectionCheckoutStarted(_)),
            ) => true,
            (
                Self::ConnectionCheckOutFailed,
                Event::Cmap(CmapEvent::ConnectionCheckoutFailed(_)),
            ) => true,
            (Self::ConnectionCheckedOut, Event::Cmap(CmapEvent::ConnectionCheckedOut(_))) => true,
            (Self::ConnectionCheckedIn, Event::Cmap(CmapEvent::ConnectionCheckedIn(_))) => true,
            (
                Self::ServerDescriptionChanged,
                Event::Sdam(SdamEvent::ServerDescriptionChanged(_)),
            ) => true,
            (
                Self::TopologyDescriptionChanged,
                Event::Sdam(SdamEvent::TopologyDescriptionChanged(_)),
            ) => true,
            (Self::ServerHeartbeatStarted, Event::Sdam(SdamEvent::ServerHeartbeatStarted(_))) => {
                true
            }
            (
                Self::ServerHeartbeatSucceeded,
                Event::Sdam(SdamEvent::ServerHeartbeatSucceeded(_)),
            ) => true,
            (Self::ServerHeartbeatFailed, Event::Sdam(SdamEvent::ServerHeartbeatFailed(_))) => true,
            _ => false,
        }
    }
}
|
use std::cell::Cell;
/// A lazily-evaluated integer argument (thunk).
trait Arg {
    fn run(&self) -> i32;
}
/// Plain integers evaluate to themselves.
impl Arg for i32 {
    fn run(&self) -> i32 { *self }
}
/// Captures a call frame of `a`: the shared counter `k` plus four of its
/// argument thunks, so the frame can be re-entered later.
struct B<'a> {
    k: &'a Cell<i32>,
    x1: &'a Arg,
    x2: &'a Arg,
    x3: &'a Arg,
    x4: &'a Arg,
}
/// Re-entering the frame decrements the shared counter and recurses into
/// `a`, shifting the thunks by one (the frame itself becomes x1).
impl<'a> Arg for B<'a> {
    fn run(&self) -> i32 {
        self.k.set(self.k.get() - 1);
        a(self.k.get(), self, self.x1, self.x2, self.x3, self.x4)
    }
}
// NOTE(review): this mirrors Knuth's "man or boy" test; the exact result
// depends on the deferred-evaluation order above — confirm against the
// classic reference before changing anything here.
fn a(k: i32, x1: &Arg, x2: &Arg, x3: &Arg, x4: &Arg, x5: &Arg) -> i32 {
    if k <= 0 {
        // Base case: only the last two thunks are forced.
        x4.run() + x5.run()
    } else {
        // Fresh frame over a new Cell: recursive calls through B share one
        // mutable counter per frame.
        B{
            k: &Cell::new(k),
            x1, x2, x3, x4
        }.run()
    }
}
pub fn main() {
    // k = 10 with args (1, -1, -1, 1, 0) — presumably prints -67, the
    // classic man-or-boy value; verify against a trusted implementation.
    println!("{}", a(10, &1, &-1, &-1, &1, &0));
}
|
use nom::{types::CompleteStr, *};
use crate::ir::{FnSig, Type};
/// A parsed LLVM-IR `define` (function definition): name, signature and
/// the statements found in its body.
#[derive(Clone, Debug, PartialEq)]
pub struct Define<'a> {
    pub name: &'a str,
    pub sig: FnSig<'a>,
    pub stmts: Vec<Stmt<'a>>,
}
/// One statement inside a `define` body; only the call-shaped statements
/// carry payloads — everything else collapses to `Comment`/`Label`/`Other`.
#[derive(Clone, Debug, PartialEq)]
pub enum Stmt<'a> {
    // ` call void asm sideeffect "cpsid i"`
    Asm(&'a str),
    BitcastCall(Option<&'a str>),
    DirectCall(&'a str),
    IndirectCall(FnSig<'a>),
    Comment,
    // `start:`
    Label,
    Other,
}
/// A single formal parameter; only its type is kept.
#[derive(Clone, Debug, PartialEq)]
struct Parameter<'a>(Type<'a>);
// Parses a parameter: a type, optionally followed by attributes and an
// alias; everything but the type is discarded.
named!(parameter<CompleteStr, Parameter>, do_parse!(
    ty: call!(super::type_) >>
    many0!(do_parse!(space >> call!(super::attribute) >> (()))) >>
    opt!(do_parse!(space >> call!(super::alias) >> (()))) >>
    (Parameter(ty))
));
// Parses a whole `define … { … }` block: attributes, return type (`void`
// maps to `None`), name, parameter list, then the body statements up to
// the closing `}`.
named!(pub parse<CompleteStr, Define>, do_parse!(
    tag!("define") >> space >>
    many0!(do_parse!(call!(super::attribute) >> space >> (()))) >>
    output: alt!(map!(call!(super::type_), Some) | map!(tag!("void"), |_| None)) >> space >>
    name: call!(super::function) >>
    // parameter list
    char!('(') >>
    inputs: separated_list!(
        do_parse!(char!(',') >> space >> (())),
        map!(parameter, |p| p.0)
    ) >> char!(')') >>
    // TODO we likely want to parse the metadata (`!dbg !0`) that comes after the parameter list
    not_line_ending >> line_ending >>
    stmts: separated_nonempty_list!(many1!(line_ending), call!(super::define::stmt)) >>
    opt!(line_ending) >> tag!("}") >>
    (Define { name: name.0, stmts, sig: FnSig { inputs, output: output.map(Box::new) } })
));
// A basic-block label (`name:`), optionally followed by a trailing
// comment (`; preds = …`).
named!(label<CompleteStr, Stmt>, do_parse!(
    alt!(map!(super::ident, drop) | map!(super::string, drop)) >>
    char!(':') >>
    opt!(do_parse!(space >> call!(super::comment) >> ())) >>
    (Stmt::Label)
));
// A line consisting solely of an IR comment.
named!(comment<CompleteStr, Stmt>, do_parse!(
    call!(super::comment) >>
    (Stmt::Comment)
));
// An inline-assembly call; captures the asm template string.
named!(asm<CompleteStr, Stmt>, do_parse!(
    opt!(do_parse!(tag!("tail") >> space >> (()))) >>
    tag!("call") >> space >>
    alt!(map!(call!(super::type_), drop) | map!(tag!("void"), drop)) >> space >>
    tag!("asm") >> space >>
    many0!(do_parse!(call!(super::attribute) >> space >> (()))) >>
    asm: call!(super::string) >>
    // NOTE shortcut
    not_line_ending >>
    (Stmt::Asm(asm.0))
));
/// A single call argument; only its type is kept.
#[derive(Clone, Debug, PartialEq)]
struct Argument<'a>(Type<'a>);
// Parses an argument: type, attributes, then one of several value forms
// (bitcast / getelementptr / local / literal), all discarded but the type.
named!(argument<CompleteStr, Argument>, do_parse!(
    ty: call!(super::type_) >> space >>
    many0!(do_parse!(call!(super::attribute) >> space >> (()))) >>
    alt!(
        map!(call!(super::bitcast), drop) |
        map!(call!(super::getelementptr), drop) |
        map!(super::local, drop) |
        map!(digit, drop)) >>
    (Argument(ty))
));
// A call through a `bitcast (… @name to …)` expression; captures the
// bitcast target's name when present.
named!(bitcast_call<CompleteStr, Stmt>, do_parse!(
    opt!(do_parse!(tag!("tail") >> space >> (()))) >>
    // XXX can this be `invoke`?
    tag!("call") >> space >>
    // not seen in practice (yet?)
    // many0!(do_parse!(call!(super::attribute) >> space >> (()))) >>
    alt!(map!(call!(super::type_), drop) | map!(tag!("void"), drop)) >> space >>
    name: call!(super::bitcast) >>
    // NOTE shortcut
    not_line_ending >>
    (Stmt::BitcastCall(name.0))
));
// A `call`/`invoke` of a named function; captures the callee's name.
named!(direct_call<CompleteStr, Stmt>, do_parse!(
    opt!(do_parse!(tag!("tail") >> space >> (()))) >>
    alt!(tag!("call") | tag!("invoke")) >> space >>
    many0!(do_parse!(call!(super::attribute) >> space >> (()))) >>
    alt!(map!(call!(super::type_), drop) | map!(tag!("void"), drop)) >> space >>
    name: call!(super::function) >>
    // TODO we likely want to parse the metadata (`!dbg !0`) that comes after the argument list
    // NOTE shortcut
    char!('(') >> not_line_ending >>
    (Stmt::DirectCall(name.0))
));
// A call through a local (`%N`) function pointer; captures the inferred
// signature (argument types + return type) instead of a name.
named!(indirect_call<CompleteStr, Stmt>, do_parse!(
    opt!(do_parse!(tag!("tail") >> space >> (()))) >>
    alt!(tag!("call") | tag!("invoke")) >> space >>
    many0!(do_parse!(call!(super::attribute) >> space >> (()))) >>
    output: alt!(map!(call!(super::type_), Some) | map!(tag!("void"), |_| None)) >> space >>
    char!('%') >> digit >>
    inputs: delimited!(
        char!('('),
        separated_list!(
            do_parse!(char!(',') >> space >> (())),
            map!(argument, |arg| arg.0)
        ),
        char!(')')
    ) >>
    // TODO we likely want to parse the metadata (`!dbg !0`) that comes after the argument list
    // NOTE shortcut
    not_line_ending >>
    (Stmt::IndirectCall(FnSig { inputs, output: output.map(Box::new) }))
));
// Any other statement: a run of non-space tokens, rejected if any token
// is `call` (those must be handled by the call parsers above).
named!(other<CompleteStr, Stmt>, do_parse!(
    separated_nonempty_list!(
        space,
        map_res!(is_not!(" \t\r\n"), |t: CompleteStr| if t.0 == "call" { Err(()) } else { Ok(()) })
    ) >>
    (Stmt::Other)
));
// NOTE we discard the LHS of assignments
named!(assign<CompleteStr, Stmt>, do_parse!(
    call!(super::local) >> space >> char!('=') >> space >>
    rhs: alt!(asm | bitcast_call | direct_call | indirect_call | other) >>
    (rhs)
));
// Top-level statement parser: a label or comment at column 0, otherwise
// an indented assignment / call / other statement.
named!(pub stmt<CompleteStr, Stmt>, alt!(
    label |
    comment |
    do_parse!(
        space >>
        stmt: alt!(assign | asm | bitcast_call | direct_call | indirect_call | other) >>
        (stmt)
    )
));
#[cfg(test)]
mod tests {
use nom::types::CompleteStr as S;
use super::{Argument, Define, Parameter};
use crate::ir::{FnSig, Stmt, Type};
#[test]
fn argument() {
assert_eq!(
super::argument(S(r#"{}* nonnull align 1 %3"#)),
Ok((
S(""),
Argument(Type::Pointer(Box::new(Type::Struct(vec![]))))
))
);
assert_eq!(
super::argument(S(r#"[0 x i8]* noalias nonnull readonly align 1 bitcast (<{ [11 x i8] }>* @anon.f060a8fe91113516c6f72b45ea256765.59 to [0 x i8]*)"#)),
Ok((
S(""),
Argument(Type::Pointer(Box::new(Type::Array(0, Box::new(Type::Integer(8))))))
))
);
assert_eq!(
super::argument(S(r#"%"core::result::Result<(), io::error::Error>"* noalias nocapture nonnull sret dereferenceable(16) %26"#)),
Ok((
S(""),
Argument(Type::Pointer(Box::new(Type::Alias("core::result::Result<(), io::error::Error>"))))
))
);
assert_eq!(
super::argument(S(r#"{}* nonnull align 1 %723"#)),
Ok((
S(""),
Argument(Type::Pointer(Box::new(Type::Struct(vec![]))))
))
);
assert_eq!(
super::argument(S(r#"[0 x i8]* noalias nonnull readonly align 1 getelementptr inbounds (<{ [0 x i8] }>, <{ [0 x i8] }>* @anon.3751ff68b49c735a867036886cf6a576.71, i32 0, i32 0)"#)),
Ok((
S(""),
Argument(Type::Pointer(Box::new(Type::Array(0, Box::new(Type::Integer(8))))))
))
);
}
#[test]
fn asm() {
assert_eq!(
super::asm(S(
r#"call void asm sideeffect "cpsie i", "~{memory}"() #7, !dbg !5578, !srcloc !5475"#
)),
Ok((S(""), Stmt::Asm("cpsie i")))
);
assert_eq!(
super::asm(S(
r#"tail call i32 asm sideeffect "mrs $0, BASEPRI", "=r"() #5, !dbg !1270, !srcloc !1280"#
)),
Ok((S(""), Stmt::Asm("mrs $0, BASEPRI")))
);
}
#[test]
fn assign() {
assert_eq!(
super::assign(S(r#"%0 = tail call nonnull i32 (i32)* @foo(), !dbg !1200"#)),
Ok((S(""), Stmt::DirectCall("foo")))
);
assert_eq!(
super::assign(S(r#"%113 = call zeroext i1 %112({}* nonnull align 1 %109, [0 x i8]* noalias nonnull readonly align 1 %., i32 %.9) #10, !dbg !30714, !noalias !30727"#)),
Ok((S(""), Stmt::IndirectCall(FnSig {
inputs: vec![
Type::Pointer(Box::new(Type::Struct(vec![]))),
Type::Pointer(Box::new(Type::Array(0, Box::new(Type::Integer(8))))),
Type::Integer(32),
],
output: Some(Box::new(Type::Integer(1))),
})))
);
assert_eq!(
super::assign(S(r#"%_0.sroa.0.0.insert.insert.i.i39 = tail call i32 @llvm.bswap.i32(i32 %page.0.i38) #9"#)),
Ok((S(""), Stmt::DirectCall("llvm.bswap.i32")))
);
}
#[test]
fn bitcast_call() {
assert_eq!(
super::bitcast_call(S(
r#"tail call i32 bitcast (i8* @__sbss to i32 ()*)() #6, !dbg !1177"#
)),
Ok((S(""), Stmt::BitcastCall(Some("__sbss"))))
);
}
#[test]
fn direct_call() {
assert_eq!(
super::direct_call(S(
r#"call void @llvm.dbg.value(metadata %"blue_pill::ItmLogger"* %0, metadata !2111, metadata !DIExpression()), !dbg !2115"#
)),
Ok((S(""), Stmt::DirectCall("llvm.dbg.value")))
);
assert_eq!(
super::direct_call(S(r#"tail call nonnull i32 (i32)* @foo(), !dbg !1200"#)),
Ok((S(""), Stmt::DirectCall("foo")))
);
assert_eq!(
super::direct_call(S(r#"tail call i32 @llvm.bswap.i32(i32 %page.0.i) #9"#)),
Ok((S(""), Stmt::DirectCall("llvm.bswap.i32")))
);
}
#[test]
fn indirect_call() {
assert_eq!(
super::indirect_call(S(r#"tail call i32 %0(i32 0) #8, !dbg !1200"#)),
Ok((
S(""),
Stmt::IndirectCall(FnSig {
inputs: vec![Type::Integer(32)],
output: Some(Box::new(Type::Integer(32)))
})
))
);
assert_eq!(
super::indirect_call(S(r#"call zeroext i1 %8({}* nonnull align 1 %3, [0 x i8]* noalias nonnull readonly align 1 bitcast (<{ [11 x i8] }>* @anon.f060a8fe91113516c6f72b45ea256765.59 to [0 x i8]*), i64 11), !dbg !4725, !noalias !4742"#)),
Ok((
S(""),
Stmt::IndirectCall(FnSig {
inputs: vec![
Type::Pointer(Box::new(Type::Struct(vec![]))),
Type::Pointer(Box::new(Type::Array(0, Box::new(Type::Integer(8))))),
Type::Integer(64),
],
output: Some(Box::new(Type::Integer(1)))
})
))
);
assert_eq!(
super::indirect_call(S(r#"call zeroext i1 %98({}* nonnull align 1 %93, [0 x i8]* noalias nonnull readonly align 1 bitcast (<{ [10 x i8] }>* @1 to [0 x i8]*), i32 10) #10, !dbg !5301"#)),
Ok((
S(""),
Stmt::IndirectCall(FnSig {
inputs: vec![
Type::Pointer(Box::new(Type::Struct(vec![]))),
Type::Pointer(Box::new(Type::Array(0, Box::new(Type::Integer(8))))),
Type::Integer(32),
],
output: Some(Box::new(Type::Integer(1)))
})
))
);
}
#[test]
fn label() {
assert_eq!(
super::label(S(
r#""_ZN36_$LT$jnet..ether..Frame$LT$B$GT$$GT$11payload_mut17hc31fdc79b700f841E.exit.i.i": ; preds = %bb3.i96.i"#
)),
Ok((S(""), Stmt::Label))
);
assert_eq!(
super::label(S(
r#"bb3.i96.i: ; preds = %bb37.i"#
)),
Ok((S(""), Stmt::Label))
);
}
#[test]
fn other() {
assert_eq!(
super::other(S("ret void, !dbg !1377")),
Ok((S(""), Stmt::Other))
);
}
#[test]
fn parameter() {
assert_eq!(
super::parameter(S(
r#"%"enc28j60::Enc28j60<stm32f103xx_hal::spi::Spi<stm32f103xx::SPI1, (stm32f103xx_hal::gpio::gpioa::PA5<stm32f103xx_hal::gpio::Alternate<stm32f103xx_hal::gpio::PushPull>>, stm32f103xx_hal::gpio::gpioa::PA6<stm32f103xx_hal::gpio::Input<stm32f103xx_hal::gpio::Floating>>, stm32f103xx_hal::gpio::gpioa::PA7<stm32f103xx_hal::gpio::Alternate<stm32f103xx_hal::gpio::PushPull>>)>, stm32f103xx_hal::gpio::gpioa::PA4<stm32f103xx_hal::gpio::Output<stm32f103xx_hal::gpio::PushPull>>, enc28j60::Unconnected, stm32f103xx_hal::gpio::gpioa::PA3<stm32f103xx_hal::gpio::Output<stm32f103xx_hal::gpio::PushPull>>>"* nocapture align 2 dereferenceable(6)"#
)),
Ok((S(""), Parameter(Type::Pointer(Box::new(Type::Alias("enc28j60::Enc28j60<stm32f103xx_hal::spi::Spi<stm32f103xx::SPI1, (stm32f103xx_hal::gpio::gpioa::PA5<stm32f103xx_hal::gpio::Alternate<stm32f103xx_hal::gpio::PushPull>>, stm32f103xx_hal::gpio::gpioa::PA6<stm32f103xx_hal::gpio::Input<stm32f103xx_hal::gpio::Floating>>, stm32f103xx_hal::gpio::gpioa::PA7<stm32f103xx_hal::gpio::Alternate<stm32f103xx_hal::gpio::PushPull>>)>, stm32f103xx_hal::gpio::gpioa::PA4<stm32f103xx_hal::gpio::Output<stm32f103xx_hal::gpio::PushPull>>, enc28j60::Unconnected, stm32f103xx_hal::gpio::gpioa::PA3<stm32f103xx_hal::gpio::Output<stm32f103xx_hal::gpio::PushPull>>>"))))))
);
assert_eq!(
super::parameter(S(
r#"%"jnet::mac::Addr"* noalias nocapture readonly dereferenceable(6) %value"#
)),
Ok((
S(""),
Parameter(Type::Pointer(Box::new(Type::Alias("jnet::mac::Addr"))))
))
);
assert_eq!(
super::parameter(S(r#"float"#)),
Ok((S(""), Parameter(Type::Float)))
);
}
#[test]
fn parse() {
assert_eq!(
super::parse(S(
r#"define internal void @_ZN4core3ptr18real_drop_in_place17h10d0d6d6b26fb8afE(%"blue_pill::ItmLogger"* nocapture nonnull align 1) unnamed_addr #0 !dbg !2105 {
start:
ret void
}"#
)),
Ok(
(S(""),
Define {
name: "_ZN4core3ptr18real_drop_in_place17h10d0d6d6b26fb8afE",
stmts: vec![Stmt::Label, Stmt::Other],
sig: FnSig {
inputs: vec![Type::Pointer(Box::new(Type::Alias("blue_pill::ItmLogger")))],
output: None,
},
}))
);
assert_eq!(
super::parse(S(
r#"define internal fastcc void @_ZN3std10sys_common12thread_local22register_dtor_fallback17h254497a6d25774eeE(i8*, void (i8*)* nonnull) unnamed_addr #0 personality i32 (i32, i32, i64, %"unwind::libunwind::_Unwind_Exception"*, %"unwind::libunwind::_Unwind_Context"*)* @rust_eh_personality !dbg !5158 {
start:
ret void
}"#
)),
Ok(
(S(""),
Define {
name: "_ZN3std10sys_common12thread_local22register_dtor_fallback17h254497a6d25774eeE",
stmts: vec![Stmt::Label, Stmt::Other],
sig: FnSig {
inputs: vec![
Type::Pointer(Box::new(Type::Integer(8))),
Type::Pointer(Box::new(Type::Fn(FnSig {
inputs: vec![Type::Pointer(Box::new(Type::Integer(8)))],
output: None,
}))),
],
output: None,
},
}))
);
assert_eq!(
super::parse(S(
r#"define internal fastcc void @_ZN3std9panicking20rust_panic_with_hook17hac9cf78024704ab4E({}* nonnull align 1, [3 x i64]* noalias readonly align 8 dereferenceable(24), i64* noalias readonly align 8 dereferenceable_or_null(48), { [0 x i64], { [0 x i8]*, i64 }, [0 x i32], i32, [0 x i32], i32, [0 x i32] }* noalias nocapture readonly align 8 dereferenceable(24)) unnamed_addr #10 personality i32 (i32, i32, i64, %"unwind::libunwind::_Unwind_Exception"*, %"unwind::libunwind::_Unwind_Context"*)* @rust_eh_personality !dbg !6634 {
start:
ret void
}"#
)),
Ok(
(S(""),
Define {
name: "_ZN3std9panicking20rust_panic_with_hook17hac9cf78024704ab4E",
stmts: vec![Stmt::Label, Stmt::Other],
sig: FnSig {
inputs: vec![
Type::Pointer(Box::new(Type::Struct(vec![]))),
Type::Pointer(Box::new(Type::Array(3, Box::new(Type::Integer(64))))),
Type::Pointer(Box::new(Type::Integer(64))),
Type::Pointer(Box::new(Type::Struct(vec![
Type::Array(0, Box::new(Type::Integer(64))),
Type::Struct(vec![
Type::Pointer(Box::new(Type::Array(0, Box::new(Type::Integer(8))))),
Type::Integer(64),
]),
Type::Array(0, Box::new(Type::Integer(32))),
Type::Integer(32),
Type::Array(0, Box::new(Type::Integer(32))),
Type::Integer(32),
Type::Array(0, Box::new(Type::Integer(32))),
]))),
],
output: None,
},
}))
);
assert_eq!(
super::parse(S(
r#"define noalias void ()** @foo() unnamed_addr #0 !dbg !1272 {
start:
ret void ()** null, !dbg !1278
}"#
)),
Ok((
S(""),
Define {
name: "foo",
stmts: vec![Stmt::Label, Stmt::Other],
sig: FnSig {
inputs: vec![],
output: Some(Box::new(Type::Pointer(Box::new(Type::Pointer(Box::new(
Type::Fn(FnSig {
inputs: vec![],
output: None,
})
)))))),
},
}
))
);
assert_eq!(
super::parse(S(
r#"define internal fastcc float @_ZN3app3foo17h3337355bfdc88d96E(float) unnamed_addr #0 !dbg !1183 {
start:
call void @llvm.dbg.value(metadata float %0, metadata !1187, metadata !DIExpression()), !dbg !1188
%1 = fmul float %0, 0x3FF19999A0000000, !dbg !1189
ret float %1, !dbg !1190
}"#
)),
Ok((
S(""),
Define {
name: "_ZN3app3foo17h3337355bfdc88d96E",
stmts: vec![
Stmt::Label,
Stmt::DirectCall("llvm.dbg.value"),
Stmt::Other,
Stmt::Other,
],
sig: FnSig {
inputs: vec![Type::Float],
output: Some(Box::new(Type::Float)),
},
}
))
);
}
}
|
use crate::value::Value;
/// Error returned by `ocaml-rs` functions
// NOTE(review): consider `#[non_exhaustive]` and a `Display`/`Error`
// impl if callers outside this crate match on or report these — confirm
// where the impls live.
#[derive(Debug)]
pub enum Error {
    /// An index is out of bounds
    OutOfBounds,
    /// A value cannot be called using callback functions
    NotCallable,
    /// An OCaml exception
    Exception(Value),
    /// Array is not a double array
    NotDoubleArray,
    /// C String is invalid
    InvalidCString,
}
|
use std::collections::HashMap;
use std::hash::Hash;
/// A mutable name-to-value environment backed by a `HashMap`.
pub struct Env<N, V>(HashMap<N, V>);
impl<N, V> Env<N, V>
where
    N: Clone + Hash + Eq,
{
    /// Creates an empty environment.
    pub fn new() -> Env<N, V> {
        Env(HashMap::new())
    }
    /// Mutable access to the binding for `name`, if present.
    pub fn get_mut(&mut self, name: &N) -> Option<&mut V> {
        self.0.get_mut(name)
    }
    /// Temporarily binds `variable_names` (values produced by
    /// `create_binding`), runs `f` with the modified environment, then
    /// restores any bindings that were shadowed. Returns `f`'s result
    /// along with the final values of the temporary bindings.
    ///
    /// NOTE(review): duplicate names in `variable_names` would make the
    /// `remove(..).unwrap()` below panic on the second occurrence —
    /// confirm callers guarantee uniqueness.
    pub fn with_variables<Z, B, F>(
        &mut self,
        variable_names: Vec<N>,
        create_binding: B,
        f: F,
    ) -> (Z, Vec<V>)
    where
        B: Fn(usize, &N) -> V,
        F: FnOnce(&mut Self) -> Z,
    {
        let mut shadowed = Vec::new();
        {
            // Modify the environment, save the shadowed bindings so
            // we can revert the changes later.
            for (idx, name) in variable_names.iter().enumerate() {
                let binding = create_binding(idx, name);
                let name_clone = name.clone();
                // `insert` returns the previous value; Option::into_iter
                // pushes it onto `shadowed` only when one existed.
                self.0.insert(name_clone, binding).into_iter().for_each(
                    |old_binding| {
                        shadowed.push((idx, old_binding));
                    },
                );
            }
        }
        // invoke `f`
        let z_result = f(self);
        // pull the `V`s from the hash map
        let v_result: Vec<V> = variable_names
            .iter()
            .map(|n| self.0.remove(n).unwrap())
            .collect();
        // revert changes: put the shadowed values back into self
        for (i, restored_value) in shadowed.into_iter() {
            let restored_name = variable_names[i].clone();
            self.0.insert(restored_name, restored_value);
        }
        (z_result, v_result)
    }
}
|
use crate::c_component::CModel;
use yew::prelude::*;
use yew::Properties;
use yew_router::component;
use yew_router::components::router_button::RouterButton;
use yew_router::route;
use yew_router::FromCaptures;
use yew_router::{Route, Router};
/// Route target "A": a stateless yew component (old pre-0.x router API).
pub struct AModel {}
#[derive(PartialEq, Properties, FromCaptures)]
pub struct Props {}
/// No internal messages.
pub enum Msg {}
impl Component for AModel {
    type Message = Msg;
    type Properties = Props;
    // Stateless: props and link are ignored.
    fn create(_props: Self::Properties, _link: ComponentLink<Self>) -> Self {
        AModel {}
    }
    // Always re-render; there is no state to diff against.
    fn update(&mut self, _msg: Self::Message) -> ShouldRender {
        true
    }
    fn change(&mut self, _props: Self::Properties) -> ShouldRender {
        true
    }
}
impl Renderable<AModel> for AModel {
    // Renders two RouterButtons (one valid target, one deliberately
    // pointing at a non-existent component) plus a nested Router that
    // matches "/{}/c" to CModel. Comments are kept outside the html!
    // macro so its token stream is untouched.
    fn view(&self) -> Html<Self> {
        html! {
            <div>
                { "I am the A component"}
                <div>
                    <RouterButton:
                        text=String::from("Go to a/c"),
                        link="/a/c",
                    />
                    <RouterButton:
                        text=String::from("Go to a/d (Component does not exist)"),
                        link="/a/d",
                    />
                </div>
                <div>
                    <Router>
                        <Route matcher=route!("/{}/c") render=component::<CModel>() />
                    </Router>
                </div>
            </div>
        }
    }
}
|
/// A circle: center coordinates and radius.
struct Circle {
    x: f64,
    y: f64,
    radius: f64,
}
/// Shapes that can report their area.
trait HasArea {
    fn area(&self) -> f64;
}
impl HasArea for Circle {
    // pi * r^2 — the center coordinates do not affect the area.
    fn area(&self) -> f64{
        std::f64::consts::PI * (self.radius * self.radius)
    }
}
/// Prints the area of any shape implementing `HasArea`.
fn print_area<T: HasArea>(shape: &T)
{
    let area = shape.area();
    println!("shape have the area {}", area);
}
fn main() {
    // Circle of radius 20 centered at the origin.
    let i_am_circle = Circle {
        x: 0.0,
        y: 0.0,
        radius: 20.0,
    };
    print_area(&i_am_circle);
    println!("{}", i_am_circle.area());
}
|
use std::collections::HashMap;
/// Walkthrough of `HashMap` basics: insertion, collecting from zipped
/// iterators, ownership transfer, lookup, and iteration.
///
/// Fixes: the "Accessing Values" example built a fresh map and then
/// looked the team up in the *wrong* map (`scores`) via shadowing — it
/// now queries the map it just built. Locals also renamed to snake_case.
fn main() {
    let mut scores = HashMap::new();
    scores.insert(String::from("Blue"), 10);
    scores.insert(String::from("Yellow"), 50);
    println!("scores: {:?}", scores);
    let teams = vec![String::from("Blue"), String::from("Yellow")];
    let initial_scores = vec![10, 50];
    // Needed here because it's possible to collect into many different
    // data structures and Rust doesn't know which you want unless you specify
    let scores_two: HashMap<_, _> = teams.iter().zip(initial_scores.iter()).collect();
    println!("scoresTwo: {:?}", scores_two);
    // **Hash Maps and Ownership**
    // For types that implement the "Copy" trait, like i32, the values are
    // copied into the hash map
    // For owned values like String, the values will be moved and the hash map
    // will be the owner of those values
    let field_name = String::from("Favorite Color");
    let field_value = String::from("Blue");
    let mut map = HashMap::new();
    // can't use field_name and field_value anymore
    map.insert(field_name, field_value);
    // can do this if we want to use the field_value again
    // We can't do this for field_name because it is a string
    // however, the values that the references point to must be valid for
    // at least as long as the hash map is valid
    // map.insert(field_name, &field_value);
    // println!("field_name: {}", field_value);
    // **Accessing Values in a Hash Map**
    let mut scores_three = HashMap::new();
    scores_three.insert(String::from("Blue"), 10);
    scores_three.insert(String::from("Yellow"), 50);
    let team_name = String::from("Blue");
    // `get` returns Option<&V>, so this is Some(&10).
    let blue_score = scores_three.get(&team_name);
    println!("scoresThree: {:?}", blue_score);
    // **Iterating over hash maps**
    let mut scores_four = HashMap::new();
    scores_four.insert(String::from("Blue"), 10);
    scores_four.insert(String::from("Yellow"), 50);
    // These pairs get printed in an arbitrary order
    for (key, value) in &scores_four {
        println!("{}: {}", key, value);
    }
}
|
use crate::util::bit_op;
use std::fmt;
#[derive(Clone)]
pub(crate) struct Registers {
af: AF,
bc: BC,
de: DE,
hl: HL,
sp: u16,
pc: u16,
}
impl fmt::Debug for Registers {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(
f,
"{{af: {:?}, bc: {:?}, de: {:?}, hl: {:?}, sp: {:#06X}, pc: {:#06X}}}",
self.af, self.bc, self.de, self.hl, self.sp, self.pc
)
}
}
// Each register pair is modelled as a union so it can be viewed either as
// one 16-bit value (`both`) or as its two 8-bit halves (`single`).
// NOTE(review): matching `both`'s low byte with the *first* field of the
// `*Single` structs assumes a little-endian host — confirm if this ever
// targets a big-endian platform.
#[derive(Clone, Copy)]
union AF {
    single: AFSingle,
    both: u16,
}
impl fmt::Debug for AF {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // SAFETY: both union views are plain integers, so every bit
        // pattern is a valid value for either field.
        unsafe {
            write!(
                f,
                "{:#06X} (a: {:#04X}, f: {:#04X})",
                self.both, self.single.a, self.single.f
            )
        }
    }
}
impl AF {
    // Zero during the boot ROM; 0x01B0 afterwards (see the tests below).
    fn new(boot_sequence: bool) -> AF {
        AF {
            both: if boot_sequence { 0 } else { 0x01b0 },
        }
    }
}
#[derive(Clone, Copy)]
union BC {
    single: BCSingle,
    both: u16,
}
impl fmt::Debug for BC {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // SAFETY: plain-integer union; any bit pattern is valid.
        unsafe {
            write!(
                f,
                "{:#06X} (b: {:#04X}, c: {:#04X})",
                self.both, self.single.b, self.single.c
            )
        }
    }
}
impl BC {
    fn new(boot_sequence: bool) -> BC {
        BC {
            both: if boot_sequence { 0 } else { 0x0013 },
        }
    }
}
#[derive(Clone, Copy)]
union DE {
    single: DESingle,
    both: u16,
}
impl fmt::Debug for DE {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // SAFETY: plain-integer union; any bit pattern is valid.
        unsafe {
            write!(
                f,
                "{:#06X} (d: {:#04X}, e: {:#04X})",
                self.both, self.single.d, self.single.e
            )
        }
    }
}
impl DE {
    fn new(boot_sequence: bool) -> DE {
        DE {
            both: if boot_sequence { 0 } else { 0x00d8 },
        }
    }
}
#[derive(Clone, Copy)]
union HL {
    single: HLSingle,
    both: u16,
}
impl fmt::Debug for HL {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // SAFETY: plain-integer union; any bit pattern is valid.
        unsafe {
            write!(
                f,
                "{:#06X} (h: {:#04X}, l: {:#04X})",
                self.both, self.single.h, self.single.l
            )
        }
    }
}
impl HL {
    fn new(boot_sequence: bool) -> HL {
        HL {
            both: if boot_sequence { 0 } else { 0x014D },
        }
    }
}
// 8-bit views of the register pairs. The low half of the 16-bit value is
// listed first (f, c, e, l) so that it overlays `both`'s low byte on a
// little-endian host — see the endianness note on the unions above.
#[derive(Debug, Copy, Clone)]
struct AFSingle {
    f: u8,
    a: u8,
}
#[derive(Debug, Copy, Clone)]
struct BCSingle {
    c: u8,
    b: u8,
}
#[derive(Debug, Copy, Clone)]
struct DESingle {
    e: u8,
    d: u8,
}
#[derive(Debug, Copy, Clone)]
struct HLSingle {
    l: u8,
    h: u8,
}
impl Registers {
    /// Creates the register file in its power-on state.
    ///
    /// With `boot_sequence` the registers start zeroed (the boot ROM fills
    /// them in); otherwise they start with the documented post-boot values
    /// (see the tests at the bottom of this file).
    pub fn new(boot_sequence: bool) -> Registers {
        Registers {
            af: AF::new(boot_sequence),
            bc: BC::new(boot_sequence),
            de: DE::new(boot_sequence),
            hl: HL::new(boot_sequence),
            sp: if boot_sequence { 0x0000 } else { 0xFFFE },
            pc: if boot_sequence { 0x0000 } else { 0x0100 },
        }
    }
    /// Reads the 8-bit register selected by an `r` operand encoding.
    pub fn read_r(&self, target: RegisterR) -> u8 {
        match target {
            RegisterR::A => self.a(),
            RegisterR::B => self.b(),
            RegisterR::C => self.c(),
            RegisterR::D => self.d(),
            RegisterR::E => self.e(),
            RegisterR::H => self.h(),
            RegisterR::L => self.l(),
        }
    }
    /// Writes the 8-bit register selected by an `r` operand encoding.
    pub fn write_r(&mut self, target: RegisterR, value: u8) {
        match target {
            RegisterR::A => self.set_a(value),
            RegisterR::B => self.set_b(value),
            RegisterR::C => self.set_c(value),
            RegisterR::D => self.set_d(value),
            RegisterR::E => self.set_e(value),
            RegisterR::H => self.set_h(value),
            RegisterR::L => self.set_l(value),
        }
    }
    /// Writes a 16-bit pair from the `dd` group (BC/DE/HL/SP).
    pub fn write_dd(&mut self, target: RegisterDD, value: u16) {
        match target {
            RegisterDD::BC => self.set_bc(value),
            RegisterDD::DE => self.set_de(value),
            RegisterDD::HL => self.set_hl(value),
            RegisterDD::SP => self.set_sp(value),
        }
    }
    /// Reads a 16-bit pair from the `qq` group (AF/BC/DE/HL).
    pub fn read_qq(&self, target: RegisterQQ) -> u16 {
        match target {
            RegisterQQ::AF => self.af(),
            RegisterQQ::BC => self.bc(),
            RegisterQQ::DE => self.de(),
            RegisterQQ::HL => self.hl(),
        }
    }
    /// Writes a 16-bit pair from the `qq` group (AF/BC/DE/HL).
    pub fn write_qq(&mut self, target: RegisterQQ, value: u16) {
        match target {
            RegisterQQ::AF => self.set_af(value),
            RegisterQQ::BC => self.set_bc(value),
            RegisterQQ::DE => self.set_de(value),
            RegisterQQ::HL => self.set_hl(value),
        }
    }
    /// Reads a 16-bit pair from the `ss` group (BC/DE/HL/SP).
    pub fn read_ss(&self, target: RegisterSS) -> u16 {
        match target {
            RegisterSS::BC => self.bc(),
            RegisterSS::DE => self.de(),
            RegisterSS::HL => self.hl(),
            RegisterSS::SP => self.sp(),
        }
    }
    /// Writes a 16-bit pair from the `ss` group (BC/DE/HL/SP).
    pub fn write_ss(&mut self, target: RegisterSS, value: u16) {
        match target {
            RegisterSS::BC => self.set_bc(value),
            RegisterSS::DE => self.set_de(value),
            RegisterSS::HL => self.set_hl(value),
            RegisterSS::SP => self.set_sp(value),
        }
    }
    // --- 8-bit half accessors ---
    // SAFETY (all union reads below): the unions only overlay plain
    // integers, so every bit pattern is valid for either view.
    pub fn a(&self) -> u8 {
        unsafe { self.af.single.a }
    }
    pub fn b(&self) -> u8 {
        unsafe { self.bc.single.b }
    }
    pub fn c(&self) -> u8 {
        unsafe { self.bc.single.c }
    }
    pub fn d(&self) -> u8 {
        unsafe { self.de.single.d }
    }
    pub fn e(&self) -> u8 {
        unsafe { self.de.single.e }
    }
    // `f` is the flags byte (bits 7..4 = Z, N, H, CY).
    pub fn f(&self) -> u8 {
        unsafe { self.af.single.f }
    }
    pub fn h(&self) -> u8 {
        unsafe { self.hl.single.h }
    }
    pub fn l(&self) -> u8 {
        unsafe { self.hl.single.l }
    }
    // --- 16-bit pair accessors ---
    pub fn af(&self) -> u16 {
        unsafe { self.af.both }
    }
    pub fn bc(&self) -> u16 {
        unsafe { self.bc.both }
    }
    pub fn de(&self) -> u16 {
        unsafe { self.de.both }
    }
    pub fn hl(&self) -> u16 {
        unsafe { self.hl.both }
    }
    pub fn sp(&self) -> u16 {
        self.sp
    }
    pub fn pc(&self) -> u16 {
        self.pc
    }
    // --- 8-bit half setters ---
    pub fn set_a(&mut self, value: u8) {
        self.af.single.a = value;
    }
    pub fn set_b(&mut self, value: u8) {
        self.bc.single.b = value;
    }
    pub fn set_c(&mut self, value: u8) {
        self.bc.single.c = value;
    }
    pub fn set_d(&mut self, value: u8) {
        self.de.single.d = value;
    }
    pub fn set_e(&mut self, value: u8) {
        self.de.single.e = value;
    }
    // The low nibble of F is masked off: only the four flag bits (7..4)
    // are ever stored.
    pub fn set_f(&mut self, value: u8) {
        self.af.single.f = value & 0xF0;
    }
    pub fn set_h(&mut self, value: u8) {
        self.hl.single.h = value;
    }
    pub fn set_l(&mut self, value: u8) {
        self.hl.single.l = value;
    }
    // --- 16-bit pair setters ---
    // Same low-nibble-of-F masking as `set_f`.
    pub fn set_af(&mut self, value: u16) {
        self.af.both = value & 0xFFF0;
    }
    pub fn set_bc(&mut self, value: u16) {
        self.bc.both = value;
    }
    pub fn set_de(&mut self, value: u16) {
        self.de.both = value;
    }
    pub fn set_hl(&mut self, value: u16) {
        self.hl.both = value;
    }
    pub fn set_sp(&mut self, value: u16) {
        self.sp = value;
    }
    pub fn set_pc(&mut self, value: u16) {
        self.pc = value;
    }
    /// Advances the program counter, wrapping on 16-bit overflow.
    pub fn inc_pc(&mut self, value: u16) {
        self.pc = self.pc.wrapping_add(value);
    }
    /// Evaluates a conditional-jump condition against the flag bits:
    /// CY is bit 4 of F, Z is bit 7.
    pub fn check_condition(&self, condition: Condition) -> bool {
        match condition {
            Condition::C => (self.f() >> 4) & 0b1 == 0b1,
            Condition::NC => (self.f() >> 4) & 0b1 == 0b0,
            Condition::NZ => (self.f() >> 7) & 0b1 == 0b0,
            Condition::Z => (self.f() >> 7) & 0b1 == 0b1,
        }
    }
    /// Carry flag (bit 4 of F) as 0/1.
    pub fn flag_cy(&self) -> u8 {
        (self.f() >> 4) & 1
    }
    /// Half-carry flag (bit 5 of F) as 0/1.
    pub fn flag_h(&self) -> u8 {
        (self.f() >> 5) & 1
    }
    /// Subtract flag (bit 6 of F) as 0/1.
    pub fn flag_n(&self) -> u8 {
        (self.f() >> 6) & 1
    }
    /// Zero flag (bit 7 of F) as 0/1.
    #[allow(unused)]
    pub fn flag_z(&self) -> u8 {
        (self.f() >> 7) & 1
    }
    /// Overwrites all four flag bits at once (each argument is 0 or 1).
    pub fn set_flags(&mut self, z: u8, n: u8, h: u8, cy: u8) {
        let mut flags = self.f();
        flags = bit_op::change_bit_to(flags, 7, z);
        flags = bit_op::change_bit_to(flags, 6, n);
        flags = bit_op::change_bit_to(flags, 5, h);
        flags = bit_op::change_bit_to(flags, 4, cy);
        self.set_f(flags);
    }
    /// Updates Z/N/H/CY for an 8-bit addition `operand1 + operand2 + carry`.
    /// Each flag follows its own policy in `calc` (set/clear/ignore/calculate).
    pub fn set_flags_add(&mut self, operand1: u8, operand2: u8, carry: u8, calc: FlagCalculations) {
        let mut flags = self.f();
        flags = Registers::calculate_flag_z(
            FlagCalculationOperation::Add,
            operand1,
            operand2,
            carry,
            calc.zero,
            flags,
        );
        flags = Registers::calculate_flag_n(calc.substraction, flags);
        flags = Registers::calculate_flag_h(
            FlagCalculationOperation::Add,
            operand1,
            operand2,
            carry,
            calc.halfcarry,
            flags,
        );
        flags = Registers::calculate_flag_cy(
            FlagCalculationOperation::Add,
            operand1,
            operand2,
            carry,
            calc.carry,
            flags,
        );
        self.set_f(flags);
    }
    /// Updates Z/N/H/CY for an 8-bit subtraction
    /// `operand1 - operand2 - carry`, per the policies in `calc`.
    pub fn set_flags_sub(&mut self, operand1: u8, operand2: u8, carry: u8, calc: FlagCalculations) {
        let mut flags = self.f();
        flags = Registers::calculate_flag_z(
            FlagCalculationOperation::Sub,
            operand1,
            operand2,
            carry,
            calc.zero,
            flags,
        );
        flags = Registers::calculate_flag_n(calc.substraction, flags);
        flags = Registers::calculate_flag_h(
            FlagCalculationOperation::Sub,
            operand1,
            operand2,
            carry,
            calc.halfcarry,
            flags,
        );
        flags = Registers::calculate_flag_cy(
            FlagCalculationOperation::Sub,
            operand1,
            operand2,
            carry,
            calc.carry,
            flags,
        );
        self.set_f(flags);
    }
    /// 16-bit variant of [`Registers::set_flags_add`] (half-carry from bit
    /// 11, carry from bit 15 — see the `_u16` calculators below).
    pub fn set_flags_add_u16(
        &mut self,
        operand1: u16,
        operand2: u16,
        carry: u8,
        calc: FlagCalculations,
    ) {
        let mut flags = self.f();
        flags = Registers::calculate_flag_z_u16(
            FlagCalculationOperation::Add,
            operand1,
            operand2,
            carry,
            calc.zero,
            flags,
        );
        flags = Registers::calculate_flag_n(calc.substraction, flags);
        flags = Registers::calculate_flag_h_u16(
            FlagCalculationOperation::Add,
            operand1,
            operand2,
            carry,
            calc.halfcarry,
            flags,
        );
        flags = Registers::calculate_flag_cy_u16(
            FlagCalculationOperation::Add,
            operand1,
            operand2,
            carry,
            calc.carry,
            flags,
        );
        self.set_f(flags);
    }
    /// 16-bit variant of [`Registers::set_flags_sub`].
    pub fn set_flags_sub_u16(
        &mut self,
        operand1: u16,
        operand2: u16,
        carry: u8,
        calc: FlagCalculations,
    ) {
        let mut flags = self.f();
        flags = Registers::calculate_flag_z_u16(
            FlagCalculationOperation::Sub,
            operand1,
            operand2,
            carry,
            calc.zero,
            flags,
        );
        flags = Registers::calculate_flag_n(calc.substraction, flags);
        flags = Registers::calculate_flag_h_u16(
            FlagCalculationOperation::Sub,
            operand1,
            operand2,
            carry,
            calc.halfcarry,
            flags,
        );
        flags = Registers::calculate_flag_cy_u16(
            FlagCalculationOperation::Sub,
            operand1,
            operand2,
            carry,
            calc.carry,
            flags,
        );
        self.set_f(flags);
    }
    /// Computes the Z (zero) flag, bit 7, for an 8-bit operation.
    /// Operands are widened to i16 so a subtraction may go negative.
    fn calculate_flag_z(
        operation: FlagCalculationOperation,
        operand1: u8,
        operand2: u8,
        carry: u8,
        z: FlagCalculationStatus,
        flags: u8,
    ) -> u8 {
        let operand1 = i16::from(operand1);
        let operand2 = i16::from(operand2);
        let carry = i16::from(carry);
        match z {
            FlagCalculationStatus::Set => bit_op::set_bit(flags, 7),
            FlagCalculationStatus::Clear => bit_op::clear_bit(flags, 7),
            FlagCalculationStatus::Ignore => flags,
            FlagCalculationStatus::Calculate => {
                let result = match operation {
                    FlagCalculationOperation::Add => (operand1 + operand2 + carry) & 0xFF,
                    FlagCalculationOperation::Sub => (operand1 - operand2) - carry,
                };
                // trailing_zeros() >= 8 means the low byte is zero, i.e. the
                // truncated 8-bit result is 0 (this also holds for negative
                // subtraction results such as -256 = 0xFF00).
                if result.trailing_zeros() >= 8 {
                    bit_op::set_bit(flags, 7)
                } else {
                    bit_op::clear_bit(flags, 7)
                }
            }
        }
    }
    /// Sets/clears the N (subtract) flag, bit 6. N is never derived from
    /// operands, so `Calculate` is a programming error.
    fn calculate_flag_n(n: FlagCalculationStatus, flags: u8) -> u8 {
        match n {
            FlagCalculationStatus::Set => bit_op::set_bit(flags, 6),
            FlagCalculationStatus::Clear => bit_op::clear_bit(flags, 6),
            FlagCalculationStatus::Ignore => flags,
            FlagCalculationStatus::Calculate => panic!(),
        }
    }
    /// Computes the H (half-carry) flag, bit 5: carry/borrow between the
    /// low and high nibble of an 8-bit operation.
    fn calculate_flag_h(
        operation: FlagCalculationOperation,
        operand1: u8,
        operand2: u8,
        carry: u8,
        h: FlagCalculationStatus,
        flags: u8,
    ) -> u8 {
        match h {
            FlagCalculationStatus::Set => bit_op::set_bit(flags, 5),
            FlagCalculationStatus::Clear => bit_op::clear_bit(flags, 5),
            FlagCalculationStatus::Ignore => flags,
            FlagCalculationStatus::Calculate => match operation {
                FlagCalculationOperation::Add => {
                    // Adding the low nibbles can at most reach bit 4; that
                    // bit being set is exactly the half-carry.
                    let result = ((operand1 & 0xF) + (operand2 & 0xF) + (carry & 0xF)) & 0x10;
                    if result == 0x10 {
                        bit_op::set_bit(flags, 5)
                    } else {
                        bit_op::clear_bit(flags, 5)
                    }
                }
                FlagCalculationOperation::Sub => {
                    // XOR trick: bit 4 of (a ^ b ^ (a-b-c)) is set exactly
                    // when a borrow crossed the nibble boundary.
                    let result = operand1.wrapping_sub(operand2).wrapping_sub(carry);
                    let result = ((operand1 ^ operand2 ^ result) & (1 << 4)) != 0;
                    if result {
                        bit_op::set_bit(flags, 5)
                    } else {
                        bit_op::clear_bit(flags, 5)
                    }
                }
            },
        }
    }
    /// Computes the CY (carry) flag, bit 4, for an 8-bit operation:
    /// addition overflow past 0xFF, or a subtraction borrow (result < 0).
    fn calculate_flag_cy(
        operation: FlagCalculationOperation,
        operand1: u8,
        operand2: u8,
        carry: u8,
        h: FlagCalculationStatus,
        flags: u8,
    ) -> u8 {
        let operand1 = u16::from(operand1);
        let operand2 = u16::from(operand2);
        let carry = u16::from(carry);
        match h {
            FlagCalculationStatus::Set => bit_op::set_bit(flags, 4),
            FlagCalculationStatus::Clear => bit_op::clear_bit(flags, 4),
            FlagCalculationStatus::Ignore => flags,
            FlagCalculationStatus::Calculate => match operation {
                FlagCalculationOperation::Add => {
                    let result = operand1 + operand2 + carry;
                    if result > 0xFF {
                        bit_op::set_bit(flags, 4)
                    } else {
                        bit_op::clear_bit(flags, 4)
                    }
                }
                FlagCalculationOperation::Sub => {
                    let result = (i32::from(operand1) - i32::from(operand2)) - i32::from(carry);
                    if result < 0 {
                        bit_op::set_bit(flags, 4)
                    } else {
                        bit_op::clear_bit(flags, 4)
                    }
                }
            },
        }
    }
    /// 16-bit Z (zero) flag: the result truncated to 16 bits is zero.
    fn calculate_flag_z_u16(
        operation: FlagCalculationOperation,
        operand1: u16,
        operand2: u16,
        carry: u8,
        z: FlagCalculationStatus,
        flags: u8,
    ) -> u8 {
        let operand1 = i32::from(operand1);
        let operand2 = i32::from(operand2);
        let carry = i32::from(carry);
        match z {
            FlagCalculationStatus::Set => bit_op::set_bit(flags, 7),
            FlagCalculationStatus::Clear => bit_op::clear_bit(flags, 7),
            FlagCalculationStatus::Ignore => flags,
            FlagCalculationStatus::Calculate => {
                let result = match operation {
                    FlagCalculationOperation::Add => (operand1 + operand2 + carry) & 0xFFFF,
                    FlagCalculationOperation::Sub => (operand1 - operand2) - carry,
                };
                if result == 0 {
                    bit_op::set_bit(flags, 7)
                } else {
                    bit_op::clear_bit(flags, 7)
                }
            }
        }
    }
    /// 16-bit H (half-carry) flag: carry/borrow across bit 11
    /// (the low 12 bits, masked with 0xFFF).
    fn calculate_flag_h_u16(
        operation: FlagCalculationOperation,
        operand1: u16,
        operand2: u16,
        carry: u8,
        h: FlagCalculationStatus,
        flags: u8,
    ) -> u8 {
        match h {
            FlagCalculationStatus::Set => bit_op::set_bit(flags, 5),
            FlagCalculationStatus::Clear => bit_op::clear_bit(flags, 5),
            FlagCalculationStatus::Ignore => flags,
            FlagCalculationStatus::Calculate => match operation {
                FlagCalculationOperation::Add => {
                    let mut result = operand1 & 0xFFF;
                    result += operand2 & 0xFFF;
                    result += u16::from(carry) & 0xFFF;
                    if result >= 0x1000 {
                        bit_op::set_bit(flags, 5)
                    } else {
                        bit_op::clear_bit(flags, 5)
                    }
                }
                FlagCalculationOperation::Sub => {
                    // Widened to i32 so a borrow shows up as a negative result.
                    let mut result: i32 = i32::from(operand1) & 0xFFF;
                    result -= i32::from(operand2) & 0xFFF;
                    result -= i32::from(carry) & 0xFFF;
                    if result < 0 {
                        bit_op::set_bit(flags, 5)
                    } else {
                        bit_op::clear_bit(flags, 5)
                    }
                }
            },
        }
    }
    /// 16-bit CY (carry) flag: addition overflow past 0xFFFF, or a
    /// subtraction borrow (signed result < 0).
    fn calculate_flag_cy_u16(
        operation: FlagCalculationOperation,
        operand1: u16,
        operand2: u16,
        carry: u8,
        h: FlagCalculationStatus,
        flags: u8,
    ) -> u8 {
        let operand1 = u32::from(operand1);
        let operand2 = u32::from(operand2);
        let carry = u32::from(carry);
        match h {
            FlagCalculationStatus::Set => bit_op::set_bit(flags, 4),
            FlagCalculationStatus::Clear => bit_op::clear_bit(flags, 4),
            FlagCalculationStatus::Ignore => flags,
            FlagCalculationStatus::Calculate => match operation {
                FlagCalculationOperation::Add => {
                    let result = operand1 + operand2 + carry;
                    if result > 0xFFFF {
                        bit_op::set_bit(flags, 4)
                    } else {
                        bit_op::clear_bit(flags, 4)
                    }
                }
                FlagCalculationOperation::Sub => {
                    let result = (i64::from(operand1) - i64::from(operand2)) - i64::from(carry);
                    if result < 0 {
                        bit_op::set_bit(flags, 4)
                    } else {
                        bit_op::clear_bit(flags, 4)
                    }
                }
            },
        }
    }
}
/// Per-flag policy bundle for an ALU operation: each field says whether
/// that flag should be set, cleared, left alone, or computed from operands.
#[derive(Debug, Copy, Clone)]
pub struct FlagCalculations {
    pub zero: FlagCalculationStatus,
    pub carry: FlagCalculationStatus,
    pub halfcarry: FlagCalculationStatus,
    pub substraction: FlagCalculationStatus,
}
impl FlagCalculations {
    /// All flags default to `Ignore` (leave unchanged).
    pub fn new() -> FlagCalculations {
        FlagCalculations {
            zero: FlagCalculationStatus::Ignore,
            carry: FlagCalculationStatus::Ignore,
            halfcarry: FlagCalculationStatus::Ignore,
            substraction: FlagCalculationStatus::Ignore,
        }
    }
}
/// Fluent builder over [`FlagCalculations`]; every flag starts as `Ignore`
/// and individual policies are overridden per call site.
#[derive(Debug, Copy, Clone)]
pub(crate) struct FlagCalculationsBuilder {
    flag_calculations: FlagCalculations,
}
impl FlagCalculationsBuilder {
    pub fn new() -> FlagCalculationsBuilder {
        FlagCalculationsBuilder {
            flag_calculations: FlagCalculations::new(),
        }
    }
    /// Finishes the builder, yielding the configured policies.
    pub fn build(self) -> FlagCalculations {
        self.flag_calculations
    }
    pub fn zero(mut self, status: FlagCalculationStatus) -> FlagCalculationsBuilder {
        self.flag_calculations.zero = status;
        self
    }
    pub fn carry(mut self, status: FlagCalculationStatus) -> FlagCalculationsBuilder {
        self.flag_calculations.carry = status;
        self
    }
    pub fn halfcarry(mut self, status: FlagCalculationStatus) -> FlagCalculationsBuilder {
        self.flag_calculations.halfcarry = status;
        self
    }
    pub fn substraction(mut self, status: FlagCalculationStatus) -> FlagCalculationsBuilder {
        self.flag_calculations.substraction = status;
        self
    }
}
/// What to do with a single flag after an operation: force it high, force
/// it low, keep its current value, or derive it from the operands.
#[derive(Debug, Copy, Clone)]
pub enum FlagCalculationStatus {
    Set,
    Clear,
    Ignore,
    Calculate,
}
/// Direction of the ALU operation the flags are derived from.
#[derive(Debug, Copy, Clone)]
pub enum FlagCalculationOperation {
    Add,
    Sub,
}
/// The 8-bit register operands addressable by an `r` opcode field.
#[derive(Debug, Copy, Clone)]
pub enum RegisterR {
    A,
    B,
    C,
    D,
    E,
    H,
    L,
}
impl RegisterR {
    /// Decodes a three-bit opcode field into the register it names.
    ///
    /// # Panics
    /// Hits `unreachable!` for any encoding other than the seven listed.
    pub fn new(value: u8) -> RegisterR {
        match value {
            0b000 => Self::B,
            0b001 => Self::C,
            0b010 => Self::D,
            0b011 => Self::E,
            0b100 => Self::H,
            0b101 => Self::L,
            0b111 => Self::A,
            _ => unreachable!(),
        }
    }
}
/// The `dd` 16-bit register-pair operand group (includes SP, not AF).
#[derive(Debug, Copy, Clone)]
pub enum RegisterDD {
    BC,
    DE,
    HL,
    SP,
}
impl RegisterDD {
    /// Decodes a two-bit opcode field into a register pair.
    ///
    /// # Panics
    /// Hits `unreachable!` if `value` does not fit in two bits.
    pub fn new(value: u8) -> RegisterDD {
        match value {
            0b00 => Self::BC,
            0b01 => Self::DE,
            0b10 => Self::HL,
            0b11 => Self::SP,
            _ => unreachable!(),
        }
    }
}
/// The `qq` 16-bit register-pair operand group (includes AF, not SP).
#[derive(Debug, Copy, Clone)]
pub enum RegisterQQ {
    BC,
    DE,
    HL,
    AF,
}
impl RegisterQQ {
    /// Decodes a two-bit opcode field into a register pair.
    ///
    /// # Panics
    /// Hits `unreachable!` if `value` does not fit in two bits.
    pub fn new(value: u8) -> RegisterQQ {
        match value {
            0b00 => Self::BC,
            0b01 => Self::DE,
            0b10 => Self::HL,
            0b11 => Self::AF,
            _ => unreachable!(),
        }
    }
}
/// The `ss` 16-bit register-pair operand group (same members as `dd`).
#[derive(Debug, Copy, Clone)]
pub enum RegisterSS {
    BC,
    DE,
    HL,
    SP,
}
impl RegisterSS {
    /// Decodes a two-bit opcode field into a register pair.
    ///
    /// # Panics
    /// Hits `unreachable!` if `value` does not fit in two bits.
    pub fn new(value: u8) -> RegisterSS {
        match value {
            0b00 => Self::BC,
            0b01 => Self::DE,
            0b10 => Self::HL,
            0b11 => Self::SP,
            _ => unreachable!(),
        }
    }
}
/// Conditional-execution conditions, tested against the Z and CY flags
/// by `Registers::check_condition`.
#[derive(Debug, Copy, Clone)]
pub enum Condition {
    NZ,
    Z,
    NC,
    C,
}
impl Condition {
    /// Decodes a two-bit opcode field into a condition.
    ///
    /// # Panics
    /// Hits `unreachable!` if `value` does not fit in two bits.
    pub fn new(value: u8) -> Condition {
        match value {
            0b00 => Self::NZ,
            0b01 => Self::Z,
            0b10 => Self::NC,
            0b11 => Self::C,
            _ => unreachable!(),
        }
    }
}
#[cfg(test)]
mod tests {
    use crate::processor::registers::Registers;
    // Boot-sequence mode: everything starts zeroed for the boot ROM.
    #[test]
    fn everything_setup_after_initialization_with_boot_sequence() {
        let registers = Registers::new(true);
        assert_eq!(registers.af(), 0);
        assert_eq!(registers.bc(), 0);
        assert_eq!(registers.de(), 0);
        assert_eq!(registers.hl(), 0);
        assert_eq!(registers.a(), 0);
        assert_eq!(registers.b(), 0);
        assert_eq!(registers.c(), 0);
        assert_eq!(registers.d(), 0);
        assert_eq!(registers.e(), 0);
        assert_eq!(registers.f(), 0);
        assert_eq!(registers.h(), 0);
        assert_eq!(registers.l(), 0);
        assert_eq!(registers.pc(), 0x0);
        assert_eq!(registers.sp(), 0x0);
    }
    // Post-boot mode: the documented power-on register values.
    #[test]
    fn everything_setup_after_initialization() {
        let registers = Registers::new(false);
        assert_eq!(registers.af(), 0x01B0);
        assert_eq!(registers.bc(), 0x0013);
        assert_eq!(registers.de(), 0x00D8);
        assert_eq!(registers.hl(), 0x014D);
        assert_eq!(registers.a(), 0x01);
        assert_eq!(registers.b(), 0x00);
        assert_eq!(registers.c(), 0x13);
        assert_eq!(registers.d(), 0x00);
        assert_eq!(registers.e(), 0xD8);
        assert_eq!(registers.f(), 0xB0);
        assert_eq!(registers.h(), 0x01);
        assert_eq!(registers.l(), 0x4D);
        assert_eq!(registers.pc(), 0x100);
        assert_eq!(registers.sp(), 0xFFFE);
    }
    // The pair/half setters below verify both views of each union stay in
    // sync, and that F's low nibble is always masked to zero.
    #[test]
    fn set_af_correct() {
        let mut registers = Registers::new(false);
        registers.set_af(0xABCD);
        assert_eq!(registers.af(), 0xABCD & 0xFFF0);
        assert_eq!(registers.a(), 0xAB);
        assert_eq!(registers.f(), 0xCD & 0xF0);
    }
    #[test]
    fn set_af_singles_correct() {
        let mut registers = Registers::new(false);
        registers.set_a(0xAB);
        registers.set_f(0xCD);
        assert_eq!(registers.af(), 0xABCD & 0xFFF0);
        assert_eq!(registers.a(), 0xAB);
        assert_eq!(registers.f(), 0xCD & 0xF0);
    }
    #[test]
    fn set_a_singles_correct() {
        let mut registers = Registers::new(false);
        registers.set_a(0xAB);
        assert_eq!(registers.af(), 0xABB0);
        assert_eq!(registers.a(), 0xAB);
        assert_eq!(registers.f(), 0xB0);
    }
    #[test]
    fn set_f_singles_correct() {
        let mut registers = Registers::new(false);
        registers.set_f(0xCD);
        assert_eq!(registers.af(), 0x01CD & 0xFFF0);
        assert_eq!(registers.a(), 0x01);
        assert_eq!(registers.f(), 0xCD & 0xF0);
    }
    #[test]
    fn set_bc_correct() {
        let mut registers = Registers::new(false);
        registers.set_bc(0xABCD);
        assert_eq!(registers.bc(), 0xABCD);
        assert_eq!(registers.b(), 0xAB);
        assert_eq!(registers.c(), 0xCD);
    }
    #[test]
    fn set_bc_singles_correct() {
        let mut registers = Registers::new(false);
        registers.set_b(0xAB);
        registers.set_c(0xCD);
        assert_eq!(registers.bc(), 0xABCD);
        assert_eq!(registers.b(), 0xAB);
        assert_eq!(registers.c(), 0xCD);
    }
    #[test]
    fn set_b_singles_correct() {
        let mut registers = Registers::new(false);
        registers.set_b(0xAB);
        assert_eq!(registers.bc(), 0xAB13);
        assert_eq!(registers.b(), 0xAB);
        assert_eq!(registers.c(), 0x13);
    }
    #[test]
    fn set_c_singles_correct() {
        let mut registers = Registers::new(false);
        registers.set_c(0xCD);
        assert_eq!(registers.bc(), 0x00CD);
        assert_eq!(registers.b(), 0x00);
        assert_eq!(registers.c(), 0xCD);
    }
    #[test]
    fn set_de_correct() {
        let mut registers = Registers::new(false);
        registers.set_de(0xABCD);
        assert_eq!(registers.de(), 0xABCD);
        assert_eq!(registers.d(), 0xAB);
        assert_eq!(registers.e(), 0xCD);
    }
    #[test]
    fn set_de_singles_correct() {
        let mut registers = Registers::new(false);
        registers.set_d(0xAB);
        registers.set_e(0xCD);
        assert_eq!(registers.de(), 0xABCD);
        assert_eq!(registers.d(), 0xAB);
        assert_eq!(registers.e(), 0xCD);
    }
    #[test]
    fn set_d_singles_correct() {
        let mut registers = Registers::new(false);
        registers.set_d(0xAB);
        assert_eq!(registers.de(), 0xABD8);
        assert_eq!(registers.d(), 0xAB);
        assert_eq!(registers.e(), 0xD8);
    }
    #[test]
    fn set_e_singles_correct() {
        let mut registers = Registers::new(false);
        registers.set_e(0xCD);
        assert_eq!(registers.de(), 0x00CD);
        assert_eq!(registers.d(), 0x00);
        assert_eq!(registers.e(), 0xCD);
    }
    #[test]
    fn set_hl_correct() {
        let mut registers = Registers::new(false);
        registers.set_hl(0xABCD);
        assert_eq!(registers.hl(), 0xABCD);
        assert_eq!(registers.h(), 0xAB);
        assert_eq!(registers.l(), 0xCD);
    }
    #[test]
    fn set_hl_singles_correct() {
        let mut registers = Registers::new(false);
        registers.set_h(0xAB);
        registers.set_l(0xCD);
        assert_eq!(registers.hl(), 0xABCD);
        assert_eq!(registers.h(), 0xAB);
        assert_eq!(registers.l(), 0xCD);
    }
    #[test]
    fn set_h_singles_correct() {
        let mut registers = Registers::new(false);
        registers.set_h(0xAB);
        assert_eq!(registers.hl(), 0xAB4D);
        assert_eq!(registers.h(), 0xAB);
        assert_eq!(registers.l(), 0x4D);
    }
    #[test]
    fn set_l_singles_correct() {
        let mut registers = Registers::new(false);
        registers.set_l(0xCD);
        assert_eq!(registers.hl(), 0x01CD);
        assert_eq!(registers.h(), 0x01);
        assert_eq!(registers.l(), 0xCD);
    }
    #[test]
    fn set_sp_and_pc() {
        let mut registers = Registers::new(false);
        registers.set_sp(0xABCD);
        registers.set_pc(0x1234);
        assert_eq!(registers.sp(), 0xABCD);
        assert_eq!(registers.pc(), 0x1234);
    }
}
|
use ndarray::{arr2, Array2};
use std::convert::Into;
/// Builder for a 4x4 model (object-to-world) transform matrix.
pub struct ModelMatrix {
    // Accumulated 4x4 transform; starts as the identity matrix.
    model_matrix: Array2<f32>,
}
impl ModelMatrix {
    /// Starts from the 4x4 identity matrix.
    pub fn new() -> Self {
        Self {
            model_matrix: arr2(&[
                [1.0, 0.0, 0.0, 0.0],
                [0.0, 1.0, 0.0, 0.0],
                [0.0, 0.0, 1.0, 0.0],
                [0.0, 0.0, 0.0, 1.0],
            ]),
        }
    }
    /// Appends a translation by `movement` (x, y, z).
    /// Note each `with_*` pre-multiplies (`t.dot(model)`), so it is applied
    /// AFTER the transforms already recorded.
    pub fn with_movement(mut self, movement: &[f32; 3]) -> Self {
        let m = movement;
        let t = arr2(&[
            [1.0, 0.0, 0.0, m[0]],
            [0.0, 1.0, 0.0, m[1]],
            [0.0, 0.0, 1.0, m[2]],
            [0.0, 0.0, 0.0, 1.0],
        ]);
        self.model_matrix = t.dot(&self.model_matrix);
        self
    }
    /// Appends a per-axis scale.
    pub fn with_scale(mut self, scale: &[f32; 3]) -> Self {
        let s = scale;
        let t = arr2(&[
            [s[0], 0.0, 0.0, 0.0],
            [0.0, s[1], 0.0, 0.0],
            [0.0, 0.0, s[2], 0.0],
            [0.0, 0.0, 0.0, 1.0],
        ]);
        self.model_matrix = t.dot(&self.model_matrix);
        self
    }
    /// Appends a rotation of `x_axis_rotation` radians about the X axis.
    pub fn with_x_axis_rotation(mut self, x_axis_rotation: f32) -> Self {
        let (s, c) = x_axis_rotation.sin_cos();
        let t = arr2(&[
            [1.0, 0.0, 0.0, 0.0],
            [0.0, c, -s, 0.0],
            [0.0, s, c, 0.0],
            [0.0, 0.0, 0.0, 1.0],
        ]);
        self.model_matrix = t.dot(&self.model_matrix);
        self
    }
    /// Appends a rotation of `y_axis_rotation` radians about the Y axis.
    #[allow(dead_code)]
    pub fn with_y_axis_rotation(mut self, y_axis_rotation: f32) -> Self {
        let (s, c) = y_axis_rotation.sin_cos();
        let t = arr2(&[
            [c, 0.0, s, 0.0],
            [0.0, 1.0, 0.0, 0.0],
            [-s, 0.0, c, 0.0],
            [0.0, 0.0, 0.0, 1.0],
        ]);
        self.model_matrix = t.dot(&self.model_matrix);
        self
    }
    /// Appends a rotation of `z_axis_rotation` radians about the Z axis.
    pub fn with_z_axis_rotation(mut self, z_axis_rotation: f32) -> Self {
        let (s, c) = z_axis_rotation.sin_cos();
        let t = arr2(&[
            [c, -s, 0.0, 0.0],
            [s, c, 0.0, 0.0],
            [0.0, 0.0, 1.0, 0.0],
            [0.0, 0.0, 0.0, 1.0],
        ]);
        self.model_matrix = t.dot(&self.model_matrix);
        self
    }
}
// `From` is preferred over a hand-written `Into` (clippy::from_over_into):
// the standard library's blanket `impl<T, U: From<T>> Into<U> for T` keeps
// every existing `.into()` call site working unchanged.
impl From<ModelMatrix> for Array2<f32> {
    /// Consumes the builder, yielding the accumulated 4x4 matrix.
    fn from(m: ModelMatrix) -> Self {
        m.model_matrix
    }
}
|
/// No-op entry point. The original was missing the `fn` keyword
/// (`pub run()`), which does not compile.
pub fn run() {
}
use super::scenario::Scenario;
use crate::agents::Agents;
use crate::navmesh::{Navmesh, NavmeshBuilder};
use crate::vec2::Vec2;
use serde::Deserialize;
/// A scenario with no agents; `generate` supplies only a trivial navmesh.
#[derive(Clone, Copy, Debug, PartialEq, Deserialize)]
pub struct EmptyScenario {}
impl EmptyScenario {
    /// Creates the (stateless) scenario.
    pub fn new() -> Self {
        EmptyScenario {}
    }
}
impl Default for EmptyScenario {
    // Delegates to `new`; the scenario carries no configurable state.
    fn default() -> Self {
        EmptyScenario::new()
    }
}
impl Scenario for EmptyScenario {
    /// Returns an empty agent set and a navmesh of two triangles that
    /// together tile the unit square (split along the (0,0)-(1,1) diagonal).
    fn generate(&self) -> (Agents, Navmesh) {
        (
            Agents::new(),
            NavmeshBuilder::new()
                .add_cell(Vec2::new(0., 0.), Vec2::new(1., 0.), Vec2::new(1., 1.))
                .add_cell(Vec2::new(0., 0.), Vec2::new(0., 1.), Vec2::new(1., 1.))
                .build(),
        )
    }
}
|
//! A Rubble BLE driver for the nRF51/nRF52-series radios.
#![no_std]
#![warn(rust_2018_idioms)]
pub mod radio;
pub mod timer;
pub mod utils;
|
use
{
thespis :: { * } ,
thespis_impl :: { * } ,
async_executors :: { * } ,
futures::executor :: { block_on } ,
};
// A minimal thespis actor: `Sum` accumulates a u64 total.
#[ derive( Actor ) ] struct Sum( u64 );
// Message that adds its payload into the running total.
struct Add (u64);
// Message that asks the actor for its current total.
struct Show ;
// Message return types: `Add` yields nothing, `Show` yields the total.
impl Message for Add { type Return = () ; }
impl Message for Show { type Return = u64; }
impl Handler<Add> for Sum {
    /// Folds the `Add` payload into the accumulated total.
    fn handle(&mut self, msg: Add) -> Return<()> {
        Box::pin(async move {
            self.0 += msg.0;
        })
    }
}
impl Handler<Show> for Sum {
    /// Reports the current accumulated total.
    fn handle(&mut self, _msg: Show) -> Return<u64> {
        Box::pin(async move {
            self.0
        })
    }
}
fn main() {
    let program = async move {
        // Spawn the actor on a thread pool; `addr` is our handle to it.
        let sum = Sum(5);
        let mut exec = ThreadPool::new().expect("create threadpool");
        let mut addr = Addr::try_from(sum, &mut exec).expect("Failed to create address");
        // Ten million `Add(10)` calls on top of the initial 5.
        for _ in 0..10_000_000usize {
            addr.call(Add(10)).await.expect("Send failed");
        }
        let res = addr.call(Show {}).await.expect("Call failed");
        // 5 + 10_000_000 * 10
        assert_eq!(100_000_005, res);
        dbg!(res);
    };
    block_on(program);
}
|
// Generates a criterion benchmark for a free function `$func`.
// `$from` produces one random input from the provided PCG32 rng.
#[macro_export]
macro_rules! bench_func {
    ($name: ident, $desc: expr, op => $func: ident, from => $from: expr) => {
        pub(crate) fn $name(c: &mut Criterion) {
            const SIZE: usize = 1 << 13;
            let mut rng = support::PCG32::default();
            let inputs =
                criterion::black_box((0..SIZE).map(|_| $from(&mut rng)).collect::<Vec<_>>());
            // pre-fill output vector with some random value
            let mut outputs = vec![$func($from(&mut rng)); SIZE];
            let mut i = 0;
            c.bench_function($desc, |b| {
                b.iter(|| {
                    // SIZE is a power of two, so the mask wraps the index.
                    i = (i + 1) & (SIZE - 1);
                    // SAFETY: `i` is masked into 0..SIZE, in bounds for both vecs.
                    unsafe {
                        *outputs.get_unchecked_mut(i) = $func(*inputs.get_unchecked(i));
                    }
                })
            });
            // Keep the writes observable so the loop isn't optimized away.
            criterion::black_box(outputs);
        }
    };
}
// Like `bench_func`, but for a unary method `$unop` called on each input.
#[macro_export]
macro_rules! bench_unop {
    ($name: ident, $desc: expr, op => $unop: ident, from => $from: expr) => {
        pub(crate) fn $name(c: &mut Criterion) {
            const SIZE: usize = 1 << 13;
            let mut rng = support::PCG32::default();
            let inputs =
                criterion::black_box((0..SIZE).map(|_| $from(&mut rng)).collect::<Vec<_>>());
            // pre-fill output vector with some random value
            let mut outputs = vec![$from(&mut rng).$unop(); SIZE];
            let mut i = 0;
            c.bench_function($desc, |b| {
                b.iter(|| {
                    i = (i + 1) & (SIZE - 1);
                    // SAFETY: `i` is masked into 0..SIZE, in bounds for both vecs.
                    unsafe {
                        *outputs.get_unchecked_mut(i) = inputs.get_unchecked(i).$unop();
                    }
                })
            });
            criterion::black_box(outputs);
        }
    };
}
// Benchmark for a binary method `$binop`; the one-`from` arm reuses the
// same generator for both operand streams.
#[macro_export]
macro_rules! bench_binop {
    ($name: ident, $desc: expr, op => $binop: ident, from1 => $from1:expr, from2 => $from2:expr) => {
        pub(crate) fn $name(c: &mut Criterion) {
            const SIZE: usize = 1 << 13;
            let mut rng = support::PCG32::default();
            let inputs1 =
                criterion::black_box((0..SIZE).map(|_| $from1(&mut rng)).collect::<Vec<_>>());
            let inputs2 =
                criterion::black_box((0..SIZE).map(|_| $from2(&mut rng)).collect::<Vec<_>>());
            // pre-fill output vector with some random value
            let mut outputs = vec![$from1(&mut rng).$binop($from2(&mut rng)); SIZE];
            let mut i = 0;
            c.bench_function($desc, |b| {
                b.iter(|| {
                    i = (i + 1) & (SIZE - 1);
                    // SAFETY: `i` is masked into 0..SIZE, in bounds for all vecs.
                    unsafe {
                        *outputs.get_unchecked_mut(i) = inputs1.get_unchecked(i).$binop(*inputs2.get_unchecked(i));
                    }
                })
            });
            criterion::black_box(outputs);
        }
    };
    ($name: ident, $desc: expr, op => $binop: ident, from => $from: expr) => {
        bench_binop!($name, $desc, op => $binop, from1 => $from, from2 => $from);
    };
}
// Benchmark for a ternary method `$trinop` over three input streams.
#[macro_export]
macro_rules! bench_trinop {
    ($name: ident, $desc: expr, op => $trinop: ident, from1 => $from1:expr, from2 => $from2:expr, from3 => $from3:expr) => {
        pub(crate) fn $name(c: &mut Criterion) {
            const SIZE: usize = 1 << 13;
            let mut rng = support::PCG32::default();
            let inputs1 =
                criterion::black_box((0..SIZE).map(|_| $from1(&mut rng)).collect::<Vec<_>>());
            let inputs2 =
                criterion::black_box((0..SIZE).map(|_| $from2(&mut rng)).collect::<Vec<_>>());
            let inputs3 =
                criterion::black_box((0..SIZE).map(|_| $from3(&mut rng)).collect::<Vec<_>>());
            // pre-fill output vector with some random value
            let mut outputs =
                vec![$from1(&mut rng).$trinop($from2(&mut rng), $from3(&mut rng)); SIZE];
            let mut i = 0;
            c.bench_function($desc, |b| {
                b.iter(|| {
                    i = (i + 1) & (SIZE - 1);
                    // SAFETY: `i` is masked into 0..SIZE, in bounds for all vecs.
                    unsafe {
                        *outputs.get_unchecked_mut(i) = inputs1
                            .get_unchecked(i)
                            .$trinop(*inputs2.get_unchecked(i), *inputs3.get_unchecked(i));
                    }
                })
            });
            criterion::black_box(outputs);
        }
    };
}
// Benchmark for `$ty::select(mask, a, b)`.
// NOTE(review): `vec![value; SIZE]` repeats ONE value, so every entry of
// `masks` is identical — confirm whether per-element random masks were
// intended here.
#[macro_export]
macro_rules! bench_select {
    ($name:ident, $desc:expr, ty => $ty: ident, op => $op: ident, from => $from:expr) => {
        pub(crate) fn $name(c: &mut Criterion) {
            const SIZE: usize = 1 << 13;
            let mut rng = support::PCG32::default();
            let inputs1 =
                criterion::black_box((0..SIZE).map(|_| $from(&mut rng)).collect::<Vec<_>>());
            let inputs2 =
                criterion::black_box((0..SIZE).map(|_| $from(&mut rng)).collect::<Vec<_>>());
            let masks = vec![$from(&mut rng).$op($from(&mut rng)); SIZE];
            // pre-fill output vector with some random value
            let mut outputs = vec![$from(&mut rng); SIZE];
            let mut i = 0;
            c.bench_function($desc, |b| {
                b.iter(|| {
                    i = (i + 1) & (SIZE - 1);
                    // SAFETY: `i` is masked into 0..SIZE, in bounds for all vecs.
                    unsafe {
                        *outputs.get_unchecked_mut(i) = $ty::select(
                            *masks.get_unchecked(i),
                            *inputs1.get_unchecked(i),
                            *inputs2.get_unchecked(i),
                        );
                    }
                })
            });
            criterion::black_box(outputs);
        }
    };
}
// Benchmark for building `$ty` from yaw/pitch/roll Euler angles.
#[macro_export]
macro_rules! bench_from_ypr {
    ($name: ident, $desc: expr, ty => $ty:ty) => {
        pub(crate) fn $name(c: &mut Criterion) {
            const SIZE: usize = 1 << 13;
            let mut rng = support::PCG32::default();
            let inputs = criterion::black_box(
                (0..SIZE)
                    .map(|_| {
                        (
                            random_radians(&mut rng),
                            random_radians(&mut rng),
                            random_radians(&mut rng),
                        )
                    })
                    .collect::<Vec<_>>(),
            );
            let mut outputs = vec![<$ty>::default(); SIZE];
            let mut i = 0;
            c.bench_function($desc, |b| {
                b.iter(|| {
                    i = (i + 1) & (SIZE - 1);
                    // SAFETY: `i` is masked into 0..SIZE, in bounds for both vecs.
                    unsafe {
                        let data = inputs.get_unchecked(i);
                        *outputs.get_unchecked_mut(i) =
                            <$ty>::from_euler(glam::EulerRot::YXZ, data.0, data.1, data.2)
                    }
                })
            });
            // FIX: every sibling bench_* macro ends with this call; without
            // it the writes to `outputs` are dead and the optimizer may
            // discard the benchmarked work entirely.
            criterion::black_box(outputs);
        }
    };
}
// Benchmark simulating a simple Euler integration step (velocity +=
// accel*dt; position += velocity*dt) over 10k objects, converting between
// the storage type `$storage` and the math type `$t` each iteration.
#[macro_export]
macro_rules! euler {
    ($name: ident, $desc: expr, ty => $t: ty, storage => $storage: ty, zero => $zero: expr, rand => $rand: ident) => {
        pub(crate) fn $name(c: &mut Criterion) {
            const UPDATE_RATE: f32 = 1.0 / 60.0;
            const NUM_OBJECTS: usize = 10000;
            struct TestData {
                acc: Vec<$storage>,
                vel: Vec<$storage>,
                pos: Vec<$storage>,
            }
            let mut rng = support::PCG32::default();
            let mut data = TestData {
                acc: vec![$rand(&mut rng); NUM_OBJECTS],
                vel: vec![$zero; NUM_OBJECTS],
                pos: vec![$zero; NUM_OBJECTS],
            };
            let dt = <$t>::splat(UPDATE_RATE);
            c.bench_function($desc, |b| {
                b.iter(|| {
                    for ((position, acceleration), velocity) in
                        data.pos.iter_mut().zip(&data.acc).zip(&mut data.vel)
                    {
                        let local_acc: $t = (*acceleration).into();
                        let mut local_pos: $t = (*position).into();
                        let mut local_vel: $t = (*velocity).into();
                        local_vel += local_acc * dt;
                        local_pos += local_vel * dt;
                        *velocity = local_vel.into();
                        *position = local_pos.into();
                    }
                })
            });
        }
    };
}
|
use {
data::semantics::{
properties::{CuiProperty, Property},
Semantics, Value,
},
proc_macro2::TokenStream,
quote::quote,
};
impl Semantics {
	/// Emits the code that applies every kind of rule attached to
	/// `group_id`: child elements, classes, listeners, variables and
	/// properties, concatenated in that order.
	fn static_render_all(&self, group_id: usize) -> TokenStream {
		let elements = self.static_render_elements(group_id);
		let classes = self.static_render_classes(group_id);
		let listeners = self.static_render_listeners(group_id);
		let variables = self.static_render_variables(group_id);
		let properties = self.static_render_properties(group_id);
		quote! {
			#elements
			#classes
			#listeners
			#variables
			#properties
		}
	}
fn static_render_elements(&self, group_id: usize) -> TokenStream {
if self.groups[group_id].elements.is_empty() {
return quote! {};
}
let elements = self.groups[group_id].elements.iter().map(|&element_id| {
let rules = self.static_render_all(element_id);
let tag = self.groups[element_id].tag;
let class_names = &self.groups[element_id].class_names;
quote! {
element.append_child({
let mut element = static_render_element(#tag, vec![#( #class_names ),*], &mut classes);
#rules
&element.into()
}).unwrap();
}
});
quote! {
while let Some(child) = element.last_element_child() {
element.remove_child(&child.dyn_into::<Node>().unwrap()).unwrap();
}
#( #elements )*
}
}
fn static_render_classes(&self, group_id: usize) -> TokenStream {
self.groups[group_id]
.classes
.iter()
.flat_map(|(_, groups)| groups.iter())
.map(|&class_id| {
let selector = self.groups[class_id]
.selector
.as_ref()
.expect("dynamic classes should have selectors");
let rules = self.static_render_all(class_id);
let queue = self.static_register_all(class_id);
quote! {
let elements = document.get_elements_by_class_name(#selector);
for i in 0..elements.length() {
let mut element = elements
.item(i)
.unwrap()
.dyn_into::<HtmlElement>()
.unwrap();
#rules
}
let mut class = classes.entry(#selector).or_insert(Group::default());
#queue
}
})
.collect()
}
pub fn static_render_listeners(&self, group_id: usize) -> TokenStream {
self.groups[group_id]
.listeners
.iter()
.map(|&listener_id| {
let rules = self.static_render_all(listener_id);
let event = match &**self.groups[listener_id]
.name
.as_ref()
.expect("every listener should have an event id")
{
"blur" => quote! { set_onblur },
"focus" => quote! { set_onfocus },
"click" => quote! { set_onclick },
"mouseover" => quote! { set_onmouseover },
"mouseenter" => quote! { set_onmouseenter },
"mouseleave" => quote! { set_onmouseleave },
"mouseout" => quote! { set_onmouseout },
_ => panic!("unknown event id"),
};
quote! {
let closure = {
let mut element = element.clone();
Closure::wrap(Box::new(move |e: Event| {
e.stop_propagation();
let window = web_sys::window().unwrap();
let document = window.document().unwrap();
STATE.with(|state| {
CLASSES.with(|classes| {
let mut classes = classes.borrow_mut();
#rules
});
});
}) as Box<dyn FnMut(Event)>)
};
element.#event(Some(closure.as_ref().unchecked_ref()));
closure.forget();
}
})
.collect()
}
fn static_render_variables(&self, group_id: usize) -> TokenStream {
self.groups[group_id]
.variables
.iter()
.map(|(_, _)| {
// let value = self.get_static(&value).to_string();
quote! {}
})
.collect()
}
fn static_render_value(&self, value: &Value) -> TokenStream {
if let &Value::Variable(variable_id, _) = value {
if let (_, Some(mutable_id)) = self.variables[variable_id] {
return quote! { state[#mutable_id] };
}
}
quote! { #value }
}
fn static_render_properties(&self, group_id: usize) -> TokenStream {
let properties = &self.groups[group_id].properties;
let mut effects = Vec::new();
if let Some(value) = properties.get(&Property::Cui(CuiProperty::Text)) {
let value = self.static_render_value(value);
effects.push(quote! { element.text(#value); });
}
// if let Some(_value) = properties.get(&Property::Cui(CuiProperty::Link)) {
// effects.push(quote! {});
// }
for (property, value) in properties {
if let Property::Css(property) = property {
effects.push(quote! { element.css(#property, #value); });
}
}
effects.into_iter().collect()
}
}
|
use glow::HasContext;
use std::convert::TryInto;
use std::ops::Deref;
use luminance::shader::program::Program;
use crate::{
VertexSemantics,
pipeline::{
ShaderInterface,
shader::IsShader,
texture::Texture,
},
};
/// Behaviour shared by material types: uploading uniform fields, binding the
/// main texture, and exposing the underlying shader program.
pub trait IsMaterial {
    /// Uploads the material's uniforms (`material.albedo`, `material.metalness`,
    /// `material.roughness`) to this material's shader program.
    fn upload_fields(&self, gl: &glow::Context);
    /// Binds the material's main texture (if any) to the `TEXTURE_2D` target.
    fn bind_texture(&self, gl: &glow::Context);
    /// Returns the shader program used to render with this material.
    fn program(&self) -> &Program<VertexSemantics, (), ShaderInterface>;
}
/// A material owning its shader, with PBR scalars, an RGBA albedo colour and
/// a borrowed main texture.
pub struct Material<'a> {
    pub shader: Box<dyn IsShader + 'a>,
    /// RGBA albedo colour.
    pub albedo: [f32; 4],
    pub metalness: f32,
    pub roughness: f32,
    /// Texture bound to `TEXTURE_2D` by `bind_texture`.
    pub main_texture: &'a Texture,
}
impl<'a> Material<'a> {
    /// Creates a material from its shader, main texture, RGBA albedo and
    /// metalness/roughness scalars.
    pub fn new(shader: Box<dyn IsShader + 'a>, texture: &'a Texture, albedo: [f32; 4], metalness: f32, roughness: f32) -> Self {
        // Field init shorthand instead of the redundant `field: field` form.
        Self {
            shader,
            albedo,
            metalness,
            roughness,
            main_texture: texture,
        }
    }
}
impl<'a> IsMaterial for Material<'a> {
    /// Uploads albedo, metalness and roughness to the shader's uniforms.
    fn upload_fields(&self, gl: &glow::Context) {
        let handle = self.shader.program().deref().handle();
        // SAFETY contract is glow's: `handle` belongs to the live program above.
        unsafe {
            let albedo_loc = gl.get_uniform_location(handle, "material.albedo");
            gl.uniform_4_f32(albedo_loc, self.albedo[0], self.albedo[1], self.albedo[2], self.albedo[3]);
            let metalness_loc = gl.get_uniform_location(handle, "material.metalness");
            gl.uniform_1_f32(metalness_loc, self.metalness);
            let roughness_loc = gl.get_uniform_location(handle, "material.roughness");
            gl.uniform_1_f32(roughness_loc, self.roughness);
        }
    }
    /// Binds the material's main texture to the `TEXTURE_2D` target.
    fn bind_texture(&self, gl: &glow::Context) {
        unsafe { gl.bind_texture(glow::TEXTURE_2D, Some(self.main_texture.gl_texture)); }
    }
    fn program(&self) -> &Program<VertexSemantics, (), ShaderInterface> {
        // `program()` already returns a reference; the previous `&` produced a
        // `&&Program` that only compiled via deref coercion.
        self.shader.program()
    }
}
/// Reference impl delegates to the value impl so the two cannot drift apart
/// (the previous version duplicated every method body verbatim).
impl<'a> IsMaterial for &Material<'a> {
    fn upload_fields(&self, gl: &glow::Context) {
        (**self).upload_fields(gl)
    }
    fn bind_texture(&self, gl: &glow::Context) {
        (**self).bind_texture(gl)
    }
    fn program(&self) -> &Program<VertexSemantics, (), ShaderInterface> {
        (**self).program()
    }
}
/// Material variant that borrows its shader and carries no texture.
pub struct Material2<'a> {
    // NOTE(review): `Box<&dyn IsShader>` boxes a reference (double
    // indirection); a plain `&'a dyn IsShader` would likely suffice — confirm
    // with callers before changing this public field type.
    pub shader: Box<&'a dyn IsShader>,
    /// RGBA albedo colour.
    pub albedo: [f32; 4],
    pub metalness: f32,
    pub roughness: f32,
}
impl<'a> Material2<'a> {
    /// Creates a texture-less material from its shader, RGBA albedo and
    /// metalness/roughness scalars.
    pub fn new(shader: Box<&'a dyn IsShader>, albedo: [f32; 4], metalness: f32, roughness: f32) -> Self {
        // Field init shorthand instead of the redundant `field: field` form.
        Self {
            shader,
            albedo,
            metalness,
            roughness,
        }
    }
}
impl IsMaterial for Material2<'_> {
    /// Uploads albedo, metalness and roughness to the shader's uniforms.
    fn upload_fields(&self, gl: &glow::Context) {
        let handle = self.shader.program().deref().handle();
        unsafe {
            let albedo_loc = gl.get_uniform_location(handle, "material.albedo");
            gl.uniform_4_f32(albedo_loc, self.albedo[0], self.albedo[1], self.albedo[2], self.albedo[3]);
            let metalness_loc = gl.get_uniform_location(handle, "material.metalness");
            gl.uniform_1_f32(metalness_loc, self.metalness);
            let roughness_loc = gl.get_uniform_location(handle, "material.roughness");
            gl.uniform_1_f32(roughness_loc, self.roughness);
        }
    }
    /// `Material2` has no texture, so this is intentionally a no-op.
    fn bind_texture(&self, gl: &glow::Context) {
        let _ = gl;
    }
    fn program(&self) -> &Program<VertexSemantics, (), ShaderInterface> {
        // `program()` already returns a reference; the previous `&` produced a
        // `&&Program` that only compiled via deref coercion.
        self.shader.program()
    }
}
/// Reference impl delegates to the value impl so the two cannot drift apart
/// (the previous version duplicated every method body verbatim).
impl IsMaterial for &Material2<'_> {
    fn upload_fields(&self, gl: &glow::Context) {
        (**self).upload_fields(gl)
    }
    fn bind_texture(&self, gl: &glow::Context) {
        (**self).bind_texture(gl)
    }
    fn program(&self) -> &Program<VertexSemantics, (), ShaderInterface> {
        (**self).program()
    }
}
|
use crate::html::Element;
/// Options for `gen_mock_element`; empty fields fall back to the defaults
/// chosen there (`"div"` for name, `"mock"` for id and class).
pub struct ElementMockOption {
    pub name: String,
    pub id: String,
    pub class: String,
}
impl ElementMockOption {
    /// Creates an option set with every field empty, so `gen_mock_element`
    /// will apply its defaults for all of them.
    pub fn new() -> ElementMockOption {
        ElementMockOption {
            // `String::new()` is the idiomatic empty string (was `String::from("")`).
            name: String::new(),
            id: String::new(),
            class: String::new(),
        }
    }
}
/// Builds a mock `Element`, substituting `"div"` for an empty name and
/// `"mock"` for an empty id or class.
pub fn gen_mock_element(option: ElementMockOption) -> Element {
    // Pick the field's value unless it is empty, in which case use the fallback.
    fn pick<'a>(value: &'a str, fallback: &'a str) -> &'a str {
        if value.is_empty() {
            fallback
        } else {
            value
        }
    }
    let name = pick(option.name.as_str(), "div");
    let id = pick(option.id.as_str(), "mock");
    let class = pick(option.class.as_str(), "mock");
    let mut elem = Element::new(String::from(name));
    elem.set_attr("class", class);
    elem.set_attr("id", id);
    elem
}
|
use byteorder::*;
use fs2::FileExt;
use memmap::{Mmap, Protection};
use std::fs::{OpenOptions, File};
use std::io::{Seek, SeekFrom};
use std::path::Path;
use types::*;
/// A mapping from a message's sequence number to its byte offset in the log file.
///
/// A `MsgOffsets` index is always backed by a file. The file is a cache, and may be empty or
/// contain only a prefix of the messages. The file must be stored on a filesystem capable of
/// backing memory maps (ie. be careful with NFS).
///
/// Each index entry is a fixed 8-byte big-endian `u64`, so entry `i` lives at
/// byte `i * 8` and lookups are O(1).
//
// TODO: Make the index density configurable. Currently it's 1, but it could easily be 1/2, 1/3
// etc. at the expense of doing more reads/seeks.
#[derive(Debug)]
pub struct MsgOffsets(Mmap);
/// We assume that the given index file correctly maps sequence numbers to offsets into the given
/// log file, up to a certain message, but that the log may contain new data which was appended to
/// it since the index was last written. This function brings the index up-to-date by starting
/// where the index leaves off and, from there, jumping through the log file to find the offsets of
/// subsequent messages. These offsets are written back to the index file.
fn update_index(log_path: &Path, idx_file: &mut File) {
    let mut log_file = File::open(log_path).unwrap();
    // Last indexed byte offset: the final u64 entry of the index file, or 0
    // when the index is empty (seeking to End(-8) fails on a short file).
    let last_offset = match idx_file.seek(SeekFrom::End(-8)) {
        Ok(_) => idx_file.read_u64::<BigEndian>().expect("Read last entry in index file"),
        Err(_) => 0,
    };
    // NOTE(review): this subtraction underflows (panics in debug builds) if
    // the log was truncated below `last_offset` after the index was written —
    // confirm that case cannot occur.
    let new_data = log_file.metadata().expect("Query log file metadata").len() - last_offset;
    if new_data < 8 {
        info!("The index file is already up-to-date");
    } else {
        info!("The log file has grown by {} bytes since the index was last written. Updating...", new_data);
        log_file.seek(SeekFrom::Start(last_offset)).expect("Seek to last offset");
        // Walk the log: read each record's leading big-endian u64 `len`,
        // advance `len - 8` bytes from the current position, and append the
        // resulting offset to the index (idx_file is opened in append mode).
        // Stop at the first failed read or seek (end of log).
        // NOTE(review): the offset recorded is the post-skip position — verify
        // this lines up with `lookup`'s expectation that entry i holds the
        // offset of message i.
        loop {
            if let Ok(len) = log_file.read_u64::<BigEndian>() {
                if let Ok(offset) = log_file.seek(SeekFrom::Current(len as i64 - 8)) {
                    idx_file.write_u64::<BigEndian>(offset).expect("Write entry to index file");
                } else {
                    break;
                } } else { break; }
        }
    }
}
impl MsgOffsets {
    /// Load an index from a file, updating it if necessary. The file is created if it doesn't
    /// exist already.
    ///
    /// The index file is exclusively flock'd for the lifetime of the mapping
    /// to reduce the risk of concurrent modification under the mmap.
    pub fn load(log_path: &Path, idx_path: &Path) -> MsgOffsets {
        let mut idx_file = OpenOptions::new()
            .read(true).append(true).create(true)
            .open(idx_path).expect("Open index file");
        idx_file.lock_exclusive().expect("Lock index file"); // Try to make mmaping safer
        update_index(log_path, &mut idx_file);
        let msg_offsets = MsgOffsets(Mmap::open(&idx_file, Protection::Read).unwrap());
        info!("Done loading msg offsets (last message: {:?})", msg_offsets.last());
        msg_offsets
    }
    /// Returns the byte offset of message `msg`, or `None` when `msg` is past
    /// the last indexed message.
    pub fn lookup(&self, msg: SeqNum) -> Option<ByteOffset> {
        if msg > self.last() { None } else {
            // This is unsafe if the index file is modified concurrently. We make an effort to prevent
            // this by taking a flock. (See `load`).
            unsafe {
                let bs = self.0.as_slice();
                // Each entry is a fixed 8-byte big-endian u64 at `msg * 8`.
                let i = msg.0 as usize * 8;
                let off = BigEndian::read_u64(&bs[i..i+8]);
                Some(ByteOffset(off))
            }
        }
    }
    /// Highest sequence number covered by the index.
    // NOTE(review): `(len / 8) - 1` underflows when the index file is empty
    // (len == 0) — panic in debug, wrap in release. Confirm `load` guarantees
    // at least one entry before `last` is called.
    pub fn last(&self) -> SeqNum {
        SeqNum(((self.0.len() / 8) - 1) as u64)
    }
}
|
use std::collections::HashMap;
/// Lookup table from codon strings to their names.
// NOTE(review): the type name violates Rust's UpperCamelCase convention
// (`Codon`); left unchanged because callers reference `codon` directly.
pub struct codon<'a> {
    names: HashMap<&'a str, &'a str>
}
/// Builds a `codon` lookup table from `(codon, name)` pairs. Later duplicates
/// of the same codon overwrite earlier ones (`HashMap` insert semantics).
pub fn parse<'a>(pairs: Vec<(&'a str, &'a str)>) -> codon<'a> {
    // Consume the Vec directly; `.iter().cloned()` needlessly copied the
    // (already `Copy`) pairs out of a vector we own anyway.
    codon { names: pairs.into_iter().collect() }
}
impl <'a>codon<'a> {
    /// Looks up the name for codon `s`, first normalising IUPAC ambiguity
    /// codes to a single representative base, and returns `Err("Not found")`
    /// when the normalised codon is absent from the table.
    pub fn name_for(&self, s: &'a str) -> Result<&'a str, &'static str> {
        // Collapse each ambiguity code onto one canonical base;
        // everything else passes through unchanged.
        fn canonical(c: char) -> char {
            match c {
                'Y' | 'S' | 'B' => 'C',
                'K' => 'G',
                'R' | 'M' | 'W' | 'D' | 'H' | 'V' | 'N' => 'A',
                other => other,
            }
        }
        let normalised: String = s.chars().map(canonical).collect();
        self.names
            .get(&normalised.as_ref())
            .copied()
            .ok_or("Not found")
    }
}
|
// Copyright (c) The Starcoin Core Contributors
// SPDX-License-Identifier: Apache-2.0
use crate::MyWorld;
use cucumber::{Steps, StepsBuilder};
use scmd::{CmdContext, Command};
use serde_json::Value;
use starcoin_cmd::dev::GetCoinCommand;
use starcoin_cmd::node::{InfoCommand, PeersCommand};
use starcoin_cmd::view::{AccountWithStateView, NodeInfoView, PeerInfoView, TransactionView};
use starcoin_cmd::wallet::{CreateCommand, ListCommand, ShowCommand, UnlockCommand};
use starcoin_cmd::{wallet, CliState, StarcoinOpt};
use starcoin_logger::prelude::*;
use starcoin_wallet_api::WalletAccount;
/// Builds the cucumber step definitions exercising the starcoin CLI:
/// node info/peers, wallet list/show/create/unlock, dev get_coin, and a
/// generic "cmd cli" step that forwards whitespace-split arguments.
///
/// Each step rebuilds a fresh `CliState`/`CmdContext` from the world's RPC
/// client and runs one command through `exec_with_args`.
// NOTE(review): `world.rpc_client.as_ref().take()` calls `take` on a
// temporary `Option<&_>`, which is equivalent to plain `.as_ref()` — the
// world's option is never actually cleared. Confirm whether clearing was
// intended.
pub fn steps() -> Steps<MyWorld> {
    let mut builder: StepsBuilder<MyWorld> = Default::default();
    builder
        .then("[cmd] node info", |world: &mut MyWorld, _step| {
            let client = world.rpc_client.as_ref().take().unwrap();
            let node_info = client.clone().node_info().unwrap();
            let state = CliState::new(node_info.net, client.clone(), None);
            let context = CmdContext::<CliState, StarcoinOpt>::with_state(state);
            // let context = world.context.as_mut().take().unwrap( );
            let result = context
                .command(Command::with_name("node").subcommand(InfoCommand))
                .exec_with_args::<NodeInfoView>(vec!["starcoin", "node", "info"])
                .unwrap();
            info!("result:{:?}", result);
        })
        .then("[cmd] node peers", |world: &mut MyWorld, _step| {
            let client = world.rpc_client.as_ref().take().unwrap();
            let node_info = client.clone().node_info().unwrap();
            let state = CliState::new(node_info.net, client.clone(), None);
            let context = CmdContext::<CliState, StarcoinOpt>::with_state(state);
            // let context = world.context.as_mut().take().unwrap( );
            let result = context
                .command(Command::with_name("node").subcommand(PeersCommand))
                .exec_with_args::<PeerInfoView>(vec!["starcoin", "node", "peers"])
                .unwrap();
            info!("result:{:?}", result);
        })
        .then("[cmd] wallet list", |world: &mut MyWorld, _step| {
            let client = world.rpc_client.as_ref().take().unwrap();
            let node_info = client.clone().node_info().unwrap();
            let state = CliState::new(node_info.net, client.clone(), None);
            // let state = world.cli_state.take().unwrap();
            let context = CmdContext::<CliState, StarcoinOpt>::with_state(state);
            let mut list_result = context
                .command(Command::with_name("wallet").subcommand(ListCommand))
                .exec_with_args::<Vec<WalletAccount>>(vec!["starcoin", "wallet", "list"])
                .unwrap();
            info!("wallet list result:{:?}", list_result);
            // Remember the last listed account for later steps.
            world.default_address = Some(list_result.pop().unwrap().address);
        })
        .then("[cmd] wallet show", |world: &mut MyWorld, _step| {
            let client = world.rpc_client.as_ref().take().unwrap();
            let node_info = client.clone().node_info().unwrap();
            let state = CliState::new(node_info.net, client.clone(), None);
            let context = CmdContext::<CliState, StarcoinOpt>::with_state(state);
            let show_result = context
                .command(Command::with_name("wallet").subcommand(ShowCommand))
                .exec_with_args::<AccountWithStateView>(vec!["starcoin", "wallet", "show"])
                .unwrap();
            info!("wallet show result:{:?}", show_result);
        })
        .then_regex(
            r#"dev get_coin "([^"]*)""#,
            |world: &mut MyWorld, args, _step| {
                // args[1] is the quoted capture group: the coin amount.
                let amount = args[1].as_str();
                let client = world.rpc_client.as_ref().take().unwrap();
                let node_info = client.clone().node_info().unwrap();
                let state = CliState::new(node_info.net, client.clone(), None);
                let context = CmdContext::<CliState, StarcoinOpt>::with_state(state);
                let get_result = context
                    .command(Command::with_name("dev").subcommand(GetCoinCommand))
                    .exec_with_args::<TransactionView>(vec![
                        "starcoin", "dev", "get_coin", "-v", amount,
                    ])
                    .unwrap();
                info!("get coin result:{:?}", get_result);
            },
        )
        .then_regex(
            r#"wallet create "([^"]*)""#,
            |world: &mut MyWorld, args, _step| {
                // args[1] is the quoted capture group: the wallet password.
                let password = args[1].as_str();
                let client = world.rpc_client.as_ref().take().unwrap();
                let node_info = client.clone().node_info().unwrap();
                let state = CliState::new(node_info.net, client.clone(), None);
                let context = CmdContext::<CliState, StarcoinOpt>::with_state(state);
                let create_result = context
                    .command(Command::with_name("wallet").subcommand(CreateCommand))
                    .exec_with_args::<WalletAccount>(vec![
                        "starcoin", "wallet", "create", "-p", password,
                    ])
                    .unwrap();
                // Remember the created account for later transaction steps.
                world.txn_account = Some(create_result.clone());
                info!("wallet create result:{:?}", create_result);
            },
        )
        .then_regex(
            r#"wallet unlock password:"([^"]*)""#,
            |world: &mut MyWorld, args, _step| {
                let password = args[1].as_str();
                let client = world.rpc_client.as_ref().take().unwrap();
                let node_info = client.clone().node_info().unwrap();
                let state = CliState::new(node_info.net, client.clone(), None);
                let context = CmdContext::<CliState, StarcoinOpt>::with_state(state);
                let unlock_result = context
                    .command(Command::with_name("wallet").subcommand(UnlockCommand))
                    .exec_with_args::<String>(vec![
                        "starcoin",
                        "wallet",
                        "unlock",
                        "account_address",
                        "-p",
                        password,
                    ])
                    .unwrap();
                info!("wallet unlock result:{:?}", unlock_result);
            },
        )
        .then_regex(
            r#"cmd cli: "([^"]*)""#,
            |world: &mut MyWorld, args, _step| {
                let client = world.rpc_client.as_ref().take().unwrap();
                let node_info = client.clone().node_info().unwrap();
                let state = CliState::new(node_info.net, client.clone(), None);
                let context = CmdContext::<CliState, StarcoinOpt>::with_state(state);
                // world.context = Some(context);
                // Forward the captured text as whitespace-split CLI arguments.
                let mut vec = vec![];
                vec.push("starcoin");
                for parameter in args[1].as_str().split_whitespace() {
                    vec.push(parameter);
                }
                let result = context
                    .command(
                        Command::with_name("wallet")
                            .subcommand(wallet::CreateCommand)
                            .subcommand(wallet::ShowCommand),
                    )
                    .exec_with_args::<Value>(vec)
                    .unwrap();
                println!("cmd cli: {:?}", result);
                info!("cmd cli: {:?}", result);
            },
        );
    builder.build()
}
|
#![doc = "generated by AutoRust 0.1.0"]
#![allow(unused_mut)]
#![allow(unused_variables)]
#![allow(unused_imports)]
use crate::models::*;
use reqwest::StatusCode;
use snafu::{ResultExt, Snafu};
/// Operations on `Microsoft.Migrate/migrateProjects/{project}/databaseInstances`.
pub mod database_instances {
    use crate::models::*;
    use reqwest::StatusCode;
    use snafu::{ResultExt, Snafu};
    /// Lists the database instances of a migrate project, with optional
    /// `continuationToken`/`pageSize` paging and an `Accept-Language` header.
    ///
    /// Returns the deserialized collection on HTTP 200; any other status is
    /// surfaced as `Error::UnexpectedResponse` with the raw body.
    pub async fn enumerate_database_instances(
        operation_config: &crate::OperationConfig,
        subscription_id: &str,
        resource_group_name: &str,
        migrate_project_name: &str,
        continuation_token: Option<&str>,
        page_size: Option<i64>,
        accept_language: Option<&str>,
    ) -> std::result::Result<DatabaseInstanceCollection, enumerate_database_instances::Error> {
        let client = &operation_config.client;
        let uri_str = &format!(
            "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Migrate/migrateProjects/{}/databaseInstances",
            &operation_config.base_path, subscription_id, resource_group_name, migrate_project_name
        );
        let mut req_builder = client.get(uri_str);
        if let Some(token_credential) = &operation_config.token_credential {
            let token_response = token_credential
                .get_token(&operation_config.token_credential_resource)
                .await
                .context(enumerate_database_instances::GetTokenError)?;
            req_builder = req_builder.bearer_auth(token_response.token.secret());
        }
        req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]);
        if let Some(continuation_token) = continuation_token {
            req_builder = req_builder.query(&[("continuationToken", continuation_token)]);
        }
        if let Some(page_size) = page_size {
            req_builder = req_builder.query(&[("pageSize", page_size)]);
        }
        if let Some(accept_language) = accept_language {
            req_builder = req_builder.header("Accept-Language", accept_language);
        }
        let req = req_builder.build().context(enumerate_database_instances::BuildRequestError)?;
        let rsp = client
            .execute(req)
            .await
            .context(enumerate_database_instances::ExecuteRequestError)?;
        match rsp.status() {
            StatusCode::OK => {
                let body: bytes::Bytes = rsp.bytes().await.context(enumerate_database_instances::ResponseBytesError)?;
                let rsp_value: DatabaseInstanceCollection =
                    serde_json::from_slice(&body).context(enumerate_database_instances::DeserializeError { body })?;
                Ok(rsp_value)
            }
            status_code => {
                let body: bytes::Bytes = rsp.bytes().await.context(enumerate_database_instances::ResponseBytesError)?;
                // Field init shorthand (was the redundant `body: body`).
                enumerate_database_instances::UnexpectedResponse { status_code, body }.fail()
            }
        }
    }
    /// Error variants for `enumerate_database_instances`.
    pub mod enumerate_database_instances {
        use crate::{models, models::*};
        use reqwest::StatusCode;
        use snafu::Snafu;
        #[derive(Debug, Snafu)]
        #[snafu(visibility(pub(crate)))]
        pub enum Error {
            UnexpectedResponse { status_code: StatusCode, body: bytes::Bytes },
            BuildRequestError { source: reqwest::Error },
            ExecuteRequestError { source: reqwest::Error },
            ResponseBytesError { source: reqwest::Error },
            DeserializeError { source: serde_json::Error, body: bytes::Bytes },
            GetTokenError { source: azure_core::errors::AzureError },
        }
    }
    /// Fetches one database instance of a migrate project by name.
    ///
    /// Returns the deserialized instance on HTTP 200; any other status is
    /// surfaced as `Error::UnexpectedResponse` with the raw body.
    pub async fn get_database_instance(
        operation_config: &crate::OperationConfig,
        subscription_id: &str,
        resource_group_name: &str,
        migrate_project_name: &str,
        database_instance_name: &str,
        accept_language: Option<&str>,
    ) -> std::result::Result<DatabaseInstance, get_database_instance::Error> {
        let client = &operation_config.client;
        let uri_str = &format!(
            "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Migrate/migrateProjects/{}/databaseInstances/{}",
            &operation_config.base_path, subscription_id, resource_group_name, migrate_project_name, database_instance_name
        );
        let mut req_builder = client.get(uri_str);
        if let Some(token_credential) = &operation_config.token_credential {
            let token_response = token_credential
                .get_token(&operation_config.token_credential_resource)
                .await
                .context(get_database_instance::GetTokenError)?;
            req_builder = req_builder.bearer_auth(token_response.token.secret());
        }
        req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]);
        if let Some(accept_language) = accept_language {
            req_builder = req_builder.header("Accept-Language", accept_language);
        }
        let req = req_builder.build().context(get_database_instance::BuildRequestError)?;
        let rsp = client.execute(req).await.context(get_database_instance::ExecuteRequestError)?;
        match rsp.status() {
            StatusCode::OK => {
                let body: bytes::Bytes = rsp.bytes().await.context(get_database_instance::ResponseBytesError)?;
                let rsp_value: DatabaseInstance =
                    serde_json::from_slice(&body).context(get_database_instance::DeserializeError { body })?;
                Ok(rsp_value)
            }
            status_code => {
                let body: bytes::Bytes = rsp.bytes().await.context(get_database_instance::ResponseBytesError)?;
                // Field init shorthand (was the redundant `body: body`).
                get_database_instance::UnexpectedResponse { status_code, body }.fail()
            }
        }
    }
    /// Error variants for `get_database_instance`.
    pub mod get_database_instance {
        use crate::{models, models::*};
        use reqwest::StatusCode;
        use snafu::Snafu;
        #[derive(Debug, Snafu)]
        #[snafu(visibility(pub(crate)))]
        pub enum Error {
            UnexpectedResponse { status_code: StatusCode, body: bytes::Bytes },
            BuildRequestError { source: reqwest::Error },
            ExecuteRequestError { source: reqwest::Error },
            ResponseBytesError { source: reqwest::Error },
            DeserializeError { source: serde_json::Error, body: bytes::Bytes },
            GetTokenError { source: azure_core::errors::AzureError },
        }
    }
}
/// Operations on `Microsoft.Migrate/migrateProjects/{project}/databases`.
pub mod databases {
    use crate::models::*;
    use reqwest::StatusCode;
    use snafu::{ResultExt, Snafu};
    /// Lists the databases of a migrate project, with optional
    /// `continuationToken`/`pageSize` paging and an `Accept-Language` header.
    ///
    /// Returns the deserialized collection on HTTP 200; any other status is
    /// surfaced as `Error::UnexpectedResponse` with the raw body.
    pub async fn enumerate_databases(
        operation_config: &crate::OperationConfig,
        subscription_id: &str,
        resource_group_name: &str,
        migrate_project_name: &str,
        continuation_token: Option<&str>,
        page_size: Option<i64>,
        accept_language: Option<&str>,
    ) -> std::result::Result<DatabaseCollection, enumerate_databases::Error> {
        let client = &operation_config.client;
        let uri_str = &format!(
            "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Migrate/migrateProjects/{}/databases",
            &operation_config.base_path, subscription_id, resource_group_name, migrate_project_name
        );
        let mut req_builder = client.get(uri_str);
        if let Some(token_credential) = &operation_config.token_credential {
            let token_response = token_credential
                .get_token(&operation_config.token_credential_resource)
                .await
                .context(enumerate_databases::GetTokenError)?;
            req_builder = req_builder.bearer_auth(token_response.token.secret());
        }
        req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]);
        if let Some(continuation_token) = continuation_token {
            req_builder = req_builder.query(&[("continuationToken", continuation_token)]);
        }
        if let Some(page_size) = page_size {
            req_builder = req_builder.query(&[("pageSize", page_size)]);
        }
        if let Some(accept_language) = accept_language {
            req_builder = req_builder.header("Accept-Language", accept_language);
        }
        let req = req_builder.build().context(enumerate_databases::BuildRequestError)?;
        let rsp = client.execute(req).await.context(enumerate_databases::ExecuteRequestError)?;
        match rsp.status() {
            StatusCode::OK => {
                let body: bytes::Bytes = rsp.bytes().await.context(enumerate_databases::ResponseBytesError)?;
                let rsp_value: DatabaseCollection =
                    serde_json::from_slice(&body).context(enumerate_databases::DeserializeError { body })?;
                Ok(rsp_value)
            }
            status_code => {
                let body: bytes::Bytes = rsp.bytes().await.context(enumerate_databases::ResponseBytesError)?;
                // Field init shorthand (was the redundant `body: body`).
                enumerate_databases::UnexpectedResponse { status_code, body }.fail()
            }
        }
    }
    /// Error variants for `enumerate_databases`.
    pub mod enumerate_databases {
        use crate::{models, models::*};
        use reqwest::StatusCode;
        use snafu::Snafu;
        #[derive(Debug, Snafu)]
        #[snafu(visibility(pub(crate)))]
        pub enum Error {
            UnexpectedResponse { status_code: StatusCode, body: bytes::Bytes },
            BuildRequestError { source: reqwest::Error },
            ExecuteRequestError { source: reqwest::Error },
            ResponseBytesError { source: reqwest::Error },
            DeserializeError { source: serde_json::Error, body: bytes::Bytes },
            GetTokenError { source: azure_core::errors::AzureError },
        }
    }
    /// Fetches one database of a migrate project by name.
    ///
    /// Returns the deserialized database on HTTP 200; any other status is
    /// surfaced as `Error::UnexpectedResponse` with the raw body.
    pub async fn get_database(
        operation_config: &crate::OperationConfig,
        subscription_id: &str,
        resource_group_name: &str,
        migrate_project_name: &str,
        database_name: &str,
        accept_language: Option<&str>,
    ) -> std::result::Result<Database, get_database::Error> {
        let client = &operation_config.client;
        let uri_str = &format!(
            "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Migrate/migrateProjects/{}/databases/{}",
            &operation_config.base_path, subscription_id, resource_group_name, migrate_project_name, database_name
        );
        let mut req_builder = client.get(uri_str);
        if let Some(token_credential) = &operation_config.token_credential {
            let token_response = token_credential
                .get_token(&operation_config.token_credential_resource)
                .await
                .context(get_database::GetTokenError)?;
            req_builder = req_builder.bearer_auth(token_response.token.secret());
        }
        req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]);
        if let Some(accept_language) = accept_language {
            req_builder = req_builder.header("Accept-Language", accept_language);
        }
        let req = req_builder.build().context(get_database::BuildRequestError)?;
        let rsp = client.execute(req).await.context(get_database::ExecuteRequestError)?;
        match rsp.status() {
            StatusCode::OK => {
                let body: bytes::Bytes = rsp.bytes().await.context(get_database::ResponseBytesError)?;
                let rsp_value: Database = serde_json::from_slice(&body).context(get_database::DeserializeError { body })?;
                Ok(rsp_value)
            }
            status_code => {
                let body: bytes::Bytes = rsp.bytes().await.context(get_database::ResponseBytesError)?;
                // Field init shorthand (was the redundant `body: body`).
                get_database::UnexpectedResponse { status_code, body }.fail()
            }
        }
    }
    /// Error variants for `get_database`.
    pub mod get_database {
        use crate::{models, models::*};
        use reqwest::StatusCode;
        use snafu::Snafu;
        #[derive(Debug, Snafu)]
        #[snafu(visibility(pub(crate)))]
        pub enum Error {
            UnexpectedResponse { status_code: StatusCode, body: bytes::Bytes },
            BuildRequestError { source: reqwest::Error },
            ExecuteRequestError { source: reqwest::Error },
            ResponseBytesError { source: reqwest::Error },
            DeserializeError { source: serde_json::Error, body: bytes::Bytes },
            GetTokenError { source: azure_core::errors::AzureError },
        }
    }
}
pub mod events {
use crate::models::*;
use reqwest::StatusCode;
use snafu::{ResultExt, Snafu};
pub async fn enumerate_events(
operation_config: &crate::OperationConfig,
subscription_id: &str,
resource_group_name: &str,
migrate_project_name: &str,
continuation_token: Option<&str>,
page_size: Option<i64>,
accept_language: Option<&str>,
) -> std::result::Result<EventCollection, enumerate_events::Error> {
let client = &operation_config.client;
let uri_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Migrate/migrateProjects/{}/migrateEvents",
&operation_config.base_path, subscription_id, resource_group_name, migrate_project_name
);
let mut req_builder = client.get(uri_str);
if let Some(token_credential) = &operation_config.token_credential {
let token_response = token_credential
.get_token(&operation_config.token_credential_resource)
.await
.context(enumerate_events::GetTokenError)?;
req_builder = req_builder.bearer_auth(token_response.token.secret());
}
req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]);
if let Some(continuation_token) = continuation_token {
req_builder = req_builder.query(&[("continuationToken", continuation_token)]);
}
if let Some(page_size) = page_size {
req_builder = req_builder.query(&[("pageSize", page_size)]);
}
if let Some(accept_language) = accept_language {
req_builder = req_builder.header("Accept-Language", accept_language);
}
let req = req_builder.build().context(enumerate_events::BuildRequestError)?;
let rsp = client.execute(req).await.context(enumerate_events::ExecuteRequestError)?;
match rsp.status() {
StatusCode::OK => {
let body: bytes::Bytes = rsp.bytes().await.context(enumerate_events::ResponseBytesError)?;
let rsp_value: EventCollection = serde_json::from_slice(&body).context(enumerate_events::DeserializeError { body })?;
Ok(rsp_value)
}
status_code => {
let body: bytes::Bytes = rsp.bytes().await.context(enumerate_events::ResponseBytesError)?;
enumerate_events::UnexpectedResponse { status_code, body: body }.fail()
}
}
}
    /// Error variants for the sibling `enumerate_events` operation.
    pub mod enumerate_events {
        use crate::{models, models::*};
        use reqwest::StatusCode;
        use snafu::Snafu;
        #[derive(Debug, Snafu)]
        #[snafu(visibility(pub(crate)))]
        pub enum Error {
            UnexpectedResponse { status_code: StatusCode, body: bytes::Bytes },
            BuildRequestError { source: reqwest::Error },
            ExecuteRequestError { source: reqwest::Error },
            ResponseBytesError { source: reqwest::Error },
            DeserializeError { source: serde_json::Error, body: bytes::Bytes },
            GetTokenError { source: azure_core::errors::AzureError },
        }
    }
pub async fn get_event(
operation_config: &crate::OperationConfig,
subscription_id: &str,
resource_group_name: &str,
migrate_project_name: &str,
event_name: &str,
) -> std::result::Result<MigrateEvent, get_event::Error> {
let client = &operation_config.client;
let uri_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Migrate/migrateProjects/{}/migrateEvents/{}",
&operation_config.base_path, subscription_id, resource_group_name, migrate_project_name, event_name
);
let mut req_builder = client.get(uri_str);
if let Some(token_credential) = &operation_config.token_credential {
let token_response = token_credential
.get_token(&operation_config.token_credential_resource)
.await
.context(get_event::GetTokenError)?;
req_builder = req_builder.bearer_auth(token_response.token.secret());
}
req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]);
let req = req_builder.build().context(get_event::BuildRequestError)?;
let rsp = client.execute(req).await.context(get_event::ExecuteRequestError)?;
match rsp.status() {
StatusCode::OK => {
let body: bytes::Bytes = rsp.bytes().await.context(get_event::ResponseBytesError)?;
let rsp_value: MigrateEvent = serde_json::from_slice(&body).context(get_event::DeserializeError { body })?;
Ok(rsp_value)
}
status_code => {
let body: bytes::Bytes = rsp.bytes().await.context(get_event::ResponseBytesError)?;
get_event::UnexpectedResponse { status_code, body: body }.fail()
}
}
}
    /// Error variants for the sibling `get_event` operation.
    pub mod get_event {
        use crate::{models, models::*};
        use reqwest::StatusCode;
        use snafu::Snafu;
        #[derive(Debug, Snafu)]
        #[snafu(visibility(pub(crate)))]
        pub enum Error {
            UnexpectedResponse { status_code: StatusCode, body: bytes::Bytes },
            BuildRequestError { source: reqwest::Error },
            ExecuteRequestError { source: reqwest::Error },
            ResponseBytesError { source: reqwest::Error },
            DeserializeError { source: serde_json::Error, body: bytes::Bytes },
            GetTokenError { source: azure_core::errors::AzureError },
        }
    }
/// Deletes the named migrate event from the given migrate project.
///
/// Issues `DELETE .../migrateProjects/{name}/migrateEvents/{event}`; succeeds
/// only on HTTP 200.
///
/// # Errors
/// Returns [`delete_event::Error`] on token/request/transport failure or any
/// non-200 status.
pub async fn delete_event(
    operation_config: &crate::OperationConfig,
    subscription_id: &str,
    resource_group_name: &str,
    migrate_project_name: &str,
    event_name: &str,
) -> std::result::Result<(), delete_event::Error> {
    let client = &operation_config.client;
    // Own the URL string instead of borrowing a `format!` temporary.
    let uri_str = format!(
        "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Migrate/migrateProjects/{}/migrateEvents/{}",
        &operation_config.base_path, subscription_id, resource_group_name, migrate_project_name, event_name
    );
    let mut req_builder = client.delete(uri_str.as_str());
    if let Some(token_credential) = &operation_config.token_credential {
        let token_response = token_credential
            .get_token(&operation_config.token_credential_resource)
            .await
            .context(delete_event::GetTokenError)?;
        req_builder = req_builder.bearer_auth(token_response.token.secret());
    }
    req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]);
    let req = req_builder.build().context(delete_event::BuildRequestError)?;
    let rsp = client.execute(req).await.context(delete_event::ExecuteRequestError)?;
    match rsp.status() {
        StatusCode::OK => Ok(()),
        status_code => {
            let body: bytes::Bytes = rsp.bytes().await.context(delete_event::ResponseBytesError)?;
            // Field-init shorthand (was `body: body`).
            delete_event::UnexpectedResponse { status_code, body }.fail()
        }
    }
}
pub mod delete_event {
use crate::{models, models::*};
use reqwest::StatusCode;
use snafu::Snafu;
#[derive(Debug, Snafu)]
#[snafu(visibility(pub(crate)))]
pub enum Error {
UnexpectedResponse { status_code: StatusCode, body: bytes::Bytes },
BuildRequestError { source: reqwest::Error },
ExecuteRequestError { source: reqwest::Error },
ResponseBytesError { source: reqwest::Error },
DeserializeError { source: serde_json::Error, body: bytes::Bytes },
GetTokenError { source: azure_core::errors::AzureError },
}
}
}
/// Operations for machines tracked inside a migrate project.
pub mod machines {
    use crate::models::*;
    use reqwest::StatusCode;
    use snafu::{ResultExt, Snafu};
    /// Enumerates machines in a migrate project, with optional continuation-token
    /// paging and page size.
    ///
    /// # Errors
    /// Returns [`enumerate_machines::Error`] on token/request/transport/decode
    /// failure or any non-200 status.
    pub async fn enumerate_machines(
        operation_config: &crate::OperationConfig,
        subscription_id: &str,
        resource_group_name: &str,
        migrate_project_name: &str,
        continuation_token: Option<&str>,
        page_size: Option<i64>,
    ) -> std::result::Result<MachineCollection, enumerate_machines::Error> {
        let client = &operation_config.client;
        // Own the URL string instead of borrowing a `format!` temporary.
        let uri_str = format!(
            "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Migrate/migrateProjects/{}/machines",
            &operation_config.base_path, subscription_id, resource_group_name, migrate_project_name
        );
        let mut req_builder = client.get(uri_str.as_str());
        if let Some(token_credential) = &operation_config.token_credential {
            let token_response = token_credential
                .get_token(&operation_config.token_credential_resource)
                .await
                .context(enumerate_machines::GetTokenError)?;
            req_builder = req_builder.bearer_auth(token_response.token.secret());
        }
        req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]);
        if let Some(continuation_token) = continuation_token {
            req_builder = req_builder.query(&[("continuationToken", continuation_token)]);
        }
        if let Some(page_size) = page_size {
            req_builder = req_builder.query(&[("pageSize", page_size)]);
        }
        let req = req_builder.build().context(enumerate_machines::BuildRequestError)?;
        let rsp = client.execute(req).await.context(enumerate_machines::ExecuteRequestError)?;
        match rsp.status() {
            StatusCode::OK => {
                let body: bytes::Bytes = rsp.bytes().await.context(enumerate_machines::ResponseBytesError)?;
                let rsp_value: MachineCollection = serde_json::from_slice(&body).context(enumerate_machines::DeserializeError { body })?;
                Ok(rsp_value)
            }
            status_code => {
                let body: bytes::Bytes = rsp.bytes().await.context(enumerate_machines::ResponseBytesError)?;
                // Field-init shorthand (was `body: body`).
                enumerate_machines::UnexpectedResponse { status_code, body }.fail()
            }
        }
    }
    /// Error type for the `enumerate_machines` operation.
    pub mod enumerate_machines {
        // Dropped the redundant plain `models` import (unused alongside the glob).
        use crate::models::*;
        use reqwest::StatusCode;
        use snafu::Snafu;
        // `//` comments only: snafu turns `///` variant docs into Display messages.
        #[derive(Debug, Snafu)]
        #[snafu(visibility(pub(crate)))]
        pub enum Error {
            UnexpectedResponse { status_code: StatusCode, body: bytes::Bytes }, // non-success HTTP status
            BuildRequestError { source: reqwest::Error },                       // request construction failed
            ExecuteRequestError { source: reqwest::Error },                     // request send failed
            ResponseBytesError { source: reqwest::Error },                      // body read failed
            DeserializeError { source: serde_json::Error, body: bytes::Bytes }, // JSON decode failed
            GetTokenError { source: azure_core::errors::AzureError },           // token acquisition failed
        }
    }
    /// Gets a single machine by name from a migrate project.
    ///
    /// # Errors
    /// Returns [`get_machine::Error`] on token/request/transport/decode failure
    /// or any non-200 status.
    pub async fn get_machine(
        operation_config: &crate::OperationConfig,
        subscription_id: &str,
        resource_group_name: &str,
        migrate_project_name: &str,
        machine_name: &str,
    ) -> std::result::Result<Machine, get_machine::Error> {
        let client = &operation_config.client;
        // Own the URL string instead of borrowing a `format!` temporary.
        let uri_str = format!(
            "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Migrate/migrateProjects/{}/machines/{}",
            &operation_config.base_path, subscription_id, resource_group_name, migrate_project_name, machine_name
        );
        let mut req_builder = client.get(uri_str.as_str());
        if let Some(token_credential) = &operation_config.token_credential {
            let token_response = token_credential
                .get_token(&operation_config.token_credential_resource)
                .await
                .context(get_machine::GetTokenError)?;
            req_builder = req_builder.bearer_auth(token_response.token.secret());
        }
        req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]);
        let req = req_builder.build().context(get_machine::BuildRequestError)?;
        let rsp = client.execute(req).await.context(get_machine::ExecuteRequestError)?;
        match rsp.status() {
            StatusCode::OK => {
                let body: bytes::Bytes = rsp.bytes().await.context(get_machine::ResponseBytesError)?;
                let rsp_value: Machine = serde_json::from_slice(&body).context(get_machine::DeserializeError { body })?;
                Ok(rsp_value)
            }
            status_code => {
                let body: bytes::Bytes = rsp.bytes().await.context(get_machine::ResponseBytesError)?;
                // Field-init shorthand (was `body: body`).
                get_machine::UnexpectedResponse { status_code, body }.fail()
            }
        }
    }
    /// Error type for the `get_machine` operation.
    pub mod get_machine {
        // Dropped the redundant plain `models` import (unused alongside the glob).
        use crate::models::*;
        use reqwest::StatusCode;
        use snafu::Snafu;
        // `//` comments only: snafu turns `///` variant docs into Display messages.
        #[derive(Debug, Snafu)]
        #[snafu(visibility(pub(crate)))]
        pub enum Error {
            UnexpectedResponse { status_code: StatusCode, body: bytes::Bytes }, // non-success HTTP status
            BuildRequestError { source: reqwest::Error },                       // request construction failed
            ExecuteRequestError { source: reqwest::Error },                     // request send failed
            ResponseBytesError { source: reqwest::Error },                      // body read failed
            DeserializeError { source: serde_json::Error, body: bytes::Bytes }, // JSON decode failed
            GetTokenError { source: azure_core::errors::AzureError },           // token acquisition failed
        }
    }
}
/// CRUD and action operations on migrate projects.
pub mod migrate_projects {
    use crate::models::*;
    use reqwest::StatusCode;
    use snafu::{ResultExt, Snafu};
    /// Gets the migrate project with the given name.
    ///
    /// # Errors
    /// Returns [`get_migrate_project::Error`] on token/request/transport/decode
    /// failure or any non-200 status.
    pub async fn get_migrate_project(
        operation_config: &crate::OperationConfig,
        subscription_id: &str,
        resource_group_name: &str,
        migrate_project_name: &str,
    ) -> std::result::Result<MigrateProject, get_migrate_project::Error> {
        let client = &operation_config.client;
        // Own the URL string instead of borrowing a `format!` temporary.
        let uri_str = format!(
            "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Migrate/migrateProjects/{}",
            &operation_config.base_path, subscription_id, resource_group_name, migrate_project_name
        );
        let mut req_builder = client.get(uri_str.as_str());
        if let Some(token_credential) = &operation_config.token_credential {
            let token_response = token_credential
                .get_token(&operation_config.token_credential_resource)
                .await
                .context(get_migrate_project::GetTokenError)?;
            req_builder = req_builder.bearer_auth(token_response.token.secret());
        }
        req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]);
        let req = req_builder.build().context(get_migrate_project::BuildRequestError)?;
        let rsp = client.execute(req).await.context(get_migrate_project::ExecuteRequestError)?;
        match rsp.status() {
            StatusCode::OK => {
                let body: bytes::Bytes = rsp.bytes().await.context(get_migrate_project::ResponseBytesError)?;
                let rsp_value: MigrateProject = serde_json::from_slice(&body).context(get_migrate_project::DeserializeError { body })?;
                Ok(rsp_value)
            }
            status_code => {
                let body: bytes::Bytes = rsp.bytes().await.context(get_migrate_project::ResponseBytesError)?;
                // Field-init shorthand (was `body: body`).
                get_migrate_project::UnexpectedResponse { status_code, body }.fail()
            }
        }
    }
    /// Error type for the `get_migrate_project` operation.
    pub mod get_migrate_project {
        // Dropped the redundant plain `models` import (unused alongside the glob).
        use crate::models::*;
        use reqwest::StatusCode;
        use snafu::Snafu;
        // `//` comments only: snafu turns `///` variant docs into Display messages.
        #[derive(Debug, Snafu)]
        #[snafu(visibility(pub(crate)))]
        pub enum Error {
            UnexpectedResponse { status_code: StatusCode, body: bytes::Bytes }, // non-success HTTP status
            BuildRequestError { source: reqwest::Error },                       // request construction failed
            ExecuteRequestError { source: reqwest::Error },                     // request send failed
            ResponseBytesError { source: reqwest::Error },                      // body read failed
            DeserializeError { source: serde_json::Error, body: bytes::Bytes }, // JSON decode failed
            GetTokenError { source: azure_core::errors::AzureError },           // token acquisition failed
        }
    }
    /// Creates or updates a migrate project (PUT); 200 means updated, 201 created.
    ///
    /// # Errors
    /// Returns [`put_migrate_project::Error`] on token/request/transport/decode
    /// failure or any status other than 200/201.
    pub async fn put_migrate_project(
        operation_config: &crate::OperationConfig,
        subscription_id: &str,
        resource_group_name: &str,
        migrate_project_name: &str,
        body: &MigrateProject,
        accept_language: Option<&str>,
    ) -> std::result::Result<put_migrate_project::Response, put_migrate_project::Error> {
        let client = &operation_config.client;
        // Own the URL string instead of borrowing a `format!` temporary.
        let uri_str = format!(
            "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Migrate/migrateProjects/{}",
            &operation_config.base_path, subscription_id, resource_group_name, migrate_project_name
        );
        let mut req_builder = client.put(uri_str.as_str());
        if let Some(token_credential) = &operation_config.token_credential {
            let token_response = token_credential
                .get_token(&operation_config.token_credential_resource)
                .await
                .context(put_migrate_project::GetTokenError)?;
            req_builder = req_builder.bearer_auth(token_response.token.secret());
        }
        req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]);
        req_builder = req_builder.json(body);
        if let Some(accept_language) = accept_language {
            req_builder = req_builder.header("Accept-Language", accept_language);
        }
        let req = req_builder.build().context(put_migrate_project::BuildRequestError)?;
        let rsp = client.execute(req).await.context(put_migrate_project::ExecuteRequestError)?;
        match rsp.status() {
            StatusCode::OK => {
                let body: bytes::Bytes = rsp.bytes().await.context(put_migrate_project::ResponseBytesError)?;
                let rsp_value: MigrateProject = serde_json::from_slice(&body).context(put_migrate_project::DeserializeError { body })?;
                Ok(put_migrate_project::Response::Ok200(rsp_value))
            }
            StatusCode::CREATED => {
                let body: bytes::Bytes = rsp.bytes().await.context(put_migrate_project::ResponseBytesError)?;
                let rsp_value: MigrateProject = serde_json::from_slice(&body).context(put_migrate_project::DeserializeError { body })?;
                Ok(put_migrate_project::Response::Created201(rsp_value))
            }
            status_code => {
                let body: bytes::Bytes = rsp.bytes().await.context(put_migrate_project::ResponseBytesError)?;
                // Field-init shorthand (was `body: body`).
                put_migrate_project::UnexpectedResponse { status_code, body }.fail()
            }
        }
    }
    /// Response and error types for the `put_migrate_project` operation.
    pub mod put_migrate_project {
        // Dropped the redundant plain `models` import; the glob supplies MigrateProject.
        use crate::models::*;
        use reqwest::StatusCode;
        use snafu::Snafu;
        /// Distinguishes HTTP 200 (updated) from HTTP 201 (created).
        #[derive(Debug)]
        pub enum Response {
            Ok200(MigrateProject),
            Created201(MigrateProject),
        }
        // `//` comments only: snafu turns `///` variant docs into Display messages.
        #[derive(Debug, Snafu)]
        #[snafu(visibility(pub(crate)))]
        pub enum Error {
            UnexpectedResponse { status_code: StatusCode, body: bytes::Bytes }, // non-success HTTP status
            BuildRequestError { source: reqwest::Error },                       // request construction failed
            ExecuteRequestError { source: reqwest::Error },                     // request send failed
            ResponseBytesError { source: reqwest::Error },                      // body read failed
            DeserializeError { source: serde_json::Error, body: bytes::Bytes }, // JSON decode failed
            GetTokenError { source: azure_core::errors::AzureError },           // token acquisition failed
        }
    }
    /// Partially updates a migrate project (PATCH) and returns the updated project.
    ///
    /// # Errors
    /// Returns [`patch_migrate_project::Error`] on token/request/transport/decode
    /// failure or any non-200 status.
    pub async fn patch_migrate_project(
        operation_config: &crate::OperationConfig,
        subscription_id: &str,
        resource_group_name: &str,
        migrate_project_name: &str,
        body: &MigrateProject,
        accept_language: Option<&str>,
    ) -> std::result::Result<MigrateProject, patch_migrate_project::Error> {
        let client = &operation_config.client;
        // Own the URL string instead of borrowing a `format!` temporary.
        let uri_str = format!(
            "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Migrate/migrateProjects/{}",
            &operation_config.base_path, subscription_id, resource_group_name, migrate_project_name
        );
        let mut req_builder = client.patch(uri_str.as_str());
        if let Some(token_credential) = &operation_config.token_credential {
            let token_response = token_credential
                .get_token(&operation_config.token_credential_resource)
                .await
                .context(patch_migrate_project::GetTokenError)?;
            req_builder = req_builder.bearer_auth(token_response.token.secret());
        }
        req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]);
        req_builder = req_builder.json(body);
        if let Some(accept_language) = accept_language {
            req_builder = req_builder.header("Accept-Language", accept_language);
        }
        let req = req_builder.build().context(patch_migrate_project::BuildRequestError)?;
        let rsp = client.execute(req).await.context(patch_migrate_project::ExecuteRequestError)?;
        match rsp.status() {
            StatusCode::OK => {
                let body: bytes::Bytes = rsp.bytes().await.context(patch_migrate_project::ResponseBytesError)?;
                let rsp_value: MigrateProject = serde_json::from_slice(&body).context(patch_migrate_project::DeserializeError { body })?;
                Ok(rsp_value)
            }
            status_code => {
                let body: bytes::Bytes = rsp.bytes().await.context(patch_migrate_project::ResponseBytesError)?;
                // Field-init shorthand (was `body: body`).
                patch_migrate_project::UnexpectedResponse { status_code, body }.fail()
            }
        }
    }
    /// Error type for the `patch_migrate_project` operation.
    pub mod patch_migrate_project {
        // Dropped the redundant plain `models` import (unused alongside the glob).
        use crate::models::*;
        use reqwest::StatusCode;
        use snafu::Snafu;
        // `//` comments only: snafu turns `///` variant docs into Display messages.
        #[derive(Debug, Snafu)]
        #[snafu(visibility(pub(crate)))]
        pub enum Error {
            UnexpectedResponse { status_code: StatusCode, body: bytes::Bytes }, // non-success HTTP status
            BuildRequestError { source: reqwest::Error },                       // request construction failed
            ExecuteRequestError { source: reqwest::Error },                     // request send failed
            ResponseBytesError { source: reqwest::Error },                      // body read failed
            DeserializeError { source: serde_json::Error, body: bytes::Bytes }, // JSON decode failed
            GetTokenError { source: azure_core::errors::AzureError },           // token acquisition failed
        }
    }
    /// Deletes the named migrate project; succeeds only on HTTP 200.
    ///
    /// # Errors
    /// Returns [`delete_migrate_project::Error`] on token/request/transport
    /// failure or any non-200 status.
    pub async fn delete_migrate_project(
        operation_config: &crate::OperationConfig,
        subscription_id: &str,
        resource_group_name: &str,
        migrate_project_name: &str,
        accept_language: Option<&str>,
    ) -> std::result::Result<(), delete_migrate_project::Error> {
        let client = &operation_config.client;
        // Own the URL string instead of borrowing a `format!` temporary.
        let uri_str = format!(
            "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Migrate/migrateProjects/{}",
            &operation_config.base_path, subscription_id, resource_group_name, migrate_project_name
        );
        let mut req_builder = client.delete(uri_str.as_str());
        if let Some(token_credential) = &operation_config.token_credential {
            let token_response = token_credential
                .get_token(&operation_config.token_credential_resource)
                .await
                .context(delete_migrate_project::GetTokenError)?;
            req_builder = req_builder.bearer_auth(token_response.token.secret());
        }
        req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]);
        if let Some(accept_language) = accept_language {
            req_builder = req_builder.header("Accept-Language", accept_language);
        }
        let req = req_builder.build().context(delete_migrate_project::BuildRequestError)?;
        let rsp = client.execute(req).await.context(delete_migrate_project::ExecuteRequestError)?;
        match rsp.status() {
            StatusCode::OK => Ok(()),
            status_code => {
                let body: bytes::Bytes = rsp.bytes().await.context(delete_migrate_project::ResponseBytesError)?;
                // Field-init shorthand (was `body: body`).
                delete_migrate_project::UnexpectedResponse { status_code, body }.fail()
            }
        }
    }
    /// Error type for the `delete_migrate_project` operation.
    pub mod delete_migrate_project {
        // Dropped the redundant plain `models` import (unused alongside the glob).
        use crate::models::*;
        use reqwest::StatusCode;
        use snafu::Snafu;
        // `//` comments only: snafu turns `///` variant docs into Display messages.
        #[derive(Debug, Snafu)]
        #[snafu(visibility(pub(crate)))]
        pub enum Error {
            UnexpectedResponse { status_code: StatusCode, body: bytes::Bytes }, // non-success HTTP status
            BuildRequestError { source: reqwest::Error },                       // request construction failed
            ExecuteRequestError { source: reqwest::Error },                     // request send failed
            ResponseBytesError { source: reqwest::Error },                      // body read failed
            DeserializeError { source: serde_json::Error, body: bytes::Bytes }, // JSON decode failed
            GetTokenError { source: azure_core::errors::AzureError },           // token acquisition failed
        }
    }
    /// Registers a tool with the migrate project via the `registerTool` action.
    ///
    /// # Errors
    /// Returns [`register_tool::Error`] on token/request/transport/decode failure
    /// or any non-200 status.
    pub async fn register_tool(
        operation_config: &crate::OperationConfig,
        subscription_id: &str,
        resource_group_name: &str,
        migrate_project_name: &str,
        input: &RegisterToolInput,
        accept_language: Option<&str>,
    ) -> std::result::Result<RegistrationResult, register_tool::Error> {
        let client = &operation_config.client;
        // Own the URL string instead of borrowing a `format!` temporary.
        let uri_str = format!(
            "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Migrate/migrateProjects/{}/registerTool",
            &operation_config.base_path, subscription_id, resource_group_name, migrate_project_name
        );
        let mut req_builder = client.post(uri_str.as_str());
        if let Some(token_credential) = &operation_config.token_credential {
            let token_response = token_credential
                .get_token(&operation_config.token_credential_resource)
                .await
                .context(register_tool::GetTokenError)?;
            req_builder = req_builder.bearer_auth(token_response.token.secret());
        }
        req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]);
        req_builder = req_builder.json(input);
        if let Some(accept_language) = accept_language {
            req_builder = req_builder.header("Accept-Language", accept_language);
        }
        let req = req_builder.build().context(register_tool::BuildRequestError)?;
        let rsp = client.execute(req).await.context(register_tool::ExecuteRequestError)?;
        match rsp.status() {
            StatusCode::OK => {
                let body: bytes::Bytes = rsp.bytes().await.context(register_tool::ResponseBytesError)?;
                let rsp_value: RegistrationResult = serde_json::from_slice(&body).context(register_tool::DeserializeError { body })?;
                Ok(rsp_value)
            }
            status_code => {
                let body: bytes::Bytes = rsp.bytes().await.context(register_tool::ResponseBytesError)?;
                // Field-init shorthand (was `body: body`).
                register_tool::UnexpectedResponse { status_code, body }.fail()
            }
        }
    }
    /// Error type for the `register_tool` operation.
    pub mod register_tool {
        // Dropped the redundant plain `models` import (unused alongside the glob).
        use crate::models::*;
        use reqwest::StatusCode;
        use snafu::Snafu;
        // `//` comments only: snafu turns `///` variant docs into Display messages.
        #[derive(Debug, Snafu)]
        #[snafu(visibility(pub(crate)))]
        pub enum Error {
            UnexpectedResponse { status_code: StatusCode, body: bytes::Bytes }, // non-success HTTP status
            BuildRequestError { source: reqwest::Error },                       // request construction failed
            ExecuteRequestError { source: reqwest::Error },                     // request send failed
            ResponseBytesError { source: reqwest::Error },                      // body read failed
            DeserializeError { source: serde_json::Error, body: bytes::Bytes }, // JSON decode failed
            GetTokenError { source: azure_core::errors::AzureError },           // token acquisition failed
        }
    }
    /// Triggers the `refreshSummary` action and returns its result.
    ///
    /// # Errors
    /// Returns [`refresh_migrate_project_summary::Error`] on token/request/
    /// transport/decode failure or any non-200 status.
    pub async fn refresh_migrate_project_summary(
        operation_config: &crate::OperationConfig,
        subscription_id: &str,
        resource_group_name: &str,
        migrate_project_name: &str,
        input: &RefreshSummaryInput,
    ) -> std::result::Result<RefreshSummaryResult, refresh_migrate_project_summary::Error> {
        let client = &operation_config.client;
        // Own the URL string instead of borrowing a `format!` temporary.
        let uri_str = format!(
            "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Migrate/migrateProjects/{}/refreshSummary",
            &operation_config.base_path, subscription_id, resource_group_name, migrate_project_name
        );
        let mut req_builder = client.post(uri_str.as_str());
        if let Some(token_credential) = &operation_config.token_credential {
            let token_response = token_credential
                .get_token(&operation_config.token_credential_resource)
                .await
                .context(refresh_migrate_project_summary::GetTokenError)?;
            req_builder = req_builder.bearer_auth(token_response.token.secret());
        }
        req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]);
        req_builder = req_builder.json(input);
        let req = req_builder.build().context(refresh_migrate_project_summary::BuildRequestError)?;
        let rsp = client
            .execute(req)
            .await
            .context(refresh_migrate_project_summary::ExecuteRequestError)?;
        match rsp.status() {
            StatusCode::OK => {
                let body: bytes::Bytes = rsp.bytes().await.context(refresh_migrate_project_summary::ResponseBytesError)?;
                let rsp_value: RefreshSummaryResult =
                    serde_json::from_slice(&body).context(refresh_migrate_project_summary::DeserializeError { body })?;
                Ok(rsp_value)
            }
            status_code => {
                let body: bytes::Bytes = rsp.bytes().await.context(refresh_migrate_project_summary::ResponseBytesError)?;
                // Field-init shorthand (was `body: body`).
                refresh_migrate_project_summary::UnexpectedResponse { status_code, body }.fail()
            }
        }
    }
    /// Error type for the `refresh_migrate_project_summary` operation.
    pub mod refresh_migrate_project_summary {
        // Dropped the redundant plain `models` import (unused alongside the glob).
        use crate::models::*;
        use reqwest::StatusCode;
        use snafu::Snafu;
        // `//` comments only: snafu turns `///` variant docs into Display messages.
        #[derive(Debug, Snafu)]
        #[snafu(visibility(pub(crate)))]
        pub enum Error {
            UnexpectedResponse { status_code: StatusCode, body: bytes::Bytes }, // non-success HTTP status
            BuildRequestError { source: reqwest::Error },                       // request construction failed
            ExecuteRequestError { source: reqwest::Error },                     // request send failed
            ResponseBytesError { source: reqwest::Error },                      // body read failed
            DeserializeError { source: serde_json::Error, body: bytes::Bytes }, // JSON decode failed
            GetTokenError { source: azure_core::errors::AzureError },           // token acquisition failed
        }
    }
}
pub mod solutions {
use crate::models::*;
use reqwest::StatusCode;
use snafu::{ResultExt, Snafu};
/// Gets a single solution by name from the given migrate project.
///
/// # Errors
/// Returns [`get_solution::Error`] on token/request/transport/decode failure or
/// any non-200 status.
pub async fn get_solution(
    operation_config: &crate::OperationConfig,
    subscription_id: &str,
    resource_group_name: &str,
    migrate_project_name: &str,
    solution_name: &str,
) -> std::result::Result<Solution, get_solution::Error> {
    let client = &operation_config.client;
    // Own the URL string instead of borrowing a `format!` temporary.
    let uri_str = format!(
        "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Migrate/migrateProjects/{}/solutions/{}",
        &operation_config.base_path, subscription_id, resource_group_name, migrate_project_name, solution_name
    );
    let mut req_builder = client.get(uri_str.as_str());
    if let Some(token_credential) = &operation_config.token_credential {
        let token_response = token_credential
            .get_token(&operation_config.token_credential_resource)
            .await
            .context(get_solution::GetTokenError)?;
        req_builder = req_builder.bearer_auth(token_response.token.secret());
    }
    req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]);
    let req = req_builder.build().context(get_solution::BuildRequestError)?;
    let rsp = client.execute(req).await.context(get_solution::ExecuteRequestError)?;
    match rsp.status() {
        StatusCode::OK => {
            let body: bytes::Bytes = rsp.bytes().await.context(get_solution::ResponseBytesError)?;
            let rsp_value: Solution = serde_json::from_slice(&body).context(get_solution::DeserializeError { body })?;
            Ok(rsp_value)
        }
        status_code => {
            let body: bytes::Bytes = rsp.bytes().await.context(get_solution::ResponseBytesError)?;
            // Field-init shorthand (was `body: body`).
            get_solution::UnexpectedResponse { status_code, body }.fail()
        }
    }
}
pub mod get_solution {
use crate::{models, models::*};
use reqwest::StatusCode;
use snafu::Snafu;
#[derive(Debug, Snafu)]
#[snafu(visibility(pub(crate)))]
pub enum Error {
UnexpectedResponse { status_code: StatusCode, body: bytes::Bytes },
BuildRequestError { source: reqwest::Error },
ExecuteRequestError { source: reqwest::Error },
ResponseBytesError { source: reqwest::Error },
DeserializeError { source: serde_json::Error, body: bytes::Bytes },
GetTokenError { source: azure_core::errors::AzureError },
}
}
/// Creates or updates a solution (PUT); 200 means updated, 201 created.
///
/// # Errors
/// Returns [`put_solution::Error`] on token/request/transport/decode failure or
/// any status other than 200/201.
pub async fn put_solution(
    operation_config: &crate::OperationConfig,
    subscription_id: &str,
    resource_group_name: &str,
    migrate_project_name: &str,
    solution_name: &str,
    solution_input: &Solution,
) -> std::result::Result<put_solution::Response, put_solution::Error> {
    let client = &operation_config.client;
    // Own the URL string instead of borrowing a `format!` temporary.
    let uri_str = format!(
        "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Migrate/migrateProjects/{}/solutions/{}",
        &operation_config.base_path, subscription_id, resource_group_name, migrate_project_name, solution_name
    );
    let mut req_builder = client.put(uri_str.as_str());
    if let Some(token_credential) = &operation_config.token_credential {
        let token_response = token_credential
            .get_token(&operation_config.token_credential_resource)
            .await
            .context(put_solution::GetTokenError)?;
        req_builder = req_builder.bearer_auth(token_response.token.secret());
    }
    req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]);
    req_builder = req_builder.json(solution_input);
    let req = req_builder.build().context(put_solution::BuildRequestError)?;
    let rsp = client.execute(req).await.context(put_solution::ExecuteRequestError)?;
    match rsp.status() {
        StatusCode::OK => {
            let body: bytes::Bytes = rsp.bytes().await.context(put_solution::ResponseBytesError)?;
            let rsp_value: Solution = serde_json::from_slice(&body).context(put_solution::DeserializeError { body })?;
            Ok(put_solution::Response::Ok200(rsp_value))
        }
        StatusCode::CREATED => {
            let body: bytes::Bytes = rsp.bytes().await.context(put_solution::ResponseBytesError)?;
            let rsp_value: Solution = serde_json::from_slice(&body).context(put_solution::DeserializeError { body })?;
            Ok(put_solution::Response::Created201(rsp_value))
        }
        status_code => {
            let body: bytes::Bytes = rsp.bytes().await.context(put_solution::ResponseBytesError)?;
            // Field-init shorthand (was `body: body`).
            put_solution::UnexpectedResponse { status_code, body }.fail()
        }
    }
}
pub mod put_solution {
use crate::{models, models::*};
use reqwest::StatusCode;
use snafu::Snafu;
#[derive(Debug)]
pub enum Response {
Ok200(Solution),
Created201(Solution),
}
#[derive(Debug, Snafu)]
#[snafu(visibility(pub(crate)))]
pub enum Error {
UnexpectedResponse { status_code: StatusCode, body: bytes::Bytes },
BuildRequestError { source: reqwest::Error },
ExecuteRequestError { source: reqwest::Error },
ResponseBytesError { source: reqwest::Error },
DeserializeError { source: serde_json::Error, body: bytes::Bytes },
GetTokenError { source: azure_core::errors::AzureError },
}
}
/// Partially updates a solution (PATCH) and returns the updated solution.
///
/// # Errors
/// Returns [`patch_solution::Error`] on token/request/transport/decode failure
/// or any non-200 status.
pub async fn patch_solution(
    operation_config: &crate::OperationConfig,
    subscription_id: &str,
    resource_group_name: &str,
    migrate_project_name: &str,
    solution_name: &str,
    solution_input: &Solution,
) -> std::result::Result<Solution, patch_solution::Error> {
    let client = &operation_config.client;
    // Own the URL string instead of borrowing a `format!` temporary.
    let uri_str = format!(
        "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Migrate/migrateProjects/{}/solutions/{}",
        &operation_config.base_path, subscription_id, resource_group_name, migrate_project_name, solution_name
    );
    let mut req_builder = client.patch(uri_str.as_str());
    if let Some(token_credential) = &operation_config.token_credential {
        let token_response = token_credential
            .get_token(&operation_config.token_credential_resource)
            .await
            .context(patch_solution::GetTokenError)?;
        req_builder = req_builder.bearer_auth(token_response.token.secret());
    }
    req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]);
    req_builder = req_builder.json(solution_input);
    let req = req_builder.build().context(patch_solution::BuildRequestError)?;
    let rsp = client.execute(req).await.context(patch_solution::ExecuteRequestError)?;
    match rsp.status() {
        StatusCode::OK => {
            let body: bytes::Bytes = rsp.bytes().await.context(patch_solution::ResponseBytesError)?;
            let rsp_value: Solution = serde_json::from_slice(&body).context(patch_solution::DeserializeError { body })?;
            Ok(rsp_value)
        }
        status_code => {
            let body: bytes::Bytes = rsp.bytes().await.context(patch_solution::ResponseBytesError)?;
            // Field-init shorthand (was `body: body`).
            patch_solution::UnexpectedResponse { status_code, body }.fail()
        }
    }
}
pub mod patch_solution {
use crate::{models, models::*};
use reqwest::StatusCode;
use snafu::Snafu;
#[derive(Debug, Snafu)]
#[snafu(visibility(pub(crate)))]
pub enum Error {
UnexpectedResponse { status_code: StatusCode, body: bytes::Bytes },
BuildRequestError { source: reqwest::Error },
ExecuteRequestError { source: reqwest::Error },
ResponseBytesError { source: reqwest::Error },
DeserializeError { source: serde_json::Error, body: bytes::Bytes },
GetTokenError { source: azure_core::errors::AzureError },
}
}
/// Deletes the named solution from the migrate project; succeeds only on HTTP 200.
///
/// # Errors
/// Returns [`delete_solution::Error`] on token/request/transport failure or any
/// non-200 status.
pub async fn delete_solution(
    operation_config: &crate::OperationConfig,
    subscription_id: &str,
    resource_group_name: &str,
    migrate_project_name: &str,
    solution_name: &str,
    accept_language: Option<&str>,
) -> std::result::Result<(), delete_solution::Error> {
    let client = &operation_config.client;
    // Own the URL string instead of borrowing a `format!` temporary.
    let uri_str = format!(
        "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Migrate/migrateProjects/{}/solutions/{}",
        &operation_config.base_path, subscription_id, resource_group_name, migrate_project_name, solution_name
    );
    let mut req_builder = client.delete(uri_str.as_str());
    if let Some(token_credential) = &operation_config.token_credential {
        let token_response = token_credential
            .get_token(&operation_config.token_credential_resource)
            .await
            .context(delete_solution::GetTokenError)?;
        req_builder = req_builder.bearer_auth(token_response.token.secret());
    }
    req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]);
    if let Some(accept_language) = accept_language {
        req_builder = req_builder.header("Accept-Language", accept_language);
    }
    let req = req_builder.build().context(delete_solution::BuildRequestError)?;
    let rsp = client.execute(req).await.context(delete_solution::ExecuteRequestError)?;
    match rsp.status() {
        StatusCode::OK => Ok(()),
        status_code => {
            let body: bytes::Bytes = rsp.bytes().await.context(delete_solution::ResponseBytesError)?;
            // Field-init shorthand (was `body: body`).
            delete_solution::UnexpectedResponse { status_code, body }.fail()
        }
    }
}
pub mod delete_solution {
use crate::{models, models::*};
use reqwest::StatusCode;
use snafu::Snafu;
#[derive(Debug, Snafu)]
#[snafu(visibility(pub(crate)))]
pub enum Error {
UnexpectedResponse { status_code: StatusCode, body: bytes::Bytes },
BuildRequestError { source: reqwest::Error },
ExecuteRequestError { source: reqwest::Error },
ResponseBytesError { source: reqwest::Error },
DeserializeError { source: serde_json::Error, body: bytes::Bytes },
GetTokenError { source: azure_core::errors::AzureError },
}
}
/// Lists all solutions in the given migrate project.
///
/// # Errors
/// Returns [`enumerate_solutions::Error`] on token/request/transport/decode
/// failure or any non-200 status.
pub async fn enumerate_solutions(
    operation_config: &crate::OperationConfig,
    subscription_id: &str,
    resource_group_name: &str,
    migrate_project_name: &str,
) -> std::result::Result<SolutionsCollection, enumerate_solutions::Error> {
    let client = &operation_config.client;
    // Own the URL string instead of borrowing a `format!` temporary.
    let uri_str = format!(
        "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Migrate/migrateProjects/{}/solutions",
        &operation_config.base_path, subscription_id, resource_group_name, migrate_project_name
    );
    let mut req_builder = client.get(uri_str.as_str());
    if let Some(token_credential) = &operation_config.token_credential {
        let token_response = token_credential
            .get_token(&operation_config.token_credential_resource)
            .await
            .context(enumerate_solutions::GetTokenError)?;
        req_builder = req_builder.bearer_auth(token_response.token.secret());
    }
    req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]);
    let req = req_builder.build().context(enumerate_solutions::BuildRequestError)?;
    let rsp = client.execute(req).await.context(enumerate_solutions::ExecuteRequestError)?;
    match rsp.status() {
        StatusCode::OK => {
            let body: bytes::Bytes = rsp.bytes().await.context(enumerate_solutions::ResponseBytesError)?;
            let rsp_value: SolutionsCollection =
                serde_json::from_slice(&body).context(enumerate_solutions::DeserializeError { body })?;
            Ok(rsp_value)
        }
        status_code => {
            let body: bytes::Bytes = rsp.bytes().await.context(enumerate_solutions::ResponseBytesError)?;
            // Field-init shorthand (was `body: body`).
            enumerate_solutions::UnexpectedResponse { status_code, body }.fail()
        }
    }
}
pub mod enumerate_solutions {
use crate::{models, models::*};
use reqwest::StatusCode;
use snafu::Snafu;
#[derive(Debug, Snafu)]
#[snafu(visibility(pub(crate)))]
pub enum Error {
UnexpectedResponse { status_code: StatusCode, body: bytes::Bytes },
BuildRequestError { source: reqwest::Error },
ExecuteRequestError { source: reqwest::Error },
ResponseBytesError { source: reqwest::Error },
DeserializeError { source: serde_json::Error, body: bytes::Bytes },
GetTokenError { source: azure_core::errors::AzureError },
}
}
/// Fetches a solution's configuration via the `getConfig` POST action.
///
/// The action takes no body, so Content-Length is pinned to 0 explicitly.
///
/// # Errors
/// Returns [`get_config::Error`] on token/request/transport/decode failure or
/// any non-200 status.
pub async fn get_config(
    operation_config: &crate::OperationConfig,
    subscription_id: &str,
    resource_group_name: &str,
    migrate_project_name: &str,
    solution_name: &str,
) -> std::result::Result<SolutionConfig, get_config::Error> {
    let client = &operation_config.client;
    // Own the URL string instead of borrowing a `format!` temporary.
    let uri_str = format!(
        "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Migrate/migrateProjects/{}/solutions/{}/getConfig",
        &operation_config.base_path, subscription_id, resource_group_name, migrate_project_name, solution_name
    );
    let mut req_builder = client.post(uri_str.as_str());
    if let Some(token_credential) = &operation_config.token_credential {
        let token_response = token_credential
            .get_token(&operation_config.token_credential_resource)
            .await
            .context(get_config::GetTokenError)?;
        req_builder = req_builder.bearer_auth(token_response.token.secret());
    }
    req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]);
    req_builder = req_builder.header(reqwest::header::CONTENT_LENGTH, 0);
    let req = req_builder.build().context(get_config::BuildRequestError)?;
    let rsp = client.execute(req).await.context(get_config::ExecuteRequestError)?;
    match rsp.status() {
        StatusCode::OK => {
            let body: bytes::Bytes = rsp.bytes().await.context(get_config::ResponseBytesError)?;
            let rsp_value: SolutionConfig = serde_json::from_slice(&body).context(get_config::DeserializeError { body })?;
            Ok(rsp_value)
        }
        status_code => {
            let body: bytes::Bytes = rsp.bytes().await.context(get_config::ResponseBytesError)?;
            // Field-init shorthand (was `body: body`).
            get_config::UnexpectedResponse { status_code, body }.fail()
        }
    }
}
/// Error types for the `get_config` operation.
pub mod get_config {
    use crate::{models, models::*};
    use reqwest::StatusCode;
    use snafu::Snafu;
    /// Errors that can occur while building, sending, or decoding the request.
    #[derive(Debug, Snafu)]
    #[snafu(visibility(pub(crate)))]
    pub enum Error {
        /// The service answered with an unexpected HTTP status; the raw body is kept for diagnostics.
        UnexpectedResponse { status_code: StatusCode, body: bytes::Bytes },
        /// The HTTP request could not be constructed.
        BuildRequestError { source: reqwest::Error },
        /// The HTTP request failed while being executed.
        ExecuteRequestError { source: reqwest::Error },
        /// The response body could not be read into bytes.
        ResponseBytesError { source: reqwest::Error },
        /// The response body could not be deserialized as JSON; the raw body is kept for diagnostics.
        DeserializeError { source: serde_json::Error, body: bytes::Bytes },
        /// Acquiring an authentication token failed.
        GetTokenError { source: azure_core::errors::AzureError },
    }
}
/// Invokes the `cleanupData` action on a migrate-project solution via an
/// empty POST; succeeds with `()` on `200 OK`.
///
/// # Errors
/// Returns a [`cleanup_solution_data::Error`] when token acquisition, request
/// construction, request execution, or body retrieval fails, or when the
/// service answers with a non-`200 OK` status.
pub async fn cleanup_solution_data(
    operation_config: &crate::OperationConfig,
    subscription_id: &str,
    resource_group_name: &str,
    migrate_project_name: &str,
    solution_name: &str,
) -> std::result::Result<(), cleanup_solution_data::Error> {
    let client = &operation_config.client;
    let uri_str = &format!(
        "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Migrate/migrateProjects/{}/solutions/{}/cleanupData",
        &operation_config.base_path, subscription_id, resource_group_name, migrate_project_name, solution_name
    );
    let mut req_builder = client.post(uri_str);
    if let Some(token_credential) = &operation_config.token_credential {
        // Only attach a bearer token when a credential is configured.
        let token_response = token_credential
            .get_token(&operation_config.token_credential_resource)
            .await
            .context(cleanup_solution_data::GetTokenError)?;
        req_builder = req_builder.bearer_auth(token_response.token.secret());
    }
    req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]);
    // Empty POST body: state the zero content length explicitly.
    req_builder = req_builder.header(reqwest::header::CONTENT_LENGTH, 0);
    let req = req_builder.build().context(cleanup_solution_data::BuildRequestError)?;
    let rsp = client.execute(req).await.context(cleanup_solution_data::ExecuteRequestError)?;
    match rsp.status() {
        // Success carries no payload: the body is not read on 200 OK.
        StatusCode::OK => Ok(()),
        status_code => {
            let body: bytes::Bytes = rsp.bytes().await.context(cleanup_solution_data::ResponseBytesError)?;
            // Field-init shorthand (was the redundant `body: body`).
            cleanup_solution_data::UnexpectedResponse { status_code, body }.fail()
        }
    }
}
/// Error types for the `cleanup_solution_data` operation.
pub mod cleanup_solution_data {
    use crate::{models, models::*};
    use reqwest::StatusCode;
    use snafu::Snafu;
    /// Errors that can occur while building, sending, or decoding the request.
    #[derive(Debug, Snafu)]
    #[snafu(visibility(pub(crate)))]
    pub enum Error {
        /// The service answered with an unexpected HTTP status; the raw body is kept for diagnostics.
        UnexpectedResponse { status_code: StatusCode, body: bytes::Bytes },
        /// The HTTP request could not be constructed.
        BuildRequestError { source: reqwest::Error },
        /// The HTTP request failed while being executed.
        ExecuteRequestError { source: reqwest::Error },
        /// The response body could not be read into bytes.
        ResponseBytesError { source: reqwest::Error },
        /// Declared for parity with the other generated operations;
        /// `cleanup_solution_data` itself returns `()` and never deserializes a body.
        DeserializeError { source: serde_json::Error, body: bytes::Bytes },
        /// Acquiring an authentication token failed.
        GetTokenError { source: azure_core::errors::AzureError },
    }
}
}
/// Operations over the `Microsoft.Migrate` provider's operation catalog.
pub mod operations {
    use crate::models::*;
    use reqwest::StatusCode;
    use snafu::{ResultExt, Snafu};
    /// Lists all REST operations exposed by the `Microsoft.Migrate` resource
    /// provider (`GET {base_path}/providers/Microsoft.Migrate/operations`).
    ///
    /// # Errors
    /// Returns a [`list::Error`] when token acquisition, request
    /// construction, execution, body retrieval, or deserialization fails, or
    /// when the service answers with a non-`200 OK` status.
    pub async fn list(operation_config: &crate::OperationConfig) -> std::result::Result<OperationResultList, list::Error> {
        let client = &operation_config.client;
        let uri_str = &format!("{}/providers/Microsoft.Migrate/operations", &operation_config.base_path,);
        let mut req_builder = client.get(uri_str);
        if let Some(token_credential) = &operation_config.token_credential {
            // Only attach a bearer token when a credential is configured.
            let token_response = token_credential
                .get_token(&operation_config.token_credential_resource)
                .await
                .context(list::GetTokenError)?;
            req_builder = req_builder.bearer_auth(token_response.token.secret());
        }
        let req = req_builder.build().context(list::BuildRequestError)?;
        let rsp = client.execute(req).await.context(list::ExecuteRequestError)?;
        match rsp.status() {
            StatusCode::OK => {
                let body: bytes::Bytes = rsp.bytes().await.context(list::ResponseBytesError)?;
                let rsp_value: OperationResultList = serde_json::from_slice(&body).context(list::DeserializeError { body })?;
                Ok(rsp_value)
            }
            status_code => {
                let body: bytes::Bytes = rsp.bytes().await.context(list::ResponseBytesError)?;
                // Field-init shorthand (was the redundant `body: body`).
                list::UnexpectedResponse { status_code, body }.fail()
            }
        }
    }
    /// Error types for [`list`].
    pub mod list {
        use crate::{models, models::*};
        use reqwest::StatusCode;
        use snafu::Snafu;
        /// Errors that can occur while building, sending, or decoding the request.
        #[derive(Debug, Snafu)]
        #[snafu(visibility(pub(crate)))]
        pub enum Error {
            /// The service answered with an unexpected HTTP status; the raw body is kept for diagnostics.
            UnexpectedResponse { status_code: StatusCode, body: bytes::Bytes },
            /// The HTTP request could not be constructed.
            BuildRequestError { source: reqwest::Error },
            /// The HTTP request failed while being executed.
            ExecuteRequestError { source: reqwest::Error },
            /// The response body could not be read into bytes.
            ResponseBytesError { source: reqwest::Error },
            /// The response body could not be deserialized as JSON; the raw body is kept for diagnostics.
            DeserializeError { source: serde_json::Error, body: bytes::Bytes },
            /// Acquiring an authentication token failed.
            GetTokenError { source: azure_core::errors::AzureError },
        }
    }
}
|
/*
Mine simulator .. the mine is represented by an array
... a really poor one :D
*/
use rand::{thread_rng, Rng};
use std::io;
/// The kinds of content a mine spot can hold.
///
/// NOTE(review): idiomatic Rust would name the variants in UpperCamelCase
/// (`Mithril`, …) and spell `CUPPER` as `Copper`, but renaming would break
/// the match sites elsewhere in this file, so the names are kept as-is.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
enum MineralType {
    MITHRIL,
    GOLD,
    SILVER,
    DIAMOND,
    IRON,
    CUPPER,
    ROCK,
}
/// One spot in the mine that actually contains something.
struct MineSpot {
    // The kind of mineral occupying this spot.
    mineral: MineralType
}
/// Randomly generates the contents of a single mine spot.
///
/// Returns `None` (an empty spot) half of the time; otherwise returns a
/// `MineSpot` whose mineral is picked uniformly from all seven variants.
fn init_gold_and_stuff() -> Option<MineSpot> {
    if thread_rng().gen_range(0..2) == 1 {
        // This spot has minerals. `gen_range(0..7)` yields 0..=6 so every
        // variant is reachable; the original `gen_range(0..5)` only yielded
        // 0..=4, so the `5 => CUPPER` and `_ => ROCK` arms were dead code and
        // those minerals could never appear in the mine.
        let spot: MineSpot = MineSpot {
            mineral: match thread_rng().gen_range(0..7) {
                0 => MineralType::MITHRIL,
                1 => MineralType::GOLD,
                2 => MineralType::SILVER,
                3 => MineralType::DIAMOND,
                4 => MineralType::IRON,
                5 => MineralType::CUPPER,
                _ => MineralType::ROCK
            }
        };
        Some(spot)
    }
    else {
        // This spot is empty.
        None
    }
}
/// Appends `mine_size` freshly generated spots to `mine`.
fn init_mine_with_gold(mine: &mut Vec<Option<MineSpot>>, mine_size: usize) {
    println!("Current size of the mine: {}", mine.len());
    // One random spot per requested slot, appended in order.
    mine.extend((0..mine_size).map(|_| init_gold_and_stuff()));
}
/// Entry point: asks the user for a mine size (1..=20), generates the mine,
/// and prints it as one bracketed, comma-separated row of symbols.
fn main() {
    let mut mine_size_str = String::new();
    let mine_size: usize;
    loop {
        println!("Please enter the mine size. Choose a number between 1 and 20");
        // `read_line` APPENDS to the buffer, so it must be cleared each
        // attempt; the original kept old input around, which made every
        // retry after an out-of-range entry fail to parse and panic.
        mine_size_str.clear();
        io::stdin()
            .read_line(&mut mine_size_str)
            .expect("Failed to read line");
        // Re-prompt on invalid input instead of panicking (the original used
        // `.expect(...)`, which aborted the program on a non-numeric entry).
        match mine_size_str.trim().parse::<usize>() {
            Ok(n) if (1..=20).contains(&n) => {
                mine_size = n;
                break;
            }
            _ => println!("Please type a number between 1 and 20!"),
        }
    }
    let mut mine: Vec<Option<MineSpot>> = Vec::new();
    init_mine_with_gold(&mut mine, mine_size);
    // Render each spot as one character: a mineral symbol, '_' for rock,
    // or a space for an empty spot.
    let mut output: String = "Mine [".to_owned();
    let last_index = mine.len() - 1; // safe: mine_size >= 1
    for (index, spot) in mine.iter().enumerate() {
        match spot {
            Some(spot) => output.push(match spot.mineral {
                MineralType::MITHRIL => 'M',
                MineralType::GOLD => 'G',
                MineralType::SILVER => 'S',
                MineralType::DIAMOND => 'D',
                MineralType::IRON => 'I',
                MineralType::CUPPER => 'C',
                MineralType::ROCK => '_',
            }),
            None => output.push(' '),
        }
        if index < last_index {
            output.push_str(", ");
        }
    }
    output.push(']');
    println!("{}", output)
}
#[cfg(test)]
mod tests {
    // Runs only under `cargo test`; pull the items above into scope.
    use super::*;
    /// `init_gold_and_stuff` returns `Some` with probability 1/2 per call,
    /// so repeated calls should virtually always find at least one mineral.
    #[test]
    fn test_init_mine_with_gold() {
        let mut found: bool = false;
        // 128 attempts: the all-empty probability is 2^-128, so the test is
        // effectively never flaky (the original 10 attempts failed about
        // once per 1024 runs). Break early once a mineral is seen.
        for _index in 0..128 {
            let spot: Option<MineSpot> = init_gold_and_stuff();
            if spot.is_some() {
                found = true;
                break;
            }
        }
        assert!(found);
    }
}
use x86_64::VirtAddr;
use x86_64::structures::tss::TaskStateSegment;
use lazy_static::lazy_static;
// create a static GDT that includes a segment for TSS static:
use x86_64::structures::gdt::{GlobalDescriptorTable,Descriptor,SegmentSelector};
/// Segment selectors produced while building the GDT, kept so `init` can
/// load them into the CPU after the table itself is loaded.
struct Selectors {
    // Selector for the kernel code segment.
    code_selector: SegmentSelector,
    // Selector for the task state segment (TSS).
    tss_selector: SegmentSelector,
}
lazy_static! {
    // The GDT together with the selectors returned while adding its entries.
    // Built lazily because `Descriptor::tss_segment(&TSS)` needs a reference
    // to the (also lazily initialized) TSS static.
    static ref GDT: (GlobalDescriptorTable, Selectors) = {
        let mut gdt = GlobalDescriptorTable::new();
        let code_selector = gdt.add_entry(Descriptor::kernel_code_segment());
        let tss_selector = gdt.add_entry(Descriptor::tss_segment(&TSS));
        (gdt, Selectors { code_selector, tss_selector })
    };
}
/// Loads the GDT, then reloads the code segment register and the task
/// register so the CPU actually starts using the new table.
pub fn init() {
    use x86_64::instructions::segmentation::set_cs;
    use x86_64::instructions::tables::load_tss;
    GDT.0.load();
    // SAFETY: both selectors were produced while building the very GDT that
    // was loaded on the line above, so they reference valid descriptors.
    unsafe {
        set_cs(GDT.1.code_selector); // reload the code segment register
        load_tss(GDT.1.tss_selector); // load the TSS selector into the task register
    }
}
pub const DOUBLE_FAULT_IST_INDEX:u16 = 0; //define 0th IST(Interrupt Stack Table) entry as double fault stack
lazy_static! {
    // Task State Segment whose interrupt stack table provides a known-good
    // stack for the double-fault handler.
    static ref TSS: TaskStateSegment = {
        let mut tss = TaskStateSegment::new();
        tss.interrupt_stack_table[DOUBLE_FAULT_IST_INDEX as usize] = {
            const STACK_SIZE: usize = 4096;
            // Backing storage for the double-fault stack: 4096 bytes, no
            // guard page. `static mut` risks data races; here it is only
            // taken by address to compute the stack top.
            static mut STACK: [u8; STACK_SIZE] = [0; STACK_SIZE];
            let stack_start = VirtAddr::from_ptr(unsafe { &STACK });
            let stack_end = stack_start + STACK_SIZE;
            // Store the highest address because stacks on x86 grow downwards.
            stack_end
        };
        tss
    };
}
pub mod agents;
pub mod login;
pub mod permission_membership;
pub mod permissions;
pub mod routers;
pub mod tunnels;
pub mod users;
|
use std::{
io::{Read, Write},
net::{IpAddr, Ipv4Addr, SocketAddr, SocketAddrV4, TcpStream, UdpSocket},
time::{Duration, SystemTime},
};
use argh::FromArgs;
use color_eyre::eyre::WrapErr;
use polling::{Event, Poller};
use flatbuffers_structs::net_protocol::{ConfigArgs, Endpoint, HandshakeArgs};
use protocol::connection::Connection;
use vita_virtual_device::{VitaDevice, VitaVirtualDevice};
/// Create a virtual controller and fetch its data from a Vita
/// over the network.
// NOTE(review): every `///` comment inside this struct is consumed by `argh`
// and becomes part of the generated `--help` output, i.e. it is runtime
// behavior — those lines are deliberately left untouched.
#[derive(FromArgs)]
struct Args {
    #[argh(option, short = 'p')]
    /// port to connect to
    /// (default: 5000)
    port: Option<u16>,
    #[argh(option)]
    /// polling interval in microseconds
    polling_interval: Option<u64>,
    /// IP address of the Vita to connect to
    #[argh(positional)]
    ip: String,
}
/// Maps a `WouldBlock` error from a non-blocking UDP read to a successful
/// zero-byte result (with an unspecified peer address); every other error is
/// passed through unchanged.
fn filter_udp_nonblocking_error(
    err: std::io::Error,
) -> Result<(usize, SocketAddr), std::io::Error> {
    match err.kind() {
        std::io::ErrorKind::WouldBlock => {
            // Nothing was available to read: report an empty datagram.
            let placeholder = SocketAddr::new(IpAddr::V4(Ipv4Addr::UNSPECIFIED), 0);
            Ok((0, placeholder))
        }
        _ => Err(err),
    }
}
/// Entry point: performs the TCP handshake with the Vita, opens a UDP socket
/// for pad data, creates the virtual input device, then loops forwarding pad
/// reports while keeping the connection alive with periodic heartbeats.
fn main() -> color_eyre::Result<()> {
    // Default TCP control port on the Vita.
    const NET_PORT: u16 = 5000;
    // Timeout for the initial TCP connection.
    const TIMEOUT: Duration = Duration::from_secs(25);
    const BUFFER_SIZE: usize = 2048;
    // Fastest supported polling interval in microseconds (250 Hz).
    const MIN_POLLING_RATE: u64 = (1000 / 250) * 1000;
    color_eyre::install()?;
    pretty_env_logger::init();
    let args: Args = argh::from_env();
    let remote_port = args.port.unwrap_or(NET_PORT);
    // Clamp the requested interval to the supported minimum.
    let polling_interval = args
        .polling_interval
        .map(|v| v.max(MIN_POLLING_RATE))
        .unwrap_or(MIN_POLLING_RATE);
    let addr = SocketAddr::V4(SocketAddrV4::new(
        args.ip.parse().wrap_err("invalid IPv4 address")?,
        remote_port,
    ));
    let mut conn = Connection::new();
    let mut ctrl_socket = TcpStream::connect_timeout(&addr, TIMEOUT).wrap_err(
        "Failed to connect to device, please check that the IP address and port are correct",
    )?;
    // UDP socket the Vita streams pad data to; bound to an ephemeral port.
    let pad_socket =
        UdpSocket::bind((Ipv4Addr::UNSPECIFIED, 0)).wrap_err("Failed to bind UDP socket")?;
    pad_socket
        .set_nonblocking(true)
        .wrap_err("Failed to set non-blocking mode on socket")?;
    let bound_port = pad_socket
        .local_addr()
        .expect("Failed to get connection info for data socket")
        .port();
    // Tell the Vita which local UDP port to stream pad data to.
    conn.send_handshake(HandshakeArgs {
        endpoint: Endpoint::Client,
        port: bound_port,
        ..Default::default()
    });
    ctrl_socket
        .write_all(conn.retrieve_out_data().as_slice())
        .wrap_err("Failed to send handshake to Vita")?;
    log::info!("Handshake sent to Vita");
    log::info!("Waiting for handshake response from Vita");
    let mut buf = [0; BUFFER_SIZE];
    let len = ctrl_socket
        .read(&mut buf)
        .wrap_err("Failed to read handshake response from Vita")?;
    log::info!("Handshake response received from Vita");
    conn.receive_data(&buf[..len]);
    let event = conn
        .events()
        .next()
        .expect("No handshake response received");
    let handshake_response = match event {
        Ok(protocol::events::Event::HandshakeResponseReceived { handshake }) => handshake,
        Err(e) => return Err(e).wrap_err("Failed to receive handshake response from Vita"),
        _ => unimplemented!("Unexpected event received"),
    };
    let heartbeat_freq = handshake_response.heartbeat_freq;
    log::debug!("Heartbeat frequency: {}", heartbeat_freq);
    // We just send it to bypass firewall
    conn.send_heartbeat();
    pad_socket
        .send_to(conn.retrieve_out_data().as_slice(), addr)
        .wrap_err("Failed to send heartbeat to Vita")?;
    log::info!("Opened port for data on {}", bound_port);
    let mut last_time = SystemTime::now();
    let mut device = VitaDevice::create().wrap_err(
        "Failed to create virtual device, \
        please check that you have permissions on uinput device",
    )?;
    // (typo fix: was `identfiers`)
    let identifiers = device.identifiers().map(|ids| ids.join(", ".as_ref()));
    log::info!("Virtual device created");
    if let Some(identifiers) = identifiers {
        println!(
            "Virtual device created with identifiers: {}",
            identifiers.to_string_lossy()
        );
    }
    println!("Connection established, press Ctrl+C to exit");
    // Warn when the user asked for a faster rate than we support. This must
    // check the RAW argument: `polling_interval` is already clamped via
    // `.max(MIN_POLLING_RATE)`, so the original comparison
    // `polling_interval < MIN_POLLING_RATE` could never be true (dead code)
    // and the warning never fired.
    if args
        .polling_interval
        .map_or(false, |requested| requested < MIN_POLLING_RATE)
    {
        log::warn!(
            "Polling interval is too low, it has been set to {} microseconds",
            MIN_POLLING_RATE
        );
    }
    if polling_interval != MIN_POLLING_RATE {
        conn.send_config(ConfigArgs {
            polling_interval,
            ..Default::default()
        });
        ctrl_socket
            .write_all(conn.retrieve_out_data().as_slice())
            .wrap_err("Failed to send configuration to Vita")?;
    }
    let poller = Poller::new().wrap_err("Failed to create poller")?;
    poller
        .add_with_mode(&pad_socket, Event::readable(1), polling::PollMode::Level)
        .wrap_err("Failed to add socket to poller")?;
    let mut events = Vec::new();
    let mut last_timestamp = 0;
    loop {
        log::trace!("Polling");
        // Wake up early enough to send the next heartbeat (5 s of slack
        // before the advertised frequency elapses).
        let timeout = Duration::from_secs(
            (heartbeat_freq.saturating_sub(5) as u64)
                .saturating_sub(last_time.elapsed().unwrap().as_secs()),
        );
        poller
            .wait(&mut events, Some(timeout))
            .wrap_err("Failed to poll")?;
        if last_time
            .elapsed()
            .expect("Cannot get elapsed time")
            .as_secs()
            >= (heartbeat_freq.saturating_sub(5)).into()
        {
            log::debug!("Sending heartbeat to Vita");
            conn.send_heartbeat();
            ctrl_socket
                .write_all(conn.retrieve_out_data().as_slice())
                .wrap_err("Failed to send heartbeat to Vita")?;
            log::debug!("Heartbeat sent to Vita");
            last_time = SystemTime::now();
            log::trace!("Last time updated to {last_time:?}");
        }
        if events.is_empty() {
            continue;
        }
        let (len, _) = pad_socket
            .recv_from(&mut buf)
            .or_else(filter_udp_nonblocking_error)
            .wrap_err("Failed to receive data from Vita")?;
        log::debug!("Received {len} bytes from Vita");
        let received_data = &buf[..len];
        log::trace!("Received bytes from Vita: {received_data:?}");
        if received_data.is_empty() {
            continue;
        }
        conn.receive_data(received_data);
        for event in conn.events() {
            log::debug!("Event received: {event:?}");
            match event {
                Ok(protocol::events::Event::PadDataReceived { data }) => {
                    // Drop stale or out-of-order packets (UDP can reorder).
                    if data.timestamp <= last_timestamp {
                        log::warn!("Timestamp is not increasing, dropping packet");
                        continue;
                    }
                    last_timestamp = data.timestamp;
                    let report = vita_reports::MainReport::from(data);
                    log::trace!("Sending report to virtual device: {report:?}");
                    device
                        .send_report(report)
                        .wrap_err("Failed to send report to virtual device")?;
                }
                Err(e) => eprintln!("Error when receiving data from Vita: {e}"),
                _ => {}
            }
        }
        events.clear();
    }
}
|
use super::service::NewService;
use crate::{frame::*, server::tcp_server::TcpServer};
use futures::future;
use std::{future::Future, io::Error, net::SocketAddr};
/// Builder-style configuration for a Modbus TCP server.
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct Server {
    // Address the TCP listener binds to.
    socket_addr: SocketAddr,
    // Optional number of event-loop threads (Unix only); the underlying
    // server's default is used when `None`.
    threads: Option<usize>,
}
impl Server {
    /// Set the address for the server (mandatory).
    pub fn new(socket_addr: SocketAddr) -> Self {
        Self { socket_addr, threads: None }
    }
    /// Set the number of threads running simultaneous event loops (optional, Unix only).
    pub fn threads(self, threads: usize) -> Self {
        Self { threads: Some(threads), ..self }
    }
    /// Start a Modbus TCP server that blocks the current thread.
    pub fn serve<S>(self, service: S)
    where
        S: NewService<Request = crate::frame::Request, Response = crate::frame::Response>
            + Send
            + Sync
            + 'static,
        S::Request: From<Request>,
        S::Response: Into<Response>,
        S::Error: Into<Error>,
        S::Instance: Send + Sync + 'static,
    {
        // A future that never resolves keeps the server running until the
        // process terminates.
        self.serve_until(service, future::pending());
    }
    /// Start a Modbus TCP server that blocks the current thread until
    /// `shutdown_signal` resolves.
    pub fn serve_until<S, Sd>(self, service: S, shutdown_signal: Sd)
    where
        S: NewService<Request = crate::frame::Request, Response = crate::frame::Response>
            + Send
            + Sync
            + 'static,
        Sd: Future<Output = ()> + Sync + Send + Unpin + 'static,
        S::Request: From<Request>,
        S::Response: Into<Response>,
        S::Error: Into<Error>,
        S::Instance: Send + Sync + 'static,
    {
        let mut tcp_server = TcpServer::new(self.socket_addr);
        if let Some(thread_count) = self.threads {
            tcp_server.threads(thread_count);
        }
        tcp_server.serve_until(service, shutdown_signal);
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use crate::server::Service;
    use futures::future;
    // Verifies that a `Service` implementation can be called directly and
    // yields its canned response unchanged.
    #[tokio::test]
    async fn service_wrapper() {
        // Minimal service that always answers with a fixed response.
        #[derive(Clone)]
        struct DummyService {
            response: Response,
        };
        impl Service for DummyService {
            type Request = Request;
            type Response = Response;
            type Error = Error;
            type Future = future::Ready<Result<Self::Response, Self::Error>>;
            fn call(&self, _: Self::Request) -> Self::Future {
                future::ready(Ok(self.response.clone()))
            }
        }
        let service = DummyService {
            response: Response::ReadInputRegisters(vec![0x33]),
        };
        let pdu = Request::ReadInputRegisters(0, 1);
        let rsp_adu = service.call(pdu).await.unwrap();
        assert_eq!(rsp_adu, service.response);
    }
}
|
use eos::types::*;
use std::str::FromStr;
use stdweb::web::error::SecurityError;
use stdweb::web::Location;
use types::*;
/// Client-side routes of the application, each carrying the path parameters
/// needed to render it (path shapes mirror `Route::from_strings`).
#[derive(Clone, PartialEq)]
pub enum Route {
    /// `/` or `/{chain}/` — the home page, optionally scoped to a chain.
    Home(Option<ChainIdPrefix>),
    /// `/{chain}/u/{account}` — a user's profile page.
    Profile(ChainIdPrefix, AccountName),
    /// `/{chain}/v/{poll}` — the voting page for a poll.
    PollVoting(ChainIdPrefix, PollId),
    /// `/{chain}/r/{poll}` — the results page for a poll.
    PollResults(ChainIdPrefix, PollId),
}
impl Default for Route {
fn default() -> Route {
Route::Home(None)
}
}
/// Reasons a location string could not be turned into a `Route`.
pub enum RouteError {
    /// The browser reported a security error while reading the pathname.
    SecurityError(SecurityError),
    /// The pathname matched no known route; carries the offending path.
    NotFound(String),
}
impl Route {
    /// Builds a route from the browser's current location.
    ///
    /// Returns `RouteError::SecurityError` when the pathname cannot be read,
    /// or `RouteError::NotFound` when it matches no known route.
    pub fn from_location(location: &Location) -> Result<Route, RouteError> {
        match location.pathname() {
            Ok(pathname) => Route::from_str(pathname.as_str()),
            Err(error) => Err(RouteError::SecurityError(error)),
        }
    }
    /// Matches path segments (already split on '/') against the known route
    /// shapes. A trailing empty segment means the path ended with '/'.
    fn from_strings(pathnames: &[String]) -> Result<Route, RouteError> {
        // Borrow as `&str` so slice patterns can match string literals.
        let strs: Vec<&str> = pathnames.iter().map(|s| s.as_str()).collect();
        match &strs[..] {
            [""] => Ok(Route::Home(None)),
            [chain_id_prefix, ""] => Ok(Route::Home(Some(chain_id_prefix.to_string().into()))),
            [chain_id_prefix, "u", account] => Ok(Route::Profile(
                chain_id_prefix.to_string().into(),
                account.to_string(),
            )),
            [chain_id_prefix, "v", poll_id] => Ok(Route::PollVoting(
                chain_id_prefix.to_string().into(),
                poll_id.to_string(),
            )),
            [chain_id_prefix, "r", poll_id] => Ok(Route::PollResults(
                chain_id_prefix.to_string().into(),
                poll_id.to_string(),
            )),
            // Reconstruct the original path (with its leading '/') for the error.
            _ => Err(RouteError::NotFound(format!("/{}", pathnames.join("/")))),
        }
    }
    /// Absolute URL for this route on the production host.
    pub fn to_absolute(&self) -> String {
        // TODO: use localhost in development environment
        format!("https://www.eosstrawpoll.com{}", self.to_string())
    }
}
/// Renders the route as its URL path.
///
/// Implemented as `Display` rather than a direct `ToString` impl (the
/// original anti-pattern): the blanket `impl<T: Display> ToString for T`
/// keeps every existing `route.to_string()` call working while also enabling
/// `format!("{}", route)` and friends.
impl std::fmt::Display for Route {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        let path = match self {
            Route::Home(chain_id_prefix) => match chain_id_prefix {
                Some(chain_id_prefix) => format!("/{}/", chain_id_prefix.to_string()),
                None => "/".into(),
            },
            Route::Profile(chain_id_prefix, account) => {
                format!("/{}/u/{}", chain_id_prefix.to_string(), account)
            }
            Route::PollVoting(chain_id_prefix, poll_id) => {
                format!("/{}/v/{}", chain_id_prefix.to_string(), poll_id)
            }
            Route::PollResults(chain_id_prefix, poll_id) => {
                format!("/{}/r/{}", chain_id_prefix.to_string(), poll_id)
            }
        };
        f.write_str(&path)
    }
}
impl FromStr for Route {
    type Err = RouteError;
    /// Parses a URL path (e.g. `/chain/v/poll`) into a route.
    fn from_str(s: &str) -> Result<Route, Self::Err> {
        // Splitting on '/' yields an empty first segment for the leading
        // slash; skip it instead of collecting and removing it afterwards.
        let pathnames: Vec<String> = s.split('/').skip(1).map(String::from).collect();
        Route::from_strings(&pathnames)
    }
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.