text
stringlengths
8
4.13M
#![doc = "generated by AutoRust 0.1.0"] #![allow(unused_mut)] #![allow(unused_variables)] #![allow(unused_imports)] use crate::models::*; use reqwest::StatusCode; use snafu::{ResultExt, Snafu}; pub mod network_experiment_profiles { use crate::models::*; use reqwest::StatusCode; use snafu::{ResultExt, Snafu}; pub async fn list(operation_config: &crate::OperationConfig, subscription_id: &str) -> std::result::Result<ProfileList, list::Error> { let client = &operation_config.client; let uri_str = &format!( "{}/subscriptions/{}/providers/Microsoft.Network/NetworkExperimentProfiles", &operation_config.base_path, subscription_id ); let mut req_builder = client.get(uri_str); if let Some(token_credential) = &operation_config.token_credential { let token_response = token_credential .get_token(&operation_config.token_credential_resource) .await .context(list::GetTokenError)?; req_builder = req_builder.bearer_auth(token_response.token.secret()); } req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]); let req = req_builder.build().context(list::BuildRequestError)?; let rsp = client.execute(req).await.context(list::ExecuteRequestError)?; match rsp.status() { StatusCode::OK => { let body: bytes::Bytes = rsp.bytes().await.context(list::ResponseBytesError)?; let rsp_value: ProfileList = serde_json::from_slice(&body).context(list::DeserializeError { body })?; Ok(rsp_value) } status_code => { let body: bytes::Bytes = rsp.bytes().await.context(list::ResponseBytesError)?; let rsp_value: ErrorResponse = serde_json::from_slice(&body).context(list::DeserializeError { body })?; list::DefaultResponse { status_code, value: rsp_value, } .fail() } } } pub mod list { use crate::{models, models::*}; use reqwest::StatusCode; use snafu::Snafu; #[derive(Debug, Snafu)] #[snafu(visibility(pub(crate)))] pub enum Error { DefaultResponse { status_code: StatusCode, value: models::ErrorResponse, }, BuildRequestError { source: reqwest::Error, }, ExecuteRequestError { source: 
reqwest::Error, }, ResponseBytesError { source: reqwest::Error, }, DeserializeError { source: serde_json::Error, body: bytes::Bytes, }, GetTokenError { source: azure_core::errors::AzureError, }, } } pub async fn list_by_resource_group( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, ) -> std::result::Result<ProfileList, list_by_resource_group::Error> { let client = &operation_config.client; let uri_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Network/NetworkExperimentProfiles", &operation_config.base_path, subscription_id, resource_group_name ); let mut req_builder = client.get(uri_str); if let Some(token_credential) = &operation_config.token_credential { let token_response = token_credential .get_token(&operation_config.token_credential_resource) .await .context(list_by_resource_group::GetTokenError)?; req_builder = req_builder.bearer_auth(token_response.token.secret()); } req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]); let req = req_builder.build().context(list_by_resource_group::BuildRequestError)?; let rsp = client.execute(req).await.context(list_by_resource_group::ExecuteRequestError)?; match rsp.status() { StatusCode::OK => { let body: bytes::Bytes = rsp.bytes().await.context(list_by_resource_group::ResponseBytesError)?; let rsp_value: ProfileList = serde_json::from_slice(&body).context(list_by_resource_group::DeserializeError { body })?; Ok(rsp_value) } status_code => { let body: bytes::Bytes = rsp.bytes().await.context(list_by_resource_group::ResponseBytesError)?; let rsp_value: ErrorResponse = serde_json::from_slice(&body).context(list_by_resource_group::DeserializeError { body })?; list_by_resource_group::DefaultResponse { status_code, value: rsp_value, } .fail() } } } pub mod list_by_resource_group { use crate::{models, models::*}; use reqwest::StatusCode; use snafu::Snafu; #[derive(Debug, Snafu)] #[snafu(visibility(pub(crate)))] pub enum Error 
{ DefaultResponse { status_code: StatusCode, value: models::ErrorResponse, }, BuildRequestError { source: reqwest::Error, }, ExecuteRequestError { source: reqwest::Error, }, ResponseBytesError { source: reqwest::Error, }, DeserializeError { source: serde_json::Error, body: bytes::Bytes, }, GetTokenError { source: azure_core::errors::AzureError, }, } } pub async fn get( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, profile_name: &str, ) -> std::result::Result<Profile, get::Error> { let client = &operation_config.client; let uri_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Network/NetworkExperimentProfiles/{}", &operation_config.base_path, subscription_id, resource_group_name, profile_name ); let mut req_builder = client.get(uri_str); if let Some(token_credential) = &operation_config.token_credential { let token_response = token_credential .get_token(&operation_config.token_credential_resource) .await .context(get::GetTokenError)?; req_builder = req_builder.bearer_auth(token_response.token.secret()); } req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]); let req = req_builder.build().context(get::BuildRequestError)?; let rsp = client.execute(req).await.context(get::ExecuteRequestError)?; match rsp.status() { StatusCode::OK => { let body: bytes::Bytes = rsp.bytes().await.context(get::ResponseBytesError)?; let rsp_value: Profile = serde_json::from_slice(&body).context(get::DeserializeError { body })?; Ok(rsp_value) } status_code => { let body: bytes::Bytes = rsp.bytes().await.context(get::ResponseBytesError)?; let rsp_value: ErrorResponse = serde_json::from_slice(&body).context(get::DeserializeError { body })?; get::DefaultResponse { status_code, value: rsp_value, } .fail() } } } pub mod get { use crate::{models, models::*}; use reqwest::StatusCode; use snafu::Snafu; #[derive(Debug, Snafu)] #[snafu(visibility(pub(crate)))] pub enum Error { DefaultResponse { 
status_code: StatusCode, value: models::ErrorResponse, }, BuildRequestError { source: reqwest::Error, }, ExecuteRequestError { source: reqwest::Error, }, ResponseBytesError { source: reqwest::Error, }, DeserializeError { source: serde_json::Error, body: bytes::Bytes, }, GetTokenError { source: azure_core::errors::AzureError, }, } } pub async fn create_or_update( operation_config: &crate::OperationConfig, profile_name: &str, subscription_id: &str, resource_group_name: &str, parameters: &Profile, ) -> std::result::Result<create_or_update::Response, create_or_update::Error> { let client = &operation_config.client; let uri_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Network/NetworkExperimentProfiles/{}", &operation_config.base_path, subscription_id, resource_group_name, profile_name ); let mut req_builder = client.put(uri_str); if let Some(token_credential) = &operation_config.token_credential { let token_response = token_credential .get_token(&operation_config.token_credential_resource) .await .context(create_or_update::GetTokenError)?; req_builder = req_builder.bearer_auth(token_response.token.secret()); } req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]); req_builder = req_builder.json(parameters); let req = req_builder.build().context(create_or_update::BuildRequestError)?; let rsp = client.execute(req).await.context(create_or_update::ExecuteRequestError)?; match rsp.status() { StatusCode::OK => { let body: bytes::Bytes = rsp.bytes().await.context(create_or_update::ResponseBytesError)?; let rsp_value: Profile = serde_json::from_slice(&body).context(create_or_update::DeserializeError { body })?; Ok(create_or_update::Response::Ok200(rsp_value)) } StatusCode::CREATED => { let body: bytes::Bytes = rsp.bytes().await.context(create_or_update::ResponseBytesError)?; let rsp_value: Profile = serde_json::from_slice(&body).context(create_or_update::DeserializeError { body })?; 
Ok(create_or_update::Response::Created201(rsp_value)) } StatusCode::ACCEPTED => { let body: bytes::Bytes = rsp.bytes().await.context(create_or_update::ResponseBytesError)?; let rsp_value: Profile = serde_json::from_slice(&body).context(create_or_update::DeserializeError { body })?; Ok(create_or_update::Response::Accepted202(rsp_value)) } status_code => { let body: bytes::Bytes = rsp.bytes().await.context(create_or_update::ResponseBytesError)?; let rsp_value: ErrorResponse = serde_json::from_slice(&body).context(create_or_update::DeserializeError { body })?; create_or_update::DefaultResponse { status_code, value: rsp_value, } .fail() } } } pub mod create_or_update { use crate::{models, models::*}; use reqwest::StatusCode; use snafu::Snafu; #[derive(Debug)] pub enum Response { Ok200(Profile), Created201(Profile), Accepted202(Profile), } #[derive(Debug, Snafu)] #[snafu(visibility(pub(crate)))] pub enum Error { DefaultResponse { status_code: StatusCode, value: models::ErrorResponse, }, BuildRequestError { source: reqwest::Error, }, ExecuteRequestError { source: reqwest::Error, }, ResponseBytesError { source: reqwest::Error, }, DeserializeError { source: serde_json::Error, body: bytes::Bytes, }, GetTokenError { source: azure_core::errors::AzureError, }, } } pub async fn update( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, profile_name: &str, parameters: &ProfileUpdateModel, ) -> std::result::Result<update::Response, update::Error> { let client = &operation_config.client; let uri_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Network/NetworkExperimentProfiles/{}", &operation_config.base_path, subscription_id, resource_group_name, profile_name ); let mut req_builder = client.patch(uri_str); if let Some(token_credential) = &operation_config.token_credential { let token_response = token_credential .get_token(&operation_config.token_credential_resource) .await .context(update::GetTokenError)?; 
req_builder = req_builder.bearer_auth(token_response.token.secret()); } req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]); req_builder = req_builder.json(parameters); let req = req_builder.build().context(update::BuildRequestError)?; let rsp = client.execute(req).await.context(update::ExecuteRequestError)?; match rsp.status() { StatusCode::OK => { let body: bytes::Bytes = rsp.bytes().await.context(update::ResponseBytesError)?; let rsp_value: Profile = serde_json::from_slice(&body).context(update::DeserializeError { body })?; Ok(update::Response::Ok200(rsp_value)) } StatusCode::ACCEPTED => { let body: bytes::Bytes = rsp.bytes().await.context(update::ResponseBytesError)?; let rsp_value: Profile = serde_json::from_slice(&body).context(update::DeserializeError { body })?; Ok(update::Response::Accepted202(rsp_value)) } status_code => { let body: bytes::Bytes = rsp.bytes().await.context(update::ResponseBytesError)?; let rsp_value: ErrorResponse = serde_json::from_slice(&body).context(update::DeserializeError { body })?; update::DefaultResponse { status_code, value: rsp_value, } .fail() } } } pub mod update { use crate::{models, models::*}; use reqwest::StatusCode; use snafu::Snafu; #[derive(Debug)] pub enum Response { Ok200(Profile), Accepted202(Profile), } #[derive(Debug, Snafu)] #[snafu(visibility(pub(crate)))] pub enum Error { DefaultResponse { status_code: StatusCode, value: models::ErrorResponse, }, BuildRequestError { source: reqwest::Error, }, ExecuteRequestError { source: reqwest::Error, }, ResponseBytesError { source: reqwest::Error, }, DeserializeError { source: serde_json::Error, body: bytes::Bytes, }, GetTokenError { source: azure_core::errors::AzureError, }, } } pub async fn delete( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, profile_name: &str, ) -> std::result::Result<delete::Response, delete::Error> { let client = &operation_config.client; let uri_str = &format!( 
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Network/NetworkExperimentProfiles/{}", &operation_config.base_path, subscription_id, resource_group_name, profile_name ); let mut req_builder = client.delete(uri_str); if let Some(token_credential) = &operation_config.token_credential { let token_response = token_credential .get_token(&operation_config.token_credential_resource) .await .context(delete::GetTokenError)?; req_builder = req_builder.bearer_auth(token_response.token.secret()); } req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]); let req = req_builder.build().context(delete::BuildRequestError)?; let rsp = client.execute(req).await.context(delete::ExecuteRequestError)?; match rsp.status() { StatusCode::OK => Ok(delete::Response::Ok200), StatusCode::ACCEPTED => Ok(delete::Response::Accepted202), StatusCode::NO_CONTENT => Ok(delete::Response::NoContent204), status_code => { let body: bytes::Bytes = rsp.bytes().await.context(delete::ResponseBytesError)?; let rsp_value: ErrorResponse = serde_json::from_slice(&body).context(delete::DeserializeError { body })?; delete::DefaultResponse { status_code, value: rsp_value, } .fail() } } } pub mod delete { use crate::{models, models::*}; use reqwest::StatusCode; use snafu::Snafu; #[derive(Debug)] pub enum Response { Ok200, Accepted202, NoContent204, } #[derive(Debug, Snafu)] #[snafu(visibility(pub(crate)))] pub enum Error { DefaultResponse { status_code: StatusCode, value: models::ErrorResponse, }, BuildRequestError { source: reqwest::Error, }, ExecuteRequestError { source: reqwest::Error, }, ResponseBytesError { source: reqwest::Error, }, DeserializeError { source: serde_json::Error, body: bytes::Bytes, }, GetTokenError { source: azure_core::errors::AzureError, }, } } } pub mod preconfigured_endpoints { use crate::models::*; use reqwest::StatusCode; use snafu::{ResultExt, Snafu}; pub async fn list( operation_config: &crate::OperationConfig, subscription_id: &str, 
resource_group_name: &str, profile_name: &str, ) -> std::result::Result<PreconfiguredEndpointList, list::Error> { let client = &operation_config.client; let uri_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Network/NetworkExperimentProfiles/{}/PreconfiguredEndpoints", &operation_config.base_path, subscription_id, resource_group_name, profile_name ); let mut req_builder = client.get(uri_str); if let Some(token_credential) = &operation_config.token_credential { let token_response = token_credential .get_token(&operation_config.token_credential_resource) .await .context(list::GetTokenError)?; req_builder = req_builder.bearer_auth(token_response.token.secret()); } req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]); let req = req_builder.build().context(list::BuildRequestError)?; let rsp = client.execute(req).await.context(list::ExecuteRequestError)?; match rsp.status() { StatusCode::OK => { let body: bytes::Bytes = rsp.bytes().await.context(list::ResponseBytesError)?; let rsp_value: PreconfiguredEndpointList = serde_json::from_slice(&body).context(list::DeserializeError { body })?; Ok(rsp_value) } status_code => { let body: bytes::Bytes = rsp.bytes().await.context(list::ResponseBytesError)?; let rsp_value: ErrorResponse = serde_json::from_slice(&body).context(list::DeserializeError { body })?; list::DefaultResponse { status_code, value: rsp_value, } .fail() } } } pub mod list { use crate::{models, models::*}; use reqwest::StatusCode; use snafu::Snafu; #[derive(Debug, Snafu)] #[snafu(visibility(pub(crate)))] pub enum Error { DefaultResponse { status_code: StatusCode, value: models::ErrorResponse, }, BuildRequestError { source: reqwest::Error, }, ExecuteRequestError { source: reqwest::Error, }, ResponseBytesError { source: reqwest::Error, }, DeserializeError { source: serde_json::Error, body: bytes::Bytes, }, GetTokenError { source: azure_core::errors::AzureError, }, } } } pub mod experiments { use 
crate::models::*; use reqwest::StatusCode; use snafu::{ResultExt, Snafu}; pub async fn list_by_profile( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, profile_name: &str, ) -> std::result::Result<ExperimentList, list_by_profile::Error> { let client = &operation_config.client; let uri_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Network/NetworkExperimentProfiles/{}/Experiments", &operation_config.base_path, subscription_id, resource_group_name, profile_name ); let mut req_builder = client.get(uri_str); if let Some(token_credential) = &operation_config.token_credential { let token_response = token_credential .get_token(&operation_config.token_credential_resource) .await .context(list_by_profile::GetTokenError)?; req_builder = req_builder.bearer_auth(token_response.token.secret()); } req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]); let req = req_builder.build().context(list_by_profile::BuildRequestError)?; let rsp = client.execute(req).await.context(list_by_profile::ExecuteRequestError)?; match rsp.status() { StatusCode::OK => { let body: bytes::Bytes = rsp.bytes().await.context(list_by_profile::ResponseBytesError)?; let rsp_value: ExperimentList = serde_json::from_slice(&body).context(list_by_profile::DeserializeError { body })?; Ok(rsp_value) } status_code => { let body: bytes::Bytes = rsp.bytes().await.context(list_by_profile::ResponseBytesError)?; let rsp_value: ErrorResponse = serde_json::from_slice(&body).context(list_by_profile::DeserializeError { body })?; list_by_profile::DefaultResponse { status_code, value: rsp_value, } .fail() } } } pub mod list_by_profile { use crate::{models, models::*}; use reqwest::StatusCode; use snafu::Snafu; #[derive(Debug, Snafu)] #[snafu(visibility(pub(crate)))] pub enum Error { DefaultResponse { status_code: StatusCode, value: models::ErrorResponse, }, BuildRequestError { source: reqwest::Error, }, ExecuteRequestError { 
source: reqwest::Error, }, ResponseBytesError { source: reqwest::Error, }, DeserializeError { source: serde_json::Error, body: bytes::Bytes, }, GetTokenError { source: azure_core::errors::AzureError, }, } } pub async fn get( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, profile_name: &str, experiment_name: &str, ) -> std::result::Result<Experiment, get::Error> { let client = &operation_config.client; let uri_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Network/NetworkExperimentProfiles/{}/Experiments/{}", &operation_config.base_path, subscription_id, resource_group_name, profile_name, experiment_name ); let mut req_builder = client.get(uri_str); if let Some(token_credential) = &operation_config.token_credential { let token_response = token_credential .get_token(&operation_config.token_credential_resource) .await .context(get::GetTokenError)?; req_builder = req_builder.bearer_auth(token_response.token.secret()); } req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]); let req = req_builder.build().context(get::BuildRequestError)?; let rsp = client.execute(req).await.context(get::ExecuteRequestError)?; match rsp.status() { StatusCode::OK => { let body: bytes::Bytes = rsp.bytes().await.context(get::ResponseBytesError)?; let rsp_value: Experiment = serde_json::from_slice(&body).context(get::DeserializeError { body })?; Ok(rsp_value) } status_code => { let body: bytes::Bytes = rsp.bytes().await.context(get::ResponseBytesError)?; let rsp_value: ErrorResponse = serde_json::from_slice(&body).context(get::DeserializeError { body })?; get::DefaultResponse { status_code, value: rsp_value, } .fail() } } } pub mod get { use crate::{models, models::*}; use reqwest::StatusCode; use snafu::Snafu; #[derive(Debug, Snafu)] #[snafu(visibility(pub(crate)))] pub enum Error { DefaultResponse { status_code: StatusCode, value: models::ErrorResponse, }, BuildRequestError { source: 
reqwest::Error, }, ExecuteRequestError { source: reqwest::Error, }, ResponseBytesError { source: reqwest::Error, }, DeserializeError { source: serde_json::Error, body: bytes::Bytes, }, GetTokenError { source: azure_core::errors::AzureError, }, } } pub async fn create_or_update( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, profile_name: &str, experiment_name: &str, parameters: &Experiment, ) -> std::result::Result<create_or_update::Response, create_or_update::Error> { let client = &operation_config.client; let uri_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Network/NetworkExperimentProfiles/{}/Experiments/{}", &operation_config.base_path, subscription_id, resource_group_name, profile_name, experiment_name ); let mut req_builder = client.put(uri_str); if let Some(token_credential) = &operation_config.token_credential { let token_response = token_credential .get_token(&operation_config.token_credential_resource) .await .context(create_or_update::GetTokenError)?; req_builder = req_builder.bearer_auth(token_response.token.secret()); } req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]); req_builder = req_builder.json(parameters); let req = req_builder.build().context(create_or_update::BuildRequestError)?; let rsp = client.execute(req).await.context(create_or_update::ExecuteRequestError)?; match rsp.status() { StatusCode::OK => { let body: bytes::Bytes = rsp.bytes().await.context(create_or_update::ResponseBytesError)?; let rsp_value: Experiment = serde_json::from_slice(&body).context(create_or_update::DeserializeError { body })?; Ok(create_or_update::Response::Ok200(rsp_value)) } StatusCode::CREATED => { let body: bytes::Bytes = rsp.bytes().await.context(create_or_update::ResponseBytesError)?; let rsp_value: Experiment = serde_json::from_slice(&body).context(create_or_update::DeserializeError { body })?; Ok(create_or_update::Response::Created201(rsp_value)) } 
StatusCode::ACCEPTED => { let body: bytes::Bytes = rsp.bytes().await.context(create_or_update::ResponseBytesError)?; let rsp_value: Experiment = serde_json::from_slice(&body).context(create_or_update::DeserializeError { body })?; Ok(create_or_update::Response::Accepted202(rsp_value)) } status_code => { let body: bytes::Bytes = rsp.bytes().await.context(create_or_update::ResponseBytesError)?; let rsp_value: ErrorResponse = serde_json::from_slice(&body).context(create_or_update::DeserializeError { body })?; create_or_update::DefaultResponse { status_code, value: rsp_value, } .fail() } } } pub mod create_or_update { use crate::{models, models::*}; use reqwest::StatusCode; use snafu::Snafu; #[derive(Debug)] pub enum Response { Ok200(Experiment), Created201(Experiment), Accepted202(Experiment), } #[derive(Debug, Snafu)] #[snafu(visibility(pub(crate)))] pub enum Error { DefaultResponse { status_code: StatusCode, value: models::ErrorResponse, }, BuildRequestError { source: reqwest::Error, }, ExecuteRequestError { source: reqwest::Error, }, ResponseBytesError { source: reqwest::Error, }, DeserializeError { source: serde_json::Error, body: bytes::Bytes, }, GetTokenError { source: azure_core::errors::AzureError, }, } } pub async fn update( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, profile_name: &str, experiment_name: &str, parameters: &ExperimentUpdateModel, ) -> std::result::Result<update::Response, update::Error> { let client = &operation_config.client; let uri_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Network/NetworkExperimentProfiles/{}/Experiments/{}", &operation_config.base_path, subscription_id, resource_group_name, profile_name, experiment_name ); let mut req_builder = client.patch(uri_str); if let Some(token_credential) = &operation_config.token_credential { let token_response = token_credential .get_token(&operation_config.token_credential_resource) .await 
.context(update::GetTokenError)?; req_builder = req_builder.bearer_auth(token_response.token.secret()); } req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]); req_builder = req_builder.json(parameters); let req = req_builder.build().context(update::BuildRequestError)?; let rsp = client.execute(req).await.context(update::ExecuteRequestError)?; match rsp.status() { StatusCode::OK => { let body: bytes::Bytes = rsp.bytes().await.context(update::ResponseBytesError)?; let rsp_value: Experiment = serde_json::from_slice(&body).context(update::DeserializeError { body })?; Ok(update::Response::Ok200(rsp_value)) } StatusCode::ACCEPTED => { let body: bytes::Bytes = rsp.bytes().await.context(update::ResponseBytesError)?; let rsp_value: Experiment = serde_json::from_slice(&body).context(update::DeserializeError { body })?; Ok(update::Response::Accepted202(rsp_value)) } status_code => { let body: bytes::Bytes = rsp.bytes().await.context(update::ResponseBytesError)?; let rsp_value: ErrorResponse = serde_json::from_slice(&body).context(update::DeserializeError { body })?; update::DefaultResponse { status_code, value: rsp_value, } .fail() } } } pub mod update { use crate::{models, models::*}; use reqwest::StatusCode; use snafu::Snafu; #[derive(Debug)] pub enum Response { Ok200(Experiment), Accepted202(Experiment), } #[derive(Debug, Snafu)] #[snafu(visibility(pub(crate)))] pub enum Error { DefaultResponse { status_code: StatusCode, value: models::ErrorResponse, }, BuildRequestError { source: reqwest::Error, }, ExecuteRequestError { source: reqwest::Error, }, ResponseBytesError { source: reqwest::Error, }, DeserializeError { source: serde_json::Error, body: bytes::Bytes, }, GetTokenError { source: azure_core::errors::AzureError, }, } } pub async fn delete( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, profile_name: &str, experiment_name: &str, ) -> std::result::Result<delete::Response, delete::Error> { let 
client = &operation_config.client; let uri_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Network/NetworkExperimentProfiles/{}/Experiments/{}", &operation_config.base_path, subscription_id, resource_group_name, profile_name, experiment_name ); let mut req_builder = client.delete(uri_str); if let Some(token_credential) = &operation_config.token_credential { let token_response = token_credential .get_token(&operation_config.token_credential_resource) .await .context(delete::GetTokenError)?; req_builder = req_builder.bearer_auth(token_response.token.secret()); } req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]); let req = req_builder.build().context(delete::BuildRequestError)?; let rsp = client.execute(req).await.context(delete::ExecuteRequestError)?; match rsp.status() { StatusCode::OK => Ok(delete::Response::Ok200), StatusCode::ACCEPTED => Ok(delete::Response::Accepted202), StatusCode::NO_CONTENT => Ok(delete::Response::NoContent204), status_code => { let body: bytes::Bytes = rsp.bytes().await.context(delete::ResponseBytesError)?; let rsp_value: ErrorResponse = serde_json::from_slice(&body).context(delete::DeserializeError { body })?; delete::DefaultResponse { status_code, value: rsp_value, } .fail() } } } pub mod delete { use crate::{models, models::*}; use reqwest::StatusCode; use snafu::Snafu; #[derive(Debug)] pub enum Response { Ok200, Accepted202, NoContent204, } #[derive(Debug, Snafu)] #[snafu(visibility(pub(crate)))] pub enum Error { DefaultResponse { status_code: StatusCode, value: models::ErrorResponse, }, BuildRequestError { source: reqwest::Error, }, ExecuteRequestError { source: reqwest::Error, }, ResponseBytesError { source: reqwest::Error, }, DeserializeError { source: serde_json::Error, body: bytes::Bytes, }, GetTokenError { source: azure_core::errors::AzureError, }, } } } pub mod reports { use crate::models::*; use reqwest::StatusCode; use snafu::{ResultExt, Snafu}; pub async fn 
get_latency_scorecards( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, profile_name: &str, experiment_name: &str, end_date_time_utc: Option<&str>, country: Option<&str>, aggregation_interval: &str, ) -> std::result::Result<LatencyScorecard, get_latency_scorecards::Error> { let client = &operation_config.client; let uri_str = & format ! ("{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Network/NetworkExperimentProfiles/{}/Experiments/{}/LatencyScorecard" , & operation_config . base_path , subscription_id , resource_group_name , profile_name , experiment_name) ; let mut req_builder = client.get(uri_str); if let Some(token_credential) = &operation_config.token_credential { let token_response = token_credential .get_token(&operation_config.token_credential_resource) .await .context(get_latency_scorecards::GetTokenError)?; req_builder = req_builder.bearer_auth(token_response.token.secret()); } req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]); if let Some(end_date_time_utc) = end_date_time_utc { req_builder = req_builder.query(&[("endDateTimeUTC", end_date_time_utc)]); } if let Some(country) = country { req_builder = req_builder.query(&[("country", country)]); } req_builder = req_builder.query(&[("aggregationInterval", aggregation_interval)]); let req = req_builder.build().context(get_latency_scorecards::BuildRequestError)?; let rsp = client.execute(req).await.context(get_latency_scorecards::ExecuteRequestError)?; match rsp.status() { StatusCode::OK => { let body: bytes::Bytes = rsp.bytes().await.context(get_latency_scorecards::ResponseBytesError)?; let rsp_value: LatencyScorecard = serde_json::from_slice(&body).context(get_latency_scorecards::DeserializeError { body })?; Ok(rsp_value) } status_code => { let body: bytes::Bytes = rsp.bytes().await.context(get_latency_scorecards::ResponseBytesError)?; let rsp_value: ErrorResponse = 
serde_json::from_slice(&body).context(get_latency_scorecards::DeserializeError { body })?; get_latency_scorecards::DefaultResponse { status_code, value: rsp_value, } .fail() } } } pub mod get_latency_scorecards { use crate::{models, models::*}; use reqwest::StatusCode; use snafu::Snafu; #[derive(Debug, Snafu)] #[snafu(visibility(pub(crate)))] pub enum Error { DefaultResponse { status_code: StatusCode, value: models::ErrorResponse, }, BuildRequestError { source: reqwest::Error, }, ExecuteRequestError { source: reqwest::Error, }, ResponseBytesError { source: reqwest::Error, }, DeserializeError { source: serde_json::Error, body: bytes::Bytes, }, GetTokenError { source: azure_core::errors::AzureError, }, } } pub async fn get_timeseries( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, profile_name: &str, experiment_name: &str, start_date_time_utc: &str, end_date_time_utc: &str, aggregation_interval: &str, timeseries_type: &str, endpoint: Option<&str>, country: Option<&str>, ) -> std::result::Result<Timeseries, get_timeseries::Error> { let client = &operation_config.client; let uri_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Network/NetworkExperimentProfiles/{}/Experiments/{}/Timeseries", &operation_config.base_path, subscription_id, resource_group_name, profile_name, experiment_name ); let mut req_builder = client.get(uri_str); if let Some(token_credential) = &operation_config.token_credential { let token_response = token_credential .get_token(&operation_config.token_credential_resource) .await .context(get_timeseries::GetTokenError)?; req_builder = req_builder.bearer_auth(token_response.token.secret()); } req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]); req_builder = req_builder.query(&[("startDateTimeUTC", start_date_time_utc)]); req_builder = req_builder.query(&[("endDateTimeUTC", end_date_time_utc)]); req_builder = 
req_builder.query(&[("aggregationInterval", aggregation_interval)]); req_builder = req_builder.query(&[("timeseriesType", timeseries_type)]); if let Some(endpoint) = endpoint { req_builder = req_builder.query(&[("endpoint", endpoint)]); } if let Some(country) = country { req_builder = req_builder.query(&[("country", country)]); } let req = req_builder.build().context(get_timeseries::BuildRequestError)?; let rsp = client.execute(req).await.context(get_timeseries::ExecuteRequestError)?; match rsp.status() { StatusCode::OK => { let body: bytes::Bytes = rsp.bytes().await.context(get_timeseries::ResponseBytesError)?; let rsp_value: Timeseries = serde_json::from_slice(&body).context(get_timeseries::DeserializeError { body })?; Ok(rsp_value) } status_code => { let body: bytes::Bytes = rsp.bytes().await.context(get_timeseries::ResponseBytesError)?; let rsp_value: ErrorResponse = serde_json::from_slice(&body).context(get_timeseries::DeserializeError { body })?; get_timeseries::DefaultResponse { status_code, value: rsp_value, } .fail() } } } pub mod get_timeseries { use crate::{models, models::*}; use reqwest::StatusCode; use snafu::Snafu; #[derive(Debug, Snafu)] #[snafu(visibility(pub(crate)))] pub enum Error { DefaultResponse { status_code: StatusCode, value: models::ErrorResponse, }, BuildRequestError { source: reqwest::Error, }, ExecuteRequestError { source: reqwest::Error, }, ResponseBytesError { source: reqwest::Error, }, DeserializeError { source: serde_json::Error, body: bytes::Bytes, }, GetTokenError { source: azure_core::errors::AzureError, }, } } } pub async fn check_front_door_name_availability( operation_config: &crate::OperationConfig, check_front_door_name_availability_input: &CheckNameAvailabilityInput, ) -> std::result::Result<CheckNameAvailabilityOutput, check_front_door_name_availability::Error> { let client = &operation_config.client; let uri_str = &format!( "{}/providers/Microsoft.Network/checkFrontDoorNameAvailability", &operation_config.base_path, ); 
    // Continuation of `check_front_door_name_availability`: POST the input as JSON,
    // expect `CheckNameAvailabilityOutput` on 200 OK.
    let mut req_builder = client.post(uri_str);
    if let Some(token_credential) = &operation_config.token_credential {
        let token_response = token_credential
            .get_token(&operation_config.token_credential_resource)
            .await
            .context(check_front_door_name_availability::GetTokenError)?;
        req_builder = req_builder.bearer_auth(token_response.token.secret());
    }
    req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]);
    req_builder = req_builder.json(check_front_door_name_availability_input);
    let req = req_builder.build().context(check_front_door_name_availability::BuildRequestError)?;
    let rsp = client
        .execute(req)
        .await
        .context(check_front_door_name_availability::ExecuteRequestError)?;
    match rsp.status() {
        StatusCode::OK => {
            let body: bytes::Bytes = rsp.bytes().await.context(check_front_door_name_availability::ResponseBytesError)?;
            let rsp_value: CheckNameAvailabilityOutput =
                serde_json::from_slice(&body).context(check_front_door_name_availability::DeserializeError { body })?;
            Ok(rsp_value)
        }
        status_code => {
            let body: bytes::Bytes = rsp.bytes().await.context(check_front_door_name_availability::ResponseBytesError)?;
            let rsp_value: ErrorResponse =
                serde_json::from_slice(&body).context(check_front_door_name_availability::DeserializeError { body })?;
            check_front_door_name_availability::DefaultResponse { status_code, value: rsp_value, }.fail()
        }
    }
}
// Error type for `check_front_door_name_availability` (generated snafu enum).
pub mod check_front_door_name_availability {
    use crate::{models, models::*};
    use reqwest::StatusCode;
    use snafu::Snafu;
    #[derive(Debug, Snafu)]
    #[snafu(visibility(pub(crate)))]
    pub enum Error {
        DefaultResponse { status_code: StatusCode, value: models::ErrorResponse, },
        BuildRequestError { source: reqwest::Error, },
        ExecuteRequestError { source: reqwest::Error, },
        ResponseBytesError { source: reqwest::Error, },
        DeserializeError { source: serde_json::Error, body: bytes::Bytes, },
        GetTokenError { source: azure_core::errors::AzureError, },
    }
}
/// POST {base}/subscriptions/{sub}/providers/Microsoft.Network/checkFrontDoorNameAvailability
/// — same check as above but scoped to a subscription.
pub async fn check_front_door_name_availability_with_subscription(
    operation_config: &crate::OperationConfig,
    check_front_door_name_availability_input: &CheckNameAvailabilityInput,
    subscription_id: &str,
) -> std::result::Result<CheckNameAvailabilityOutput, check_front_door_name_availability_with_subscription::Error> {
    let client = &operation_config.client;
    let uri_str = &format!(
        "{}/subscriptions/{}/providers/Microsoft.Network/checkFrontDoorNameAvailability",
        &operation_config.base_path, subscription_id
    );
    let mut req_builder = client.post(uri_str);
    if let Some(token_credential) = &operation_config.token_credential {
        let token_response = token_credential
            .get_token(&operation_config.token_credential_resource)
            .await
            .context(check_front_door_name_availability_with_subscription::GetTokenError)?;
        req_builder = req_builder.bearer_auth(token_response.token.secret());
    }
    req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]);
    req_builder = req_builder.json(check_front_door_name_availability_input);
    let req = req_builder
        .build()
        .context(check_front_door_name_availability_with_subscription::BuildRequestError)?;
    let rsp = client
        .execute(req)
        .await
        .context(check_front_door_name_availability_with_subscription::ExecuteRequestError)?;
    match rsp.status() {
        StatusCode::OK => {
            let body: bytes::Bytes = rsp
                .bytes()
                .await
                .context(check_front_door_name_availability_with_subscription::ResponseBytesError)?;
            let rsp_value: CheckNameAvailabilityOutput =
                serde_json::from_slice(&body).context(check_front_door_name_availability_with_subscription::DeserializeError { body })?;
            Ok(rsp_value)
        }
        status_code => {
            let body: bytes::Bytes = rsp
                .bytes()
                .await
                .context(check_front_door_name_availability_with_subscription::ResponseBytesError)?;
            let rsp_value: ErrorResponse =
                serde_json::from_slice(&body).context(check_front_door_name_availability_with_subscription::DeserializeError { body })?;
            check_front_door_name_availability_with_subscription::DefaultResponse { status_code, value: rsp_value, }.fail()
        }
    }
}
// Error type for `check_front_door_name_availability_with_subscription`.
pub mod check_front_door_name_availability_with_subscription {
    use crate::{models, models::*};
    use reqwest::StatusCode;
    use snafu::Snafu;
    #[derive(Debug, Snafu)]
    #[snafu(visibility(pub(crate)))]
    pub enum Error {
        DefaultResponse { status_code: StatusCode, value: models::ErrorResponse, },
        BuildRequestError { source: reqwest::Error, },
        ExecuteRequestError { source: reqwest::Error, },
        ResponseBytesError { source: reqwest::Error, },
        DeserializeError { source: serde_json::Error, body: bytes::Bytes, },
        GetTokenError { source: azure_core::errors::AzureError, },
    }
}
// Operations on Front Door resources (list/get/create_or_update/delete/...).
pub mod front_doors {
    use crate::models::*;
    use reqwest::StatusCode;
    use snafu::{ResultExt, Snafu};
    /// GET {base}/subscriptions/{sub}/providers/Microsoft.Network/frontDoors
    /// — lists Front Doors in a subscription; returns `FrontDoorListResult` on 200 OK.
    pub async fn list(
        operation_config: &crate::OperationConfig,
        subscription_id: &str,
    ) -> std::result::Result<FrontDoorListResult, list::Error> {
        let client = &operation_config.client;
        let uri_str = &format!(
            "{}/subscriptions/{}/providers/Microsoft.Network/frontDoors",
            &operation_config.base_path, subscription_id
        );
        let mut req_builder = client.get(uri_str);
        if let Some(token_credential) = &operation_config.token_credential {
            let token_response = token_credential
                .get_token(&operation_config.token_credential_resource)
                .await
                .context(list::GetTokenError)?;
            req_builder = req_builder.bearer_auth(token_response.token.secret());
        }
        req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]);
        let req = req_builder.build().context(list::BuildRequestError)?;
        let rsp = client.execute(req).await.context(list::ExecuteRequestError)?;
        match rsp.status() {
            StatusCode::OK => {
                let body: bytes::Bytes = rsp.bytes().await.context(list::ResponseBytesError)?;
                let rsp_value: FrontDoorListResult = serde_json::from_slice(&body).context(list::DeserializeError { body })?;
                Ok(rsp_value)
            }
            status_code => {
                let body: bytes::Bytes = rsp.bytes().await.context(list::ResponseBytesError)?;
                let rsp_value: ErrorResponse = serde_json::from_slice(&body).context(list::DeserializeError { body })?;
                list::DefaultResponse {
                    status_code,
                    // (continuation of `front_doors::list` default arm)
                    value: rsp_value,
                }
                .fail()
            }
        }
    }
    // Error type for `front_doors::list` (generated snafu enum).
    pub mod list {
        use crate::{models, models::*};
        use reqwest::StatusCode;
        use snafu::Snafu;
        #[derive(Debug, Snafu)]
        #[snafu(visibility(pub(crate)))]
        pub enum Error {
            DefaultResponse { status_code: StatusCode, value: models::ErrorResponse, },
            BuildRequestError { source: reqwest::Error, },
            ExecuteRequestError { source: reqwest::Error, },
            ResponseBytesError { source: reqwest::Error, },
            DeserializeError { source: serde_json::Error, body: bytes::Bytes, },
            GetTokenError { source: azure_core::errors::AzureError, },
        }
    }
    /// GET {base}/subscriptions/{sub}/resourceGroups/{rg}/providers/Microsoft.Network/frontDoors
    /// — lists Front Doors in a resource group; returns `FrontDoorListResult` on 200 OK.
    pub async fn list_by_resource_group(
        operation_config: &crate::OperationConfig,
        subscription_id: &str,
        resource_group_name: &str,
    ) -> std::result::Result<FrontDoorListResult, list_by_resource_group::Error> {
        let client = &operation_config.client;
        let uri_str = &format!(
            "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Network/frontDoors",
            &operation_config.base_path, subscription_id, resource_group_name
        );
        let mut req_builder = client.get(uri_str);
        if let Some(token_credential) = &operation_config.token_credential {
            let token_response = token_credential
                .get_token(&operation_config.token_credential_resource)
                .await
                .context(list_by_resource_group::GetTokenError)?;
            req_builder = req_builder.bearer_auth(token_response.token.secret());
        }
        req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]);
        let req = req_builder.build().context(list_by_resource_group::BuildRequestError)?;
        let rsp = client.execute(req).await.context(list_by_resource_group::ExecuteRequestError)?;
        match rsp.status() {
            StatusCode::OK => {
                let body: bytes::Bytes = rsp.bytes().await.context(list_by_resource_group::ResponseBytesError)?;
                let rsp_value: FrontDoorListResult =
                    serde_json::from_slice(&body).context(list_by_resource_group::DeserializeError { body })?;
                Ok(rsp_value)
            }
            status_code => {
                let body: bytes::Bytes = rsp.bytes().await.context(list_by_resource_group::ResponseBytesError)?;
                let rsp_value: ErrorResponse =
                    serde_json::from_slice(&body).context(list_by_resource_group::DeserializeError { body })?;
                list_by_resource_group::DefaultResponse { status_code, value: rsp_value, }.fail()
            }
        }
    }
    // Error type for `front_doors::list_by_resource_group` (generated snafu enum).
    pub mod list_by_resource_group {
        use crate::{models, models::*};
        use reqwest::StatusCode;
        use snafu::Snafu;
        #[derive(Debug, Snafu)]
        #[snafu(visibility(pub(crate)))]
        pub enum Error {
            DefaultResponse { status_code: StatusCode, value: models::ErrorResponse, },
            BuildRequestError { source: reqwest::Error, },
            ExecuteRequestError { source: reqwest::Error, },
            ResponseBytesError { source: reqwest::Error, },
            DeserializeError { source: serde_json::Error, body: bytes::Bytes, },
            GetTokenError { source: azure_core::errors::AzureError, },
        }
    }
    /// GET {base}/subscriptions/{sub}/resourceGroups/{rg}/providers/Microsoft.Network/frontDoors/{name}
    /// — fetches a single Front Door; returns `FrontDoor` on 200 OK.
    pub async fn get(
        operation_config: &crate::OperationConfig,
        subscription_id: &str,
        resource_group_name: &str,
        front_door_name: &str,
    ) -> std::result::Result<FrontDoor, get::Error> {
        let client = &operation_config.client;
        let uri_str = &format!(
            "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Network/frontDoors/{}",
            &operation_config.base_path, subscription_id, resource_group_name, front_door_name
        );
        let mut req_builder = client.get(uri_str);
        if let Some(token_credential) = &operation_config.token_credential {
            let token_response = token_credential
                .get_token(&operation_config.token_credential_resource)
                .await
                .context(get::GetTokenError)?;
            req_builder = req_builder.bearer_auth(token_response.token.secret());
        }
        req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]);
        let req = req_builder.build().context(get::BuildRequestError)?;
        let rsp = client.execute(req).await.context(get::ExecuteRequestError)?;
        match rsp.status() {
            StatusCode::OK => {
                let body: bytes::Bytes = rsp.bytes().await.context(get::ResponseBytesError)?;
                let rsp_value: FrontDoor = serde_json::from_slice(&body).context(get::DeserializeError { body })?;
                Ok(rsp_value)
            }
            status_code => {
                let body: bytes::Bytes = rsp.bytes().await.context(get::ResponseBytesError)?;
                let rsp_value: ErrorResponse = serde_json::from_slice(&body).context(get::DeserializeError { body })?;
                get::DefaultResponse { status_code, value: rsp_value, }.fail()
            }
        }
    }
    // Error type for `front_doors::get` (generated snafu enum).
    pub mod get {
        use crate::{models, models::*};
        use reqwest::StatusCode;
        use snafu::Snafu;
        #[derive(Debug, Snafu)]
        #[snafu(visibility(pub(crate)))]
        pub enum Error {
            DefaultResponse { status_code: StatusCode, value: models::ErrorResponse, },
            BuildRequestError { source: reqwest::Error, },
            ExecuteRequestError { source: reqwest::Error, },
            ResponseBytesError { source: reqwest::Error, },
            DeserializeError { source: serde_json::Error, body: bytes::Bytes, },
            GetTokenError { source: azure_core::errors::AzureError, },
        }
    }
    /// PUT {base}/subscriptions/{sub}/resourceGroups/{rg}/providers/Microsoft.Network/frontDoors/{name}
    /// — creates or updates a Front Door; 200/201/202 each carry a `FrontDoor` body
    /// (see `create_or_update::Response`). Continues on the following lines.
    pub async fn create_or_update(
        operation_config: &crate::OperationConfig,
        subscription_id: &str,
        resource_group_name: &str,
        front_door_name: &str,
        front_door_parameters: &FrontDoor,
    ) -> std::result::Result<create_or_update::Response, create_or_update::Error> {
        let client = &operation_config.client;
        let uri_str = &format!(
            "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Network/frontDoors/{}",
            &operation_config.base_path, subscription_id, resource_group_name, front_door_name
        );
        let mut req_builder = client.put(uri_str);
        if let Some(token_credential) = &operation_config.token_credential {
            let token_response = token_credential
                .get_token(&operation_config.token_credential_resource)
                .await
                .context(create_or_update::GetTokenError)?;
            req_builder = req_builder.bearer_auth(token_response.token.secret());
        }
        req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]);
        req_builder = req_builder.json(front_door_parameters);
        let req = req_builder.build().context(create_or_update::BuildRequestError)?;
        let rsp = client.execute(req).await.context(create_or_update::ExecuteRequestError)?;
        match rsp.status() {
            StatusCode::OK => {
                let body: bytes::Bytes = rsp.bytes().await.context(create_or_update::ResponseBytesError)?;
                let rsp_value: FrontDoor =
                    // (continuation of `front_doors::create_or_update` 200 OK arm)
                    serde_json::from_slice(&body).context(create_or_update::DeserializeError { body })?;
                Ok(create_or_update::Response::Ok200(rsp_value))
            }
            StatusCode::CREATED => {
                let body: bytes::Bytes = rsp.bytes().await.context(create_or_update::ResponseBytesError)?;
                let rsp_value: FrontDoor = serde_json::from_slice(&body).context(create_or_update::DeserializeError { body })?;
                Ok(create_or_update::Response::Created201(rsp_value))
            }
            StatusCode::ACCEPTED => {
                let body: bytes::Bytes = rsp.bytes().await.context(create_or_update::ResponseBytesError)?;
                let rsp_value: FrontDoor = serde_json::from_slice(&body).context(create_or_update::DeserializeError { body })?;
                Ok(create_or_update::Response::Accepted202(rsp_value))
            }
            status_code => {
                let body: bytes::Bytes = rsp.bytes().await.context(create_or_update::ResponseBytesError)?;
                let rsp_value: ErrorResponse = serde_json::from_slice(&body).context(create_or_update::DeserializeError { body })?;
                create_or_update::DefaultResponse { status_code, value: rsp_value, }.fail()
            }
        }
    }
    // Response and error types for `front_doors::create_or_update`.
    pub mod create_or_update {
        use crate::{models, models::*};
        use reqwest::StatusCode;
        use snafu::Snafu;
        // One variant per documented success status; each carries the returned FrontDoor.
        #[derive(Debug)]
        pub enum Response {
            Ok200(FrontDoor),
            Created201(FrontDoor),
            Accepted202(FrontDoor),
        }
        #[derive(Debug, Snafu)]
        #[snafu(visibility(pub(crate)))]
        pub enum Error {
            DefaultResponse { status_code: StatusCode, value: models::ErrorResponse, },
            BuildRequestError { source: reqwest::Error, },
            ExecuteRequestError { source: reqwest::Error, },
            ResponseBytesError { source: reqwest::Error, },
            DeserializeError { source: serde_json::Error, body: bytes::Bytes, },
            GetTokenError { source: azure_core::errors::AzureError, },
        }
    }
    /// DELETE {base}/subscriptions/{sub}/resourceGroups/{rg}/providers/Microsoft.Network/frontDoors/{name}
    /// — 202 Accepted (deletion in progress) or 204 No Content; no success body is read.
    pub async fn delete(
        operation_config: &crate::OperationConfig,
        subscription_id: &str,
        resource_group_name: &str,
        front_door_name: &str,
    ) -> std::result::Result<delete::Response, delete::Error> {
        let client = &operation_config.client;
        let uri_str = &format!(
            "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Network/frontDoors/{}",
            &operation_config.base_path, subscription_id, resource_group_name, front_door_name
        );
        let mut req_builder = client.delete(uri_str);
        if let Some(token_credential) = &operation_config.token_credential {
            let token_response = token_credential
                .get_token(&operation_config.token_credential_resource)
                .await
                .context(delete::GetTokenError)?;
            req_builder = req_builder.bearer_auth(token_response.token.secret());
        }
        req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]);
        let req = req_builder.build().context(delete::BuildRequestError)?;
        let rsp = client.execute(req).await.context(delete::ExecuteRequestError)?;
        match rsp.status() {
            StatusCode::ACCEPTED => Ok(delete::Response::Accepted202),
            StatusCode::NO_CONTENT => Ok(delete::Response::NoContent204),
            status_code => {
                let body: bytes::Bytes = rsp.bytes().await.context(delete::ResponseBytesError)?;
                let rsp_value: ErrorResponse = serde_json::from_slice(&body).context(delete::DeserializeError { body })?;
                delete::DefaultResponse { status_code, value: rsp_value, }.fail()
            }
        }
    }
    // Response and error types for `front_doors::delete`.
    pub mod delete {
        use crate::{models, models::*};
        use reqwest::StatusCode;
        use snafu::Snafu;
        #[derive(Debug)]
        pub enum Response {
            Accepted202,
            NoContent204,
        }
        #[derive(Debug, Snafu)]
        #[snafu(visibility(pub(crate)))]
        pub enum Error {
            DefaultResponse { status_code: StatusCode, value: models::ErrorResponse, },
            BuildRequestError { source: reqwest::Error, },
            ExecuteRequestError { source: reqwest::Error, },
            ResponseBytesError { source: reqwest::Error, },
            DeserializeError { source: serde_json::Error, body: bytes::Bytes, },
            GetTokenError { source: azure_core::errors::AzureError, },
        }
    }
    /// POST {base}/.../frontDoors/{name}/validateCustomDomain — validates that a custom
    /// domain maps to this Front Door; returns `ValidateCustomDomainOutput` on 200 OK.
    pub async fn validate_custom_domain(
        operation_config: &crate::OperationConfig,
        subscription_id: &str,
        resource_group_name: &str,
        front_door_name: &str,
        custom_domain_properties: &ValidateCustomDomainInput,
    ) -> std::result::Result<ValidateCustomDomainOutput, validate_custom_domain::Error> {
        let client = &operation_config.client;
        let uri_str = &format!(
            "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Network/frontDoors/{}/validateCustomDomain",
            &operation_config.base_path, subscription_id, resource_group_name, front_door_name
        );
        let mut req_builder = client.post(uri_str);
        if let Some(token_credential) = &operation_config.token_credential {
            let token_response = token_credential
                .get_token(&operation_config.token_credential_resource)
                .await
                .context(validate_custom_domain::GetTokenError)?;
            req_builder = req_builder.bearer_auth(token_response.token.secret());
        }
        req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]);
        req_builder = req_builder.json(custom_domain_properties);
        let req = req_builder.build().context(validate_custom_domain::BuildRequestError)?;
        let rsp = client.execute(req).await.context(validate_custom_domain::ExecuteRequestError)?;
        match rsp.status() {
            StatusCode::OK => {
                let body: bytes::Bytes = rsp.bytes().await.context(validate_custom_domain::ResponseBytesError)?;
                let rsp_value: ValidateCustomDomainOutput =
                    serde_json::from_slice(&body).context(validate_custom_domain::DeserializeError { body })?;
                Ok(rsp_value)
            }
            status_code => {
                let body: bytes::Bytes = rsp.bytes().await.context(validate_custom_domain::ResponseBytesError)?;
                let rsp_value: ErrorResponse =
                    serde_json::from_slice(&body).context(validate_custom_domain::DeserializeError { body })?;
                validate_custom_domain::DefaultResponse { status_code, value: rsp_value, }.fail()
            }
        }
    }
    // Error type for `front_doors::validate_custom_domain` (generated snafu enum).
    pub mod validate_custom_domain {
        use crate::{models, models::*};
        use reqwest::StatusCode;
        use snafu::Snafu;
        #[derive(Debug, Snafu)]
        #[snafu(visibility(pub(crate)))]
        pub enum Error {
            DefaultResponse { status_code: StatusCode, value: models::ErrorResponse, },
            BuildRequestError { source: reqwest::Error, },
            ExecuteRequestError { source: reqwest::Error, },
            ResponseBytesError { source: reqwest::Error, },
            DeserializeError { source: serde_json::Error, body: bytes::Bytes, },
            GetTokenError { source: azure_core::errors::AzureError, },
        }
    }
// end of `pub mod front_doors`
}
// Operations on frontend endpoints of a Front Door (module name continues below).
pub mod
frontend_endpoints {
    use crate::models::*;
    use reqwest::StatusCode;
    use snafu::{ResultExt, Snafu};
    /// GET {base}/.../frontDoors/{name}/frontendEndpoints — lists frontend endpoints;
    /// returns `FrontendEndpointsListResult` on 200 OK.
    pub async fn list_by_front_door(
        operation_config: &crate::OperationConfig,
        subscription_id: &str,
        resource_group_name: &str,
        front_door_name: &str,
    ) -> std::result::Result<FrontendEndpointsListResult, list_by_front_door::Error> {
        let client = &operation_config.client;
        let uri_str = &format!(
            "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Network/frontDoors/{}/frontendEndpoints",
            &operation_config.base_path, subscription_id, resource_group_name, front_door_name
        );
        let mut req_builder = client.get(uri_str);
        if let Some(token_credential) = &operation_config.token_credential {
            let token_response = token_credential
                .get_token(&operation_config.token_credential_resource)
                .await
                .context(list_by_front_door::GetTokenError)?;
            req_builder = req_builder.bearer_auth(token_response.token.secret());
        }
        req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]);
        let req = req_builder.build().context(list_by_front_door::BuildRequestError)?;
        let rsp = client.execute(req).await.context(list_by_front_door::ExecuteRequestError)?;
        match rsp.status() {
            StatusCode::OK => {
                let body: bytes::Bytes = rsp.bytes().await.context(list_by_front_door::ResponseBytesError)?;
                let rsp_value: FrontendEndpointsListResult =
                    serde_json::from_slice(&body).context(list_by_front_door::DeserializeError { body })?;
                Ok(rsp_value)
            }
            status_code => {
                let body: bytes::Bytes = rsp.bytes().await.context(list_by_front_door::ResponseBytesError)?;
                let rsp_value: ErrorResponse =
                    serde_json::from_slice(&body).context(list_by_front_door::DeserializeError { body })?;
                list_by_front_door::DefaultResponse { status_code, value: rsp_value, }.fail()
            }
        }
    }
    // Error type for `frontend_endpoints::list_by_front_door` (generated snafu enum).
    pub mod list_by_front_door {
        use crate::{models, models::*};
        use reqwest::StatusCode;
        use snafu::Snafu;
        #[derive(Debug, Snafu)]
        #[snafu(visibility(pub(crate)))]
        pub enum Error {
            DefaultResponse { status_code: StatusCode, value: models::ErrorResponse, },
            BuildRequestError { source: reqwest::Error, },
            ExecuteRequestError { source: reqwest::Error, },
            ResponseBytesError { source: reqwest::Error, },
            DeserializeError { source: serde_json::Error, body: bytes::Bytes, },
            GetTokenError { source: azure_core::errors::AzureError, },
        }
    }
    /// GET {base}/.../frontDoors/{name}/frontendEndpoints/{endpoint} — fetches one
    /// frontend endpoint; returns `FrontendEndpoint` on 200 OK.
    pub async fn get(
        operation_config: &crate::OperationConfig,
        subscription_id: &str,
        resource_group_name: &str,
        front_door_name: &str,
        frontend_endpoint_name: &str,
    ) -> std::result::Result<FrontendEndpoint, get::Error> {
        let client = &operation_config.client;
        let uri_str = &format!(
            "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Network/frontDoors/{}/frontendEndpoints/{}",
            &operation_config.base_path, subscription_id, resource_group_name, front_door_name, frontend_endpoint_name
        );
        let mut req_builder = client.get(uri_str);
        if let Some(token_credential) = &operation_config.token_credential {
            let token_response = token_credential
                .get_token(&operation_config.token_credential_resource)
                .await
                .context(get::GetTokenError)?;
            req_builder = req_builder.bearer_auth(token_response.token.secret());
        }
        req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]);
        let req = req_builder.build().context(get::BuildRequestError)?;
        let rsp = client.execute(req).await.context(get::ExecuteRequestError)?;
        match rsp.status() {
            StatusCode::OK => {
                let body: bytes::Bytes = rsp.bytes().await.context(get::ResponseBytesError)?;
                let rsp_value: FrontendEndpoint = serde_json::from_slice(&body).context(get::DeserializeError { body })?;
                Ok(rsp_value)
            }
            status_code => {
                let body: bytes::Bytes = rsp.bytes().await.context(get::ResponseBytesError)?;
                let rsp_value: ErrorResponse = serde_json::from_slice(&body).context(get::DeserializeError { body })?;
                get::DefaultResponse { status_code, value: rsp_value, }.fail()
            }
        }
    }
    // Error type for `frontend_endpoints::get` (generated snafu enum).
    pub mod get {
        use crate::{models, models::*};
        use reqwest::StatusCode;
        use snafu::Snafu;
        #[derive(Debug, Snafu)]
        #[snafu(visibility(pub(crate)))]
        pub enum Error {
            DefaultResponse { status_code: StatusCode, value: models::ErrorResponse, },
            BuildRequestError { source: reqwest::Error, },
            ExecuteRequestError { source: reqwest::Error, },
            ResponseBytesError { source: reqwest::Error, },
            DeserializeError { source: serde_json::Error, body: bytes::Bytes, },
            GetTokenError { source: azure_core::errors::AzureError, },
        }
    }
    /// POST {base}/.../frontendEndpoints/{endpoint}/enableHttps — enables HTTPS with the
    /// supplied `CustomHttpsConfiguration`; 200/202 success arms carry no body.
    /// Continues on the following lines.
    pub async fn enable_https(
        operation_config: &crate::OperationConfig,
        subscription_id: &str,
        resource_group_name: &str,
        front_door_name: &str,
        frontend_endpoint_name: &str,
        custom_https_configuration: &CustomHttpsConfiguration,
    ) -> std::result::Result<enable_https::Response, enable_https::Error> {
        let client = &operation_config.client;
        let uri_str = &format!(
            "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Network/frontDoors/{}/frontendEndpoints/{}/enableHttps",
            &operation_config.base_path, subscription_id, resource_group_name, front_door_name, frontend_endpoint_name
        );
        let mut req_builder = client.post(uri_str);
        if let Some(token_credential) = &operation_config.token_credential {
            let token_response = token_credential
                .get_token(&operation_config.token_credential_resource)
                .await
                .context(enable_https::GetTokenError)?;
            req_builder = req_builder.bearer_auth(token_response.token.secret());
        }
        req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]);
        req_builder = req_builder.json(custom_https_configuration);
        let req = req_builder.build().context(enable_https::BuildRequestError)?;
        let rsp = client.execute(req).await.context(enable_https::ExecuteRequestError)?;
        match rsp.status() {
            StatusCode::OK => Ok(enable_https::Response::Ok200),
            StatusCode::ACCEPTED => Ok(enable_https::Response::Accepted202),
            status_code => {
                let body: bytes::Bytes = rsp.bytes().await.context(enable_https::ResponseBytesError)?;
                let rsp_value: ErrorResponse = serde_json::from_slice(&body).context(enable_https::DeserializeError { body })?;
                enable_https::DefaultResponse { status_code, value: rsp_value, }
                // (continuation of `enable_https` default arm)
                .fail()
            }
        }
    }
    // Response and error types for `frontend_endpoints::enable_https`.
    pub mod enable_https {
        use crate::{models, models::*};
        use reqwest::StatusCode;
        use snafu::Snafu;
        #[derive(Debug)]
        pub enum Response {
            Ok200,
            Accepted202,
        }
        #[derive(Debug, Snafu)]
        #[snafu(visibility(pub(crate)))]
        pub enum Error {
            DefaultResponse { status_code: StatusCode, value: models::ErrorResponse, },
            BuildRequestError { source: reqwest::Error, },
            ExecuteRequestError { source: reqwest::Error, },
            ResponseBytesError { source: reqwest::Error, },
            DeserializeError { source: serde_json::Error, body: bytes::Bytes, },
            GetTokenError { source: azure_core::errors::AzureError, },
        }
    }
    /// POST {base}/.../frontendEndpoints/{endpoint}/disableHttps — disables HTTPS.
    /// Sends an explicit Content-Length: 0 (bodyless POST); 200/202 carry no body.
    pub async fn disable_https(
        operation_config: &crate::OperationConfig,
        subscription_id: &str,
        resource_group_name: &str,
        front_door_name: &str,
        frontend_endpoint_name: &str,
    ) -> std::result::Result<disable_https::Response, disable_https::Error> {
        let client = &operation_config.client;
        let uri_str = &format!(
            "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Network/frontDoors/{}/frontendEndpoints/{}/disableHttps",
            &operation_config.base_path, subscription_id, resource_group_name, front_door_name, frontend_endpoint_name
        );
        let mut req_builder = client.post(uri_str);
        if let Some(token_credential) = &operation_config.token_credential {
            let token_response = token_credential
                .get_token(&operation_config.token_credential_resource)
                .await
                .context(disable_https::GetTokenError)?;
            req_builder = req_builder.bearer_auth(token_response.token.secret());
        }
        req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]);
        req_builder = req_builder.header(reqwest::header::CONTENT_LENGTH, 0);
        let req = req_builder.build().context(disable_https::BuildRequestError)?;
        let rsp = client.execute(req).await.context(disable_https::ExecuteRequestError)?;
        match rsp.status() {
            StatusCode::OK => Ok(disable_https::Response::Ok200),
            StatusCode::ACCEPTED => Ok(disable_https::Response::Accepted202),
            status_code => {
                let body: bytes::Bytes = rsp.bytes().await.context(disable_https::ResponseBytesError)?;
                let rsp_value: ErrorResponse = serde_json::from_slice(&body).context(disable_https::DeserializeError { body })?;
                disable_https::DefaultResponse { status_code, value: rsp_value, }.fail()
            }
        }
    }
    // Response and error types for `frontend_endpoints::disable_https`.
    pub mod disable_https {
        use crate::{models, models::*};
        use reqwest::StatusCode;
        use snafu::Snafu;
        #[derive(Debug)]
        pub enum Response {
            Ok200,
            Accepted202,
        }
        #[derive(Debug, Snafu)]
        #[snafu(visibility(pub(crate)))]
        pub enum Error {
            DefaultResponse { status_code: StatusCode, value: models::ErrorResponse, },
            BuildRequestError { source: reqwest::Error, },
            ExecuteRequestError { source: reqwest::Error, },
            ResponseBytesError { source: reqwest::Error, },
            DeserializeError { source: serde_json::Error, body: bytes::Bytes, },
            GetTokenError { source: azure_core::errors::AzureError, },
        }
    }
// end of `pub mod frontend_endpoints`
}
// Content-purge operations on a Front Door's endpoints.
pub mod endpoints {
    use crate::models::*;
    use reqwest::StatusCode;
    use snafu::{ResultExt, Snafu};
    /// POST {base}/.../frontDoors/{name}/purge — purges cached content for the paths in
    /// `PurgeParameters`; 200/202 success arms carry no body.
    pub async fn purge_content(
        operation_config: &crate::OperationConfig,
        subscription_id: &str,
        resource_group_name: &str,
        front_door_name: &str,
        content_file_paths: &PurgeParameters,
    ) -> std::result::Result<purge_content::Response, purge_content::Error> {
        let client = &operation_config.client;
        let uri_str = &format!(
            "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Network/frontDoors/{}/purge",
            &operation_config.base_path, subscription_id, resource_group_name, front_door_name
        );
        let mut req_builder = client.post(uri_str);
        if let Some(token_credential) = &operation_config.token_credential {
            let token_response = token_credential
                .get_token(&operation_config.token_credential_resource)
                .await
                .context(purge_content::GetTokenError)?;
            req_builder = req_builder.bearer_auth(token_response.token.secret());
        }
        req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]);
        req_builder = req_builder.json(content_file_paths);
        let req = req_builder.build().context(purge_content::BuildRequestError)?;
        let rsp = client.execute(req).await.context(purge_content::ExecuteRequestError)?;
        match rsp.status() {
            StatusCode::OK => Ok(purge_content::Response::Ok200),
            StatusCode::ACCEPTED => Ok(purge_content::Response::Accepted202),
            status_code => {
                let body: bytes::Bytes = rsp.bytes().await.context(purge_content::ResponseBytesError)?;
                let rsp_value: ErrorResponse = serde_json::from_slice(&body).context(purge_content::DeserializeError { body })?;
                purge_content::DefaultResponse { status_code, value: rsp_value, }.fail()
            }
        }
    }
    // Response and error types for `endpoints::purge_content`.
    pub mod purge_content {
        use crate::{models, models::*};
        use reqwest::StatusCode;
        use snafu::Snafu;
        #[derive(Debug)]
        pub enum Response {
            Ok200,
            Accepted202,
        }
        #[derive(Debug, Snafu)]
        #[snafu(visibility(pub(crate)))]
        pub enum Error {
            DefaultResponse { status_code: StatusCode, value: models::ErrorResponse, },
            BuildRequestError { source: reqwest::Error, },
            ExecuteRequestError { source: reqwest::Error, },
            ResponseBytesError { source: reqwest::Error, },
            DeserializeError { source: serde_json::Error, body: bytes::Bytes, },
            GetTokenError { source: azure_core::errors::AzureError, },
        }
    }
// end of `pub mod endpoints`
}
// Operations on a Front Door's rules engine configurations.
pub mod rules_engines {
    use crate::models::*;
    use reqwest::StatusCode;
    use snafu::{ResultExt, Snafu};
    /// GET {base}/.../frontDoors/{name}/rulesEngines — lists rules engine configurations;
    /// returns `RulesEngineListResult` on 200 OK. Continues on the following lines.
    pub async fn list_by_front_door(
        operation_config: &crate::OperationConfig,
        subscription_id: &str,
        resource_group_name: &str,
        front_door_name: &str,
    ) -> std::result::Result<RulesEngineListResult, list_by_front_door::Error> {
        let client = &operation_config.client;
        let uri_str = &format!(
            "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Network/frontDoors/{}/rulesEngines",
            &operation_config.base_path, subscription_id, resource_group_name, front_door_name
        );
        let mut req_builder = client.get(uri_str);
        if let Some(token_credential) = &operation_config.token_credential {
            let token_response = token_credential
                .get_token(&operation_config.token_credential_resource)
                .await
                .context(list_by_front_door::GetTokenError)?;
            req_builder = req_builder.bearer_auth(token_response.token.secret());
} req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]); let req = req_builder.build().context(list_by_front_door::BuildRequestError)?; let rsp = client.execute(req).await.context(list_by_front_door::ExecuteRequestError)?; match rsp.status() { StatusCode::OK => { let body: bytes::Bytes = rsp.bytes().await.context(list_by_front_door::ResponseBytesError)?; let rsp_value: RulesEngineListResult = serde_json::from_slice(&body).context(list_by_front_door::DeserializeError { body })?; Ok(rsp_value) } status_code => { let body: bytes::Bytes = rsp.bytes().await.context(list_by_front_door::ResponseBytesError)?; let rsp_value: ErrorResponse = serde_json::from_slice(&body).context(list_by_front_door::DeserializeError { body })?; list_by_front_door::DefaultResponse { status_code, value: rsp_value, } .fail() } } } pub mod list_by_front_door { use crate::{models, models::*}; use reqwest::StatusCode; use snafu::Snafu; #[derive(Debug, Snafu)] #[snafu(visibility(pub(crate)))] pub enum Error { DefaultResponse { status_code: StatusCode, value: models::ErrorResponse, }, BuildRequestError { source: reqwest::Error, }, ExecuteRequestError { source: reqwest::Error, }, ResponseBytesError { source: reqwest::Error, }, DeserializeError { source: serde_json::Error, body: bytes::Bytes, }, GetTokenError { source: azure_core::errors::AzureError, }, } } pub async fn get( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, front_door_name: &str, rules_engine_name: &str, ) -> std::result::Result<RulesEngine, get::Error> { let client = &operation_config.client; let uri_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Network/frontDoors/{}/rulesEngines/{}", &operation_config.base_path, subscription_id, resource_group_name, front_door_name, rules_engine_name ); let mut req_builder = client.get(uri_str); if let Some(token_credential) = &operation_config.token_credential { let token_response = token_credential 
.get_token(&operation_config.token_credential_resource) .await .context(get::GetTokenError)?; req_builder = req_builder.bearer_auth(token_response.token.secret()); } req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]); let req = req_builder.build().context(get::BuildRequestError)?; let rsp = client.execute(req).await.context(get::ExecuteRequestError)?; match rsp.status() { StatusCode::OK => { let body: bytes::Bytes = rsp.bytes().await.context(get::ResponseBytesError)?; let rsp_value: RulesEngine = serde_json::from_slice(&body).context(get::DeserializeError { body })?; Ok(rsp_value) } status_code => { let body: bytes::Bytes = rsp.bytes().await.context(get::ResponseBytesError)?; let rsp_value: ErrorResponse = serde_json::from_slice(&body).context(get::DeserializeError { body })?; get::DefaultResponse { status_code, value: rsp_value, } .fail() } } } pub mod get { use crate::{models, models::*}; use reqwest::StatusCode; use snafu::Snafu; #[derive(Debug, Snafu)] #[snafu(visibility(pub(crate)))] pub enum Error { DefaultResponse { status_code: StatusCode, value: models::ErrorResponse, }, BuildRequestError { source: reqwest::Error, }, ExecuteRequestError { source: reqwest::Error, }, ResponseBytesError { source: reqwest::Error, }, DeserializeError { source: serde_json::Error, body: bytes::Bytes, }, GetTokenError { source: azure_core::errors::AzureError, }, } } pub async fn create_or_update( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, front_door_name: &str, rules_engine_name: &str, rules_engine_parameters: &RulesEngine, ) -> std::result::Result<create_or_update::Response, create_or_update::Error> { let client = &operation_config.client; let uri_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Network/frontDoors/{}/rulesEngines/{}", &operation_config.base_path, subscription_id, resource_group_name, front_door_name, rules_engine_name ); let mut req_builder = 
client.put(uri_str);
        if let Some(token_credential) = &operation_config.token_credential {
            let token_response = token_credential
                .get_token(&operation_config.token_credential_resource)
                .await
                .context(create_or_update::GetTokenError)?;
            req_builder = req_builder.bearer_auth(token_response.token.secret());
        }
        req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]);
        // The RulesEngine payload is sent as the JSON request body.
        req_builder = req_builder.json(rules_engine_parameters);
        let req = req_builder.build().context(create_or_update::BuildRequestError)?;
        let rsp = client.execute(req).await.context(create_or_update::ExecuteRequestError)?;
        // 200/201/202 all carry a RulesEngine body; each maps to its own Response variant.
        match rsp.status() {
            StatusCode::OK => {
                let body: bytes::Bytes = rsp.bytes().await.context(create_or_update::ResponseBytesError)?;
                let rsp_value: RulesEngine =
                    serde_json::from_slice(&body).context(create_or_update::DeserializeError { body })?;
                Ok(create_or_update::Response::Ok200(rsp_value))
            }
            StatusCode::CREATED => {
                let body: bytes::Bytes = rsp.bytes().await.context(create_or_update::ResponseBytesError)?;
                let rsp_value: RulesEngine =
                    serde_json::from_slice(&body).context(create_or_update::DeserializeError { body })?;
                Ok(create_or_update::Response::Created201(rsp_value))
            }
            StatusCode::ACCEPTED => {
                let body: bytes::Bytes = rsp.bytes().await.context(create_or_update::ResponseBytesError)?;
                let rsp_value: RulesEngine =
                    serde_json::from_slice(&body).context(create_or_update::DeserializeError { body })?;
                Ok(create_or_update::Response::Accepted202(rsp_value))
            }
            status_code => {
                let body: bytes::Bytes = rsp.bytes().await.context(create_or_update::ResponseBytesError)?;
                let rsp_value: ErrorResponse =
                    serde_json::from_slice(&body).context(create_or_update::DeserializeError { body })?;
                create_or_update::DefaultResponse { status_code, value: rsp_value, }.fail()
            }
        }
    }
    // Response/Error types for `create_or_update` (machine-generated).
    pub mod create_or_update {
        use crate::{models, models::*};
        use reqwest::StatusCode;
        use snafu::Snafu;
        #[derive(Debug)]
        pub enum Response {
            Ok200(RulesEngine),
            Created201(RulesEngine),
            Accepted202(RulesEngine),
        }
        #[derive(Debug, Snafu)]
        #[snafu(visibility(pub(crate)))]
        pub enum Error {
            DefaultResponse { status_code: StatusCode, value: models::ErrorResponse, },
            BuildRequestError { source: reqwest::Error, },
            ExecuteRequestError { source: reqwest::Error, },
            ResponseBytesError { source: reqwest::Error, },
            DeserializeError { source: serde_json::Error, body: bytes::Bytes, },
            GetTokenError { source: azure_core::errors::AzureError, },
        }
    }
    /// DELETE a Rules Engine configuration of a Front Door.
    pub async fn delete(
        operation_config: &crate::OperationConfig,
        subscription_id: &str,
        resource_group_name: &str,
        front_door_name: &str,
        rules_engine_name: &str,
    ) -> std::result::Result<delete::Response, delete::Error> {
        let client = &operation_config.client;
        let uri_str = &format!(
            "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Network/frontDoors/{}/rulesEngines/{}",
            &operation_config.base_path, subscription_id, resource_group_name, front_door_name, rules_engine_name
        );
        let mut req_builder = client.delete(uri_str);
        if let Some(token_credential) = &operation_config.token_credential {
            let token_response = token_credential
                .get_token(&operation_config.token_credential_resource)
                .await
                .context(delete::GetTokenError)?;
            req_builder = req_builder.bearer_auth(token_response.token.secret());
        }
        req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]);
        let req = req_builder.build().context(delete::BuildRequestError)?;
        let rsp = client.execute(req).await.context(delete::ExecuteRequestError)?;
        // Delete is asynchronous on the service side: 202 = accepted, 204 = nothing to delete.
        match rsp.status() {
            StatusCode::ACCEPTED => Ok(delete::Response::Accepted202),
            StatusCode::NO_CONTENT => Ok(delete::Response::NoContent204),
            status_code => {
                let body: bytes::Bytes = rsp.bytes().await.context(delete::ResponseBytesError)?;
                let rsp_value: ErrorResponse = serde_json::from_slice(&body).context(delete::DeserializeError { body })?;
                delete::DefaultResponse { status_code, value: rsp_value, }.fail()
            }
        }
    }
    // Response/Error types for `delete` (machine-generated).
    pub mod delete {
        use crate::{models, models::*};
        use reqwest::StatusCode;
        use snafu::Snafu;
        #[derive(Debug)]
        pub enum
Response { Accepted202, NoContent204, }
        #[derive(Debug, Snafu)]
        #[snafu(visibility(pub(crate)))]
        pub enum Error {
            DefaultResponse { status_code: StatusCode, value: models::ErrorResponse, },
            BuildRequestError { source: reqwest::Error, },
            ExecuteRequestError { source: reqwest::Error, },
            ResponseBytesError { source: reqwest::Error, },
            DeserializeError { source: serde_json::Error, body: bytes::Bytes, },
            GetTokenError { source: azure_core::errors::AzureError, },
        }
    }
}
// Operations on Front Door WAF policies (machine-generated by AutoRust).
pub mod policies {
    use crate::models::*;
    use reqwest::StatusCode;
    use snafu::{ResultExt, Snafu};
    /// List all WAF policies in a resource group.
    pub async fn list(
        operation_config: &crate::OperationConfig,
        resource_group_name: &str,
        subscription_id: &str,
    ) -> std::result::Result<WebApplicationFirewallPolicyList, list::Error> {
        let client = &operation_config.client;
        let uri_str = &format!(
            "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Network/FrontDoorWebApplicationFirewallPolicies",
            &operation_config.base_path, subscription_id, resource_group_name
        );
        let mut req_builder = client.get(uri_str);
        // Bearer-token auth is optional: only added when a credential is configured.
        if let Some(token_credential) = &operation_config.token_credential {
            let token_response = token_credential
                .get_token(&operation_config.token_credential_resource)
                .await
                .context(list::GetTokenError)?;
            req_builder = req_builder.bearer_auth(token_response.token.secret());
        }
        req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]);
        let req = req_builder.build().context(list::BuildRequestError)?;
        let rsp = client.execute(req).await.context(list::ExecuteRequestError)?;
        match rsp.status() {
            StatusCode::OK => {
                let body: bytes::Bytes = rsp.bytes().await.context(list::ResponseBytesError)?;
                let rsp_value: WebApplicationFirewallPolicyList =
                    serde_json::from_slice(&body).context(list::DeserializeError { body })?;
                Ok(rsp_value)
            }
            status_code => {
                let body: bytes::Bytes = rsp.bytes().await.context(list::ResponseBytesError)?;
                let rsp_value: ErrorResponse = serde_json::from_slice(&body).context(list::DeserializeError { body })?;
                list::DefaultResponse { status_code, value: rsp_value, }.fail()
            }
        }
    }
    // Error type for `list` (machine-generated).
    pub mod list {
        use crate::{models, models::*};
        use reqwest::StatusCode;
        use snafu::Snafu;
        #[derive(Debug, Snafu)]
        #[snafu(visibility(pub(crate)))]
        pub enum Error {
            DefaultResponse { status_code: StatusCode, value: models::ErrorResponse, },
            BuildRequestError { source: reqwest::Error, },
            ExecuteRequestError { source: reqwest::Error, },
            ResponseBytesError { source: reqwest::Error, },
            DeserializeError { source: serde_json::Error, body: bytes::Bytes, },
            GetTokenError { source: azure_core::errors::AzureError, },
        }
    }
    /// GET a single WAF policy by name.
    pub async fn get(
        operation_config: &crate::OperationConfig,
        resource_group_name: &str,
        policy_name: &str,
        subscription_id: &str,
    ) -> std::result::Result<WebApplicationFirewallPolicy, get::Error> {
        let client = &operation_config.client;
        let uri_str = &format!(
            "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Network/FrontDoorWebApplicationFirewallPolicies/{}",
            &operation_config.base_path, subscription_id, resource_group_name, policy_name
        );
        let mut req_builder = client.get(uri_str);
        if let Some(token_credential) = &operation_config.token_credential {
            let token_response = token_credential
                .get_token(&operation_config.token_credential_resource)
                .await
                .context(get::GetTokenError)?;
            req_builder = req_builder.bearer_auth(token_response.token.secret());
        }
        req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]);
        let req = req_builder.build().context(get::BuildRequestError)?;
        let rsp = client.execute(req).await.context(get::ExecuteRequestError)?;
        match rsp.status() {
            StatusCode::OK => {
                let body: bytes::Bytes = rsp.bytes().await.context(get::ResponseBytesError)?;
                let rsp_value: WebApplicationFirewallPolicy =
                    serde_json::from_slice(&body).context(get::DeserializeError { body })?;
                Ok(rsp_value)
            }
            status_code => {
                let body: bytes::Bytes = rsp.bytes().await.context(get::ResponseBytesError)?;
                let rsp_value: ErrorResponse =
serde_json::from_slice(&body).context(get::DeserializeError { body })?;
                get::DefaultResponse { status_code, value: rsp_value, }.fail()
            }
        }
    }
    // Error type for `get` (machine-generated).
    pub mod get {
        use crate::{models, models::*};
        use reqwest::StatusCode;
        use snafu::Snafu;
        #[derive(Debug, Snafu)]
        #[snafu(visibility(pub(crate)))]
        pub enum Error {
            DefaultResponse { status_code: StatusCode, value: models::ErrorResponse, },
            BuildRequestError { source: reqwest::Error, },
            ExecuteRequestError { source: reqwest::Error, },
            ResponseBytesError { source: reqwest::Error, },
            DeserializeError { source: serde_json::Error, body: bytes::Bytes, },
            GetTokenError { source: azure_core::errors::AzureError, },
        }
    }
    /// PUT (create or update) a WAF policy.
    pub async fn create_or_update(
        operation_config: &crate::OperationConfig,
        resource_group_name: &str,
        policy_name: &str,
        subscription_id: &str,
        parameters: &WebApplicationFirewallPolicy,
    ) -> std::result::Result<create_or_update::Response, create_or_update::Error> {
        let client = &operation_config.client;
        let uri_str = &format!(
            "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Network/FrontDoorWebApplicationFirewallPolicies/{}",
            &operation_config.base_path, subscription_id, resource_group_name, policy_name
        );
        let mut req_builder = client.put(uri_str);
        if let Some(token_credential) = &operation_config.token_credential {
            let token_response = token_credential
                .get_token(&operation_config.token_credential_resource)
                .await
                .context(create_or_update::GetTokenError)?;
            req_builder = req_builder.bearer_auth(token_response.token.secret());
        }
        req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]);
        // Policy payload goes in the JSON request body.
        req_builder = req_builder.json(parameters);
        let req = req_builder.build().context(create_or_update::BuildRequestError)?;
        let rsp = client.execute(req).await.context(create_or_update::ExecuteRequestError)?;
        // 200/201/202 all carry a policy body; each maps to its own Response variant.
        match rsp.status() {
            StatusCode::OK => {
                let body: bytes::Bytes = rsp.bytes().await.context(create_or_update::ResponseBytesError)?;
                let rsp_value: WebApplicationFirewallPolicy =
                    serde_json::from_slice(&body).context(create_or_update::DeserializeError { body })?;
                Ok(create_or_update::Response::Ok200(rsp_value))
            }
            StatusCode::CREATED => {
                let body: bytes::Bytes = rsp.bytes().await.context(create_or_update::ResponseBytesError)?;
                let rsp_value: WebApplicationFirewallPolicy =
                    serde_json::from_slice(&body).context(create_or_update::DeserializeError { body })?;
                Ok(create_or_update::Response::Created201(rsp_value))
            }
            StatusCode::ACCEPTED => {
                let body: bytes::Bytes = rsp.bytes().await.context(create_or_update::ResponseBytesError)?;
                let rsp_value: WebApplicationFirewallPolicy =
                    serde_json::from_slice(&body).context(create_or_update::DeserializeError { body })?;
                Ok(create_or_update::Response::Accepted202(rsp_value))
            }
            status_code => {
                let body: bytes::Bytes = rsp.bytes().await.context(create_or_update::ResponseBytesError)?;
                let rsp_value: ErrorResponse =
                    serde_json::from_slice(&body).context(create_or_update::DeserializeError { body })?;
                create_or_update::DefaultResponse { status_code, value: rsp_value, }.fail()
            }
        }
    }
    // Response/Error types for `create_or_update` (machine-generated).
    pub mod create_or_update {
        use crate::{models, models::*};
        use reqwest::StatusCode;
        use snafu::Snafu;
        #[derive(Debug)]
        pub enum Response {
            Ok200(WebApplicationFirewallPolicy),
            Created201(WebApplicationFirewallPolicy),
            Accepted202(WebApplicationFirewallPolicy),
        }
        #[derive(Debug, Snafu)]
        #[snafu(visibility(pub(crate)))]
        pub enum Error {
            DefaultResponse { status_code: StatusCode, value: models::ErrorResponse, },
            BuildRequestError { source: reqwest::Error, },
            ExecuteRequestError { source: reqwest::Error, },
            ResponseBytesError { source: reqwest::Error, },
            DeserializeError { source: serde_json::Error, body: bytes::Bytes, },
            GetTokenError { source: azure_core::errors::AzureError, },
        }
    }
    /// DELETE a WAF policy.
    pub async fn delete(
        operation_config: &crate::OperationConfig,
        resource_group_name: &str,
        policy_name: &str,
        subscription_id: &str,
    ) -> std::result::Result<delete::Response, delete::Error> {
        let client = &operation_config.client;
        let uri_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Network/FrontDoorWebApplicationFirewallPolicies/{}",
            &operation_config.base_path, subscription_id, resource_group_name, policy_name
        );
        let mut req_builder = client.delete(uri_str);
        if let Some(token_credential) = &operation_config.token_credential {
            let token_response = token_credential
                .get_token(&operation_config.token_credential_resource)
                .await
                .context(delete::GetTokenError)?;
            req_builder = req_builder.bearer_auth(token_response.token.secret());
        }
        req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]);
        let req = req_builder.build().context(delete::BuildRequestError)?;
        let rsp = client.execute(req).await.context(delete::ExecuteRequestError)?;
        match rsp.status() {
            StatusCode::OK => Ok(delete::Response::Ok200),
            StatusCode::ACCEPTED => Ok(delete::Response::Accepted202),
            StatusCode::NO_CONTENT => Ok(delete::Response::NoContent204),
            // NOTE(review): unlike the other operations here, this one does not try to
            // decode an ErrorResponse body; it returns the raw bytes as UnexpectedResponse.
            status_code => {
                let body: bytes::Bytes = rsp.bytes().await.context(delete::ResponseBytesError)?;
                delete::UnexpectedResponse { status_code, body: body }.fail()
            }
        }
    }
    // Response/Error types for `delete` (machine-generated).
    pub mod delete {
        use crate::{models, models::*};
        use reqwest::StatusCode;
        use snafu::Snafu;
        #[derive(Debug)]
        pub enum Response {
            Ok200,
            Accepted202,
            NoContent204,
        }
        #[derive(Debug, Snafu)]
        #[snafu(visibility(pub(crate)))]
        pub enum Error {
            UnexpectedResponse { status_code: StatusCode, body: bytes::Bytes },
            BuildRequestError { source: reqwest::Error },
            ExecuteRequestError { source: reqwest::Error },
            ResponseBytesError { source: reqwest::Error },
            DeserializeError { source: serde_json::Error, body: bytes::Bytes },
            GetTokenError { source: azure_core::errors::AzureError },
        }
    }
}
// Read-only listing of the managed WAF rule-set definitions (machine-generated by AutoRust).
pub mod managed_rule_sets {
    use crate::models::*;
    use reqwest::StatusCode;
    use snafu::{ResultExt, Snafu};
    /// List all managed rule-set definitions available to the subscription.
    pub async fn list(
        operation_config: &crate::OperationConfig,
        subscription_id: &str,
    ) -> std::result::Result<ManagedRuleSetDefinitionList, list::Error> {
        let client = &operation_config.client;
        let uri_str = &format!(
            "{}/subscriptions/{}/providers/Microsoft.Network/FrontDoorWebApplicationFirewallManagedRuleSets",
            &operation_config.base_path, subscription_id
        );
        let mut req_builder = client.get(uri_str);
        if let Some(token_credential) = &operation_config.token_credential {
            let token_response = token_credential
                .get_token(&operation_config.token_credential_resource)
                .await
                .context(list::GetTokenError)?;
            req_builder = req_builder.bearer_auth(token_response.token.secret());
        }
        req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]);
        let req = req_builder.build().context(list::BuildRequestError)?;
        let rsp = client.execute(req).await.context(list::ExecuteRequestError)?;
        match rsp.status() {
            StatusCode::OK => {
                let body: bytes::Bytes = rsp.bytes().await.context(list::ResponseBytesError)?;
                let rsp_value: ManagedRuleSetDefinitionList =
                    serde_json::from_slice(&body).context(list::DeserializeError { body })?;
                Ok(rsp_value)
            }
            status_code => {
                let body: bytes::Bytes = rsp.bytes().await.context(list::ResponseBytesError)?;
                let rsp_value: ErrorResponse = serde_json::from_slice(&body).context(list::DeserializeError { body })?;
                list::DefaultResponse { status_code, value: rsp_value, }.fail()
            }
        }
    }
    // Error type for `list` (machine-generated).
    pub mod list {
        use crate::{models, models::*};
        use reqwest::StatusCode;
        use snafu::Snafu;
        #[derive(Debug, Snafu)]
        #[snafu(visibility(pub(crate)))]
        pub enum Error {
            DefaultResponse { status_code: StatusCode, value: models::ErrorResponse, },
            BuildRequestError { source: reqwest::Error, },
            ExecuteRequestError { source: reqwest::Error, },
            ResponseBytesError { source: reqwest::Error, },
            DeserializeError { source: serde_json::Error, body: bytes::Bytes, },
            GetTokenError { source: azure_core::errors::AzureError, },
        }
    }
}
#![recursion_limit = "256"]
extern crate serde;
use self::serde::Deserialize;
use anyhow::Error;
use yew::format::{Json, Nothing};
use yew::services::fetch::{FetchService, FetchTask, Request, Response};
use yew::services::ConsoleService;
use yew::{html, Component, ComponentLink, Html, ShouldRender};

/// Root Yew component: fetches one course from the local API and renders it.
pub struct Model {
    // Browser console logger.
    console: ConsoleService,
    // Yew HTTP fetch service.
    fetch_service: FetchService,
    // Handle for building callbacks that message this component.
    link: ComponentLink<Self>,
    // True while a request is in flight.
    fetching: bool,
    // Most recently fetched course, if any.
    data: Option<Course>,
    // The in-flight fetch task; dropping it cancels the request, so it is kept here.
    ft: Option<FetchTask>,
}

// Full class-offering record as served by the API (only Course is rendered here).
#[derive(Deserialize, Debug)]
pub struct ClassOffering {
    id: i32,
    credits: i32,
    days: Option<String>,
    time: Option<String>,
    crn: i32,
    timestamp: Option<String>,
    course: Course,
    instructor: Instructor,
    term: Term,
}

#[derive(Deserialize, Debug)]
pub struct Course {
    id: i32,
    name: String,
    number: String,
    discipline: String,
}

#[derive(Deserialize, Debug)]
pub struct Instructor {
    id: i32,
    full_name: String,
    first_name: Option<String>,
    last_name: Option<String>,
    rating: Option<f64>,
    url: Option<String>,
    timestamp: Option<String>,
}

#[derive(Deserialize, Debug)]
pub struct Term {
    date: i32,
    description: String,
}

/// Component messages.
pub enum Msg {
    // User asked to fetch the course.
    FetchData,
    // Fetch completed (successfully or not).
    FetchReady(Result<Course, Error>),
    // Non-success HTTP response; currently dropped silently.
    Ignore,
}

impl Component for Model {
    type Message = Msg;
    type Properties = ();

    fn create(_: Self::Properties, link: ComponentLink<Self>) -> Self {
        Model {
            console: ConsoleService::new(),
            fetch_service: FetchService::new(),
            link,
            fetching: false,
            data: None,
            ft: None,
        }
    }

    fn update(&mut self, msg: Self::Message) -> ShouldRender {
        self.console.log("Update");
        match msg {
            Msg::FetchData => {
                self.fetching = true;
                // Keep the task alive in `self.ft`; dropping it would cancel the request.
                let task = self.fetch_json();
                self.ft = Some(task);
            }
            Msg::FetchReady(response) => {
                self.fetching = false;
                // NOTE(review): rebuilds a Course field-by-field from the fetched Course;
                // `response.ok()` alone would produce the same value.
                self.data = response
                    .map(|data| Course {
                        id: data.id,
                        name: data.name,
                        number: data.number,
                        discipline: data.discipline,
                    })
                    .ok();
            }
            Msg::Ignore => {
                return false;
            }
        }
        true
    }

    fn view(&self) -> Html {
        html!
{
            <>
                <button onclick=self.link.callback(|_| Msg::FetchData)>
                    { "Fetch Course Data" }
                </button>
                { self.view_course() }
            </>
        }
    }
}

impl Model {
    /// Renders the fetched course as a one-row table, or a placeholder while no
    /// data has arrived yet.
    fn view_course(&self) -> Html {
        if let Some(data) = &self.data {
            let headers = ["Id", "Name", "Number", "Discipline"];
            html! {
                <>
                    <table>
                        <thead>
                            <tr>
                                { for headers.iter().map(|h| html! { <th>{h}</th> }) }
                            </tr>
                        </thead>
                        // FIX: the data cells were previously emitted directly inside
                        // <tbody> with no enclosing <tr>, producing invalid table markup.
                        <tbody>
                            <tr>
                                <td>{ data.id.to_string() }</td>
                                <td>{ data.name.to_string() }</td>
                                <td>{ data.number.to_string() }</td>
                                <td>{ data.discipline.to_string() }</td>
                            </tr>
                        </tbody>
                    </table>
                </>
            }
        } else {
            html! { <p>{ "Course data hasn't fetched yet." }</p> }
        }
    }

    /// Issues a GET to the course API and returns the fetch task. The caller must
    /// store the task (see `Model::ft`): dropping it cancels the request.
    fn fetch_json(&mut self) -> yew::services::fetch::FetchTask {
        let callback = self
            .link
            .callback(move |response: Response<Json<Result<Course, Error>>>| {
                let (meta, Json(data)) = response.into_parts();
                println!("META: {:?}, {:?}", meta, data);
                if meta.status.is_success() {
                    Msg::FetchReady(data)
                } else {
                    Msg::Ignore // FIXME: Handle this error accordingly.
                }
            });
        let request = Request::get("http://localhost:8080/api/course/1")
            .body(Nothing)
            .unwrap();
        self.fetch_service.fetch(request, callback)
    }
}
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at http://mozilla.org/MPL/2.0/.

use core::ops::{Range, RangeTo};

/// Objects that can be interpreted as a bounded range.
pub trait BoundedRange<T> {
    /// Returns the range.
    fn to_range(self) -> Range<T>;
}

impl<T> BoundedRange<T> for Range<T> {
    fn to_range(self) -> Range<T> {
        self
    }
}

/// Integers.
pub trait Int: Copy {
    /// Returns whether the value is negative (trivially `false` for the
    /// unsigned implementors).
    fn negative(self) -> bool;

    /// Casts the value to an `i64` and possibly discards significant bits.
    ///
    /// = Remarks
    ///
    /// For example, `u64::MAX.cast_i64() == -1`.
    fn cast_i64(self) -> i64;

    /// Returns the quotient and remainder of `self / other` in one call.
    fn div_rem(self, other: Self) -> (Self, Self) where Self: Sized;
}

macro_rules! int_impl {
    ($name:ident) => {
        impl Int for $name {
            // FIX: the file previously carried a crate-wide
            // `#![allow(unused_comparisons)]`. The lint only fires here, where
            // `self < 0` is trivially false for the unsigned instantiations of
            // this macro, so the allow is narrowed to this one function and any
            // other genuinely useless comparison still gets flagged.
            #[allow(unused_comparisons)]
            fn negative(self) -> bool {
                self < 0
            }
            fn cast_i64(self) -> i64 {
                self as i64
            }
            fn div_rem(self, other: Self) -> (Self, Self) {
                (self / other, self % other)
            }
        }
    }
}

int_impl!(u8);
int_impl!(u16);
int_impl!(u32);
int_impl!(u64);
int_impl!(u128);
int_impl!(usize);
int_impl!(i8);
int_impl!(i16);
int_impl!(i32);
int_impl!(i64);
int_impl!(i128);
int_impl!(isize);

/// Unsigned integers.
pub trait UnsignedInt: Int + Sized {
    /// Calculates the next power of two greater or equal the current value.
    ///
    /// [return_value]
    /// The next power of two or `1` on overflow.
    ///
    /// NOTE(review): the impls delegate to the std inherent methods, whose
    /// overflow behavior differs from the "`1` on overflow" claim above —
    /// confirm which contract is intended.
    fn next_power_of_two(&self) -> Self;

    /// Calculates the next power of two greater or equal the current value.
    ///
    /// [return_value]
    /// The next power of two or `None` on overflow.
    fn checked_next_power_of_two(&self) -> Option<Self>;
}

macro_rules! uint_impl {
    ($name:ident) => {
        impl UnsignedInt for $name {
            fn next_power_of_two(&self) -> $name {
                $name::next_power_of_two(*self)
            }
            fn checked_next_power_of_two(&self) -> Option<$name> {
                $name::checked_next_power_of_two(*self)
            }
        }
        // A `..end` range of an unsigned type is bounded below by zero.
        impl BoundedRange<$name> for RangeTo<$name> {
            fn to_range(self) -> Range<$name> {
                Range { start: 0, end: self.end }
            }
        }
    }
}

uint_impl!(u8);
uint_impl!(u16);
uint_impl!(u32);
uint_impl!(u64);
uint_impl!(u128);
uint_impl!(usize);

/// Signed integers.
pub trait SignedInt: Int {}

macro_rules! sint_impl {
    ($name:ident) => {
        impl SignedInt for $name {}
    }
}

sint_impl!(i8);
sint_impl!(i16);
sint_impl!(i32);
sint_impl!(i64);
sint_impl!(i128);
sint_impl!(isize);
#[derive(Deserialize, Debug)] pub struct GithubAuth { /// Client ID of the Github app client_id: String, /// Secret identifier of the Github app secret: String, /// Address to redirect to after a login attempt redirect: String, } impl GithubAuth { /// Gets the client ID from the config pub fn get_client_id(&self) -> &str { &self.client_id } /// Gets the secret identifier of the Github app from the config pub fn get_secret(&self) -> &str { &self.secret } /// Gets the address to redirect to after a login attempt pub fn get_redirect(&self) -> &str { &self.redirect } }
use std::process::Stdio;
use tokio::io::{AsyncBufReadExt, AsyncWriteExt, BufReader, BufWriter};
use tokio::process::Command;
use std::env;
use std::fmt;

mod calculation;
mod geod_error;

/// Top-level CLI error.
#[derive(Debug)]
enum Error {
    ArgumentError,
}

impl std::error::Error for Error {}

impl fmt::Display for Error {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "{:?}", self)
    }
}

/// Entry point: expects exactly one argument (path to a GeodSolve-like binary)
/// and runs all three calculation comparisons against it.
#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    let args: Vec<String> = env::args().collect();
    eprintln!("args: {:?}", args);
    match args.len() {
        2 => {
            let calcs: Vec<Calculation> = vec![Calculation::DirectP1ToP2, Calculation::DirectP2ToP1, Calculation::Inverse];
            run(&args[1], &calcs).await
        }
        _ => {
            usage();
            Err(Error::ArgumentError.into())
        }
    }
}

/// Prints CLI usage.
fn usage() {
    println!(
        "USAGE: validate_geodsolve <path-string> WHERE: path-string: path to a binary like GeodSolve "
    );
}

/// Feeds every test case read from stdin through the external binary (direct
/// calculation in both directions plus inverse), compares its output with the
/// expected fields of the test case, and prints the worst observed errors.
async fn run(bin_name: &str, calcs: &Vec<Calculation>) -> Result<(), Box<dyn std::error::Error>> {
    let mut test_case_reader = BufReader::new(tokio::io::stdin()).lines();
    let geod = Geodesic::wgs84();
    // Direct Calculation Process
    let mut direct_proc = Command::new(bin_name)
        .arg("-p")
        .arg("10")
        .arg("-f")
        .stdout(Stdio::piped())
        .stdin(Stdio::piped())
        .spawn()
        .expect("failed to spawn direct calculation command");
    let mut direct_writer = BufWriter::new(
        direct_proc
            .stdin
            .take()
            .expect("geodsolve did not have a handle to stdin"),
    );
    let mut direct_reader = BufReader::new(
        direct_proc
            .stdout
            .take()
            .expect("geodsolve did not have a handle to stdout"),
    )
    .lines();
    // Running maxima over all test lines, tracked per metric.
    let mut max_position_error: Option<DirectError> = Option::None;
    let mut max_azi_error: Option<DirectError> = Option::None;
    let mut max_m12_error: Option<DirectError> = Option::None;
    // Inverse Calculation Process
    let mut inverse_proc = Command::new(bin_name)
        .arg("-i")
        .arg("-p")
        .arg("10")
        .arg("-f")
        .stdout(Stdio::piped())
        .stdin(Stdio::piped())
        .spawn()
        .expect("failed to spawn indirect calculation command");
    let mut inverse_writer = BufWriter::new(
        inverse_proc
            .stdin
            .take()
            .expect("geodsolve did not have a handle to stdin"),
    );
    let mut inverse_reader = BufReader::new(
        inverse_proc
            .stdout
            .take()
            .expect("geodsolve did not have a handle to stdout"),
    )
    .lines();
    let mut max_distance_error: Option<InverseError> = None;
    let mut line_number = 0;
    while let Some(test_case_line) = test_case_reader.next_line().await? {
        // NOTE(review): panics on malformed numeric input (unwrap on parse).
        let test_case_fields = test_case_line
            .split(" ")
            .map(|s| s.parse::<f64>().unwrap())
            .collect();
        let calc = Calculation::DirectP1ToP2;
        if calcs.contains(&calc) {
            let input = format_input(&test_case_fields, calc);
            direct_writer
                .write_all(input.as_bytes())
                .await
                .expect("write failed");
            // Flush per line so the child process sees the input immediately.
            direct_writer.flush().await.expect("flush failed");
            let direct_error: DirectError = match direct_reader.next_line().await {
                Err(e) => panic!("err: {:?}", e),
                Ok(None) => panic!("geodsolve should have output after giving it input"),
                Ok(Some(geodsolve_output_line)) => {
                    let output_fields: Vec<f64> = geodsolve_output_line
                        .split(" ")
                        .map(|s| s.parse::<f64>().unwrap())
                        .collect();
                    DirectError::new(
                        output_fields[3],
                        output_fields[4],
                        output_fields[5],
                        output_fields[8],
                        test_case_fields[3],
                        test_case_fields[4],
                        test_case_fields[5],
                        test_case_fields[8],
                        &geod,
                        line_number,
                        calc,
                    )
                }
            };
            max_position_error = max_error(max_position_error, direct_error, |e| e.position_error);
            max_azi_error = max_error(max_azi_error, direct_error, |e| e.azi_error);
            max_m12_error = max_error(max_m12_error, direct_error, |e| e.m12_error);
        }
        let calc = Calculation::DirectP2ToP1;
        if calcs.contains(&calc) {
            let input = format_input(&test_case_fields, calc);
            direct_writer
                .write_all(input.as_bytes())
                .await
                .expect("write failed");
            direct_writer.flush().await.expect("flush failed");
            let direct_error: DirectError = match direct_reader.next_line().await {
                Err(e) => panic!("err: {:?}", e),
                Ok(None) => panic!("geodsolve should have output after giving it input"),
                Ok(Some(geodsolve_output_line)) => {
                    let output_fields: Vec<f64> = geodsolve_output_line
                        .split(" ")
                        .map(|s| s.parse::<f64>().unwrap())
                        .collect();
                    // Reverse direction: expectations come from the test case's start
                    // point (fields 0..2) with field 8 negated.
                    DirectError::new(
                        output_fields[3],
                        output_fields[4],
                        output_fields[5],
                        output_fields[8],
                        test_case_fields[0],
                        test_case_fields[1],
                        test_case_fields[2],
                        -test_case_fields[8],
                        &geod,
                        line_number,
                        calc,
                    )
                }
            };
            max_position_error = max_error(max_position_error, direct_error, |e| e.position_error);
            max_azi_error = max_error(max_azi_error, direct_error, |e| e.azi_error);
            max_m12_error = max_error(max_m12_error, direct_error, |e| e.m12_error);
        }
        let calc = Calculation::Inverse;
        if calcs.contains(&calc) {
            let input = format_input(&test_case_fields, calc);
            inverse_writer
                .write_all(input.as_bytes())
                .await
                .expect("write failed");
            inverse_writer.flush().await.expect("flush failed");
            let inverse_error: InverseError = match inverse_reader.next_line().await {
                Err(e) => panic!("err: {:?}", e),
                Ok(None) => panic!("geodsolve should have output after giving it input"),
                Ok(Some(geodsolve_output_line)) => {
                    let output_fields: Vec<f64> = geodsolve_output_line
                        .split(" ")
                        .map(|s| s.parse::<f64>().unwrap())
                        .collect();
                    InverseError::new(
                        output_fields[6],
                        test_case_fields[6],
                        line_number,
                    )
                }
            };
            max_distance_error = max_error(max_distance_error, inverse_error, |e| e.s12_error);
        }
        line_number += 1;
    }
    // Scale factor applied to every reported error before printing.
    let mult: f64 = 1.0e9;
    // NOTE(review): unwrap() panics if the corresponding calculation was not in
    // `calcs` (no error was ever recorded) — confirm all three are always requested.
    let max_position_error = max_position_error.unwrap();
    let max_azi_error = max_azi_error.unwrap();
    let max_m12_error = max_m12_error.unwrap();
    // Print output like GeodTest's
    println!(
        "position error (direct) 0 {:.2} {}",
        max_position_error.position_error * mult,
        max_position_error.line_number
    );
    println!(
        "azi error (direct) 1 {:.2} {}",
        max_azi_error.azi_error * mult,
        max_azi_error.line_number
    );
    println!(
        "m12 error (direct) 2 {:.2} {}",
        max_m12_error.m12_error * mult,
        max_m12_error.line_number
    );
    let max_distance_error = max_distance_error.unwrap();
    println!(
        "distance error (inverse) 3 {:.2} {}",
        max_distance_error.s12_error * mult,
        max_distance_error.line_number
    );
    Ok(())
}
use
calculation::Calculation;
use geod_error::{DirectError, InverseError};
use geographiclib_rs::Geodesic;

/// Builds the one-line stdin payload for the child GeodSolve process,
/// selecting which test-case fields to send based on the calculation kind.
fn format_input(fields: &Vec<f64>, calc: Calculation) -> String {
    let (a, b, c, d) = match calc {
        // Forward direct: start point, start azimuth, distance.
        Calculation::DirectP1ToP2 => (fields[0], fields[1], fields[2], fields[6]),
        // Reverse direct: end point, end azimuth, negated distance.
        Calculation::DirectP2ToP1 => (fields[3], fields[4], fields[5], -fields[6]),
        // Inverse: the two endpoints.
        Calculation::Inverse => (fields[0], fields[1], fields[3], fields[4]),
    };
    format!("{} {} {} {}\n", a, b, c, d)
}

/// Folds a candidate error into a running maximum under the key extractor `f`;
/// an empty accumulator is simply replaced by the candidate.
fn max_error<T, F>(acc: Option<T>, candidate: T, f: F) -> Option<T>
where
    F: Fn(T) -> f64,
    T: Copy,
{
    // Comparison order (`f(candidate) > f(best)`) matches the original exactly,
    // including its behavior when either key is NaN.
    Some(acc.map_or(candidate, |best| {
        if f(candidate) > f(best) {
            candidate
        } else {
            best
        }
    }))
}
// Ownership demonstration modules (rust-by-example style). Each `main` is a
// unit test whose comments — including the deliberately commented-out error
// lines — are the teaching material.
mod moves {
    // This function takes ownership of the heap allocated memory
    fn destroy_box(c: Box<i32>) {
        println!("Destroying a box that contains {}", c);
    }

    #[test]
    fn main() {
        // _Stack_ allocated integer
        let x = 5u32;

        // *Copy* `x` into `y` - no resources are moved
        let y = x;
        println!("x is {}, y is {}", x, y);

        // `a` is a pointer to a _heap_ allocated integer
        let a = Box::new(5i32);
        println!("a contains: {}", a);

        // *Move* `a` into `b`
        let b = a;
        // The pointer address of `a` is copied (not the data) into `b`.
        // Both are now pointers to the same heap allocated data, but
        // `b` now owns it.

        // Error! `a` can no longer access the data, because it no longer owns the
        // heap memory
        // println!("a contains: {}", a);

        // This function takes ownership of the heap allocated memory from `b`
        destroy_box(b);

        // Since the heap memory has been freed at this point, this action would
        // result in dereferencing freed memory, but it's forbidden by the compiler
        // println!("b contains: {}", b);
    }
}

mod mutability {
    #[test]
    fn main() {
        let imm = Box::new(5u32);
        println!("imm box: {}", imm);

        // Mutability error
        // *imm = 4;

        // *Move* the box, changing the ownership (and mutability)
        let mut mu = imm;
        println!("mu box: {}", mu);

        *mu = 4;
        println!("mu box: {}", mu);
    }
}

mod partial_moves {
    #[test]
    fn main() {
        #[derive(Debug)]
        struct Person {
            name: String,
            age: u8,
        }

        let person = Person {
            name: String::from("Alice"),
            age: 20,
        };

        // `name` is moved out of person, but `age` is referenced
        let Person { name, ref age } = person;
        println!("age is {}", age);
        println!("name is {}", name);

        // Error! borrow of partially moved value: `person` partial move occurs
        // println!("person is {:?}", person);

        // `person` cannot be used but `person.age` can be used as it is not moved
        println!("person's age is {}", person.age);
    }
}
// error-pattern:fail iter x() -> int { fail; put 0; } fn main() { let a = @0; for each x in x() { } }
use actix_web::{ HttpResponse, };
use super::super::{ service, response, };

/// Handler for the pickup index: loads the pickup domain designs from the
/// service layer and renders them into an HTTP response.
pub fn index() -> HttpResponse {
    response::pickup_index::response(&service::pickup::index())
}
use actix_files::NamedFile;
use actix_web::{cookie, http, web, HttpMessage, HttpRequest, HttpResponse};
use actix_web::{get, Responder};
use std::path::PathBuf;

use crate::application::app::AppState;
use crate::handle_to_app;
use serde::Deserialize;

// Subdirectory under each client view folder that files are served from.
const DEPLOY_OR_STATIC: &str = "static";

/// JSON payload for a join/create request.
#[derive(Deserialize)]
pub struct CookieInfo {
    username: String,
    viewtype: String,
    game_id: String,
    password: String,
}

/// Validates a join/create request and, on success, issues the four session
/// cookies (`uuid`, `username`, `viewtype`, `game_id`).
///
/// Flow: reject game ids without a digit, refuse joins from an active main
/// director, then — if the game exists — dispatch on the requested view type
/// ("player" / "director" / "viewer"); otherwise only a "director" may create
/// a new game. All responses are plain-text bodies; "Success" carries cookies.
///
/// NOTE(review): the content type "plain/text" used throughout is not the
/// standard MIME type ("text/plain"); it is a runtime string, so it is left
/// untouched here — confirm clients do not depend on it before fixing.
pub async fn set_cookies(cookie_info: web::Json<CookieInfo>, req: HttpRequest) -> HttpResponse {
    let (game_id, username, viewtype) = (
        cookie_info.game_id.clone(),
        cookie_info.username.clone(),
        cookie_info.viewtype.clone(),
    );
    println!(
        "Called set_cookies with args: \n Username: {:?}, Viewtype: {:?}, GameID: {:?}",
        username, viewtype, game_id
    );
    // A valid game id must contain at least one ASCII digit.
    if !game_id.chars().any(|c| c.is_ascii_digit()) {
        return HttpResponse::Ok()
            .content_type("plain/text")
            .body("Invalid Game ID");
    }
    let addr = req.app_data::<web::Data<actix::Addr<AppState>>>().unwrap();
    // Make sure the requester isn't currently a main director of another game.
    if let (Some(uuid), Some(viewtype), Some(game_id)) = (
        req.cookie("uuid"),
        req.cookie("viewtype"),
        req.cookie("game_id"),
    ) {
        if viewtype.value() == "director"
            && addr
                .send(handle_to_app::IsMainDirector {
                    game_id: game_id.value().to_string(),
                    user_id: uuid.value().to_string(),
                })
                .await
                .unwrap()
        {
            return HttpResponse::Ok()
                .content_type("plain/text")
                .body(format!("Cannot join a game while being a main director. Go to /direct/director/{}/index.html", game_id.value()));
        }
    }
    // Build the four session cookies. `uuid` and `viewtype` values are filled
    // in below once known; `secure(true)` is commented out — presumably for
    // local HTTP development. TODO confirm it is re-enabled in production.
    let mut id_cookie = cookie::Cookie::build("uuid", "")
        .same_site(cookie::SameSite::Strict)
        // .secure(true)
        .max_age(time::Duration::hours(2)) // 2 hrs
        .finish();
    let mut viewtype_cookie = cookie::Cookie::build("viewtype", "")
        .same_site(cookie::SameSite::Strict)
        // .secure(true)
        .max_age(time::Duration::hours(2))
        .finish();
    let name_cookie = cookie::Cookie::build("username", username.clone())
        .same_site(cookie::SameSite::Strict)
        // .secure(true)
        .max_age(time::Duration::hours(2))
        .finish();
    let game_id_cookie = cookie::Cookie::build("game_id", game_id.clone())
        .same_site(cookie::SameSite::Strict)
        // .secure(true)
        .max_age(time::Duration::hours(2))
        .finish();
    // Fresh session id for this user.
    let temp_uuid = uuid::Uuid::new_v4().to_hyphenated().to_string();
    id_cookie.set_value(temp_uuid.to_string());
    if addr
        .send(handle_to_app::DoesGameExist {
            game_id: game_id.clone(),
        })
        .await
        .unwrap()
    {
        let is_open = addr
            .send(handle_to_app::IsGameOpen {
                game_id: game_id.clone(),
            })
            .await
            .unwrap();
        match viewtype.as_ref() {
            "player" => {
                if is_open {
                    // NewPlayer replies with the assigned player type, or the
                    // sentinel string "Name taken".
                    let player_type = addr
                        .send(handle_to_app::NewPlayer {
                            user_id: temp_uuid,
                            game_id,
                            username: username.clone(),
                        })
                        .await
                        .unwrap();
                    if player_type == "Name taken" {
                        HttpResponse::Ok()
                            .content_type("plain/text")
                            .body("Name taken")
                    } else {
                        viewtype_cookie.set_value(player_type);
                        HttpResponse::build(http::StatusCode::OK)
                            .cookie(id_cookie)
                            .cookie(name_cookie)
                            .cookie(viewtype_cookie)
                            .cookie(game_id_cookie)
                            .content_type("plain/text")
                            .body("Success")
                    }
                } else {
                    HttpResponse::Ok()
                        .content_type("plain/text")
                        .body("Game not open yet.")
                }
            }
            "director" => {
                let pswd = cookie_info.password.clone();
                viewtype_cookie.set_value("director");
                if addr
                    .send(handle_to_app::IsRightPswd { pswd })
                    .await
                    .unwrap()
                {
                    // Fire-and-forget: registration outcome is not awaited.
                    addr.do_send(handle_to_app::NewDirector {
                        user_id: temp_uuid,
                        game_id,
                        username: username.to_string(),
                    });
                    HttpResponse::build(http::StatusCode::OK)
                        .cookie(id_cookie)
                        .cookie(name_cookie)
                        .cookie(viewtype_cookie)
                        .cookie(game_id_cookie)
                        .content_type("plain/text")
                        .body("Success")
                } else {
                    HttpResponse::Ok()
                        .content_type("plain/text")
                        .body("Invalid Password")
                }
            }
            "viewer" => {
                viewtype_cookie.set_value("viewer");
                // NewViewer replies with a bool; false means the name is taken.
                if addr
                    .send(handle_to_app::NewViewer {
                        user_id: temp_uuid,
                        game_id,
                        username: username.clone(),
                    })
                    .await
                    .unwrap()
                {
                    HttpResponse::build(http::StatusCode::OK)
                        .cookie(id_cookie)
                        .cookie(name_cookie)
                        .cookie(viewtype_cookie)
                        .cookie(game_id_cookie)
                        .content_type("plain/text")
                        .body("Success")
                } else {
                    HttpResponse::Ok()
                        .content_type("plain/text")
                        .body("Name taken")
                }
            }
            _ => {
                println!("SMTH BAD HAPPENED");
                HttpResponse::Ok()
                    .content_type("plain/text")
                    .body("Unknown Viewing Type")
            }
        }
    } else if viewtype == "director" {
        // Game does not exist yet: only a director with the right password
        // may create it.
        viewtype_cookie.set_value("director");
        let pswd = cookie_info.password.clone();
        if addr
            .send(handle_to_app::IsRightPswd { pswd })
            .await
            .unwrap()
        {
            addr.send(handle_to_app::NewGame {
                user_id: temp_uuid.clone(),
                game_id,
                username: username.to_string(),
            })
            .await
            .unwrap();
            HttpResponse::Ok()
                .cookie(id_cookie)
                .cookie(name_cookie)
                .cookie(viewtype_cookie)
                .cookie(game_id_cookie)
                .content_type("plain/text")
                .body("Success")
        } else {
            HttpResponse::Ok()
                .content_type("plain/text")
                .body("Invalid Password")
        }
    } else {
        HttpResponse::Ok()
            .content_type("plain/text")
            .body("No Game with that ID Found")
    }
}

/// Serves client files (html/js/css/wasm) for a validated
/// (viewtype, game_id) cookie pair matching the URL.
///
/// NOTE(review): the route literal contains "(unknown)" where the handler
/// clearly expects a `{filename}` segment (it reads
/// `match_info().get("filename")`). This looks like corrupted source — the
/// route string must be restored from version control before this handler
/// can match anything.
#[get("/{play_view_direct}/{type}/{gameid:\\d*}/(unknown).{ext}")]
async fn get_html(req: HttpRequest) -> impl Responder {
    // http://localhost:8080/play/producer/gameid/index.html
    println!("Received request for Files");
    let mut prepath = "../client/".to_owned();
    // let prepath = "../client/root/static/";
    let filename = req.match_info().get("filename").unwrap();
    let ext = req.match_info().get("ext").unwrap();
    let _play_view_direct = req.match_info().get("play_view_direct").unwrap();
    let url_viewtype = req.match_info().get("type").unwrap();
    let url_game_id = req.match_info().get("gameid").unwrap();
    if let (Some(view_type), Some(game_id)) = (req.cookie("viewtype"), req.cookie("game_id")) {
        let addr = req.app_data::<web::Data<actix::Addr<AppState>>>().unwrap();
        // Cookies must agree with the URL, and the game must exist.
        if view_type.value() == url_viewtype
            && game_id.value() == url_game_id
            && addr
                .send(handle_to_app::DoesGameExist {
                    game_id: game_id.value().to_string(),
                })
                .await
                .unwrap()
        {
            // Map the view type onto its client directory.
            prepath = match view_type.value() {
                "viewer" => prepath + "viewer/",
                "producer" => prepath + "producer/",
                "consumer" => prepath + "consumer/",
                "director" => prepath + "director_auth/",
                _ => {
                    return Ok(NamedFile::open(format!(
                        "../client/404/{}/index.html",
                        DEPLOY_OR_STATIC
                    )))
                }
            };
            prepath += DEPLOY_OR_STATIC;
            prepath += "/";
            let full_path = (*prepath).to_owned() + filename + "." + ext;
            // Only whitelisted extensions are served.
            match ext {
                "html" | "js" | "css" | "wasm" => {
                    // if let Some(view_type) = req.cookie("viewtype") {
                    println!("HI: {cat}", cat = full_path);
                    return Ok(NamedFile::open(
                        (prepath + filename + "." + ext).parse::<PathBuf>().unwrap(),
                    ));
                }
                _ => (),
            }
        }
    }
    Err(actix_web::error::ErrorUnauthorized("Game does not exist."))
}

/// Redirects the visitor to the page matching their `viewtype`/`game_id`
/// cookies, falling back to /login when the cookies are absent or unknown.
pub async fn redirect(req: HttpRequest) -> impl Responder {
    if let Some(game_id) = req.cookie("game_id") {
        if let Some(viewtype) = req.cookie("viewtype") {
            match viewtype.value() {
                "director" => {
                    return HttpResponse::build(http::StatusCode::FOUND)
                        .header(
                            http::header::LOCATION,
                            format!("direct/director/{}/index.html", game_id.value()),
                        )
                        .finish();
                }
                "consumer" => {
                    return HttpResponse::build(http::StatusCode::FOUND)
                        .header(
                            http::header::LOCATION,
                            format!("play/consumer/{}/index.html", game_id.value()),
                        )
                        .finish()
                }
                "producer" => {
                    return HttpResponse::build(http::StatusCode::FOUND)
                        .header(
                            http::header::LOCATION,
                            format!("play/producer/{}/index.html", game_id.value()),
                        )
                        .finish()
                }
                "viewer" => {
                    return HttpResponse::build(http::StatusCode::FOUND)
                        .header(
                            http::header::LOCATION,
                            format!("view/viewer/{}/index.html", game_id.value()),
                        )
                        .finish()
                }
                _ => (),
            }
        }
    }
    HttpResponse::build(http::StatusCode::FOUND)
        .header(http::header::LOCATION, "/login")
        .finish()
}
/// Serves files under the view type's `assets/` directory.
///
/// NOTE(review): unlike `get_html`, neither this handler nor `inline` checks
/// cookies against the URL — assets are served to anyone who can form the
/// path. Confirm this is intentional. The viewtype→directory match below is
/// duplicated in `inline` (and in `get_html`); a shared helper would keep the
/// three in sync.
#[get("/{play_view_direct}/{type}/{gameid:\\d*}/assets/{rest_of_path:.*}")]
async fn assets(req: HttpRequest) -> impl Responder {
    let mut prepath = "../client/".to_owned();
    let url_viewtype = req.match_info().get("type").unwrap();
    let path = req.match_info().get("rest_of_path").unwrap();
    prepath = match url_viewtype {
        "viewer" => prepath + "viewer/",
        "producer" => prepath + "producer/",
        "consumer" => prepath + "consumer/",
        "director" => prepath + "director_auth/",
        _ => {
            // Unknown view type: serve the 404 page instead.
            let failed: actix_files::NamedFile =
                NamedFile::open(format!("../client/404/{}/index.html", DEPLOY_OR_STATIC)).unwrap();
            return failed;
        }
    };
    prepath += DEPLOY_OR_STATIC;
    prepath += "/assets/";
    prepath += path;
    println!("{}", prepath);
    // Panics if the file is missing — TODO confirm a 500 here is acceptable.
    NamedFile::open(prepath).unwrap()
}

/// Serves wasm-bindgen snippet files:
/// `<view dir>/<static>/snippets/<folder>/<path>.js`.
#[get("/{play_view_direct}/{type}/{gameid:\\d*}/snippets/{folder}/{rest_of_path:.*}.js")]
async fn inline(req: HttpRequest) -> impl Responder {
    let mut prepath = "../client/".to_owned();
    let url_viewtype = req.match_info().get("type").unwrap();
    let folder = req.match_info().get("folder").unwrap();
    let path = req.match_info().get("rest_of_path").unwrap();
    prepath = match url_viewtype {
        "viewer" => prepath + "viewer/",
        "producer" => prepath + "producer/",
        "consumer" => prepath + "consumer/",
        "director" => prepath + "director_auth/",
        _ => {
            // Unknown view type: serve the 404 page instead.
            let failed: actix_files::NamedFile =
                NamedFile::open(format!("../client/404/{}/index.html", DEPLOY_OR_STATIC)).unwrap();
            return failed;
        }
    };
    prepath += DEPLOY_OR_STATIC;
    prepath += "/snippets/";
    prepath += folder;
    prepath += "/";
    prepath += path;
    prepath += ".js";
    println!("{}", prepath);
    NamedFile::open(prepath).unwrap()
}
/*
---Adding Words - Part 1---
Add two English words together! Implement a class Arith (struct struct Arith{value : &'static str,} in Rust)
such that

//javascript
var k = new Arith("three");
k.add("seven"); //this should return "ten"

//c++
Arith* k = new Arith("three");
k->add("seven"); //this should return string "ten"

//Rust
let c = Arith{value: "three"};
c.add("seven") // this should return &str "ten"

Input - Will be between zero and ten and will always be in lower case
Output - Word representation of the result of the addition. Should be in lower case
*/

// Number words indexed by their value, covering 0..=20 so any sum of two
// inputs in the specified 0..=10 range has a representation.
const WORDS: [&str; 21] = [
    "zero", "one", "two", "three", "four", "five", "six", "seven", "eight", "nine", "ten",
    "eleven", "twelve", "thirteen", "fourteen", "fifteen", "sixteen", "seventeen", "eighteen",
    "nineteen", "twenty",
];

fn main() {
    let test = Arith { value: "three" };
    println!("ten = {:?}", test.add("seven"));
    println!("eleven = {:?}", test.add("eight"));
    println!("three = {:?}", test.add("zero"));
}

/// Wrapper around a number word ("zero" .. "ten").
#[derive(Copy, Clone)]
struct Arith {
    value: &'static str,
}

impl Arith {
    /// Adds `self.value` and `value2` as English number words and returns the
    /// word for the sum.
    ///
    /// Improvement over the previous version: the lookup table was a
    /// `HashMap` rebuilt on every call plus an O(n) reverse scan; a single
    /// const array now serves both directions (index = value) with no
    /// per-call allocation.
    ///
    /// # Panics
    /// Panics with a descriptive message on an unknown word, or if the sum
    /// exceeds twenty (the old version also panicked in both cases, via
    /// bare `unwrap`).
    fn add(self, value2: &'static str) -> &str {
        let index_of = |word: &str| {
            WORDS
                .iter()
                .position(|&w| w == word)
                .unwrap_or_else(|| panic!("unknown number word: {:?}", word))
        };
        WORDS[index_of(self.value) + index_of(value2)]
    }
}
//! An HTTP library for the Lunatic Virtual Machine. #![deny(missing_debug_implementations, unused_must_use)] use std::{collections::HashMap, io::Write, net::ToSocketAddrs}; #[macro_use] extern crate derivative; #[cfg(test)] mod regressions; use body::{mime::HTML, Body}; pub use puck_codegen::handler; pub use anyhow; pub use request::Request; pub use response::Response; use encoder::Encoder; pub mod body; pub mod encoder; pub mod request; pub mod response; pub mod ws; pub trait Handler { fn handle<ADDRESS>(address: ADDRESS) -> anyhow::Result<()> where ADDRESS: ToSocketAddrs; } pub fn serve<H: Handler, ADDRESS: ToSocketAddrs>(address: ADDRESS) -> anyhow::Result<()> { H::handle(address) } pub fn err_404(_: Request) -> Response { Response { headers: { let mut res = HashMap::new(); res.insert("Content-Type".to_string(), HTML.to_string()); res }, body: Body::from_string("<h1>404: Not found</h1>".to_string()), status: 404, reason: "not found".to_string(), } } pub fn err_400() -> Response { Response { headers: { let mut res = HashMap::new(); res.insert("Content-Type".to_string(), HTML.to_string()); res }, body: Body::from_string("<h1>400: bad request</h1>".to_string()), status: 400, reason: "bad request".to_string(), } } pub fn write_response(res: Response, stream: impl Write) { let mut encoder = Encoder::new(res); encoder.write_tcp_stream(stream).unwrap(); }
pub use crate::tmux_bindings::{ plugin, format_tree, format_entry, plugin__bindgen_ty_1 as plugin_inner, format_plugin, function_plugin, cmd_entry, notification_plugin, FORMAT_PLUGIN, FORMAT_FUNCTION_PLUGIN, CMD_PLUGIN, NOTIFICATION_PLUGIN, MULTI_PLUGIN, notification_cb, notify_entry, plugin_function_cb, client, session, window, window_pane, cmd_retval, cmd_retval_CMD_RETURN_ERROR, cmd_retval_CMD_RETURN_NORMAL, cmd_retval_CMD_RETURN_WAIT, cmd_retval_CMD_RETURN_STOP, cmd, cmd_entry__bindgen_ty_1, cmd_entry_flag, cmd_find_state, cmd_find_type, cmdq_item, cmdq_print, cmdq_error, event, grid_cell, mouse_event, termios, timeval, tty, tty_key, tty_term, utf8_data, window_mode, winlink, };
// warning: globals should be all caps // warning: unused const foo: &'static str = "foo"; fn main() { println!("Hello, world!"); }
use std::collections::HashMap; use std::time::Instant; use itertools::Itertools; const INPUT: &str = include_str!("../input.txt"); fn build_happiness_map() -> HashMap<&'static str, HashMap<&'static str, i64>> { let mut happiness_map: HashMap<&'static str, HashMap<&'static str, i64>> = HashMap::new(); for line in INPUT.lines() { let words: Vec<_> = line.split_ascii_whitespace().collect(); let recipient = words[0]; let giver = words[10].strip_suffix('.').unwrap(); let mut amount: i64 = words[3].parse().unwrap(); if words[2] == "lose" { amount = -amount; } happiness_map .entry(recipient) .or_insert_with(HashMap::new) .insert(giver, amount); } happiness_map } fn part1() -> i64 { let happiness_map = build_happiness_map(); let person_count = happiness_map.len(); let mut max_happiness = 0; for permutation in happiness_map.keys().permutations(person_count) { let mut cur_happiness = 0; for i in 0..person_count { let neighbors = if i == 0 { [1, person_count - 1] } else if i == person_count - 1 { [person_count - 2, 0] } else { [i - 1, i + 1] }; let cur_person = permutation[i]; let neighbor1 = permutation[neighbors[0]]; let neighbor2 = permutation[neighbors[1]]; cur_happiness += happiness_map[cur_person][neighbor1] + happiness_map[cur_person][neighbor2] } max_happiness = max_happiness.max(cur_happiness) } dbg!(&happiness_map); max_happiness } fn part2() -> i64 { let happiness_map = build_happiness_map(); let mut max_happiness = 0; let mut all_people: Vec<_> = happiness_map.keys().cloned().collect(); all_people.push("self"); let person_count = all_people.len(); for permutation in all_people.iter().permutations(all_people.len()) { let mut cur_happiness = 0; for i in 0..person_count { let neighbors = if i == 0 { [1, person_count - 1] } else if i == person_count - 1 { [person_count - 2, 0] } else { [i - 1, i + 1] }; let cur_person = permutation[i]; let neighbor1 = permutation[neighbors[0]]; let neighbor2 = permutation[neighbors[1]]; if let Some(map_for_person) = 
happiness_map.get(cur_person) { cur_happiness += map_for_person.get(neighbor1).unwrap_or(&0) + map_for_person.get(neighbor2).unwrap_or(&0); } } max_happiness = max_happiness.max(cur_happiness) } dbg!(&happiness_map); max_happiness } fn main() { let start = Instant::now(); println!("part 1: {}", part1()); println!("part 1 took {}ms", (Instant::now() - start).as_millis()); let start = Instant::now(); println!("part 2: {}", part2()); println!("part 2 took {}ms", (Instant::now() - start).as_millis()); } #[cfg(test)] mod tests { use super::*; #[test] fn test_part1() { assert_eq!(part1(), 733); } #[test] fn test_part2() { assert_eq!(part2(), 725); } }
use std::fs;
use std::path::Path;

use std::collections::BTreeMap;

use serde::Serialize;

use super::{BasicBlock, Branch};

/// Final trace representation, for serialization.
#[derive(Debug, Clone, Serialize)]
pub struct BasicBlockList {
    list: Vec<BasicBlock>,
}

impl BasicBlockList {
    /// Constructor, resolves:
    /// * Indirect call/jump and return destinations
    /// * Conditional jump taken or not taken
    ///
    /// Strategy: within each ASID bin, blocks are sorted by execution
    /// sequence number; the PC of the *next* executed block is the actual
    /// destination of the previous block's branch, which lets sentinel
    /// branch variants be rewritten into their resolved forms.
    /// (ASID presumably = address space id, i.e. per-process — TODO confirm
    /// against `BasicBlock::asid`.)
    pub fn from(list: Vec<BasicBlock>) -> BasicBlockList {
        // Used at the end to assert no block was lost during re-binning.
        let total_bbs = list.len();
        let mut asid_map: BTreeMap<u64, Vec<BasicBlock>> = BTreeMap::new();

        // Bin by ASID
        list.into_iter().for_each(|bb| {
            let bbv = asid_map.entry(bb.asid()).or_insert(vec![]);
            bbv.push(bb);
        });

        // Resolution via looking at next BB executed
        for bbv in asid_map.values_mut() {
            // Guest execution order sort (via atomic sequence number)
            bbv.sort_unstable_by_key(|bb| bb.seq_num());

            // NOTE: cannot use slice::windows(2) b/c need to mutate prev BB
            let bbv_len = bbv.len();
            for idx in 0..bbv_len {
                let next_idx = idx + 1;
                if next_idx >= bbv_len {
                    // Last block in the bin has no successor to resolve from.
                    break;
                }

                let actual_dst_pc = bbv[next_idx].pc();

                // TODO: add logic here to not fill in if next number is not correct sequence number (e.g. lift fail)
                if let Some(branch) = bbv[idx].branch_mut() {
                    // Rewrite sentinel variants in place using the observed
                    // destination; non-sentinel variants (except DirectJump,
                    // whose taken flag is recomputed) are left untouched.
                    match branch {
                        Branch::CallSentinel { site_pc, reg, .. } => {
                            *branch = Branch::IndirectCall {
                                site_pc: *site_pc,
                                dst_pc: actual_dst_pc,
                                reg_used: reg.to_string(),
                            };
                        }
                        Branch::ReturnSentinel { site_pc, .. } => {
                            *branch = Branch::Return {
                                site_pc: *site_pc,
                                dst_pc: actual_dst_pc,
                            };
                        }
                        Branch::IndirectJumpSentinel { site_pc, reg, .. } => {
                            *branch = Branch::IndirectJump {
                                site_pc: *site_pc,
                                dst_pc: actual_dst_pc,
                                reg_used: reg.to_string(),
                            };
                        }
                        Branch::DirectJumpSentinel { site_pc, .. } => {
                            *branch = Branch::DirectJump {
                                site_pc: *site_pc,
                                dst_pc: actual_dst_pc,
                                taken: true,
                            };
                        }
                        Branch::DirectJump {
                            site_pc, dst_pc, ..
                        } => {
                            // Taken iff execution actually continued at the
                            // jump's static destination.
                            let actual_taken = actual_dst_pc == *dst_pc;
                            *branch = Branch::DirectJump {
                                site_pc: *site_pc,
                                dst_pc: *dst_pc,
                                taken: actual_taken,
                            };
                        }
                        _ => continue,
                    };
                }
            }
        }

        let bbl = BasicBlockList {
            // TODO: into_values() once on stable
            list: asid_map.values().flat_map(|bbv| bbv).cloned().collect(),
        };

        // Re-binning must be lossless.
        assert_eq!(bbl.list.len(), total_bbs);

        bbl
    }

    /// Get count of translation errors
    pub fn trans_err_cnt(&self) -> usize {
        self.list.iter().filter(|bb| !bb.is_lifted()).count()
    }

    /// Get length
    pub fn len(&self) -> usize {
        self.list.len()
    }

    /// Check if empty
    pub fn is_empty(&self) -> bool {
        self.list.is_empty()
    }

    /// Return iterator to contained Basic Blocks
    pub fn blocks(&self) -> std::slice::Iter<'_, BasicBlock> {
        self.list.iter()
    }

    /// Serialize JSON
    ///
    /// Writes only the blocks that carry branch metadata, re-running the
    /// resolving constructor on that subset; `white_space` selects
    /// pretty-printed vs compact output.
    pub fn to_branch_json<P: AsRef<Path>>(
        &self,
        file_path: P,
        white_space: bool,
    ) -> std::io::Result<()> {
        let bbs_with_branch: Vec<BasicBlock> = self
            .list
            .iter()
            .filter(|bb| bb.branch().is_some())
            .cloned()
            .collect();

        let branch_list = Self::from(bbs_with_branch);

        fs::write(
            file_path,
            match white_space {
                true => serde_json::to_string_pretty(&branch_list)
                    .expect("serialization of BB list failed!"),
                false => {
                    serde_json::to_string(&branch_list).expect("serialization of BB list failed!")
                }
            },
        )
    }
}
pub mod generator; pub mod item; pub mod parser; pub mod table;
use crate::sleeper::Sleeper;
use crate::waiter::*;
use boolinator::Boolinator;
use std::error::Error;
use std::fmt;
use std::path::PathBuf;
use std::thread;

/// Waiter that blocks until a given file no longer exists (i.e. keeps
/// waiting while the file is absent — see `continue_waiting`).
pub struct FileWaiter {
    file_path: PathBuf,
    sleeper: Sleeper,
}

impl Waiter for FileWaiter {
    /// Spawns the waiter thread for the file named by `argument`.
    ///
    /// # Errors
    /// Returns `FileExistsError` if the file already exists at start time.
    ///
    /// Fix: previously `ok_or(Box::new(...))` built and heap-allocated the
    /// error value even on the success path (clippy `or_fun_call`); the
    /// lazy `ok_or_else` form only constructs it on failure.
    fn start(argument: &str, sleeper: Sleeper) -> WaiterStartResult {
        info!("Initializing FileWaiter for file [{}]", argument);
        let fp = PathBuf::from(argument);
        let mut waiter = FileWaiter::new(fp.clone(), sleeper);
        waiter
            .continue_waiting()
            .ok_or_else(|| Box::new(FileExistsError::new(fp)))?;
        info!("Starting thread for [{:?}].", waiter);
        Ok(thread::spawn(move || waiter.run()))
    }

    fn get_sleeper(&self) -> &Sleeper {
        &self.sleeper
    }

    /// True while the file does not exist yet.
    fn continue_waiting(&mut self) -> bool {
        debug!("Checking if file [{}] exists", self.file_path.display());
        !self.file_path.exists()
    }
}

impl FileWaiter {
    fn new(file_path: PathBuf, sleeper: Sleeper) -> Self {
        FileWaiter { file_path, sleeper }
    }
}

impl fmt::Debug for FileWaiter {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // Sleeper is intentionally omitted from the debug output.
        f.debug_struct("FileWaiter")
            .field("file_path", &self.file_path)
            .finish()
    }
}

/// Error returned when the watched file already exists at startup.
#[derive(Debug, Clone)]
pub struct FileExistsError {
    file_path: PathBuf,
}

impl FileExistsError {
    fn new(file_path: PathBuf) -> Self {
        FileExistsError { file_path }
    }
}

impl fmt::Display for FileExistsError {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "File [{}] already exists.", self.file_path.display())
    }
}

impl Error for FileExistsError {}
// Python extension module (`mvrpl`) written with the legacy `cpython` crate:
// demonstrates passing strings, dicts, lists and JSON between Rust and Python.

#[macro_use]
extern crate cpython;
extern crate curl;
extern crate serde;
extern crate serde_json;

pub mod utils;

use cpython::{Python, PyDict, PyList, PyResult, ToPyObject, PyObject};
use curl::http;
use std::collections::BTreeMap;
use serde_json::Value;

/// Returns `os.getenv('HOME')` by evaluating it inside the embedded Python
/// interpreter (round-trip demo: Rust -> Python -> Rust).
fn retpy(py: Python) -> PyResult<String> {
    let locals = PyDict::new(py);
    locals.set_item(py, "os", py.import("os")?)?;
    let user: String = py.eval("os.getenv('HOME')", None, Some(&locals))?.extract(py)?;
    Ok(user.to_string())
}

/// Fetches a random joke from the Chuck Norris API and converts the JSON
/// payload into a Python object via `utils::fromjson`.
///
/// # Panics
/// Panics (Portuguese messages) if the HTTP request fails or the body is not
/// valid UTF-8.
fn returl(py: Python) -> PyResult<PyObject> {
    let url = "https://api.chucknorris.io/jokes/random";
    let resp = http::handle()
        .get(url)
        .exec()
        .unwrap_or_else(|e| {
            panic!("Falha ao abrir URL {}; Erro: {}", url, e);
        });
    let body = std::str::from_utf8(resp.get_body()).unwrap_or_else(|e| {
        panic!("Falha ao interpretar resultado da URL {}; Erro: {}", url, e);
    });
    let test: Value = serde_json::from_str(&body).unwrap();
    let ret = utils::fromjson(py, test);
    Ok(ret)
}

/// Builds and returns a small Python dict with fixed demo entries.
fn retmapa(py: Python) -> PyResult<PyDict> {
    let dicionario = PyDict::new(py);
    dicionario.set_item(py, "chave1", "valor1")?;
    dicionario.set_item(py, "chave2", 123)?;
    Ok(dicionario)
}

/// Receives a Python list of ints and returns each element doubled.
fn reclista(py: Python, lista_python: Vec<i64>) -> PyResult<PyList> {
    let lista_soma = lista_python.iter().map(|&x| x + x).collect::<Vec<i64>>();
    let lista_retorno = lista_soma.to_py_object(py);
    Ok(lista_retorno)
}

/// Converts a Rust `BTreeMap` into a Python dict.
fn btree_mapa(py: Python) -> PyResult<PyDict> {
    let mut dicionario_btree = BTreeMap::<String, i32>::new();
    dicionario_btree.insert("key".to_string(), 1);
    let py_map = dicionario_btree.to_py_object(py);
    Ok(py_map)
}

/// Receives a Python dict of string->int, adds 10 to every value and returns
/// a new dict.
///
/// # Panics
/// Panics if a value does not parse as `i32`.
fn rec_btree_mapa(py: Python, mapa: PyDict) -> PyResult<PyDict> {
    let mut dicionario_btree = BTreeMap::<String, i32>::new();
    let res = mapa.items(py);
    for f in res.iter() {
        dicionario_btree.insert(f.0.to_string(), f.1.to_string().parse::<i32>().unwrap()+10);
    }
    let ret_map = dicionario_btree.to_py_object(py);
    Ok(ret_map)
}

// Module registration: exposes each function above to Python under `mvrpl`.
// `try!` is kept for consistency with the crate's vintage.
py_module_initializer!(mvrpl, initmvrpl, PyInit_mvrpl, |py, m| {
    try!(m.add(py, "__doc__", "Este módulo foi escrito em Rust."));
    try!(m.add(py, "retpy", py_fn!(py, retpy())));
    try!(m.add(py, "retmapa", py_fn!(py, retmapa())));
    try!(m.add(py, "returl", py_fn!(py, returl())));
    try!(m.add(py, "reclista", py_fn!(py, reclista(rand_int:Vec<i64>))));
    try!(m.add(py, "mapa", py_fn!(py, btree_mapa())));
    try!(m.add(py, "recmapa", py_fn!(py, rec_btree_mapa(rand_int:PyDict))));
    Ok(())
});
mod http;
mod kafka;

pub use self::http::HttpSink;
pub use kafka::*;

use crate::sender::PublishOutcome;
use async_trait::async_trait;
use cloudevents::Event;
use drogue_client::registry;
use std::ops::Deref;
use thiserror::Error;

/// Where to publish: the event stream or the command stream of an
/// application. Both variants wrap the same registry application record.
pub enum SinkTarget<'a> {
    Events(&'a registry::v1::Application),
    Commands(&'a registry::v1::Application),
}

/// Deref to the wrapped application regardless of variant, so callers can
/// access application metadata without matching on the target kind.
impl<'a> Deref for SinkTarget<'a> {
    type Target = registry::v1::Application;

    fn deref(&self) -> &Self::Target {
        match self {
            SinkTarget::Commands(app) => app,
            SinkTarget::Events(app) => app,
        }
    }
}

/// A destination that cloud events can be published to (HTTP, Kafka, ...).
#[async_trait]
pub trait Sink: Clone + Send + Sync + 'static {
    type Error: std::error::Error + Send + 'static;

    #[allow(clippy::needless_lifetimes)]
    /// Publish an event.
    async fn publish<'a>(
        &self,
        target: SinkTarget<'a>,
        event: Event,
    ) -> Result<PublishOutcome, SinkError<Self::Error>>;
}

/// Publishing failure, generic over the transport's own error type `E`.
#[derive(Error, Debug)]
pub enum SinkError<E: std::error::Error + 'static> {
    #[error("Build event error")]
    Build(#[from] cloudevents::event::EventBuilderError),
    #[error("Event error")]
    Event(#[from] cloudevents::message::Error),
    #[error("Transport error")]
    Transport(#[source] E),
    #[error("Target error")]
    Target(#[source] Box<dyn std::error::Error + Send>),
}
// Worked examples on references and lifetimes (shared/exclusive borrows,
// reference comparison, 'static references, borrows of temporaries).
// Commented-out lines mark statements the borrow checker rejects.
//
// NOTE(review): as written, `extend(&mut wave, &wave)` below borrows `wave`
// mutably and immutably at once and will NOT compile (E0502); in the text
// this file follows, that line is the demonstration of the error. Confirm
// whether it should be commented out like the other error examples.

static mut STASH: &i32 = &10;
static CONST: i32 = 100;

fn main() {
    let x = 10;
    let y = 20;
    let mut r = &x;

    struct Point {
        x: i32,
        y: i32,
    }
    let point = Point { x: 1000, y: 729 };
    // References to references: comparison operators see through any depth.
    let r: &Point = &point;
    let rr: &&Point = &r;
    let rrr: &&&Point = &rr;

    let x = 10;
    let y = 10;
    let rx = &x;
    let ry = &y;
    let rrx = &rx;
    let rry = &ry;
    // `==`/`<=` compare referents, not addresses...
    assert!(rrx <= rry);
    assert!(rrx == rry);
    assert!(rx == ry);
    // ...while std::ptr::eq compares the addresses themselves.
    assert!(!std::ptr::eq(rx, ry));

    fn factorial(n: usize) -> usize {
        (1..n + 1).fold(1, |a, b| a * b)
    }
    // Borrow of a temporary: lives until the end of the enclosing statement's
    // scope because it is bound to `r`.
    let r = &factorial(6);
    assert_eq!(r + &1009, 1729);

    // Only 'static referents may be stashed in a static mut.
    modify_stash(&CONST);
    static WORTH_POINTING_AT: i32 = 1000;
    let local = 4;
    modify_stash(&WORTH_POINTING_AT);
    // modify_stash(&local);

    let parabola = [9, 4, 1, 0, 1, 4, 9];
    let s = smallest(&parabola);
    assert_eq!(*s, 0);

    let x = 3;
    let s = S { r: &x };

    // A struct borrowing a shorter-lived value restricts what can escape the
    // inner scope; only `s.x` (borrowing outer `x`) may be assigned to `r`.
    let x = 10;
    let r;
    {
        let y = 20;
        {
            let s = U { x: &x, y: &y };
            r = s.x;
        };
    };

    let x = 1;
    let u = U { x: &2, y: &2 };
    let s = sum_r_xy(&x, u);
    println!("sum_r_xy {}", s);

    // Moving a value out while a (now unused) shared borrow exists is fine
    // under NLL; using `r` afterwards would not be.
    let v = vec![4, 8, 19, 27, 34, 10];
    let r = &v;
    let aside = v; // move vector to aside
    // r[0];

    let mut wave = Vec::new();
    let head = vec![0.0, 1.0];
    let tail = vec![0.0, -1.0];
    extend(&mut wave, &head);
    extend(&mut wave, &tail);
    assert_eq!(wave, vec![0.0, 1.0, 0.0, -1.0]);
    // See NOTE(review) at the top: this aliases mutable and shared borrows.
    extend(&mut wave, &wave);

    let mut w = (107, 109);
    let r = &w;
    let r0 = &r.0; // ok: reborrowing shared as shared
    // let m1 = &mut r.1;
}

// Appends a copy of every element of `slice` onto `vec`.
fn extend(vec: &mut Vec<f64>, slice: &[f64]) {
    for x in slice {
        vec.push(*x);
    }
}

struct StringTable {
    elements: Vec<String>,
}

impl StringTable {
    // First element starting with `prefix`; the returned borrow is tied to
    // `&self` via lifetime elision.
    fn find_by_prefix(&self, prefix: &str) -> Option<&String> {
        for e in self.elements.iter() {
            if e.starts_with(prefix) {
                return Some(e);
            }
        }
        None
    }
}

fn sum_r_xy(r: &i32, u: U) -> i32 {
    r + u.x + u.y
}

struct T<'a> {
    s: S<'a>,
}

struct S<'a> {
    r: &'a i32,
}

struct U<'a> {
    x: &'a i32,
    y: &'a i32,
}

// SAFETY of callers: requires a 'static referent, enforced by the signature;
// the write itself is unsafe only because STASH is a `static mut`.
fn modify_stash(p: &'static i32) {
    unsafe {
        STASH = p;
    }
}

// Returns a borrow into `v`; panics on an empty slice (v[0]).
fn smallest(v: &[i32]) -> &i32 {
    let mut s = &v[0];
    for r in v {
        if *r < *s {
            s = r;
        }
    }
    s
}
// Error types for the Postgres driver. NOTE(review): this module uses
// pre-1.0-era idioms (`try!`, `Error::description`, `&error::Error` trait
// objects, `byteorder::Error`) consistently; they are kept as-is to match
// the crate's vintage rather than partially modernized.

pub use ugh_privacy::DbError;

use byteorder;
use openssl::ssl::error::SslError;
use phf;
use std::error;
use std::convert::From;
use std::fmt;
use std::io;

use Result;
use types::Type;

// Pulls in the build-script-generated SQLSTATE code table.
include!(concat!(env!("OUT_DIR"), "/sqlstate.rs"));

/// Reasons a new Postgres connection could fail.
#[derive(Debug)]
pub enum ConnectError {
    /// The provided URL could not be parsed.
    InvalidUrl(String),
    /// The URL was missing a user.
    MissingUser,
    /// An error from the Postgres server itself.
    DbError(DbError),
    /// A password was required but not provided in the URL.
    MissingPassword,
    /// The Postgres server requested an authentication method not supported
    /// by the driver.
    UnsupportedAuthentication,
    /// The Postgres server does not support SSL encryption.
    NoSslSupport,
    /// There was an error initializing the SSL session.
    SslError(SslError),
    /// There was an error communicating with the server.
    IoError(io::Error),
    /// The server sent an unexpected response.
    BadResponse,
}

impl fmt::Display for ConnectError {
    fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
        // Prefix with the static description; only InvalidUrl carries detail.
        try!(fmt.write_str(error::Error::description(self)));
        match *self {
            ConnectError::InvalidUrl(ref msg) => write!(fmt, ": {}", msg),
            _ => Ok(())
        }
    }
}

impl error::Error for ConnectError {
    fn description(&self) -> &str {
        match *self {
            ConnectError::InvalidUrl(_) => "Invalid URL",
            ConnectError::MissingUser => "User missing in URL",
            ConnectError::DbError(_) => "An error from the Postgres server itself",
            ConnectError::MissingPassword => {
                "The server requested a password but none was provided"
            }
            ConnectError::UnsupportedAuthentication => {
                "The server requested an unsupported authentication method"
            }
            ConnectError::NoSslSupport => "The server does not support SSL",
            ConnectError::SslError(_) => "Error initiating SSL session",
            ConnectError::IoError(_) => "Error communicating with server",
            ConnectError::BadResponse => "The server returned an unexpected response",
        }
    }

    fn cause(&self) -> Option<&error::Error> {
        match *self {
            ConnectError::DbError(ref err) => Some(err),
            ConnectError::SslError(ref err) => Some(err),
            ConnectError::IoError(ref err) => Some(err),
            _ => None
        }
    }
}

impl From<io::Error> for ConnectError {
    fn from(err: io::Error) -> ConnectError {
        ConnectError::IoError(err)
    }
}

impl From<DbError> for ConnectError {
    fn from(err: DbError) -> ConnectError {
        ConnectError::DbError(err)
    }
}

impl From<SslError> for ConnectError {
    fn from(err: SslError) -> ConnectError {
        ConnectError::SslError(err)
    }
}

impl From<byteorder::Error> for ConnectError {
    fn from(err: byteorder::Error) -> ConnectError {
        // byteorder errors are surfaced as plain I/O errors.
        ConnectError::IoError(From::from(err))
    }
}

/// Represents the position of an error in a query.
#[derive(Clone, PartialEq, Eq, Debug)]
pub enum ErrorPosition {
    /// A position in the original query.
    Normal(u32),
    /// A position in an internally generated query.
    Internal {
        /// The byte position.
        position: u32,
        /// A query generated by the Postgres server.
        query: String
    }
}

/// An error encountered when communicating with the Postgres server.
#[derive(Debug)]
pub enum Error {
    /// An error reported by the Postgres server.
    DbError(DbError),
    /// An error communicating with the Postgres server.
    IoError(io::Error),
    /// The communication channel with the Postgres server has desynchronized
    /// due to an earlier communications error.
    StreamDesynchronized,
    /// An attempt was made to convert between incompatible Rust and Postgres
    /// types.
    WrongType(Type),
    /// An attempt was made to read from a column that does not exist.
    InvalidColumn,
    /// A value was NULL but converted to a non-nullable Rust type.
    WasNull,
    /// The server returned an unexpected response.
    BadResponse,
}

impl fmt::Display for Error {
    fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
        // Prefix with the static description; only WrongType carries detail.
        try!(fmt.write_str(error::Error::description(self)));
        match *self {
            Error::WrongType(ref ty) => write!(fmt, ": saw type {:?}", ty),
            _ => Ok(()),
        }
    }
}

impl error::Error for Error {
    fn description(&self) -> &str {
        match *self {
            Error::DbError(_) => "An error reported by the Postgres server",
            Error::IoError(_) => "An error communicating with the Postgres server",
            Error::StreamDesynchronized => {
                "Communication with the server has desynchronized due to an earlier IO error"
            }
            Error::WrongType(_) => "Unexpected type",
            Error::InvalidColumn => "Invalid column",
            Error::WasNull => "The value was NULL",
            Error::BadResponse => "The server returned an unexpected response",
        }
    }

    fn cause(&self) -> Option<&error::Error> {
        match *self {
            Error::DbError(ref err) => Some(err),
            Error::IoError(ref err) => Some(err),
            _ => None
        }
    }
}

impl From<DbError> for Error {
    fn from(err: DbError) -> Error {
        Error::DbError(err)
    }
}

impl From<io::Error> for Error {
    fn from(err: io::Error) -> Error {
        Error::IoError(err)
    }
}

impl From<byteorder::Error> for Error {
    fn from(err: byteorder::Error) -> Error {
        // byteorder errors are surfaced as plain I/O errors.
        Error::IoError(From::from(err))
    }
}
use std::env; use std::io::{self, Write}; use std::process; use std::collections::VecDeque; use std::collections::HashMap; extern crate pnet; use pnet::datalink::{self, NetworkInterface}; use pnet::packet::Packet; use pnet::packet::arp::ArpPacket; use pnet::packet::ethernet::{EtherTypes, EthernetPacket}; use pnet::packet::ipv4::Ipv4Packet; use pnet::packet::ipv6::Ipv6Packet; pub mod data_link_handler; pub mod internet_handler; pub mod transport_handler; fn main() { use pnet::datalink::Channel::Ethernet; let iface_name = match env::args().nth(1) { Some(n) => n, None => { writeln!(io::stderr(), "USAGE: packetdump <NETWORK INTERFACE>").unwrap(); process::exit(1); } }; let interface_names_match = |iface: &NetworkInterface| iface.name == iface_name; // Find the network interface with the provided name let interfaces = datalink::interfaces(); let interface = interfaces.into_iter().filter(interface_names_match).next().unwrap(); // Create a channel to receive on let (_, mut rx) = match datalink::channel(&interface, Default::default()) { Ok(Ethernet(tx, rx)) => (tx, rx), Ok(_) => panic!("packetdump: unhandled channel type: {}"), Err(e) => panic!("packetdump: unable to create channel: {}", e), }; let mut iter = rx.iter(); // IP 重组所需的 HashMap let mut ip_defrag_hash_map: HashMap<u64, VecDeque<internet_handler::internet_handler::IpInfo>> = HashMap::new(); // TCP stream 重组所需的 HashMap let mut tcp_reassemble_hash_map: HashMap<u64, transport_handler::tcp::tcp_stream> = HashMap::new(); // TCP flow 重组所需的 HashMap let mut tcp_flow_hash_map: HashMap<u64, transport_handler::tcp::tcp_flow> = HashMap::new(); loop { match iter.next() { // 捕获数据 Ok(packet) => { // datalink /* 识别 使用的 ip 层协议 // TODO track&dump src_mac dst_mac */ let ether_type = data_link_handler::datalink_handler::handler(&interface.name[..], &packet); // Internet match ether_type { EtherTypes::Ipv4 => { println!("IPV4"); let ip_packet: internet_handler::internet_handler::NextLayer = 
internet_handler::internet_handler::handle_ipv4_packet(&interface.name[..], &packet, &mut ip_defrag_hash_map); if ip_packet.hash == 1 { println!("error ip"); } else { // transport transport_handler::handle_transport_protocol(&interface.name[..], ip_packet.src_ip, ip_packet.dst_ip, ip_packet.next_layer_proto_type, &ip_packet.payload, &mut tcp_reassemble_hash_map);//HashMap<u64, transport_handler::tcp::tcp_stream> } } _ => { println!("error ethertype"); } }; } Err(e) => panic!("packetdump: unable to receive packet: {}", e), } } }
use std::io::{self, Read}; fn main() { let mut input = String::new(); let stdin = io::stdin(); let mut handle = stdin.lock(); handle.read_to_string(&mut input).unwrap(); let integer: i32 = input.parse().unwrap(); println!(""); for i in 1..=integer { println!("{}", i); } }
//! A [`Visit`](`crate::visit::Visitor`) implementation which deploys the items.

pub mod deployment;

use cfg_if::cfg_if;
use color_eyre::eyre::Context;

use crate::profile::{source::PunktfSource, MergeMode};
use crate::visit::*;
use crate::profile::transform::Transform as _;
use crate::profile::LayeredProfile;
use crate::visit::deploy::deployment::{Deployment, DeploymentBuilder, ItemStatus};
use std::borrow::Borrow;
use std::path::Path;
use crate::visit::{ResolvingVisitor, TemplateVisitor};

impl<'a> Item<'a> {
    /// Adds this item to the given
    /// [`DeploymentBuilder`](`crate::visit::deploy::deployment::DeploymentBuilder`).
    fn add_to_builder<S: Into<ItemStatus>>(&self, builder: &mut DeploymentBuilder, status: S) {
        let status = status.into();

        // Record items under their canonical path when possible; fall back to
        // the raw path when canonicalization fails (e.g. path does not exist yet).
        let resolved_target_path = self
            .target_path
            .canonicalize()
            .unwrap_or_else(|_| self.target_path.clone());

        match &self.kind {
            Kind::Root(dotfile) => {
                builder.add_dotfile(resolved_target_path, (*dotfile).clone(), status)
            }
            Kind::Child { root_target_path, .. } => {
                // Children are registered together with their (canonicalized)
                // root so the builder can associate them with their parent.
                let resolved_root_target_path = root_target_path
                    .canonicalize()
                    .unwrap_or_else(|_| root_target_path.clone());
                builder.add_child(resolved_target_path, resolved_root_target_path, status)
            }
        };
    }
}

impl Symlink {
    /// Adds this item to the given
    /// [`DeploymentBuilder`](`crate::visit::deploy::deployment::DeploymentBuilder`).
    fn add_to_builder<S: Into<ItemStatus>>(&self, builder: &mut DeploymentBuilder, status: S) {
        builder.add_link(
            self.source_path.clone(),
            self.target_path.clone(),
            status.into(),
        );
    }
}

// These are macros rather than functions because `skipped!`/`failed!` `return`
// from the *calling* function after recording the status.

/// Marks the given item as successfully deployed.
macro_rules! success {
    ($builder:expr, $item:expr) => {
        $item.add_to_builder($builder, ItemStatus::success());
    };
}

/// Marks the given item as skipped.
///
/// This will instantly return from the outer function after reporting the skip.
macro_rules! skipped {
    // Variant returning an explicit value as `Ok(..)`.
    ($builder:expr, $item:expr, $reason:expr => $ret:expr ) => {
        $item.add_to_builder($builder, ItemStatus::skipped($reason));
        return Ok($ret);
    };
    // Variant returning `Ok(())`.
    ($builder:expr, $item:expr, $reason:expr) => {
        $item.add_to_builder($builder, ItemStatus::skipped($reason));
        return Ok(());
    };
}

/// Marks the given item as failed.
///
/// This will instantly return from the outer function after reporting the error.
macro_rules! failed {
    // Variant returning an `Err(..)`.
    ($builder:expr, $item:expr, $reason:expr => Err($ret:expr) ) => {
        $item.add_to_builder($builder, ItemStatus::failed($reason));
        return Err($ret);
    };
    // Variant returning an explicit value as `Ok(..)`.
    ($builder:expr, $item:expr, $reason:expr => $ret:expr ) => {
        $item.add_to_builder($builder, ItemStatus::failed($reason));
        return Ok($ret);
    };
    // Variant returning `Ok(())`.
    ($builder:expr, $item:expr, $reason:expr) => {
        $item.add_to_builder($builder, ItemStatus::failed($reason));
        return Ok(());
    };
}

/// Configuration options for the [`Deployer`].
#[derive(Default, Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub struct DeployOptions {
    /// If this flag is set, it will prevent any write operations from occurring
    /// during the deployment.
    ///
    /// This includes write, copy and directory creation operations.
    pub dry_run: bool,
}

/// Responsible for deploying a [profile](`crate::profile::Profile`).
///
/// This includes checking for merge conflicts, resolving children of a
/// directory dotfile, parsing and resolving of templates and the actual
/// writing of the dotfile to the target destination.
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct Deployer<F> {
    /// Configuration options
    options: DeployOptions,

    /// This function gets called when a dotfile at the target destination
    /// already exists and the merge mode is
    /// [MergeMode::Ask](`crate::profile::MergeMode::Ask`).
    ///
    /// The arguments for the function are (dotfile_source_path, dotfile_target_path).
    merge_ask_fn: F,

    /// Builder for the deployment.
///
/// This holds information about each item which was processed,
/// keeps track of the time and also stores an overall status of the deployment.
    builder: DeploymentBuilder,
}

impl<F> Deployer<F>
where
    F: Fn(&Path, &Path) -> color_eyre::Result<bool>,
{
    /// Creates a new instance.
    pub fn new(options: DeployOptions, merge_ask_fn: F) -> Self {
        Self {
            options,
            merge_ask_fn,
            builder: DeploymentBuilder::default(),
        }
    }

    /// Retrieves the finished deployment from this instance.
    pub fn into_deployment(self) -> Deployment {
        self.builder.finish()
    }

    /// Tries to deploy the given `profile`.
    ///
    /// # Errors
    ///
    /// Only hard errors will be returned as error, everything else will be
    /// recorded in the [Deployment](`crate::visit::deploy::deployment::Deployment`)
    /// on a dotfile level.
    pub fn deploy(self, source: &PunktfSource, profile: &mut LayeredProfile) -> Deployment {
        // General flow:
        //  - get deployment path
        //  - check if dotfile already deployed
        //  - YES:
        //      - compare priorities
        //      - LOWER: continue next dotfile
        //      - SAME/HIGHER: next step
        //  - check if dotfile exists
        //  - YES:
        //      - check merge operation
        //      - if merge operation == ASK
        //          - Run merge_ask_fn
        //          - FALSE: continue next dotfile
        //  - check if template
        //  - YES: resolve template
        //  - IF FILE: write dotfile
        //  - IF DIR: for each dotfile in dir START AT TOP

        for hook in profile.pre_hooks() {
            log::info!("Executing pre-hook: {}", hook.command());
            // No files are deployed yet, meaning if an error during hook
            // execution occurs it will return with an error instead of just
            // logging it.
            if let Err(err) = hook
                .execute(source.profiles())
                .wrap_err("Failed to execute pre-hook")
            {
                log::error!("Failed to execute pre-hook ({})", err);
                return self.builder.failed(err.to_string());
            };
        }

        // Walk the profile; the visitor records per-item results in `builder`.
        let mut resolver = ResolvingVisitor(self);
        let walker = Walker::new(profile);
        if let Err(err) = walker.walk(source, &mut resolver) {
            return resolver.into_inner().builder.failed(err.to_string());
        }

        let this = resolver.into_inner();

        // Post-hook failures also fail the whole deployment, even though the
        // files themselves have already been written at this point.
        for hook in profile.post_hooks() {
            log::info!("Executing post-hook: {}", hook.command());
            if let Err(err) = hook.execute(source.profiles()) {
                log::error!("Failed to execute post-hook ({})", err);
                return this.builder.failed(err.to_string());
            }
        }

        this.into_deployment()
    }

    /// Checks common things for a given file item before deploying it.
    ///
    /// The returned boolean indicates if the deployment of the file should
    /// continue.
    fn pre_deploy_checks(&mut self, file: &File<'_>) -> color_eyre::Result<bool> {
        // An already-registered item at the same target path with a higher
        // priority wins; this file is then skipped.
        let other_priority = self.builder.get_priority(&file.target_path);
        match (file.dotfile().priority.as_ref(), other_priority) {
            (Some(a), Some(b)) if b > a => {
                log::info!(
                    "{}: Dotfile with higher priority is already deployed at {}",
                    file.relative_source_path.display(),
                    file.target_path.display()
                );
                skipped!(&mut self.builder, file, "Dotfile with higher priority is already deployed" => false);
            }
            (_, _) => {}
        };

        if file.target_path.exists() {
            // Something already exists at the target path; resolve via the
            // dotfile's merge mode.
            log::debug!(
                "{}: Dotfile already exists at {}",
                file.relative_source_path.display(),
                file.target_path.display()
            );

            match file.dotfile().merge.unwrap_or_default() {
                MergeMode::Overwrite => {
                    log::info!(
                        "{}: Overwriting existing dotfile",
                        file.relative_source_path.display()
                    )
                }
                MergeMode::Keep => {
                    log::info!(
                        "{}: Skipping existing dotfile",
                        file.relative_source_path.display()
                    );
                    skipped!(&mut self.builder, file, format!("Dotfile already exists and merge mode is {:?}", MergeMode::Keep) => false);
                }
                MergeMode::Ask => {
                    log::info!("{}: Asking for action", file.relative_source_path.display());

                    let should_deploy = match (self.merge_ask_fn)(&file.source_path, file.target_path.borrow())
                        .wrap_err("Error evaluating user response")
                    {
                        Ok(should_deploy) => should_deploy,
                        Err(err) => {
                            log::error!(
                                "{}: Failed to execute ask function ({})",
                                file.relative_source_path.display(),
                                err
                            );
                            failed!(&mut self.builder, file, format!("Failed to execute merge ask function: {err}") => false);
                        }
                    };

                    if !should_deploy {
                        log::info!("{}: Merge was denied", file.relative_source_path.display());
                        skipped!(&mut self.builder, file, "Dotfile already exists and merge ask was denied" => false);
                    }
                }
            }
        }

        // Ensure the parent directory exists before any write/copy happens
        // (skipped in dry-run mode).
        if let Some(parent) = file.target_path.parent() {
            if !self.options.dry_run {
                match std::fs::create_dir_all(parent) {
                    Ok(_) => {}
                    Err(err) => {
                        log::error!(
                            "{}: Failed to create directory ({})",
                            file.relative_source_path.display(),
                            err
                        );
                        failed!(&mut self.builder, file, format!("Failed to create parent directory: {err}") => false);
                    }
                }
            }
        }

        Ok(true)
    }

    /// Applies any relevant [`Transform`](`crate::profile::transform::Transform`)
    /// for the given file.
    fn transform_content(
        &mut self,
        profile: &LayeredProfile,
        file: &File<'_>,
        content: String,
    ) -> color_eyre::Result<String> {
        let mut content = content;

        // Copy the dotfile's transformer list up front so `file` is not kept
        // borrowed while `self.builder` is mutated in the error path below.
        let exec_transformers: Vec<_> = file.dotfile().transformers.to_vec();

        // Apply transformers.
        // Order:
        //  - Transformers which are specified in the profile root
        //  - Transformers which are specified on a specific dotfile of a profile
        for transformer in profile.transformers().chain(exec_transformers.iter()) {
            content = match transformer.transform(content) {
                Ok(content) => content,
                Err(err) => {
                    log::info!(
                        "{}: Failed to apply content transformer `{}`: `{}`",
                        file.relative_source_path.display(),
                        transformer,
                        err
                    );
                    failed!(&mut self.builder, file, format!("Failed to apply content transformer `{transformer}`: `{err}`") => Err(err));
                }
            };
        }

        Ok(content)
    }
}

impl<F> Visitor for Deployer<F>
where
    F: Fn(&Path, &Path) -> color_eyre::Result<bool>,
{
    /// Accepts a file item and tries to deploy it.
    fn accept_file<'a>(
        &mut self,
        _: &PunktfSource,
        profile: &LayeredProfile,
        file: &File<'a>,
    ) -> Result {
        log::info!("{}: Deploying file", file.relative_source_path.display());

        let cont = self.pre_deploy_checks(file)?;
        if !cont {
            return Ok(());
        }

        // Fast path
        if profile.transformers_len() == 0 && file.dotfile().transformers.is_empty() {
            // File is no template and no transformers are specified. This means
            // we can take the fast path of just copying via the filesystem.
            // Allowed for readability
            #[allow(clippy::collapsible_else_if)]
            if !self.options.dry_run {
                if let Err(err) = std::fs::copy(&file.source_path, &file.target_path) {
                    log::info!(
                        "{}: Failed to copy file",
                        file.relative_source_path.display()
                    );
                    failed!(&mut self.builder, file, format!("Failed to copy: {err}"));
                }
            }
        } else {
            // Slow path: read, transform, then write the content.
            let content = match std::fs::read_to_string(&file.source_path) {
                Ok(content) => content,
                Err(err) => {
                    log::info!(
                        "{}: Failed to read file",
                        file.relative_source_path.display()
                    );
                    failed!(&mut self.builder, file, format!("Failed to read: {err}"));
                }
            };

            let Ok(content) = self.transform_content(profile, file, content) else {
                // Error is already recorded
                return Ok(());
            };

            if !self.options.dry_run {
                if let Err(err) = std::fs::write(&file.target_path, content.as_bytes()) {
                    log::info!(
                        "{}: Failed to write content",
                        file.relative_source_path.display()
                    );
                    failed!(
                        &mut self.builder,
                        file,
                        format!("Failed to write content: {err}")
                    );
                }
            }
        }

        log::info!(
            "{}: File successfully deployed",
            file.relative_source_path.display()
        );
        success!(&mut self.builder, file);

        Ok(())
    }

    /// Accepts a directory item and tries to deploy it.
    fn accept_directory<'a>(
        &mut self,
        _: &PunktfSource,
        _: &LayeredProfile,
        directory: &Directory<'a>,
    ) -> Result {
        log::info!(
            "{}: Deploying directory",
            directory.relative_source_path.display()
        );

        if !self.options.dry_run {
            if let Err(err) = std::fs::create_dir_all(&directory.target_path) {
                log::error!(
                    "{}: Failed to create directory ({})",
                    directory.relative_source_path.display(),
                    err
                );
                failed!(
                    &mut self.builder,
                    directory,
                    format!("Failed to create directory: {err}")
                );
            } else {
                success!(&mut self.builder, directory);
            }
        } else {
            // Dry run: report success without touching the filesystem.
            success!(&mut self.builder, directory);
        }

        log::info!(
            "{}: Directory successfully deployed",
            directory.relative_source_path.display()
        );

        Ok(())
    }

    /// Accepts a link item and tries to deploy it.
fn accept_link(&mut self, _: &PunktfSource, _: &LayeredProfile, link: &Symlink) -> Result { log::info!("{}: Deploying symlink", link.source_path.display()); // Log an warning if deploying of links is not supported for the // operating system. #[cfg(all(not(unix), not(windows)))] { log::warn!( "[{}]: Symlink operations are only supported for unix and windows systems", source_path.display() ); skipped!( &mut self.builder, link, "Symlink operations are only supported on unix and windows systems" ); } let source_path = &link.source_path; let target_path = &link.target_path; // Check that the source exists if !source_path.exists() { log::error!("[{}]: Links source does not exist", source_path.display()); failed!(&mut self.builder, link, "Link source does not exist"); } // Check that either the target does not exist or that i can be replaced if target_path.exists() { if link.replace { if !self.options.dry_run { // Verify that the target is a symlink let target_metadata = match target_path.symlink_metadata() { Ok(m) => m, Err(err) => { log::error!("[{}]: Failed to read metadata", source_path.display()); failed!( &mut self.builder, link, format!("Failed get link target metadata: {err}") ); } }; if target_metadata.is_symlink() { // Get metadata of symlink target let res = if let Ok(target_metadata) = target_path.metadata() { if target_metadata.is_dir() { std::fs::remove_dir(target_path) } else { std::fs::remove_file(target_path) } } else { std::fs::remove_file(target_path) .or_else(|_| std::fs::remove_dir(target_path)) }; if let Err(err) = res { log::error!( "[{}]: Failed to remove old link at target", source_path.display() ); failed!( &mut self.builder, link, format!("Failed to remove old link target: {err}") ); } else { log::info!( "[{}]: Removed old link target at {}", source_path.display(), target_path.display() ); } } else { log::error!( "[{}]: Target already exists and is no link", source_path.display() ); failed!(&mut self.builder, link, "Not allowed to replace 
target"); } } } else { log::error!( "[{}]: Target already exists and is not allowed to be replaced", source_path.display() ); skipped!(&mut self.builder, link, "Link target does already exist"); } } if !self.options.dry_run { cfg_if! { if #[cfg(unix)] { if let Err(err) = std::os::unix::fs::symlink(source_path, target_path) { log::error!("[{}]: Failed to create link", source_path.display()); failed!(&mut self.builder, link, format!("Failed create link: {err}")); }; } else if #[cfg(windows)] { let metadata = match source_path.symlink_metadata() { Ok(m) => m, Err(err) => { log::error!("[{}]: Failed to read metadata", source_path.display()); failed!(&mut self.builder, link, format!("Failed get link source metadata: {err}")); } }; if metadata.is_dir() { if let Err(err) = std::os::windows::fs::symlink_dir(source_path, target_path) { log::error!("[{}]: Failed to create directory link", source_path.display()); failed!(&mut self.builder, link, format!("Failed create directory link: {err}")); }; } else if metadata.is_file() { if let Err(err) = std::os::windows::fs::symlink_file(source_path, target_path) { log::error!("[{}]: Failed to create file link", source_path.display()); failed!(&mut self.builder, link, format!("Failed create file link: {err}")); }; } else { log::error!("[{}]: Invalid link source type", source_path.display()); failed!(&mut self.builder, link, "Invalid type of link source"); } } else { log::warn!("[{}]: Link operations are only supported for unix and windows systems", source_path.display()); skipped!(&mut self.builder, link, "Link operations are only supported on unix and windows systems"); } } } success!(&mut self.builder, link); Ok(()) } /// Accepts a rejected item and reports it. 
fn accept_rejected<'a>(
    &mut self,
    _: &PunktfSource,
    _: &LayeredProfile,
    rejected: &Rejected<'a>,
) -> Result {
    log::info!(
        "[{}]: Rejected - {}",
        rejected.relative_source_path.display(),
        rejected.reason
    );

    // A rejection is not an error; it is recorded as a skip.
    skipped!(&mut self.builder, rejected, rejected.reason.clone());
}

/// Accepts an errored item and reports it.
fn accept_errored<'a>(
    &mut self,
    _: &PunktfSource,
    _: &LayeredProfile,
    errored: &Errored<'a>,
) -> Result {
    log::error!(
        "[{}]: Failed - {}",
        errored.relative_source_path.display(),
        errored
    );

    failed!(&mut self.builder, errored, errored.to_string());
}
}

impl<F> TemplateVisitor for Deployer<F>
where
    F: Fn(&Path, &Path) -> color_eyre::Result<bool>,
{
    /// Accepts a file template item and tries to deploy it.
    ///
    /// Before the deployment the template is parsed and resolved.
    fn accept_template<'a>(
        &mut self,
        _: &PunktfSource,
        profile: &LayeredProfile,
        file: &File<'a>,
        // Returns a function to resolve the content to make the resolving lazy
        // for upstream visitors.
        resolve_content: impl FnOnce(&str) -> color_eyre::Result<String>,
    ) -> Result {
        log::info!(
            "{}: Deploying template",
            file.relative_source_path.display()
        );

        let cont = self.pre_deploy_checks(file)?;
        if !cont {
            return Ok(());
        }

        let content = match std::fs::read_to_string(&file.source_path) {
            Ok(content) => content,
            Err(err) => {
                // NOTE(review): log message reads "Failed read file" (missing
                // "to"); left unchanged here as it is a runtime string.
                log::info!("{}: Failed read file", file.relative_source_path.display());
                failed!(&mut self.builder, file, format!("Failed to read: {err}"));
            }
        };

        // Resolve the template to its final content.
        let content = match resolve_content(&content) {
            Ok(content) => content,
            Err(err) => {
                log::info!(
                    "{}: Failed to resolve template",
                    file.relative_source_path.display()
                );
                failed!(
                    &mut self.builder,
                    file,
                    format!("Failed to resolve template: {err}")
                );
            }
        };

        let Ok(content) = self.transform_content(profile, file, content) else {
            // Error is already recorded
            return Ok(());
        };

        if !self.options.dry_run {
            if let Err(err) = std::fs::write(&file.target_path, content.as_bytes()) {
                log::info!(
                    "{}: Failed to write content",
                    file.relative_source_path.display()
                );
                failed!(
                    &mut self.builder,
                    file,
                    format!("Failed to write content: {err}")
                );
            }
        }

        log::info!(
            "{}: Template successfully deployed",
            file.relative_source_path.display()
        );
        success!(&mut self.builder, file);

        Ok(())
    }
}
use super::compile::Instruction; use std::convert::TryFrom; pub(crate) struct Machine { accumulator: i32, instruction_pointer: usize, instructions: Vec<Instruction>, } impl Machine { pub fn new(instructions: &[Instruction]) -> Self { Self { accumulator: 0, instruction_pointer: 0, instructions: instructions.to_owned(), } } fn execute_instruction(&mut self) -> Result<(), String> { match self.instructions.get(self.instruction_pointer) { Some(Instruction::Noop(_)) => { self.instruction_pointer += 1; Ok(()) } Some(Instruction::Accumulate(val)) => { self.accumulator += val; self.instruction_pointer += 1; Ok(()) } Some(Instruction::Jump(offset)) => { self.instruction_pointer = usize::try_from(i32::try_from(self.instruction_pointer).unwrap() + offset) .unwrap(); Ok(()) } None => Err("Out of bounds!".to_owned()), } } pub(crate) fn execute_until_first_repeat(&mut self) -> Result<i32, String> { let mut visited = vec![false; self.instructions.len()]; loop { match visited.get_mut(self.instruction_pointer) { Some(false) => { *visited.get_mut(self.instruction_pointer).unwrap() = true; self.execute_instruction()?; } Some(true) => return Ok(self.accumulator), None => return Err("Out of bounds!".to_owned()), } } } pub(crate) fn execute_until_end(&mut self) -> Result<i32, String> { match self.execute_until_first_repeat() { Ok(_) => { // Repetition detected, we don't want that. Err("Repeats".to_owned()) } Err(_) => { // It didn't loop! But did it terminate correctly? if self.instruction_pointer == self.instructions.len() { Ok(self.accumulator) } else { Err(format!( "Program stopped at instruction {} (should have been {}).", self.instruction_pointer, self.instructions.len(), )) } } } } }
//! Make sure that the timing information is preserved as floats use std::fs::File; use anyhow::Result; use libosu::prelude::*; const F64_EPSILON: f64 = 0.0001; const TEST_DATA: &[(&str, &[(usize, f64)])] = &[( "tests/files/129891.osu", &[ (0, 270.002700027), (1, -100.0), (2, -100.0), (8, -83.3333333333333), ], )]; #[test] fn test_preserved_timing_info() -> Result<()> { for (path, data) in TEST_DATA { let file = File::open(path)?; let beatmap = Beatmap::parse(file)?; for (idx, val) in *data { let tp = beatmap.timing_points.get(*idx).unwrap(); let s = tp.to_string(); let parts = s.split(",").collect::<Vec<_>>(); let actual_val = parts[1].parse::<f64>()?; assert!( (actual_val - val).abs() < F64_EPSILON, "expected {}, got {}", val, actual_val ); } } Ok(()) }
// -------------------------------------------------------------------------------// // Cryptopals, Set 1, Challenge 6: https://cryptopals.com/sets/1/challenges/6 // Impl by Frodo45127 // -------------------------------------------------------------------------------// use crate::utils::*; use std::io::{BufReader, Read}; use std::fs::File; use std::path::PathBuf; pub fn challenge() { // Get the file in a Vec we can actually manipulate. let mut file = BufReader::new(File::open(PathBuf::from("assets/1-6")).unwrap()); let mut data_to_decrypt = vec![]; file.read_to_end(&mut data_to_decrypt).unwrap(); // First, we get rid of the base64 encoding. let data_decrypted_base64 = decrypt_base64(&data_to_decrypt); // Now, find the XOR KeySize. let key_size = detect_fixed_xor_keysize(&data_decrypted_base64); let mut transposed_data = vec![]; for index in 0..key_size { transposed_data.push( data_decrypted_base64.iter().enumerate() .filter(|(y, _)| ( *y as isize - index as isize) % key_size as isize == 0 ) .map(|x| *x.1) .collect::<Vec<u8>>()); } // Now, decode every character of the key, one by one. 
let mut key = vec![]; for data in &transposed_data { let mut decoded_strings = vec![]; for index in 0..255 { let mut result = vec![]; data.iter().for_each(|x| result.push(x ^ index)); let string = String::from_utf8(result.to_vec()); if let Ok(string) = string { decoded_strings.push((index, string)); } } let mut most_scored = (0, 0, String::new()); let mut score = 0; for (index, string) in decoded_strings { score += string.matches("e").count() * 12; score += string.matches("t").count() * 12; score += string.matches("a").count() * 12; score += string.matches("o").count() * 12; score += string.matches("i").count() * 12; score += string.matches("n").count() * 12; score += string.matches("s").count() * 12; score += string.matches("h").count() * 12; score += string.matches("r").count() * 12; score += string.matches("d").count() * 12; score += string.matches("l").count() * 12; score += string.matches("u").count() * 12; if score > most_scored.0 { most_scored = (score, index, string); } score = 0; } key.push(most_scored.1); } println!("Key: {:?}", String::from_utf8_lossy(&key)); let mut decrypted_string = vec![]; for (position, character) in data_decrypted_base64.iter().enumerate() { decrypted_string.push(character ^ (key[position % key.len()])); } println!("Decrypted Text:\n{}", String::from_utf8_lossy(&decrypted_string)); }
use failure::Fail; use std::io; #[derive(Fail, Debug)] pub enum KvsError { /// IO error #[fail(display = "{}", _0)] Io(#[cause] io::Error), } impl From<io::Error> for KvsError { fn from(err: io::Error) -> KvsError { KvsError::Io(err) } } /// Result type for kvs pub type Result<T> = std::result::Result<T, KvsError>;
#[doc = "Register `ISR` reader"] pub type R = crate::R<ISR_SPEC>; #[doc = "Register `ISR` writer"] pub type W = crate::W<ISR_SPEC>; #[doc = "Field `ALRAWF` reader - Alarm A write flag"] pub type ALRAWF_R = crate::BitReader<ALRAWFR_A>; #[doc = "Alarm A write flag\n\nValue on reset: 1"] #[derive(Clone, Copy, Debug, PartialEq, Eq)] pub enum ALRAWFR_A { #[doc = "0: Alarm update not allowed"] UpdateNotAllowed = 0, #[doc = "1: Alarm update allowed"] UpdateAllowed = 1, } impl From<ALRAWFR_A> for bool { #[inline(always)] fn from(variant: ALRAWFR_A) -> Self { variant as u8 != 0 } } impl ALRAWF_R { #[doc = "Get enumerated values variant"] #[inline(always)] pub fn variant(&self) -> ALRAWFR_A { match self.bits { false => ALRAWFR_A::UpdateNotAllowed, true => ALRAWFR_A::UpdateAllowed, } } #[doc = "Alarm update not allowed"] #[inline(always)] pub fn is_update_not_allowed(&self) -> bool { *self == ALRAWFR_A::UpdateNotAllowed } #[doc = "Alarm update allowed"] #[inline(always)] pub fn is_update_allowed(&self) -> bool { *self == ALRAWFR_A::UpdateAllowed } } #[doc = "Field `ALRBWF` reader - Alarm B write flag"] pub use ALRAWF_R as ALRBWF_R; #[doc = "Field `WUTWF` reader - Wakeup timer write flag"] pub type WUTWF_R = crate::BitReader<WUTWFR_A>; #[doc = "Wakeup timer write flag\n\nValue on reset: 1"] #[derive(Clone, Copy, Debug, PartialEq, Eq)] pub enum WUTWFR_A { #[doc = "0: Wakeup timer configuration update not allowed"] UpdateNotAllowed = 0, #[doc = "1: Wakeup timer configuration update allowed"] UpdateAllowed = 1, } impl From<WUTWFR_A> for bool { #[inline(always)] fn from(variant: WUTWFR_A) -> Self { variant as u8 != 0 } } impl WUTWF_R { #[doc = "Get enumerated values variant"] #[inline(always)] pub fn variant(&self) -> WUTWFR_A { match self.bits { false => WUTWFR_A::UpdateNotAllowed, true => WUTWFR_A::UpdateAllowed, } } #[doc = "Wakeup timer configuration update not allowed"] #[inline(always)] pub fn is_update_not_allowed(&self) -> bool { *self == WUTWFR_A::UpdateNotAllowed } #[doc 
= "Wakeup timer configuration update allowed"] #[inline(always)] pub fn is_update_allowed(&self) -> bool { *self == WUTWFR_A::UpdateAllowed } } #[doc = "Field `SHPF` reader - Shift operation pending"] pub type SHPF_R = crate::BitReader<SHPFR_A>; #[doc = "Shift operation pending\n\nValue on reset: 0"] #[derive(Clone, Copy, Debug, PartialEq, Eq)] pub enum SHPFR_A { #[doc = "0: No shift operation is pending"] NoShiftPending = 0, #[doc = "1: A shift operation is pending"] ShiftPending = 1, } impl From<SHPFR_A> for bool { #[inline(always)] fn from(variant: SHPFR_A) -> Self { variant as u8 != 0 } } impl SHPF_R { #[doc = "Get enumerated values variant"] #[inline(always)] pub fn variant(&self) -> SHPFR_A { match self.bits { false => SHPFR_A::NoShiftPending, true => SHPFR_A::ShiftPending, } } #[doc = "No shift operation is pending"] #[inline(always)] pub fn is_no_shift_pending(&self) -> bool { *self == SHPFR_A::NoShiftPending } #[doc = "A shift operation is pending"] #[inline(always)] pub fn is_shift_pending(&self) -> bool { *self == SHPFR_A::ShiftPending } } #[doc = "Field `SHPF` writer - Shift operation pending"] pub type SHPF_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O, SHPFR_A>; impl<'a, REG, const O: u8> SHPF_W<'a, REG, O> where REG: crate::Writable + crate::RegisterSpec, { #[doc = "No shift operation is pending"] #[inline(always)] pub fn no_shift_pending(self) -> &'a mut crate::W<REG> { self.variant(SHPFR_A::NoShiftPending) } #[doc = "A shift operation is pending"] #[inline(always)] pub fn shift_pending(self) -> &'a mut crate::W<REG> { self.variant(SHPFR_A::ShiftPending) } } #[doc = "Field `INITS` reader - Initialization status flag"] pub type INITS_R = crate::BitReader<INITSR_A>; #[doc = "Initialization status flag\n\nValue on reset: 0"] #[derive(Clone, Copy, Debug, PartialEq, Eq)] pub enum INITSR_A { #[doc = "0: Calendar has not been initialized"] NotInitalized = 0, #[doc = "1: Calendar has been initialized"] Initalized = 1, } impl From<INITSR_A> for bool 
{ #[inline(always)] fn from(variant: INITSR_A) -> Self { variant as u8 != 0 } } impl INITS_R { #[doc = "Get enumerated values variant"] #[inline(always)] pub fn variant(&self) -> INITSR_A { match self.bits { false => INITSR_A::NotInitalized, true => INITSR_A::Initalized, } } #[doc = "Calendar has not been initialized"] #[inline(always)] pub fn is_not_initalized(&self) -> bool { *self == INITSR_A::NotInitalized } #[doc = "Calendar has been initialized"] #[inline(always)] pub fn is_initalized(&self) -> bool { *self == INITSR_A::Initalized } } #[doc = "Field `RSF` reader - Registers synchronization flag"] pub type RSF_R = crate::BitReader<RSFR_A>; #[doc = "Registers synchronization flag\n\nValue on reset: 0"] #[derive(Clone, Copy, Debug, PartialEq, Eq)] pub enum RSFR_A { #[doc = "0: Calendar shadow registers not yet synchronized"] NotSynced = 0, #[doc = "1: Calendar shadow registers synchronized"] Synced = 1, } impl From<RSFR_A> for bool { #[inline(always)] fn from(variant: RSFR_A) -> Self { variant as u8 != 0 } } impl RSF_R { #[doc = "Get enumerated values variant"] #[inline(always)] pub fn variant(&self) -> RSFR_A { match self.bits { false => RSFR_A::NotSynced, true => RSFR_A::Synced, } } #[doc = "Calendar shadow registers not yet synchronized"] #[inline(always)] pub fn is_not_synced(&self) -> bool { *self == RSFR_A::NotSynced } #[doc = "Calendar shadow registers synchronized"] #[inline(always)] pub fn is_synced(&self) -> bool { *self == RSFR_A::Synced } } #[doc = "Registers synchronization flag\n\nValue on reset: 0"] #[derive(Clone, Copy, Debug, PartialEq, Eq)] pub enum RSFW_AW { #[doc = "0: This flag is cleared by software by writing 0"] Clear = 0, } impl From<RSFW_AW> for bool { #[inline(always)] fn from(variant: RSFW_AW) -> Self { variant as u8 != 0 } } #[doc = "Field `RSF` writer - Registers synchronization flag"] pub type RSF_W<'a, REG, const O: u8> = crate::BitWriter0C<'a, REG, O, RSFW_AW>; impl<'a, REG, const O: u8> RSF_W<'a, REG, O> where REG: 
crate::Writable + crate::RegisterSpec, { #[doc = "This flag is cleared by software by writing 0"] #[inline(always)] pub fn clear(self) -> &'a mut crate::W<REG> { self.variant(RSFW_AW::Clear) } } #[doc = "Field `INITF` reader - Initialization flag"] pub type INITF_R = crate::BitReader<INITFR_A>; #[doc = "Initialization flag\n\nValue on reset: 0"] #[derive(Clone, Copy, Debug, PartialEq, Eq)] pub enum INITFR_A { #[doc = "0: Calendar registers update is not allowed"] NotAllowed = 0, #[doc = "1: Calendar registers update is allowed"] Allowed = 1, } impl From<INITFR_A> for bool { #[inline(always)] fn from(variant: INITFR_A) -> Self { variant as u8 != 0 } } impl INITF_R { #[doc = "Get enumerated values variant"] #[inline(always)] pub fn variant(&self) -> INITFR_A { match self.bits { false => INITFR_A::NotAllowed, true => INITFR_A::Allowed, } } #[doc = "Calendar registers update is not allowed"] #[inline(always)] pub fn is_not_allowed(&self) -> bool { *self == INITFR_A::NotAllowed } #[doc = "Calendar registers update is allowed"] #[inline(always)] pub fn is_allowed(&self) -> bool { *self == INITFR_A::Allowed } } #[doc = "Field `INIT` reader - Initialization mode"] pub type INIT_R = crate::BitReader<INIT_A>; #[doc = "Initialization mode\n\nValue on reset: 0"] #[derive(Clone, Copy, Debug, PartialEq, Eq)] pub enum INIT_A { #[doc = "0: Free running mode"] FreeRunningMode = 0, #[doc = "1: Initialization mode used to program time and date register (RTC_TR and RTC_DR), and prescaler register (RTC_PRER). 
Counters are stopped and start counting from the new value when INIT is reset."] InitMode = 1, } impl From<INIT_A> for bool { #[inline(always)] fn from(variant: INIT_A) -> Self { variant as u8 != 0 } } impl INIT_R { #[doc = "Get enumerated values variant"] #[inline(always)] pub fn variant(&self) -> INIT_A { match self.bits { false => INIT_A::FreeRunningMode, true => INIT_A::InitMode, } } #[doc = "Free running mode"] #[inline(always)] pub fn is_free_running_mode(&self) -> bool { *self == INIT_A::FreeRunningMode } #[doc = "Initialization mode used to program time and date register (RTC_TR and RTC_DR), and prescaler register (RTC_PRER). Counters are stopped and start counting from the new value when INIT is reset."] #[inline(always)] pub fn is_init_mode(&self) -> bool { *self == INIT_A::InitMode } } #[doc = "Field `INIT` writer - Initialization mode"] pub type INIT_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O, INIT_A>; impl<'a, REG, const O: u8> INIT_W<'a, REG, O> where REG: crate::Writable + crate::RegisterSpec, { #[doc = "Free running mode"] #[inline(always)] pub fn free_running_mode(self) -> &'a mut crate::W<REG> { self.variant(INIT_A::FreeRunningMode) } #[doc = "Initialization mode used to program time and date register (RTC_TR and RTC_DR), and prescaler register (RTC_PRER). 
Counters are stopped and start counting from the new value when INIT is reset."] #[inline(always)] pub fn init_mode(self) -> &'a mut crate::W<REG> { self.variant(INIT_A::InitMode) } } #[doc = "Field `ALRAF` reader - Alarm A flag"] pub type ALRAF_R = crate::BitReader<ALRAFR_A>; #[doc = "Alarm A flag\n\nValue on reset: 0"] #[derive(Clone, Copy, Debug, PartialEq, Eq)] pub enum ALRAFR_A { #[doc = "1: This flag is set by hardware when the time/date registers (RTC_TR and RTC_DR) match the Alarm A register (RTC_ALRMAR)"] Match = 1, } impl From<ALRAFR_A> for bool { #[inline(always)] fn from(variant: ALRAFR_A) -> Self { variant as u8 != 0 } } impl ALRAF_R { #[doc = "Get enumerated values variant"] #[inline(always)] pub fn variant(&self) -> Option<ALRAFR_A> { match self.bits { true => Some(ALRAFR_A::Match), _ => None, } } #[doc = "This flag is set by hardware when the time/date registers (RTC_TR and RTC_DR) match the Alarm A register (RTC_ALRMAR)"] #[inline(always)] pub fn is_match(&self) -> bool { *self == ALRAFR_A::Match } } #[doc = "Alarm A flag\n\nValue on reset: 0"] #[derive(Clone, Copy, Debug, PartialEq, Eq)] pub enum ALRAFW_AW { #[doc = "0: This flag is cleared by software by writing 0"] Clear = 0, } impl From<ALRAFW_AW> for bool { #[inline(always)] fn from(variant: ALRAFW_AW) -> Self { variant as u8 != 0 } } #[doc = "Field `ALRAF` writer - Alarm A flag"] pub type ALRAF_W<'a, REG, const O: u8> = crate::BitWriter0C<'a, REG, O, ALRAFW_AW>; impl<'a, REG, const O: u8> ALRAF_W<'a, REG, O> where REG: crate::Writable + crate::RegisterSpec, { #[doc = "This flag is cleared by software by writing 0"] #[inline(always)] pub fn clear(self) -> &'a mut crate::W<REG> { self.variant(ALRAFW_AW::Clear) } } #[doc = "Field `ALRBF` reader - Alarm B flag"] pub type ALRBF_R = crate::BitReader<ALRBFR_A>; #[doc = "Alarm B flag\n\nValue on reset: 0"] #[derive(Clone, Copy, Debug, PartialEq, Eq)] pub enum ALRBFR_A { #[doc = "1: This flag is set by hardware when the time/date registers (RTC_TR and 
RTC_DR) match the Alarm B register (RTC_ALRMBR)"] Match = 1, } impl From<ALRBFR_A> for bool { #[inline(always)] fn from(variant: ALRBFR_A) -> Self { variant as u8 != 0 } } impl ALRBF_R { #[doc = "Get enumerated values variant"] #[inline(always)] pub fn variant(&self) -> Option<ALRBFR_A> { match self.bits { true => Some(ALRBFR_A::Match), _ => None, } } #[doc = "This flag is set by hardware when the time/date registers (RTC_TR and RTC_DR) match the Alarm B register (RTC_ALRMBR)"] #[inline(always)] pub fn is_match(&self) -> bool { *self == ALRBFR_A::Match } } #[doc = "Alarm B flag\n\nValue on reset: 0"] #[derive(Clone, Copy, Debug, PartialEq, Eq)] pub enum ALRBFW_AW { #[doc = "0: This flag is cleared by software by writing 0"] Clear = 0, } impl From<ALRBFW_AW> for bool { #[inline(always)] fn from(variant: ALRBFW_AW) -> Self { variant as u8 != 0 } } #[doc = "Field `ALRBF` writer - Alarm B flag"] pub type ALRBF_W<'a, REG, const O: u8> = crate::BitWriter0C<'a, REG, O, ALRBFW_AW>; impl<'a, REG, const O: u8> ALRBF_W<'a, REG, O> where REG: crate::Writable + crate::RegisterSpec, { #[doc = "This flag is cleared by software by writing 0"] #[inline(always)] pub fn clear(self) -> &'a mut crate::W<REG> { self.variant(ALRBFW_AW::Clear) } } #[doc = "Field `WUTF` reader - Wakeup timer flag"] pub type WUTF_R = crate::BitReader<WUTFR_A>; #[doc = "Wakeup timer flag\n\nValue on reset: 0"] #[derive(Clone, Copy, Debug, PartialEq, Eq)] pub enum WUTFR_A { #[doc = "1: This flag is set by hardware when the wakeup auto-reload counter reaches 0"] Zero = 1, } impl From<WUTFR_A> for bool { #[inline(always)] fn from(variant: WUTFR_A) -> Self { variant as u8 != 0 } } impl WUTF_R { #[doc = "Get enumerated values variant"] #[inline(always)] pub fn variant(&self) -> Option<WUTFR_A> { match self.bits { true => Some(WUTFR_A::Zero), _ => None, } } #[doc = "This flag is set by hardware when the wakeup auto-reload counter reaches 0"] #[inline(always)] pub fn is_zero(&self) -> bool { *self == WUTFR_A::Zero 
} } #[doc = "Wakeup timer flag\n\nValue on reset: 0"] #[derive(Clone, Copy, Debug, PartialEq, Eq)] pub enum WUTFW_AW { #[doc = "0: This flag is cleared by software by writing 0"] Clear = 0, } impl From<WUTFW_AW> for bool { #[inline(always)] fn from(variant: WUTFW_AW) -> Self { variant as u8 != 0 } } #[doc = "Field `WUTF` writer - Wakeup timer flag"] pub type WUTF_W<'a, REG, const O: u8> = crate::BitWriter0C<'a, REG, O, WUTFW_AW>; impl<'a, REG, const O: u8> WUTF_W<'a, REG, O> where REG: crate::Writable + crate::RegisterSpec, { #[doc = "This flag is cleared by software by writing 0"] #[inline(always)] pub fn clear(self) -> &'a mut crate::W<REG> { self.variant(WUTFW_AW::Clear) } } #[doc = "Field `TSF` reader - Time-stamp flag"] pub type TSF_R = crate::BitReader<TSFR_A>; #[doc = "Time-stamp flag\n\nValue on reset: 0"] #[derive(Clone, Copy, Debug, PartialEq, Eq)] pub enum TSFR_A { #[doc = "1: This flag is set by hardware when a time-stamp event occurs"] TimestampEvent = 1, } impl From<TSFR_A> for bool { #[inline(always)] fn from(variant: TSFR_A) -> Self { variant as u8 != 0 } } impl TSF_R { #[doc = "Get enumerated values variant"] #[inline(always)] pub fn variant(&self) -> Option<TSFR_A> { match self.bits { true => Some(TSFR_A::TimestampEvent), _ => None, } } #[doc = "This flag is set by hardware when a time-stamp event occurs"] #[inline(always)] pub fn is_timestamp_event(&self) -> bool { *self == TSFR_A::TimestampEvent } } #[doc = "Time-stamp flag\n\nValue on reset: 0"] #[derive(Clone, Copy, Debug, PartialEq, Eq)] pub enum TSFW_AW { #[doc = "0: This flag is cleared by software by writing 0"] Clear = 0, } impl From<TSFW_AW> for bool { #[inline(always)] fn from(variant: TSFW_AW) -> Self { variant as u8 != 0 } } #[doc = "Field `TSF` writer - Time-stamp flag"] pub type TSF_W<'a, REG, const O: u8> = crate::BitWriter0C<'a, REG, O, TSFW_AW>; impl<'a, REG, const O: u8> TSF_W<'a, REG, O> where REG: crate::Writable + crate::RegisterSpec, { #[doc = "This flag is cleared by 
software by writing 0"] #[inline(always)] pub fn clear(self) -> &'a mut crate::W<REG> { self.variant(TSFW_AW::Clear) } } #[doc = "Field `TSOVF` reader - Time-stamp overflow flag"] pub type TSOVF_R = crate::BitReader<TSOVFR_A>; #[doc = "Time-stamp overflow flag\n\nValue on reset: 0"] #[derive(Clone, Copy, Debug, PartialEq, Eq)] pub enum TSOVFR_A { #[doc = "1: This flag is set by hardware when a time-stamp event occurs while TSF is already set"] Overflow = 1, } impl From<TSOVFR_A> for bool { #[inline(always)] fn from(variant: TSOVFR_A) -> Self { variant as u8 != 0 } } impl TSOVF_R { #[doc = "Get enumerated values variant"] #[inline(always)] pub fn variant(&self) -> Option<TSOVFR_A> { match self.bits { true => Some(TSOVFR_A::Overflow), _ => None, } } #[doc = "This flag is set by hardware when a time-stamp event occurs while TSF is already set"] #[inline(always)] pub fn is_overflow(&self) -> bool { *self == TSOVFR_A::Overflow } } #[doc = "Time-stamp overflow flag\n\nValue on reset: 0"] #[derive(Clone, Copy, Debug, PartialEq, Eq)] pub enum TSOVFW_AW { #[doc = "0: This flag is cleared by software by writing 0"] Clear = 0, } impl From<TSOVFW_AW> for bool { #[inline(always)] fn from(variant: TSOVFW_AW) -> Self { variant as u8 != 0 } } #[doc = "Field `TSOVF` writer - Time-stamp overflow flag"] pub type TSOVF_W<'a, REG, const O: u8> = crate::BitWriter0C<'a, REG, O, TSOVFW_AW>; impl<'a, REG, const O: u8> TSOVF_W<'a, REG, O> where REG: crate::Writable + crate::RegisterSpec, { #[doc = "This flag is cleared by software by writing 0"] #[inline(always)] pub fn clear(self) -> &'a mut crate::W<REG> { self.variant(TSOVFW_AW::Clear) } } #[doc = "Field `TAMP1F` reader - Tamper detection flag"] pub type TAMP1F_R = crate::BitReader<TAMP1FR_A>; #[doc = "Tamper detection flag\n\nValue on reset: 0"] #[derive(Clone, Copy, Debug, PartialEq, Eq)] pub enum TAMP1FR_A { #[doc = "1: This flag is set by hardware when a tamper detection event is detected on the RTC_TAMPx input"] Tampered = 1, } impl 
From<TAMP1FR_A> for bool { #[inline(always)] fn from(variant: TAMP1FR_A) -> Self { variant as u8 != 0 } } impl TAMP1F_R { #[doc = "Get enumerated values variant"] #[inline(always)] pub fn variant(&self) -> Option<TAMP1FR_A> { match self.bits { true => Some(TAMP1FR_A::Tampered), _ => None, } } #[doc = "This flag is set by hardware when a tamper detection event is detected on the RTC_TAMPx input"] #[inline(always)] pub fn is_tampered(&self) -> bool { *self == TAMP1FR_A::Tampered } } #[doc = "Tamper detection flag\n\nValue on reset: 0"] #[derive(Clone, Copy, Debug, PartialEq, Eq)] pub enum TAMP1FW_AW { #[doc = "0: Flag cleared by software writing 0"] Clear = 0, } impl From<TAMP1FW_AW> for bool { #[inline(always)] fn from(variant: TAMP1FW_AW) -> Self { variant as u8 != 0 } } #[doc = "Field `TAMP1F` writer - Tamper detection flag"] pub type TAMP1F_W<'a, REG, const O: u8> = crate::BitWriter0C<'a, REG, O, TAMP1FW_AW>; impl<'a, REG, const O: u8> TAMP1F_W<'a, REG, O> where REG: crate::Writable + crate::RegisterSpec, { #[doc = "Flag cleared by software writing 0"] #[inline(always)] pub fn clear(self) -> &'a mut crate::W<REG> { self.variant(TAMP1FW_AW::Clear) } } #[doc = "Field `TAMP2F` reader - RTC_TAMP2 detection flag"] pub use TAMP1F_R as TAMP2F_R; #[doc = "Field `TAMP3F` reader - RTC_TAMP3 detection flag"] pub use TAMP1F_R as TAMP3F_R; #[doc = "Field `TAMP2F` writer - RTC_TAMP2 detection flag"] pub use TAMP1F_W as TAMP2F_W; #[doc = "Field `TAMP3F` writer - RTC_TAMP3 detection flag"] pub use TAMP1F_W as TAMP3F_W; #[doc = "Field `RECALPF` reader - Recalibration pending Flag"] pub type RECALPF_R = crate::BitReader<RECALPFR_A>; #[doc = "Recalibration pending Flag\n\nValue on reset: 0"] #[derive(Clone, Copy, Debug, PartialEq, Eq)] pub enum RECALPFR_A { #[doc = "1: The RECALPF status flag is automatically set to 1 when software writes to the RTC_CALR register, indicating that the RTC_CALR register is blocked. 
When the new calibration settings are taken into account, this bit returns to 0"] Pending = 1, } impl From<RECALPFR_A> for bool { #[inline(always)] fn from(variant: RECALPFR_A) -> Self { variant as u8 != 0 } } impl RECALPF_R { #[doc = "Get enumerated values variant"] #[inline(always)] pub fn variant(&self) -> Option<RECALPFR_A> { match self.bits { true => Some(RECALPFR_A::Pending), _ => None, } } #[doc = "The RECALPF status flag is automatically set to 1 when software writes to the RTC_CALR register, indicating that the RTC_CALR register is blocked. When the new calibration settings are taken into account, this bit returns to 0"] #[inline(always)] pub fn is_pending(&self) -> bool { *self == RECALPFR_A::Pending } } impl R { #[doc = "Bit 0 - Alarm A write flag"] #[inline(always)] pub fn alrawf(&self) -> ALRAWF_R { ALRAWF_R::new((self.bits & 1) != 0) } #[doc = "Bit 1 - Alarm B write flag"] #[inline(always)] pub fn alrbwf(&self) -> ALRBWF_R { ALRBWF_R::new(((self.bits >> 1) & 1) != 0) } #[doc = "Bit 2 - Wakeup timer write flag"] #[inline(always)] pub fn wutwf(&self) -> WUTWF_R { WUTWF_R::new(((self.bits >> 2) & 1) != 0) } #[doc = "Bit 3 - Shift operation pending"] #[inline(always)] pub fn shpf(&self) -> SHPF_R { SHPF_R::new(((self.bits >> 3) & 1) != 0) } #[doc = "Bit 4 - Initialization status flag"] #[inline(always)] pub fn inits(&self) -> INITS_R { INITS_R::new(((self.bits >> 4) & 1) != 0) } #[doc = "Bit 5 - Registers synchronization flag"] #[inline(always)] pub fn rsf(&self) -> RSF_R { RSF_R::new(((self.bits >> 5) & 1) != 0) } #[doc = "Bit 6 - Initialization flag"] #[inline(always)] pub fn initf(&self) -> INITF_R { INITF_R::new(((self.bits >> 6) & 1) != 0) } #[doc = "Bit 7 - Initialization mode"] #[inline(always)] pub fn init(&self) -> INIT_R { INIT_R::new(((self.bits >> 7) & 1) != 0) } #[doc = "Bit 8 - Alarm A flag"] #[inline(always)] pub fn alraf(&self) -> ALRAF_R { ALRAF_R::new(((self.bits >> 8) & 1) != 0) } #[doc = "Bit 9 - Alarm B flag"] #[inline(always)] pub fn 
alrbf(&self) -> ALRBF_R { ALRBF_R::new(((self.bits >> 9) & 1) != 0) } #[doc = "Bit 10 - Wakeup timer flag"] #[inline(always)] pub fn wutf(&self) -> WUTF_R { WUTF_R::new(((self.bits >> 10) & 1) != 0) } #[doc = "Bit 11 - Time-stamp flag"] #[inline(always)] pub fn tsf(&self) -> TSF_R { TSF_R::new(((self.bits >> 11) & 1) != 0) } #[doc = "Bit 12 - Time-stamp overflow flag"] #[inline(always)] pub fn tsovf(&self) -> TSOVF_R { TSOVF_R::new(((self.bits >> 12) & 1) != 0) } #[doc = "Bit 13 - Tamper detection flag"] #[inline(always)] pub fn tamp1f(&self) -> TAMP1F_R { TAMP1F_R::new(((self.bits >> 13) & 1) != 0) } #[doc = "Bit 14 - RTC_TAMP2 detection flag"] #[inline(always)] pub fn tamp2f(&self) -> TAMP2F_R { TAMP2F_R::new(((self.bits >> 14) & 1) != 0) } #[doc = "Bit 15 - RTC_TAMP3 detection flag"] #[inline(always)] pub fn tamp3f(&self) -> TAMP3F_R { TAMP3F_R::new(((self.bits >> 15) & 1) != 0) } #[doc = "Bit 16 - Recalibration pending Flag"] #[inline(always)] pub fn recalpf(&self) -> RECALPF_R { RECALPF_R::new(((self.bits >> 16) & 1) != 0) } } impl W { #[doc = "Bit 3 - Shift operation pending"] #[inline(always)] #[must_use] pub fn shpf(&mut self) -> SHPF_W<ISR_SPEC, 3> { SHPF_W::new(self) } #[doc = "Bit 5 - Registers synchronization flag"] #[inline(always)] #[must_use] pub fn rsf(&mut self) -> RSF_W<ISR_SPEC, 5> { RSF_W::new(self) } #[doc = "Bit 7 - Initialization mode"] #[inline(always)] #[must_use] pub fn init(&mut self) -> INIT_W<ISR_SPEC, 7> { INIT_W::new(self) } #[doc = "Bit 8 - Alarm A flag"] #[inline(always)] #[must_use] pub fn alraf(&mut self) -> ALRAF_W<ISR_SPEC, 8> { ALRAF_W::new(self) } #[doc = "Bit 9 - Alarm B flag"] #[inline(always)] #[must_use] pub fn alrbf(&mut self) -> ALRBF_W<ISR_SPEC, 9> { ALRBF_W::new(self) } #[doc = "Bit 10 - Wakeup timer flag"] #[inline(always)] #[must_use] pub fn wutf(&mut self) -> WUTF_W<ISR_SPEC, 10> { WUTF_W::new(self) } #[doc = "Bit 11 - Time-stamp flag"] #[inline(always)] #[must_use] pub fn tsf(&mut self) -> TSF_W<ISR_SPEC, 11> { 
TSF_W::new(self) } #[doc = "Bit 12 - Time-stamp overflow flag"] #[inline(always)] #[must_use] pub fn tsovf(&mut self) -> TSOVF_W<ISR_SPEC, 12> { TSOVF_W::new(self) } #[doc = "Bit 13 - Tamper detection flag"] #[inline(always)] #[must_use] pub fn tamp1f(&mut self) -> TAMP1F_W<ISR_SPEC, 13> { TAMP1F_W::new(self) } #[doc = "Bit 14 - RTC_TAMP2 detection flag"] #[inline(always)] #[must_use] pub fn tamp2f(&mut self) -> TAMP2F_W<ISR_SPEC, 14> { TAMP2F_W::new(self) } #[doc = "Bit 15 - RTC_TAMP3 detection flag"] #[inline(always)] #[must_use] pub fn tamp3f(&mut self) -> TAMP3F_W<ISR_SPEC, 15> { TAMP3F_W::new(self) } #[doc = "Writes raw bits to the register."] #[inline(always)] pub unsafe fn bits(&mut self, bits: u32) -> &mut Self { self.bits = bits; self } } #[doc = "initialization and status register\n\nYou can [`read`](crate::generic::Reg::read) this register and get [`isr::R`](R). You can [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero) this register using [`isr::W`](W). You can also [`modify`](crate::generic::Reg::modify) this register. See [API](https://docs.rs/svd2rust/#read--modify--write-api)."] pub struct ISR_SPEC; impl crate::RegisterSpec for ISR_SPEC { type Ux = u32; } #[doc = "`read()` method returns [`isr::R`](R) reader structure"] impl crate::Readable for ISR_SPEC {} #[doc = "`write(|w| ..)` method takes [`isr::W`](W) writer structure"] impl crate::Writable for ISR_SPEC { const ZERO_TO_MODIFY_FIELDS_BITMAP: Self::Ux = 0xff20; const ONE_TO_MODIFY_FIELDS_BITMAP: Self::Ux = 0; } #[doc = "`reset()` method sets ISR to value 0x07"] impl crate::Resettable for ISR_SPEC { const RESET_VALUE: Self::Ux = 0x07; }
/* * Copyright 2019 Cargill Incorporated * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * ----------------------------------------------------------------------------- */ //! A `Scheduler` which schedules transaction for execution one at time. mod core; mod execution; mod shared; use crate::context::ContextLifecycle; use crate::protocol::batch::BatchPair; use crate::scheduler::BatchExecutionResult; use crate::scheduler::ExecutionTask; use crate::scheduler::ExecutionTaskCompletionNotifier; use crate::scheduler::Scheduler; use crate::scheduler::SchedulerError; use std::sync::mpsc; use std::sync::mpsc::Sender; use std::sync::{Arc, Mutex}; // If the shared lock is poisoned, report an internal error since the scheduler cannot recover. impl From<std::sync::PoisonError<std::sync::MutexGuard<'_, shared::Shared>>> for SchedulerError { fn from( error: std::sync::PoisonError<std::sync::MutexGuard<'_, shared::Shared>>, ) -> SchedulerError { SchedulerError::Internal(format!("scheduler shared lock is poisoned: {}", error)) } } // If the core `Receiver` disconnects, report an internal error since the scheduler can't operate // without the core thread. impl From<std::sync::mpsc::SendError<core::CoreMessage>> for SchedulerError { fn from(error: std::sync::mpsc::SendError<core::CoreMessage>) -> SchedulerError { SchedulerError::Internal(format!("scheduler's core thread disconnected: {}", error)) } } /// A `Scheduler` implementation which schedules transactions for execution /// one at a time. 
pub struct SerialScheduler { shared_lock: Arc<Mutex<shared::Shared>>, core_handle: Option<std::thread::JoinHandle<()>>, core_tx: Sender<core::CoreMessage>, task_iterator: Option<Box<dyn Iterator<Item = ExecutionTask> + Send>>, } impl SerialScheduler { /// Returns a newly created `SerialScheduler`. pub fn new( context_lifecycle: Box<dyn ContextLifecycle>, state_id: String, ) -> Result<SerialScheduler, SchedulerError> { let (execution_tx, execution_rx) = mpsc::channel(); let (core_tx, core_rx) = mpsc::channel(); let shared_lock = Arc::new(Mutex::new(shared::Shared::new())); // Start the thread to accept and process CoreMessage messages let core_handle = core::SchedulerCore::new( shared_lock.clone(), core_rx, execution_tx, context_lifecycle, state_id, ) .start()?; Ok(SerialScheduler { shared_lock, core_handle: Some(core_handle), core_tx: core_tx.clone(), task_iterator: Some(Box::new(execution::SerialExecutionTaskIterator::new( core_tx, execution_rx, ))), }) } pub fn shutdown(mut self) { match self.core_tx.send(core::CoreMessage::Shutdown) { Ok(_) => { if let Some(join_handle) = self.core_handle.take() { join_handle.join().unwrap_or_else(|err| { // This should not never happen, because the core thread should never panic error!( "failed to join scheduler thread because it panicked: {:?}", err ) }); } } Err(err) => { warn!("failed to send to scheduler thread during drop: {}", err); } } } } impl Scheduler for SerialScheduler { fn set_result_callback( &mut self, callback: Box<dyn Fn(Option<BatchExecutionResult>) + Send>, ) -> Result<(), SchedulerError> { self.shared_lock.lock()?.set_result_callback(callback); Ok(()) } fn set_error_callback( &mut self, callback: Box<dyn Fn(SchedulerError) + Send>, ) -> Result<(), SchedulerError> { self.shared_lock.lock()?.set_error_callback(callback); Ok(()) } fn add_batch(&mut self, batch: BatchPair) -> Result<(), SchedulerError> { let mut shared = self.shared_lock.lock()?; if shared.finalized() { return 
Err(SchedulerError::SchedulerFinalized); } if shared.batch_already_queued(&batch) { return Err(SchedulerError::DuplicateBatch( batch.batch().header_signature().into(), )); } shared.add_unscheduled_batch(batch); // Notify the core that a batch has been added. Note that the batch is // not sent across the channel because the batch has already been added // to the unscheduled queue above, where we hold a lock; adding a batch // must be exclusive with finalize. self.core_tx.send(core::CoreMessage::BatchAdded)?; Ok(()) } fn cancel(&mut self) -> Result<Vec<BatchPair>, SchedulerError> { Ok(self.shared_lock.lock()?.drain_unscheduled_batches()) } fn finalize(&mut self) -> Result<(), SchedulerError> { self.shared_lock.lock()?.set_finalized(true); self.core_tx.send(core::CoreMessage::Finalized)?; Ok(()) } fn take_task_iterator( &mut self, ) -> Result<Box<dyn Iterator<Item = ExecutionTask> + Send>, SchedulerError> { self.task_iterator .take() .ok_or(SchedulerError::NoTaskIterator) } fn new_notifier(&mut self) -> Result<Box<dyn ExecutionTaskCompletionNotifier>, SchedulerError> { Ok(Box::new( execution::SerialExecutionTaskCompletionNotifier::new(self.core_tx.clone()), )) } } #[cfg(test)] mod tests { use super::*; use crate::scheduler::tests::*; use crate::scheduler::ExecutionTaskCompletionNotification; // General Scheduler tests /// In addition to the basic functionality verified by `test_scheduler_add_batch`, this test /// verifies that the SerialScheduler adds the batch to its unscheduled batches queue. 
#[test] fn test_serial_scheduler_add_batch() { let state_id = String::from("state0"); let context_lifecycle = Box::new(MockContextLifecycle::new()); let mut scheduler = SerialScheduler::new(context_lifecycle, state_id).expect("Failed to create scheduler"); let batch = test_scheduler_add_batch(&mut scheduler); assert!(scheduler .shared_lock .lock() .expect("shared lock is poisoned") .batch_already_queued(&batch)); scheduler.shutdown(); } /// In addition to the basic functionality verified by `test_scheduler_cancel`, this test /// verifies that the SerialScheduler drains all batches from its unscheduled batches queue. #[test] fn test_serial_scheduler_cancel() { let state_id = String::from("state0"); let context_lifecycle = Box::new(MockContextLifecycle::new()); let mut scheduler = SerialScheduler::new(context_lifecycle, state_id).expect("Failed to create scheduler"); test_scheduler_cancel(&mut scheduler); assert!(scheduler .shared_lock .lock() .expect("shared lock is poisoned") .unscheduled_batches_is_empty()); scheduler.shutdown(); } /// In addition to the basic functionality verified by `test_scheduler_finalize`, this test /// verifies that the SerialScheduler properly updates its internal state to finalized. #[test] fn test_serial_scheduler_finalize() { let state_id = String::from("state0"); let context_lifecycle = Box::new(MockContextLifecycle::new()); let mut scheduler = SerialScheduler::new(context_lifecycle, state_id).expect("Failed to create scheduler"); test_scheduler_finalize(&mut scheduler); assert!(scheduler .shared_lock .lock() .expect("shared lock is poisoned") .finalized()); scheduler.shutdown(); } /// Tests that the serial scheduler can process a batch with a single transaction. 
#[test] pub fn test_serial_scheduler_flow_with_one_transaction() { let state_id = String::from("state0"); let context_lifecycle = Box::new(MockContextLifecycle::new()); let mut scheduler = SerialScheduler::new(context_lifecycle, state_id).expect("Failed to create scheduler"); test_scheduler_flow_with_one_transaction(&mut scheduler); scheduler.shutdown(); } /// Tests that the serial scheduler can process a batch with multiple transactions. #[test] pub fn test_serial_scheduler_flow_with_multiple_transactions() { let state_id = String::from("state0"); let context_lifecycle = Box::new(MockContextLifecycle::new()); let mut scheduler = SerialScheduler::new(context_lifecycle, state_id).expect("Failed to create scheduler"); test_scheduler_flow_with_multiple_transactions(&mut scheduler); scheduler.shutdown(); } /// Tests that the serial scheduler invalidates the whole batch when one of its transactions is /// invalid. #[test] pub fn test_serial_scheduler_invalid_transaction_invalidates_batch() { let state_id = String::from("state0"); let context_lifecycle = Box::new(MockContextLifecycle::new()); let mut scheduler = SerialScheduler::new(context_lifecycle, state_id).expect("Failed to create scheduler"); test_scheduler_invalid_transaction_invalidates_batch(&mut scheduler); scheduler.shutdown(); } /// Tests that the serial scheduler returns the appropriate error via the error callback when /// an unexpected task completion notification is received. #[test] pub fn test_serial_scheduler_unexpected_notification() { let state_id = String::from("state0"); let context_lifecycle = Box::new(MockContextLifecycle::new()); let mut scheduler = SerialScheduler::new(context_lifecycle, state_id).expect("Failed to create scheduler"); test_scheduler_unexpected_notification(&mut scheduler); scheduler.shutdown(); } // SerialScheduler-specific tests /// This test will hang if join() fails within the scheduler. 
#[test] fn test_scheduler_thread_cleanup() { let state_id = String::from("state0"); let context_lifecycle = Box::new(MockContextLifecycle::new()); SerialScheduler::new(context_lifecycle, state_id) .expect("Failed to create scheduler") .shutdown(); } /// This test verifies that the SerialScheduler executes transactions strictly in order, and /// does not return the next execution task until the previous one is completed. #[test] fn test_serial_scheduler_ordering() { let state_id = String::from("state0"); let context_lifecycle = Box::new(MockContextLifecycle::new()); let mut scheduler = SerialScheduler::new(context_lifecycle, state_id).expect("Failed to create scheduler"); let transactions = mock_transactions(10); let batch = mock_batch(transactions.clone()); scheduler .add_batch(batch.clone()) .expect("Failed to add batch"); scheduler.finalize().expect("Failed to finalize"); let mut task_iterator = scheduler .take_task_iterator() .expect("Failed to get task iterator"); let notifier = scheduler .new_notifier() .expect("Failed to get new notifier"); let mut transaction_ids = transactions.into_iter(); // Get the first task, but take some time to execute it in a background thread; meanwhile, // wait for the next task. A channel is used to verify that the next task isn't returned // until the result for the first is received by the scheduler. 
let (tx, rx) = mpsc::channel(); let first_task_notifier = notifier.clone(); let first_task_txn_id = task_iterator .next() .expect("Failed to get 1st task") .pair() .transaction() .header_signature() .to_string(); assert_eq!( transaction_ids .next() .expect("Failed to get next transaction") .header_signature(), &first_task_txn_id, ); std::thread::Builder::new() .name("Thread-test_serial_scheduler_ordering".into()) .spawn(move || { std::thread::sleep(std::time::Duration::from_secs(1)); first_task_notifier.notify(ExecutionTaskCompletionNotification::Valid( mock_context_id(), first_task_txn_id, )); // This send must occur before the next task is returned. tx.send(()).expect("Failed to send"); }) .expect("Failed to spawn thread"); let second_task_txn_id = task_iterator .next() .expect("Failed to get 2nd task") .pair() .transaction() .header_signature() .to_string(); assert_eq!( transaction_ids .next() .expect("Failed to get next transaction") .header_signature(), &second_task_txn_id, ); // If the signal was never sent, this task is being returned before the // previous result was sent. rx.try_recv() .expect("Returned next task before previous completed"); notifier.notify(ExecutionTaskCompletionNotification::Valid( mock_context_id(), second_task_txn_id, )); // Process the rest of the execution tasks and verify the order loop { match task_iterator.next() { Some(task) => { let txn_id = task.pair().transaction().header_signature().to_string(); assert_eq!( transaction_ids .next() .expect("Failed to get next transaction") .header_signature(), &txn_id, ); notifier.notify(ExecutionTaskCompletionNotification::Valid( mock_context_id(), txn_id, )); } None => break, } } scheduler.shutdown(); } }
use playhead::PlayHead;

/// A freshly created play head reports a time of zero.
#[test]
fn start() {
    let play_head = PlayHead::new();
    assert_eq!(play_head.time(), 0);
}

// NOTE(review): this test is disabled; it spins in a busy loop as a crude
// delay and should be rewritten with a deterministic clock before enabling.
// #[test]
// fn play_pause() {
//     let mut play_head: PlayHead = PlayHead::new();
//     play_head.play();
//     let _start_time = play_head.time();
//
//     let iterations = 10000;
//     for i in 0..iterations {
//         if i == 1 {
//             play_head.pause();
//         }
//     }
//     let time_after_pause = play_head.time();
//     let time_after_pause_again = play_head.time();
//     assert_eq!(time_after_pause, time_after_pause_again);
// }
use crate::connection::MavConnection; use crate::{read_versioned_msg, MavHeader, MavlinkVersion, Message}; use std::fs::File; use std::io::{self}; use std::sync::Mutex; /// File MAVLINK connection pub fn open(file_path: &str) -> io::Result<FileConnection> { let file = match File::open(&file_path) { Err(e) => return Err(e), Ok(file) => file, }; Ok(FileConnection { file: Mutex::new(file), protocol_version: MavlinkVersion::V2, }) } pub struct FileConnection { file: Mutex<std::fs::File>, protocol_version: MavlinkVersion, } impl<M: Message> MavConnection<M> for FileConnection { fn recv(&self) -> io::Result<(MavHeader, M)> { let mut file = self.file.lock().unwrap(); loop { match read_versioned_msg(&mut *file, self.protocol_version) { Ok((h, m)) => { return Ok((h, m)); } Err(e) => match e.kind() { io::ErrorKind::UnexpectedEof => { return Err(e); } _ => {} }, } } } fn send(&self, _header: &MavHeader, _data: &M) -> io::Result<()> { Ok(()) } fn set_protocol_version(&mut self, version: MavlinkVersion) { self.protocol_version = version; } fn get_protocol_version(&self) -> MavlinkVersion { self.protocol_version } }
use crystalorb::{ client::{ stage::{Stage, StageMut}, Client, }, clocksync::ClockSyncMessage, command::Command, fixed_timestepper::Stepper, server::Server, timestamp::Timestamped, world::{DisplayState, World}, Config, TweeningMethod, }; use crystalorb_mock_network::MockNetwork; use js_sys::Array; use rapier2d::{na::Vector2, prelude::*}; use serde::{Deserialize, Serialize}; use std::fmt::{Debug, Display, Formatter}; use tracing::Level; use wasm_bindgen::prelude::*; const GRAVITY: Vector2<Real> = Vector2::new(0.0, -9.81 * 30.0); const TIMESTEP: f64 = 1.0 / 64.0; pub struct DemoWorld { pipeline: PhysicsPipeline, island_manager: IslandManager, broad_phase: BroadPhase, narrow_phase: NarrowPhase, bodies: RigidBodySet, colliders: ColliderSet, joints: JointSet, ccd_solver: CCDSolver, player_left: Player, player_right: Player, doodad: Player, } pub struct Player { body_handle: RigidBodyHandle, _collider_handle: ColliderHandle, input: PlayerInput, } #[derive(Serialize, Deserialize, Default, Debug, Clone, Copy)] pub struct PlayerInput { jump: bool, left: bool, right: bool, } #[wasm_bindgen] #[derive(Serialize, Deserialize, Debug, Clone)] pub struct DemoCommand { pub player_side: PlayerSide, pub command: PlayerCommand, pub value: bool, } #[wasm_bindgen] impl DemoCommand { #[wasm_bindgen(constructor)] pub fn new(player_side: PlayerSide, command: PlayerCommand, value: bool) -> Self { Self { player_side, command, value, } } } impl Display for DemoCommand { fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { write!( f, "{} {} {}", match self.player_side { PlayerSide::Left => "P1", PlayerSide::Right => "P2", }, match self.command { PlayerCommand::Left => "Left", PlayerCommand::Right => "Right", PlayerCommand::Jump => "Jump", }, match self.value { true => "On", false => "Off", } ) } } #[wasm_bindgen] #[derive(Serialize, Deserialize, Debug, Clone, Copy)] pub enum PlayerSide { Left, Right, } #[wasm_bindgen] #[derive(Serialize, Deserialize, Debug, Clone, Copy)] pub enum 
PlayerCommand { Jump, Left, Right, } #[derive(Serialize, Deserialize, Debug, Clone)] pub struct DemoSnapshot { player_left: PlayerSnapshot, player_right: PlayerSnapshot, doodad: PlayerSnapshot, } #[derive(Serialize, Deserialize, Debug, Clone)] pub struct PlayerSnapshot { position: Isometry<Real>, linvel: Vector2<Real>, angvel: Real, input: PlayerInput, } #[wasm_bindgen] #[derive(Clone, Debug)] pub struct DemoDisplayState { player_left_position: Isometry<Real>, player_right_position: Isometry<Real>, doodad_position: Isometry<Real>, } #[wasm_bindgen] impl DemoDisplayState { pub fn player_left_translation_x(&self) -> Real { self.player_left_position.translation.vector[0] } pub fn player_left_translation_y(&self) -> Real { self.player_left_position.translation.vector[1] } pub fn player_left_angle(&self) -> Real { self.player_left_position.rotation.angle() } pub fn player_right_translation_x(&self) -> Real { self.player_right_position.translation.vector[0] } pub fn player_right_translation_y(&self) -> Real { self.player_right_position.translation.vector[1] } pub fn player_right_angle(&self) -> Real { self.player_right_position.rotation.angle() } pub fn doodad_translation_x(&self) -> Real { self.doodad_position.translation.vector[0] } pub fn doodad_translation_y(&self) -> Real { self.doodad_position.translation.vector[1] } pub fn doodad_angle(&self) -> Real { self.doodad_position.rotation.angle() } } impl Default for DemoWorld { fn default() -> Self { Self::new() } } impl DemoWorld { pub fn new() -> Self { let mut bodies = RigidBodySet::new(); let mut colliders = ColliderSet::new(); colliders.insert_with_parent( ColliderBuilder::cuboid(1.0, 100.0).restitution(0.5).build(), bodies.insert( RigidBodyBuilder::new_static() .translation(vector![0.0, 0.0]) .ccd_enabled(true) .build(), ), &mut bodies, ); colliders.insert_with_parent( ColliderBuilder::cuboid(1.0, 100.0).restitution(0.5).build(), bodies.insert( RigidBodyBuilder::new_static() .translation(vector![180.0, 0.0]) 
.ccd_enabled(true) .build(), ), &mut bodies, ); colliders.insert_with_parent( ColliderBuilder::cuboid(180.0, 1.0).restitution(0.5).build(), bodies.insert( RigidBodyBuilder::new_static() .translation(vector![0.0, 0.0]) .ccd_enabled(true) .build(), ), &mut bodies, ); colliders.insert_with_parent( ColliderBuilder::cuboid(180.0, 1.0).restitution(0.5).build(), bodies.insert( RigidBodyBuilder::new_static() .translation(vector![0.0, 100.0]) .ccd_enabled(true) .build(), ), &mut bodies, ); let left_body_handle = bodies.insert( RigidBodyBuilder::new_dynamic() .translation(vector![10.0, 80.0]) .ccd_enabled(true) .build(), ); let right_body_handle = bodies.insert( RigidBodyBuilder::new_dynamic() .translation(vector![150.0, 80.0]) .ccd_enabled(true) .build(), ); let doodad_body_handle = bodies.insert( RigidBodyBuilder::new_dynamic() .translation(vector![80.0, 80.0]) .ccd_enabled(true) .build(), ); let left_collider_handle = colliders.insert_with_parent( ColliderBuilder::ball(10.0) .density(0.1) .restitution(0.5) .build(), left_body_handle, &mut bodies, ); let right_collider_handle = colliders.insert_with_parent( ColliderBuilder::ball(10.0) .density(0.1) .restitution(0.5) .build(), right_body_handle, &mut bodies, ); let doodad_collider_handle = colliders.insert_with_parent( ColliderBuilder::ball(10.0) .density(0.1) .restitution(0.5) .build(), doodad_body_handle, &mut bodies, ); Self { pipeline: PhysicsPipeline::new(), island_manager: IslandManager::new(), broad_phase: BroadPhase::new(), narrow_phase: NarrowPhase::new(), bodies, colliders, joints: JointSet::new(), ccd_solver: CCDSolver::new(), player_left: Player { body_handle: left_body_handle, _collider_handle: left_collider_handle, input: Default::default(), }, player_right: Player { body_handle: right_body_handle, _collider_handle: right_collider_handle, input: Default::default(), }, doodad: Player { body_handle: doodad_body_handle, _collider_handle: doodad_collider_handle, input: Default::default(), }, } } } impl World for 
DemoWorld { type CommandType = DemoCommand; type SnapshotType = DemoSnapshot; type DisplayStateType = DemoDisplayState; fn command_is_valid(command: &Self::CommandType, client_id: usize) -> bool { match command.player_side { PlayerSide::Left => client_id == 0, PlayerSide::Right => client_id == 1, } } fn apply_command(&mut self, command: &Self::CommandType) { let player_input = &mut match command.player_side { PlayerSide::Left => &mut self.player_left, PlayerSide::Right => &mut self.player_right, } .input; match command.command { PlayerCommand::Jump => player_input.jump = command.value, PlayerCommand::Left => player_input.left = command.value, PlayerCommand::Right => player_input.right = command.value, } } fn apply_snapshot(&mut self, snapshot: Self::SnapshotType) { let body_left = self.bodies.get_mut(self.player_left.body_handle).unwrap(); body_left.set_position(snapshot.player_left.position, true); body_left.set_linvel(snapshot.player_left.linvel, true); body_left.set_angvel(snapshot.player_left.angvel, true); let body_right = self.bodies.get_mut(self.player_right.body_handle).unwrap(); body_right.set_position(snapshot.player_right.position, true); body_right.set_linvel(snapshot.player_right.linvel, true); body_right.set_angvel(snapshot.player_right.angvel, true); let body_doodad = self.bodies.get_mut(self.doodad.body_handle).unwrap(); body_doodad.set_position(snapshot.doodad.position, true); body_doodad.set_linvel(snapshot.doodad.linvel, true); body_doodad.set_angvel(snapshot.doodad.angvel, true); self.player_left.input = snapshot.player_left.input; self.player_right.input = snapshot.player_right.input; self.doodad.input = snapshot.doodad.input; } fn snapshot(&self) -> Self::SnapshotType { let body_left = self.bodies.get(self.player_left.body_handle).unwrap(); let body_right = self.bodies.get(self.player_right.body_handle).unwrap(); let body_doodad = self.bodies.get(self.doodad.body_handle).unwrap(); DemoSnapshot { player_left: PlayerSnapshot { position: 
*body_left.position(), linvel: *body_left.linvel(), angvel: body_left.angvel(), input: self.player_left.input, }, player_right: PlayerSnapshot { position: *body_right.position(), linvel: *body_right.linvel(), angvel: body_right.angvel(), input: self.player_right.input, }, doodad: PlayerSnapshot { position: *body_doodad.position(), linvel: *body_doodad.linvel(), angvel: body_doodad.angvel(), input: self.doodad.input, }, } } fn display_state(&self) -> Self::DisplayStateType { let body_left = self.bodies.get(self.player_left.body_handle).unwrap(); let body_right = self.bodies.get(self.player_right.body_handle).unwrap(); let body_doodad = self.bodies.get(self.doodad.body_handle).unwrap(); DemoDisplayState { player_left_position: *body_left.position(), player_right_position: *body_right.position(), doodad_position: *body_doodad.position(), } } } impl Stepper for DemoWorld { fn step(&mut self) { for player in &mut [&mut self.player_left, &mut self.player_right] { let body = self.bodies.get_mut(player.body_handle).unwrap(); body.apply_force( Vector2::new( ((player.input.right as i32) - (player.input.left as i32)) as f32 * 4000.0, 0.0, ), true, ); if player.input.jump { body.apply_impulse(Vector2::new(0.0, 4000.0), true); player.input.jump = false; } } self.pipeline.step( &GRAVITY, &IntegrationParameters { dt: TIMESTEP as f32, ..Default::default() }, &mut self.island_manager, &mut self.broad_phase, &mut self.narrow_phase, &mut self.bodies, &mut self.colliders, &mut self.joints, &mut self.ccd_solver, &(), &(), ); } } impl Command for DemoCommand {} impl DisplayState for DemoDisplayState { fn from_interpolation(state1: &Self, state2: &Self, t: f64) -> Self { DemoDisplayState { player_left_position: state1 .player_left_position .lerp_slerp(&state2.player_left_position, t as f32), player_right_position: state1 .player_right_position .lerp_slerp(&state2.player_right_position, t as f32), doodad_position: state1 .doodad_position .lerp_slerp(&state2.doodad_position, t as f32), } } 
} struct NetworkedServer { server: Server<DemoWorld>, network: MockNetwork, } struct NetworkedClient { client: Client<DemoWorld>, network: MockNetwork, } #[wasm_bindgen] pub struct Demo { server: NetworkedServer, client_left: NetworkedClient, client_right: NetworkedClient, } #[wasm_bindgen] pub enum CommsChannel { ToServerClocksync, ToServerCommand, ToClientClocksync, ToClientCommand, ToClientSnapshot, } #[wasm_bindgen] impl Demo { #[wasm_bindgen(constructor)] pub fn new(seconds_since_startup: f64) -> Self { let config = Config { timestep_seconds: TIMESTEP, //snapshot_send_period: 0.3, //blend_latency: 0.5, tweening_method: TweeningMethod::MostRecentlyPassed, ..Default::default() }; let (server_network, (client_left_network, client_right_network)) = MockNetwork::new_mock_network::<DemoWorld>(); Self { server: NetworkedServer { server: Server::new(config.clone(), seconds_since_startup), network: server_network, }, client_left: NetworkedClient { client: Client::new(config.clone()), network: client_left_network, }, client_right: NetworkedClient { client: Client::new(config), network: client_right_network, }, } } pub fn update(&mut self, delta_seconds: f64, seconds_since_startup: f64) { self.server.network.tick(delta_seconds); self.client_left.network.tick(delta_seconds); self.client_right.network.tick(delta_seconds); self.server.server.update( delta_seconds, seconds_since_startup, &mut self.server.network, ); self.client_left.client.update( delta_seconds, seconds_since_startup, &mut self.client_left.network, ); self.client_right.client.update( delta_seconds, seconds_since_startup, &mut self.client_right.network, ); } fn client(&self, side: PlayerSide) -> &NetworkedClient { match side { PlayerSide::Left => &self.client_left, PlayerSide::Right => &self.client_right, } } fn client_mut(&mut self, side: PlayerSide) -> &mut NetworkedClient { match side { PlayerSide::Left => &mut self.client_left, PlayerSide::Right => &mut self.client_right, } } pub fn issue_command(&mut 
self, command: DemoCommand) { let client = self.client_mut(command.player_side); if let StageMut::Ready(mut ready_client) = client.client.stage_mut() { ready_client.issue_command(command, &mut client.network); } } pub fn get_server_commands(&mut self) -> Array { self.server .server .buffered_commands() .map(|(timestamp, commands)| { commands .iter() .map(move |command| JsValue::from(format!("{} {}", timestamp, command))) }) .flatten() .collect() } pub fn get_client_commands(&mut self, side: PlayerSide) -> Array { match self.client(side).client.stage() { Stage::Ready(client) => client .buffered_commands() .map(|(timestamp, commands)| { commands .iter() .map(move |command| JsValue::from(format!("{} {}", timestamp, command))) }) .flatten() .collect(), _ => Array::new(), } } pub fn new_comms_activity_count(&mut self, side: PlayerSide, channel: CommsChannel) -> usize { match &mut self.client_mut(side).network.connections.get_mut(&0) { Some(connection) => match channel { CommsChannel::ToServerCommand => connection .get_mut::<Timestamped<DemoCommand>>() .new_outgoing_activity_count(), CommsChannel::ToServerClocksync => connection .get_mut::<ClockSyncMessage>() .new_outgoing_activity_count(), CommsChannel::ToClientCommand => connection .get_mut::<Timestamped<DemoCommand>>() .new_incoming_activity_count(), CommsChannel::ToClientClocksync => connection .get_mut::<ClockSyncMessage>() .new_incoming_activity_count(), CommsChannel::ToClientSnapshot => connection .get_mut::<Timestamped<DemoSnapshot>>() .new_incoming_activity_count(), }, None => 0, } } pub fn set_network_delay(&mut self, side: PlayerSide, delay: f64) { self.client_mut(side).network.set_delay(delay); } pub fn connect(&mut self, side: PlayerSide) { self.client_mut(side).network.connect(); } pub fn disconnect(&mut self, side: PlayerSide) { self.client_mut(side).network.disconnect(); } pub fn client_timestamp(&self, side: PlayerSide) -> String { match self.client(side).client.stage() { Stage::SyncingClock(client) => 
format!( "Syncing {}/{}", client.sample_count(), client.samples_needed() ), Stage::SyncingInitialState(client) => { format!("{}", client.last_completed_timestamp()) } Stage::Ready(client) => format!("{}", client.last_completed_timestamp()), } } pub fn client_display_state(&self, side: PlayerSide) -> Option<DemoDisplayState> { match self.client(side).client.stage() { Stage::Ready(client) => Some(client.display_state().display_state().clone()), _ => None, } } pub fn client_reconciliation_status(&self, side: PlayerSide) -> String { match self.client(side).client.stage() { Stage::Ready(client) => format!("{}", client.reconciliation_status()), _ => String::from("Inactive"), } } pub fn server_timestamp(&self) -> String { format!("{}", self.server.server.last_completed_timestamp()) } pub fn server_display_state(&self) -> DemoDisplayState { self.server.server.display_state().inner().clone() } } #[wasm_bindgen(start)] pub fn start() -> Result<(), JsValue> { console_error_panic_hook::set_once(); tracing_wasm::set_as_global_default_with_config( tracing_wasm::WASMLayerConfigBuilder::new() .set_max_level(Level::INFO) .build(), ); Ok(()) }
use std::collections::HashMap; use bstr::ByteSlice; use crate::{ annotations::{AnnotationCollection, AnnotationRecord, ColumnKey}, gui::windows::filters::{ FilterNum, FilterNumOp, FilterString, FilterStringOp, }, }; use super::ColumnPickerMany; #[derive(Debug, Clone, PartialEq)] pub struct QuickFilter<T: ColumnKey> { filter: FilterString, columns: ColumnPickerMany<T>, column_picker_open: bool, } impl<T: ColumnKey> QuickFilter<T> { pub fn new(id: egui::Id) -> Self { let column_picker_id = id.with("column_picker"); Self { filter: Default::default(), columns: ColumnPickerMany::new(column_picker_id), column_picker_open: false, } } pub fn column_picker_mut(&mut self) -> &mut ColumnPickerMany<T> { &mut self.columns } pub fn filter_record<R>(&self, record: &R) -> bool where R: AnnotationRecord<ColumnKey = T>, { if self.filter.op == FilterStringOp::None { return true; } let enabled_cols = self .columns .enabled_columns .iter() .filter_map(|(c, enabled)| if *enabled { Some(c) } else { None }) .collect::<Vec<_>>(); if enabled_cols.is_empty() { return true; } enabled_cols.into_iter().any(|column| { let values = record.get_all(column); values.iter().any(|v| self.filter.filter_bytes(v)) }) } pub fn ui_compact(&mut self, ui: &mut egui::Ui) -> bool { let filter_resp = self.filter.ui(ui); let open = &mut self.column_picker_open; let column_picker = &mut self.columns; let ctx = ui.ctx(); column_picker.ui(ctx, None, open, "Quick filter columns"); if let Some(resp) = filter_resp { resp.has_focus() && ctx.input().key_pressed(egui::Key::Enter) } else { false } } pub fn ui(&mut self, ui: &mut egui::Ui) -> bool { ui.horizontal(|ui| { ui.heading("Quick filter"); if ui .selectable_label(self.column_picker_open, "Choose columns") .clicked() { self.column_picker_open = !self.column_picker_open; } }); let filter_resp = self.filter.ui(ui); let open = &mut self.column_picker_open; let column_picker = &mut self.columns; let ctx = ui.ctx(); column_picker.ui(ctx, None, open, "Quick filter 
columns"); if let Some(resp) = filter_resp { resp.has_focus() && ctx.input().key_pressed(egui::Key::Enter) } else { false } } } pub struct RecordFilter<T: ColumnKey> { seq_id: FilterString, start: FilterNum<usize>, end: FilterNum<usize>, columns: HashMap<T, FilterString>, pub quick_filter: QuickFilter<T>, } impl<T: ColumnKey> RecordFilter<T> { pub fn new<C>(id: egui::Id, records: &C) -> Self where C: AnnotationCollection<ColumnKey = T>, { let id = id.with("record_filter"); let mut columns: HashMap<T, FilterString> = HashMap::new(); let to_remove = [T::seq_id(), T::start(), T::end()]; let mut to_add = records.all_columns(); to_add.retain(|c| !to_remove.contains(c)); for column in to_add { columns.insert(column.to_owned(), FilterString::default()); } let mut quick_filter = QuickFilter::new(id); quick_filter.column_picker_mut().update_columns(records); Self { seq_id: FilterString::default(), start: FilterNum::default(), end: FilterNum::default(), columns, quick_filter, } } pub fn range_filter(&mut self, mut start: usize, mut end: usize) { if start > 0 { start -= 1; } end += 1; self.start.op = FilterNumOp::MoreThan; self.start.arg1 = start; self.end.op = FilterNumOp::LessThan; self.end.arg1 = end; } pub fn chr_range_filter( &mut self, seq_id: &[u8], start: usize, end: usize, ) { if let Ok(seq_id) = seq_id.to_str().map(String::from) { self.seq_id.op = FilterStringOp::ContainedIn; self.seq_id.arg = seq_id; } self.range_filter(start, end); } pub fn filter_record<R>(&self, record: &R) -> bool where R: AnnotationRecord<ColumnKey = T>, { let in_range = self.seq_id.filter_bytes(record.seq_id()) && self.start.filter(record.start()) && self.end.filter(record.end()); in_range && self.quick_filter.filter_record(record) && self.columns.iter().all(|(column, filter)| { if filter.op == FilterStringOp::None { return true; } let values = record.get_all(column); values.into_iter().any(|value| filter.filter_bytes(value)) }) } // TODO: Returns `true` if the filter has been updated and 
should be applied // pub fn ui(&mut self, ui: &mut egui::Ui) -> bool { pub fn ui(&mut self, ui: &mut egui::Ui) { let (optional, mandatory): (Vec<_>, Vec<_>) = self .columns .iter_mut() .partition(|(col, _filter)| T::is_column_optional(col)); ui.label(T::seq_id().to_string()); self.seq_id.ui(ui); ui.separator(); ui.label(T::start().to_string()); self.start.ui(ui); ui.separator(); ui.label(T::end().to_string()); self.end.ui(ui); ui.separator(); let max_height = ui.input().screen_rect.height() - 250.0; let scroll_height = (max_height / 2.0) - 50.0; ui.collapsing("Mandatory fields", |ui| { egui::ScrollArea::from_max_height(scroll_height).show(ui, |ui| { for (column, filter) in mandatory.into_iter() { ui.label(column.to_string()); filter.ui(ui); ui.separator(); } }); }); ui.collapsing("Optional fields", |ui| { egui::ScrollArea::from_max_height(scroll_height).show(ui, |ui| { for (column, filter) in optional.into_iter() { ui.label(column.to_string()); filter.ui(ui); ui.separator(); } }); }); } pub fn add_quick_filter(&mut self, ui: &mut egui::Ui) -> bool { self.quick_filter.ui_compact(ui) } }
use std::os::raw::c_void;
use std::ffi::CString;
use std::sync::mpsc::{Sender, Receiver, channel};
use windows::*;

/// Mirror of the Win32 `SERVICE_STATUS` structure reported to the Service
/// Control Manager (SCM), with one extra trailing field (`handle`).
///
/// Because `handle` is appended *after* all of the official fields and the
/// struct is `#[repr(C)]`, the SCM only reads the official prefix when a
/// pointer to this struct is passed to `SetServiceStatus`.
#[allow(non_camel_case_types, unused, non_snake_case)]
#[repr(C)]
pub struct SERVICE_STATUS {
    dwServiceType : SERVICE_TYPE,
    dwCurrentState : CURRENT_SERVICE_STATUS,
    dwControlsAccepted : ACCEPTED_CONTROLS,
    dwWin32ExitCode : u32,
    dwServiceSpecificExitCode : u32,
    dwCheckPoint : u32,
    dwWaitHint : u32,
    /// This is not part of the official MSDN SERVICE_STATUS struct.
    handle : *const SERVICE_STATUS_HANDLE,
}

impl SERVICE_STATUS {
    /// Registers the control handler for service `name` with the SCM and
    /// reports the service as START_PENDING, then RUNNING.
    ///
    /// Returns the status context plus a channel `Receiver` that receives
    /// `()` when the SCM asks the service to stop or shut down.
    pub fn initialize(name : &str) -> (Self, Receiver<()>) {
        let (tx, rx) = channel();
        // The temporary CString lives until the end of this statement, so
        // the pointer is valid for the duration of the call. The boxed
        // Sender becomes the handler context; it is effectively leaked for
        // the lifetime of the service registration.
        let handle = unsafe {
            RegisterServiceCtrlHandlerExA(
                CString::new(name).unwrap().as_ptr(),
                Self::control_handler_ex,
                Box::new(tx))
        };
        let mut context = SERVICE_STATUS::new(handle);
        context.set_status(CURRENT_SERVICE_STATUS::SERVICE_START_PENDING);
        context.set_status(CURRENT_SERVICE_STATUS::SERVICE_RUNNING);
        (context, rx)
    }

    /// Reports `status` to the SCM, skipping the call when the state has
    /// not actually changed.
    pub fn set_status(&mut self, status : CURRENT_SERVICE_STATUS) {
        if status != self.dwCurrentState {
            self.dwCurrentState = status;
            unsafe { SetServiceStatus(self.handle, self as *const Self); }
        }
    }

    /// Transitions STOP_PENDING -> STOPPED, reporting `code` as the Win32
    /// exit code.
    pub fn exit_with(&mut self, code : u32) {
        self.set_status(CURRENT_SERVICE_STATUS::SERVICE_STOP_PENDING);
        self.dwWin32ExitCode = code;
        self.set_status(CURRENT_SERVICE_STATUS::SERVICE_STOPPED);
    }

    /// Builds the initial status record: STOPPED, accepts stop/shutdown,
    /// 10-second wait hint.
    fn new(handle : *const SERVICE_STATUS_HANDLE) -> SERVICE_STATUS {
        SERVICE_STATUS {
            dwServiceType : SERVICE_TYPE::SERVICE_WIN32,
            dwCurrentState : CURRENT_SERVICE_STATUS::SERVICE_STOPPED,
            dwControlsAccepted : ACCEPTED_CONTROLS::SERVICE_ACCEPT_STOP_SHUTDOWN,
            dwWin32ExitCode : 0,
            dwServiceSpecificExitCode : 0,
            dwCheckPoint : 0,
            dwWaitHint : 10000,
            handle : handle,
        }
    }

    /// SCM control callback: forwards STOP/SHUTDOWN as `()` on the channel;
    /// all other control codes are ignored. Returns 0 (NO_ERROR).
    ///
    /// NOTE(review): `lpEventData` is declared by value as `c_void`, while
    /// the Win32 `HandlerEx` callback receives an `LPVOID` — confirm the
    /// binding in the `windows` module matches the real ABI. The result of
    /// `tx.send(())` is deliberately ignored: the receiver may already be
    /// gone during shutdown.
    #[allow(unused, non_snake_case)]
    extern "system" fn control_handler_ex(dwControl : CONTROL_CODE, dwEventType : u32, lpEventData : c_void, tx : &Sender<()>) -> u32 {
        match dwControl {
            CONTROL_CODE::SERVICE_CONTROL_STOP | CONTROL_CODE::SERVICE_CONTROL_SHUTDOWN => unsafe { tx.send(()); },
            _ => {}
        }
        0
    }
}
use rl::Vector2; use rl::Color; const FPS: i32 = 120; const TRANSPARENT_WHITE: Color = Color { a: 10, ..rl::WHITE }; #[derive(Clone, Debug)] pub struct TurtleState { pub time: f32, /// in pixels pub pos: Vector2, /// in radians pub rot: f32, pub is_drawing: bool, } pub struct GameState { pub spawn_interval: f32, pub move_speed: f32, pub rotate_speed: f32, pub turtles: Vec<TurtleState>, pub running: bool, } pub fn rotate_vec2(angle: f32) -> Vector2 { Vector2 { x: angle.cos(), y: angle.sin(), } } use std::ops::{Add, Mul}; /// linear interpolate pub fn interp<T>(a: T, b: T, coefficient: f32) -> T where T: Mul<f32, Output=T> + Add<Output=T> { a * (1. - coefficient) + b * coefficient } pub fn interpolate_state(states: &Vec<TurtleState>, time: f32) -> Option<TurtleState> { let mut i = 0; while i < states.len() { let state_after = &states[i]; if state_after.time > time { if i == 0 { return None } let state_before = &states[i-1]; let interp_co: f32 = // coefficient (time - state_before.time) / (state_after.time - state_before.time); return Some(TurtleState { time, pos: interp(state_before.pos, state_after.pos, interp_co), rot: interp(state_before.rot, state_after.rot, interp_co), is_drawing: state_before.is_drawing }) } i += 1; } return None } /// Draw turtle /// return: if turtle is at the end of script pub fn draw_turtle(states: &Vec<TurtleState>, time: f32) -> bool { if let Some(current_state) = interpolate_state(&states, time) { { let p = current_state.pos; let a = current_state.rot; if current_state.is_drawing { rl::draw_circle_v(p, 3., rl::BLACK); rl::draw_triangle(rotate_vec2(a + 140.) * 5. + p, rotate_vec2(a + 0.) * 5. + p, rotate_vec2(a - 140.) * 5. + p, rl::VIOLET); } else { rl::draw_triangle_lines(rotate_vec2(a + 140.) * 5. + p, rotate_vec2(a + 0.) * 5. + p, rotate_vec2(a - 140.) * 5. 
+ p, rl::VIOLET); } } true } else { false } } pub fn run_game(game_state: &GameState) { rl::set_trace_log(rl::LOG_WARNING); rl::init_window(800, 450, "raylib [core] example - basic window"); rl::set_target_fps(FPS); let states = &game_state.turtles; let screen_center = Vector2::new(rl::get_screen_width() as f32 / 2., rl::get_screen_height() as f32 / 2.); let camera = rl::Camera2D { target: Vector2::zero(), offset: screen_center, rotation: 0., zoom: 1., }; // all the turtles let mut time_offsets = vec![]; { let mut i = 1; while let Some(_) = interpolate_state(&states, game_state.spawn_interval * i as f32) { time_offsets.push(game_state.spawn_interval * i as f32); i += 1; } } let mut next_spawn_time = 0.; while !rl::window_should_close() && game_state.running { let dt = rl::get_frame_time(); if next_spawn_time <= 0. { next_spawn_time += game_state.spawn_interval; time_offsets.push(0.); } rl::begin_drawing(); // rl::clear_background(TRANSPARENT_WHITE); rl::draw_rectangle(0, 0, rl::get_screen_width(), rl::get_screen_height(), TRANSPARENT_WHITE); rl::begin_mode_2d(camera); time_offsets = time_offsets.into_iter().filter(|time| draw_turtle(&states, *time)).collect(); rl::end_mode_2d(); rl::end_drawing(); for time_offset in time_offsets.iter_mut() { *time_offset += dt; } next_spawn_time -= dt; } rl::close_window(); }
//! Contains the sync API. This is only available when the `sync` feature is enabled. mod change_stream; mod client; mod coll; mod cursor; mod db; pub mod gridfs; #[cfg(test)] mod test; pub use change_stream::{ChangeStream, SessionChangeStream}; pub use client::{session::ClientSession, Client}; pub use coll::Collection; pub use cursor::{Cursor, SessionCursor, SessionCursorIter}; pub use db::Database; #[cfg(feature = "tokio-sync")] lazy_static::lazy_static! { pub(crate) static ref TOKIO_RUNTIME: tokio::runtime::Runtime = { match tokio::runtime::Runtime::new() { Ok(runtime) => runtime, Err(err) => panic!("Error occurred when starting the underlying async runtime: {}", err) } }; }
use std::thread;
use std::time::Duration;
use tokio::prelude::*;

use rustygear::client::{Client, WorkerJob};

/// When we use the status method, we need to be async!
///
/// Sends a 50/100 progress update for the job, waits one second, and then
/// returns the payload "all done".
///
/// NOTE(review): `thread::sleep` blocks the executor thread inside an async
/// fn — a tokio async timer would be the non-blocking equivalent; confirm
/// against the tokio version in use before changing. This handler is
/// currently unused: the `can_do_async("status", ...)` registration below
/// is commented out.
async fn status_user(mut job: WorkerJob) -> Result<Vec<u8>, io::Error> {
    job.work_status(50, 100).await?;
    thread::sleep(Duration::from_secs(1));
    let mut rs = Vec::new();
    rs.extend_from_slice("all done".as_bytes());
    Ok(rs)
}

/// Example gearman worker: connects to a local gearmand, registers two
/// functions, and processes jobs until the connection ends.
#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    env_logger::init();
    let worker = Client::new();
    worker
        .add_server("127.0.0.1:4730")
        .connect()
        .await
        .expect("CONNECT failed")
        // "reverse": echo the job payload back reversed.
        .can_do("reverse", |job| {
            let payload = String::from_utf8(job.payload().to_vec()).unwrap();
            println!("reversing {}", payload);
            let reversed: String = payload.chars().rev().collect();
            let reversed: Vec<u8> = reversed.into_bytes();
            Ok(reversed)
        })
        .await
        .expect("CAN_DO reverse failed")
        // "alwaysfail": demonstrates reporting a job failure to the server.
        .can_do("alwaysfail", |_job| {
            Err(io::Error::new(io::ErrorKind::Other, "Always fails"))
        })
        .await
        .expect("CAN_DO alwaysfail failed")
        //.can_do_async("status", status_user)
        .work()
        .await
        .expect("WORK FAILED");
    Ok(())
}
use getopts; use rpassword; use std::fs::File; use std::io::{stdout, Read, Write}; use std::path::PathBuf; use std::path::Path; use std::process::exit; use audio_backend::{BACKENDS, Sink}; use authentication::{Credentials, facebook_login, discovery_login}; use cache::{Cache, DefaultCache, NoCache}; use player::Player; use session::{Bitrate, Config, Session}; use version; use APPKEY; pub fn find_backend(name: Option<&str>) -> &'static (Fn() -> Box<Sink> + Send + Sync) { match name { Some("?") => { println!("Available Backends : "); for (&(name, _), idx) in BACKENDS.iter().zip(0..) { if idx == 0 { println!("- {} (default)", name); } else { println!("- {}", name); } } exit(0); }, Some(name) => { BACKENDS.iter().find(|backend| name == backend.0).expect("Unknown backend").1 }, None => { BACKENDS.first().expect("No backends were enabled at build time").1 } } } pub fn load_appkey<P: AsRef<Path>>(path: Option<P>) -> Vec<u8> { path.map(|path| { let mut file = File::open(path).expect("Could not open app key."); let mut data = Vec::new(); file.read_to_end(&mut data).unwrap(); data }).or_else(|| APPKEY.map(ToOwned::to_owned)).unwrap() } pub fn add_session_arguments(opts: &mut getopts::Options) { opts.optopt("c", "cache", "Path to a directory where files will be cached.", "CACHE") .reqopt("n", "name", "Device name", "NAME") .optopt("b", "bitrate", "Bitrate (96, 160 or 320). Defaults to 160", "BITRATE"); if APPKEY.is_none() { opts.reqopt("a", "appkey", "Path to a spotify appkey", "APPKEY"); } else { opts.optopt("a", "appkey", "Path to a spotify appkey", "APPKEY"); }; } pub fn add_authentication_arguments(opts: &mut getopts::Options) { opts.optopt("u", "username", "Username to sign in with", "USERNAME") .optopt("p", "password", "Password", "PASSWORD"); if cfg!(feature = "facebook") { opts.optflag("", "facebook", "Login with a Facebook account"); } } pub fn add_player_arguments(opts: &mut getopts::Options) { opts.optopt("", "backend", "Audio backend to use. Use '?' 
to list options", "BACKEND"); } pub fn create_session(matches: &getopts::Matches) -> Session { info!("librespot {} ({}). Built on {}.", version::short_sha(), version::commit_date(), version::short_now()); let appkey = load_appkey(matches.opt_str("a")); let name = matches.opt_str("n").unwrap(); let bitrate = match matches.opt_str("b").as_ref().map(String::as_ref) { None => Bitrate::Bitrate160, // default value Some("96") => Bitrate::Bitrate96, Some("160") => Bitrate::Bitrate160, Some("320") => Bitrate::Bitrate320, Some(b) => { error!("Invalid bitrate {}", b); exit(1) } }; let cache = matches.opt_str("c").map(|cache_location| { Box::new(DefaultCache::new(PathBuf::from(cache_location)).unwrap()) as Box<Cache + Send + Sync> }).unwrap_or_else(|| Box::new(NoCache) as Box<Cache + Send + Sync>); let config = Config { application_key: appkey, user_agent: version::version_string(), device_name: name, bitrate: bitrate, }; Session::new(config, cache) } pub fn get_credentials(session: &Session, matches: &getopts::Matches) -> Credentials { let credentials = session.cache().get_credentials(); match (matches.opt_str("username"), matches.opt_str("password"), credentials) { (Some(username), Some(password), _) => Credentials::with_password(username, password), (Some(ref username), _, Some(ref credentials)) if *username == credentials.username => credentials.clone(), (Some(username), None, _) => { print!("Password for {}: ", username); stdout().flush().unwrap(); let password = rpassword::read_password().unwrap(); Credentials::with_password(username.clone(), password) } (None, _, _) if cfg!(feature = "facebook") && matches.opt_present("facebook") => facebook_login().unwrap(), (None, _, Some(credentials)) => credentials, (None, _, None) if cfg!(feature = "discovery") => { info!("No username provided and no stored credentials, starting discovery ..."); discovery_login(&session.config().device_name, session.device_id()).unwrap() } (None, _, None) => { error!("No credentials provided"); 
exit(1) } } } pub fn create_player(session: &Session, matches: &getopts::Matches) -> Player { let make_backend = find_backend(matches.opt_str("backend").as_ref().map(AsRef::as_ref)); Player::new(session.clone(), move || make_backend()) }
// Copyright 2019 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // https://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // use super::*; use std::fmt; use std::ops::Deref; use std::ptr; /// Unsized string-slice type guaranteed to contain a well-formed [IETF-RFC3986] [relative reference]. /// /// [relative reference]: https://tools.ietf.org/html/rfc3986#section-4.2 /// /// The sized counterpart is [`RelRefBuf`]. /// /// *This type cannot hold a network path*. If this type contains a path that looks like a network /// path, it will be considered [degenerate](crate::RelRef::is_degenerate) and you will not be able /// to losslessly convert it to a [`UriRef`](crate::UriRef) or [`UriRefBuf`](crate::UriRefBuf). /// See ["Network Path Support"](index.html#network-path-support) for more details. /// /// You can create static constants with this class by using the [`rel_ref!`] macro: /// /// [`rel_ref!`]: macro.rel_ref.html /// /// ``` /// # use async_coap_uri::*; /// let uri = rel_ref!("/test?query"); /// let components = uri.components(); /// /// assert_eq!(None, components.scheme()); /// assert_eq!(None, components.raw_host()); /// assert_eq!(None, components.port()); /// assert_eq!("/test", components.raw_path()); /// assert_eq!(Some("query"), components.raw_query()); /// ``` /// /// ## RelRef and Deref /// /// You might think that since both relative and absolute URIs are just special /// cases of URIs that they could both safely implement [`Deref<Target=UriRef>`](core::ops::Deref). 
/// This is true for [`Uri`], but not [`RelRef`]. This section is dedicated to explaining why.
///
/// There is this pesky [section 4.2 of RFC3986](https://tools.ietf.org/html/rfc3986#section-4.2)
/// that throws a wrench into that noble endeavour:
///
/// > A path segment that contains a colon character (e.g., "this:that")
/// > cannot be used as the first segment of a relative-path reference, as
/// > it would be mistaken for a scheme name. Such a segment must be
/// > preceded by a dot-segment (e.g., "./this:that") to make a relative-
/// > path reference.
///
/// This causes big problems for type-safety when derefing a [`RelRef`] into a [`UriRef`]: there is
/// no way for [`UriRef`] to know that it came from a [`RelRef`] and thus recognize that something
/// like `rel_ref!("this:that")` does *NOT* have a scheme of `this`.
///
/// These are tricky edge cases that have serious security implications---it's important
/// that this case be considered and handled appropriately.
///
/// The solution used in this library is to make the transition from [`RelRef`] to [`UriRef`] not
/// guaranteed. However, a transition from a [`RelRef`] to a [`RelRefBuf`] is guaranteed, since the
/// offending colon can be escaped in that case. This is preferred instead of prepending a `"./"`,
/// due to the additional complications that could occur when manipulating paths.
///
/// You can check any [`RelRef`] for this degenerate condition via the method
/// [`is_degenerate()`](#method.is_degenerate).
///
/// [IETF-RFC3986]: https://tools.ietf.org/html/rfc3986
#[derive(Eq, Hash)]
pub struct RelRef(pub(super) UriRef);

_impl_uri_traits_base!(RelRef);

// A `RelRef` dereferences straight to `str`, NOT to `UriRef` — see the
// "RelRef and Deref" section of the type docs above for the rationale.
impl Deref for RelRef {
    type Target = str;

    fn deref(&self) -> &Self::Target {
        self.as_str()
    }
}

impl Default for &RelRef {
    /// Returns an *empty relative reference*.
    ///
    /// Empty relative references do nothing but clear the base fragment when resolved
    /// against a base.
    fn default() -> Self {
        irel_ref!("")
    }
}

impl Default for &mut RelRef {
    /// Mutable version of `(&RelRef)::default`.
    ///
    /// Despite being marked mutable, since the length is zero the value is effectively immutable.
    fn default() -> Self {
        use std::slice::from_raw_parts_mut;
        use std::str::from_utf8_unchecked_mut;
        unsafe {
            // SAFETY: An empty slice is pretty harmless, mutable or not.
            //         (A zero-length slice from a null pointer never dereferences it.)
            let empty_slice = from_raw_parts_mut(ptr::null_mut::<u8>(), 0);
            let empty_string = from_utf8_unchecked_mut(empty_slice);
            RelRef::from_str_unchecked_mut(empty_string)
        }
    }
}

impl AnyUriRef for RelRef {
    /// Writes out this relative reference in an unambiguous form, repairing the two
    /// degenerate cases described in the type docs:
    ///
    /// * a colon in the first path segment is written percent-escaped (`%3A`) so it
    ///   cannot be mistaken for a scheme delimiter;
    /// * a leading `"//"` is prefixed with `"/."` so it cannot be mistaken for a
    ///   network-path authority.
    unsafe fn write_to_unsafe<T: fmt::Write + ?Sized>(&self, write: &mut T) -> fmt::Result {
        if let Some(i) = self.colon_in_first_path_segment() {
            // Replace the offending colon (at byte index `i`) with its escaped form.
            write!(write, "{}%3A{}", &self[..i], &self[i + 1..])
        } else {
            if self.starts_with("//") {
                write.write_str("/.")?;
            }
            write.write_str(self.as_str())
        }
    }

    fn is_empty(&self) -> bool {
        self.0.is_empty()
    }

    /// Determines what kind of relative reference this is:
    ///
    /// This function may return any one of the following values:
    ///
    /// * [`UriType::Fragment`](enum.UriType.html#variant.Fragment)
    /// * [`UriType::Query`](enum.UriType.html#variant.Query)
    /// * [`UriType::AbsolutePath`](enum.UriType.html#variant.AbsolutePath)
    /// * [`UriType::RelativePath`](enum.UriType.html#variant.RelativePath)
    fn uri_type(&self) -> UriType {
        // Classified purely by the first character; anything else is a relative path.
        if self.starts_with('#') {
            UriType::Fragment
        } else if self.starts_with('?') {
            UriType::Query
        } else if self.starts_with('/') {
            UriType::AbsolutePath
        } else {
            UriType::RelativePath
        }
    }

    /// Breaks down this relative reference into its [raw components][UriRawComponents].
    ///
    /// Scheme/authority fields are always `None` — a relative reference has neither.
    fn components(&self) -> UriRawComponents<'_> {
        UriRawComponents {
            scheme: None,
            authority: None,
            userinfo: None,
            host: None,
            port: None,
            path: self.path_as_rel_ref(),
            query: self.raw_query(),
            fragment: self.raw_fragment(),
        }
    }
}

/// RelRef will always format the relative reference for display in an unambiguous fashion.
impl fmt::Display for RelRef { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { self.write_to(f) } } impl RelRef { /// Attempts to convert a string slice into a [`&RelRef`](RelRef), returning `Err(ParseError)` /// if the string slice contains data that is not a valid relative-reference. pub fn from_str(s: &str) -> Result<&RelRef, ParseError> { if let Some(first_error) = s.unescape_uri().first_error() { Err(ParseError::from(first_error)) } else { Ok(unsafe { Self::from_str_unchecked(s) }) } } /// Determines if the given string can be considered a well-formed [relative-reference]. /// [relative-reference]: https://tools.ietf.org/html/rfc3986#section-4.2 pub fn is_str_valid<S: AsRef<str>>(s: S) -> bool { s.as_ref().unescape_uri().first_error().is_none() } /// Constructs a new `RelRefBuf` from a `&RelRef`, disambiguating if degenerate. #[inline(always)] pub fn to_rel_ref_buf(&self) -> RelRefBuf { RelRefBuf::from_rel_ref(self) } /// Constructs a new `UriRefBuf` from a `&RelRef`, disambiguating if degenerate. pub fn to_uri_ref_buf(&self) -> UriRefBuf { self.to_rel_ref_buf().into() } /// Casts this relative reference to a string slice. #[inline(always)] pub const fn as_str(&self) -> &str { self.0.as_str() } /// Casts a non-degenerate relative reference to a `&UriRef`. /// Returns `None` if the relative reference [is degenerate][RelRef::is_degenerate]. pub fn try_as_uri_ref(&self) -> Option<&UriRef> { if self.is_degenerate() { None } else { Some(&self.0) } } /// Returns a [`Cow<UriRef>`] that usually just contains a reference to /// this slice, but will contain an owned instance if this relative reference /// [is degenerate][RelRef::is_degenerate]. #[cfg(feature = "std")] pub fn as_uri_ref(&self) -> Cow<'_, UriRef> { if let Some(uri_ref) = self.try_as_uri_ref() { Cow::Borrowed(uri_ref) } else { Cow::Owned(self.to_uri_ref_buf()) } } } /// # URI Component Accessors impl RelRef { /// Trims the query and fragment from this relative reference, leaving only the path. 
/// /// See also [`RelRef::trim_query`]. #[must_use = "this returns a new slice, without modifying the original"] pub fn path_as_rel_ref(&self) -> &RelRef { self.trim_query() } /// See [`UriRef::query_as_rel_ref`] for more information. #[must_use = "this returns a new slice, without modifying the original"] #[inline(always)] pub fn query_as_rel_ref(&self) -> Option<&RelRef> { self.0.query_as_rel_ref() } /// See [`UriRef::raw_path`] for more information. #[must_use] #[inline(always)] pub fn raw_path(&self) -> &str { self.path_as_rel_ref().as_str() } /// See [`UriRef::raw_query`] for more information. #[must_use = "this returns a new slice, without modifying the original"] #[inline(always)] pub fn raw_query(&self) -> Option<&str> { self.0.raw_query() } /// See [`UriRef::raw_fragment`] for more information. #[must_use = "this returns a new slice, without modifying the original"] #[inline(always)] pub fn raw_fragment(&self) -> Option<&str> { self.0.raw_fragment() } /// See [`UriRef::raw_path_segments`] for more information. pub fn raw_path_segments(&self) -> impl Iterator<Item = &str> { let path = self.path_as_rel_ref(); let mut ret = path.as_str().split('/'); if path.is_empty() { // Skip non-existant segments let _ = ret.next(); } else if path.starts_with('/') { // Skip leading slash. let _ = ret.next(); } ret } /// See [`UriRef::raw_query_items`] for more information. #[inline(always)] pub fn raw_query_items(&self) -> impl Iterator<Item = &str> { self.0.raw_query_items() } /// See [`UriRef::raw_query_key_values`] for more information. #[inline(always)] pub fn raw_query_key_values(&self) -> impl Iterator<Item = (&str, &str)> { self.0.raw_query_key_values() } /// See [`UriRef::fragment`] for more information. #[must_use] #[cfg(feature = "std")] #[inline(always)] pub fn fragment(&self) -> Option<Cow<'_, str>> { self.0.fragment() } /// See [`UriRef::path_segments`] for more information. 
#[cfg(feature = "std")] #[inline(always)] pub fn path_segments(&self) -> impl Iterator<Item = Cow<'_, str>> { self.0.path_segments() } /// See [`UriRef::query_items`] for more information. #[cfg(feature = "std")] #[inline(always)] pub fn query_items(&self) -> impl Iterator<Item = Cow<'_, str>> { self.0.query_items() } /// See [`UriRef::query_key_values`] for more information. #[cfg(feature = "std")] #[inline(always)] pub fn query_key_values(&self) -> impl Iterator<Item = (Cow<'_, str>, Cow<'_, str>)> { self.0.query_key_values() } /// See [`UriRef::has_trailing_slash`] for more information. #[must_use] #[inline(always)] pub fn has_trailing_slash(&self) -> bool { self.0.has_trailing_slash() } /// Determines if this [`RelRef`] is degenerate specifically because it is a relative path /// with a colon in the first path segment and no special characters appearing /// before it. /// /// See the section ["RelRef"](#relref-and-deref) for more details. #[must_use] pub fn colon_in_first_path_segment(&self) -> Option<usize> { for (i, b) in self.bytes().enumerate() { match b { b if i == 0 && (b as char).is_numeric() => return None, b if (b as char).is_ascii_alphanumeric() => continue, b'+' | b'-' | b'.' => continue, b':' => return Some(i), _ => return None, } } None } /// Determines if this [`RelRef`] is degenerate. /// /// See the section ["RelRef"](#relref-and-deref) for more details. pub fn is_degenerate(&self) -> bool { self.starts_with("//") || self.colon_in_first_path_segment().is_some() } } /// # URI Resolution impl RelRef { /// Resolves a relative URI against this relative URI, yielding a /// new relative URI as a `RelRefBuf`. 
#[cfg(feature = "std")] #[must_use] pub fn resolved_rel_ref<UF: AsRef<RelRef>>(&self, dest: UF) -> RelRefBuf { let mut ret = String::with_capacity(self.len() + dest.as_ref().len()); self.write_resolved(dest.as_ref(), &mut ret) .expect("URI resolution failed"); ret.shrink_to_fit(); // SAFETY: `write_resolved` is guaranteed to write well-formed RelRefs when // both the base and target are RelRefs. let mut ret = unsafe { RelRefBuf::from_string_unchecked(ret) }; ret.disambiguate(); ret } } /// # Trimming impl RelRef { /// Returns this relative reference slice without the fragment component. #[must_use = "this returns the trimmed uri as a new slice, \ without modifying the original"] pub fn trim_fragment(&self) -> &RelRef { // SAFETY: Trimming on a boundary guaranteed not to be inside of an escaped byte. unsafe { RelRef::from_str_unchecked(self.0.trim_fragment().as_str()) } } /// Returns this relative reference slice without the query or fragment components. #[must_use = "this returns the trimmed uri as a new slice, \ without modifying the original"] pub fn trim_query(&self) -> &RelRef { // SAFETY: Trimming on a boundary guaranteed not to be inside of an escaped byte. unsafe { RelRef::from_str_unchecked(self.0.trim_query().as_str()) } } /// See [`UriRef::trim_resource`] for more information. #[must_use = "this returns the trimmed uri as a new slice, \ without modifying the original"] pub fn trim_resource(&self) -> &RelRef { // SAFETY: Trimming on a boundary guaranteed not to be inside of an escaped byte. unsafe { RelRef::from_str_unchecked(self.0.trim_resource().as_str()) } } /// Removes any trailing slash that might be at the end of the path, along with /// the query and fragment. /// /// If the path consists of a single slash ("`/`"), then it is not removed. 
/// /// ## Examples /// /// ``` /// use async_coap_uri::prelude::*; /// assert_eq!(rel_ref!("a").trim_trailing_slash(), rel_ref!("a")); /// assert_eq!(rel_ref!("a/b/c/?blah#frag").trim_trailing_slash(),rel_ref!("a/b/c")); /// assert_eq!(rel_ref!("/").trim_trailing_slash(), rel_ref!("/")); /// assert_eq!(rel_ref!(unsafe "//").trim_trailing_slash(), rel_ref!("/")); /// assert_eq!(rel_ref!(unsafe "//foo/?bar").trim_trailing_slash(),rel_ref!(unsafe "//foo")); /// ``` /// /// Note that the behavior of this method is different than the behavior for /// [`UriRef::trim_trailing_slash`]\: "`//`" is considered to be a path starting with two /// slashes rather than a network path with an empty authority and an empty path: /// /// ``` /// # use async_coap_uri::prelude::*; /// assert_eq!(rel_ref!(unsafe "//").trim_trailing_slash(), rel_ref!("/")); /// assert_eq!(rel_ref!(unsafe "///").trim_trailing_slash(), rel_ref!(unsafe "//")); /// assert_eq!(rel_ref!(unsafe "////").trim_trailing_slash(), rel_ref!(unsafe "///")); /// ``` /// #[must_use = "this returns the trimmed uri as a new slice, \ without modifying the original"] pub fn trim_trailing_slash(&self) -> &RelRef { let path_end = self.0.path_end(); if path_end > 1 && &self[path_end - 1..path_end] == "/" { unsafe { Self::from_str_unchecked(&self[..path_end - 1]) } } else { self.trim_query() } } /// Returns this relative reference slice without any leading slashes. #[must_use = "this returns the trimmed uri as a new slice, \ without modifying the original"] pub fn trim_leading_slashes(&self) -> &RelRef { // SAFETY: Trimming on a boundary guaranteed not to be inside of an escaped byte. unsafe { RelRef::from_str_unchecked(self.trim_start_matches('/')) } } /// Returns this relative reference slice without any leading instances of `"./"` or `"/."`. 
#[must_use = "this returns the trimmed uri as a new slice, \ without modifying the original"] pub fn trim_leading_dot_slashes(&self) -> &RelRef { // SAFETY: Trimming on a boundary guaranteed not to be inside of an escaped byte. unsafe { let mut str_ref = self.as_str(); while str_ref.starts_with("/./") { str_ref = &str_ref[2..]; } str_ref = str_ref.trim_start_matches("./"); if str_ref == "." { str_ref = &str_ref[..0]; } RelRef::from_str_unchecked(str_ref) } } /// Returns this relative reference slice without its first path segment. #[must_use = "this returns the leading path item trimmed uri as a new slice, \ without modifying the original"] pub fn trim_leading_path_segment(&self) -> (&str, &RelRef) { let trimmed = self.trim_leading_slashes(); if let Some(i) = trimmed.find(|c| c == '/' || c == '?' || c == '#') { match trimmed.as_bytes()[i] { b'/' => (&trimmed[..i], unsafe { // SAFETY: Trimming on a boundary guaranteed not to // be inside of an escaped byte. RelRef::from_str_unchecked(&trimmed[i + 1..]) }), _ => (&trimmed[..i], unsafe { // SAFETY: Trimming on a boundary guaranteed not to // be inside of an escaped byte. RelRef::from_str_unchecked(&trimmed[i..]) }), } } else { (trimmed.as_str(), unsafe { // SAFETY: Trimming on a boundary guaranteed not to // be inside of an escaped byte. RelRef::from_str_unchecked(&trimmed[trimmed.len()..]) }) } } #[must_use] fn _trim_leading_n_path_segments(&self, n: usize) -> (&str, &RelRef) { let mut next = self; for _ in 0..n { next = next.trim_leading_path_segment().1; } let i = next.as_ptr() as usize - self.as_ptr() as usize; ((&self[..i]).trim_end_matches('/'), next) } /// Returns a tuple with a string slice contianing the first `n` path segments and /// a `&RelRef` containing the rest of the relative reference. 
#[must_use = "this returns the trimmed uri as a new slice, without modifying the original"] pub fn trim_leading_n_path_segments(&self, n: usize) -> (&str, &RelRef) { self.trim_leading_slashes()._trim_leading_n_path_segments(n) } /// Attempts to return a shortened version of this relative reference that is /// relative to `base`. #[must_use = "this returns the trimmed uri as a new slice, without modifying the original"] pub fn trim_to_shorten(&self, base: &RelRef) -> Option<&RelRef> { self.0.trim_to_shorten(base.try_as_uri_ref()?) } } /// # Unsafe Methods /// /// `RelRef` needs some unsafe methods in order to function properly. This section is where /// they are all located. impl RelRef { /// Converts a string slice to a `RelRef` slice without checking /// that the string contains valid URI-Reference. /// /// See the safe version, [`from_str`](#method.from_str), for more information. /// /// ## Safety /// /// This function is unsafe because it does not check that the string passed to /// it is a valid URI-reference. If this constraint is violated, undefined behavior /// results. #[inline(always)] pub unsafe fn from_str_unchecked(s: &str) -> &RelRef { &*(s as *const str as *const RelRef) } /// Converts a string slice to a `RelRef` slice without checking /// that the string contains valid URI-Reference; mutable version. /// /// See the immutable version, [`from_str_unchecked`](#method.from_str), for more information. #[inline(always)] pub unsafe fn from_str_unchecked_mut(s: &mut str) -> &mut RelRef { &mut *(s as *mut str as *mut RelRef) } /// Returns this slice as a mutable `str` slice. /// /// ## Safety /// /// This is unsafe because it allows you to change the contents of the slice in /// such a way that would make it no longer consistent with the `UriRef`'s promise /// that it can only ever contain a valid URI-reference. 
#[inline(always)] pub unsafe fn as_mut_str(&mut self) -> &mut str { self.0.as_mut_str() } /// Directly converts this `&RelRef` to a `&UriRef`, without performing the /// checks that [`as_uri_ref()`](#method.as_uri_ref) does. /// /// This is unsafe for the reasons described [here](#relref-and-deref). #[inline(always)] pub const unsafe fn as_uri_ref_unchecked(&self) -> &UriRef { &self.0 } /// Mutable version of [`RelRef::path_as_rel_ref`]. Trims the query and fragment from this /// relative reference, leaving only the path. #[doc(hidden)] #[must_use = "this returns a new slice, without modifying the original"] pub unsafe fn path_as_rel_ref_mut(&mut self) -> &mut RelRef { let i = self.trim_query().len(); let str_mut: &mut str = core::mem::transmute(self.as_mut_str()); RelRef::from_str_unchecked_mut(&mut str_mut[..i]) } /// See [`UriRef::query_as_rel_ref_mut`] for more information. #[doc(hidden)] #[must_use = "this returns a new slice, without modifying the original"] pub unsafe fn query_as_rel_ref_mut(&mut self) -> Option<&mut RelRef> { self.0.query_as_rel_ref_mut() } /// **Experimental**: Similar to [`raw_path_segment_iter()`], but uses the space of the mutable `UriRef` /// to individually unescape the items. /// /// ## Safety /// /// This method is marked as unsafe because the contents of `self` is undefined /// after it terminates. The method can be used safely as long the buffer which /// held `self` is no longer accessed directly. See [`UriUnescapeBuf`] for an example. /// /// [`raw_path_segment_iter()`]: #method.raw_path_segment_iter pub unsafe fn unsafe_path_segment_iter(&mut self) -> impl Iterator<Item = &str> { let path = self.path_as_rel_ref_mut(); let is_empty = path.is_empty(); let starts_with_slash = path.starts_with('/'); let mut_bytes = path.as_mut_str().as_bytes_mut(); let mut ret = mut_bytes.split_mut(|b| *b == b'/').filter_map(|seg| { let seg = std::str::from_utf8_unchecked_mut(seg); if seg == "." 
{ None } else { Some(&*seg.unescape_uri_in_place()) } }); if is_empty || starts_with_slash { // Skip non-existant segments or leading slash let _ = ret.next(); } ret } /// **Experimental**: Similar to [`raw_query_item_iter()`], but uses the space of the mutable `UriRef` /// to individually unescape the query items. /// /// ## Safety /// /// This method is marked as unsafe because the contents of `self` is undefined /// after it terminates. The method can be used safely as long the `&mut UriRef` (and its /// owner) is never directly used again. See [`UriUnescapeBuf`] for an example. /// /// [`raw_query_item_iter()`]: #method.raw_query_item_iter pub unsafe fn unsafe_query_item_iter(&mut self) -> impl Iterator<Item = &str> { let query = self.query_as_rel_ref_mut().unwrap_or_default(); let is_empty = query.is_empty(); let starts_with_delim = query.starts_with(|c| c == '&' || c == ';'); let mut mut_bytes = query.as_mut_str().as_bytes_mut(); if !is_empty && mut_bytes[0] == b'?' { mut_bytes = &mut mut_bytes[1..]; } let mut ret = mut_bytes .split_mut(|b| *b == b'&' || *b == b';') .map(|seg| { let seg = std::str::from_utf8_unchecked_mut(seg); &*seg.unescape_uri_in_place() }); if is_empty || starts_with_delim { // Skip non-existant segments or leading slash let _ = ret.next(); } ret } } #[cfg(test)] mod tests { use super::*; #[test] fn path() { assert_eq!( irel_ref!("example/"), irel_ref!("example/").path_as_rel_ref() ); assert_eq!( irel_ref!(unsafe "http:example.com/blah/"), irel_ref!(unsafe "http:example.com/blah/?q").path_as_rel_ref() ); } #[test] fn path_segment_iter() { assert_eq!( vec!["example", ""], irel_ref!("example/") .raw_path_segments() .collect::<Vec::<_>>() ); assert_eq!( vec!["http:example.com", "blah", ""], irel_ref!(unsafe "http:example.com/blah/?q") .raw_path_segments() .collect::<Vec::<_>>() ); } #[test] fn avoid_scheme_confusion() { assert_eq!(None, irel_ref!("this/that").colon_in_first_path_segment()); assert_eq!(None, 
irel_ref!("1this:that").colon_in_first_path_segment()); assert_eq!(None, irel_ref!("/this:that").colon_in_first_path_segment()); assert_eq!( None, irel_ref!("%20this:that").colon_in_first_path_segment() ); assert_eq!( Some(4), irel_ref!(unsafe "this:that").colon_in_first_path_segment() ); assert_eq!( Some(4), irel_ref!(unsafe "th1s:that").colon_in_first_path_segment() ); assert_eq!( None, irel_ref!(unsafe "this:that").to_uri_ref_buf().scheme() ); assert_eq!(None, irel_ref!(unsafe "this:that").try_as_uri_ref()); assert_eq!( &irel_ref!(unsafe "this:that").to_uri_ref_buf(), irel_ref!("this%3Athat"), ); } #[test] fn trim_leading_path_segment() { assert_eq!( ("example", irel_ref!("")), irel_ref!("example/").trim_leading_path_segment() ); assert_eq!( ("example", irel_ref!("")), irel_ref!("/example/").trim_leading_path_segment() ); assert_eq!( ("a", irel_ref!("b/c/d/")), irel_ref!("a/b/c/d/").trim_leading_path_segment() ); assert_eq!( ("a", irel_ref!("?query")), irel_ref!("a?query").trim_leading_path_segment() ); assert_eq!( ("a", irel_ref!("#frag")), irel_ref!("a#frag").trim_leading_path_segment() ); assert_eq!( ("fool:ish", irel_ref!("/thoughts?")), irel_ref!(unsafe "fool:ish//thoughts?").trim_leading_path_segment() ); assert_eq!( ("", irel_ref!("")), irel_ref!("").trim_leading_path_segment() ); } #[test] fn trim_leading_n_path_segments() { assert_eq!( ("", irel_ref!("a/b/c/d")), irel_ref!("a/b/c/d").trim_leading_n_path_segments(0) ); assert_eq!( ("a", irel_ref!("b/c/d")), irel_ref!("a/b/c/d").trim_leading_n_path_segments(1) ); assert_eq!( ("a/b", irel_ref!("c/d")), irel_ref!("a/b/c/d").trim_leading_n_path_segments(2) ); assert_eq!( ("a/b/c", irel_ref!("d")), irel_ref!("a/b/c/d").trim_leading_n_path_segments(3) ); assert_eq!( ("a/b/c/d", irel_ref!("")), irel_ref!("a/b/c/d").trim_leading_n_path_segments(4) ); assert_eq!( ("a/b/c/d", irel_ref!("")), irel_ref!("a/b/c/d").trim_leading_n_path_segments(5) ); assert_eq!( ("a/b/c", irel_ref!("d?blah")), 
irel_ref!("a/b/c/d?blah").trim_leading_n_path_segments(3) ); assert_eq!( ("a/b/c/d", irel_ref!("?blah")), irel_ref!("a/b/c/d?blah").trim_leading_n_path_segments(4) ); assert_eq!( ("a/b/c/d", irel_ref!("?blah")), irel_ref!("a/b/c/d?blah").trim_leading_n_path_segments(5) ); assert_eq!( ("a/b/c", irel_ref!("d?blah")), irel_ref!("/a/b/c/d?blah").trim_leading_n_path_segments(3) ); } }
use std::cmp::max;
use std::collections::BTreeMap;

/// A subscription service that is active on days `a..=b` (1-indexed) and
/// costs `c` per active day.
struct Service {
    a: i64,
    b: i64,
    c: i64,
}

/// Reads one stdin line and parses it into whitespace-separated `i64`s.
fn read_numbers() -> Vec<i64> {
    let mut line = String::new();
    std::io::stdin().read_line(&mut line).unwrap();
    line.trim_end()
        .split_whitespace()
        .map(|tok| tok.parse().unwrap())
        .collect()
}

fn main() {
    // First line: service count `n` and the daily price cap `c`.
    let header = read_numbers();
    let (n, c) = (header[0], header[1]);

    // One service per subsequent line: `a b c`.
    let services: Vec<Service> = (0..n)
        .map(|_| {
            let nums = read_numbers();
            Service {
                a: nums[0],
                b: nums[1],
                c: nums[2],
            }
        })
        .collect();

    for line in solve(c, services) {
        println!("{}", line);
    }
}

/// Sweep-line over day boundaries: the total cost is the sum over all days of
/// `min(sum of active services' prices, c)`.
fn solve(c: i64, ss: Vec<Service>) -> Vec<String> {
    // events[d] = change in the raw (un-capped) daily price between day d and day d+1.
    let mut events: BTreeMap<i64, i64> = BTreeMap::new();
    let mut max_index = 0;
    for s in ss.iter() {
        let start = s.a - 1;
        *events.entry(start).or_insert(0) += s.c;
        let end = (s.b - 1) + 1;
        *events.entry(end).or_insert(0) -= s.c;
        max_index = max(max_index, end);
    }

    // Walk the boundaries in ascending order, charging the capped price for
    // every day in the gap since the previous boundary.
    let mut total = 0;
    let mut non_prime_price = 0;
    let mut index = 0;
    for (&day, &delta) in events.iter() {
        let capped = if non_prime_price > c { c } else { non_prime_price };
        total += (day - index) * capped;
        non_prime_price += delta;
        index = day;
    }

    vec![format!("{}", total)]
}
// Public submodule declarations; each lives in a file of the same name.
// Names suggest motor-control hardware abstractions — TODO confirm against the crate docs.
pub mod pwm_inverter;
pub mod blocking_delay;
pub mod rotor_position_sensor;
pub mod current_sensor;
#![no_main]

extern crate abxml;
#[macro_use]
extern crate libfuzzer_sys;

use abxml::chunks::XmlTagEndWrapper;
// `TagEnd` is imported presumably to bring `get_id()` into scope — confirm it is the
// trait providing that method.
use abxml::model::TagEnd;

// Fuzz entry point: wrap arbitrary input bytes in the XML tag-end chunk parser and
// call `get_id()`, letting libFuzzer hunt for panics/crashes in the decoding path.
// The result is intentionally discarded — only the absence of a crash matters.
fuzz_target!(|data: &[u8]| {
    let xtew = XmlTagEndWrapper::new(data);
    xtew.get_id();
});
use awc::Client;
use axum_websockets::{
    configuration::get_configuration,
    message::ResultMessage,
    telemetry::{get_subscriber, init_subscriber},
    Application,
};
use futures::{SinkExt, StreamExt};
use once_cell::sync::Lazy;
use std::time::Duration;

// Ensure that 'tracing' stack is only initialized once using `once_cell`.
// TEST_LOG controls whether test output goes to stdout or is swallowed by a sink.
static TRACING: Lazy<()> = Lazy::new(|| {
    let default_filter_level = "info".to_string();
    let subscriber_name = "test".to_string();
    if std::env::var("TEST_LOG").is_ok() {
        let subscriber = get_subscriber(subscriber_name, default_filter_level, std::io::stdout);
        init_subscriber(subscriber);
    } else {
        let subscriber = get_subscriber(subscriber_name, default_filter_level, std::io::sink);
        init_subscriber(subscriber);
    }
});

/// Handle to an application instance spawned for a single test.
pub struct TestApp {
    // Base HTTP address, e.g. "http://127.0.0.1:54321".
    pub address: String,
    // The randomly assigned listen port.
    pub port: u16,
}

impl TestApp {
    /// Opens a websocket connection to `/ws`, sends `message` as a text frame, and
    /// returns the first text frame received, parsed as a `ResultMessage`.
    ///
    /// Ping frames are ignored; any other frame (or stream end/error) panics,
    /// failing the test.
    pub async fn get_first_result(&self, message: &str) -> ResultMessage {
        let (_response, mut connection) = Client::new()
            .ws(format!("{}/ws", self.address))
            .connect()
            .await
            .expect("Failed to connect to websocket.");
        connection
            .send(awc::ws::Message::Text(message.into()))
            .await
            .expect("Failed to send message.");
        loop {
            match connection.next().await {
                Some(Ok(awc::ws::Frame::Text(msg))) => {
                    let msg = serde_json::from_slice::<ResultMessage>(&msg)
                        .expect(&format!("Failed to parse JSON: {:?}", msg));
                    tracing::info!("RESULT: {:?}", msg);
                    return msg;
                }
                Some(Ok(awc::ws::Frame::Ping(_))) => {}
                err => {
                    tracing::error!("Receive message: {:?}", err);
                    panic!("Failed to receive message.");
                }
            }
        }
    }
}

/// Builds and launches the application in the background on a random port,
/// returning a `TestApp` handle for making requests against it.
pub async fn spawn_app() -> TestApp {
    // Set up tracing
    Lazy::force(&TRACING);

    // Randomise configuration to ensure test isolation
    let configuration = {
        let mut c = get_configuration().expect("Failed to read configuration.");
        // Port 0 give us a random available port
        c.port = 0;
        // Short heartbeat/timeout so websocket-timeout tests run quickly.
        c.websocket.heartbeat_interval = Duration::from_millis(50);
        c.websocket.client_timeout = Duration::from_millis(250);
        c
    };

    // Launch app as background task
    let application = Application::build(configuration).expect("Failed to build application.");
    let application_port = application.port();
    // let _ = tokio::spawn(application.run_until_stopped());
    // NOTE(review): the JoinHandle is dropped, detaching the server task; it is
    // torn down when the test runtime shuts down — confirm that is intended.
    let _a = tokio::spawn(async move { application.run_until_stopped().await });

    let test_app = TestApp {
        address: format!("http://127.0.0.1:{}", application_port),
        port: application_port,
    };

    test_app
}
use futures::stream::StreamExt;
use tokio::net::TcpListener;

/// A TCP echo server: every accepted connection has its input copied straight
/// back to it on a spawned task.
#[derive(Debug)]
pub struct Server {
    listener: TcpListener
}

impl Server {
    /// Binds a TCP listener on `bind_addr`.
    ///
    /// NOTE(review): `db_addr` is accepted but never used — presumably reserved
    /// for a future database connection; confirm before removing it.
    ///
    /// Panics if the bind fails.
    pub async fn new(bind_addr: String, db_addr: String) -> Server {
        Server {
            listener: TcpListener::bind(bind_addr).await.unwrap()
        }
    }

    /// Accept loop: for each incoming connection, spawns a task that echoes the
    /// socket's reader back into its writer until EOF or an I/O error.
    ///
    /// NOTE(review): `listener.incoming()` and the `info!`/`error!` macros are not
    /// defined in this fragment — this presumably targets an older tokio plus a
    /// `#[macro_use]` log/tracing crate elsewhere in the file; confirm versions.
    pub async fn run(&mut self) {
        let mut incoming = self.listener.incoming();
        while let Some(conn) = incoming.next().await {
            match conn {
                Ok(mut socket) => {
                    info!("Accepted connection from {:?}", socket.peer_addr());
                    tokio::spawn(async move {
                        let (mut reader, mut writer) = socket.split();
                        // Echo: copy everything read from the socket back to it.
                        match tokio::io::copy(&mut reader, &mut writer).await {
                            Ok(amt) => {
                                println!("wrote {} bytes", amt);
                            }
                            Err(err) => {
                                eprintln!("IO error {:?}", err);
                            }
                        }
                    });
                }
                Err(err) => {
                    // Handle error by printing to STDOUT.
                    // Accept errors are logged and the loop continues.
                    error!("accept error = {:?}", err);
                }
            }
        }
    }
}
use crate::consts::SELECTION_TOLERANCE;
use crate::message_prelude::*;
use crate::tool::ToolActionHandlerData;
use glam::DVec2;
use graphene::{Operation, Quad};

/// The Fill tool: a click fills the layer under the cursor with the primary color.
#[derive(Default)]
pub struct Fill;

#[impl_message(Message, ToolMessage, Fill)]
#[derive(PartialEq, Clone, Debug, Hash)]
pub enum FillMessage {
    // Emitted when the user presses the mouse button with the Fill tool active.
    MouseDown,
}

impl<'a> MessageHandler<ToolMessage, ToolActionHandlerData<'a>> for Fill {
    /// Hit-tests the document with a small quad around the cursor and, if a layer
    /// intersects it, queues a `FillLayer` operation with the current primary color.
    fn process_action(&mut self, _action: ToolMessage, data: ToolActionHandlerData<'a>, responses: &mut VecDeque<Message>) {
        // data.2 carries input state; data.0 the document; data.1 the tool options.
        let mouse_pos = data.2.mouse.position;
        // Expand the click point into a tolerance box so near-misses still hit.
        let tolerance = DVec2::splat(SELECTION_TOLERANCE);
        let quad = Quad::from_box([mouse_pos - tolerance, mouse_pos + tolerance]);

        // `last()` picks the final intersecting layer — presumably the topmost in
        // draw order; confirm against `intersects_quad_root`'s ordering.
        if let Some(path) = data.0.document.intersects_quad_root(quad).last() {
            responses.push_back(
                Operation::FillLayer {
                    path: path.to_vec(),
                    color: data.1.primary_color,
                }
                .into(),
            );
        }
    }
    advertise_actions!(FillMessageDiscriminant; MouseDown);
}
use bytes::{as_i32_le, as_i64_le, i32_as_u8_le, i64_as_u8_le, u32_as_u8_le};
use std::{slice, str};
use wasmtime_runtime::VMMemoryDefinition;

/// Handle to a wasmtime linear memory; `definition` is populated after the
/// instance's memory is created (None until then).
#[derive(Debug)]
pub struct Mem {
    pub definition: Option<*mut VMMemoryDefinition>,
}

impl Mem {
    /// Creates an unattached handle; accessors panic until `definition` is set.
    pub fn new() -> Self {
        Self { definition: None }
    }
}

/// Typed little-endian accessors over a byte-addressable memory.
/// Implementors supply only the two raw slice methods; everything else is derived.
pub trait Actions {
    fn mut_mem_slice(&mut self, start: usize, end: usize) -> &mut [u8];
    fn mem_slice(&self, start: usize, end: usize) -> &[u8];

    fn get_i32(&self, sp: i32) -> i32 {
        let spu = sp as usize;
        // NOTE(review): passes an 8-byte slice for a 32-bit read — values appear to
        // occupy 8-byte slots (stack-pointer style addressing, cf. get_bytes reading
        // the length at sp+8). Confirm `as_i32_le` only consumes the first 4 bytes.
        as_i32_le(self.mem_slice(spu, spu + 8))
    }
    fn set_i32(&mut self, sp: i32, num: i32) {
        self.mut_mem_slice(sp as usize, (sp + 4) as usize)
            .clone_from_slice(&i32_as_u8_le(num));
    }
    // Reads a (pointer, length) pair: pointer at `sp`, length at `sp + 8`.
    fn get_bytes(&self, sp: i32) -> &[u8] {
        let saddr = self.get_i32(sp) as usize;
        let ln = self.get_i32(sp + 8) as usize;
        self._get_bytes(saddr, ln)
    }
    fn _get_bytes(&self, address: usize, ln: usize) -> &[u8] {
        self.mem_slice(address, address + ln)
    }
    // Panics if the referenced bytes are not valid UTF-8.
    fn get_string(&self, sp: i32) -> &str {
        str::from_utf8(self.get_bytes(sp)).unwrap()
    }
    // f64 values are stored as their raw IEEE-754 bit pattern in an i64 slot.
    fn get_f64(&self, sp: i32) -> f64 {
        f64::from_bits(self.get_i64(sp) as u64)
    }
    fn set_i64(&mut self, sp: i32, num: i64) {
        self.mut_mem_slice(sp as usize, (sp + 8) as usize)
            .clone_from_slice(&i64_as_u8_le(num));
    }
    fn set_u32(&mut self, sp: i32, num: u32) {
        self.mut_mem_slice(sp as usize, (sp + 4) as usize)
            .clone_from_slice(&u32_as_u8_le(num));
    }
    // Booleans are stored as a single byte: 1 for true, 0 for false.
    fn set_bool(&mut self, addr: i32, value: bool) {
        let val = if value { 1 } else { 0 };
        self.mut_mem_slice(addr as usize, (addr + 1) as usize)
            .clone_from_slice(&[val]);
    }
    fn set_f64(&mut self, sp: i32, num: f64) {
        self.set_i64(sp, num.to_bits() as i64);
    }
    fn get_i64(&self, sp: i32) -> i64 {
        let spu = sp as usize;
        as_i64_le(self.mem_slice(spu, spu + 8))
    }
}

impl Actions for Mem {
    fn mut_mem_slice(&mut self, start: usize, end: usize) -> &mut [u8] {
        // SAFETY: relies on `definition` pointing at a live VMMemoryDefinition whose
        // base/current_length describe valid memory; panics if never attached.
        unsafe {
            let memory_def = &*self.definition.unwrap();
            &mut slice::from_raw_parts_mut(memory_def.base, memory_def.current_length)[start..end]
        }
    }
    fn mem_slice(&self, start: usize, end: usize) -> &[u8] {
        // SAFETY: same invariant as `mut_mem_slice` above.
        unsafe {
            let memory_def = &*self.definition.unwrap();
            &slice::from_raw_parts(memory_def.base, memory_def.current_length)[start..end]
        }
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    // In-memory stand-in for wasm linear memory, so the trait's derived
    // accessors can be exercised without a wasmtime instance.
    struct TestMem {
        mem: Vec<u8>,
    }
    impl TestMem {
        fn new() -> Self {
            Self { mem: vec![0; 1000] }
        }
    }
    impl Actions for TestMem {
        fn mut_mem_slice(&mut self, start: usize, end: usize) -> &mut [u8] {
            &mut self.mem[start..end]
        }
        fn mem_slice(&self, start: usize, end: usize) -> &[u8] {
            &self.mem[start..end]
        }
    }

    #[test]
    fn get_set_i32() {
        let mut mem = TestMem::new();
        mem.set_i32(100, 3456);
        assert_eq!(mem.get_i32(100), 3456);
    }
    #[test]
    fn get_set_i64() {
        let mut mem = TestMem::new();
        mem.set_i64(100, 3456);
        assert_eq!(mem.get_i64(100), 3456);
    }
}
#[macro_use]
extern crate diesel;

mod schema {
    // Diesel `table!` macro: generates the query DSL for the `posts` table,
    // keyed on `id`.
    table! {
        posts (id) {
            id -> Int4,
            title -> Varchar,
            body -> Text,
            published -> Bool,
        }
    }
}

mod models {
    use schema::posts;

    /// A full row read back from `posts`.
    #[derive(Queryable)]
    pub struct Post {
        pub id: i32,
        pub title: String,
        pub body: String,
        pub published: bool,
    }

    // apparently this can be done without heap storage, but lifetimes spread far..
    /// A new row for insertion into `posts`; `id` and `published` are left to
    /// the database's defaults.
    #[derive(Insertable)]
    #[table_name="posts"]
    pub struct NewPost {
        pub title: String,
        pub body: String,
    }
}

use diesel::prelude::*;
use diesel::sqlite::SqliteConnection;

fn main() {
    // DATABASE_URL overrides the default local file `main.db`.
    let database_url = std::env::var("DATABASE_URL").unwrap_or("main.db".into());
    // The connection is established and immediately dropped: this program only
    // verifies that the database can be opened, performing no queries.
    SqliteConnection::establish(&database_url).unwrap();
}
// C ABI surface for driving the logic-circuit simulator over FFI.
// All functions taking `*mut Data` expect a non-null pointer previously
// returned by `init`; passing anything else is undefined behavior.
use crate::data::Data;
use std::iter;
use crate::data::subnet::SubnetState;
use crate::data::component::components::*;
use crate::data::component::{Component, ComponentId};
use crate::data::component::statefuls::*;

#[cfg(test)]
mod test;

/// Allocates a fresh simulation state. The caller owns the returned pointer
/// and must release it with `exit`.
#[no_mangle]
pub extern "C" fn init() -> *mut Data {
    Box::into_raw(Box::new(Data::new()))
}

/// Frees a state previously returned by `init`.
/// NOTE(review): this exports the unmangled symbol `exit`, which collides
/// with libc's `exit` on most platforms — verify this is intended.
#[no_mangle]
pub extern "C" fn exit(data: *mut Data) {
    unsafe { drop(Box::from_raw(data)) };
}

/// Registers a new subnet with the given id; returns false if it exists.
#[no_mangle]
pub extern "C" fn add_subnet(data: *mut Data, id: i32) -> bool {
    let data = unsafe { &mut *data };
    data.add_subnet(id)
}

/// Removes a subnet by id; returns false if it was not present.
#[no_mangle]
pub extern "C" fn remove_subnet(data: *mut Data, id: i32) -> bool {
    let data = unsafe { &mut *data };
    data.remove_subnet(id)
}

/// Instantiates a component of the given kind with all ports unlinked and
/// returns its new id. Clocks are additionally registered with the
/// scheduler so they toggle on time steps.
#[no_mangle]
pub extern "C" fn add_component(data: *mut Data, component: ComponentId) -> i32 {
    let data = unsafe { &mut *data };

    // Map the FFI enum to a concrete boxed component implementation.
    let comp: Box<dyn Component> = match component {
        ComponentId::Constant => Box::new(Constant::new()),
        ComponentId::Output => Box::new(OutputGate {}),
        ComponentId::Input => Box::new(InputGate {}),
        ComponentId::LED => Box::new(LED {}),
        ComponentId::Button => Box::new(Button::new()),
        ComponentId::Switch => Box::new(Switch::new()),
        ComponentId::Buffer => Box::new(Buffer {}),
        ComponentId::Not => Box::new(NOT {}),
        ComponentId::And => Box::new(AND {}),
        ComponentId::Nand => Box::new(NAND {}),
        ComponentId::Or => Box::new(OR {}),
        ComponentId::Nor => Box::new(NOR {}),
        ComponentId::Xor => Box::new(XOR {}),
        ComponentId::Xnor => Box::new(XNOR {}),
        ComponentId::TriStateBuffer => Box::new(TriBuffer {}),
        ComponentId::TriStateInverter => Box::new(TriInverter {}),
        ComponentId::DFlipFlop => Box::new(DFlipFlop::new()),
        ComponentId::TFlipFlop => Box::new(TFlipFlop::new()),
        ComponentId::JKFlipFlop => Box::new(JKFlipFlop::new()),
        ComponentId::SRFlipFlop => Box::new(SRFlipFlop::new()),
        ComponentId::Probe => Box::new(Probe {}),
        ComponentId::Clock => Box::new(Clock::new()),
    };

    // One `None` link slot per port; unwrap assumes add_component cannot
    // fail for a freshly built component — TODO confirm.
    let p = comp.ports();
    let res = data.add_component(comp, iter::repeat(None).take(p).collect()).unwrap();

    if component == ComponentId::Clock {
        data.clock(res);
    }

    res
}

/// Removes a component by id; returns false if it was not present.
#[no_mangle]
pub extern "C" fn remove_component(data: *mut Data, id: i32) -> bool {
    let data = unsafe { &mut *data };
    data.remove_component(id)
}

/// Connects `component`'s `port` to `subnet`; returns success.
#[no_mangle]
pub extern "C" fn link(data: *mut Data, component: i32, port: i32, subnet: i32) -> bool {
    let data = unsafe { &mut *data };
    data.link(component, port as usize, subnet)
}

/// Disconnects `component`'s `port` from `subnet`; returns success.
#[no_mangle]
pub extern "C" fn unlink(data: *mut Data, component: i32, port: i32, subnet: i32) -> bool {
    let data = unsafe { &mut *data };
    data.unlink(component, port as usize, subnet)
}

/// Advances the simulation by one time step.
#[no_mangle]
pub extern "C" fn tick(data: *mut Data) {
    let data = unsafe { &mut *data };
    data.time_step();
}

/// Current state of a subnet.
/// NOTE(review): unwraps — panics across the FFI boundary if `subnet` is
/// unknown; the caller must pass a valid id.
#[no_mangle]
pub extern "C" fn subnet_state(data: *mut Data, subnet: i32) -> SubnetState {
    let data = unsafe { &mut *data };
    data.subnet_state(subnet).unwrap()
}

/// State seen at a component port; unknown component/port reads as Floating.
#[no_mangle]
pub extern "C" fn port_state(data: *mut Data, component: i32, port: i32) -> SubnetState {
    let data = unsafe { &mut *data };
    data.port_state(component, port as usize).unwrap_or(SubnetState::Floating)
}

/// Simulates pressing an interactive component (e.g. a Button).
#[no_mangle]
pub extern "C" fn press_component(data: *mut Data, id: i32) -> SubnetState {
    let data = unsafe { &mut *data };
    data.press_component(id)
}

/// Simulates releasing an interactive component.
#[no_mangle]
pub extern "C" fn release_component(data: *mut Data, id: i32) -> SubnetState {
    let data = unsafe { &mut *data };
    data.release_component(id)
}
// Platform-dependent COM shim for DXC: on non-Windows the IUnknown vtable
// carries two extra destructor slots (see the dump below), so the interface
// must declare them to keep slot numbering aligned with the C++ layout.
#[cfg(not(windows))]
use crate::os::HRESULT;
use com_rs::{com_interface, IUnknown, IID};

// IID provided by the DXC/WinAdapter shim library at link time.
extern "C" {
    static IID_IUnknown: IID;
}

#[cfg(not(windows))]
// Steal the interface ID from IUnknown:
com_interface! {
    /// Insert complete object and deleting destructor on non-Windows platforms, where Dxc shims IUnknown in WinAdapter.
    /// This requires a virtual destructor (delete is actually used on the base class) which unfortunately makes the struct
    /// binary incompatible.
    ///
    /// See the third and fourth entry:
    /// ```cmd
    /// vtable for 'DxcLibrary' @ 0x7ffff7cbc5f8 (subobject @ 0x5555556bb9e0):
    /// [0]: 0x7ffff6a56d40 <DxcLibrary::QueryInterface(_GUID const&, void**)>
    /// [1]: 0x7ffff6a56d20 <DxcLibrary::AddRef()>
    /// [2]: 0x7ffff6a56d30 <DxcLibrary::Release()>
    /// [3]: 0x7ffff6b36bc0 <IUnknown::~IUnknown()>
    /// [4]: 0x7ffff6a57130 <DxcLibrary::~DxcLibrary()>
    /// [5]: 0x7ffff6a56d50 <DxcLibrary::SetMalloc(IMalloc*)>
    /// [6]: 0x7ffff6a56d60 <DxcLibrary::CreateBlobFromBlob(IDxcBlob*, unsigned int, unsigned int, IDxcBlob**)>
    /// [7]: 0x7ffff6a56d70 <DxcLibrary::CreateBlobFromFile(wchar_t const*, unsigned int*, IDxcBlobEncoding**)>
    /// [8]: 0x7ffff6a56d80 <DxcLibrary::CreateBlobWithEncodingFromPinned(void const*, unsigned int, unsigned int, IDxcBlobEncoding**)>
    /// [9]: 0x7ffff6a56d90 <DxcLibrary::CreateBlobWithEncodingOnHeapCopy(void const*, unsigned int, unsigned int, IDxcBlobEncoding**)>
    /// [10]: 0x7ffff6a56da0 <DxcLibrary::CreateBlobWithEncodingOnMalloc(void const*, IMalloc*, unsigned int, unsigned int, IDxcBlobEncoding**)>
    /// [11]: 0x7ffff6a56db0 <DxcLibrary::CreateIncludeHandler(IDxcIncludeHandler**)>
    /// [12]: 0x7ffff6a56dc0 <DxcLibrary::CreateStreamFromBlobReadOnly(IDxcBlob*, IStream**)>
    /// [13]: 0x7ffff6a56dd0 <DxcLibrary::GetBlobAsUtf8(IDxcBlob*, IDxcBlobEncoding**)>
    /// [14]: 0x7ffff6a56e90 <DxcLibrary::GetBlobAsUtf16(IDxcBlob*, IDxcBlobEncoding**)>
    /// ```
    interface IDxcUnknownShim: IUnknown {
        iid: IID_IUnknown,
        vtable: IDxcUnknownShimVtbl,
        // Slots [3] and [4]: C++ virtual destructor pair occupying the vtable
        // positions shown in the dump above. Never called from Rust.
        fn complete_object_destructor() -> HRESULT;
        fn deleting_destructor() -> HRESULT;
    }
}

#[cfg(windows)]
com_interface! {
    /// Forwards to IUnknown. No-op on Windows
    interface IDxcUnknownShim: IUnknown {
        iid: IID_IUnknown,
        vtable: IDxcUnknownShimVtbl,
    }
}
use crate::ping_result_processors::ping_result_processor_console_logger::PingResultProcessorConsoleLogger;
use crate::ping_result_processors::ping_result_processor_csv_logger::PingResultProcessorCsvLogger;
use crate::ping_result_processors::ping_result_processor_json_logger::PingResultProcessorJsonLogger;
use crate::ping_result_processors::ping_result_processor_latency_bucket_logger::PingResultProcessorLatencyBucketLogger;
use crate::ping_result_processors::ping_result_processor_latency_scatter_logger::PingResultProcessorLatencyScatterLogger;
use crate::ping_result_processors::ping_result_processor_result_scatter_logger::PingResultProcessorResultScatterLogger;
use crate::ping_result_processors::ping_result_processor_text_logger::PingResultProcessorTextLogger;
use crate::{PingResultProcessor, PingResultProcessorConfig};
use futures_intrusive::sync::ManualResetEvent;
use std::sync::Arc;

/// Builds the set of ping-result processors requested by `config`.
///
/// A console logger is always created first; the CSV/JSON/text loggers and
/// the scatter/bucket loggers are appended only when enabled in `config`.
/// Any `extra_ping_result_processors` supplied by the caller are moved to
/// the end of the returned vector, preserving their order.
pub fn new(
    config: &PingResultProcessorConfig,
    mut extra_ping_result_processors: Vec<Box<dyn PingResultProcessor + Send + Sync>>,
    ping_stop_event: Arc<ManualResetEvent>,
) -> Vec<Box<dyn PingResultProcessor + Send + Sync>> {
    // Shared, immutable config handed to every processor.
    let common_config = Arc::new(config.common_config.clone());
    let mut processors: Vec<Box<dyn PingResultProcessor + Send + Sync>> = Vec::new();

    // We always create the console logger for keeping our user informed.
    processors.push(Box::new(PingResultProcessorConsoleLogger::new(
        common_config.clone(),
        ping_stop_event.clone(),
        config.exit_on_fail,
        config.exit_failure_reason.clone(),
    )));

    if let Some(csv_log_path) = &config.csv_log_path {
        processors.push(Box::new(PingResultProcessorCsvLogger::new(common_config.clone(), csv_log_path)));
    }

    if let Some(json_log_path) = &config.json_log_path {
        processors.push(Box::new(PingResultProcessorJsonLogger::new(common_config.clone(), json_log_path)));
    }

    if let Some(text_log_path) = &config.text_log_path {
        processors.push(Box::new(PingResultProcessorTextLogger::new(common_config.clone(), text_log_path)));
    }

    if config.show_result_scatter {
        processors.push(Box::new(PingResultProcessorResultScatterLogger::new(common_config.clone())));
    }

    if config.show_latency_scatter {
        processors.push(Box::new(PingResultProcessorLatencyScatterLogger::new(common_config.clone())));
    }

    if let Some(latency_buckets) = &config.latency_buckets {
        processors.push(Box::new(PingResultProcessorLatencyBucketLogger::new(
            common_config.clone(),
            latency_buckets,
        )));
    }

    // Move all extra ping result processors into the processors.
    processors.append(&mut extra_ping_result_processors);

    processors
}

#[cfg(test)]
mod tests {
    use crate::ping_result_processors::ping_result_processor_factory::new;
    use crate::*;
    use futures_intrusive::sync::ManualResetEvent;
    use std::path::PathBuf;
    use std::sync::Arc;

    // With nothing enabled, only the always-on console logger is built.
    #[test]
    fn create_ping_result_processor_should_work_with_empty_config() {
        let config = PingResultProcessorConfig {
            common_config: PingResultProcessorCommonConfig { quiet_level: RNP_QUIET_LEVEL_NONE },
            exit_on_fail: false,
            exit_failure_reason: None,
            csv_log_path: None,
            json_log_path: None,
            text_log_path: None,
            show_result_scatter: false,
            show_latency_scatter: false,
            latency_buckets: None,
        };
        let ping_clients = new(&config, vec![], Arc::new(ManualResetEvent::new(false)));
        assert_eq!(1, ping_clients.len());
    }

    // With every option enabled, all seven processors are built.
    #[test]
    fn create_ping_result_processor_should_work_with_valid_config() {
        let config = PingResultProcessorConfig {
            common_config: PingResultProcessorCommonConfig { quiet_level: RNP_QUIET_LEVEL_NO_PING_RESULT },
            exit_on_fail: false,
            exit_failure_reason: None,
            csv_log_path: Some(PathBuf::from("log.csv")),
            json_log_path: Some(PathBuf::from("log.json")),
            text_log_path: Some(PathBuf::from("log.txt")),
            show_result_scatter: true,
            show_latency_scatter: true,
            latency_buckets: Some(vec![0.1, 0.5, 1.0, 10.0]),
        };
        let ping_clients = new(&config, vec![], Arc::new(ManualResetEvent::new(false)));
        assert_eq!(7, ping_clients.len());
    }
}
/* * Datadog API V1 Collection * * Collection of all Datadog Public endpoints. * * The version of the OpenAPI document: 1.0 * Contact: support@datadoghq.com * Generated by: https://openapi-generator.tech */ /// SyntheticsGlobalVariable : Synthetics global variable. #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct SyntheticsGlobalVariable { /// Description of the global variable. #[serde(rename = "description")] pub description: String, /// Unique identifier of the global variable. #[serde(rename = "id", skip_serializing_if = "Option::is_none")] pub id: Option<String>, /// Name of the global variable. #[serde(rename = "name")] pub name: String, #[serde(rename = "parse_test_options", skip_serializing_if = "Option::is_none")] pub parse_test_options: Option<Box<crate::models::SyntheticsGlobalVariableParseTestOptions>>, /// A Synthetic test ID to use as a test to generate the variable value. #[serde(rename = "parse_test_public_id", skip_serializing_if = "Option::is_none")] pub parse_test_public_id: Option<String>, /// Tags of the global variable. #[serde(rename = "tags")] pub tags: Vec<String>, #[serde(rename = "value")] pub value: Box<crate::models::SyntheticsGlobalVariableValue>, } impl SyntheticsGlobalVariable { /// Synthetics global variable. pub fn new(description: String, name: String, tags: Vec<String>, value: crate::models::SyntheticsGlobalVariableValue) -> SyntheticsGlobalVariable { SyntheticsGlobalVariable { description, id: None, name, parse_test_options: None, parse_test_public_id: None, tags, value: Box::new(value), } } }
mod assembler; mod token; mod lexer; mod parser; pub use self::assembler::{Assembler, CodeSegment}; pub use self::token::LexerToken; pub use self::lexer::Lexer;
// svd2rust-generated register bindings for FLASH OPTSR2_PRG. Do not edit by
// hand; regenerate from the SVD. Comments below only annotate structure.
#[doc = "Register `OPTSR2_PRG` reader"]
pub type R = crate::R<OPTSR2_PRG_SPEC>;
#[doc = "Register `OPTSR2_PRG` writer"]
pub type W = crate::W<OPTSR2_PRG_SPEC>;
#[doc = "Field `SRAM1_3_RST` reader - SRAM1 and SRAM3 erase upon system reset Note: SRAM erase is triggered by option byte change operation, when enabling this feature."]
pub type SRAM1_3_RST_R = crate::BitReader;
#[doc = "Field `SRAM1_3_RST` writer - SRAM1 and SRAM3 erase upon system reset Note: SRAM erase is triggered by option byte change operation, when enabling this feature."]
pub type SRAM1_3_RST_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `SRAM2_RST` reader - SRAM2 erase when system reset Note: SRAM erase is triggered by option byte change operation, when enabling this feature."]
pub type SRAM2_RST_R = crate::BitReader;
#[doc = "Field `SRAM2_RST` writer - SRAM2 erase when system reset Note: SRAM erase is triggered by option byte change operation, when enabling this feature."]
pub type SRAM2_RST_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `BKPRAM_ECC` reader - Backup RAM ECC detection and correction disable"]
pub type BKPRAM_ECC_R = crate::BitReader;
#[doc = "Field `BKPRAM_ECC` writer - Backup RAM ECC detection and correction disable"]
pub type BKPRAM_ECC_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `SRAM3_ECC` reader - SRAM3 ECC detection and correction disable"]
pub type SRAM3_ECC_R = crate::BitReader;
#[doc = "Field `SRAM3_ECC` writer - SRAM3 ECC detection and correction disable"]
pub type SRAM3_ECC_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `SRAM2_ECC` reader - SRAM2 ECC detection and correction disable"]
pub type SRAM2_ECC_R = crate::BitReader;
#[doc = "Field `SRAM2_ECC` writer - SRAM2 ECC detection and correction disable"]
pub type SRAM2_ECC_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `USBPD_DIS` reader - USB power delivery configuration option bit"]
pub type USBPD_DIS_R = crate::BitReader;
#[doc = "Field `USBPD_DIS` writer - USB power delivery configuration option bit"]
pub type USBPD_DIS_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `TZEN` reader - TrustZone enable configuration bits This bit enables the device is in TrustZone mode during an option byte change."]
pub type TZEN_R = crate::FieldReader;
#[doc = "Field `TZEN` writer - TrustZone enable configuration bits This bit enables the device is in TrustZone mode during an option byte change."]
pub type TZEN_W<'a, REG, const O: u8> = crate::FieldWriter<'a, REG, 8, O>;
// Read accessors: each extracts its field from the 32-bit register value.
impl R {
    #[doc = "Bit 2 - SRAM1 and SRAM3 erase upon system reset Note: SRAM erase is triggered by option byte change operation, when enabling this feature."]
    #[inline(always)]
    pub fn sram1_3_rst(&self) -> SRAM1_3_RST_R {
        SRAM1_3_RST_R::new(((self.bits >> 2) & 1) != 0)
    }
    #[doc = "Bit 3 - SRAM2 erase when system reset Note: SRAM erase is triggered by option byte change operation, when enabling this feature."]
    #[inline(always)]
    pub fn sram2_rst(&self) -> SRAM2_RST_R {
        SRAM2_RST_R::new(((self.bits >> 3) & 1) != 0)
    }
    #[doc = "Bit 4 - Backup RAM ECC detection and correction disable"]
    #[inline(always)]
    pub fn bkpram_ecc(&self) -> BKPRAM_ECC_R {
        BKPRAM_ECC_R::new(((self.bits >> 4) & 1) != 0)
    }
    #[doc = "Bit 5 - SRAM3 ECC detection and correction disable"]
    #[inline(always)]
    pub fn sram3_ecc(&self) -> SRAM3_ECC_R {
        SRAM3_ECC_R::new(((self.bits >> 5) & 1) != 0)
    }
    #[doc = "Bit 6 - SRAM2 ECC detection and correction disable"]
    #[inline(always)]
    pub fn sram2_ecc(&self) -> SRAM2_ECC_R {
        SRAM2_ECC_R::new(((self.bits >> 6) & 1) != 0)
    }
    #[doc = "Bit 8 - USB power delivery configuration option bit"]
    #[inline(always)]
    pub fn usbpd_dis(&self) -> USBPD_DIS_R {
        USBPD_DIS_R::new(((self.bits >> 8) & 1) != 0)
    }
    #[doc = "Bits 24:31 - TrustZone enable configuration bits This bit enables the device is in TrustZone mode during an option byte change."]
    #[inline(always)]
    pub fn tzen(&self) -> TZEN_R {
        TZEN_R::new(((self.bits >> 24) & 0xff) as u8)
    }
}
// Write accessors: each returns a proxy positioned at the field's offset.
impl W {
    #[doc = "Bit 2 - SRAM1 and SRAM3 erase upon system reset Note: SRAM erase is triggered by option byte change operation, when enabling this feature."]
    #[inline(always)]
    #[must_use]
    pub fn sram1_3_rst(&mut self) -> SRAM1_3_RST_W<OPTSR2_PRG_SPEC, 2> {
        SRAM1_3_RST_W::new(self)
    }
    #[doc = "Bit 3 - SRAM2 erase when system reset Note: SRAM erase is triggered by option byte change operation, when enabling this feature."]
    #[inline(always)]
    #[must_use]
    pub fn sram2_rst(&mut self) -> SRAM2_RST_W<OPTSR2_PRG_SPEC, 3> {
        SRAM2_RST_W::new(self)
    }
    #[doc = "Bit 4 - Backup RAM ECC detection and correction disable"]
    #[inline(always)]
    #[must_use]
    pub fn bkpram_ecc(&mut self) -> BKPRAM_ECC_W<OPTSR2_PRG_SPEC, 4> {
        BKPRAM_ECC_W::new(self)
    }
    #[doc = "Bit 5 - SRAM3 ECC detection and correction disable"]
    #[inline(always)]
    #[must_use]
    pub fn sram3_ecc(&mut self) -> SRAM3_ECC_W<OPTSR2_PRG_SPEC, 5> {
        SRAM3_ECC_W::new(self)
    }
    #[doc = "Bit 6 - SRAM2 ECC detection and correction disable"]
    #[inline(always)]
    #[must_use]
    pub fn sram2_ecc(&mut self) -> SRAM2_ECC_W<OPTSR2_PRG_SPEC, 6> {
        SRAM2_ECC_W::new(self)
    }
    #[doc = "Bit 8 - USB power delivery configuration option bit"]
    #[inline(always)]
    #[must_use]
    pub fn usbpd_dis(&mut self) -> USBPD_DIS_W<OPTSR2_PRG_SPEC, 8> {
        USBPD_DIS_W::new(self)
    }
    #[doc = "Bits 24:31 - TrustZone enable configuration bits This bit enables the device is in TrustZone mode during an option byte change."]
    #[inline(always)]
    #[must_use]
    pub fn tzen(&mut self) -> TZEN_W<OPTSR2_PRG_SPEC, 24> {
        TZEN_W::new(self)
    }
    #[doc = "Writes raw bits to the register."]
    #[inline(always)]
    pub unsafe fn bits(&mut self, bits: u32) -> &mut Self {
        self.bits = bits;
        self
    }
}
#[doc = "FLASH option status register 2\n\nYou can [`read`](crate::generic::Reg::read) this register and get [`optsr2_prg::R`](R). You can [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero) this register using [`optsr2_prg::W`](W). You can also [`modify`](crate::generic::Reg::modify) this register. See [API](https://docs.rs/svd2rust/#read--modify--write-api)."]
pub struct OPTSR2_PRG_SPEC;
impl crate::RegisterSpec for OPTSR2_PRG_SPEC {
    type Ux = u32;
}
#[doc = "`read()` method returns [`optsr2_prg::R`](R) reader structure"]
impl crate::Readable for OPTSR2_PRG_SPEC {}
#[doc = "`write(|w| ..)` method takes [`optsr2_prg::W`](W) writer structure"]
impl crate::Writable for OPTSR2_PRG_SPEC {
    const ZERO_TO_MODIFY_FIELDS_BITMAP: Self::Ux = 0;
    const ONE_TO_MODIFY_FIELDS_BITMAP: Self::Ux = 0;
}
#[doc = "`reset()` method sets OPTSR2_PRG to value 0"]
impl crate::Resettable for OPTSR2_PRG_SPEC {
    const RESET_VALUE: Self::Ux = 0;
}
#![doc = "generated by AutoRust 0.1.0"]
#![allow(non_camel_case_types)]
#![allow(unused_imports)]
use serde::{Deserialize, Serialize};

// Generated Azure subscription API models — regenerate rather than editing
// field definitions by hand.

// Error payload returned by the service on failed operations.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ErrorResponse {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub code: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub message: Option<String>,
}
// Wrapper holding the id of a canceled subscription.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct CanceledSubscriptionId {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub value: Option<String>,
}
// Wrapper holding the id of a renamed subscription.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct RenamedSubscriptionId {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub value: Option<String>,
}
// Wrapper holding the id of a re-enabled subscription.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct EnabledSubscriptionId {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub value: Option<String>,
}
// New display name for a subscription rename request.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SubscriptionName {
    #[serde(rename = "subscriptionName", default, skip_serializing_if = "Option::is_none")]
    pub subscription_name: Option<String>,
}
// Paged list of available REST operations; `next_link` points to more pages.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct OperationListResult {
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub value: Vec<Operation>,
    #[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")]
    pub next_link: Option<String>,
}
// A single REST operation plus its display metadata.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct Operation {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub name: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub display: Option<operation::Display>,
}
pub mod operation {
    use super::*;
    // Localizable description of an operation (provider/resource/operation).
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub struct Display {
        #[serde(default, skip_serializing_if = "Option::is_none")]
        pub provider: Option<String>,
        #[serde(default, skip_serializing_if = "Option::is_none")]
        pub resource: Option<String>,
        #[serde(default, skip_serializing_if = "Option::is_none")]
        pub operation: Option<String>,
    }
}
// Azure AD principal referenced by object id.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct AdPrincipal {
    #[serde(rename = "objectId")]
    pub object_id: String,
}
// Result of a subscription creation request: a link to the new subscription.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SubscriptionCreationResult {
    #[serde(rename = "subscriptionLink", default, skip_serializing_if = "Option::is_none")]
    pub subscription_link: Option<String>,
}
// Parameters for creating an EA subscription.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SubscriptionCreationParameters {
    #[serde(rename = "displayName", default, skip_serializing_if = "Option::is_none")]
    pub display_name: Option<String>,
    #[serde(rename = "managementGroupId", default, skip_serializing_if = "Option::is_none")]
    pub management_group_id: Option<String>,
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub owners: Vec<AdPrincipal>,
    #[serde(rename = "offerType", default, skip_serializing_if = "Option::is_none")]
    pub offer_type: Option<subscription_creation_parameters::OfferType>,
}
pub mod subscription_creation_parameters {
    use super::*;
    // Supported EA offer codes (production / dev-test).
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum OfferType {
        #[serde(rename = "MS-AZR-0017P")]
        MsAzr0017p,
        #[serde(rename = "MS-AZR-0148P")]
        MsAzr0148p,
    }
}
// Parameters for creating a Modern (MCA) subscription.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ModernSubscriptionCreationParameters {
    #[serde(rename = "displayName", default, skip_serializing_if = "Option::is_none")]
    pub display_name: Option<String>,
    #[serde(rename = "skuId", default, skip_serializing_if = "Option::is_none")]
    pub sku_id: Option<String>,
    #[serde(rename = "costCenter", default, skip_serializing_if = "Option::is_none")]
    pub cost_center: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub owner: Option<AdPrincipal>,
    #[serde(rename = "managementGroupId", default, skip_serializing_if = "Option::is_none")]
    pub management_group_id: Option<String>,
}
// Parameters for creating a Modern CSP subscription (all fields required).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ModernCspSubscriptionCreationParameters {
    #[serde(rename = "displayName")]
    pub display_name: String,
    #[serde(rename = "skuId")]
    pub sku_id: String,
    #[serde(rename = "resellerId", default, skip_serializing_if = "Option::is_none")]
    pub reseller_id: Option<String>,
}
fn main() { let vec = read_line(); // 0 0 1 2 let sx : i32 = vec[0]; let sy : i32 = vec[1]; let tx : i32 = vec[2]; let ty : i32 = vec[3]; // 1 s-->t // 2 t-->s { // go ahead let h = (sy - ty).abs(); let w = (sx - tx).abs(); for _ in 0..h { print!("U"); } for _ in 0..w { print!("R"); } // go back for _ in 0..h { print!("D"); } for _ in 0..w { print!("L"); } } // 3 s-->t { let sx = sx - 1; let ty = ty + 1; let h = (sy - ty).abs(); let w = (sx - tx).abs(); print!("L"); for _ in 0..h { print!("U"); } for _ in 0..w { print!("R"); } print!("D"); } // 4 t-->s { let sy = sy - 1; let tx = tx + 1; let h = (sy - ty).abs(); let w = (sx - tx).abs(); print!("R"); for _ in 0..h { print!("D"); } for _ in 0..w { print!("L"); } print!("U"); } println!(""); } fn read_line() -> Vec<i32> { let mut s = String::new(); std::io::stdin().read_line(&mut s).ok(); let v = s.trim() .split_whitespace() .map(|e| e.parse().ok().unwrap()) .collect(); return v }
#[cfg(feature = "simdcompression")] mod build { extern crate cc; pub fn build() { let mut config = cc::Build::new(); config .include("./cpp/simdcomp/include") .file("cpp/simdcomp/src/avxbitpacking.c") .file("cpp/simdcomp/src/simdintegratedbitpacking.c") .file("cpp/simdcomp/src/simdbitpacking.c") .file("cpp/simdcomp/src/simdpackedsearch.c") .file("cpp/simdcomp/src/simdcomputil.c") .file("cpp/simdcomp/src/simdpackedselect.c") .file("cpp/simdcomp/src/simdfor.c") .file("cpp/simdcomp_wrapper.c"); if !cfg!(debug_assertions) { config.opt_level(3); if cfg!(target_env = "msvc") { config .define("NDEBUG", None) .flag("/Gm-") .flag("/GS-") .flag("/Gy") .flag("/Oi") .flag("/GL"); } } if !cfg!(target_env = "msvc") { config .include("./cpp/streamvbyte/include") .file("cpp/streamvbyte/src/streamvbyte.c") .file("cpp/streamvbyte/src/streamvbytedelta.c") .flag("-msse4.1") .flag("-march=native") .flag("-std=c99"); } config.compile("libsimdcomp.a"); // Workaround for linking static libraries built with /GL // https://github.com/rust-lang/rust/issues/26003 if !cfg!(debug_assertions) && cfg!(target_env = "msvc") { println!("cargo:rustc-link-lib=dylib=simdcomp"); } println!("cargo:rerun-if-changed=cpp"); } } #[cfg(not(feature = "simdcompression"))] mod build { pub fn build() {} } fn main() { build::build(); }
// GDT/IDT setup: builds the segment descriptor table and wires interrupt
// gates to their handlers at fixed physical addresses.
use crate::asm;
use crate::console::{inthandler0c, inthandler0d};
use crate::keyboard::inthandler21;
use crate::mouse::inthandler2c;
use crate::timer::inthandler20;
use crate::{exception_handler, handler};
use asm::{interrupt_hrb_api, load_gdtr, load_idtr};

/// x86 GDT segment descriptor: 8 bytes in the hardware-mandated split
/// base/limit layout.
#[derive(Debug, Clone, Copy)]
#[repr(C)]
pub struct SegmentDescriptor {
    limit_low: u16,
    base_low: u16,
    base_mid: u8,
    access_right: u8,
    limit_high: u8,
    base_high: u8,
}

impl SegmentDescriptor {
    /// Builds a descriptor from a byte-granular `limit`, linear `base`, and
    /// extended access-rights word `ar`. Limits above 1 MiB are converted to
    /// 4 KiB page granularity by setting the G bit (0x8000) in `ar`.
    pub fn new(mut limit: u32, base: i32, mut ar: i32) -> SegmentDescriptor {
        if limit > 0xfffff {
            ar |= 0x8000;
            limit /= 0x1000;
        }
        SegmentDescriptor {
            limit_low: limit as u16,
            base_low: base as u16,
            base_mid: (base >> 16) as u8,
            access_right: ar as u8,
            // High nibble of the limit packed with the high AR bits.
            limit_high: ((limit >> 16) as u8 & 0x0f) | ((ar >> 8) as u8 & 0xf0),
            base_high: (base >> 24) as u8,
        }
    }
}

/// x86 IDT gate descriptor (interrupt gate): handler offset split around the
/// code segment selector.
#[derive(Debug, Clone, Copy)]
#[repr(C, packed)]
struct GateDescriptor {
    pub offset_low: u16,
    pub selector: u16,
    pub dw_count: u8,
    pub access_right: u8,
    pub offset_high: u16,
}

impl GateDescriptor {
    /// Builds a gate pointing at `offset` within segment `selector`, with
    /// access rights `ar` (low byte) and param count (second byte).
    fn new(offset: u32, selector: i32, ar: i32) -> GateDescriptor {
        GateDescriptor {
            offset_low: offset as u16,
            selector: selector as u16,
            dw_count: (ar >> 8) as u8,
            access_right: ar as u8,
            offset_high: (offset >> 16) as u16,
        }
    }
}

// Fixed physical placement of the descriptor tables and kernel image.
pub const ADR_GDT: i32 = 0x00270000;
const LIMIT_GDT: i32 = 0x0000ffff;
const ADR_IDT: i32 = 0x0026f800;
const LIMIT_IDT: i32 = 0x000007ff;
const ADR_BOTPAK: i32 = 0x00280000;
const LIMIT_BOTPAK: u32 = 0x0007ffff;
// Access-rights words for the descriptor types used by the kernel.
pub const AR_TSS32: i32 = 0x0089;
const AR_INTGATE32: i32 = 0x008e;
pub const AR_DATA32_RW: i32 = 0x4092;
pub const AR_CODE32_ER: i32 = 0x409a;
pub const AR_LDT: i32 = 0x0082;

/// Zeroes the GDT/IDT, installs the kernel data/code segments, registers the
/// interrupt handlers, and loads both table registers.
pub fn init() {
    // Initialize the GDT: clear every descriptor slot first.
    for i in 0..=(LIMIT_GDT / 8) {
        let gdt = unsafe { &mut *((ADR_GDT + i * 8) as *mut SegmentDescriptor) };
        *gdt = SegmentDescriptor::new(0, 0, 0);
    }
    // Selector 1*8: flat read/write data segment covering all 4 GiB.
    let gdt = unsafe { &mut *((ADR_GDT + 1 * 8) as *mut SegmentDescriptor) };
    *gdt = SegmentDescriptor::new(0xffffffff, 0x00000000, AR_DATA32_RW);
    // Selector 2*8: executable segment covering the kernel image (bootpack).
    let gdt = unsafe { &mut *((ADR_GDT + 2 * 8) as *mut SegmentDescriptor) };
    *gdt = SegmentDescriptor::new(LIMIT_BOTPAK, ADR_BOTPAK, AR_CODE32_ER);
    load_gdtr(LIMIT_GDT, ADR_GDT);

    // Initialize the IDT: clear every gate slot first.
    for i in 0..=(LIMIT_IDT / 8) {
        let idt = unsafe { &mut *((ADR_IDT + i * 8) as *mut GateDescriptor) };
        *idt = GateDescriptor::new(0, 0, 0);
    }

    // Register interrupt handlers. All gates target the kernel code segment
    // (selector 2*8) as 32-bit interrupt gates.
    // 0x0c/0x0d: stack fault / general protection fault exceptions.
    let idt = unsafe { &mut *((ADR_IDT + 0x0c * 8) as *mut GateDescriptor) };
    *idt = GateDescriptor::new(exception_handler!(inthandler0c) as u32, 2 * 8, AR_INTGATE32);
    let idt = unsafe { &mut *((ADR_IDT + 0x0d * 8) as *mut GateDescriptor) };
    *idt = GateDescriptor::new(exception_handler!(inthandler0d) as u32, 2 * 8, AR_INTGATE32);
    // 0x21: keyboard, 0x2c: mouse, 0x20: timer.
    let idt = unsafe { &mut *((ADR_IDT + 0x21 * 8) as *mut GateDescriptor) };
    *idt = GateDescriptor::new(handler!(inthandler21) as u32, 2 * 8, AR_INTGATE32);
    let idt = unsafe { &mut *((ADR_IDT + 0x2c * 8) as *mut GateDescriptor) };
    *idt = GateDescriptor::new(handler!(inthandler2c) as u32, 2 * 8, AR_INTGATE32);
    let idt = unsafe { &mut *((ADR_IDT + 0x20 * 8) as *mut GateDescriptor) };
    *idt = GateDescriptor::new(handler!(inthandler20) as u32, 2 * 8, AR_INTGATE32);
    // 0x40: application API entry; +0x60 sets DPL=3 so user code may invoke it.
    let idt = unsafe { &mut *((ADR_IDT + 0x40 * 8) as *mut GateDescriptor) };
    *idt = GateDescriptor::new(interrupt_hrb_api as u32, 2 * 8, AR_INTGATE32 + 0x60);
    load_idtr(LIMIT_IDT, ADR_IDT);
}
// CLI entry point for the log-query tool: dispatches the `query`, `explain`,
// and `schema` subcommands defined in cli.yml.
#[macro_use]
extern crate failure;
extern crate chrono;
extern crate nom;
extern crate prettytable;
#[macro_use]
extern crate lazy_static;
extern crate pdatastructs;

mod app;
mod common;
mod execution;
mod logical;
mod syntax;

use crate::app::AppError;
use clap::load_yaml;
use clap::App;
use prettytable::{Cell, Row, Table};
use regex::Regex;
use std::path::Path;
use std::str::FromStr;

lazy_static! {
    //FIXME: use different type for string hostname and Ipv4
    // Parses a table spec of the form `name:format=path`, e.g. `t:elb=stdin`.
    static ref TABLE_SPEC_REGEX: Regex = Regex::new(r#"([0-9a-zA-Z]+):([a-zA-Z]+)=([^=\s"':]+)"#).unwrap();
}

/// Parses CLI arguments from cli.yml and runs the selected subcommand.
/// Queries are lowercased before parsing; errors are printed, not propagated.
fn main() {
    let yaml = load_yaml!("cli.yml");
    let app_m = App::from_yaml(yaml).get_matches();

    match app_m.subcommand() {
        // `query`: run a SQL-like query against a table spec.
        ("query", Some(sub_m)) => {
            if let Some(query_str) = sub_m.value_of("query") {
                let lower_case_query_str = query_str.to_ascii_lowercase();
                // Output format defaults to a table; invalid formats abort.
                let output_mode = if let Some(output_format) = sub_m.value_of("output") {
                    match app::OutputMode::from_str(output_format) {
                        Ok(output_mode) => output_mode,
                        Err(e) => {
                            eprintln!("{}", e);
                            std::process::exit(1);
                        }
                    }
                } else {
                    app::OutputMode::Table
                };
                let result = if let Some(table_spec_string) = sub_m.value_of("table") {
                    // Split the spec into (table name, file format, path).
                    if let Some(cap) = TABLE_SPEC_REGEX.captures(table_spec_string) {
                        let table_name = cap.get(1).map_or("", |m| m.as_str()).to_string();
                        let file_format = cap.get(2).map_or("", |m| m.as_str()).to_string();
                        let file_path = cap.get(3).map_or("", |m| m.as_str()).to_string();
                        if !["elb", "alb", "squid", "s3", "jsonl"].contains(&&*file_format) {
                            Err(AppError::InvalidLogFileFormat)
                        } else {
                            // `stdin` is a reserved path meaning "read piped input".
                            if file_path == "stdin" {
                                let data_source = common::types::DataSource::Stdin(file_format, table_name);
                                app::run(&*lower_case_query_str, data_source, output_mode)
                            } else {
                                let path = Path::new(&file_path);
                                let data_source = common::types::DataSource::File(path.to_path_buf(), file_format, table_name);
                                app::run(&*lower_case_query_str, data_source, output_mode)
                            }
                        }
                    } else {
                        Err(AppError::InvalidTableSpecString)
                    }
                } else {
                    Err(AppError::InvalidTableSpecString)
                };
                if let Err(e) = result {
                    println!("{}", e);
                }
            } else {
                println!("{}", sub_m.usage());
            }
        }
        // `explain`: print the query plan; input is fixed to jsonl-on-stdin
        // with the table aliased as `it`.
        ("explain", Some(sub_m)) => {
            if let Some(query_str) = sub_m.value_of("query") {
                let lower_case_query_str = query_str.to_ascii_lowercase();
                let data_source = common::types::DataSource::Stdin("jsonl".to_string(), "it".to_string());
                let result = app::explain(&*lower_case_query_str, data_source);
                if let Err(e) = result {
                    println!("{}", e);
                }
            } else {
                println!("{}", sub_m.usage());
            }
        }
        // `schema`: print the field/type table for a supported log format.
        ("schema", Some(sub_m)) => {
            if let Some(type_str) = sub_m.value_of("type") {
                if type_str == "elb" {
                    let schema = execution::datasource::ClassicLoadBalancerLogField::schema();
                    let mut table = Table::new();
                    for (field, datatype) in schema.iter() {
                        table.add_row(Row::new(vec![
                            Cell::new(&*field.to_string()),
                            Cell::new(&*datatype.to_string()),
                        ]));
                    }
                    table.printstd();
                } else if type_str == "alb" {
                    let schema = execution::datasource::ApplicationLoadBalancerLogField::schema();
                    let mut table = Table::new();
                    for (field, datatype) in schema.iter() {
                        table.add_row(Row::new(vec![
                            Cell::new(&*field.to_string()),
                            Cell::new(&*datatype.to_string()),
                        ]));
                    }
                    table.printstd();
                } else if type_str == "s3" {
                    let schema = execution::datasource::S3Field::schema();
                    let mut table = Table::new();
                    for (field, datatype) in schema.iter() {
                        table.add_row(Row::new(vec![
                            Cell::new(&*field.to_string()),
                            Cell::new(&*datatype.to_string()),
                        ]));
                    }
                    table.printstd();
                } else if type_str == "squid" {
                    let schema = execution::datasource::SquidLogField::schema();
                    let mut table = Table::new();
                    for (field, datatype) in schema.iter() {
                        table.add_row(Row::new(vec![
                            Cell::new(&*field.to_string()),
                            Cell::new(&*datatype.to_string()),
                        ]));
                    }
                    table.printstd();
                } else {
                    eprintln!("Unknown log format");
                }
            } else {
                println!("The supported log format");
                println!("* elb");
                println!("* alb");
                println!("* squid");
                println!("* s3");
            }
        }
        _ => {
            println!("{}", app_m.usage());
        }
    }
}
use std::ops::Bound; use std::sync::Arc; use anyhow::Context; use axum::extract::{Extension, Path}; use hyper::{Body, Response}; use serde_derive::Deserialize; use sqlx::Acquire; use svc_authn::AccountId; use svc_utils::extractors::AccountIdExtractor; use uuid::Uuid; use super::{find, AppResult}; use crate::app::error::ErrorExt; use crate::app::error::ErrorKind as AppErrorKind; use crate::app::http::Json; use crate::app::services::lock_interaction; use crate::app::AppContext; use crate::app::{authz::AuthzObject, metrics::AuthorizeMetrics}; use crate::clients::event::LockedTypes; use crate::db::class; use crate::db::class::AsClassType; use crate::db::class::BoundedDateTimeTuple; #[derive(Deserialize)] pub struct ClassRecreatePayload { #[serde(default, with = "crate::serde::ts_seconds_option_bound_tuple")] time: Option<BoundedDateTimeTuple>, #[serde(default = "class::default_locked_chat")] locked_chat: bool, #[serde(default = "class::default_locked_questions")] locked_questions: bool, } impl ClassRecreatePayload { fn locked_types(&self) -> LockedTypes { LockedTypes { message: self.locked_chat, reaction: self.locked_chat, question: self.locked_questions, question_reaction: self.locked_questions, } } } pub async fn recreate<T: AsClassType>( Extension(ctx): Extension<Arc<dyn AppContext>>, Path(id): Path<Uuid>, AccountIdExtractor(account_id): AccountIdExtractor, Json(body): Json<ClassRecreatePayload>, ) -> AppResult { do_recreate::<T>(ctx.as_ref(), &account_id, id, body).await } async fn do_recreate<T: AsClassType>( state: &dyn AppContext, account_id: &AccountId, id: Uuid, body: ClassRecreatePayload, ) -> AppResult { let webinar = find::<T>(state, id) .await .error(AppErrorKind::ClassNotFound)?; let object = AuthzObject::new(&["classrooms", &webinar.id().to_string()]).into(); let time = body.time.unwrap_or((Bound::Unbounded, Bound::Unbounded)); state .authz() .authorize( webinar.audience().to_owned(), account_id.clone(), object, "update".into(), ) .await .measure()?; let 
(event_room_id, conference_room_id) = crate::app::services::create_event_and_conference_rooms(state, &webinar, &time).await?; let query = crate::db::class::RecreateQuery::new( webinar.id(), time.into(), event_room_id, conference_room_id, ); let webinar = { let mut conn = state.get_conn().await.error(AppErrorKind::DbQueryFailed)?; let mut txn = conn .begin() .await .context("Failed to acquire transaction") .error(AppErrorKind::DbQueryFailed)?; let webinar = query .execute(&mut txn) .await .with_context(|| format!("Failed to update {}", T::as_str())) .error(AppErrorKind::DbQueryFailed)?; crate::db::recording::DeleteQuery::new(webinar.id()) .execute(&mut txn) .await .context("Failed to delete recording") .error(AppErrorKind::DbQueryFailed)?; txn.commit() .await .context("Convert transaction failed") .error(AppErrorKind::DbQueryFailed)?; webinar }; let locked_types = body.locked_types(); if locked_types.any_locked() { lock_interaction(state, event_room_id, locked_types).await; } let body = serde_json::to_string(&webinar) .context("Failed to serialize webinar") .error(AppErrorKind::SerializationFailed)?; let response = Response::builder().body(Body::from(body)).unwrap(); Ok(response) } #[cfg(test)] mod tests { mod recreate { use super::super::*; use crate::{ db::class::{WebinarReadQuery, WebinarType}, test_helpers::prelude::*, }; use mockall::predicate as pred; use uuid::Uuid; #[tokio::test] async fn recreate_webinar_unauthorized() { let agent = TestAgent::new("web", "user1", USR_AUDIENCE); let db_pool = TestDb::new().await; let webinar = { let mut conn = db_pool.get_conn().await; factory::Webinar::new( random_string(), USR_AUDIENCE.to_string(), (Bound::Unbounded, Bound::Unbounded).into(), Uuid::new_v4(), Uuid::new_v4(), ) .insert(&mut conn) .await }; let authz = TestAuthz::new(); let state = TestState::new_with_pool(db_pool, authz); let state = Arc::new(state); // Default value let body: ClassRecreatePayload = serde_json::from_str("{}").unwrap(); 
do_recreate::<WebinarType>(state.as_ref(), agent.account_id(), webinar.id(), body) .await .expect_err("Unexpected success, should fail due to authz"); } #[tokio::test] async fn recreate_webinar() { let agent = TestAgent::new("web", "user1", USR_AUDIENCE); let recreated_event_room_id = Uuid::new_v4(); let recreated_conference_room_id = Uuid::new_v4(); let db_pool = TestDb::new().await; let webinar = { let mut conn = db_pool.get_conn().await; factory::Webinar::new( random_string(), USR_AUDIENCE.to_string(), (Bound::Unbounded, Bound::Unbounded).into(), Uuid::new_v4(), Uuid::new_v4(), ) .insert(&mut conn) .await }; let mut authz = TestAuthz::new(); authz.allow( agent.account_id(), vec!["classrooms", &webinar.id().to_string()], "update", ); let mut state = TestState::new_with_pool(db_pool, authz); create_mocks( &mut state, webinar.id(), recreated_event_room_id, recreated_conference_room_id, ); let state = Arc::new(state); // Default value let body: ClassRecreatePayload = serde_json::from_str("{}").unwrap(); do_recreate::<WebinarType>(state.as_ref(), agent.account_id(), webinar.id(), body) .await .expect("Failed to recreate"); // Assert DB changes. 
let mut conn = state.get_conn().await.expect("Failed to get conn"); let new_webinar = WebinarReadQuery::by_scope(USR_AUDIENCE, &webinar.scope()) .execute(&mut conn) .await .expect("Failed to fetch webinar") .expect("Webinar not found"); assert_eq!(new_webinar.event_room_id(), recreated_event_room_id); assert_eq!( new_webinar.conference_room_id(), recreated_conference_room_id ); } fn create_mocks( state: &mut TestState, classroom_id: Uuid, event_room_id: Uuid, conference_room_id: Uuid, ) { state .event_client_mock() .expect_create_room() .withf(move |_time, _audience, _, _tags, cid| { assert_eq!(*cid, Some(classroom_id)); true }) .returning(move |_, _, _, _, _| Ok(event_room_id)); state .event_client_mock() .expect_update_locked_types() .with(pred::eq(event_room_id), pred::always()) .returning(move |_room_id, _locked_types| Ok(())); state .conference_client_mock() .expect_create_room() .withf(move |_time, _audience, _policy, _reserve, _tags, cid| { assert_eq!(*cid, Some(classroom_id)); true }) .returning(move |_, _, _, _, _, _| Ok(conference_room_id)); } } }
//! Federated GraphQL query planner: turns a parsed operation plus a composed
//! schema into a `QueryPlan` of `Fetch`/`Flatten`/`Sequence`/`Parallel` nodes.

use crate::autofrag::auto_fragmentization;
use crate::consts::{
    typename_field_def, typename_field_node, EMPTY_DIRECTIVES, MUTATION_TYPE_NAME,
    QUERY_TYPE_NAME, TYPENAME_FIELD_NAME,
};
use crate::context::*;
use crate::federation::Federation;
use crate::groups::{
    FetchGroup, GroupForField, GroupForSubField, ParallelGroupForField, SerialGroupForField,
};
use crate::helpers::*;
use crate::model::Selection as ModelSelection;
use crate::model::SelectionSet as ModelSelectionSet;
use crate::model::{FetchNode, FlattenNode, GraphQLDocument, PlanNode, QueryPlan, ResponsePath};
use crate::{context, model, QueryPlanError, QueryPlanningOptions, Result};
use graphql_parser::query::refs::{FieldRef, InlineFragmentRef, SelectionRef, SelectionSetRef};
use graphql_parser::query::*;
use graphql_parser::schema::TypeDefinition;
use graphql_parser::{query, schema, DisplayMinified, Name};
use linked_hash_map::LinkedHashMap;
use std::collections::HashSet;
use std::rc::Rc;
use tracing::instrument;

/// Builds a query plan for a single-operation document.
///
/// Rejects multi-operation documents and subscriptions; an empty document
/// yields a plan with no node. Mutations split their root fields serially
/// (sequence), queries in parallel.
#[instrument(skip(schema, query, options))]
pub(crate) fn build_query_plan(
    schema: &schema::Document,
    query: &Document,
    options: QueryPlanningOptions,
) -> Result<QueryPlan> {
    let mut ops = get_operations(query);

    if ops.is_empty() {
        return Ok(QueryPlan { node: None });
    }

    if ops.len() > 1 {
        return Err(QueryPlanError::InvalidQuery(
            "multiple operations are not supported",
        ));
    }

    if let Operation::Subscription = ops[0].kind {
        return Err(QueryPlanError::InvalidQuery(
            "subscriptions are not supported",
        ));
    }

    let types = names_to_types(schema);

    // TODO(ran)(p2)(#114) see if we can optimize and memoize the stuff we build only using the schema.
    let context = QueryPlanningContext {
        schema,
        operation: ops.pop().expect("ops has exactly one item"),
        // Index fragment definitions by name for spread resolution below.
        fragments: query
            .definitions
            .iter()
            .filter_map(|d| match d {
                Definition::Fragment(frag) => Some((frag.name, frag)),
                _ => None,
            })
            .collect(),
        possible_types: build_possible_types(schema, &types),
        variable_name_to_def: variable_name_to_def(query),
        federation: Federation::new(schema),
        names_to_types: types,
        options,
    };

    let is_mutation = context.operation.kind.as_str() == "mutation";

    let root_type = if is_mutation {
        context.names_to_types[MUTATION_TYPE_NAME]
    } else {
        context.names_to_types[QUERY_TYPE_NAME]
    };

    let fields = collect_fields(
        &context,
        context.new_scope(root_type, None),
        SelectionSetRef::from(context.operation.selection_set),
    );

    // Mutations must execute their root fields in order; queries may fan out.
    let groups = if is_mutation {
        split_root_fields_serially(&context, fields)
    } else {
        split_root_fields(&context, fields)
    };

    let nodes: Vec<PlanNode> = groups
        .into_iter()
        .map(|group| execution_node_for_group(&context, group, Some(root_type)))
        .collect();

    let node = if nodes.is_empty() {
        None
    } else if is_mutation {
        Some(flat_wrap(NodeCollectionKind::Sequence, nodes))
    } else {
        Some(flat_wrap(NodeCollectionKind::Parallel, nodes))
    };

    Ok(QueryPlan { node })
}

/// Flattens a selection set into a list of `context::Field`s, recursing into
/// inline fragments and (once per name) fragment spreads, and skipping
/// introspection fields. Each collected field carries the scope it was
/// reached through.
pub(crate) fn collect_fields<'q>(
    context: &'q QueryPlanningContext<'q>,
    scope: Rc<Scope<'q>>,
    selection_set: SelectionSetRef<'q>,
) -> FieldSet<'q> {
    if selection_set.items.is_empty() {
        return vec![];
    }

    // Shared recursion step for both owned and borrowed inline-fragment
    // shapes; skips fragments whose type condition has no possible types.
    macro_rules! collect_inline_fragment {
        ($inline:ident, $selection_set:expr, $context:ident, $scope:ident, $visited_fragment_names:ident, $fields:ident, $directives:expr) => {
            let fragment_condition = $inline
                .type_condition
                .map(|tc| $context.names_to_types[tc])
                .unwrap_or_else(|| $scope.parent_type);
            let new_scope = $context.new_scope_with_directives(
                fragment_condition,
                Some($scope.clone()),
                Some($directives),
            );
            if !new_scope.possible_types.is_empty() {
                collect_fields_rec(
                    $context,
                    new_scope,
                    $selection_set,
                    $visited_fragment_names,
                    $fields,
                )
            }
        };
    }

    fn collect_fields_rec<'a, 'q>(
        context: &'q QueryPlanningContext<'q>,
        scope: Rc<Scope<'q>>,
        selection_set: SelectionSetRef<'q>,
        visited_fragment_names: &'a mut HashSet<&'q str>,
        fields: &'a mut FieldSet<'q>,
    ) {
        let selections_without_introspection = selection_set
            .items
            .into_iter()
            .filter(|s| is_not_introspection_field(s));

        for selection in selections_without_introspection {
            match selection {
                SelectionRef::FieldRef(field) => {
                    let name = field.name;
                    fields.push(context::Field {
                        scope: scope.clone(),
                        field_node: field,
                        field_def: get_field_def_from_type(&scope.parent_type, name),
                    })
                }
                SelectionRef::Field(field) | SelectionRef::Ref(Selection::Field(field)) => fields
                    .push(context::Field {
                        scope: scope.clone(),
                        field_node: field_ref!(field),
                        field_def: get_field_def_from_type(&scope.parent_type, field.name),
                    }),
                SelectionRef::Ref(Selection::InlineFragment(inline)) => {
                    collect_inline_fragment!(
                        inline,
                        SelectionSetRef::from(&inline.selection_set),
                        context,
                        scope,
                        visited_fragment_names,
                        fields,
                        &inline.directives
                    );
                }
                SelectionRef::InlineFragmentRef(inline_ref) => {
                    collect_inline_fragment!(
                        inline_ref,
                        inline_ref.selection_set,
                        context,
                        scope,
                        visited_fragment_names,
                        fields,
                        &inline_ref.directives
                    );
                }
                SelectionRef::Ref(Selection::FragmentSpread(spread)) => {
                    // Each named fragment is expanded at most once to avoid
                    // cycles and duplicate work.
                    let fragment = context.fragments[spread.fragment_name];
                    if !visited_fragment_names.contains(spread.fragment_name) {
                        let new_scope = context.new_scope(
                            context.names_to_types[fragment.type_condition],
                            Some(scope.clone()),
                        );
                        if !new_scope.possible_types.is_empty() {
                            visited_fragment_names.insert(spread.fragment_name);
                            collect_fields_rec(
                                context,
                                new_scope,
                                SelectionSetRef::from(&fragment.selection_set),
                                visited_fragment_names,
                                fields,
                            );
                        }
                    }
                }
                SelectionRef::FragmentSpreadRef(_) => {
                    unreachable!("FragmentSpreadRef is only used at the end of query planning")
                }
            }
        }
    }

    let mut visited_fragment_names: HashSet<&str> = HashSet::new();
    let mut fields = vec![];
    collect_fields_rec(
        context,
        scope,
        selection_set,
        &mut visited_fragment_names,
        &mut fields,
    );
    fields
}

/// Splits root query fields into per-service fetch groups that may run in
/// parallel.
fn split_root_fields<'q>(
    context: &'q QueryPlanningContext<'q>,
    fields: FieldSet<'q>,
) -> Vec<FetchGroup<'q>> {
    let mut group_for_service = ParallelGroupForField::new(context);
    split_fields(context, vec![], fields, &mut group_for_service);
    group_for_service.into_groups()
}

/// Splits root mutation fields into fetch groups preserving execution order.
fn split_root_fields_serially<'q>(
    context: &'q QueryPlanningContext<'q>,
    fields: FieldSet<'q>,
) -> Vec<FetchGroup<'q>> {
    let mut serial_group_for_field = SerialGroupForField::new(context);
    split_fields(context, vec![], fields, &mut serial_group_for_field);
    serial_group_for_field.into_groups()
}

/// Core splitting pass: buckets fields by response name and then by parent
/// type, assigning each bucket to a fetch group via `grouper`. Abstract
/// parent types whose field is extended by another service are fanned out per
/// possible runtime object type.
fn split_fields<'a, 'q: 'a>(
    context: &'q QueryPlanningContext<'q>,
    path: ResponsePath,
    fields: FieldSet<'q>,
    grouper: &'a mut dyn GroupForField<'q>,
) {
    let grouped = group_by(fields, |f| f.field_node.response_name());
    let fields_for_response_names: Vec<FieldSet> = values!(grouped);

    for field_for_resposne_name in fields_for_response_names {
        let fields_by_parent_type: LinkedHashMap<&str, FieldSet> =
            group_by(field_for_resposne_name, |f| f.scope.parent_type.as_name());
        for (parent_type, fields_for_parent_type) in fields_by_parent_type {
            let field = &fields_for_parent_type[0];
            let scope = &field.scope;
            let field_def = field.field_def;

            // Root-level __typename and introspection types are handled by
            // the gateway itself; never planned as fetches.
            if is_introspection_type(field_def.field_type.as_name())
                || (field_def.name == TYPENAME_FIELD_NAME
                    && (parent_type == QUERY_TYPE_NAME || parent_type == MUTATION_TYPE_NAME))
            {
                continue;
            }

            // Concrete object parent that is a possible type of this scope:
            // the owning group is unambiguous.
            let can_find_group = matches!(
                context.names_to_types[parent_type],
                schema::TypeDefinition::Object(obj) if scope.possible_types.contains(&obj)
            );

            if can_find_group {
                let group = grouper.group_for_field(scope.parent_type, field_def);
                complete_field(
                    context,
                    scope.clone(),
                    group,
                    path.clone(),
                    fields_for_parent_type,
                )
            } else {
                // Abstract parent: if no runtime type's field is owned by a
                // different service, one group suffices.
                let has_no_extending_field_defs = scope
                    .possible_types
                    .iter()
                    .map(|runtime_type| get_field_def!(runtime_type, field.field_node.name))
                    .all(|field_def| {
                        context
                            .federation
                            .service_name_for_field(field_def)
                            .is_none()
                    });

                if has_no_extending_field_defs {
                    let group = grouper.group_for_field(scope.parent_type, field_def);
                    complete_field(
                        context,
                        scope.clone(),
                        group,
                        path.clone(),
                        fields_for_parent_type,
                    );
                    continue;
                }

                // Otherwise fan out: one group per possible runtime type,
                // rebinding the field def to that type's own definition.
                for runtime_parent_obj_type in scope.possible_types.iter() {
                    let field_def = get_field_def!(runtime_parent_obj_type, field.field_node.name);
                    let new_scope = context.new_scope(
                        context.type_def_for_object(runtime_parent_obj_type),
                        Some(scope.clone()),
                    );
                    let group = grouper.group_for_field(new_scope.parent_type, field_def);

                    let fields_with_runtime_parent_type = fields_for_parent_type
                        .iter()
                        .map(|field| context::Field {
                            scope: field.scope.clone(),
                            field_node: field.field_node.clone(),
                            field_def,
                        })
                        .collect();

                    complete_field(
                        context,
                        new_scope,
                        group,
                        path.clone(),
                        fields_with_runtime_parent_type,
                    );
                }
            }
        }
    }
}

/// Looks up `name`'s field definition on an object or interface type;
/// `__typename` resolves to the shared synthetic definition. Panics
/// (unreachable) for non-composite type definitions.
pub(crate) fn get_field_def_from_type<'q>(
    td: &'q TypeDefinition<'q>,
    name: &'q str,
) -> &'q schema::Field<'q> {
    if name == TYPENAME_FIELD_NAME {
        typename_field_def()
    } else {
        match td {
            TypeDefinition::Object(obj) => get_field_def!(obj, name),
            TypeDefinition::Interface(iface) => get_field_def!(iface, name),
            _ => unreachable!(),
        }
    }
}

/// Finalizes one response-name bucket into `parent_group`: scalar-ish fields
/// are pushed directly; composite fields get a sub-group whose sub-fields are
/// recursively collected and split, with any dependent groups hoisted onto
/// the parent.
fn complete_field<'a, 'q: 'a>(
    context: &'q QueryPlanningContext<'q>,
    scope: Rc<Scope<'q>>,
    parent_group: &'a mut FetchGroup<'q>,
    path: ResponsePath,
    fields: FieldSet<'q>,
) {
    let field: context::Field = {
        let type_name = fields[0].field_def.field_type.as_name();
        // the type_name could be a primitive type which is not in our names_to_types map.
        let return_type = context.names_to_types.get(type_name);
        if return_type.is_none() || !return_type.expect("checked not None").is_composite_type() {
            let mut fields = fields;
            context::Field {
                scope,
                ..fields.pop().expect("fields cannot be empty")
            }
        } else {
            let return_type = return_type.expect("Already checked this is not None");
            let (head, tail) = fields.head();

            let field_path = add_path(
                path,
                head.field_node.response_name(),
                &head.field_def.field_type,
            );
            let mut sub_group = FetchGroup::new(
                parent_group.service_name.clone(),
                field_path.clone(),
                context.get_provided_fields(head.field_def, &parent_group.service_name),
            );

            // Abstract return types need __typename so responses can be
            // discriminated at runtime.
            if return_type.is_abstract_type() {
                sub_group.fields.push(context::Field {
                    scope: context.new_scope(return_type, Some(scope.clone())),
                    field_node: typename_field_node(),
                    field_def: typename_field_def(),
                })
            }

            let mut response_field = context::Field {
                scope,
                field_def: head.field_def,
                field_node: field_ref!(head.field_node, SelectionSetRef::default()),
            };

            let fields: FieldSet = vec![head].into_iter().chain(tail).collect();
            let sub_fields = collect_sub_fields(context, return_type, fields);
            let sub_group = split_sub_fields(context, field_path, sub_fields, sub_group);

            let selection_set_ref =
                selection_set_from_field_set(sub_group.fields, Some(return_type), context);

            // Dependent groups discovered while splitting sub-fields become
            // dependents of the parent group.
            let mut sub_group_dependent_groups = {
                values!(iter sub_group.dependent_groups_by_service)
                    .chain(sub_group.other_dependent_groups.into_iter())
                    .collect()
            };
            parent_group
                .other_dependent_groups
                .append(&mut sub_group_dependent_groups);

            response_field.field_node.selection_set = selection_set_ref;
            response_field
        }
    };
    parent_group.fields.push(field);
}

/// Appends `response_name` to the path, adding one "@" segment per list
/// wrapper in the field's type (NonNull wrappers are transparent).
fn add_path(mut path: ResponsePath, response_name: &str, typ: &Type) -> ResponsePath {
    path.push(String::from(response_name));
    let mut typ = typ;
    loop {
        match typ {
            Type::NamedType(_) => break,
            Type::ListType(t) => {
                path.push(String::from("@"));
                typ = t.as_ref()
            }
            Type::NonNullType(t) => typ = t.as_ref(),
        }
    }
    path
}

/// Collects the nested selections of every field in `fields`, each scoped
/// fresh at `return_type`.
fn collect_sub_fields<'q>(
    context: &'q QueryPlanningContext<'q>,
    return_type: &'q TypeDefinition<'q>,
    fields: FieldSet<'q>,
) -> FieldSet<'q> {
    fields
        .into_iter()
        .flat_map(|field| {
            collect_fields(
                context,
                context.new_scope(return_type, None),
                field.field_node.selection_set,
            )
        })
        .collect()
}

/// Splits `sub_fields` under an existing parent group and returns the single
/// resulting group.
fn split_sub_fields<'q>(
    context: &'q QueryPlanningContext<'q>,
    field_path: ResponsePath,
    sub_fields: FieldSet<'q>,
    parent_group: FetchGroup<'q>,
) -> FetchGroup<'q> {
    let mut grouper = GroupForSubField::new(context, parent_group);
    split_fields(context, field_path, sub_fields, &mut grouper);
    grouper.into_groups().pop().expect("groups cannot be empty")
}

/// Converts a fetch group into a plan node: a `Fetch` (an `_entities` fetch
/// when the group has required fields), wrapped in `Flatten` when merged at a
/// nested path, and sequenced before its dependent groups (which themselves
/// run in parallel).
fn execution_node_for_group(
    context: &QueryPlanningContext,
    group: FetchGroup,
    parent_type: Option<&TypeDefinition>,
) -> PlanNode {
    let FetchGroup {
        service_name,
        fields,
        required_fields,
        dependent_groups_by_service,
        other_dependent_groups,
        merge_at,
        ..
    } = group;

    let selection_set = selection_set_from_field_set(fields, parent_type, context);

    let requires = if !required_fields.is_empty() {
        Some(ref_into_model_selection_set(selection_set_from_field_set(
            required_fields,
            None,
            context,
        )))
    } else {
        None
    };

    let (variable_names, variable_defs) = context.get_variable_usages(&selection_set);

    // A non-empty `requires` means this is an entity fetch via `_entities`.
    let operation = if requires.is_some() {
        operation_for_entities_fetch(selection_set, variable_defs)
    } else {
        operation_for_root_fetch(
            context,
            selection_set,
            variable_defs,
            context.operation.kind,
        )
    };

    let fetch_node = PlanNode::Fetch(FetchNode {
        service_name,
        variable_usages: variable_names,
        requires,
        operation,
    });

    let plan_node = if !merge_at.is_empty() {
        PlanNode::Flatten(FlattenNode {
            path: merge_at,
            node: Box::new(fetch_node),
        })
    } else {
        fetch_node
    };

    if !dependent_groups_by_service.is_empty() || !other_dependent_groups.is_empty() {
        let dependent_nodes = values!(iter dependent_groups_by_service)
            .chain(other_dependent_groups.into_iter())
            .map(|group| execution_node_for_group(context, group, None))
            .collect();

        flat_wrap(
            NodeCollectionKind::Sequence,
            vec![
                plan_node,
                flat_wrap(NodeCollectionKind::Parallel, dependent_nodes),
            ],
        )
    } else {
        plan_node
    }
}

/// Rebuilds a selection set from collected fields: groups by parent type,
/// merges fields sharing a response name, and wraps each parent-type bucket
/// in an inline fragment unless it already matches `parent_type`.
fn selection_set_from_field_set<'q>(
    fields: FieldSet<'q>,
    parent_type: Option<&'q TypeDefinition<'q>>,
    context: &'q QueryPlanningContext<'q>,
) -> SelectionSetRef<'q> {
    // Wraps selections in `... on TypeCondition` unless the surrounding
    // parent type already equals the condition.
    fn wrap_in_inline_fragment_if_needed<'q>(
        selections: Vec<SelectionRef<'q>>,
        type_condition: &'q TypeDefinition<'q>,
        parent_type: Option<&'q TypeDefinition<'q>>,
        directives: Option<&'q Vec<Directive<'q>>>,
    ) -> Vec<SelectionRef<'q>> {
        if parent_type.map(|pt| pt == type_condition).unwrap_or(false) {
            selections
        } else {
            vec![SelectionRef::InlineFragmentRef(InlineFragmentRef {
                position: pos(),
                type_condition: type_condition.name(),
                directives: directives.unwrap_or(&EMPTY_DIRECTIVES),
                selection_set: SelectionSetRef {
                    span: span(),
                    items: selections,
                },
            })]
        }
    }

    // Merges all fields with the same response name into one selection,
    // merging their sub-selection sets for composite return types.
    fn combine_fields<'q>(
        fields_with_same_reponse_name: FieldSet<'q>,
        context: &'q QueryPlanningContext,
    ) -> SelectionRef<'q> {
        let is_composite_type = {
            let name = fields_with_same_reponse_name[0]
                .field_def
                .field_type
                .as_name();
            // NB: we don't have specified types (i.e. primitives) in our map.
            // They are not composite types.
            context
                .names_to_types
                .get(name)
                .map(|td| td.is_composite_type())
                .unwrap_or(false)
        };

        if !is_composite_type || fields_with_same_reponse_name.len() == 1 {
            let field_ref = fields_with_same_reponse_name
                .into_iter()
                .next()
                .expect("There must be only one field")
                .field_node;
            SelectionRef::FieldRef(field_ref)
        } else {
            let nodes: Vec<FieldRef> = fields_with_same_reponse_name
                .into_iter()
                .map(|f| f.field_node)
                .collect();
            let field_ref = field_ref!(nodes[0], merge_selection_sets(nodes));
            SelectionRef::FieldRef(field_ref)
        }
    }

    let mut items: Vec<SelectionRef<'q>> = vec![];

    let fields_by_parent_type = group_by(fields, |f| f.scope.parent_type.as_name());
    for (_, fields_by_parent_type) in fields_by_parent_type {
        let type_condition = fields_by_parent_type[0].scope.parent_type;
        let directives = fields_by_parent_type[0].scope.scope_directives;

        let fields_by_response_name: LinkedHashMap<&str, FieldSet> =
            group_by(fields_by_parent_type, |f| f.field_node.response_name());

        let selections = wrap_in_inline_fragment_if_needed(
            fields_by_response_name
                .into_iter()
                .map(|(_, fs)| combine_fields(fs, context))
                .collect(),
            type_condition,
            parent_type,
            directives,
        );
        items.extend(selections);
    }

    SelectionSetRef {
        span: span(),
        items,
    }
}

/// Renders the minified `_entities` query for an entity fetch, prepending
/// the `$representations` variable to the operation's own variables.
fn operation_for_entities_fetch<'q>(
    selection_set: SelectionSetRef<'q>,
    variable_definitions: Vec<&'q VariableDefinition<'q>>,
) -> GraphQLDocument {
    let vars = vec![String::from("$representations:[_Any!]!")]
        .into_iter()
        .chain(variable_definitions.iter().map(|vd| vd.minified()))
        .collect::<String>();

    format!(
        "query({}){{_entities(representations:$representations){}}}",
        vars,
        selection_set.minified(),
    )
}

/// Renders the minified root fetch operation, omitting the `query` keyword
/// for anonymous variable-less queries and appending auto-generated fragments
/// when enabled.
fn operation_for_root_fetch<'q>(
    context: &'q QueryPlanningContext<'q>,
    selection_set: SelectionSetRef<'q>,
    variable_definitions: Vec<&'q VariableDefinition<'q>>,
    op_kind: Operation,
) -> GraphQLDocument {
    let vars = if variable_definitions.is_empty() {
        String::from("")
    } else {
        format!(
            "({})",
            variable_definitions
                .iter()
                .map(|vd| vd.minified())
                .collect::<String>()
        )
    };

    let (frags, selection_set) = maybe_auto_fragmentization(context, selection_set);

    let op_kind = match op_kind {
        Operation::Query if vars.is_empty() => "",
        _ => op_kind.as_str(),
    };

    format!("{}{}{}{}", op_kind, vars, selection_set, frags)
}

/// Converts a parsed field into the owned model representation (empty
/// sub-selections become `None`).
fn field_into_model_selection(field: &query::Field) -> ModelSelection {
    ModelSelection::Field(model::Field {
        alias: field.alias.map(String::from),
        name: String::from(field.name),
        selections: if field.selection_set.items.is_empty() {
            None
        } else {
            Some(into_model_selection_set(&field.selection_set))
        },
    })
}

/// Converts a parsed selection into the owned model representation; fragment
/// spreads are never expected here.
fn into_model_selection(sel: &Selection) -> ModelSelection {
    match sel {
        Selection::Field(field) => field_into_model_selection(field),
        Selection::InlineFragment(inline) => {
            ModelSelection::InlineFragment(model::InlineFragment {
                type_condition: inline.type_condition.map(String::from),
                selections: into_model_selection_set(&inline.selection_set),
            })
        }
        Selection::FragmentSpread(_) => unreachable!(
            "the current query planner doesn't seem to support these in the resulting query plan"
        ),
    }
}

/// Converts a reference-based selection set (as built by the planner) into
/// the owned model selection set, recursing through all `SelectionRef`
/// variants.
fn ref_into_model_selection_set(selection_set_ref: SelectionSetRef) -> ModelSelectionSet {
    fn ref_into_model_selection(sel_ref: SelectionRef) -> ModelSelection {
        match sel_ref {
            SelectionRef::Ref(sel) => into_model_selection(sel),
            SelectionRef::Field(field) => field_into_model_selection(field),
            SelectionRef::FieldRef(field) => ModelSelection::Field(model::Field {
                alias: field.alias.map(String::from),
                name: String::from(field.name),
                selections: if field.selection_set.items.is_empty() {
                    None
                } else {
                    Some(ref_into_model_selection_set(field.selection_set))
                },
            }),
            SelectionRef::InlineFragmentRef(inline) => {
                ModelSelection::InlineFragment(model::InlineFragment {
                    type_condition: inline.type_condition.map(String::from),
                    selections: ref_into_model_selection_set(inline.selection_set),
                })
            }
            SelectionRef::FragmentSpreadRef(_) => {
                unreachable!("FragmentSpreadRef is only used at the end of query planning")
            }
        }
    }

    selection_set_ref
        .items
        .into_iter()
        .map(ref_into_model_selection)
        .collect()
}

/// Converts a parsed selection set into the owned model representation.
fn into_model_selection_set(selection_set: &SelectionSet) -> ModelSelectionSet {
    selection_set
        .items
        .iter()
        .map(into_model_selection)
        .collect()
}

/// Wraps nodes in a Sequence/Parallel node, flattening nested collections of
/// the same kind and unwrapping a single node. Panics on an empty input
/// (caller bug).
fn flat_wrap(kind: NodeCollectionKind, mut nodes: Vec<PlanNode>) -> PlanNode {
    if nodes.is_empty() {
        panic!("programming error: should always be called with nodes")
    }
    if nodes.len() == 1 {
        nodes.pop().expect("nodes length is 1")
    } else {
        let nodes = nodes
            .into_iter()
            .flat_map(|n| match n {
                PlanNode::Sequence { nodes } if matches!(kind, NodeCollectionKind::Sequence) => {
                    nodes
                }
                PlanNode::Parallel { nodes } if matches!(kind, NodeCollectionKind::Parallel) => {
                    nodes
                }
                n => vec![n],
            })
            .collect();
        match kind {
            NodeCollectionKind::Sequence => PlanNode::Sequence { nodes },
            NodeCollectionKind::Parallel => PlanNode::Parallel { nodes },
        }
    }
}

/// Minifies the selection set, additionally extracting auto-generated
/// fragments when the option is enabled. Returns `(fragments, selection_set)`
/// as minified strings (fragments empty when disabled).
fn maybe_auto_fragmentization<'q>(
    context: &'q QueryPlanningContext<'q>,
    selection_set: SelectionSetRef<'q>,
) -> (String, String) {
    if context.options.auto_fragmentization {
        let (frags, selection_set) = auto_fragmentization(context, selection_set);
        let frags = frags
            .into_iter()
            .map(|fd| fd.minified())
            .collect::<String>();
        (frags, selection_set.minified())
    } else {
        (String::from(""), selection_set.minified())
    }
}
use ethereum_types::U256; use juniper::{ graphql_object, graphql_value, EmptyMutation, EmptySubscription, FieldError, FieldResult, RootNode, }; use rustc_hex::{FromHex, ToHex}; use simple_program::state::SimpleProgram; use solana_client::rpc_client::RpcClient; use solana_sdk::{program_pack::Pack, pubkey::Pubkey}; use std::str::FromStr; use tokio::task::JoinHandle; use uniswap_program::state::UniswapOracle; use warp::{http::Response, Filter}; #[derive(Clone)] pub struct Database; impl juniper::Context for Database {} impl Database { fn new() -> Self { Self {} } } struct Pricefeed { token0: [u8; 20], decimal0: u8, amount0: [u8; 32], token1: [u8; 20], decimal1: u8, amount1: [u8; 32], } #[graphql_object(context = "Database")] impl Pricefeed { fn token0(&self) -> String { self.token0.to_hex() } fn decimal0(&self) -> i32 { self.decimal0 as i32 } fn amount0(&self) -> f64 { let amount0 = U256::from_big_endian(&self.amount0[..]); let amount0 = amount0.as_u128(); amount0 as f64 / 10u128.pow(self.decimal0 as u32) as f64 } fn token1(&self) -> String { self.token1.to_hex() } fn decimal1(&self) -> i32 { self.decimal1 as i32 } fn amount1(&self) -> f64 { let amount1 = U256::from_big_endian(&self.amount1[..]); let amount1 = amount1.as_u128(); amount1 as f64 / 10u128.pow(self.decimal1 as u32) as f64 } async fn priceToken0Token1(&self) -> f64 { let amount0 = U256::from_big_endian(&self.amount0[..]); let amount0 = amount0.as_u128(); let amount0 = amount0 as f64 / 10u128.pow(self.decimal0 as u32) as f64; let amount1 = U256::from_big_endian(&self.amount1[..]); let amount1 = amount1.as_u128(); let amount1 = amount1 as f64 / 10u128.pow(self.decimal1 as u32) as f64; amount0 / amount1 } async fn priceToken1Token0(&self) -> f64 { let amount0 = U256::from_big_endian(&self.amount0[..]); let amount0 = amount0.as_u128(); let amount0 = amount0 as f64 / 10u128.pow(self.decimal0 as u32) as f64; let amount1 = U256::from_big_endian(&self.amount1[..]); let amount1 = amount1.as_u128(); let amount1 = 
amount1 as f64 / 10u128.pow(self.decimal1 as u32) as f64; amount1 / amount0 } } struct SimpleData { val_bytes32: [u8; 32], val_address: [u8; 20], val_uint256: [u8; 32], } #[graphql_object(context = "Database")] impl SimpleData { fn val_bytes32(&self) -> String { self.val_bytes32.to_hex() } fn val_address(&self) -> String { self.val_address.to_hex() } fn val_uint256(&self) -> String { self.val_uint256.to_hex() } } struct Query; #[graphql_object(context = "Database")] impl Query { async fn simple_data(account_id: String) -> FieldResult<SimpleData> { let simple_program_account = Pubkey::from_str(&account_id).map_err(|e| { FieldError::new( &format!("Could not decode bs58: {}", e), graphql_value!({ "internal_error": "Connection refused" }), ) })?; let task: JoinHandle<Result<SimpleData, FieldError>> = tokio::task::spawn_blocking(move || { let rpc_client = RpcClient::new(String::from("http://localhost:8899")); let data = rpc_client .get_account_data(&simple_program_account) .map_err(|e| { FieldError::new( &format!("Could not get account: {}", e), graphql_value!({ "internal_error": "Connection refused" }), ) })?; let simple = SimpleProgram::unpack_unchecked(data.as_slice()).map_err(|e| { FieldError::new( &format!("Could not unpack account data: {}", e), graphql_value!({ "internal_error": "Connection refused" }), ) })?; Ok(SimpleData { val_bytes32: simple.val_bytes32, val_address: simple.val_address, val_uint256: simple.val_uint256, }) }); Ok(task.await??) 
} async fn uniswap_oracle(token0: String, token1: String) -> FieldResult<Pricefeed> { let token0 = token0.from_hex::<Vec<u8>>().map_err(|e| { FieldError::new( &format!("Could not decode hex: {}", e), graphql_value!({ "internal_error": "Connection refused" }), ) })?; let token1 = token1.from_hex::<Vec<u8>>().map_err(|e| { FieldError::new( &format!("Could not decode hex: {}", e), graphql_value!({ "internal_error": "Connection refused" }), ) })?; let (account, _) = Pubkey::find_program_address( &[token0.as_slice(), token1.as_slice()], &uniswap_program::id(), ); let task: JoinHandle<Result<Pricefeed, FieldError>> = tokio::task::spawn_blocking(move || { let rpc_client = RpcClient::new(String::from("https://devnet.solana.com")); let data = rpc_client.get_account_data(&account).map_err(|e| { FieldError::new( &format!("Could not get account: {}", e), graphql_value!({ "internal_error": "Connection refused" }), ) })?; let oracle = UniswapOracle::unpack_unchecked(data.as_slice()).map_err(|e| { FieldError::new( &format!("Could not unpack account data: {}", e), graphql_value!({ "internal_error": "Connection refused" }), ) })?; Ok(Pricefeed { token0: oracle.token0, decimal0: oracle.decimal0, amount0: oracle.amount0, token1: oracle.token1, decimal1: oracle.decimal1, amount1: oracle.amount1, }) }); Ok(task.await??) 
} } type Schema = RootNode<'static, Query, EmptyMutation<Database>, EmptySubscription<Database>>; fn schema() -> Schema { Schema::new(Query, EmptyMutation::new(), EmptySubscription::new()) } #[tokio::main] async fn main() { let homepage = warp::path::end().map(|| { Response::builder() .header("content-type", "text/html") .body( "<html><h1>juniper_warp</h1><div>visit <a href=\"/graphiql\">/graphiql</a></html>" .to_string(), ) }); let state = warp::any().map(Database::new); let graphql_filter = juniper_warp::make_graphql_filter(schema(), state.boxed()); warp::serve( warp::get() .and(warp::path("graphiql")) .and(juniper_warp::graphiql_filter("/graphql", None)) .or(homepage) .or(warp::path("graphql").and(graphql_filter)), ) .run(([127, 0, 0, 1], 8080)) .await }
// svd2rust-style accessor module for the RTC control register (`CR`).
// `R` and `W` wrap the raw 32-bit register value; every bit-field gets a typed
// reader and/or writer proxy alias below. NOTE(review): the `Â` / `â\u{89}`
// sequences inside the #[doc] strings look like mojibake from the vendor SVD
// (non-breaking space / "≠"); left as generated — confirm against the SVD file.

// --- Field proxy type aliases (one reader and/or writer per CR bit-field) ---
#[doc = "Register `CR` reader"]
pub type R = crate::R<CR_SPEC>;
#[doc = "Register `CR` writer"]
pub type W = crate::W<CR_SPEC>;
#[doc = "Field `WUCKSEL` reader - ck_wut wakeup clock selection 10x: ck_spre (usually 1Â Hz) clock is selected 11x: ck_spre (usually 1Â Hz) clock is selected and 216Â is added to the WUT counter value"]
pub type WUCKSEL_R = crate::FieldReader;
#[doc = "Field `WUCKSEL` writer - ck_wut wakeup clock selection 10x: ck_spre (usually 1Â Hz) clock is selected 11x: ck_spre (usually 1Â Hz) clock is selected and 216Â is added to the WUT counter value"]
pub type WUCKSEL_W<'a, REG, const O: u8> = crate::FieldWriter<'a, REG, 3, O>;
#[doc = "Field `TSEDGE` reader - Timestamp event active edge TSE must be reset when TSEDGE is changed to avoid unwanted TSF setting."]
pub type TSEDGE_R = crate::BitReader;
#[doc = "Field `TSEDGE` writer - Timestamp event active edge TSE must be reset when TSEDGE is changed to avoid unwanted TSF setting."]
pub type TSEDGE_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `REFCKON` reader - RTC_REFIN reference clock detection enable (50 or 60Â Hz) Note: PREDIV_S must be 0x00FF."]
pub type REFCKON_R = crate::BitReader;
#[doc = "Field `REFCKON` writer - RTC_REFIN reference clock detection enable (50 or 60Â Hz) Note: PREDIV_S must be 0x00FF."]
pub type REFCKON_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `BYPSHAD` reader - Bypass the shadow registers Note: If the frequency of the APB1 clock is less than seven times the frequency of RTCCLK, BYPSHAD must be set to 1."]
pub type BYPSHAD_R = crate::BitReader;
#[doc = "Field `BYPSHAD` writer - Bypass the shadow registers Note: If the frequency of the APB1 clock is less than seven times the frequency of RTCCLK, BYPSHAD must be set to 1."]
pub type BYPSHAD_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `FMT` reader - Hour format"]
pub type FMT_R = crate::BitReader;
#[doc = "Field `FMT` writer - Hour format"]
pub type FMT_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `ALRAE` reader - Alarm A enable"]
pub type ALRAE_R = crate::BitReader;
#[doc = "Field `ALRAE` writer - Alarm A enable"]
pub type ALRAE_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `ALRBE` reader - Alarm B enable"]
pub type ALRBE_R = crate::BitReader;
#[doc = "Field `ALRBE` writer - Alarm B enable"]
pub type ALRBE_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `WUTE` reader - Wakeup timer enable Note: When the wakeup timer is disabled, wait for WUTWF=1 before enabling it again."]
pub type WUTE_R = crate::BitReader;
#[doc = "Field `WUTE` writer - Wakeup timer enable Note: When the wakeup timer is disabled, wait for WUTWF=1 before enabling it again."]
pub type WUTE_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `TSE` reader - timestamp enable"]
pub type TSE_R = crate::BitReader;
#[doc = "Field `TSE` writer - timestamp enable"]
pub type TSE_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `ALRAIE` reader - Alarm A interrupt enable"]
pub type ALRAIE_R = crate::BitReader;
#[doc = "Field `ALRAIE` writer - Alarm A interrupt enable"]
pub type ALRAIE_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `ALRBIE` reader - Alarm B interrupt enable"]
pub type ALRBIE_R = crate::BitReader;
#[doc = "Field `ALRBIE` writer - Alarm B interrupt enable"]
pub type ALRBIE_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `WUTIE` reader - Wakeup timer interrupt enable"]
pub type WUTIE_R = crate::BitReader;
#[doc = "Field `WUTIE` writer - Wakeup timer interrupt enable"]
pub type WUTIE_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `TSIE` reader - Timestamp interrupt enable"]
pub type TSIE_R = crate::BitReader;
#[doc = "Field `TSIE` writer - Timestamp interrupt enable"]
pub type TSIE_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
// ADD1H and SUB1H are write-only (always read as 0), hence no `_R` aliases.
#[doc = "Field `ADD1H` writer - Add 1 hour (summer time change) When this bit is set outside initialization mode, 1 hour is added to the calendar time. This bit is always read as 0."]
pub type ADD1H_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `SUB1H` writer - Subtract 1 hour (winter time change) When this bit is set outside initialization mode, 1 hour is subtracted to the calendar time if the current hour is not 0. This bit is always read as 0. Setting this bit has no effect when current hour is 0."]
pub type SUB1H_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `BKP` reader - Backup This bit can be written by the user to memorize whether the daylight saving time change has been performed or not."]
pub type BKP_R = crate::BitReader;
#[doc = "Field `BKP` writer - Backup This bit can be written by the user to memorize whether the daylight saving time change has been performed or not."]
pub type BKP_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `COSEL` reader - Calibration output selection When COE = 1, this bit selects which signal is output on CALIB. These frequencies are valid for RTCCLK at 32.768Â kHz and prescalers at their default values (PREDIV_A = 127 and PREDIV_S = 255). Refer to ."]
pub type COSEL_R = crate::BitReader;
#[doc = "Field `COSEL` writer - Calibration output selection When COE = 1, this bit selects which signal is output on CALIB. These frequencies are valid for RTCCLK at 32.768Â kHz and prescalers at their default values (PREDIV_A = 127 and PREDIV_S = 255). Refer to ."]
pub type COSEL_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `POL` reader - Output polarity This bit is used to configure the polarity of TAMPALRM output."]
pub type POL_R = crate::BitReader;
#[doc = "Field `POL` writer - Output polarity This bit is used to configure the polarity of TAMPALRM output."]
pub type POL_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `OSEL` reader - Output selection These bits are used to select the flag to be routed to TAMPALRM output."]
pub type OSEL_R = crate::FieldReader;
#[doc = "Field `OSEL` writer - Output selection These bits are used to select the flag to be routed to TAMPALRM output."]
pub type OSEL_W<'a, REG, const O: u8> = crate::FieldWriter<'a, REG, 2, O>;
#[doc = "Field `COE` reader - Calibration output enable This bit enables the CALIB output"]
pub type COE_R = crate::BitReader;
#[doc = "Field `COE` writer - Calibration output enable This bit enables the CALIB output"]
pub type COE_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `ITSE` reader - timestamp on internal event enable"]
pub type ITSE_R = crate::BitReader;
#[doc = "Field `ITSE` writer - timestamp on internal event enable"]
pub type ITSE_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `TAMPTS` reader - Activate timestamp on tamper detection event TAMPTS is valid even if TSE = 0 in the RTC_CR register. Timestamp flag is set after the tamper flags, therefore if TAMPTS and TSIE are set, it is recommended to disable the tamper interrupts in order to avoid servicing 2 interrupts."]
pub type TAMPTS_R = crate::BitReader;
#[doc = "Field `TAMPTS` writer - Activate timestamp on tamper detection event TAMPTS is valid even if TSE = 0 in the RTC_CR register. Timestamp flag is set after the tamper flags, therefore if TAMPTS and TSIE are set, it is recommended to disable the tamper interrupts in order to avoid servicing 2 interrupts."]
pub type TAMPTS_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `TAMPOE` reader - Tamper detection output enable on TAMPALRM"]
pub type TAMPOE_R = crate::BitReader;
#[doc = "Field `TAMPOE` writer - Tamper detection output enable on TAMPALRM"]
pub type TAMPOE_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `TAMPALRM_PU` reader - TAMPALRM pull-up enable"]
pub type TAMPALRM_PU_R = crate::BitReader;
#[doc = "Field `TAMPALRM_PU` writer - TAMPALRM pull-up enable"]
pub type TAMPALRM_PU_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `TAMPALRM_TYPE` reader - TAMPALRM output type"]
pub type TAMPALRM_TYPE_R = crate::BitReader;
#[doc = "Field `TAMPALRM_TYPE` writer - TAMPALRM output type"]
pub type TAMPALRM_TYPE_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `OUT2EN` reader - RTC_OUT2 output enable Setting this bit allows to remap the RTC outputs on RTC_OUT2 as follows: OUT2EN = 0: RTC output 2 disable If OSEL â\u{89} 00 or TAMPOE = 1: TAMPALRM is output on RTC_OUT1 If OSEL = 00 and TAMPOE = 0 and COE = 1: CALIB is output on RTC_OUT1 OUT2EN = 1: RTC output 2 enable If (OSEL â\u{89} 00 or TAMPOE = 1) and COE = 0: TAMPALRM is output on RTC_OUT2 If OSEL = 00 and TAMPOE = 0 and COE = 1: CALIB is output on RTC_OUT2 If (OSELâ\u{89} 00 or TAMPOE = 1) and COE = 1: CALIB is output on RTC_OUT2 and TAMPALRM is output on RTC_OUT1."]
pub type OUT2EN_R = crate::BitReader;
#[doc = "Field `OUT2EN` writer - RTC_OUT2 output enable Setting this bit allows to remap the RTC outputs on RTC_OUT2 as follows: OUT2EN = 0: RTC output 2 disable If OSEL â\u{89} 00 or TAMPOE = 1: TAMPALRM is output on RTC_OUT1 If OSEL = 00 and TAMPOE = 0 and COE = 1: CALIB is output on RTC_OUT1 OUT2EN = 1: RTC output 2 enable If (OSEL â\u{89} 00 or TAMPOE = 1) and COE = 0: TAMPALRM is output on RTC_OUT2 If OSEL = 00 and TAMPOE = 0 and COE = 1: CALIB is output on RTC_OUT2 If (OSELâ\u{89} 00 or TAMPOE = 1) and COE = 1: CALIB is output on RTC_OUT2 and TAMPALRM is output on RTC_OUT1."]
pub type OUT2EN_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
// --- Read-side accessors: each extracts its field from the cached `self.bits` ---
impl R {
    #[doc = "Bits 0:2 - ck_wut wakeup clock selection 10x: ck_spre (usually 1Â Hz) clock is selected 11x: ck_spre (usually 1Â Hz) clock is selected and 216Â is added to the WUT counter value"]
    #[inline(always)]
    pub fn wucksel(&self) -> WUCKSEL_R {
        WUCKSEL_R::new((self.bits & 7) as u8)
    }
    #[doc = "Bit 3 - Timestamp event active edge TSE must be reset when TSEDGE is changed to avoid unwanted TSF setting."]
    #[inline(always)]
    pub fn tsedge(&self) -> TSEDGE_R {
        TSEDGE_R::new(((self.bits >> 3) & 1) != 0)
    }
    #[doc = "Bit 4 - RTC_REFIN reference clock detection enable (50 or 60Â Hz) Note: PREDIV_S must be 0x00FF."]
    #[inline(always)]
    pub fn refckon(&self) -> REFCKON_R {
        REFCKON_R::new(((self.bits >> 4) & 1) != 0)
    }
    #[doc = "Bit 5 - Bypass the shadow registers Note: If the frequency of the APB1 clock is less than seven times the frequency of RTCCLK, BYPSHAD must be set to 1."]
    #[inline(always)]
    pub fn bypshad(&self) -> BYPSHAD_R {
        BYPSHAD_R::new(((self.bits >> 5) & 1) != 0)
    }
    #[doc = "Bit 6 - Hour format"]
    #[inline(always)]
    pub fn fmt(&self) -> FMT_R {
        FMT_R::new(((self.bits >> 6) & 1) != 0)
    }
    #[doc = "Bit 8 - Alarm A enable"]
    #[inline(always)]
    pub fn alrae(&self) -> ALRAE_R {
        ALRAE_R::new(((self.bits >> 8) & 1) != 0)
    }
    #[doc = "Bit 9 - Alarm B enable"]
    #[inline(always)]
    pub fn alrbe(&self) -> ALRBE_R {
        ALRBE_R::new(((self.bits >> 9) & 1) != 0)
    }
    #[doc = "Bit 10 - Wakeup timer enable Note: When the wakeup timer is disabled, wait for WUTWF=1 before enabling it again."]
    #[inline(always)]
    pub fn wute(&self) -> WUTE_R {
        WUTE_R::new(((self.bits >> 10) & 1) != 0)
    }
    #[doc = "Bit 11 - timestamp enable"]
    #[inline(always)]
    pub fn tse(&self) -> TSE_R {
        TSE_R::new(((self.bits >> 11) & 1) != 0)
    }
    #[doc = "Bit 12 - Alarm A interrupt enable"]
    #[inline(always)]
    pub fn alraie(&self) -> ALRAIE_R {
        ALRAIE_R::new(((self.bits >> 12) & 1) != 0)
    }
    #[doc = "Bit 13 - Alarm B interrupt enable"]
    #[inline(always)]
    pub fn alrbie(&self) -> ALRBIE_R {
        ALRBIE_R::new(((self.bits >> 13) & 1) != 0)
    }
    #[doc = "Bit 14 - Wakeup timer interrupt enable"]
    #[inline(always)]
    pub fn wutie(&self) -> WUTIE_R {
        WUTIE_R::new(((self.bits >> 14) & 1) != 0)
    }
    #[doc = "Bit 15 - Timestamp interrupt enable"]
    #[inline(always)]
    pub fn tsie(&self) -> TSIE_R {
        TSIE_R::new(((self.bits >> 15) & 1) != 0)
    }
    #[doc = "Bit 18 - Backup This bit can be written by the user to memorize whether the daylight saving time change has been performed or not."]
    #[inline(always)]
    pub fn bkp(&self) -> BKP_R {
        BKP_R::new(((self.bits >> 18) & 1) != 0)
    }
    #[doc = "Bit 19 - Calibration output selection When COE = 1, this bit selects which signal is output on CALIB. These frequencies are valid for RTCCLK at 32.768Â kHz and prescalers at their default values (PREDIV_A = 127 and PREDIV_S = 255). Refer to ."]
    #[inline(always)]
    pub fn cosel(&self) -> COSEL_R {
        COSEL_R::new(((self.bits >> 19) & 1) != 0)
    }
    #[doc = "Bit 20 - Output polarity This bit is used to configure the polarity of TAMPALRM output."]
    #[inline(always)]
    pub fn pol(&self) -> POL_R {
        POL_R::new(((self.bits >> 20) & 1) != 0)
    }
    #[doc = "Bits 21:22 - Output selection These bits are used to select the flag to be routed to TAMPALRM output."]
    #[inline(always)]
    pub fn osel(&self) -> OSEL_R {
        OSEL_R::new(((self.bits >> 21) & 3) as u8)
    }
    #[doc = "Bit 23 - Calibration output enable This bit enables the CALIB output"]
    #[inline(always)]
    pub fn coe(&self) -> COE_R {
        COE_R::new(((self.bits >> 23) & 1) != 0)
    }
    #[doc = "Bit 24 - timestamp on internal event enable"]
    #[inline(always)]
    pub fn itse(&self) -> ITSE_R {
        ITSE_R::new(((self.bits >> 24) & 1) != 0)
    }
    #[doc = "Bit 25 - Activate timestamp on tamper detection event TAMPTS is valid even if TSE = 0 in the RTC_CR register. Timestamp flag is set after the tamper flags, therefore if TAMPTS and TSIE are set, it is recommended to disable the tamper interrupts in order to avoid servicing 2 interrupts."]
    #[inline(always)]
    pub fn tampts(&self) -> TAMPTS_R {
        TAMPTS_R::new(((self.bits >> 25) & 1) != 0)
    }
    #[doc = "Bit 26 - Tamper detection output enable on TAMPALRM"]
    #[inline(always)]
    pub fn tampoe(&self) -> TAMPOE_R {
        TAMPOE_R::new(((self.bits >> 26) & 1) != 0)
    }
    #[doc = "Bit 29 - TAMPALRM pull-up enable"]
    #[inline(always)]
    pub fn tampalrm_pu(&self) -> TAMPALRM_PU_R {
        TAMPALRM_PU_R::new(((self.bits >> 29) & 1) != 0)
    }
    #[doc = "Bit 30 - TAMPALRM output type"]
    #[inline(always)]
    pub fn tampalrm_type(&self) -> TAMPALRM_TYPE_R {
        TAMPALRM_TYPE_R::new(((self.bits >> 30) & 1) != 0)
    }
    #[doc = "Bit 31 - RTC_OUT2 output enable Setting this bit allows to remap the RTC outputs on RTC_OUT2 as follows: OUT2EN = 0: RTC output 2 disable If OSEL â\u{89} 00 or TAMPOE = 1: TAMPALRM is output on RTC_OUT1 If OSEL = 00 and TAMPOE = 0 and COE = 1: CALIB is output on RTC_OUT1 OUT2EN = 1: RTC output 2 enable If (OSEL â\u{89} 00 or TAMPOE = 1) and COE = 0: TAMPALRM is output on RTC_OUT2 If OSEL = 00 and TAMPOE = 0 and COE = 1: CALIB is output on RTC_OUT2 If (OSELâ\u{89} 00 or TAMPOE = 1) and COE = 1: CALIB is output on RTC_OUT2 and TAMPALRM is output on RTC_OUT1."]
    #[inline(always)]
    pub fn out2en(&self) -> OUT2EN_R {
        OUT2EN_R::new(((self.bits >> 31) & 1) != 0)
    }
}
// --- Write-side accessors: each returns a writer proxy fixed at the field's bit offset ---
impl W {
    #[doc = "Bits 0:2 - ck_wut wakeup clock selection 10x: ck_spre (usually 1Â Hz) clock is selected 11x: ck_spre (usually 1Â Hz) clock is selected and 216Â is added to the WUT counter value"]
    #[inline(always)]
    #[must_use]
    pub fn wucksel(&mut self) -> WUCKSEL_W<CR_SPEC, 0> {
        WUCKSEL_W::new(self)
    }
    #[doc = "Bit 3 - Timestamp event active edge TSE must be reset when TSEDGE is changed to avoid unwanted TSF setting."]
    #[inline(always)]
    #[must_use]
    pub fn tsedge(&mut self) -> TSEDGE_W<CR_SPEC, 3> {
        TSEDGE_W::new(self)
    }
    #[doc = "Bit 4 - RTC_REFIN reference clock detection enable (50 or 60Â Hz) Note: PREDIV_S must be 0x00FF."]
    #[inline(always)]
    #[must_use]
    pub fn refckon(&mut self) -> REFCKON_W<CR_SPEC, 4> {
        REFCKON_W::new(self)
    }
    #[doc = "Bit 5 - Bypass the shadow registers Note: If the frequency of the APB1 clock is less than seven times the frequency of RTCCLK, BYPSHAD must be set to 1."]
    #[inline(always)]
    #[must_use]
    pub fn bypshad(&mut self) -> BYPSHAD_W<CR_SPEC, 5> {
        BYPSHAD_W::new(self)
    }
    #[doc = "Bit 6 - Hour format"]
    #[inline(always)]
    #[must_use]
    pub fn fmt(&mut self) -> FMT_W<CR_SPEC, 6> {
        FMT_W::new(self)
    }
    #[doc = "Bit 8 - Alarm A enable"]
    #[inline(always)]
    #[must_use]
    pub fn alrae(&mut self) -> ALRAE_W<CR_SPEC, 8> {
        ALRAE_W::new(self)
    }
    #[doc = "Bit 9 - Alarm B enable"]
    #[inline(always)]
    #[must_use]
    pub fn alrbe(&mut self) -> ALRBE_W<CR_SPEC, 9> {
        ALRBE_W::new(self)
    }
    #[doc = "Bit 10 - Wakeup timer enable Note: When the wakeup timer is disabled, wait for WUTWF=1 before enabling it again."]
    #[inline(always)]
    #[must_use]
    pub fn wute(&mut self) -> WUTE_W<CR_SPEC, 10> {
        WUTE_W::new(self)
    }
    #[doc = "Bit 11 - timestamp enable"]
    #[inline(always)]
    #[must_use]
    pub fn tse(&mut self) -> TSE_W<CR_SPEC, 11> {
        TSE_W::new(self)
    }
    #[doc = "Bit 12 - Alarm A interrupt enable"]
    #[inline(always)]
    #[must_use]
    pub fn alraie(&mut self) -> ALRAIE_W<CR_SPEC, 12> {
        ALRAIE_W::new(self)
    }
    #[doc = "Bit 13 - Alarm B interrupt enable"]
    #[inline(always)]
    #[must_use]
    pub fn alrbie(&mut self) -> ALRBIE_W<CR_SPEC, 13> {
        ALRBIE_W::new(self)
    }
    #[doc = "Bit 14 - Wakeup timer interrupt enable"]
    #[inline(always)]
    #[must_use]
    pub fn wutie(&mut self) -> WUTIE_W<CR_SPEC, 14> {
        WUTIE_W::new(self)
    }
    #[doc = "Bit 15 - Timestamp interrupt enable"]
    #[inline(always)]
    #[must_use]
    pub fn tsie(&mut self) -> TSIE_W<CR_SPEC, 15> {
        TSIE_W::new(self)
    }
    #[doc = "Bit 16 - Add 1 hour (summer time change) When this bit is set outside initialization mode, 1 hour is added to the calendar time. This bit is always read as 0."]
    #[inline(always)]
    #[must_use]
    pub fn add1h(&mut self) -> ADD1H_W<CR_SPEC, 16> {
        ADD1H_W::new(self)
    }
    #[doc = "Bit 17 - Subtract 1 hour (winter time change) When this bit is set outside initialization mode, 1 hour is subtracted to the calendar time if the current hour is not 0. This bit is always read as 0. Setting this bit has no effect when current hour is 0."]
    #[inline(always)]
    #[must_use]
    pub fn sub1h(&mut self) -> SUB1H_W<CR_SPEC, 17> {
        SUB1H_W::new(self)
    }
    #[doc = "Bit 18 - Backup This bit can be written by the user to memorize whether the daylight saving time change has been performed or not."]
    #[inline(always)]
    #[must_use]
    pub fn bkp(&mut self) -> BKP_W<CR_SPEC, 18> {
        BKP_W::new(self)
    }
    #[doc = "Bit 19 - Calibration output selection When COE = 1, this bit selects which signal is output on CALIB. These frequencies are valid for RTCCLK at 32.768Â kHz and prescalers at their default values (PREDIV_A = 127 and PREDIV_S = 255). Refer to ."]
    #[inline(always)]
    #[must_use]
    pub fn cosel(&mut self) -> COSEL_W<CR_SPEC, 19> {
        COSEL_W::new(self)
    }
    #[doc = "Bit 20 - Output polarity This bit is used to configure the polarity of TAMPALRM output."]
    #[inline(always)]
    #[must_use]
    pub fn pol(&mut self) -> POL_W<CR_SPEC, 20> {
        POL_W::new(self)
    }
    #[doc = "Bits 21:22 - Output selection These bits are used to select the flag to be routed to TAMPALRM output."]
    #[inline(always)]
    #[must_use]
    pub fn osel(&mut self) -> OSEL_W<CR_SPEC, 21> {
        OSEL_W::new(self)
    }
    #[doc = "Bit 23 - Calibration output enable This bit enables the CALIB output"]
    #[inline(always)]
    #[must_use]
    pub fn coe(&mut self) -> COE_W<CR_SPEC, 23> {
        COE_W::new(self)
    }
    #[doc = "Bit 24 - timestamp on internal event enable"]
    #[inline(always)]
    #[must_use]
    pub fn itse(&mut self) -> ITSE_W<CR_SPEC, 24> {
        ITSE_W::new(self)
    }
    #[doc = "Bit 25 - Activate timestamp on tamper detection event TAMPTS is valid even if TSE = 0 in the RTC_CR register. Timestamp flag is set after the tamper flags, therefore if TAMPTS and TSIE are set, it is recommended to disable the tamper interrupts in order to avoid servicing 2 interrupts."]
    #[inline(always)]
    #[must_use]
    pub fn tampts(&mut self) -> TAMPTS_W<CR_SPEC, 25> {
        TAMPTS_W::new(self)
    }
    #[doc = "Bit 26 - Tamper detection output enable on TAMPALRM"]
    #[inline(always)]
    #[must_use]
    pub fn tampoe(&mut self) -> TAMPOE_W<CR_SPEC, 26> {
        TAMPOE_W::new(self)
    }
    #[doc = "Bit 29 - TAMPALRM pull-up enable"]
    #[inline(always)]
    #[must_use]
    pub fn tampalrm_pu(&mut self) -> TAMPALRM_PU_W<CR_SPEC, 29> {
        TAMPALRM_PU_W::new(self)
    }
    #[doc = "Bit 30 - TAMPALRM output type"]
    #[inline(always)]
    #[must_use]
    pub fn tampalrm_type(&mut self) -> TAMPALRM_TYPE_W<CR_SPEC, 30> {
        TAMPALRM_TYPE_W::new(self)
    }
    #[doc = "Bit 31 - RTC_OUT2 output enable Setting this bit allows to remap the RTC outputs on RTC_OUT2 as follows: OUT2EN = 0: RTC output 2 disable If OSEL â\u{89} 00 or TAMPOE = 1: TAMPALRM is output on RTC_OUT1 If OSEL = 00 and TAMPOE = 0 and COE = 1: CALIB is output on RTC_OUT1 OUT2EN = 1: RTC output 2 enable If (OSEL â\u{89} 00 or TAMPOE = 1) and COE = 0: TAMPALRM is output on RTC_OUT2 If OSEL = 00 and TAMPOE = 0 and COE = 1: CALIB is output on RTC_OUT2 If (OSELâ\u{89} 00 or TAMPOE = 1) and COE = 1: CALIB is output on RTC_OUT2 and TAMPALRM is output on RTC_OUT1."]
    #[inline(always)]
    #[must_use]
    pub fn out2en(&mut self) -> OUT2EN_W<CR_SPEC, 31> {
        OUT2EN_W::new(self)
    }
    #[doc = "Writes raw bits to the register."]
    #[inline(always)]
    pub unsafe fn bits(&mut self, bits: u32) -> &mut Self {
        self.bits = bits;
        self
    }
}
// --- Register metadata: CR is a 32-bit register, readable and writable, reset value 0 ---
#[doc = "RTC control register\n\nYou can [`read`](crate::generic::Reg::read) this register and get [`cr::R`](R). You can [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero) this register using [`cr::W`](W). You can also [`modify`](crate::generic::Reg::modify) this register. See [API](https://docs.rs/svd2rust/#read--modify--write-api)."]
pub struct CR_SPEC;
impl crate::RegisterSpec for CR_SPEC {
    type Ux = u32;
}
#[doc = "`read()` method returns [`cr::R`](R) reader structure"]
impl crate::Readable for CR_SPEC {}
#[doc = "`write(|w| ..)` method takes [`cr::W`](W) writer structure"]
impl crate::Writable for CR_SPEC {
    const ZERO_TO_MODIFY_FIELDS_BITMAP: Self::Ux = 0;
    const ONE_TO_MODIFY_FIELDS_BITMAP: Self::Ux = 0;
}
#[doc = "`reset()` method sets CR to value 0"]
impl crate::Resettable for CR_SPEC {
    const RESET_VALUE: Self::Ux = 0;
}
use crate::cargo::{self, Metadata, PackageMetadata}; use crate::dependencies::{self, Dependency, EditionOrInherit}; use crate::directory::Directory; use crate::env::Update; use crate::error::{Error, Result}; use crate::expand::{expand_globs, ExpandedTest}; use crate::flock::Lock; use crate::manifest::{Bin, Build, Config, Manifest, Name, Package, Workspace}; use crate::message::{self, Fail, Warn}; use crate::normalize::{self, Context, Variations}; use crate::{features, rustflags, Expected, Runner, Test}; use serde_derive::Deserialize; use std::collections::{BTreeMap as Map, BTreeSet as Set}; use std::env; use std::ffi::{OsStr, OsString}; use std::fs::{self, File}; use std::mem; use std::path::{Path, PathBuf}; use std::str; #[derive(Debug)] pub struct Project { pub dir: Directory, source_dir: Directory, pub target_dir: Directory, pub name: String, update: Update, pub has_pass: bool, has_compile_fail: bool, pub features: Option<Vec<String>>, pub workspace: Directory, pub path_dependencies: Vec<PathDependency>, manifest: Manifest, pub keep_going: bool, } #[derive(Debug)] pub struct PathDependency { pub name: String, pub normalized_path: Directory, } struct Report { failures: usize, created_wip: usize, } impl Runner { pub fn run(&mut self) { let mut tests = expand_globs(&self.tests); filter(&mut tests); let (project, _lock) = (|| { let mut project = self.prepare(&tests)?; let lock = Lock::acquire(path!(project.dir / ".lock")); self.write(&mut project)?; Ok((project, lock)) })() .unwrap_or_else(|err| { message::prepare_fail(err); panic!("tests failed"); }); print!("\n\n"); let len = tests.len(); let mut report = Report { failures: 0, created_wip: 0, }; if tests.is_empty() { message::no_tests_enabled(); } else if project.keep_going && !project.has_pass { report = match self.run_all(&project, tests) { Ok(failures) => failures, Err(err) => { message::test_fail(err); Report { failures: len, created_wip: 0, } } } } else { for test in tests { match test.run(&project) { 
Ok(Outcome::Passed) => {} Ok(Outcome::CreatedWip) => report.created_wip += 1, Err(err) => { report.failures += 1; message::test_fail(err); } } } } print!("\n\n"); if report.failures > 0 && project.name != "trybuild-tests" { panic!("{} of {} tests failed", report.failures, len); } if report.created_wip > 0 && project.name != "trybuild-tests" { panic!( "successfully created new stderr files for {} test cases", report.created_wip, ); } } fn prepare(&self, tests: &[ExpandedTest]) -> Result<Project> { let Metadata { target_directory: target_dir, workspace_root: workspace, packages, } = cargo::metadata()?; let mut has_pass = false; let mut has_compile_fail = false; for e in tests { match e.test.expected { Expected::Pass => has_pass = true, Expected::CompileFail => has_compile_fail = true, } } let source_dir = cargo::manifest_dir()?; let source_manifest = dependencies::get_manifest(&source_dir)?; let mut features = features::find(); let path_dependencies = source_manifest .dependencies .iter() .filter_map(|(name, dep)| { let path = dep.path.as_ref()?; if packages.iter().any(|p| &p.name == name) { // Skip path dependencies coming from the workspace itself None } else { Some(PathDependency { name: name.clone(), normalized_path: path.canonicalize().ok()?, }) } }) .collect(); let crate_name = &source_manifest.package.name; let project_dir = path!(target_dir / "tests" / "trybuild" / crate_name /); fs::create_dir_all(&project_dir)?; let project_name = format!("{}-tests", crate_name); let manifest = self.make_manifest( &workspace, &project_name, &source_dir, &packages, tests, source_manifest, )?; if let Some(enabled_features) = &mut features { enabled_features.retain(|feature| manifest.features.contains_key(feature)); } Ok(Project { dir: project_dir, source_dir, target_dir, name: project_name, update: Update::env()?, has_pass, has_compile_fail, features, workspace, path_dependencies, manifest, keep_going: false, }) } fn write(&self, project: &mut Project) -> Result<()> { let 
manifest_toml = basic_toml::to_string(&project.manifest)?; let config = self.make_config(); let config_toml = basic_toml::to_string(&config)?; fs::create_dir_all(path!(project.dir / ".cargo"))?; fs::write(path!(project.dir / ".cargo" / "config.toml"), config_toml)?; fs::write(path!(project.dir / "Cargo.toml"), manifest_toml)?; let main_rs = b"\ #![allow(unused_crate_dependencies, missing_docs)]\n\ fn main() {}\n\ "; fs::write(path!(project.dir / "main.rs"), &main_rs[..])?; cargo::build_dependencies(project)?; Ok(()) } fn make_manifest( &self, workspace: &Directory, project_name: &str, source_dir: &Directory, packages: &[PackageMetadata], tests: &[ExpandedTest], source_manifest: dependencies::Manifest, ) -> Result<Manifest> { let crate_name = source_manifest.package.name; let workspace_manifest = dependencies::get_workspace_manifest(workspace); let edition = match source_manifest.package.edition { EditionOrInherit::Edition(edition) => edition, EditionOrInherit::Inherit => workspace_manifest .workspace .package .edition .ok_or(Error::NoWorkspaceManifest)?, }; let mut dependencies = Map::new(); dependencies.extend(source_manifest.dependencies); dependencies.extend(source_manifest.dev_dependencies); let cargo_toml_path = source_dir.join("Cargo.toml"); let mut has_lib_target = true; for package_metadata in packages { if package_metadata.manifest_path == cargo_toml_path { has_lib_target = package_metadata .targets .iter() .any(|target| target.crate_types != ["bin"]); } } if has_lib_target { dependencies.insert( crate_name.clone(), Dependency { version: None, path: Some(source_dir.clone()), optional: false, default_features: false, features: Vec::new(), git: None, branch: None, tag: None, rev: None, workspace: false, rest: Map::new(), }, ); } let mut targets = source_manifest.target; for target in targets.values_mut() { let dev_dependencies = mem::take(&mut target.dev_dependencies); target.dependencies.extend(dev_dependencies); } let mut features = 
source_manifest.features; for (feature, enables) in &mut features { enables.retain(|en| { let dep_name = match en.strip_prefix("dep:") { Some(dep_name) => dep_name, None => return false, }; if let Some(Dependency { optional: true, .. }) = dependencies.get(dep_name) { return true; } for target in targets.values() { if let Some(Dependency { optional: true, .. }) = target.dependencies.get(dep_name) { return true; } } false }); if has_lib_target { enables.insert(0, format!("{}/{}", crate_name, feature)); } } let mut manifest = Manifest { package: Package { name: project_name.to_owned(), version: "0.0.0".to_owned(), edition, resolver: source_manifest.package.resolver, publish: false, }, features, dependencies, target: targets, bins: Vec::new(), workspace: Some(Workspace { dependencies: workspace_manifest.workspace.dependencies, }), // Within a workspace, only the [patch] and [replace] sections in // the workspace root's Cargo.toml are applied by Cargo. patch: workspace_manifest.patch, replace: workspace_manifest.replace, }; manifest.bins.push(Bin { name: Name(project_name.to_owned()), path: Path::new("main.rs").to_owned(), }); for expanded in tests { if expanded.error.is_none() { manifest.bins.push(Bin { name: expanded.name.clone(), path: source_dir.join(&expanded.test.path), }); } } Ok(manifest) } fn make_config(&self) -> Config { Config { build: Build { rustflags: rustflags::make_vec(), }, } } fn run_all(&self, project: &Project, tests: Vec<ExpandedTest>) -> Result<Report> { let mut report = Report { failures: 0, created_wip: 0, }; let mut path_map = Map::new(); for t in &tests { let src_path = project.source_dir.join(&t.test.path); path_map.insert(src_path, (&t.name, &t.test)); } let output = cargo::build_all_tests(project)?; let parsed = parse_cargo_json(project, &output.stdout, &path_map); let fallback = Stderr::default(); for mut t in tests { let show_expected = false; message::begin_test(&t.test, show_expected); if t.error.is_none() { t.error = 
check_exists(&t.test.path).err(); } if t.error.is_none() { let src_path = project.source_dir.join(&t.test.path); let this_test = parsed.stderrs.get(&src_path).unwrap_or(&fallback); match t.test.check(project, &t.name, this_test, "") { Ok(Outcome::Passed) => {} Ok(Outcome::CreatedWip) => report.created_wip += 1, Err(error) => t.error = Some(error), } } if let Some(err) = t.error { report.failures += 1; message::test_fail(err); } } Ok(report) } } enum Outcome { Passed, CreatedWip, } impl Test { fn run(&self, project: &Project, name: &Name) -> Result<Outcome> { let show_expected = project.has_pass && project.has_compile_fail; message::begin_test(self, show_expected); check_exists(&self.path)?; let mut path_map = Map::new(); let src_path = project.source_dir.join(&self.path); path_map.insert(src_path.clone(), (name, self)); let output = cargo::build_test(project, name)?; let parsed = parse_cargo_json(project, &output.stdout, &path_map); let fallback = Stderr::default(); let this_test = parsed.stderrs.get(&src_path).unwrap_or(&fallback); self.check(project, name, this_test, &parsed.stdout) } fn check( &self, project: &Project, name: &Name, result: &Stderr, build_stdout: &str, ) -> Result<Outcome> { let check = match self.expected { Expected::Pass => Test::check_pass, Expected::CompileFail => Test::check_compile_fail, }; check( self, project, name, result.success, build_stdout, &result.stderr, ) } fn check_pass( &self, project: &Project, name: &Name, success: bool, build_stdout: &str, variations: &Variations, ) -> Result<Outcome> { let preferred = variations.preferred(); if !success { message::failed_to_build(preferred); return Err(Error::CargoFail); } let mut output = cargo::run_test(project, name)?; output.stdout.splice(..0, build_stdout.bytes()); message::output(preferred, &output); if output.status.success() { Ok(Outcome::Passed) } else { Err(Error::RunFailed) } } fn check_compile_fail( &self, project: &Project, _name: &Name, success: bool, build_stdout: &str, 
variations: &Variations, ) -> Result<Outcome> { let preferred = variations.preferred(); if success { message::should_not_have_compiled(); message::fail_output(Fail, build_stdout); message::warnings(preferred); return Err(Error::ShouldNotHaveCompiled); } let stderr_path = self.path.with_extension("stderr"); if !stderr_path.exists() { let outcome = match project.update { Update::Wip => { let wip_dir = Path::new("wip"); fs::create_dir_all(wip_dir)?; let gitignore_path = wip_dir.join(".gitignore"); fs::write(gitignore_path, "*\n")?; let stderr_name = stderr_path .file_name() .unwrap_or_else(|| OsStr::new("test.stderr")); let wip_path = wip_dir.join(stderr_name); message::write_stderr_wip(&wip_path, &stderr_path, preferred); fs::write(wip_path, preferred).map_err(Error::WriteStderr)?; Outcome::CreatedWip } Update::Overwrite => { message::overwrite_stderr(&stderr_path, preferred); fs::write(stderr_path, preferred).map_err(Error::WriteStderr)?; Outcome::Passed } }; message::fail_output(Warn, build_stdout); return Ok(outcome); } let expected = fs::read_to_string(&stderr_path) .map_err(Error::ReadStderr)? .replace("\r\n", "\n"); if variations.any(|stderr| expected == stderr) { message::ok(); return Ok(Outcome::Passed); } match project.update { Update::Wip => { message::mismatch(&expected, preferred); Err(Error::Mismatch) } Update::Overwrite => { message::overwrite_stderr(&stderr_path, preferred); fs::write(stderr_path, preferred).map_err(Error::WriteStderr)?; Ok(Outcome::Passed) } } } } fn check_exists(path: &Path) -> Result<()> { if path.exists() { return Ok(()); } match File::open(path) { Ok(_) => Ok(()), Err(err) => Err(Error::Open(path.to_owned(), err)), } } impl ExpandedTest { fn run(self, project: &Project) -> Result<Outcome> { match self.error { None => self.test.run(project, &self.name), Some(error) => { let show_expected = false; message::begin_test(&self.test, show_expected); Err(error) } } } } // Filter which test cases are run by trybuild. 
//
// $ cargo test -- ui trybuild=tuple_structs.rs
//
// The first argument after `--` must be the trybuild test name i.e. the name of
// the function that has the #[test] attribute and calls trybuild. That's to get
// Cargo to run the test at all. The next argument starting with `trybuild=`
// provides a filename filter. Only test cases whose filename contains the
// filter string will be run.
#[allow(clippy::needless_collect)] // false positive https://github.com/rust-lang/rust-clippy/issues/5991
fn filter(tests: &mut Vec<ExpandedTest>) {
    // Collect every `trybuild=<substr>` filter from the process arguments;
    // a bare `trybuild=` with no suffix is ignored.
    let filters = env::args_os()
        .flat_map(OsString::into_string)
        .filter_map(|mut arg| {
            const PREFIX: &str = "trybuild=";
            if arg.starts_with(PREFIX) && arg != PREFIX {
                Some(arg.split_off(PREFIX.len()))
            } else {
                None
            }
        })
        .collect::<Vec<String>>();

    // No filters means run everything.
    if filters.is_empty() {
        return;
    }

    // Keep a test if ANY filter substring occurs in its path.
    tests.retain(|t| {
        filters
            .iter()
            .any(|f| t.test.path.to_string_lossy().contains(f))
    });
}

// Deserialized shape of one line of `cargo --message-format=json` output.
#[derive(Deserialize)]
struct CargoMessage {
    #[allow(dead_code)]
    reason: Reason,
    target: RustcTarget,
    message: RustcMessage,
}

// Only compiler-message records are of interest; any other `reason` fails
// deserialization and the line is skipped.
#[derive(Deserialize)]
enum Reason {
    #[serde(rename = "compiler-message")]
    CompilerMessage,
}

#[derive(Deserialize)]
struct RustcTarget {
    src_path: PathBuf,
}

#[derive(Deserialize)]
struct RustcMessage {
    // Human-readable rendering of the diagnostic.
    rendered: String,
    level: String,
}

// Output of parse_cargo_json: the non-JSON stdout plus per-source-file
// accumulated diagnostics.
struct ParsedOutputs {
    stdout: String,
    stderrs: Map<PathBuf, Stderr>,
}

// Accumulated diagnostics for a single test source file.
struct Stderr {
    // false once any "error"-level diagnostic is seen.
    success: bool,
    stderr: Variations,
}

impl Default for Stderr {
    fn default() -> Self {
        Stderr {
            success: true,
            stderr: Variations::default(),
        }
    }
}

// Split cargo's JSON-message stdout into (a) plain passthrough stdout and
// (b) normalized diagnostics grouped by the test source file they belong to.
fn parse_cargo_json(
    project: &Project,
    stdout: &[u8],
    path_map: &Map<PathBuf, (&Name, &Test)>,
) -> ParsedOutputs {
    let mut map = Map::new();
    let mut nonmessage_stdout = String::new();
    let mut remaining = &*String::from_utf8_lossy(stdout);
    let mut seen = Set::new();
    while !remaining.is_empty() {
        // JSON message lines always start with {"reason": — everything before
        // the next such marker is ordinary stdout.
        let begin = match remaining.find("{\"reason\":") {
            Some(begin) => begin,
            None => break,
        };
        let (nonmessage, rest) = remaining.split_at(begin);
        nonmessage_stdout.push_str(nonmessage);
        // One JSON message per line; keep the trailing newline with it.
        let len = match rest.find('\n') {
            Some(end) => end + 1,
            None => rest.len(),
        };
        let (message, rest) = rest.split_at(len);
        remaining = rest;
        if !seen.insert(message) {
            // Discard duplicate messages. This might no longer be necessary
            // after https://github.com/rust-lang/rust/issues/106571 is fixed.
            // Normally rustc would filter duplicates itself and I think this is
            // a short-lived bug.
            continue;
        }
        if let Ok(de) = serde_json::from_str::<CargoMessage>(message) {
            if de.message.level != "failure-note" {
                // Only keep diagnostics for files that belong to this run.
                let (name, test) = match path_map.get(&de.target.src_path) {
                    Some(test) => test,
                    None => continue,
                };
                let entry = map
                    .entry(de.target.src_path)
                    .or_insert_with(Stderr::default);
                if de.message.level == "error" {
                    entry.success = false;
                }
                // Scrub machine-specific paths etc. from the rendered text so
                // it is comparable to the checked-in snapshot.
                let normalized = normalize::diagnostics(
                    &de.message.rendered,
                    Context {
                        krate: &name.0,
                        source_dir: &project.source_dir,
                        workspace: &project.workspace,
                        input_file: &test.path,
                        target_dir: &project.target_dir,
                        path_dependencies: &project.path_dependencies,
                    },
                );
                entry.stderr.concat(&normalized);
            }
        }
    }
    // Whatever trails the last JSON message is also ordinary stdout.
    nonmessage_stdout.push_str(remaining);
    ParsedOutputs {
        stdout: nonmessage_stdout,
        stderrs: map,
    }
}
use pyo3::prelude::*; use pyo3::wrap_pyfunction; #[pyclass] struct MyClass{ #[pyo3(get, set)] num: usize, #[pyo3(get, set)] val: String, } #[pyfunction] fn sum_as_str(a: usize, b: usize) -> MyClass { MyClass { num: (a + b), val: (a+b).to_string() } } #[pymodule] fn {{crate_name}}(_: Python, m: &PyModule) -> PyResult<()> { m.add_wrapped(wrap_pyfunction!(sum_as_str))?; Ok(()) }
// Shared fixtures for near-sdk-sim integration tests of the PARAS vesting
// contract: deploys a fungible token plus a vesting contract into a local
// simulator and hands the accounts back to each test.
use paras_vesting_contract::ContractContract as VestingContract;
use near_sdk::json_types::{U128, U64};
use near_sdk::serde_json::json;
use near_sdk_sim::{
    deploy, init_simulator, to_yocto, ContractAccount, UserAccount, DEFAULT_GAS, STORAGE_AMOUNT,
};

// Load in contract bytes at runtime
near_sdk_sim::lazy_static_include::lazy_static_include_bytes! {
    FT_WASM_BYTES => "res/fungible_token.wasm",
    VESTING_WASM_BYTES => "res/paras_vesting_contract.wasm",
}

pub const FT_ID: &str = "ft";
pub const VESTING_ID: &str = "vesting";
// Durations are expressed in nanoseconds (NEAR block timestamps).
pub const ONE_MONTH: u64 = 2629746000000000; // 30.436875*24*60*60*10**9
pub const TWO_YEARS: u64 = ONE_MONTH * 12 * 2;
pub const JUNE_1_2021: u64 = 1622505600000000000; // Tuesday, June 1, 2021 12:00:00 AM GMT
pub const OCTOBER_1_2021: u64 = 1633046400000000000;
//const ONE_DAY:u64 = 86400000000000;
pub const SIX_MONTHS: u64 = ONE_MONTH * 6;
// Token uses 18 decimals.
pub const ONE_MILLION_COIN: u128 = 1_000_000 * 10u128.pow(18);

/// PARAS to yoctoPARAS
pub fn ptoy(paras_amount: u128) -> u128 {
    paras_amount * 10u128.pow(18)
}

/// yoctoPARAS to whole PARAS (integer division truncates any fraction).
pub fn ytop(paras_amount: u128) -> u128 {
    paras_amount / 10u128.pow(18)
}

/// Register `user` with the FT contract by paying its storage deposit.
pub fn register_user(user: &near_sdk_sim::UserAccount) {
    user.call(
        FT_ID.to_string(),
        "storage_deposit",
        &json!({
            "account_id": user.valid_account_id()
        })
        .to_string()
        .into_bytes(),
        near_sdk_sim::DEFAULT_GAS / 2,
        near_sdk::env::storage_byte_cost() * 125, // attached deposit
    )
    .assert_success();
}

/// Spin up the simulator, deploy FT + vesting contracts, and create `alice`
/// as the vesting recipient.
///
/// `is_one_month`: when true the vesting starts one month before
/// OCTOBER_1_2021 with no cliff; otherwise it starts JUNE_1_2021 with a
/// six-month cliff. Both variants vest ONE_MILLION_COIN over two years.
///
/// Returns (root, ft, vesting, alice).
pub fn init(is_one_month: bool) -> (UserAccount, UserAccount, ContractAccount<VestingContract>, UserAccount) {
    // Use `None` for default genesis configuration; more info below
    let root = init_simulator(None);

    let ft = root.deploy(
        &FT_WASM_BYTES,
        FT_ID.to_string(),
        STORAGE_AMOUNT, // attached deposit
    );

    ft.call(
        FT_ID.into(),
        "new_paras_meta",
        &json!({
            "owner_id": root.valid_account_id(),
        })
        .to_string()
        .into_bytes(),
        DEFAULT_GAS / 2,
        0,
    )
    .assert_success();

    let alice = root.create_user(
        "alice".to_string(),
        to_yocto("100") // initial balance
    );
    register_user(&alice);

    let vesting: ContractAccount<VestingContract>;
    if is_one_month {
        vesting = deploy!(
            contract: VestingContract,
            contract_id: VESTING_ID,
            bytes: &VESTING_WASM_BYTES,
            signer_account: root,
            init_method: new(
                root.valid_account_id(),
                alice.valid_account_id(),
                ft.valid_account_id(),
                U128::from(ONE_MILLION_COIN),
                (OCTOBER_1_2021 - ONE_MONTH).into(),
                TWO_YEARS.into(), // duration
                U64::from(0), // cliff
                true// revocable
            )
        );
    } else {
        vesting = deploy!(
            contract: VestingContract,
            contract_id: VESTING_ID,
            bytes: &VESTING_WASM_BYTES,
            signer_account: root,
            init_method: new(
                root.valid_account_id(),
                alice.valid_account_id(),
                ft.valid_account_id(),
                U128::from(ONE_MILLION_COIN),
                JUNE_1_2021.into(), // start
                TWO_YEARS.into(), // duration
                SIX_MONTHS.into(), // cliff
                true// revocable
            )
        );
    }
    // The vesting contract must also hold FT storage to receive tokens.
    register_user(&vesting.user_account);

    (root, ft, vesting, alice)
}
pub mod gpio; pub mod max31855;
use std::fmt::Debug; use std::ops::Deref; #[derive(Debug)] enum List<T> { Cons(T, Box<List<T>>), Nil, } #[derive(Debug)] struct MyBox<T: Debug>(T); impl<T> MyBox<T> where T: Debug, { fn new(x: T) -> MyBox<T> { MyBox(x) } } impl<T> Deref for MyBox<T> where T: Debug, { type Target = T; fn deref(&self) -> &T { &self.0 } } impl<T> Drop for MyBox<T> where T: Debug, { fn drop(&mut self) { println!("Dropping MyBox wahoo!: {:?}", self); } } use crate::List::{Cons, Nil}; fn hello(name: &str) { println!("Hello, {}!", name); } fn main() { let b = Box::new(5); println!("b = {}", b); let list = Cons(1, Box::new(Cons(2, Box::new(Cons(3, Box::new(Nil)))))); println!("b = {:?}", list); let x = 5; let y = &x; assert_eq!(x, 5); assert_eq!(*y, 5); let x = 5; let y = Box::new(x); assert_eq!(x, 5); assert_eq!(*y, 5); let x = 5; let y = MyBox::new(x); assert_eq!(x, 5); assert_eq!(*y, 5); let m = MyBox::new(String::from("test")); hello(&m); let t = MyBox::new(String::from("early drop test")); hello(&t); std::mem::drop(t); println!("early drop test dropped before end of main."); }
use super::{Concrete, Endpoint, Logical};
use crate::{resolve, stack_labels};
use linkerd_app_core::{
    classify,
    config::ProxyConfig,
    metrics, profiles,
    proxy::{api_resolve::Metadata, core::Resolve, http},
    retry, svc,
    tls::ReasonForNoPeerName,
    Addr, Error, CANONICAL_DST_HEADER, DST_OVERRIDE_HEADER,
};
use tracing::debug_span;

/// Builds the outbound HTTP "logical" stack: profile-based request routing
/// (retries, timeouts, per-route metrics), traffic splitting across concrete
/// destinations, and a load balancer over resolved endpoints. When a logical
/// target should not be resolved, requests bypass all of that and go straight
/// to the `endpoint` stack via the final `push_switch`.
///
/// NOTE: the layer order below is load-bearing — layers apply bottom-up and
/// the `check_*` calls are compile-time assertions of each stage's interface.
pub fn stack<B, E, ESvc, R>(
    config: &ProxyConfig,
    endpoint: E,
    resolve: R,
    metrics: metrics::Proxy,
) -> impl svc::NewService<
    Logical,
    Service = impl svc::Service<
        http::Request<B>,
        Response = http::Response<http::BoxBody>,
        Error = Error,
        Future = impl Send,
    >,
> + Clone
where
    B: http::HttpBody<Error = Error> + std::fmt::Debug + Default + Send + 'static,
    B::Data: Send + 'static,
    E: svc::NewService<Endpoint, Service = ESvc> + Clone + Send + 'static,
    ESvc: svc::Service<http::Request<http::BoxBody>, Response = http::Response<http::BoxBody>>
        + Send
        + 'static,
    ESvc::Error: Into<Error>,
    ESvc::Future: Send,
    R: Resolve<Addr, Endpoint = Metadata, Error = Error> + Clone + Send + 'static,
    R::Resolution: Send,
    R::Future: Send,
{
    let ProxyConfig {
        buffer_capacity,
        cache_max_idle_age,
        dispatch_timeout,
        ..
    } = config.clone();
    // Discovery streams are torn down after twice the cache idle age.
    let watchdog = cache_max_idle_age * 2;

    svc::stack(endpoint.clone())
        .check_new_service::<Endpoint, http::Request<http::BoxBody>>()
        .push_on_response(
            svc::layers()
                .push(svc::layer::mk(svc::SpawnReady::new))
                .push(
                    metrics
                        .stack
                        .layer(stack_labels("http", "balance.endpoint")),
                )
                .push(http::BoxRequest::layer()),
        )
        .check_new_service::<Endpoint, http::Request<_>>()
        // Discover endpoints for the concrete address via the resolver.
        .push(resolve::layer(resolve, watchdog))
        .check_service::<Concrete>()
        .push_on_response(
            svc::layers()
                .push(http::balance::layer(
                    crate::EWMA_DEFAULT_RTT,
                    crate::EWMA_DECAY,
                ))
                .push(svc::layer::mk(svc::SpawnReady::new))
                // If the balancer has been empty/unavailable for 10s, eagerly fail
                // requests.
                .push(svc::FailFast::layer("HTTP Balancer", dispatch_timeout))
                .push(metrics.stack.layer(stack_labels("http", "concrete"))),
        )
        .push(svc::MapErrLayer::new(Into::into))
        .into_new_service()
        .check_new_service::<Concrete, http::Request<_>>()
        .instrument(|c: &Concrete| match c.resolve.as_ref() {
            None => debug_span!("concrete"),
            Some(addr) => debug_span!("concrete", %addr),
        })
        .check_new_service::<Concrete, http::Request<_>>()
        // The concrete address is only set when the profile could be
        // resolved. Endpoint resolution is skipped when there is no
        // concrete address.
        .push_map_target(Concrete::from)
        .check_new_service::<(Option<Addr>, Logical), http::Request<_>>()
        // Distribute requests over the profile's weighted concrete targets.
        .push(profiles::split::layer())
        .check_new_service::<Logical, http::Request<_>>()
        // Drives concrete stacks to readiness and makes the split
        // cloneable, as required by the retry middleware.
        .push_on_response(
            svc::layers()
                .push(svc::FailFast::layer("HTTP Logical", dispatch_timeout))
                .push_spawn_buffer(buffer_capacity),
        )
        .check_new_service::<Logical, http::Request<_>>()
        .push(profiles::http::route_request::layer(
            svc::proxies()
                .push(
                    metrics
                        .http_route_actual
                        .to_layer::<classify::Response, _>(),
                )
                // Sets an optional retry policy.
                .push(retry::layer(metrics.http_route_retry))
                // Sets an optional request timeout.
                .push(http::MakeTimeoutLayer::default())
                // Records per-route metrics.
                .push(metrics.http_route.to_layer::<classify::Response, _>())
                // Sets the per-route response classifier as a request
                // extension.
                .push(classify::NewClassify::layer())
                .push_map_target(Logical::mk_route)
                .into_inner(),
        ))
        .check_new_service::<Logical, http::Request<_>>()
        // Annotate requests with the canonical destination header.
        .push(http::NewHeaderFromTarget::layer(CANONICAL_DST_HEADER))
        .push_on_response(
            svc::layers()
                // Strips headers that may be set by this proxy.
                .push(http::strip_header::request::layer(DST_OVERRIDE_HEADER))
                .push(http::BoxResponse::layer()),
        )
        .instrument(|l: &Logical| debug_span!("logical", dst = %l.addr()))
        .check_new_service::<Logical, http::Request<_>>()
        // Unresolvable targets skip the whole logical stack and hit the
        // endpoint stack directly.
        .push_switch(
            Logical::should_resolve,
            svc::stack(endpoint)
                .push_on_response(http::BoxRequest::layer())
                .push_map_target(Endpoint::from_logical(
                    ReasonForNoPeerName::NotProvidedByServiceDiscovery,
                ))
                .into_inner(),
        )
        .into_inner()
}
#![allow(clippy::type_complexity)] #![allow(clippy::unused_unit)] #![allow(clippy::from_over_into)] #[macro_use] extern crate serde_derive; mod client; mod connection; mod misc; pub use client::WebsocketClientTransport; pub use connection::WebsocketConnection; pub use misc::{connect, new_payload, JsClient, JsPayload};
use crate::service::id_generator::snow_worker::SnowWorkerM1; use crate::service::id_generator::id_generator_options::IdGeneratorOptions; use std::sync::{Mutex}; use lazy_static::lazy_static; lazy_static! { pub static ref WORKPOLL:Mutex<Vec<SnowWorkerM1>> = Mutex::new(Vec::new()); } pub fn run() { //let mut work_pool: Mutex<Vec<SnowWorkerM1>> = Mutex::new(Vec::new()) ; for i in 1..64 { //定义work_id 对应的配置 let id_generator_options = IdGeneratorOptions::new(i as u32); //初始化对象 let object = SnowWorkerM1::new(id_generator_options); //添加到数据组中 WORKPOLL.lock().unwrap().push(object); println!("work_id {} 加载中", i) } println!("work_id_object加载完毕"); //return work_pool; }
#![feature(phase)] #![feature(macro_rules)] extern crate document; #[phase(plugin, link)] extern crate xpath; use std::collections::HashMap; use std::num::Float; use document::Package; use document::dom4::{Document,Root,Element,Text}; use xpath::XPathValue::{Boolean,Number,String,Nodes}; use xpath::{Functions,Variables}; use xpath::{XPathValue,XPathEvaluationContext}; use xpath::nodeset::ToNode; use xpath::token::XPathToken; use xpath::tokenizer::TokenResult; use xpath::expression::{XPathExpression,SubExpression}; use xpath::parser::{XPathParser,ParseResult}; use xpath::parser::ParseErr::{ EmptyPredicate, ExtraUnparsedTokens, InvalidNodeTest, InvalidXPathAxis, RanOutOfInput, RightHandSideExpressionMissing, TokenizerError, TrailingSlash, UnexpectedToken, }; macro_rules! tokens( ($($e:expr),*) => ({ // leading _ to allow empty construction without a warning. let mut _temp: Vec<TokenResult> = ::std::vec::Vec::new(); $(_temp.push(Ok($e));)* _temp }); ($($e:expr),+,) => (tokens!($($e),+)) ) trait ApproxEq { fn is_approx_eq(&self, other: &Self) -> bool; } impl ApproxEq for f64 { fn is_approx_eq(&self, other: &f64) -> bool { (*self - *other).abs() < 1.0e-6 } } impl<'d> ApproxEq for XPathValue<'d> { fn is_approx_eq(&self, other: &XPathValue<'d>) -> bool { match (self, other) { (&Number(ref x), &Number(ref y)) => x.is_approx_eq(y), _ => panic!("It's nonsensical to compare these quantities"), } } } macro_rules! 
assert_approx_eq( ($a:expr, $b:expr) => ({ let (a, b) = (&$a, &$b); assert!(a.is_approx_eq(b), "{} is not approximately equal to {}", *a, *b); }) ) struct TestDoc<'d>(Document<'d>); impl<'d> TestDoc<'d> { fn root(&'d self) -> Root<'d> { let &TestDoc(ref doc) = self; doc.root() } fn top_node(&'d self) -> Element<'d> { let &TestDoc(ref doc) = self; let kids = doc.root().children(); match kids.len() { 0 => { let n = doc.create_element("the-top-node"); doc.root().append_child(n); n }, 1 => { kids[0].element().expect("not an element") }, _ => panic!("Too many top nodes"), } } fn add_top_child(&'d self, name: &str) -> Element<'d> { self.add_child(self.top_node(), name) } fn add_child(&'d self, parent: Element<'d>, name: &str) -> Element<'d> { let &TestDoc(ref doc) = self; let n = doc.create_element(name); parent.append_child(n); n } fn add_text(&'d self, parent: Element<'d>, value: &str) -> Text<'d> { let &TestDoc(ref doc) = self; let tn = doc.create_text(value); parent.append_child(tn); tn } } struct Exercise<'d> { doc: &'d TestDoc<'d>, functions: Functions, variables: Variables<'d>, parser: XPathParser, } impl<'d> Exercise<'d> { fn new(doc: &'d TestDoc<'d>) -> Exercise<'d> { let mut functions = HashMap::new(); xpath::function::register_core_functions(&mut functions); Exercise { doc: doc, functions: functions, variables: HashMap::new(), parser: XPathParser::new(), } } fn add_var(&mut self, name: &str, value: XPathValue<'d>) { self.variables.insert(name.to_string(), value); } fn parse_raw(&self, tokens: Vec<TokenResult>) -> ParseResult { self.parser.parse(tokens.into_iter()) } fn parse(&self, tokens: Vec<TokenResult>) -> SubExpression { self.parse_raw(tokens).unwrap().unwrap() } fn evaluate(&'d self, expr: &XPathExpression) -> XPathValue<'d> { self.evaluate_on(expr, self.doc.top_node()) } fn evaluate_on<N : ToNode<'d>>(&self, expr: &XPathExpression, node: N) -> XPathValue<'d> { let node = node.to_node(); let mut context = XPathEvaluationContext::new(node, 
&self.functions, &self.variables); context.next(node); expr.evaluate(&context) } } #[test] fn parses_string_as_child() { let tokens = tokens![XPathToken::String("hello".to_string())]; let package = Package::new(); let doc = TestDoc(package.as_document()); let hello = doc.add_top_child("hello"); let ex = Exercise::new(&doc); let expr = ex.parse(tokens); assert_eq!(Nodes(nodeset![hello]), ex.evaluate_on(&*expr, doc.top_node())); } #[test] fn parses_two_strings_as_grandchild() { let tokens = tokens![ XPathToken::String("hello".to_string()), XPathToken::Slash, XPathToken::String("world".to_string()) ]; let package = Package::new(); let doc = TestDoc(package.as_document()); let hello = doc.add_top_child("hello"); let world = doc.add_child(hello, "world"); let ex = Exercise::new(&doc); let expr = ex.parse(tokens); assert_eq!(Nodes(nodeset![world]), ex.evaluate_on(&*expr, doc.top_node())); } #[test] fn parses_self_axis() { let tokens = tokens![ XPathToken::Axis("self".to_string()), XPathToken::DoubleColon, XPathToken::String("the-top-node".to_string()) ]; let package = Package::new(); let doc = TestDoc(package.as_document()); let ex = Exercise::new(&doc); let expr = ex.parse(tokens); assert_eq!(Nodes(nodeset![doc.top_node()]), ex.evaluate_on(&*expr, doc.top_node())); } #[test] fn parses_parent_axis() { let tokens = tokens![ XPathToken::Axis("parent".to_string()), XPathToken::DoubleColon, XPathToken::String("the-top-node".to_string()) ]; let package = Package::new(); let doc = TestDoc(package.as_document()); let hello = doc.add_top_child("hello"); let ex = Exercise::new(&doc); let expr = ex.parse(tokens); assert_eq!(Nodes(nodeset![doc.top_node()]), ex.evaluate_on(&*expr, hello)); } #[test] fn parses_descendant_axis() { let tokens = tokens![ XPathToken::Axis("descendant".to_string()), XPathToken::DoubleColon, XPathToken::String("two".to_string()) ]; let package = Package::new(); let doc = TestDoc(package.as_document()); let one = doc.add_top_child("one"); let two = 
doc.add_child(one, "two"); let ex = Exercise::new(&doc); let expr = ex.parse(tokens); assert_eq!(Nodes(nodeset![two]), ex.evaluate_on(&*expr, doc.top_node())); } #[test] fn parses_descendant_or_self_axis() { let tokens = tokens![ XPathToken::Axis("descendant-or-self".to_string()), XPathToken::DoubleColon, XPathToken::String("*".to_string()) ]; let package = Package::new(); let doc = TestDoc(package.as_document()); let one = doc.add_top_child("one"); let two = doc.add_child(one, "two"); let ex = Exercise::new(&doc); let expr = ex.parse(tokens); assert_eq!(Nodes(nodeset![one, two]), ex.evaluate_on(&*expr, one)); } #[test] fn parses_attribute_axis() { let tokens = tokens![ XPathToken::Axis("attribute".to_string()), XPathToken::DoubleColon, XPathToken::String("*".to_string()) ]; let package = Package::new(); let doc = TestDoc(package.as_document()); let one = doc.add_top_child("one"); let attr = one.set_attribute_value("hello", "world"); let ex = Exercise::new(&doc); let expr = ex.parse(tokens); assert_eq!(Nodes(nodeset![attr]), ex.evaluate_on(&*expr, one)); } #[test] fn parses_child_with_same_name_as_an_axis() { let tokens = tokens![XPathToken::String("self".to_string())]; let package = Package::new(); let doc = TestDoc(package.as_document()); let element = doc.add_top_child("self"); let ex = Exercise::new(&doc); let expr = ex.parse(tokens); assert_eq!(Nodes(nodeset![element]), ex.evaluate_on(&*expr, doc.top_node())); } #[test] fn parses_node_node_test() { let tokens = tokens![ XPathToken::NodeTest("node".to_string()), XPathToken::LeftParen, XPathToken::RightParen ]; let package = Package::new(); let doc = TestDoc(package.as_document()); let one = doc.add_top_child("one"); let two = doc.add_child(one, "two"); let ex = Exercise::new(&doc); let expr = ex.parse(tokens); assert_eq!(Nodes(nodeset![two]), ex.evaluate_on(&*expr, one)); } #[test] fn parses_text_node_test() { let tokens = tokens![ XPathToken::NodeTest("text".to_string()), XPathToken::LeftParen, 
XPathToken::RightParen ]; let package = Package::new(); let doc = TestDoc(package.as_document()); let one = doc.add_top_child("one"); let text = doc.add_text(one, "text"); let ex = Exercise::new(&doc); let expr = ex.parse(tokens); assert_eq!(Nodes(nodeset![text]), ex.evaluate_on(&*expr, one)); } #[test] fn parses_axis_and_node_test() { let tokens = tokens![ XPathToken::Axis("self".to_string()), XPathToken::DoubleColon, XPathToken::NodeTest("text".to_string()), XPathToken::LeftParen, XPathToken::RightParen ]; let package = Package::new(); let doc = TestDoc(package.as_document()); let one = doc.add_top_child("one"); let text = doc.add_text(one, "text"); let ex = Exercise::new(&doc); let expr = ex.parse(tokens); assert_eq!(Nodes(nodeset![text]), ex.evaluate_on(&*expr, text)); } #[test] fn numeric_predicate_selects_indexed_node() { let tokens = tokens![ XPathToken::String("*".to_string()), XPathToken::LeftBracket, XPathToken::Number(2.0), XPathToken::RightBracket ]; let package = Package::new(); let doc = TestDoc(package.as_document()); doc.add_top_child("first"); let second = doc.add_top_child("second"); let ex = Exercise::new(&doc); let expr = ex.parse(tokens); assert_eq!(Nodes(nodeset![second]), ex.evaluate_on(&*expr, doc.top_node())); } #[test] fn string_literal() { let tokens = tokens![XPathToken::Literal("string".to_string())]; let package = Package::new(); let doc = TestDoc(package.as_document()); let ex = Exercise::new(&doc); let expr = ex.parse(tokens); assert_eq!(String("string".to_string()), ex.evaluate(&*expr)); } #[test] fn predicate_accepts_any_expression() { let tokens = tokens![ XPathToken::String("*".to_string()), XPathToken::LeftBracket, XPathToken::Function("true".to_string()), XPathToken::LeftParen, XPathToken::RightParen, XPathToken::Or, XPathToken::Function("false".to_string()), XPathToken::LeftParen, XPathToken::RightParen, XPathToken::RightBracket ]; let package = Package::new(); let doc = TestDoc(package.as_document()); let first = 
doc.add_top_child("first"); let second = doc.add_top_child("second"); let ex = Exercise::new(&doc); let expr = ex.parse(tokens); assert_eq!(Nodes(nodeset![first, second]), ex.evaluate_on(&*expr, doc.top_node())); } #[test] fn true_function_predicate_selects_all_nodes() { let tokens = tokens![ XPathToken::String("*".to_string()), XPathToken::LeftBracket, XPathToken::Function("true".to_string()), XPathToken::LeftParen, XPathToken::RightParen, XPathToken::RightBracket ]; let package = Package::new(); let doc = TestDoc(package.as_document()); let first = doc.add_top_child("first"); let second = doc.add_top_child("second"); let ex = Exercise::new(&doc); let expr = ex.parse(tokens); assert_eq!(Nodes(nodeset![first, second]), ex.evaluate_on(&*expr, doc.top_node())); } #[test] fn false_function_predicate_selects_no_nodes() { let tokens = tokens![ XPathToken::String("*".to_string()), XPathToken::LeftBracket, XPathToken::Function("false".to_string()), XPathToken::LeftParen, XPathToken::RightParen, XPathToken::RightBracket ]; let package = Package::new(); let doc = TestDoc(package.as_document()); doc.add_top_child("first"); doc.add_top_child("second"); let ex = Exercise::new(&doc); let expr = ex.parse(tokens); assert_eq!(Nodes(nodeset![]), ex.evaluate_on(&*expr, doc.top_node())); } #[test] fn multiple_predicates() { let tokens = tokens![ XPathToken::String("*".to_string()), XPathToken::LeftBracket, XPathToken::Number(2.0), XPathToken::RightBracket, XPathToken::LeftBracket, XPathToken::Number(1.0), XPathToken::RightBracket ]; let package = Package::new(); let doc = TestDoc(package.as_document()); doc.add_top_child("first"); let second = doc.add_top_child("second"); let ex = Exercise::new(&doc); let expr = ex.parse(tokens); assert_eq!(Nodes(nodeset![second]), ex.evaluate_on(&*expr, doc.top_node())); } #[test] fn functions_accept_arguments() { let tokens = tokens![ XPathToken::Function("not".to_string()), XPathToken::LeftParen, XPathToken::Function("true".to_string()), 
XPathToken::LeftParen, XPathToken::RightParen, XPathToken::RightParen, ]; let package = Package::new(); let doc = TestDoc(package.as_document()); let ex = Exercise::new(&doc); let expr = ex.parse(tokens); assert_eq!(Boolean(false), ex.evaluate(&*expr)); } #[test] fn functions_accept_any_expression_as_an_argument() { let tokens = tokens![ XPathToken::Function("not".to_string()), XPathToken::LeftParen, XPathToken::Function("true".to_string()), XPathToken::LeftParen, XPathToken::RightParen, XPathToken::Or, XPathToken::Function("false".to_string()), XPathToken::LeftParen, XPathToken::RightParen, XPathToken::RightParen, ]; let package = Package::new(); let doc = TestDoc(package.as_document()); let ex = Exercise::new(&doc); let expr = ex.parse(tokens); assert_eq!(Boolean(false), ex.evaluate(&*expr)); } #[test] fn numeric_literal() { let tokens = tokens![XPathToken::Number(3.2)]; let package = Package::new(); let doc = TestDoc(package.as_document()); let ex = Exercise::new(&doc); let expr = ex.parse(tokens); assert_approx_eq!(Number(3.2), ex.evaluate(&*expr)); } #[test] fn addition_of_two_numbers() { let tokens = tokens![ XPathToken::Number(1.1), XPathToken::PlusSign, XPathToken::Number(2.2) ]; let package = Package::new(); let doc = TestDoc(package.as_document()); let ex = Exercise::new(&doc); let expr = ex.parse(tokens); assert_approx_eq!(Number(3.3), ex.evaluate(&*expr)); } #[test] fn addition_of_multiple_numbers() { let tokens = tokens![ XPathToken::Number(1.1), XPathToken::PlusSign, XPathToken::Number(2.2), XPathToken::PlusSign, XPathToken::Number(3.3) ]; let package = Package::new(); let doc = TestDoc(package.as_document()); let ex = Exercise::new(&doc); let expr = ex.parse(tokens); assert_approx_eq!(Number(6.6), ex.evaluate(&*expr)); } #[test] fn subtraction_of_two_numbers() { let tokens = tokens![ XPathToken::Number(1.1), XPathToken::MinusSign, XPathToken::Number(2.2), ]; let package = Package::new(); let doc = TestDoc(package.as_document()); let ex = 
Exercise::new(&doc); let expr = ex.parse(tokens); assert_approx_eq!(Number(-1.1), ex.evaluate(&*expr)); } #[test] fn additive_expression_is_left_associative() { let tokens = tokens![ XPathToken::Number(1.1), XPathToken::MinusSign, XPathToken::Number(2.2), XPathToken::MinusSign, XPathToken::Number(3.3), ]; let package = Package::new(); let doc = TestDoc(package.as_document()); let ex = Exercise::new(&doc); let expr = ex.parse(tokens); assert_approx_eq!(Number(-4.4), ex.evaluate(&*expr)); } #[test] fn multiplication_of_two_numbers() { let tokens = tokens![ XPathToken::Number(1.1), XPathToken::Multiply, XPathToken::Number(2.2), ]; let package = Package::new(); let doc = TestDoc(package.as_document()); let ex = Exercise::new(&doc); let expr = ex.parse(tokens); assert_approx_eq!(Number(2.42), ex.evaluate(&*expr)); } #[test] fn division_of_two_numbers() { let tokens = tokens![ XPathToken::Number(7.1), XPathToken::Divide, XPathToken::Number(0.1), ]; let package = Package::new(); let doc = TestDoc(package.as_document()); let ex = Exercise::new(&doc); let expr = ex.parse(tokens); assert_approx_eq!(Number(71.0), ex.evaluate(&*expr)); } #[test] fn remainder_of_two_numbers() { let tokens = tokens![ XPathToken::Number(7.1), XPathToken::Remainder, XPathToken::Number(3.0), ]; let package = Package::new(); let doc = TestDoc(package.as_document()); let ex = Exercise::new(&doc); let expr = ex.parse(tokens); assert_approx_eq!(Number(1.1), ex.evaluate(&*expr)); } #[test] fn unary_negation() { let tokens = tokens![ XPathToken::MinusSign, XPathToken::Number(7.2), ]; let package = Package::new(); let doc = TestDoc(package.as_document()); let ex = Exercise::new(&doc); let expr = ex.parse(tokens); assert_approx_eq!(Number(-7.2), ex.evaluate(&*expr)); } #[test] fn repeated_unary_negation() { let tokens = tokens![ XPathToken::MinusSign, XPathToken::MinusSign, XPathToken::MinusSign, XPathToken::Number(7.2), ]; let package = Package::new(); let doc = TestDoc(package.as_document()); let ex = 
Exercise::new(&doc); let expr = ex.parse(tokens); assert_approx_eq!(Number(-7.2), ex.evaluate(&*expr)); } #[test] fn top_level_function_call() { let tokens = tokens![ XPathToken::Function("true".to_string()), XPathToken::LeftParen, XPathToken::RightParen, ]; let package = Package::new(); let doc = TestDoc(package.as_document()); let ex = Exercise::new(&doc); let expr = ex.parse(tokens); assert_eq!(Boolean(true), ex.evaluate(&*expr)); } #[test] fn or_expression() { let tokens = tokens![ XPathToken::Function("true".to_string()), XPathToken::LeftParen, XPathToken::RightParen, XPathToken::Or, XPathToken::Function("false".to_string()), XPathToken::LeftParen, XPathToken::RightParen, ]; let package = Package::new(); let doc = TestDoc(package.as_document()); let ex = Exercise::new(&doc); let expr = ex.parse(tokens); assert_eq!(Boolean(true), ex.evaluate(&*expr)); } #[test] fn and_expression() { let tokens = tokens![ XPathToken::Number(1.2), XPathToken::And, XPathToken::Number(0.0), ]; let package = Package::new(); let doc = TestDoc(package.as_document()); let ex = Exercise::new(&doc); let expr = ex.parse(tokens); assert_eq!(Boolean(false), ex.evaluate(&*expr)); } #[test] fn equality_expression() { let tokens = tokens![ XPathToken::Number(1.2), XPathToken::Equal, XPathToken::Number(1.1), ]; let package = Package::new(); let doc = TestDoc(package.as_document()); let ex = Exercise::new(&doc); let expr = ex.parse(tokens); assert_eq!(Boolean(false), ex.evaluate(&*expr)); } #[test] fn inequality_expression() { let tokens = tokens![ XPathToken::Number(1.2), XPathToken::NotEqual, XPathToken::Number(1.2), ]; let package = Package::new(); let doc = TestDoc(package.as_document()); let ex = Exercise::new(&doc); let expr = ex.parse(tokens); assert_eq!(Boolean(false), ex.evaluate(&*expr)); } #[test] fn less_than_expression() { let tokens = tokens![ XPathToken::Number(1.2), XPathToken::LessThan, XPathToken::Number(1.2), ]; let package = Package::new(); let doc = 
TestDoc(package.as_document()); let ex = Exercise::new(&doc); let expr = ex.parse(tokens); assert_eq!(Boolean(false), ex.evaluate(&*expr)); } #[test] fn less_than_or_equal_expression() { let tokens = tokens![ XPathToken::Number(1.2), XPathToken::LessThanOrEqual, XPathToken::Number(1.2), ]; let package = Package::new(); let doc = TestDoc(package.as_document()); let ex = Exercise::new(&doc); let expr = ex.parse(tokens); assert_eq!(Boolean(true), ex.evaluate(&*expr)); } #[test] fn greater_than_expression() { let tokens = tokens![ XPathToken::Number(1.2), XPathToken::GreaterThan, XPathToken::Number(1.2), ]; let package = Package::new(); let doc = TestDoc(package.as_document()); let ex = Exercise::new(&doc); let expr = ex.parse(tokens); assert_eq!(Boolean(false), ex.evaluate(&*expr)); } #[test] fn greater_than_or_equal_expression() { let tokens = tokens![ XPathToken::Number(1.2), XPathToken::GreaterThanOrEqual, XPathToken::Number(1.2), ]; let package = Package::new(); let doc = TestDoc(package.as_document()); let ex = Exercise::new(&doc); let expr = ex.parse(tokens); assert_eq!(Boolean(true), ex.evaluate(&*expr)); } #[test] fn variable_reference() { let tokens = tokens![ XPathToken::DollarSign, XPathToken::String("variable-name".to_string()), ]; let package = Package::new(); let doc = TestDoc(package.as_document()); let mut ex = Exercise::new(&doc); ex.add_var("variable-name", Number(12.3)); let expr = ex.parse(tokens); assert_approx_eq!(Number(12.3), ex.evaluate(&*expr)); } #[test] fn filter_expression() { let tokens = tokens![ XPathToken::DollarSign, XPathToken::String("variable".to_string()), XPathToken::LeftBracket, XPathToken::Number(0.0), XPathToken::RightBracket, ]; let package = Package::new(); let doc = TestDoc(package.as_document()); let value = nodeset![ doc.add_top_child("first-node"), doc.add_top_child("second-node"), ]; let mut ex = Exercise::new(&doc); ex.add_var("variable", Nodes(value)); let expr = ex.parse(tokens); assert_eq!(Nodes(nodeset![]), 
ex.evaluate(&*expr)); } #[test] fn filter_expression_and_relative_path() { let tokens = tokens![ XPathToken::DollarSign, XPathToken::String("variable".to_string()), XPathToken::Slash, XPathToken::String("child".to_string()), ]; let package = Package::new(); let doc = TestDoc(package.as_document()); let parent = doc.add_top_child("parent"); let child = doc.add_child(parent, "child"); let value = nodeset![parent]; let mut ex = Exercise::new(&doc); ex.add_var("variable", Nodes(value)); let expr = ex.parse(tokens); assert_eq!(Nodes(nodeset![child]), ex.evaluate(&*expr)); } #[test] fn union_expression() { let tokens = tokens![ XPathToken::DollarSign, XPathToken::String("variable1".to_string()), XPathToken::Pipe, XPathToken::DollarSign, XPathToken::String("variable2".to_string()), ]; let package = Package::new(); let doc = TestDoc(package.as_document()); let node1 = doc.add_top_child("first-node"); let node2 = doc.add_top_child("second-node"); let mut ex = Exercise::new(&doc); ex.add_var("variable1", Nodes(nodeset![node1])); ex.add_var("variable2", Nodes(nodeset![node2])); let expr = ex.parse(tokens); assert_eq!(Nodes(nodeset![node1, node2]), ex.evaluate(&*expr)); } #[test] fn absolute_path_expression() { let tokens = tokens![ XPathToken::Slash, ]; let package = Package::new(); let doc = TestDoc(package.as_document()); let node1 = doc.add_top_child("first-node"); let node2 = doc.add_child(node1, "second-node"); let ex = Exercise::new(&doc); let expr = ex.parse(tokens); assert_eq!(Nodes(nodeset![doc.root()]), ex.evaluate_on(&*expr, node2)); } #[test] fn absolute_path_with_child_expression() { let tokens = tokens![ XPathToken::Slash, XPathToken::String("*".to_string()), ]; let package = Package::new(); let doc = TestDoc(package.as_document()); let node1 = doc.add_top_child("first-node"); let node2 = doc.add_child(node1, "second-node"); let ex = Exercise::new(&doc); let expr = ex.parse(tokens); assert_eq!(Nodes(nodeset![doc.top_node()]), ex.evaluate_on(&*expr, node2)); } 
// Parser error-reporting tests: each feeds a deliberately malformed token
// stream to the parser and asserts that the expected error variant comes back
// via `res.err()`. `ex.parse_raw` returns the raw Result; tests that exercise
// the tokenizer path call `ex.parser.parse` directly on an iterator.

#[test]
fn unknown_axis_is_reported_as_an_error() {
    // An axis name the parser does not recognize.
    let tokens = tokens![
        XPathToken::Axis("bad-axis".to_string()),
        XPathToken::DoubleColon,
        XPathToken::String("*".to_string())
    ];
    let package = Package::new();
    let doc = TestDoc(package.as_document());
    let ex = Exercise::new(&doc);
    let res = ex.parse_raw(tokens);
    assert_eq!(Some(InvalidXPathAxis("bad-axis".to_string())), res.err());
}

#[test]
fn unknown_node_test_is_reported_as_an_error() {
    // A node-test name (foo()) the parser does not recognize.
    let tokens = tokens![
        XPathToken::NodeTest("bad-node-test".to_string()),
        XPathToken::LeftParen,
        XPathToken::RightParen
    ];
    let package = Package::new();
    let doc = TestDoc(package.as_document());
    let ex = Exercise::new(&doc);
    let res = ex.parse_raw(tokens);
    assert_eq!(Some(InvalidNodeTest("bad-node-test".to_string())), res.err());
}

#[test]
fn unexpected_token_is_reported_as_an_error() {
    // A function call missing its opening parenthesis.
    let tokens = tokens![
        XPathToken::Function("does-not-matter".to_string()),
        XPathToken::RightParen
    ];
    let package = Package::new();
    let doc = TestDoc(package.as_document());
    let ex = Exercise::new(&doc);
    let res = ex.parser.parse(tokens.into_iter());
    assert_eq!(Some(UnexpectedToken(XPathToken::RightParen)), res.err());
}

#[test]
fn binary_operator_without_right_hand_side_is_reported_as_an_error() {
    // `"left" and` — the `and` operator has no right operand.
    let tokens = tokens![
        XPathToken::Literal("left".to_string()),
        XPathToken::And
    ];
    let package = Package::new();
    let doc = TestDoc(package.as_document());
    let ex = Exercise::new(&doc);
    let res = ex.parse_raw(tokens);
    assert_eq!(Some(RightHandSideExpressionMissing), res.err());
}

#[test]
fn unary_operator_without_right_hand_side_is_reported_as_an_error() {
    // A bare unary minus with nothing to negate.
    let tokens = tokens![
        XPathToken::MinusSign,
    ];
    let package = Package::new();
    let doc = TestDoc(package.as_document());
    let ex = Exercise::new(&doc);
    let res = ex.parser.parse(tokens.into_iter());
    assert_eq!(Some(RightHandSideExpressionMissing), res.err());
}

#[test]
fn empty_predicate_is_reported_as_an_error() {
    // `*[]` — a predicate with no expression inside the brackets.
    let tokens = tokens![
        XPathToken::String("*".to_string()),
        XPathToken::LeftBracket,
        XPathToken::RightBracket,
    ];
    let package = Package::new();
    let doc = TestDoc(package.as_document());
    let ex = Exercise::new(&doc);
    let res = ex.parse_raw(tokens);
    assert_eq!(Some(EmptyPredicate), res.err());
}

#[test]
fn relative_path_with_trailing_slash_is_reported_as_an_error() {
    // `*/` — nothing follows the path separator.
    let tokens = tokens![
        XPathToken::String("*".to_string()),
        XPathToken::Slash,
    ];
    let package = Package::new();
    let doc = TestDoc(package.as_document());
    let ex = Exercise::new(&doc);
    let res = ex.parse_raw(tokens);
    assert_eq!(Some(TrailingSlash), res.err());
}

#[test]
fn filter_expression_with_trailing_slash_is_reported_as_an_error() {
    // `$variable/` — same trailing-slash error, but after a filter expression.
    let tokens = tokens![
        XPathToken::DollarSign,
        XPathToken::String("variable".to_string()),
        XPathToken::Slash,
    ];
    let package = Package::new();
    let doc = TestDoc(package.as_document());
    let ex = Exercise::new(&doc);
    let res = ex.parse_raw(tokens);
    assert_eq!(Some(TrailingSlash), res.err());
}

#[test]
fn running_out_of_input_is_reported_as_an_error() {
    // A function token with no following `(` — the stream ends too early.
    let tokens = tokens![XPathToken::Function("func".to_string())];
    let package = Package::new();
    let doc = TestDoc(package.as_document());
    let ex = Exercise::new(&doc);
    let res = ex.parse_raw(tokens);
    assert_eq!(Some(RanOutOfInput), res.err());
}

#[test]
fn having_extra_tokens_is_reported_as_an_error() {
    // A lone `[` cannot start an expression, so it is left unparsed.
    let tokens = tokens![XPathToken::LeftBracket];
    let package = Package::new();
    let doc = TestDoc(package.as_document());
    let ex = Exercise::new(&doc);
    let res = ex.parse_raw(tokens);
    assert_eq!(Some(ExtraUnparsedTokens), res.err());
}

#[test]
fn a_tokenizer_error_is_reported_as_an_error() {
    // Tokenizer failures arrive as `Err` items in the token stream and must
    // be surfaced by the parser, wrapped in `TokenizerError`.
    let tokens = vec![
        Ok(XPathToken::Function("func".to_string())),
        Err(xpath::tokenizer::TokenizerErr::UnableToCreateToken)
    ];
    let package = Package::new();
    let doc = TestDoc(package.as_document());
    let ex = Exercise::new(&doc);
    let res = ex.parse_raw(tokens);
    assert_eq!(Some(TokenizerError(xpath::tokenizer::TokenizerErr::UnableToCreateToken)), res.err());
}
mod data_processing; use sprs::CsVec; use rand::thread_rng; use rand::seq::SliceRandom; use std::time::{Duration, Instant}; fn main() { let n = 120000; let w_c = 4; let w_r = 8; let n = w_r * (n / w_r); let m = n * w_c/w_r; println!("n={}", n); let crossover_proba = 0.05; let seed = 10; let mut original_code_word: Vec<usize> = vec![0; n]; let mut original_compact_form: Vec<usize> = vec![]; let mut num_one = 0; let mut rng = thread_rng(); let choices: [usize;2] = [0,1]; for i in 0..n{ original_code_word[i] = *choices.choose(&mut rng).unwrap(); if original_code_word[i] == 1{ original_compact_form.push(i); num_one += 1; } } // println!("Original codeword is {:?}", original_code_word); let original_compact_form_clone = original_compact_form.clone(); let sparse_vec = CsVec::new(n, original_compact_form, vec![1; num_one]); let mut time0 = Instant::now(); let (received, post_proba) = data_processing::bsc_channel(n, &original_code_word, crossover_proba); let mut time1 = Instant::now(); println!("time to generate receive vector: {:?}", time1.saturating_duration_since(time0)); time0 = Instant::now(); let mut matrix = data_processing::make_matrix_regular_ldpc(w_c, w_r, n, seed); time1 = Instant::now(); println!("time to generate matrix: {:?}", time1.saturating_duration_since(time0)); let syndrome = &matrix * &sparse_vec; let mut new_data : Vec<usize> = Vec::new(); let mut new_indices : Vec<usize> = Vec::new(); for i in 0..syndrome.data().len(){ if syndrome.data()[i] % 2 == 1{ new_indices.push(syndrome.indices()[i]); new_data.push(1); } } let syndrome = CsVec::new(m,new_indices,new_data); time0 = Instant::now(); match data_processing::message_passing(&mut matrix, syndrome, post_proba, 60) { Some(value) => { println!("That is great but not quite!"); assert_eq!(value.indices(), original_compact_form_clone); } None => { println!("Sorry mate!") } } time1 = Instant::now(); println!("time to (not) decode: {:?}", time1.saturating_duration_since(time0)); // 
data_processing::message_passing_test(); // println!("Hello"); }
extern crate time; /*pub fn get_time() -> f64 { let current_time = time::get_time(); let unix_timestamp = (current_time.sec as f64) + ((current_time.nsec as f64) * 1e-09); return unix_timestamp; }*/ pub fn get_precise_time() -> f64 { let current_time = time::precise_time_s(); return current_time; }
#![feature(plugin)] #![plugin(rocket_codegen)] #![feature(custom_attribute)] #[macro_use] extern crate diesel; #[macro_use] extern crate serde_derive; extern crate dotenv; extern crate rocket; extern crate serde_json; extern crate rocket_contrib; use rocket::request::Request; mod controllers; use controllers::*; mod databases; #[get("/")] fn index() -> String { "H".to_string() } #[catch(404)] fn not_found(_req_: &Request) -> String { "not_found".to_string() } trait Server { fn controllers() -> Vec<Controller> fn start() { let handlers = controllers().flatMap(controller -> controller.handlers()) rocket::ignite() .mount("/", routes!handlers) } fn init() fn onStoped() } impl Server for Application { fn controllers() -> Vec<Controller> { [ msg_controller, post_controller, post_controller ] } fn init() { logger.info("start") } fn onStoped() { logger.info("stoped") } } fn main() { rocket::ignite() .mount("/", routes![ index, msg_controller::handler, post_controller::all, post_controller::post ]) .catch(catchers![not_found]) .launch(); }
use serde::de::Error; use serde::{Deserialize, Deserializer, Serialize, Serializer}; // Custom wrapper so we don't have to write serialization/deserialization code manually #[derive(Serialize, Deserialize)] struct Commitment(#[serde(with = "hex::serde")] pub(super) [u8; 48]); impl Serialize for super::Commitment { fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error> where S: Serializer, { Commitment(self.to_bytes()).serialize(serializer) } } impl<'de> Deserialize<'de> for super::Commitment { fn deserialize<D>(deserializer: D) -> Result<Self, D::Error> where D: Deserializer<'de>, { let Commitment(bytes) = Commitment::deserialize(deserializer)?; Self::try_from_bytes(&bytes).map_err(|error| D::Error::custom(format!("{error:?}"))) } } // Custom wrapper so we don't have to write serialization/deserialization code manually #[derive(Serialize, Deserialize)] struct Witness(#[serde(with = "hex::serde")] pub(super) [u8; 48]); impl Serialize for super::Witness { fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error> where S: Serializer, { Witness(self.to_bytes()).serialize(serializer) } } impl<'de> Deserialize<'de> for super::Witness { fn deserialize<D>(deserializer: D) -> Result<Self, D::Error> where D: Deserializer<'de>, { let Witness(bytes) = Witness::deserialize(deserializer)?; Self::try_from_bytes(&bytes).map_err(|error| D::Error::custom(format!("{error:?}"))) } }
use ::std::{ io::{self, // Read, }, }; fn main () { #[derive(Debug)] // allows using {:?} formatting enum FruitKind { Apple, Orange, } println!("Enter your favourite fruit: "); let mut fruit = String::new(); io::stdin().read_line(&mut fruit).unwrap(); let fav_fruit = match fruit.as_str() { "apple" => FruitKind::Apple, "orange" => FruitKind::Orange, // Otherwise _ => { println!("Error, that fruit is not recognized"); return; }, }; println!("Your favourite fruit is: {:?}", fav_fruit); } /* Updated version thanks to the solution from Yandros: https://users.rust-lang.org/t/correct-use-of-enums-in-a-match/44947/2 */
// This file is part of Substrate. // Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. //! # Identity Module //! //! - [`identity::Trait`](./trait.Trait.html) //! - [`Call`](./enum.Call.html) //! //! ## Overview //! //! A federated naming system, allowing for multiple registrars to be added from a specified origin. //! Registrars can set a fee to provide identity-verification service. Anyone can put forth a //! proposed identity for a fixed deposit and ask for review by any number of registrars (paying //! each of their fees). Registrar judgements are given as an `enum`, allowing for sophisticated, //! multi-tier opinions. //! //! Some judgements are identified as *sticky*, which means they cannot be removed except by //! complete removal of the identity, or by the registrar. Judgements are allowed to represent a //! portion of funds that have been reserved for the registrar. //! //! A super-user can remove accounts and in doing so, slash the deposit. //! //! All accounts may also have a limited number of sub-accounts which may be specified by the owner; //! by definition, these have equivalent ownership and each has an individual name. //! //! The number of registrars should be limited, and the deposit made sufficiently large, to ensure //! no state-bloat attack is viable. //! //! ## Interface //! //! ### Dispatchable Functions //! //! #### For general users //! 
* `set_identity` - Set the associated identity of an account; a small deposit is reserved if not //! already taken. //! * `clear_identity` - Remove an account's associated identity; the deposit is returned. //! * `request_judgement` - Request a judgement from a registrar, paying a fee. //! * `cancel_request` - Cancel the previous request for a judgement. //! //! #### For general users with sub-identities //! * `set_subs` - Set the sub-accounts of an identity. //! * `add_sub` - Add a sub-identity to an identity. //! * `remove_sub` - Remove a sub-identity of an identity. //! * `rename_sub` - Rename a sub-identity of an identity. //! * `quit_sub` - Remove a sub-identity of an identity (called by the sub-identity). //! //! #### For registrars //! * `set_fee` - Set the fee required to be paid for a judgement to be given by the registrar. //! * `set_fields` - Set the fields that a registrar cares about in their judgements. //! * `provide_judgement` - Provide a judgement to an identity. //! //! #### For super-users //! * `add_registrar` - Add a new registrar to the system. //! * `kill_identity` - Forcibly remove the associated identity; the deposit is lost. //! //! [`Call`]: ./enum.Call.html //! 
[`Trait`]: ./trait.Trait.html #![cfg_attr(not(feature = "std"), no_std)] use codec::{Decode, Encode}; use enumflags2::BitFlags; use frame_support::{ decl_error, decl_event, decl_module, decl_storage, dispatch::DispatchResultWithPostInfo, ensure, traits::{BalanceStatus, Currency, EnsureOrigin, Get, OnUnbalanced, ReservableCurrency}, weights::Weight, }; use frame_system::ensure_signed; use sp_runtime::traits::{AppendZerosInput, Saturating, StaticLookup, Zero}; use sp_runtime::{DispatchError, DispatchResult, RuntimeDebug}; use sp_std::prelude::*; use sp_std::{fmt::Debug, iter::once, ops::Add}; mod benchmarking; mod default_weights; #[cfg(test)] mod tests; type BalanceOf<T> = <<T as Trait>::Currency as Currency<<T as frame_system::Trait>::AccountId>>::Balance; type NegativeImbalanceOf<T> = <<T as Trait>::Currency as Currency<<T as frame_system::Trait>::AccountId>>::NegativeImbalance; pub trait WeightInfo { fn add_registrar(r: u32) -> Weight; fn set_identity(r: u32, x: u32) -> Weight; fn set_subs_new(s: u32) -> Weight; fn set_subs_old(p: u32) -> Weight; fn add_sub(p: u32) -> Weight; fn rename_sub(p: u32) -> Weight; fn remove_sub(p: u32) -> Weight; fn quit_sub(p: u32) -> Weight; fn clear_identity(r: u32, s: u32, x: u32) -> Weight; fn request_judgement(r: u32, x: u32) -> Weight; fn cancel_request(r: u32, x: u32) -> Weight; fn set_fee(r: u32) -> Weight; fn set_account_id(r: u32) -> Weight; fn set_fields(r: u32) -> Weight; fn provide_judgement(r: u32, x: u32) -> Weight; fn kill_identity(r: u32, s: u32, x: u32) -> Weight; } pub trait Trait: frame_system::Trait { /// The overarching event type. type Event: From<Event<Self>> + Into<<Self as frame_system::Trait>::Event>; /// The currency trait. type Currency: ReservableCurrency<Self::AccountId>; /// The amount held on deposit for a registered identity. type BasicDeposit: Get<BalanceOf<Self>>; /// The amount held on deposit per additional field for a registered identity. 
type FieldDeposit: Get<BalanceOf<Self>>; /// The amount held on deposit for a registered subaccount. This should account for the fact /// that one storage item's value will increase by the size of an account ID, and there will be /// another trie item whose value is the size of an account ID plus 32 bytes. type SubAccountDeposit: Get<BalanceOf<Self>>; /// The maximum number of sub-accounts allowed per identified account. type MaxSubAccounts: Get<u32>; /// Maximum number of additional fields that may be stored in an ID. Needed to bound the I/O /// required to access an identity, but can be pretty high. type MaxAdditionalFields: Get<u32>; /// Maxmimum number of registrars allowed in the system. Needed to bound the complexity /// of, e.g., updating judgements. type MaxRegistrars: Get<u32>; /// What to do with slashed funds. type Slashed: OnUnbalanced<NegativeImbalanceOf<Self>>; /// The origin which may forcibly set or remove a name. Root can always do this. type ForceOrigin: EnsureOrigin<Self::Origin>; /// The origin which may add or remove registrars. Root can always do this. type RegistrarOrigin: EnsureOrigin<Self::Origin>; /// Weight information for extrinsics in this pallet. type WeightInfo: WeightInfo; } /// Either underlying data blob if it is at most 32 bytes, or a hash of it. If the data is greater /// than 32-bytes then it will be truncated when encoding. /// /// Can also be `None`. #[derive(Clone, Eq, PartialEq, RuntimeDebug)] pub enum Data { /// No data here. None, /// The data is stored directly. Raw(Vec<u8>), /// Only the Blake2 hash of the data is stored. The preimage of the hash may be retrieved /// through some hash-lookup service. BlakeTwo256([u8; 32]), /// Only the SHA2-256 hash of the data is stored. The preimage of the hash may be retrieved /// through some hash-lookup service. Sha256([u8; 32]), /// Only the Keccak-256 hash of the data is stored. The preimage of the hash may be retrieved /// through some hash-lookup service. 
Keccak256([u8; 32]), /// Only the SHA3-256 hash of the data is stored. The preimage of the hash may be retrieved /// through some hash-lookup service. ShaThree256([u8; 32]), } impl Decode for Data { fn decode<I: codec::Input>(input: &mut I) -> sp_std::result::Result<Self, codec::Error> { let b = input.read_byte()?; Ok(match b { 0 => Data::None, n @ 1..=33 => { let mut r = vec![0u8; n as usize - 1]; input.read(&mut r[..])?; Data::Raw(r) }, 34 => Data::BlakeTwo256(<[u8; 32]>::decode(input)?), 35 => Data::Sha256(<[u8; 32]>::decode(input)?), 36 => Data::Keccak256(<[u8; 32]>::decode(input)?), 37 => Data::ShaThree256(<[u8; 32]>::decode(input)?), _ => return Err(codec::Error::from("invalid leading byte")), }) } } impl Encode for Data { fn encode(&self) -> Vec<u8> { match self { Data::None => vec![0u8; 1], Data::Raw(ref x) => { let l = x.len().min(32); let mut r = vec![l as u8 + 1; l + 1]; &mut r[1..].copy_from_slice(&x[..l as usize]); r }, Data::BlakeTwo256(ref h) => once(34u8).chain(h.iter().cloned()).collect(), Data::Sha256(ref h) => once(35u8).chain(h.iter().cloned()).collect(), Data::Keccak256(ref h) => once(36u8).chain(h.iter().cloned()).collect(), Data::ShaThree256(ref h) => once(37u8).chain(h.iter().cloned()).collect(), } } } impl codec::EncodeLike for Data {} impl Default for Data { fn default() -> Self { Self::None } } /// An identifier for a single name registrar/identity verification service. pub type RegistrarIndex = u32; /// An attestation of a registrar over how accurate some `IdentityInfo` is in describing an account. /// /// NOTE: Registrars may pay little attention to some fields. Registrars may want to make clear /// which fields their attestation is relevant for by off-chain means. #[derive(Copy, Clone, Encode, Decode, Eq, PartialEq, RuntimeDebug)] pub enum Judgement<Balance: Encode + Decode + Copy + Clone + Debug + Eq + PartialEq> { /// The default value; no opinion is held. 
Unknown, /// No judgement is yet in place, but a deposit is reserved as payment for providing one. FeePaid(Balance), /// The data appears to be reasonably acceptable in terms of its accuracy, however no in depth /// checks (such as in-person meetings or formal KYC) have been conducted. Reasonable, /// The target is known directly by the registrar and the registrar can fully attest to the /// the data's accuracy. KnownGood, /// The data was once good but is currently out of date. There is no malicious intent in the /// inaccuracy. This judgement can be removed through updating the data. OutOfDate, /// The data is imprecise or of sufficiently low-quality to be problematic. It is not /// indicative of malicious intent. This judgement can be removed through updating the data. LowQuality, /// The data is erroneous. This may be indicative of malicious intent. This cannot be removed /// except by the registrar. Erroneous, } impl<Balance: Encode + Decode + Copy + Clone + Debug + Eq + PartialEq> Judgement<Balance> { /// Returns `true` if this judgement is indicative of a deposit being currently held. This means /// it should not be cleared or replaced except by an operation which utilizes the deposit. fn has_deposit(&self) -> bool { match self { Judgement::FeePaid(_) => true, _ => false, } } /// Returns `true` if this judgement is one that should not be generally be replaced outside /// of specialized handlers. Examples include "malicious" judgements and deposit-holding /// judgements. fn is_sticky(&self) -> bool { match self { Judgement::FeePaid(_) | Judgement::Erroneous => true, _ => false, } } } /// The fields that we use to identify the owner of an account with. Each corresponds to a field /// in the `IdentityInfo` struct. 
#[repr(u64)] #[derive(Encode, Decode, Clone, Copy, PartialEq, Eq, BitFlags, RuntimeDebug)] pub enum IdentityField { Display = 0b0000000000000000000000000000000000000000000000000000000000000001, Legal = 0b0000000000000000000000000000000000000000000000000000000000000010, Web = 0b0000000000000000000000000000000000000000000000000000000000000100, Riot = 0b0000000000000000000000000000000000000000000000000000000000001000, Email = 0b0000000000000000000000000000000000000000000000000000000000010000, PgpFingerprint = 0b0000000000000000000000000000000000000000000000000000000000100000, Image = 0b0000000000000000000000000000000000000000000000000000000001000000, Twitter = 0b0000000000000000000000000000000000000000000000000000000010000000, } /// Wrapper type for `BitFlags<IdentityField>` that implements `Codec`. #[derive(Clone, Copy, PartialEq, Default, RuntimeDebug)] pub struct IdentityFields(BitFlags<IdentityField>); impl Eq for IdentityFields {} impl Encode for IdentityFields { fn using_encoded<R, F: FnOnce(&[u8]) -> R>(&self, f: F) -> R { self.0.bits().using_encoded(f) } } impl Decode for IdentityFields { fn decode<I: codec::Input>(input: &mut I) -> sp_std::result::Result<Self, codec::Error> { let field = u64::decode(input)?; Ok(Self(<BitFlags<IdentityField>>::from_bits(field as u64).map_err(|_| "invalid value")?)) } } /// Information concerning the identity of the controller of an account. /// /// NOTE: This should be stored at the end of the storage item to facilitate the addition of extra /// fields in a backwards compatible way through a specialized `Decode` impl. #[derive(Clone, Encode, Decode, Eq, PartialEq, RuntimeDebug)] #[cfg_attr(test, derive(Default))] pub struct IdentityInfo { /// Additional fields of the identity that are not catered for with the struct's explicit /// fields. pub additional: Vec<(Data, Data)>, /// A reasonable display name for the controller of the account. 
This should be whatever it is /// that it is typically known as and should not be confusable with other entities, given /// reasonable context. /// /// Stored as UTF-8. pub display: Data, /// The full legal name in the local jurisdiction of the entity. This might be a bit /// long-winded. /// /// Stored as UTF-8. pub legal: Data, /// A representative website held by the controller of the account. /// /// NOTE: `https://` is automatically prepended. /// /// Stored as UTF-8. pub web: Data, /// The Riot/Matrix handle held by the controller of the account. /// /// Stored as UTF-8. pub riot: Data, /// The email address of the controller of the account. /// /// Stored as UTF-8. pub email: Data, /// The PGP/GPG public key of the controller of the account. pub pgp_fingerprint: Option<[u8; 20]>, /// A graphic image representing the controller of the account. Should be a company, /// organization or project logo or a headshot in the case of a human. pub image: Data, /// The Twitter identity. The leading `@` character may be elided. pub twitter: Data, } /// Information concerning the identity of the controller of an account. /// /// NOTE: This is stored separately primarily to facilitate the addition of extra fields in a /// backwards compatible way through a specialized `Decode` impl. #[derive(Clone, Encode, Eq, PartialEq, RuntimeDebug)] pub struct Registration<Balance: Encode + Decode + Copy + Clone + Debug + Eq + PartialEq> { /// Judgements from the registrars on this identity. Stored ordered by `RegistrarIndex`. There /// may be only a single judgement from each registrar. pub judgements: Vec<(RegistrarIndex, Judgement<Balance>)>, /// Amount held on deposit for this information. pub deposit: Balance, /// Information on the identity. 
pub info: IdentityInfo, } impl<Balance: Encode + Decode + Copy + Clone + Debug + Eq + PartialEq + Zero + Add> Registration<Balance> { fn total_deposit(&self) -> Balance { self.deposit + self.judgements .iter() .map(|(_, ref j)| if let Judgement::FeePaid(fee) = j { *fee } else { Zero::zero() }) .fold(Zero::zero(), |a, i| a + i) } } impl<Balance: Encode + Decode + Copy + Clone + Debug + Eq + PartialEq> Decode for Registration<Balance> { fn decode<I: codec::Input>(input: &mut I) -> sp_std::result::Result<Self, codec::Error> { let (judgements, deposit, info) = Decode::decode(&mut AppendZerosInput::new(input))?; Ok(Self { judgements, deposit, info }) } } /// Information concerning a registrar. #[derive(Clone, Encode, Decode, Eq, PartialEq, RuntimeDebug)] pub struct RegistrarInfo< Balance: Encode + Decode + Clone + Debug + Eq + PartialEq, AccountId: Encode + Decode + Clone + Debug + Eq + PartialEq, > { /// The account of the registrar. pub account: AccountId, /// Amount required to be given to the registrar for them to provide judgement. pub fee: Balance, /// Relevant fields for this registrar. Registrar judgements are limited to attestations on /// these fields. pub fields: IdentityFields, } decl_storage! { trait Store for Module<T: Trait> as Identity { /// Information that is pertinent to identify the entity behind an account. /// /// TWOX-NOTE: OK ― `AccountId` is a secure hash. pub IdentityOf get(fn identity): map hasher(twox_64_concat) T::AccountId => Option<Registration<BalanceOf<T>>>; /// The super-identity of an alternative "sub" identity together with its name, within that /// context. If the account is not some other account's sub-identity, then just `None`. pub SuperOf get(fn super_of): map hasher(blake2_128_concat) T::AccountId => Option<(T::AccountId, Data)>; /// Alternative "sub" identities of this account. /// /// The first item is the deposit, the second is a vector of the accounts. /// /// TWOX-NOTE: OK ― `AccountId` is a secure hash. 
pub SubsOf get(fn subs_of): map hasher(twox_64_concat) T::AccountId => (BalanceOf<T>, Vec<T::AccountId>); /// The set of registrars. Not expected to get very big as can only be added through a /// special origin (likely a council motion). /// /// The index into this can be cast to `RegistrarIndex` to get a valid value. pub Registrars get(fn registrars): Vec<Option<RegistrarInfo<BalanceOf<T>, T::AccountId>>>; } } decl_event!( pub enum Event<T> where AccountId = <T as frame_system::Trait>::AccountId, Balance = BalanceOf<T>, { /// A name was set or reset (which will remove all judgements). \[who\] IdentitySet(AccountId), /// A name was cleared, and the given balance returned. \[who, deposit\] IdentityCleared(AccountId, Balance), /// A name was removed and the given balance slashed. \[who, deposit\] IdentityKilled(AccountId, Balance), /// A judgement was asked from a registrar. \[who, registrar_index\] JudgementRequested(AccountId, RegistrarIndex), /// A judgement request was retracted. \[who, registrar_index\] JudgementUnrequested(AccountId, RegistrarIndex), /// A judgement was given by a registrar. \[target, registrar_index\] JudgementGiven(AccountId, RegistrarIndex), /// A registrar was added. \[registrar_index\] RegistrarAdded(RegistrarIndex), /// A sub-identity was added to an identity and the deposit paid. \[sub, main, deposit\] SubIdentityAdded(AccountId, AccountId, Balance), /// A sub-identity was removed from an identity and the deposit freed. /// \[sub, main, deposit\] SubIdentityRemoved(AccountId, AccountId, Balance), /// A sub-identity was cleared, and the given deposit repatriated from the /// main identity account to the sub-identity account. \[sub, main, deposit\] SubIdentityRevoked(AccountId, AccountId, Balance), } ); decl_error! { /// Error for the identity module. pub enum Error for Module<T: Trait> { /// Too many subs-accounts. TooManySubAccounts, /// Account isn't found. NotFound, /// Account isn't named. NotNamed, /// Empty index. 
EmptyIndex, /// Fee is changed. FeeChanged, /// No identity found. NoIdentity, /// Sticky judgement. StickyJudgement, /// Judgement given. JudgementGiven, /// Invalid judgement. InvalidJudgement, /// The index is invalid. InvalidIndex, /// The target is invalid. InvalidTarget, /// Too many additional fields. TooManyFields, /// Maximum amount of registrars reached. Cannot add any more. TooManyRegistrars, /// Account ID is already named. AlreadyClaimed, /// Sender is not a sub-account. NotSub, /// Sub-account isn't owned by sender. NotOwned } } decl_module! { /// Identity module declaration. pub struct Module<T: Trait> for enum Call where origin: T::Origin { /// The amount held on deposit for a registered identity. const BasicDeposit: BalanceOf<T> = T::BasicDeposit::get(); /// The amount held on deposit per additional field for a registered identity. const FieldDeposit: BalanceOf<T> = T::FieldDeposit::get(); /// The amount held on deposit for a registered subaccount. This should account for the fact /// that one storage item's value will increase by the size of an account ID, and there will be /// another trie item whose value is the size of an account ID plus 32 bytes. const SubAccountDeposit: BalanceOf<T> = T::SubAccountDeposit::get(); /// The maximum number of sub-accounts allowed per identified account. const MaxSubAccounts: u32 = T::MaxSubAccounts::get(); /// Maximum number of additional fields that may be stored in an ID. Needed to bound the I/O /// required to access an identity, but can be pretty high. const MaxAdditionalFields: u32 = T::MaxAdditionalFields::get(); /// Maxmimum number of registrars allowed in the system. Needed to bound the complexity /// of, e.g., updating judgements. const MaxRegistrars: u32 = T::MaxRegistrars::get(); type Error = Error<T>; fn deposit_event() = default; /// Add a registrar to the system. /// /// The dispatch origin for this call must be `T::RegistrarOrigin`. /// /// - `account`: the account of the registrar. 
/// /// Emits `RegistrarAdded` if successful. /// /// # <weight> /// - `O(R)` where `R` registrar-count (governance-bounded and code-bounded). /// - One storage mutation (codec `O(R)`). /// - One event. /// # </weight> #[weight = T::WeightInfo::add_registrar(T::MaxRegistrars::get()) ] fn add_registrar(origin, account: T::AccountId) -> DispatchResultWithPostInfo { T::RegistrarOrigin::ensure_origin(origin)?; let (i, registrar_count) = <Registrars<T>>::try_mutate( |registrars| -> Result<(RegistrarIndex, usize), DispatchError> { ensure!(registrars.len() < T::MaxRegistrars::get() as usize, Error::<T>::TooManyRegistrars); registrars.push(Some(RegistrarInfo { account, fee: Zero::zero(), fields: Default::default() })); Ok(((registrars.len() - 1) as RegistrarIndex, registrars.len())) } )?; Self::deposit_event(RawEvent::RegistrarAdded(i)); Ok(Some(T::WeightInfo::add_registrar(registrar_count as u32)).into()) } /// Set an account's identity information and reserve the appropriate deposit. /// /// If the account already has identity information, the deposit is taken as part payment /// for the new deposit. /// /// The dispatch origin for this call must be _Signed_. /// /// - `info`: The identity information. /// /// Emits `IdentitySet` if successful. /// /// # <weight> /// - `O(X + X' + R)` /// - where `X` additional-field-count (deposit-bounded and code-bounded) /// - where `R` judgements-count (registrar-count-bounded) /// - One balance reserve operation. /// - One storage mutation (codec-read `O(X' + R)`, codec-write `O(X + R)`). /// - One event. 
/// # </weight> #[weight = T::WeightInfo::set_identity( T::MaxRegistrars::get().into(), // R T::MaxAdditionalFields::get().into(), // X )] fn set_identity(origin, info: IdentityInfo) -> DispatchResultWithPostInfo { let sender = ensure_signed(origin)?; let extra_fields = info.additional.len() as u32; ensure!(extra_fields <= T::MaxAdditionalFields::get(), Error::<T>::TooManyFields); let fd = <BalanceOf<T>>::from(extra_fields) * T::FieldDeposit::get(); let mut id = match <IdentityOf<T>>::get(&sender) { Some(mut id) => { // Only keep non-positive judgements. id.judgements.retain(|j| j.1.is_sticky()); id.info = info; id } None => Registration { info, judgements: Vec::new(), deposit: Zero::zero() }, }; let old_deposit = id.deposit; id.deposit = T::BasicDeposit::get() + fd; if id.deposit > old_deposit { T::Currency::reserve(&sender, id.deposit - old_deposit)?; } if old_deposit > id.deposit { let _ = T::Currency::unreserve(&sender, old_deposit - id.deposit); } let judgements = id.judgements.len(); <IdentityOf<T>>::insert(&sender, id); Self::deposit_event(RawEvent::IdentitySet(sender)); Ok(Some(T::WeightInfo::set_identity( judgements as u32, // R extra_fields // X )).into()) } /// Set the sub-accounts of the sender. /// /// Payment: Any aggregate balance reserved by previous `set_subs` calls will be returned /// and an amount `SubAccountDeposit` will be reserved for each item in `subs`. /// /// The dispatch origin for this call must be _Signed_ and the sender must have a registered /// identity. /// /// - `subs`: The identity's (new) sub-accounts. /// /// # <weight> /// - `O(P + S)` /// - where `P` old-subs-count (hard- and deposit-bounded). /// - where `S` subs-count (hard- and deposit-bounded). /// - At most one balance operations. /// - DB: /// - `P + S` storage mutations (codec complexity `O(1)`) /// - One storage read (codec complexity `O(P)`). /// - One storage write (codec complexity `O(S)`). /// - One storage-exists (`IdentityOf::contains_key`). 
/// # </weight>
// TODO: This whole extrinsic screams "not optimized". For example we could
// filter any overlap between new and old subs, and avoid reading/writing
// to those values... We could also ideally avoid needing to write to
// N storage items for N sub accounts. Right now the weight on this function
// is a large overestimate due to the fact that it could potentially write
// to 2 x T::MaxSubAccounts::get().
#[weight = T::WeightInfo::set_subs_old(T::MaxSubAccounts::get()) // P: Assume max sub accounts removed.
	.saturating_add(T::WeightInfo::set_subs_new(subs.len() as u32)) // S: Assume all subs are new.
]
fn set_subs(origin, subs: Vec<(T::AccountId, Data)>) -> DispatchResultWithPostInfo {
	let sender = ensure_signed(origin)?;
	ensure!(<IdentityOf<T>>::contains_key(&sender), Error::<T>::NotFound);
	ensure!(subs.len() <= T::MaxSubAccounts::get() as usize, Error::<T>::TooManySubAccounts);

	let (old_deposit, old_ids) = <SubsOf<T>>::get(&sender);
	let new_deposit = T::SubAccountDeposit::get() * <BalanceOf<T>>::from(subs.len() as u32);

	// A sub-account may only belong to one identity: reject any requested sub
	// that is already claimed by a different super-account.
	let not_other_sub = subs.iter().filter_map(|i| SuperOf::<T>::get(&i.0)).all(|i| &i.0 == &sender);
	ensure!(not_other_sub, Error::<T>::AlreadyClaimed);

	if old_deposit < new_deposit {
		T::Currency::reserve(&sender, new_deposit - old_deposit)?;
	} else if old_deposit > new_deposit {
		let _ = T::Currency::unreserve(&sender, old_deposit - new_deposit);
	}
	// do nothing if they're equal.

	// Drop all old reverse-lookup entries, then write the new ones.
	for s in old_ids.iter() {
		<SuperOf<T>>::remove(s);
	}
	let ids = subs.into_iter().map(|(id, name)| {
		<SuperOf<T>>::insert(&id, (sender.clone(), name));
		id
	}).collect::<Vec<_>>();
	let new_subs = ids.len();

	if ids.is_empty() {
		<SubsOf<T>>::remove(&sender);
	} else {
		<SubsOf<T>>::insert(&sender, (new_deposit, ids));
	}

	Ok(Some(
		T::WeightInfo::set_subs_old(old_ids.len() as u32) // P: Real number of old accounts removed.
			.saturating_add(T::WeightInfo::set_subs_new(new_subs as u32)) // S: New subs added.
	).into())
}

/// Clear an account's identity info and all sub-accounts and return all deposits.
///
/// Payment: All reserved balances on the account are returned.
///
/// The dispatch origin for this call must be _Signed_ and the sender must have a registered
/// identity.
///
/// Emits `IdentityCleared` if successful.
///
/// # <weight>
/// - `O(R + S + X)`
///   - where `R` registrar-count (governance-bounded).
///   - where `S` subs-count (hard- and deposit-bounded).
///   - where `X` additional-field-count (deposit-bounded and code-bounded).
/// - One balance-unreserve operation.
/// - `2` storage reads and `S + 2` storage deletions.
/// - One event.
/// # </weight>
#[weight = T::WeightInfo::clear_identity(
	T::MaxRegistrars::get().into(), // R
	T::MaxSubAccounts::get().into(), // S
	T::MaxAdditionalFields::get().into(), // X
)]
fn clear_identity(origin) -> DispatchResultWithPostInfo {
	let sender = ensure_signed(origin)?;

	// `take` both maps so identity and subs are removed atomically with this call.
	let (subs_deposit, sub_ids) = <SubsOf<T>>::take(&sender);
	let id = <IdentityOf<T>>::take(&sender).ok_or(Error::<T>::NotNamed)?;
	let deposit = id.total_deposit() + subs_deposit;
	for sub in sub_ids.iter() {
		<SuperOf<T>>::remove(sub);
	}

	let _ = T::Currency::unreserve(&sender, deposit.clone());

	Self::deposit_event(RawEvent::IdentityCleared(sender, deposit));

	Ok(Some(T::WeightInfo::clear_identity(
		id.judgements.len() as u32, // R
		sub_ids.len() as u32, // S
		id.info.additional.len() as u32 // X
	)).into())
}

/// Request a judgement from a registrar.
///
/// Payment: At most `max_fee` will be reserved for payment to the registrar if judgement
/// given.
///
/// The dispatch origin for this call must be _Signed_ and the sender must have a
/// registered identity.
///
/// - `reg_index`: The index of the registrar whose judgement is requested.
/// - `max_fee`: The maximum fee that may be paid. This should just be auto-populated as:
///
/// ```nocompile
/// Self::registrars().get(reg_index).unwrap().fee
/// ```
///
/// Emits `JudgementRequested` if successful.
///
/// # <weight>
/// - `O(R + X)`.
/// - One balance-reserve operation.
/// - Storage: 1 read `O(R)`, 1 mutate `O(X + R)`.
/// - One event.
/// # </weight>
#[weight = T::WeightInfo::request_judgement(
	T::MaxRegistrars::get().into(), // R
	T::MaxAdditionalFields::get().into(), // X
)]
fn request_judgement(origin,
	#[compact] reg_index: RegistrarIndex,
	#[compact] max_fee: BalanceOf<T>,
) -> DispatchResultWithPostInfo {
	let sender = ensure_signed(origin)?;
	let registrars = <Registrars<T>>::get();
	let registrar = registrars.get(reg_index as usize).and_then(Option::as_ref)
		.ok_or(Error::<T>::EmptyIndex)?;
	// Protect the caller against a fee raised since they composed the call.
	ensure!(max_fee >= registrar.fee, Error::<T>::FeeChanged);
	let mut id = <IdentityOf<T>>::get(&sender).ok_or(Error::<T>::NoIdentity)?;

	// Judgements are kept sorted by registrar index, hence the binary search.
	let item = (reg_index, Judgement::FeePaid(registrar.fee));
	match id.judgements.binary_search_by_key(&reg_index, |x| x.0) {
		Ok(i) => if id.judgements[i].1.is_sticky() {
			Err(Error::<T>::StickyJudgement)?
		} else {
			id.judgements[i] = item
		},
		Err(i) => id.judgements.insert(i, item),
	}

	T::Currency::reserve(&sender, registrar.fee)?;

	let judgements = id.judgements.len();
	let extra_fields = id.info.additional.len();
	<IdentityOf<T>>::insert(&sender, id);

	Self::deposit_event(RawEvent::JudgementRequested(sender, reg_index));

	Ok(Some(T::WeightInfo::request_judgement(
		judgements as u32,
		extra_fields as u32,
	)).into())
}

/// Cancel a previous request.
///
/// Payment: A previously reserved deposit is returned on success.
///
/// The dispatch origin for this call must be _Signed_ and the sender must have a
/// registered identity.
///
/// - `reg_index`: The index of the registrar whose judgement is no longer requested.
///
/// Emits `JudgementUnrequested` if successful.
///
/// # <weight>
/// - `O(R + X)`.
/// - One balance-reserve operation.
/// - One storage mutation `O(R + X)`.
/// - One event
/// # </weight>
#[weight = T::WeightInfo::cancel_request(
	T::MaxRegistrars::get().into(), // R
	T::MaxAdditionalFields::get().into(), // X
)]
fn cancel_request(origin, reg_index: RegistrarIndex) -> DispatchResultWithPostInfo {
	let sender = ensure_signed(origin)?;
	let mut id = <IdentityOf<T>>::get(&sender).ok_or(Error::<T>::NoIdentity)?;

	let pos = id.judgements.binary_search_by_key(&reg_index, |x| x.0)
		.map_err(|_| Error::<T>::NotFound)?;
	// Only a still-pending (`FeePaid`) request may be cancelled; a delivered
	// judgement cannot be withdrawn by the requester.
	let fee = if let Judgement::FeePaid(fee) = id.judgements.remove(pos).1 {
		fee
	} else {
		Err(Error::<T>::JudgementGiven)?
	};

	let _ = T::Currency::unreserve(&sender, fee);
	let judgements = id.judgements.len();
	let extra_fields = id.info.additional.len();
	<IdentityOf<T>>::insert(&sender, id);

	Self::deposit_event(RawEvent::JudgementUnrequested(sender, reg_index));

	Ok(Some(T::WeightInfo::cancel_request(
		judgements as u32,
		extra_fields as u32
	)).into())
}

/// Set the fee required for a judgement to be requested from a registrar.
///
/// The dispatch origin for this call must be _Signed_ and the sender must be the account
/// of the registrar whose index is `index`.
///
/// - `index`: the index of the registrar whose fee is to be set.
/// - `fee`: the new fee.
///
/// # <weight>
/// - `O(R)`.
/// - One storage mutation `O(R)`.
/// - Benchmark: 7.315 + R * 0.329 µs (min squares analysis)
/// # </weight>
#[weight = T::WeightInfo::set_fee(T::MaxRegistrars::get())] // R
fn set_fee(origin,
	#[compact] index: RegistrarIndex,
	#[compact] fee: BalanceOf<T>,
) -> DispatchResultWithPostInfo {
	let who = ensure_signed(origin)?;

	// Only the registrar's own account may update its entry.
	let registrars = <Registrars<T>>::mutate(|rs| -> Result<usize, DispatchError> {
		rs.get_mut(index as usize)
			.and_then(|x| x.as_mut())
			.and_then(|r| if r.account == who { r.fee = fee; Some(()) } else { None })
			.ok_or_else(|| DispatchError::from(Error::<T>::InvalidIndex))?;
		Ok(rs.len())
	})?;
	Ok(Some(T::WeightInfo::set_fee(registrars as u32)).into()) // R
}

/// Change the account associated with a registrar.
///
/// The dispatch origin for this call must be _Signed_ and the sender must be the account
/// of the registrar whose index is `index`.
///
/// - `index`: the index of the registrar whose fee is to be set.
/// - `new`: the new account ID.
///
/// # <weight>
/// - `O(R)`.
/// - One storage mutation `O(R)`.
/// - Benchmark: 8.823 + R * 0.32 µs (min squares analysis)
/// # </weight>
#[weight = T::WeightInfo::set_account_id(T::MaxRegistrars::get())] // R
fn set_account_id(origin,
	#[compact] index: RegistrarIndex,
	new: T::AccountId,
) -> DispatchResultWithPostInfo {
	let who = ensure_signed(origin)?;

	// Only the registrar's current account may hand the slot over to `new`.
	let registrars = <Registrars<T>>::mutate(|rs| -> Result<usize, DispatchError> {
		rs.get_mut(index as usize)
			.and_then(|x| x.as_mut())
			.and_then(|r| if r.account == who { r.account = new; Some(()) } else { None })
			.ok_or_else(|| DispatchError::from(Error::<T>::InvalidIndex))?;
		Ok(rs.len())
	})?;
	Ok(Some(T::WeightInfo::set_account_id(registrars as u32)).into()) // R
}

/// Set the field information for a registrar.
///
/// The dispatch origin for this call must be _Signed_ and the sender must be the account
/// of the registrar whose index is `index`.
///
/// - `index`: the index of the registrar whose fee is to be set.
/// - `fields`: the fields that the registrar concerns themselves with.
///
/// # <weight>
/// - `O(R)`.
/// - One storage mutation `O(R)`.
/// - Benchmark: 7.464 + R * 0.325 µs (min squares analysis)
/// # </weight>
#[weight = T::WeightInfo::set_fields(T::MaxRegistrars::get())] // R
fn set_fields(origin,
	#[compact] index: RegistrarIndex,
	fields: IdentityFields,
) -> DispatchResultWithPostInfo {
	let who = ensure_signed(origin)?;

	// Only the registrar's own account may update its advertised fields.
	let registrars = <Registrars<T>>::mutate(|rs| -> Result<usize, DispatchError> {
		rs.get_mut(index as usize)
			.and_then(|x| x.as_mut())
			.and_then(|r| if r.account == who { r.fields = fields; Some(()) } else { None })
			.ok_or_else(|| DispatchError::from(Error::<T>::InvalidIndex))?;
		Ok(rs.len())
	})?;
	Ok(Some(T::WeightInfo::set_fields(
		registrars as u32 // R
	)).into())
}

/// Provide a judgement for an account's identity.
///
/// The dispatch origin for this call must be _Signed_ and the sender must be the account
/// of the registrar whose index is `reg_index`.
///
/// - `reg_index`: the index of the registrar whose judgement is being made.
/// - `target`: the account whose identity the judgement is upon. This must be an account
///   with a registered identity.
/// - `judgement`: the judgement of the registrar of index `reg_index` about `target`.
///
/// Emits `JudgementGiven` if successful.
///
/// # <weight>
/// - `O(R + X)`.
/// - One balance-transfer operation.
/// - Up to one account-lookup operation.
/// - Storage: 1 read `O(R)`, 1 mutate `O(R + X)`.
/// - One event.
/// # </weight>
#[weight = T::WeightInfo::provide_judgement(
	T::MaxRegistrars::get().into(), // R
	T::MaxAdditionalFields::get().into(), // X
)]
fn provide_judgement(origin,
	#[compact] reg_index: RegistrarIndex,
	target: <T::Lookup as StaticLookup>::Source,
	judgement: Judgement<BalanceOf<T>>,
) -> DispatchResultWithPostInfo {
	let sender = ensure_signed(origin)?;
	let target = T::Lookup::lookup(target)?;
	// Registrars may not submit deposit-carrying judgement variants here
	// (presumably this excludes `FeePaid`-style values — see `Judgement::has_deposit`).
	ensure!(!judgement.has_deposit(), Error::<T>::InvalidJudgement);
	<Registrars<T>>::get()
		.get(reg_index as usize)
		.and_then(Option::as_ref)
		.and_then(|r| if r.account == sender { Some(r) } else { None })
		.ok_or(Error::<T>::InvalidIndex)?;
	let mut id = <IdentityOf<T>>::get(&target).ok_or(Error::<T>::InvalidTarget)?;

	let item = (reg_index, judgement);
	match id.judgements.binary_search_by_key(&reg_index, |x| x.0) {
		Ok(position) => {
			// Pay out a pending request's reserved fee to the registrar
			// before overwriting it with the delivered judgement.
			if let Judgement::FeePaid(fee) = id.judgements[position].1 {
				let _ = T::Currency::repatriate_reserved(&target, &sender, fee, BalanceStatus::Free);
			}
			id.judgements[position] = item
		}
		Err(position) => id.judgements.insert(position, item),
	}

	let judgements = id.judgements.len();
	let extra_fields = id.info.additional.len();
	<IdentityOf<T>>::insert(&target, id);
	Self::deposit_event(RawEvent::JudgementGiven(target, reg_index));

	Ok(Some(T::WeightInfo::provide_judgement(
		judgements as u32,
		extra_fields as u32,
	)).into())
}

/// Remove an account's identity and sub-account information and slash the deposits.
///
/// Payment: Reserved balances from `set_subs` and `set_identity` are slashed and handled by
/// `Slash`. Verification request deposits are not returned; they should be cancelled
/// manually using `cancel_request`.
///
/// The dispatch origin for this call must match `T::ForceOrigin`.
///
/// - `target`: the account whose identity the judgement is upon. This must be an account
///   with a registered identity.
///
/// Emits `IdentityKilled` if successful.
///
/// # <weight>
/// - `O(R + S + X)`.
/// - One balance-reserve operation.
/// - `S + 2` storage mutations.
/// - One event.
/// # </weight>
#[weight = T::WeightInfo::kill_identity(
	T::MaxRegistrars::get().into(), // R
	T::MaxSubAccounts::get().into(), // S
	T::MaxAdditionalFields::get().into(), // X
)]
fn kill_identity(origin, target: <T::Lookup as StaticLookup>::Source) -> DispatchResultWithPostInfo {
	T::ForceOrigin::ensure_origin(origin)?;

	// Figure out who we're meant to be clearing.
	let target = T::Lookup::lookup(target)?;
	// Grab their deposit (and check that they have one).
	let (subs_deposit, sub_ids) = <SubsOf<T>>::take(&target);
	let id = <IdentityOf<T>>::take(&target).ok_or(Error::<T>::NotNamed)?;
	let deposit = id.total_deposit() + subs_deposit;
	for sub in sub_ids.iter() {
		<SuperOf<T>>::remove(sub);
	}
	// Slash their deposit from them.
	T::Slashed::on_unbalanced(T::Currency::slash_reserved(&target, deposit).0);

	Self::deposit_event(RawEvent::IdentityKilled(target, deposit));

	Ok(Some(T::WeightInfo::kill_identity(
		id.judgements.len() as u32, // R
		sub_ids.len() as u32, // S
		id.info.additional.len() as u32 // X
	)).into())
}

/// Add the given account to the sender's subs.
///
/// Payment: Balance reserved by a previous `set_subs` call for one sub will be repatriated
/// to the sender.
///
/// The dispatch origin for this call must be _Signed_ and the sender must have a registered
/// sub identity of `sub`.
#[weight = T::WeightInfo::add_sub(T::MaxSubAccounts::get())]
fn add_sub(origin, sub: <T::Lookup as StaticLookup>::Source, data: Data) -> DispatchResult {
	let sender = ensure_signed(origin)?;
	let sub = T::Lookup::lookup(sub)?;
	ensure!(IdentityOf::<T>::contains_key(&sender), Error::<T>::NoIdentity);

	// Check if it's already claimed as sub-identity.
	ensure!(!SuperOf::<T>::contains_key(&sub), Error::<T>::AlreadyClaimed);

	// `try_mutate` discards the storage change if the deposit reservation fails.
	SubsOf::<T>::try_mutate(&sender, |(ref mut subs_deposit, ref mut sub_ids)| {
		// Ensure there is space and that the deposit is paid.
		ensure!(sub_ids.len() < T::MaxSubAccounts::get() as usize, Error::<T>::TooManySubAccounts);
		let deposit = T::SubAccountDeposit::get();
		T::Currency::reserve(&sender, deposit)?;

		SuperOf::<T>::insert(&sub, (sender.clone(), data));
		sub_ids.push(sub.clone());
		*subs_deposit = subs_deposit.saturating_add(deposit);

		Self::deposit_event(RawEvent::SubIdentityAdded(sub, sender.clone(), deposit));
		Ok(())
	})
}

/// Alter the associated name of the given sub-account.
///
/// The dispatch origin for this call must be _Signed_ and the sender must have a registered
/// sub identity of `sub`.
#[weight = T::WeightInfo::rename_sub(T::MaxSubAccounts::get())]
fn rename_sub(origin, sub: <T::Lookup as StaticLookup>::Source, data: Data) {
	let sender = ensure_signed(origin)?;
	let sub = T::Lookup::lookup(sub)?;
	ensure!(IdentityOf::<T>::contains_key(&sender), Error::<T>::NoIdentity);
	// Only the registered super-account of `sub` may rename it.
	ensure!(SuperOf::<T>::get(&sub).map_or(false, |x| x.0 == sender), Error::<T>::NotOwned);
	SuperOf::<T>::insert(&sub, (sender, data));
}

/// Remove the given account from the sender's subs.
///
/// Payment: Balance reserved by a previous `set_subs` call for one sub will be repatriated
/// to the sender.
///
/// The dispatch origin for this call must be _Signed_ and the sender must have a registered
/// sub identity of `sub`.
#[weight = T::WeightInfo::remove_sub(T::MaxSubAccounts::get())]
fn remove_sub(origin, sub: <T::Lookup as StaticLookup>::Source) {
	let sender = ensure_signed(origin)?;
	ensure!(IdentityOf::<T>::contains_key(&sender), Error::<T>::NoIdentity);
	let sub = T::Lookup::lookup(sub)?;
	let (sup, _) = SuperOf::<T>::get(&sub).ok_or(Error::<T>::NotSub)?;
	ensure!(sup == sender, Error::<T>::NotOwned);
	SuperOf::<T>::remove(&sub);
	SubsOf::<T>::mutate(&sup, |(ref mut subs_deposit, ref mut sub_ids)| {
		sub_ids.retain(|x| x != &sub);
		// Never release more than is actually tracked as reserved for subs.
		let deposit = T::SubAccountDeposit::get().min(*subs_deposit);
		*subs_deposit -= deposit;
		let _ = T::Currency::unreserve(&sender, deposit);
		Self::deposit_event(RawEvent::SubIdentityRemoved(sub, sender, deposit));
	});
}

/// Remove the sender as a sub-account.
///
/// Payment: Balance reserved by a previous `set_subs` call for one sub will be repatriated
/// to the sender (*not* the original depositor).
///
/// The dispatch origin for this call must be _Signed_ and the sender must have a registered
/// super-identity.
///
/// NOTE: This should not normally be used, but is provided in the case that the non-
/// controller of an account is maliciously registered as a sub-account.
#[weight = T::WeightInfo::quit_sub(T::MaxSubAccounts::get())]
fn quit_sub(origin) {
	let sender = ensure_signed(origin)?;
	let (sup, _) = SuperOf::<T>::take(&sender).ok_or(Error::<T>::NotSub)?;
	SubsOf::<T>::mutate(&sup, |(ref mut subs_deposit, ref mut sub_ids)| {
		sub_ids.retain(|x| x != &sender);
		let deposit = T::SubAccountDeposit::get().min(*subs_deposit);
		*subs_deposit -= deposit;
		// The deposit is moved from the super-account's reserve to the
		// quitting sub (`sender`), not back to the original depositor.
		let _ = T::Currency::repatriate_reserved(&sup, &sender, deposit, BalanceStatus::Free);
		Self::deposit_event(RawEvent::SubIdentityRevoked(sender, sup.clone(), deposit));
	});
}
	}
}

impl<T: Trait> Module<T> {
	/// Get the subs of an account.
	///
	/// Reads `SubsOf` for `who` and resolves each sub through `SuperOf`,
	/// returning `(sub_account, name)` pairs; subs with no `SuperOf` entry
	/// are silently skipped.
	pub fn subs(who: &T::AccountId) -> Vec<(T::AccountId, Data)> {
		SubsOf::<T>::get(who)
			.1
			.into_iter()
			.filter_map(|a| SuperOf::<T>::get(&a).map(|x| (a, x.1)))
			.collect()
	}
}
// NOTE(review): svd2rust-style generated peripheral description (EXTI).
// The `#[doc]` strings carry the byte offset of each register from the
// peripheral base and its access mode: "(rw)" aliases expose read/write
// accessors, "(r)" aliases are read-only. The `_reservedN` fields pad the
// layout so each register lands at its documented offset.
#[doc = r"Register block"]
#[repr(C)]
pub struct RegisterBlock {
    #[doc = "0x00 - rising trigger selection register"]
    pub rtsr1: RTSR1,
    #[doc = "0x04 - falling trigger selection register"]
    pub ftsr1: FTSR1,
    #[doc = "0x08 - software interrupt event register"]
    pub swier1: SWIER1,
    #[doc = "0x0c - EXTI pending register"]
    pub pr1: PR1,
    _reserved4: [u8; 0x10],
    #[doc = "0x20 - rising trigger selection register"]
    pub rtsr2: RTSR2,
    #[doc = "0x24 - falling trigger selection register"]
    pub ftsr2: FTSR2,
    #[doc = "0x28 - software interrupt event register"]
    pub swier2: SWIER2,
    #[doc = "0x2c - pending register"]
    pub pr2: PR2,
    _reserved8: [u8; 0x50],
    #[doc = "0x80 - CPUm wakeup with interrupt mask register"]
    pub imr1: IMR1,
    #[doc = "0x84 - CPUm wakeup with event mask register"]
    pub emr1: EMR1,
    _reserved10: [u8; 0x08],
    #[doc = "0x90 - CPUm wakeup with interrupt mask register"]
    pub imr2: IMR2,
    #[doc = "0x94 - CPUm wakeup with event mask register"]
    pub emr2: EMR2,
    _reserved12: [u8; 0x28],
    #[doc = "0xc0 - CPUm wakeup with interrupt mask register"]
    pub c2imr1: C2IMR1,
    #[doc = "0xc4 - CPUm wakeup with event mask register"]
    pub c2emr1: C2EMR1,
    _reserved14: [u8; 0x08],
    #[doc = "0xd0 - CPUm wakeup with interrupt mask register"]
    pub c2imr2: C2IMR2,
    #[doc = "0xd4 - CPUm wakeup with event mask register"]
    pub c2emr2: C2EMR2,
    _reserved16: [u8; 0x0300],
    #[doc = "0x3d8 - EXTI Hardware configuration registers"]
    pub hwcfgr7: HWCFGR7,
    #[doc = "0x3dc - Hardware configuration registers"]
    pub hwcfgr6: HWCFGR6,
    #[doc = "0x3e0 - Hardware configuration registers"]
    pub hwcfgr5: HWCFGR5,
    #[doc = "0x3e4 - Hardware configuration registers"]
    pub hwcfgr4: HWCFGR4,
    #[doc = "0x3e8 - Hardware configuration registers"]
    pub hwcfgr3: HWCFGR3,
    #[doc = "0x3ec - Hardware configuration registers"]
    pub hwcfgr2: HWCFGR2,
    #[doc = "0x3f0 - Hardware configuration register 1"]
    pub hwcfgr1: HWCFGR1,
    #[doc = "0x3f4 - EXTI IP Version register"]
    pub verr: VERR,
    #[doc = "0x3f8 - Identification register"]
    pub ipidr: IPIDR,
    #[doc = "0x3fc - Size ID register"]
    pub sidr: SIDR,
}
// One accessor type alias plus one field module per register below.
#[doc = "RTSR1 (rw) register accessor: rising trigger selection register\n\nYou can [`read`](crate::generic::Reg::read) this register and get [`rtsr1::R`]. You can [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero) this register using [`rtsr1::W`]. You can also [`modify`](crate::generic::Reg::modify) this register. See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [`rtsr1`] module"]
pub type RTSR1 = crate::Reg<rtsr1::RTSR1_SPEC>;
#[doc = "rising trigger selection register"]
pub mod rtsr1;
#[doc = "FTSR1 (rw) register accessor: falling trigger selection register\n\nYou can [`read`](crate::generic::Reg::read) this register and get [`ftsr1::R`]. You can [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero) this register using [`ftsr1::W`]. You can also [`modify`](crate::generic::Reg::modify) this register. See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [`ftsr1`] module"]
pub type FTSR1 = crate::Reg<ftsr1::FTSR1_SPEC>;
#[doc = "falling trigger selection register"]
pub mod ftsr1;
#[doc = "SWIER1 (rw) register accessor: software interrupt event register\n\nYou can [`read`](crate::generic::Reg::read) this register and get [`swier1::R`]. You can [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero) this register using [`swier1::W`]. You can also [`modify`](crate::generic::Reg::modify) this register. See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [`swier1`] module"]
pub type SWIER1 = crate::Reg<swier1::SWIER1_SPEC>;
#[doc = "software interrupt event register"]
pub mod swier1;
#[doc = "PR1 (rw) register accessor: EXTI pending register\n\nYou can [`read`](crate::generic::Reg::read) this register and get [`pr1::R`]. You can [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero) this register using [`pr1::W`]. You can also [`modify`](crate::generic::Reg::modify) this register. See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [`pr1`] module"]
pub type PR1 = crate::Reg<pr1::PR1_SPEC>;
#[doc = "EXTI pending register"]
pub mod pr1;
#[doc = "RTSR2 (rw) register accessor: rising trigger selection register\n\nYou can [`read`](crate::generic::Reg::read) this register and get [`rtsr2::R`]. You can [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero) this register using [`rtsr2::W`]. You can also [`modify`](crate::generic::Reg::modify) this register. See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [`rtsr2`] module"]
pub type RTSR2 = crate::Reg<rtsr2::RTSR2_SPEC>;
#[doc = "rising trigger selection register"]
pub mod rtsr2;
#[doc = "FTSR2 (rw) register accessor: falling trigger selection register\n\nYou can [`read`](crate::generic::Reg::read) this register and get [`ftsr2::R`]. You can [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero) this register using [`ftsr2::W`]. You can also [`modify`](crate::generic::Reg::modify) this register. See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [`ftsr2`] module"]
pub type FTSR2 = crate::Reg<ftsr2::FTSR2_SPEC>;
#[doc = "falling trigger selection register"]
pub mod ftsr2;
#[doc = "SWIER2 (rw) register accessor: software interrupt event register\n\nYou can [`read`](crate::generic::Reg::read) this register and get [`swier2::R`]. You can [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero) this register using [`swier2::W`]. You can also [`modify`](crate::generic::Reg::modify) this register. See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [`swier2`] module"]
pub type SWIER2 = crate::Reg<swier2::SWIER2_SPEC>;
#[doc = "software interrupt event register"]
pub mod swier2;
#[doc = "PR2 (rw) register accessor: pending register\n\nYou can [`read`](crate::generic::Reg::read) this register and get [`pr2::R`]. You can [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero) this register using [`pr2::W`]. You can also [`modify`](crate::generic::Reg::modify) this register. See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [`pr2`] module"]
pub type PR2 = crate::Reg<pr2::PR2_SPEC>;
#[doc = "pending register"]
pub mod pr2;
#[doc = "IMR1 (rw) register accessor: CPUm wakeup with interrupt mask register\n\nYou can [`read`](crate::generic::Reg::read) this register and get [`imr1::R`]. You can [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero) this register using [`imr1::W`]. You can also [`modify`](crate::generic::Reg::modify) this register. See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [`imr1`] module"]
pub type IMR1 = crate::Reg<imr1::IMR1_SPEC>;
#[doc = "CPUm wakeup with interrupt mask register"]
pub mod imr1;
#[doc = "C2IMR1 (rw) register accessor: CPUm wakeup with interrupt mask register\n\nYou can [`read`](crate::generic::Reg::read) this register and get [`c2imr1::R`]. You can [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero) this register using [`c2imr1::W`]. You can also [`modify`](crate::generic::Reg::modify) this register. See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [`c2imr1`] module"]
pub type C2IMR1 = crate::Reg<c2imr1::C2IMR1_SPEC>;
#[doc = "CPUm wakeup with interrupt mask register"]
pub mod c2imr1;
#[doc = "EMR1 (rw) register accessor: CPUm wakeup with event mask register\n\nYou can [`read`](crate::generic::Reg::read) this register and get [`emr1::R`]. You can [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero) this register using [`emr1::W`]. You can also [`modify`](crate::generic::Reg::modify) this register. See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [`emr1`] module"]
pub type EMR1 = crate::Reg<emr1::EMR1_SPEC>;
#[doc = "CPUm wakeup with event mask register"]
pub mod emr1;
#[doc = "C2EMR1 (rw) register accessor: CPUm wakeup with event mask register\n\nYou can [`read`](crate::generic::Reg::read) this register and get [`c2emr1::R`]. You can [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero) this register using [`c2emr1::W`]. You can also [`modify`](crate::generic::Reg::modify) this register. See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [`c2emr1`] module"]
pub type C2EMR1 = crate::Reg<c2emr1::C2EMR1_SPEC>;
#[doc = "CPUm wakeup with event mask register"]
pub mod c2emr1;
#[doc = "IMR2 (rw) register accessor: CPUm wakeup with interrupt mask register\n\nYou can [`read`](crate::generic::Reg::read) this register and get [`imr2::R`]. You can [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero) this register using [`imr2::W`]. You can also [`modify`](crate::generic::Reg::modify) this register. See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [`imr2`] module"]
pub type IMR2 = crate::Reg<imr2::IMR2_SPEC>;
#[doc = "CPUm wakeup with interrupt mask register"]
pub mod imr2;
#[doc = "C2IMR2 (rw) register accessor: CPUm wakeup with interrupt mask register\n\nYou can [`read`](crate::generic::Reg::read) this register and get [`c2imr2::R`]. You can [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero) this register using [`c2imr2::W`]. You can also [`modify`](crate::generic::Reg::modify) this register. See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [`c2imr2`] module"]
pub type C2IMR2 = crate::Reg<c2imr2::C2IMR2_SPEC>;
#[doc = "CPUm wakeup with interrupt mask register"]
pub mod c2imr2;
#[doc = "EMR2 (rw) register accessor: CPUm wakeup with event mask register\n\nYou can [`read`](crate::generic::Reg::read) this register and get [`emr2::R`]. You can [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero) this register using [`emr2::W`]. You can also [`modify`](crate::generic::Reg::modify) this register. See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [`emr2`] module"]
pub type EMR2 = crate::Reg<emr2::EMR2_SPEC>;
#[doc = "CPUm wakeup with event mask register"]
pub mod emr2;
#[doc = "C2EMR2 (rw) register accessor: CPUm wakeup with event mask register\n\nYou can [`read`](crate::generic::Reg::read) this register and get [`c2emr2::R`]. You can [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero) this register using [`c2emr2::W`]. You can also [`modify`](crate::generic::Reg::modify) this register. See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [`c2emr2`] module"]
pub type C2EMR2 = crate::Reg<c2emr2::C2EMR2_SPEC>;
#[doc = "CPUm wakeup with event mask register"]
pub mod c2emr2;
#[doc = "HWCFGR5 (r) register accessor: Hardware configuration registers\n\nYou can [`read`](crate::generic::Reg::read) this register and get [`hwcfgr5::R`]. See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [`hwcfgr5`] module"]
pub type HWCFGR5 = crate::Reg<hwcfgr5::HWCFGR5_SPEC>;
#[doc = "Hardware configuration registers"]
pub mod hwcfgr5;
#[doc = "HWCFGR6 (r) register accessor: Hardware configuration registers\n\nYou can [`read`](crate::generic::Reg::read) this register and get [`hwcfgr6::R`]. See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [`hwcfgr6`] module"]
pub type HWCFGR6 = crate::Reg<hwcfgr6::HWCFGR6_SPEC>;
#[doc = "Hardware configuration registers"]
pub mod hwcfgr6;
#[doc = "HWCFGR7 (r) register accessor: EXTI Hardware configuration registers\n\nYou can [`read`](crate::generic::Reg::read) this register and get [`hwcfgr7::R`]. See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [`hwcfgr7`] module"]
pub type HWCFGR7 = crate::Reg<hwcfgr7::HWCFGR7_SPEC>;
#[doc = "EXTI Hardware configuration registers"]
pub mod hwcfgr7;
#[doc = "HWCFGR2 (r) register accessor: Hardware configuration registers\n\nYou can [`read`](crate::generic::Reg::read) this register and get [`hwcfgr2::R`]. See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [`hwcfgr2`] module"]
pub type HWCFGR2 = crate::Reg<hwcfgr2::HWCFGR2_SPEC>;
#[doc = "Hardware configuration registers"]
pub mod hwcfgr2;
#[doc = "HWCFGR3 (r) register accessor: Hardware configuration registers\n\nYou can [`read`](crate::generic::Reg::read) this register and get [`hwcfgr3::R`]. See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [`hwcfgr3`] module"]
pub type HWCFGR3 = crate::Reg<hwcfgr3::HWCFGR3_SPEC>;
#[doc = "Hardware configuration registers"]
pub mod hwcfgr3;
#[doc = "HWCFGR4 (r) register accessor: Hardware configuration registers\n\nYou can [`read`](crate::generic::Reg::read) this register and get [`hwcfgr4::R`]. See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [`hwcfgr4`] module"]
pub type HWCFGR4 = crate::Reg<hwcfgr4::HWCFGR4_SPEC>;
#[doc = "Hardware configuration registers"]
pub mod hwcfgr4;
#[doc = "HWCFGR1 (r) register accessor: Hardware configuration register 1\n\nYou can [`read`](crate::generic::Reg::read) this register and get [`hwcfgr1::R`]. See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [`hwcfgr1`] module"]
pub type HWCFGR1 = crate::Reg<hwcfgr1::HWCFGR1_SPEC>;
#[doc = "Hardware configuration register 1"]
pub mod hwcfgr1;
#[doc = "VERR (r) register accessor: EXTI IP Version register\n\nYou can [`read`](crate::generic::Reg::read) this register and get [`verr::R`]. See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [`verr`] module"]
pub type VERR = crate::Reg<verr::VERR_SPEC>;
#[doc = "EXTI IP Version register"]
pub mod verr;
#[doc = "IPIDR (r) register accessor: Identification register\n\nYou can [`read`](crate::generic::Reg::read) this register and get [`ipidr::R`]. See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [`ipidr`] module"]
pub type IPIDR = crate::Reg<ipidr::IPIDR_SPEC>;
#[doc = "Identification register"]
pub mod ipidr;
#[doc = "SIDR (r) register accessor: Size ID register\n\nYou can [`read`](crate::generic::Reg::read) this register and get [`sidr::R`]. See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [`sidr`] module"]
pub type SIDR = crate::Reg<sidr::SIDR_SPEC>;
#[doc = "Size ID register"]
pub mod sidr;
#[derive(RustEmbed)] #[folder = "examples/public/"] struct Asset; pub fn radio_play(){ use std::fs::{self, File}; use std::io::BufReader; use std::time::Duration; use std::thread::sleep; use std::io::{Read, Write}; let device = rodio::default_output_device().expect("error 1"); let shaolin = "shaolin.wav"; let wav_cow = Asset::get(shaolin).expect("error 2"); // let emb_buf = std::str::from_utf8(wav_cow.as_ref()).expect("error 3"); // embed struct let mut file = File::create(shaolin).expect("error 4"); file.write_all(wav_cow.as_ref()).expect("error 5"); let beep1 = rodio::play_once(&device, BufReader::new(File::open(shaolin).expect("open fail"))).expect("play fail"); beep1.set_volume(0.2); // let _handle = thread::current(); // println!("{:?}", _handle.name()); sleep(Duration::from_millis(1000)); fs::remove_file(shaolin).expect("fail remove"); }
//! Module defining the `ReportInfo` struct, useful to format the output report file and //! to keep track of statistics about the sniffed traffic. use std::collections::{HashMap, HashSet}; use indexmap::IndexMap; use crate::networking::types::address_port_pair::AddressPortPair; use crate::networking::types::data_info::DataInfo; use crate::networking::types::data_info_host::DataInfoHost; use crate::networking::types::host::Host; use crate::networking::types::info_address_port_pair::InfoAddressPortPair; use crate::networking::types::traffic_direction::TrafficDirection; use crate::AppProtocol; /// Struct to be shared between the threads in charge of parsing packets and update reports. pub struct InfoTraffic { /// Total amount of filtered bytes received. pub tot_received_bytes: u128, /// Total amount of filtered bytes sent. pub tot_sent_bytes: u128, /// Total amount of filtered packets received. pub tot_received_packets: u128, /// Total amount of filtered packets sent. pub tot_sent_packets: u128, /// Total packets including those not filtered pub all_packets: u128, /// Total bytes including those not filtered pub all_bytes: u128, /// Number of dropped packets pub dropped_packets: u32, /// Map of the filtered traffic pub map: IndexMap<AddressPortPair, InfoAddressPortPair>, /// Set with the addresses of the last time interval pub addresses_last_interval: HashSet<usize>, /// Collection of the favorite hosts pub favorite_hosts: HashSet<Host>, /// Collection of favorite hosts that exchanged data in the last interval pub favorites_last_interval: HashSet<Host>, /// Map of the application layer protocols with their data info pub app_protocols: HashMap<AppProtocol, DataInfo>, /// Map of the addresses waiting for a rDNS resolution; used to NOT send multiple rDNS for the same address pub addresses_waiting_resolution: HashMap<String, DataInfo>, /// Map of the resolved addresses with their full rDNS value and the corresponding host pub addresses_resolved: HashMap<String, (String, 
Host)>, /// Map of the hosts with their data info pub hosts: HashMap<Host, DataInfoHost>, } impl InfoTraffic { /// Constructs a new `InfoTraffic` element. pub fn new() -> Self { InfoTraffic { tot_received_bytes: 0, tot_sent_bytes: 0, tot_received_packets: 0, tot_sent_packets: 0, all_packets: 0, all_bytes: 0, dropped_packets: 0, map: IndexMap::new(), addresses_last_interval: HashSet::new(), favorite_hosts: HashSet::new(), favorites_last_interval: HashSet::new(), app_protocols: HashMap::new(), addresses_waiting_resolution: HashMap::new(), addresses_resolved: HashMap::new(), hosts: HashMap::new(), } } pub fn add_packet(&mut self, bytes: u128, traffic_direction: TrafficDirection) { if traffic_direction == TrafficDirection::Outgoing { //increment number of sent packets and bytes self.tot_sent_packets += 1; self.tot_sent_bytes += bytes; } else { //increment number of received packets and bytes self.tot_received_packets += 1; self.tot_received_bytes += bytes; } } }
use std::fmt::Display; /// String to search for in a JSON document, conforming to the /// [RFC7159, section 7](https://www.rfc-editor.org/rfc/rfc7159#section-7) /// /// Represents the bytes defining a label/key in a JSON object /// that can be matched against when executing a query. /// /// # Examples /// /// ``` /// # use rsonpath::query::JsonString; /// /// let needle = JsonString::new("needle"); /// /// assert_eq!(needle.bytes(), "needle".as_bytes()); /// assert_eq!(needle.bytes_with_quotes(), "\"needle\"".as_bytes()); /// ``` #[derive(Clone)] pub struct JsonString { string: Vec<u8>, string_with_quotes: Vec<u8>, } impl std::fmt::Debug for JsonString { #[inline] fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> Result<(), std::fmt::Error> { write!( f, r#"{}"#, std::str::from_utf8(&self.string_with_quotes).unwrap_or("[invalid utf8]") ) } } impl JsonString { /// Create a new label from UTF8 input. #[must_use] #[inline] pub fn new(string: &str) -> Self { let bytes = string.as_bytes(); let without_quotes = Vec::from(bytes); let mut with_quotes = Vec::with_capacity(bytes.len() + 2); with_quotes.push(b'"'); with_quotes.extend(bytes); with_quotes.push(b'"'); Self { string: without_quotes, string_with_quotes: with_quotes, } } /// Return the raw bytes of the string, guaranteed to be block-aligned. #[must_use] #[inline(always)] pub fn bytes(&self) -> &[u8] { &self.string } /// Return the bytes representing the string with a leading and trailing /// double quote symbol `"`, guaranteed to be block-aligned. #[must_use] #[inline(always)] pub fn bytes_with_quotes(&self) -> &[u8] { &self.string_with_quotes } /// Return a display object with a UTF8 representation of this string. /// /// If the string contains invalid UTF8, the value will always be `"[invalid utf8]"`. 
#[must_use] #[inline(always)] pub fn display(&self) -> impl Display + '_ { std::str::from_utf8(&self.string).unwrap_or("[invalid utf8]") } } impl PartialEq<Self> for JsonString { #[inline(always)] fn eq(&self, other: &Self) -> bool { self.string == other.string } } impl Eq for JsonString {} impl PartialEq<JsonString> for [u8] { #[inline(always)] fn eq(&self, other: &JsonString) -> bool { self == other.string } } impl PartialEq<JsonString> for &[u8] { #[inline(always)] fn eq(&self, other: &JsonString) -> bool { *self == other.string } } impl PartialEq<[u8]> for JsonString { #[inline(always)] fn eq(&self, other: &[u8]) -> bool { self.string == other } } impl PartialEq<&[u8]> for JsonString { #[inline(always)] fn eq(&self, other: &&[u8]) -> bool { self.string == *other } } impl std::hash::Hash for JsonString { #[inline(always)] fn hash<H: std::hash::Hasher>(&self, state: &mut H) { let slice: &[u8] = &self.string; slice.hash(state); } } #[cfg(test)] mod tests { use super::*; use pretty_assertions::{assert_eq, assert_ne}; use std::{ collections::hash_map::DefaultHasher, hash::{Hash, Hasher}, }; use test_case::test_case; #[test_case("dog", "dog"; "dog")] #[test_case("", ""; "empty")] fn equal_json_strings_are_equal(s1: &str, s2: &str) { let string1 = JsonString::new(s1); let string2 = JsonString::new(s2); assert_eq!(string1, string2); } #[test] fn different_json_strings_are_not_equal() { let string1 = JsonString::new("dog"); let string2 = JsonString::new("doc"); assert_ne!(string1, string2); } #[test_case("dog", "dog"; "dog")] #[test_case("", ""; "empty")] fn equal_json_strings_have_equal_hashes(s1: &str, s2: &str) { let string1 = JsonString::new(s1); let string2 = JsonString::new(s2); let mut hasher1 = DefaultHasher::new(); string1.hash(&mut hasher1); let hash1 = hasher1.finish(); let mut hasher2 = DefaultHasher::new(); string2.hash(&mut hasher2); let hash2 = hasher2.finish(); assert_eq!(hash1, hash2); } }
use yew::{html, Component, ComponentLink, Html, Renderable, ShouldRender};

/// Fallback page component rendered when no route matches.
/// Stateless: it holds no fields and ignores all messages and property changes.
pub struct PageNotFound {}

/// Message type for the component; empty because the page is static.
pub enum Msg {}

impl Component for PageNotFound {
    type Message = Msg;
    type Properties = ();

    /// Builds the component; properties and link are unused.
    fn create(_: Self::Properties, _: ComponentLink<Self>) -> Self {
        PageNotFound {}
    }

    /// No messages exist, but returning `true` requests a re-render anyway.
    fn update(&mut self, _msg: Self::Message) -> ShouldRender {
        true
    }

    /// Properties are `()`, so any "change" simply triggers a re-render.
    fn change(&mut self, _props: Self::Properties) -> ShouldRender {
        true
    }

    /// Logs teardown for debugging route transitions.
    fn destroy(&mut self) {
        log::info!("PageNotFound destroyed")
    }
}

impl Renderable<PageNotFound> for PageNotFound {
    /// Renders the static "Page Not Found" text node.
    fn view(&self) -> Html<Self> {
        html! {
            {"Page Not Found"}
        }
    }
}
/// Smoke test for the shared test harness: simply emits a marker line on
/// stdout proving the common setup code path executes.
#[test]
pub fn setup() {
    const MARKER: &str = "common setup print";
    println!("{}", MARKER);
}
use actix_web::web::ServiceConfig; use actix_web::{delete, get, patch, post, web, Error, HttpRequest, HttpResponse}; use crate::db::PgPool; use crate::db::finance as db; use crate::db::types::Branch; use chrono::{Date, NaiveDateTime}; use crate::db::finance::FilterQuery; use diesel::PgConnection; use serde::Serialize; pub fn endpoints(config: &mut ServiceConfig) { config.service(money) .service(money_defaults); } #[get("/api/finance/money")] pub async fn money(pool: web::Data<PgPool>, request: HttpRequest) -> Result<HttpResponse, Error> { let conn = pool.get().unwrap(); if request.query_string().is_empty() { let sum = db::all(&conn); Ok(HttpResponse::Ok().json(sum)) } else { match serde_qs::from_str::<FilterQuery>(&request.query_string()) { Ok(query) => { println!("{:?}", query); let sum = db::by_query(&conn, query); Ok(HttpResponse::Ok().json(sum)) } Err(query_err) => { println!("{:?}", query_err); Ok(HttpResponse::InternalServerError().finish()) } } } } #[get("/api/finance/money-defaults")] pub async fn money_defaults(pool: web::Data<PgPool>) -> Result<HttpResponse, Error> { let conn = pool.get().unwrap(); let defaults = DefaultValues::new(&conn); Ok(HttpResponse::Ok().json(defaults)) } #[derive(Serialize)] struct DefaultValues { pub all: i32, pub digital_all: i32, pub cash_all: i32, pub processed_all: i32, pub processed_digital: i32, pub processed_cash: i32, pub not_processed_all: i32, pub not_processed_digital: i32, pub not_processed_cash: i32, } impl DefaultValues { fn new(conn: &PgConnection) -> Self { let all = db::by_query(&conn, ALL_QUERY); let digital_all = db::by_query(&conn, DIGITAL_ALL_QUERY); let cash_all = db::by_query(&conn, CASH_ALL_QUERY); let processed_all = db::by_query(&conn, PROCESSED_ALL_QUERY); let processed_digital = db::by_query(&conn, PROCESSED_DIGITAL_QUERY); let processed_cash = db::by_query(&conn, PROCESSED_CASH_QUERY); let not_processed_all = db::by_query(&conn, NOT_PROCESSED_ALL_QUERY); let not_processed_digital = db::by_query(&conn, 
NOT_PROCESSED_DIGITAL_QUERY); let not_processed_cash = db::by_query(&conn, NOT_PROCESSED_CASH_QUERY); Self { all, digital_all, cash_all, processed_all, processed_digital, processed_cash, not_processed_all, not_processed_digital, not_processed_cash } } } const ALL_QUERY: FilterQuery = FilterQuery { processed: None, branch: None, from: None, until: None, }; const DIGITAL_ALL_QUERY: FilterQuery = FilterQuery { processed: None, branch: Some(Branch::Digital), from: None, until: None, }; const CASH_ALL_QUERY: FilterQuery = FilterQuery { processed: None, branch: Some(Branch::Cash), from: None, until: None, }; const PROCESSED_ALL_QUERY: FilterQuery = FilterQuery { processed: Some(true), branch: None, from: None, until: None, }; const PROCESSED_DIGITAL_QUERY: FilterQuery = FilterQuery { processed: Some(true), branch: Some(Branch::Digital), from: None, until: None, }; const PROCESSED_CASH_QUERY: FilterQuery = FilterQuery { processed: Some(true), branch: Some(Branch::Cash), from: None, until: None, }; const NOT_PROCESSED_ALL_QUERY: FilterQuery = FilterQuery { processed: Some(false), branch: None, from: None, until: None, }; const NOT_PROCESSED_DIGITAL_QUERY: FilterQuery = FilterQuery { processed: Some(false), branch: Some(Branch::Digital), from: None, until: None, }; const NOT_PROCESSED_CASH_QUERY: FilterQuery = FilterQuery { processed: Some(false), branch: Some(Branch::Cash), from: None, until: None, };
// Multiplexed (rmux) tunnel session management: one `MuxSession` per physical
// connection, carrying many `MuxStream`s. Sessions are tracked globally per
// channel; three cooperating async tasks (recv / event / send) drive each
// session. Coordination is via atomics in `MuxSessionState` plus mpsc channels.
use super::crypto::{read_rmux_event, CryptoContext};
use super::event::{
    get_event_type_str, new_ping_event, new_pong_event, new_routine_event, new_shutdown_event,
    new_syn_event, new_window_update_event, Event, FLAG_DATA, FLAG_FIN, FLAG_PING, FLAG_PONG,
    FLAG_ROUTINE, FLAG_SHUTDOWN, FLAG_SYN, FLAG_WIN_UPDATE,
};
use super::message::ConnectRequest;
use super::stream::MuxStream;
use super::DEFAULT_RECV_BUF_SIZE;
use crate::channel::get_channel_stream;
use crate::channel::ChannelStream;
use crate::tunnel::relay;
use crate::utils::{clear_channel, make_io_error, VBuf};
use bytes::BytesMut;
use futures::future::join3;
use futures::FutureExt;
use rand::Rng;
use std::collections::HashMap;
use std::error::Error;
use std::sync::{Arc, Mutex};
use std::task::{Context, Poll};
use std::time::{Instant, SystemTime, UNIX_EPOCH};
use tokio::io::{AsyncBufRead, AsyncWrite, AsyncWriteExt};
use tokio::net::TcpStream;
use tokio::sync::mpsc;
use tokio::sync::mpsc::error::TryRecvError;

use std::sync::atomic::{AtomicBool, AtomicU32, Ordering};

lazy_static! {
    // Global registry of sessions keyed by channel name. Guarded by a sync
    // Mutex; every accessor locks it for a short, non-awaiting critical section.
    static ref CHANNEL_SESSIONS: Mutex<ChannelSessionManager> =
        Mutex::new(ChannelSessionManager::new());
}

/// Owner of all live sessions: active ones grouped by channel, plus sessions
/// retired (aged out / heartbeat-timed-out) but not yet fully drained.
struct ChannelSessionManager {
    channels: HashMap<String, ChannelMuxSession>,
    retired: Vec<MuxSession>,
}

impl ChannelSessionManager {
    fn new() -> Self {
        Self {
            channels: HashMap::new(),
            retired: Vec::new(),
        }
    }
}

/// Sessions of one channel. `sessions` keeps `None` holes so indices stay
/// stable; `cursor` round-robins stream placement across sessions.
struct ChannelMuxSession {
    sessions: Vec<Option<MuxSession>>,
    cursor: AtomicU32,
}

/// Shared, lock-free view of a session's health, used by all three worker
/// tasks. The `process_*_state` fields are small step counters recorded purely
/// for diagnostics (see `dump_session_state`).
pub struct MuxSessionState {
    last_ping_send_time: AtomicU32,
    last_pong_recv_time: AtomicU32,
    pub born_time: Instant,
    retired: AtomicBool,
    io_active_unix_secs: AtomicU32,
    closed: AtomicBool,
    process_event_state: AtomicU32,
    process_send_state: AtomicU32,
    process_recv_state: AtomicU32,
}

impl MuxSessionState {
    /// Seconds between the last pong received and the last ping sent; negative
    /// when the pong is older than the ping (i.e. a reply is overdue).
    /// Returns 0 until both timestamps have been recorded at least once.
    fn ping_pong_gap(&self) -> i64 {
        let t1 = self.last_ping_send_time.load(Ordering::SeqCst);
        let t2 = self.last_pong_recv_time.load(Ordering::SeqCst);
        if t1 > 0 && t2 > 0 {
            return t2 as i64 - t1 as i64;
        }
        0
    }
    fn is_retired(&self) -> bool {
        self.retired.load(Ordering::SeqCst)
    }
    fn is_closed(&self) -> bool {
        self.closed.load(Ordering::SeqCst)
    }
    /// Seconds since the last read or write on the session, or 0 if no I/O has
    /// happened yet. NOTE(review): assumes `now_unix_secs >= secs`; a clock
    /// step backwards would underflow in debug builds — confirm acceptable.
    fn get_io_idle_secs(&self, now_unix_secs: u32) -> u32 {
        let secs = self.io_active_unix_secs.load(Ordering::SeqCst);
        if secs == 0 {
            return 0;
        }
        now_unix_secs - secs
    }
}

/// One multiplexed tunnel session. `stream_id_seed` starts at 1 or 2 depending
/// on side (see `process_rmux_session`) and advances by 2 so client- and
/// server-initiated stream ids never collide.
pub struct MuxSession {
    id: u32,
    event_tx: mpsc::Sender<Event>,
    // NOTE(review): "pendding" is a typo for "pending", kept because renaming
    // is out of scope for a doc-only pass.
    pendding_streams: Vec<MuxStream>,
    stream_id_seed: AtomicU32,
    state: Arc<MuxSessionState>,
    max_alive_secs: u64,
}

/// Registers a session under `channel`, reusing a `None` hole if one exists,
/// otherwise appending. Creates the channel entry on first use.
fn store_mux_session(channel: &str, session: MuxSession) {
    let cmap = &mut CHANNEL_SESSIONS.lock().unwrap().channels;
    //info!("{}0 store cmap size:{}", channel, cmap.len());
    if cmap.get_mut(channel).is_none() {
        let csession = ChannelMuxSession {
            sessions: Vec::new(),
            cursor: AtomicU32::new(0),
        };
        cmap.insert(String::from(channel), csession);
    }
    if let Some(csession) = cmap.get_mut(channel) {
        for s in csession.sessions.iter_mut() {
            if s.is_none() {
                *s = Some(session);
                return;
            }
        }
        csession.sessions.push(Some(session));
    }
}

/// Drops session `sid` from its channel slot (leaving a `None` hole), or from
/// the retired list if it had already been retired.
fn erase_mux_session(channel: &str, sid: u32) {
    let mut holder = CHANNEL_SESSIONS.lock().unwrap();
    let cmap = &mut holder.channels;
    if let Some(csession) = cmap.get_mut(channel) {
        for s in csession.sessions.iter_mut() {
            if let Some(ss) = s {
                if ss.id == sid {
                    let _ = s.take();
                    return;
                }
            }
        }
    }
    for i in 0..holder.retired.len() {
        if holder.retired[i].id == sid {
            holder.retired.remove(i);
            return;
        }
    }
}

/// Moves all locally-initiated streams parked on session `sid` into the event
/// loop's `streams` map (done when the SYN for them is flushed).
/// NOTE(review): "hanle_pendding" = "handle_pending"; typo kept as-is.
fn hanle_pendding_mux_streams(channel: &str, sid: u32, streams: &mut HashMap<u32, MuxStream>) {
    let cmap = &mut CHANNEL_SESSIONS.lock().unwrap().channels;
    if let Some(csession) = cmap.get_mut(channel) {
        for cs in csession.sessions.iter_mut() {
            if let Some(ss) = cs {
                if ss.id == sid {
                    loop {
                        if let Some(s) = ss.pendding_streams.pop() {
                            streams.insert(s.id(), s);
                        } else {
                            return;
                        }
                    }
                }
            }
        }
    }
}

/// Number of live (non-hole) sessions currently registered for `channel`.
pub fn get_channel_session_size(channel: &str) -> usize {
    let cmap = &mut CHANNEL_SESSIONS.lock().unwrap().channels;
    let mut len: usize = 0;
    if let Some(csession) = cmap.get_mut(channel) {
        for s in csession.sessions.iter() {
            if s.is_some() {
                len += 1;
            }
        }
    }
    len
}

/// A deferred event send: collected while CHANNEL_SESSIONS is locked, fired
/// after the lock is released so we never send on a channel under the mutex.
struct RoutineAction {
    ev: Option<Event>,
    sender: mpsc::Sender<Event>,
}

impl RoutineAction {
    fn new(ev: Event, sender: mpsc::Sender<Event>) -> Self {
        Self {
            ev: Some(ev),
            sender,
        }
    }
}

/// Periodic maintenance over every session: retires heartbeat-timed-out or
/// aged-out sessions, queues pings (client side only, i.e. non-empty channel)
/// and routine events, then dispatches all queued events outside the lock.
pub async fn routine_all_sessions() {
    let mut actions = Vec::new();
    {
        let mut holder = CHANNEL_SESSIONS.lock().unwrap();
        let cmap = &mut holder.channels;
        let mut retired = Vec::new();
        for (channel, csession) in cmap.iter_mut() {
            for session in csession.sessions.iter_mut() {
                if let Some(s) = session {
                    // A gap below -60s means the peer hasn't answered our ping
                    // for over a minute: shut the session down.
                    if s.state.ping_pong_gap() < -60 {
                        error!("[{}]Session heartbeat timeout.", s.id);
                        let shutdown = new_shutdown_event(0, false);
                        actions.push(RoutineAction::new(shutdown, s.event_tx.clone()));
                        s.state.retired.store(true, Ordering::SeqCst);
                        retired.push(session.take().unwrap());
                        continue;
                    } else {
                        if !channel.is_empty() {
                            let ping = new_ping_event(0, false);
                            actions.push(RoutineAction::new(ping, s.event_tx.clone()));
                        }
                        if s.state.closed.load(Ordering::SeqCst) {
                            s.state.retired.store(true, Ordering::SeqCst);
                            retired.push(session.take().unwrap());
                        } else {
                            let r = new_routine_event(0);
                            actions.push(RoutineAction::new(r, s.event_tx.clone()));
                            // Client sessions with a max lifetime are retired
                            // after max_alive_secs +/- a random 60s jitter so a
                            // fleet doesn't reconnect in lockstep.
                            if s.max_alive_secs > 0 && !channel.is_empty() {
                                let rand_inc: i64 = {
                                    let mut rng = rand::thread_rng();
                                    rng.gen_range(-60, 60)
                                };
                                //let session_id = s.id;
                                let cmp_secs = s.max_alive_secs as i64 + rand_inc;
                                if s.state.born_time.elapsed().as_secs() > cmp_secs as u64 {
                                    s.state.retired.store(true, Ordering::SeqCst);
                                    retired.push(session.take().unwrap());
                                    //csession.session_ids.remove(&session_id);
                                }
                            }
                        }
                    }
                }
            }
        }
        // Already-retired sessions still get routine events so their event
        // loops can finish draining and close themselves.
        for s in holder.retired.iter_mut() {
            let r = new_routine_event(0);
            actions.push(RoutineAction::new(r, s.event_tx.clone()));
        }
        holder.retired.append(&mut retired);
    }
    // Lock released: now actually fire the queued events (best-effort).
    for action in actions.iter_mut() {
        let ev = action.ev.take().unwrap();
        let _ = action.sender.try_send(ev);
    }
}

/// Opens a new logical stream over one of `channel`'s sessions (round-robin via
/// the channel cursor). The stream is parked on the session's pending list and
/// a SYN event is sent to the session's event loop; fails with an io error if
/// the channel has no usable session or the event queue is closed.
pub async fn create_stream(
    channel: &str,
    proto: &str,
    addr: &str,
    relay_buf_size: usize,
) -> Result<MuxStream, std::io::Error> {
    // Select a session and build the stream inside the lock; await the SYN
    // send only after the lock guard is dropped.
    let (stream, ev, ev_sender) = {
        let mut stream: Option<MuxStream> = None;
        let mut ev: Option<Event> = None;
        let mut ev_sender: Option<mpsc::Sender<Event>> = None;
        let cmap = &mut CHANNEL_SESSIONS.lock().unwrap().channels;
        //let mut cmap: HashMap<String, ChannelMuxSession> = HashMap::new();
        if let Some(csession) = cmap.get_mut(channel) {
            for _ in 0..csession.sessions.len() {
                let mut idx = csession.cursor.fetch_add(1, Ordering::SeqCst);
                idx %= csession.sessions.len() as u32;
                if let Some(session) = &mut csession.sessions.as_mut_slice()[idx as usize] {
                    let creq = ConnectRequest {
                        proto: String::from(proto),
                        addr: String::from(addr),
                    };
                    // Stream ids advance by 2 to stay disjoint from the peer's.
                    let cev =
                        new_syn_event(session.stream_id_seed.fetch_add(2, Ordering::SeqCst), &creq);
                    let pendding_stream = MuxStream::new(
                        channel,
                        session.id,
                        cev.header.stream_id,
                        session.event_tx.clone(),
                        creq,
                        relay_buf_size,
                    );
                    session.pendding_streams.push(pendding_stream.clone());
                    stream = Some(pendding_stream);
                    ev = Some(cev);
                    ev_sender = Some(session.event_tx.clone());
                    break;
                }
            }
        }
        (stream, ev, ev_sender)
    };
    if stream.is_some() && ev_sender.unwrap().send(ev.unwrap()).await.is_ok() {
        return Ok(stream.unwrap());
    }
    Err(make_io_error("no channel found."))
}

/// Server-side handler for one accepted stream: dials the stream's target via
/// the local "direct" channel and relays bytes both ways until either side
/// closes. Always closes the mux stream on exit.
async fn handle_rmux_stream(mut stream: MuxStream) -> Result<(), Box<dyn Error>> {
    let stream_id = stream.state.stream_id;
    let relay_buf_size = stream.relay_buf_size();
    let target = String::from(stream.target.addr.as_str());
    let result = get_channel_stream(String::from("direct"), target).await;
    match result {
        Ok(mut remote) => {
            {
                let (mut ri, mut wi) = stream.split();
                let (mut ro, mut wo) = remote.split();
                relay(
                    stream_id,
                    &mut ri,
                    &mut wi,
                    &mut ro,
                    &mut wo,
                    relay_buf_size,
                )
                .await?;
            }
            let _ = stream.close();
            let _ = remote.close();
            Ok(())
        }
        Err(e) => {
            let _ = stream.close();
            Err(Box::new(e))
        }
    }
}

/// Handles a remote SYN: decodes the `ConnectRequest` from the event body,
/// creates the stream, and spawns `handle_rmux_stream` for it. Returns `None`
/// (and logs) when the request cannot be deserialized.
fn handle_syn(
    channel: &str,
    session_id: u32,
    ev: Event,
    evtx: mpsc::Sender<Event>,
    relay_buf_size: usize,
) -> Option<MuxStream> {
    let connect_req: ConnectRequest = match bincode::deserialize(&ev.body[..]) {
        Ok(m) => m,
        Err(err) => {
            error!(
                "Failed to parse ConnectRequest with error:{} while data len:{} {}",
                err,
                ev.body.len(),
                ev.header.len(),
            );
            return None;
        }
    };
    let sid = ev.header.stream_id;
    info!(
        "[{}]Handle conn request:{} {}",
        sid, connect_req.proto, connect_req.addr
    );
    let stream = MuxStream::new(channel, session_id, sid, evtx, connect_req, relay_buf_size);
    let handle = handle_rmux_stream(stream.clone()).map(move |r| {
        if let Err(e) = r {
            error!("[{}]Failed to handle rmux stream; error={}", sid, e);
        }
    });
    tokio::spawn(handle);
    Some(stream)
}

/// Renders one line of per-stream diagnostics (target, age, byte counters,
/// window, closed flag) for every stream in the map.
fn get_streams_stat_info(streams: &mut HashMap<u32, MuxStream>) -> String {
    let mut info = String::new();
    for (id, stream) in streams.iter_mut() {
        info.push_str(
            format!(
                "{}:target:{}, age:{:?}, send_bytes:{}, recv_bytes:{}, send_window:{}, closed:{}\n",
                id,
                stream.target.addr,
                stream.state.born_time.elapsed(),
                stream.state.total_send_bytes.load(Ordering::SeqCst),
                stream.state.total_recv_bytes.load(Ordering::SeqCst),
                stream.state.send_buf_window.load(Ordering::SeqCst),
                stream.state.closed.load(Ordering::SeqCst),
            )
            .as_str(),
        );
    }
    info
}

/// Renders a multi-line diagnostic block for one session's state (age,
/// heartbeat gap, idle time, flags and the three worker-task step counters).
fn log_session_state(sid: u32, now_unix_secs: u32, session_state: &Arc<MuxSessionState>) -> String {
    let mut stat_info = format!(
        "========================Session:{}====================\n",
        sid
    );
    stat_info.push_str(format!("Age:{:?}\n", session_state.born_time.elapsed()).as_str());
    stat_info.push_str(format!("PingPongGap:{}\n", session_state.ping_pong_gap()).as_str());
    let idle_secs = session_state.get_io_idle_secs(now_unix_secs);
    stat_info.push_str(format!("IOIdleSecs:{}\n", idle_secs).as_str());
    stat_info.push_str(format!("Retired:{}\n", session_state.is_retired()).as_str());
    stat_info.push_str(format!("Closed:{}\n", session_state.is_closed()).as_str());
    stat_info.push_str(
        format!(
            "ProcEventState:{}\n",
            session_state.process_event_state.load(Ordering::SeqCst)
        )
        .as_str(),
    );
    stat_info.push_str(
        format!(
            "ProcSendState:{}\n",
            session_state.process_send_state.load(Ordering::SeqCst)
        )
        .as_str(),
    );
    stat_info.push_str(
        format!(
            "ProcRecvState:{}\n",
            session_state.process_recv_state.load(Ordering::SeqCst)
        )
        .as_str(),
    );
    //stat_info.push_str(get_streams_stat_info(streams).as_str());
    stat_info
}

/// Dumps the diagnostic state of every session (active, grouped by channel,
/// then retired) as one large human-readable string.
pub fn dump_session_state() -> String {
    let now_unix_secs = SystemTime::now()
        .duration_since(UNIX_EPOCH)
        .unwrap()
        .as_secs() as u32;
    let mut stat_info = String::from("========================Sessions====================\n");
    {
        let ss = &mut CHANNEL_SESSIONS.lock().unwrap();
        let cmap = &mut ss.channels;
        for (channel, csession) in cmap {
            stat_info.push_str(format!("======Channel:{}=======\n", channel).as_str());
            let mut count = 0;
            for s in csession.sessions.iter() {
                match s {
                    Some(session) => {
                        let info = log_session_state(session.id, now_unix_secs, &session.state);
                        stat_info.push_str(info.as_str());
                        count += 1;
                    }
                    None => {
                        // hole left by an erased session: nothing to report
                    }
                }
            }
            stat_info
                .push_str(format!("======Channel:{} Count:{}======\n", channel, count).as_str());
        }
        stat_info.push_str("\nRetired Sessions:\n");
        let retired = &mut ss.retired;
        for s in retired {
            let info = log_session_state(s.id, now_unix_secs, &s.state);
            stat_info.push_str(info.as_str());
        }
    }
    stat_info
}

/// Records the send timestamp of a locally-originated ping (remote pings are
/// answered elsewhere and recorded via the pong path).
fn handle_ping_event(
    _sid: u32,
    _streams: &mut HashMap<u32, MuxStream>,
    session_state: &Arc<MuxSessionState>,
    is_remote: bool,
) {
    if !is_remote {
        let now_unix_secs = SystemTime::now()
            .duration_since(UNIX_EPOCH)
            .unwrap()
            .as_secs() as u32;
        session_state
            .last_ping_send_time
            .store(now_unix_secs, Ordering::SeqCst);
    }
}

/// Periodic housekeeping inside the event loop: logs session+stream stats and
/// decides whether the session should close (retired with no streams left, or
/// no I/O for >= 300s). Returns `true` when the session was marked closed.
fn handle_routine_event(
    sid: u32,
    streams: &mut HashMap<u32, MuxStream>,
    session_state: &Arc<MuxSessionState>,
) -> bool {
    let now_unix_secs = SystemTime::now()
        .duration_since(UNIX_EPOCH)
        .unwrap()
        .as_secs() as u32;
    let idle_io_secs = session_state.get_io_idle_secs(now_unix_secs);
    let mut stat_info = log_session_state(sid, now_unix_secs, &session_state);
    stat_info.push_str(get_streams_stat_info(streams).as_str());
    stat_info.push_str(format!("Streams:{}\n", streams.len()).as_str());
    info!("{}", stat_info);
    let should_close = (session_state.is_retired() && streams.is_empty()) || idle_io_secs >= 300;
    if should_close {
        error!(
            "[{}]Close session since no data send/recv {} secs ago, stream count:{}",
            sid,
            idle_io_secs,
            streams.len()
        );
        session_state.closed.store(true, Ordering::SeqCst);
        return true;
    }
    false
}

/// Closes and removes stream `sid`; if the session is retired and this was the
/// last stream, marks the session closed and returns `true`.
fn handle_fin_event(
    sid: u32,
    streams: &mut HashMap<u32, MuxStream>,
    session_state: &Arc<MuxSessionState>,
) -> bool {
    if let Some(mut stream) = streams.remove(&sid) {
        let _ = stream.close();
    }
    if session_state.is_retired() && streams.is_empty() {
        session_state.closed.store(true, Ordering::SeqCst);
        return true;
    }
    false
}

/// Encrypts `ev` with the write crypto context and queues the wire bytes on
/// the send task's channel. Returns `false` when the send channel is closed.
async fn send_local_event(
    mut ev: Event,
    wctx: &mut CryptoContext,
    send_tx: &mut mpsc::Sender<Vec<u8>>,
) -> bool {
    let mut buf = BytesMut::with_capacity(ev.body.len() + 64);
    wctx.encrypt(&mut ev, &mut buf);
    let evbuf = buf.to_vec();
    let send_rc = send_tx.send(evbuf).await;
    send_rc.is_ok()
}

/// Dispatches an event that originated locally. Returns `false` when the event
/// loop should stop (shutdown, terminal FIN, routine-close, or a failed send);
/// otherwise the event is encrypted and queued for the wire.
async fn handle_local_event<'a>(
    channel: &'a str,
    tunnel_id: u32,
    streams: &mut HashMap<u32, MuxStream>,
    session_state: &Arc<MuxSessionState>,
    ev: Event,
    wctx: &mut CryptoContext,
    send_tx: &mut mpsc::Sender<Vec<u8>>,
) -> bool {
    if FLAG_SHUTDOWN == ev.header.flags() {
        return false;
    }
    if FLAG_SYN == ev.header.flags() {
        // Our SYN is about to go out: adopt the pending streams parked on the
        // session so data/FIN events for them can be routed.
        hanle_pendding_mux_streams(channel, tunnel_id, streams);
    }
    if FLAG_FIN == ev.header.flags()
        && handle_fin_event(ev.header.stream_id, streams, &session_state)
    {
        return false;
    }
    if FLAG_ROUTINE == ev.header.flags() {
        // Routine events are purely local housekeeping; nothing goes on the wire.
        return !handle_routine_event(tunnel_id, streams, &session_state);
    }
    send_local_event(ev, wctx, send_tx).await
}

/// The session's central event loop: consumes both local and remote events
/// from `event_rx`, maintains the stream table, and feeds encrypted frames to
/// the send task. On exit it closes every stream and wakes the send task with
/// an empty sentinel buffer.
async fn process_event<'a>(
    channel: &'a str,
    tunnel_id: u32,
    mut wctx: CryptoContext,
    session_state: Arc<MuxSessionState>,
    mut event_rx: mpsc::Receiver<Event>,
    event_tx: mpsc::Sender<Event>,
    mut send_tx: mpsc::Sender<Vec<u8>>,
    relay_buf_size: usize,
) {
    let mut streams = HashMap::new();
    while !session_state.closed.load(Ordering::SeqCst) {
        session_state.process_event_state.store(0, Ordering::SeqCst);
        let rev = event_rx.recv().await;
        if let Some(ev) = rev {
            if FLAG_PING == ev.header.flags() {
                handle_ping_event(tunnel_id, &mut streams, &session_state, ev.remote);
            }
            if !ev.remote {
                session_state.process_event_state.store(3, Ordering::SeqCst);
                if handle_local_event(
                    channel,
                    tunnel_id,
                    &mut streams,
                    &session_state,
                    ev,
                    &mut wctx,
                    &mut send_tx,
                )
                .await
                {
                    session_state.process_event_state.store(4, Ordering::SeqCst);
                    continue;
                }
                session_state.process_event_state.store(4, Ordering::SeqCst);
                break;
            }
            // Remote events: dispatch on the frame type.
            match ev.header.flags() {
                FLAG_SYN => {
                    if let Some(stream) =
                        handle_syn(channel, tunnel_id, ev, event_tx.clone(), relay_buf_size)
                    {
                        streams.entry(stream.state.stream_id).or_insert(stream);
                    } else {
                    }
                }
                FLAG_FIN => {
                    if handle_fin_event(ev.header.stream_id, &mut streams, &session_state) {
                        break;
                    }
                }
                FLAG_DATA => {
                    if let Some(stream) = streams.get_mut(&ev.header.stream_id) {
                        session_state.process_event_state.store(1, Ordering::SeqCst);
                        stream.offer_data(ev.body).await;
                        session_state.process_event_state.store(2, Ordering::SeqCst);
                    } else {
                        warn!(
                            "[{}][{}]No stream found for data event.",
                            channel, ev.header.stream_id
                        );
                    }
                }
                FLAG_PING => {
                    // Answer the remote ping with a pong on the wire.
                    if !send_local_event(
                        new_pong_event(ev.header.stream_id, false),
                        &mut wctx,
                        &mut send_tx,
                    )
                    .await
                    {
                        break;
                    }
                }
                FLAG_PONG => {
                    session_state.last_pong_recv_time.store(
                        SystemTime::now()
                            .duration_since(UNIX_EPOCH)
                            .unwrap()
                            .as_secs() as u32,
                        Ordering::SeqCst,
                    );
                }
                FLAG_WIN_UPDATE => {
                    if let Some(stream) = streams.get_mut(&ev.header.stream_id) {
                        stream.update_send_window(ev.header.len());
                    }
                }
                _ => {
                    error!("invalid flags:{}", ev.header.flags());
                }
            }
        } else {
            // Sender side dropped: no more events will arrive.
            break;
        }
    }
    session_state.process_event_state.store(5, Ordering::SeqCst);
    error!("[{}][{}]handle_event done", channel, tunnel_id);
    session_state.closed.store(true, Ordering::SeqCst);
    for (_, stream) in streams.iter_mut() {
        let _ = stream.close();
    }
    clear_channel(&mut event_rx);
    // Empty Vec is the sentinel telling the send task to stop.
    let _ = send_tx.send(Vec::new()).await;
    session_state.process_event_state.store(6, Ordering::SeqCst);
}

/// Bundle of per-session parameters handed to `process_rmux_session`.
pub struct MuxContext<'a> {
    channel: &'a str,
    tunnel_id: u32,
    rctx: CryptoContext,
    wctx: CryptoContext,
    max_alive_secs: u64,
}

impl<'a> MuxContext<'a> {
    pub fn new(
        channel: &'a str,
        tunnel_id: u32,
        rctx: CryptoContext,
        wctx: CryptoContext,
        max_alive_secs: u64,
    ) -> Self {
        Self {
            channel,
            tunnel_id,
            rctx,
            wctx,
            max_alive_secs,
        }
    }
}

/// Runs a full mux session over the given reader/writer: registers the session
/// globally, then drives three concurrent tasks until all finish —
/// recv (decrypt frames off the wire into the event loop), event
/// (`process_event`), and send (batch outgoing buffers onto the wire).
/// Each task marks the shared `closed` flag and nudges the others' channels on
/// exit so the trio always terminates together.
pub async fn process_rmux_session<'a, R, W>(
    ctx: MuxContext<'a>,
    ri: &'a mut R,
    wi: &'a mut W,
    relay_buf_size: usize,
) -> Result<(), std::io::Error>
where
    R: AsyncBufRead + Unpin + Sized,
    W: AsyncWrite + Unpin + Sized,
{
    let channel = ctx.channel;
    let tunnel_id = ctx.tunnel_id;
    let mut rctx = ctx.rctx;
    let wctx = ctx.wctx;
    let max_alive_secs = ctx.max_alive_secs;
    let (mut event_tx, event_rx) = mpsc::channel::<Event>(16);
    let (send_tx, mut send_rx) = mpsc::channel(16);
    //let is_server = channel.is_empty();
    // Server (empty channel) uses even stream ids, client odd — ids never clash.
    let seed = if channel.is_empty() { 2 } else { 1 };
    let session_state = MuxSessionState {
        last_ping_send_time: AtomicU32::new(0),
        last_pong_recv_time: AtomicU32::new(0),
        born_time: Instant::now(),
        retired: AtomicBool::new(false),
        io_active_unix_secs: AtomicU32::new(0),
        closed: AtomicBool::new(false),
        process_event_state: AtomicU32::new(0),
        process_send_state: AtomicU32::new(0),
        process_recv_state: AtomicU32::new(0),
    };
    let session_state = Arc::new(session_state);
    //let send_session_state = session_state.clone();
    let recv_session_state = session_state.clone();
    let mux_session = MuxSession {
        id: tunnel_id,
        event_tx: event_tx.clone(),
        pendding_streams: Vec::new(),
        stream_id_seed: AtomicU32::new(seed),
        state: session_state.clone(),
        max_alive_secs,
        //streams: HashMap::new(),
    };
    info!(
        "[{}][{}]Start tunnel session with crypto {} {}",
        channel, tunnel_id, rctx.nonce, rctx.key
    );
    store_mux_session(channel, mux_session);
    let (mut close_tx, mut close_rx) = mpsc::channel::<()>(1);
    //let mut drop = close_rx.fuse();
    let mut handle_recv_event_tx = event_tx.clone();
    let mut handle_recv_send_tx = send_tx.clone();
    let handle_recv_session_state = session_state.clone();
    let handle_send_session_state = session_state.clone();
    // Recv task: read+decrypt frames off the wire, stamp I/O activity, forward
    // to the event loop; aborts on read error, event-queue closure, or a close
    // signal from the send task.
    let handle_recv = async move {
        while !handle_recv_session_state.closed.load(Ordering::SeqCst) {
            tokio::select! {
                recv_event = read_rmux_event(&mut rctx, ri) => {
                    match recv_event {
                        Ok(mut ev) => {
                            recv_session_state.io_active_unix_secs.store(
                                SystemTime::now()
                                    .duration_since(UNIX_EPOCH)
                                    .unwrap()
                                    .as_secs() as u32,
                                Ordering::SeqCst,
                            );
                            ev.remote = true;
                            if FLAG_DATA != ev.header.flags() {
                                info!(
                                    "[{}][{}][{}]remote recv event type:{}, len:{}",
                                    channel,
                                    tunnel_id,
                                    ev.header.stream_id,
                                    get_event_type_str(ev.header.flags()),
                                    ev.header.len(),
                                );
                            }
                            handle_recv_session_state.process_recv_state.store(1, Ordering::SeqCst);
                            let send_rc = handle_recv_event_tx.send(ev).await;
                            handle_recv_session_state.process_recv_state.store(0, Ordering::SeqCst);
                            if send_rc.is_err(){
                                break;
                            }
                        }
                        Err(err) => {
                            error!("Close remote recv since of error:{}", err);
                            break;
                        }
                    }
                },
                _ = close_rx.recv() => {
                    handle_recv_session_state.closed.store(true, Ordering::SeqCst);
                    break;
                },
            }
        }
        handle_recv_session_state
            .process_recv_state
            .store(3, Ordering::SeqCst);
        error!("[{}][{}]handle_recv done", channel, tunnel_id);
        handle_recv_session_state
            .closed
            .store(true, Ordering::SeqCst);
        //let shutdown_ev = new_shutdown_event(0, false);
        //let _ = handle_recv_event_tx.send(shutdown_ev).await;
        // Wake the send task (empty Vec sentinel) so it notices the close.
        let _ = handle_recv_send_tx.send(Vec::new()).await;
        clear_channel(&mut close_rx);
        handle_recv_session_state
            .process_recv_state
            .store(2, Ordering::SeqCst);
    };
    // let handle_event_event_tx = event_tx.clone();
    // let mut handle_event_send_tx = send_tx.clone();
    let handle_event = process_event(
        channel,
        tunnel_id,
        wctx,
        session_state.clone(),
        event_rx,
        event_tx.clone(),
        send_tx.clone(),
        relay_buf_size,
    );
    // Send task: gathers up to 60 queued buffers into one vectored write to
    // reduce syscalls; an empty buffer (or closed queue) is the stop signal.
    let handle_send = async {
        let mut vbuf = VBuf::new();
        while !handle_send_session_state.closed.load(Ordering::SeqCst) {
            // Block for the first buffer of a batch.
            if vbuf.vlen() == 0 {
                if let Some(data) = send_rx.recv().await {
                    if data.is_empty() {
                        break;
                    }
                    vbuf.push(data);
                } else {
                    break;
                }
            }
            // Opportunistically drain whatever else is already queued.
            let mut exit = false;
            while vbuf.vlen() < 60 {
                match send_rx.try_recv() {
                    Ok(data) => {
                        if data.is_empty() {
                            exit = true;
                            break;
                        } else {
                            vbuf.push(data);
                        }
                    }
                    Err(TryRecvError::Closed) => {
                        exit = true;
                        break;
                    }
                    Err(TryRecvError::Empty) => {
                        break;
                    }
                }
            }
            if exit {
                break;
            }
            session_state.io_active_unix_secs.store(
                SystemTime::now()
                    .duration_since(UNIX_EPOCH)
                    .unwrap()
                    .as_secs() as u32,
                Ordering::SeqCst,
            );
            match wi.write_buf(&mut vbuf).await {
                Ok(n) => {
                    if 0 == n {
                        break;
                    }
                }
                Err(_) => {
                    break;
                }
            }
        }
        handle_send_session_state
            .process_send_state
            .store(1, Ordering::SeqCst);
        error!("[{}][{}]handle_send done", channel, tunnel_id);
        handle_send_session_state
            .closed
            .store(true, Ordering::SeqCst);
        clear_channel(&mut send_rx);
        // Tell the recv task to stop, then push a shutdown through the event loop.
        let close_rc = close_tx.send(()).await;
        if close_rc.is_err() {
            error!("[{}][{}]Close error:{:?}", channel, tunnel_id, close_rc);
        }
        let shutdown_ev = new_shutdown_event(0, false);
        let _ = event_tx.send(shutdown_ev).await;
        handle_send_session_state
            .process_send_state
            .store(2, Ordering::SeqCst);
    };
    join3(handle_recv, handle_event, handle_send).await;
    erase_mux_session(channel, tunnel_id);
    info!("[{}][{}]Close tunnel session", channel, tunnel_id);
    Ok(())
}

/// Entry point for an accepted TCP tunnel connection: wraps the read half in a
/// buffered reader, runs the mux session to completion, then shuts the socket
/// down in both directions (best-effort).
pub async fn handle_rmux_session(
    channel: &str,
    tunnel_id: u32,
    mut inbound: TcpStream,
    rctx: CryptoContext,
    wctx: CryptoContext,
    max_alive_secs: u64,
    relay_buf_size: usize,
    //cfg: &TunnelConfig,
) -> Result<(), std::io::Error> {
    let (ri, mut wi) = inbound.split();
    let mut buf_reader = tokio::io::BufReader::with_capacity(DEFAULT_RECV_BUF_SIZE, ri);
    let ctx = MuxContext::new(channel, tunnel_id, rctx, wctx, max_alive_secs);
    process_rmux_session(
        ctx,
        // channel,
        // tunnel_id,
        &mut buf_reader,
        &mut wi,
        // rctx,
        // wctx,
        // recv_buf,
        // max_alive_secs,
        relay_buf_size,
    )
    .await?;
    let _ = inbound.shutdown(std::net::Shutdown::Both);
    Ok(())
}
use addressing; use curve25519_dalek::montgomery::MontgomeryPoint; use itertools::Itertools; use rand::seq::IteratorRandom; use sphinx::route::{Node as SphinxNode, NodeAddressBytes}; use std::cmp::max; use std::collections::HashMap; use std::net::SocketAddr; #[derive(Debug, Clone)] pub struct MixNode { pub host: SocketAddr, pub pub_key: String, pub layer: u64, pub last_seen: u64, pub version: String, } impl Into<SphinxNode> for MixNode { fn into(self) -> SphinxNode { let address_bytes = addressing::encoded_bytes_from_socket_address(self.host); let key_bytes = self.get_pub_key_bytes(); let key = MontgomeryPoint(key_bytes); SphinxNode::new(NodeAddressBytes::from_bytes(address_bytes), key) } } impl MixNode { pub fn get_pub_key_bytes(&self) -> [u8; 32] { let decoded_key_bytes = base64::decode_config(&self.pub_key, base64::URL_SAFE).unwrap(); let mut key_bytes = [0; 32]; key_bytes.copy_from_slice(&decoded_key_bytes[..]); key_bytes } } #[derive(Debug, Clone)] pub struct MixProviderClient { pub pub_key: String, } #[derive(Debug, Clone)] pub struct MixProviderNode { pub client_listener: SocketAddr, pub mixnet_listener: SocketAddr, pub pub_key: String, pub registered_clients: Vec<MixProviderClient>, pub last_seen: u64, pub version: String, } impl Into<SphinxNode> for MixProviderNode { fn into(self) -> SphinxNode { let address_bytes = addressing::encoded_bytes_from_socket_address(self.mixnet_listener); let key_bytes = self.get_pub_key_bytes(); let key = MontgomeryPoint(key_bytes); SphinxNode::new(NodeAddressBytes::from_bytes(address_bytes), key) } } impl MixProviderNode { pub fn get_pub_key_bytes(&self) -> [u8; 32] { let decoded_key_bytes = base64::decode_config(&self.pub_key, base64::URL_SAFE).unwrap(); let mut key_bytes = [0; 32]; key_bytes.copy_from_slice(&decoded_key_bytes[..]); key_bytes } } #[derive(Debug, Clone)] pub struct CocoNode { pub host: String, pub pub_key: String, pub last_seen: u64, pub version: String, } #[derive(Debug)] pub enum NymTopologyError { 
InvalidMixLayerError, MissingLayerError(Vec<u64>), } pub trait NymTopology: Sized { fn new(directory_server: String) -> Self; fn new_from_nodes( mix_nodes: Vec<MixNode>, mix_provider_nodes: Vec<MixProviderNode>, coco_nodes: Vec<CocoNode>, ) -> Self; fn get_mix_nodes(&self) -> Vec<MixNode>; fn get_mix_provider_nodes(&self) -> Vec<MixProviderNode>; fn get_coco_nodes(&self) -> Vec<CocoNode>; fn make_layered_topology(&self) -> Result<HashMap<u64, Vec<MixNode>>, NymTopologyError> { let mut layered_topology: HashMap<u64, Vec<MixNode>> = HashMap::new(); let mut highest_layer = 0; for mix in self.get_mix_nodes() { // we need to have extra space for provider if mix.layer > sphinx::constants::MAX_PATH_LENGTH as u64 { return Err(NymTopologyError::InvalidMixLayerError); } highest_layer = max(highest_layer, mix.layer); let layer_nodes = layered_topology.entry(mix.layer).or_insert(Vec::new()); layer_nodes.push(mix); } // verify the topology - make sure there are no gaps and there is at least one node per layer let mut missing_layers = Vec::new(); for layer in 1..=highest_layer { if !layered_topology.contains_key(&layer) { missing_layers.push(layer); } if layered_topology[&layer].len() == 0 { missing_layers.push(layer); } } if missing_layers.len() > 0 { return Err(NymTopologyError::MissingLayerError(missing_layers)); } Ok(layered_topology) } fn mix_route(&self) -> Result<Vec<SphinxNode>, NymTopologyError> { let mut layered_topology = self.make_layered_topology()?; let num_layers = layered_topology.len(); let route = (1..=num_layers as u64) .map(|layer| layered_topology.remove(&layer).unwrap()) // for each layer .map(|nodes| nodes.into_iter().choose(&mut rand::thread_rng()).unwrap()) // choose random node .map(|random_node| random_node.into()) // and convert it into sphinx specific node format .collect(); Ok(route) } // sets a route to specific provider fn route_to(&self, provider_node: SphinxNode) -> Result<Vec<SphinxNode>, NymTopologyError> { Ok(self .mix_route()? 
.into_iter() .chain(std::iter::once(provider_node)) .collect()) } fn all_paths(&self) -> Result<Vec<Vec<SphinxNode>>, NymTopologyError> { let mut layered_topology = self.make_layered_topology()?; let providers = self.get_mix_provider_nodes(); let sorted_layers: Vec<Vec<SphinxNode>> = (1..=layered_topology.len() as u64) .map(|layer| layered_topology.remove(&layer).unwrap()) // get all nodes per layer .map(|layer_nodes| layer_nodes.into_iter().map(|node| node.into()).collect()) // convert them into 'proper' sphinx nodes .chain(std::iter::once( providers.into_iter().map(|node| node.into()).collect(), )) // append all providers to the end .collect(); let all_paths = sorted_layers .into_iter() .multi_cartesian_product() // create all possible paths through that .collect(); Ok(all_paths) } fn filter_node_versions( &self, mix_version: &str, provider_version: &str, coco_version: &str, ) -> Self { let filtered_mixes = self .get_mix_nodes() .iter() .cloned() .filter(|mix_node| mix_node.version == mix_version) .collect(); let filtered_providers = self .get_mix_provider_nodes() .iter() .cloned() .filter(|provider_node| provider_node.version == provider_version) .collect(); let filtered_coco_nodes = self .get_coco_nodes() .iter() .cloned() .filter(|coco_node| coco_node.version == coco_version) .collect(); Self::new_from_nodes(filtered_mixes, filtered_providers, filtered_coco_nodes) } fn can_construct_path_through(&self) -> bool { match self.make_layered_topology() { Ok(_) => true, Err(_) => false, } } } // TODO: tests...
use std::env; use std::fs::File; use std::io::Write; use std::path::PathBuf; fn main() { let out_dir = PathBuf::from(env::var("OUT_DIR").unwrap()); let memory_x = include_bytes!("memory.x.in"); let mut f = File::create(out_dir.join("memory.x")).unwrap(); f.write_all(memory_x).unwrap(); println!("cargo:rustc-link-search={}", out_dir.display()); }
use tree_sitter::{Point, Range, Tree}; #[derive(Debug)] pub struct ScopeSequence(Vec<ScopeStack>); type ScopeStack = Vec<&'static str>; impl ScopeSequence { pub fn new(tree: &Tree) -> Self { let mut result = ScopeSequence(Vec::new()); let mut scope_stack = Vec::new(); let mut cursor = tree.walk(); let mut visited_children = false; loop { let node = cursor.node(); for _ in result.0.len()..node.start_byte() { result.0.push(scope_stack.clone()); } if visited_children { for _ in result.0.len()..node.end_byte() { result.0.push(scope_stack.clone()); } scope_stack.pop(); if cursor.goto_next_sibling() { visited_children = false; } else if !cursor.goto_parent() { break; } } else { scope_stack.push(cursor.node().kind()); if !cursor.goto_first_child() { visited_children = true; } } } result } pub fn check_changes( &self, other: &ScopeSequence, text: &Vec<u8>, known_changed_ranges: &Vec<Range>, ) -> Result<(), String> { let mut position = Point { row: 0, column: 0 }; for i in 0..(self.0.len().max(other.0.len())) { let stack = &self.0.get(i); let other_stack = &other.0.get(i); if *stack != *other_stack && ![b'\r', b'\n'].contains(&text[i]) { let containing_range = known_changed_ranges .iter() .find(|range| range.start_point <= position && position < range.end_point); if containing_range.is_none() { let line = &text[(i - position.column)..] .split(|c| *c == '\n' as u8) .next() .unwrap(); return Err(format!( concat!( "Position: {}\n", "Byte offset: {}\n", "Line: {}\n", "{}^\n", "Old scopes: {:?}\n", "New scopes: {:?}\n", "Invalidated ranges: {:?}", ), position, i, String::from_utf8_lossy(line), String::from(" ").repeat(position.column + "Line: ".len()), stack, other_stack, known_changed_ranges, )); } } if text[i] == '\n' as u8 { position.row += 1; position.column = 0; } else { position.column += 1; } } Ok(()) } }
pub trait BpLoudsCommonTrait { fn isleaf (&self,pos:u64) -> bool; fn parent(&self,pos:u64) -> Option<u64>; fn first_child(&self,pos:u64) -> Option<u64>; fn next_sibling(&self,pos:u64) -> Option<u64>; }
//! Use ExifTool to extract photo metadata
use crate::{
    config::PhotoConfig,
    deserialize::{date_time_string, string_number, string_sequence},
    html,
    models::{Camera, ExposureMode, Location, Photo, PhotoFile, SizeCollection},
    tools::pos_from_name,
};
use chrono::{DateTime, FixedOffset};
use colored::*;
use serde::Deserialize;
use std::{path::Path, process::Command};

/// Deserialized JSON emitted by one `exiftool -json` invocation for a single
/// image file. Field attributes map ExifTool tag names onto snake_case
/// fields; `// or …` notes list alternative ExifTool tags that carry the
/// same information.
#[derive(Deserialize, Debug)]
pub struct ExifToolOutput {
    #[serde(rename = "Aperture")] // or FNumber
    aperture: Option<f32>,

    #[serde(default, rename = "Artist")]
    artist: Option<String>,

    #[serde(rename = "Make")]
    camera_make: Option<String>,

    #[serde(default, rename = "Model")]
    camera_model: Option<String>,

    #[serde(rename = "Description")] // or Caption-Abstract or ImageDescription
    caption: Option<String>,

    #[serde(rename = "City")]
    city: Option<String>,

    #[serde(default, rename = "ProfileDescription")]
    color_profile: Option<String>,

    #[serde(rename = "ColorTemperature")]
    color_temperature: Option<u16>,

    #[serde(default, rename = "Copyright")] // or Rights or CopyrightNotice
    copyright: Option<String>,

    /// When the *photo*, not the file, was created
    #[serde(
        default,
        rename = "CreateDate",
        deserialize_with = "date_time_string"
    )]
    created_on: Option<DateTime<FixedOffset>>,

    #[serde(
        default,
        rename = "ExposureCompensation",
        deserialize_with = "string_number"
    )]
    exposure_compensation: Option<String>,

    #[serde(default, rename = "ExposureProgram")]
    exposure_mode: ExposureMode,

    #[serde(rename = "FOV")]
    field_of_view: Option<f32>,

    /// When the *file*, not the photo, was created
    #[serde(
        default,
        rename = "FileCreateDate",
        deserialize_with = "date_time_string"
    )]
    file_created_on: Option<DateTime<FixedOffset>>,

    #[serde(rename = "FileName")]
    file_name: String,

    #[serde(rename = "FocalLength")]
    focal_length: Option<f32>,

    #[serde(rename = "ImageHeight")]
    height: u16,

    #[serde(rename = "ISO")]
    iso: Option<u16>,

    #[serde(rename = "GPSLatitude")]
    latitude: Option<f32>,

    #[serde(rename = "Lens")]
    lens: Option<String>,

    #[serde(rename = "GPSLongitude")]
    longitude: Option<f32>,

    #[serde(rename = "MaxApertureValue")]
    max_aperture: Option<f32>,

    #[serde(rename = "Software")] // or CreatorTool
    software: String,

    #[serde(rename = "State")] // or Province-State
    state: Option<String>,

    // or Subject
    #[serde(
        default,
        rename = "Keywords",
        deserialize_with = "string_sequence"
    )]
    tags: Vec<String>,

    #[serde(rename = "Title")] // or ObjectName
    title: Option<String>,

    #[serde(default, rename = "UsageTerms")]
    usage_terms: Option<String>,

    // or ShutterSpeedValue
    #[serde(
        default,
        rename = "ShutterSpeed",
        deserialize_with = "string_number"
    )]
    shutter_speed: Option<String>,

    #[serde(
        default,
        rename = "DateTimeCreated",
        deserialize_with = "date_time_string"
    )]
    // or DateTimeOriginal or DateTimeCreated
    taken_on: Option<DateTime<FixedOffset>>,

    #[serde(rename = "ImageWidth")]
    width: u16,
}

// Manual equality (a derive is impossible to customize) compares the parsed
// metadata fields.
// NOTE(review): `file_created_on` is NOT part of the comparison — apparently
// deliberate (the unit test below relies on it), but confirm.
impl PartialEq for ExifToolOutput {
    fn eq(&self, other: &Self) -> bool {
        self.file_name == other.file_name
            && self.artist == other.artist
            && self.title == other.title
            && self.caption == other.caption
            && self.tags == other.tags
            && self.city == other.city
            && self.state == other.state
            && self.copyright == other.copyright
            && self.usage_terms == other.usage_terms
            && self.software == other.software
            && self.aperture == other.aperture
            && self.iso == other.iso
            && self.shutter_speed == other.shutter_speed
            && self.exposure_compensation == other.exposure_compensation
            && self.exposure_mode == other.exposure_mode
            && self.focal_length == other.focal_length
            && self.max_aperture == other.max_aperture
            && self.lens == other.lens
            && self.camera_make == other.camera_make
            && self.camera_model == other.camera_model
            && self.taken_on == other.taken_on
            && self.created_on == other.created_on
            && self.latitude == other.latitude
            && self.longitude == other.longitude
            && self.color_profile == other.color_profile
            && self.color_temperature == other.color_temperature
            && self.field_of_view == other.field_of_view
            && self.width == other.width
            && self.height == other.height
    }
}

// NOTE(review): Eq over f32 fields is only sound while NaN never occurs in
// the compared values.
impl Eq for ExifToolOutput {}

/// Execute exif_tool for each image file in given `path` and capture output as
/// `Photo` structs
///
/// Files whose index cannot be inferred from the capture-index pattern are
/// reported and skipped. Camera data is attached only when a camera make is
/// present; a GPS location is attached only when both coordinates exist and
/// pass `Location::is_valid`.
pub fn parse_dir(path: &Path, config: &PhotoConfig) -> Vec<Photo> {
    let pattern = format!("*{}", config.source_ext);
    let mut photos: Vec<Photo> = Vec::new();
    for i in read_dir(&path, &pattern) {
        // Photo index based on its file name pattern
        let index = pos_from_name(&config.capture_index, &i.file_name).unwrap_or(0);
        if index == 0 {
            println!(
                " {} {}",
                "failed to infer index of".red(),
                i.file_name.red()
            );
            continue;
        }
        let mut photo = Photo {
            file: PhotoFile {
                name: i.file_name.clone(),
                // Unix timestamp; 0 when ExifTool reported no file date.
                created: i.file_created_on.map_or(0, |d| d.timestamp()),
            },
            title: i.title,
            artist: i.artist,
            // Captions pass through HTML formatting before storage.
            caption: i.caption.map(|s| html::caption(&s)),
            software: i.software,
            tags: i.tags,
            index,
            size: SizeCollection::from(i.width, i.height, index, config),
            // Prefer the capture time; fall back to the photo creation time.
            date_taken: i.taken_on.or(i.created_on),
            ..Photo::default()
        };
        if let Some(make) = &i.camera_make {
            photo.camera = Some(Camera {
                // The model string, if present, already names the camera.
                name: i.camera_model.unwrap_or_else(|| make.clone()),
                compensation: i.exposure_compensation,
                shutter_speed: i.shutter_speed,
                mode: i.exposure_mode,
                aperture: i.aperture,
                focal_length: i.focal_length,
                iso: i.iso,
                lens: i.lens,
            });
        }
        if i.latitude.is_some() && i.longitude.is_some() {
            let loc = Location {
                latitude: i.latitude.unwrap(),
                longitude: i.longitude.unwrap(),
            };
            if loc.is_valid() {
                photo.location = Some(loc);
            }
        }
        photos.push(photo);
    }
    photos
}

/// Runs `exiftool` over `file_pattern` inside `path` and parses its JSON
/// output. Any failure (spawn, UTF-8, empty output, JSON parse) is reported
/// to stdout and yields an empty vector rather than an error.
pub fn read_dir(path: &Path, file_pattern: &str) -> Vec<ExifToolOutput> {
    // exiftool *.jpg -json -quiet -coordFormat %.6f
    // exiftool 002.jpg -json -quiet -coordFormat %.6f -ExposureProgram#
    // exiftool *.jpg -json -quiet -Aperture# -ColorTemperature# -DateTimeCreated -FocalLength# -FOV# -Keywords# -ShutterSpeed
    // suffix field name with # to disable ExifTool formatting
    let output = match Command::new("exiftool")
        .current_dir(path.to_string_lossy().to_string())
        .arg(file_pattern)
        .arg("-json")
        .arg("-quiet")
        .arg("-Aperture#")
        .arg("-Artist")
        .arg("-City")
        .arg("-ColorTemperature#")
        .arg("-Copyright")
        .arg("-DateTimeCreated")
        .arg("-CreateDate")
        .arg("-FileCreateDate")
        // Offsets seem only to be present for software modified dates
        //.arg("-OffsetTimeOriginal")
        //.arg("-OffsetTimeDigitized")
        .arg("-Description")
        .arg("-ExposureCompensation")
        .arg("-ExposureProgram#")
        .arg("-FileName")
        .arg("-FocalLength#")
        .arg("-FOV#")
        .arg("-GPSLatitude#")
        .arg("-GPSLongitude#")
        .arg("-ImageHeight")
        .arg("-ImageWidth")
        .arg("-ISO")
        .arg("-Keywords")
        .arg("-Lens")
        .arg("-Make")
        .arg("-MaxApertureValue")
        .arg("-Model")
        .arg("-ProfileDescription")
        .arg("-ShutterSpeed")
        .arg("-Software")
        .arg("-State")
        .arg("-Title")
        .arg("-UsageTerms")
        .output()
    {
        Ok(out) => out,
        _ => {
            println!(" {}", "failed to retrieve EXIF".red());
            return Vec::new();
        }
    };
    let text = match String::from_utf8(output.stdout) {
        Ok(text) => text,
        _ => {
            println!(" {}", "Failed to convert EXIF output to UTF-8".red());
            return Vec::new();
        }
    };
    if text.is_empty() {
        println!(" {}", "EXIF JSON is empty".red());
        return Vec::new();
    }
    match serde_json::from_str::<Vec<ExifToolOutput>>(&text) {
        Ok(info) => info,
        Err(e) => {
            // Dump the raw JSON to make parse failures diagnosable.
            println!(" {}", "unable to parse EXIF JSON".red());
            println!("{}", text);
            println!(" —\n {:?}\n —", e);
            Vec::new()
        }
    }
}

#[cfg(test)]
mod tests {
    use super::ExifToolOutput;
    use crate::models::ExposureMode;
    use chrono::DateTime;

    // Round-trip a representative exiftool JSON payload through serde and
    // compare against a hand-built struct (file_created_on is excluded from
    // equality — see the PartialEq impl).
    #[test]
    fn deserialize_test() {
        let json = r#"[{
            "SourceFile": "001.jpg",
            "Aperture": 2.2,
            "Artist": "Jason Abbott",
            "ColorTemperature": 5800,
            "Copyright": "© Copyright 2017 Jason Abbott",
            "DateTimeCreated": "2017:08:06 11:25:41",
            "FileCreateDate": "2020:04:22 23:39:28-06:00",
            "Description": "We worked all day yesterday, and various days before that, to get the bikes in working order. A hot and hazy day isn’t my first choice to ride the Boise Ridge but Nick and I want to put the bikes through their paces before a four-day ride in a few weeks.",
            "ExposureProgram": 2,
            "FileName": "001.jpg",
            "FocalLength": 4.15,
            "FOV": 63.6549469203798,
            "GPSLatitude": 43.579192,
            "GPSLongitude": -116.173061,
            "ImageHeight": 75,
            "ImageWidth": 100,
            "ISO": 25,
            "Keywords": ["Gas Station","KTM 500 XC-W","Motorcycle"],
            "Lens": "iPhone 6s back camera 4.15mm f/2.2",
            "Make": "Apple",
            "Model": "iPhone 6s",
            "ProfileDescription": "ProPhoto RGB",
            "ShutterSpeed": "1/500",
            "Software": "Adobe Photoshop Lightroom Classic 9.2 (Windows)",
            "Title": "Fuel stop",
            "UsageTerms": "All Rights Reserved"
        }]"#;

        let target = vec![ExifToolOutput {
            file_name: "001.jpg".to_owned(),
            artist: Some("Jason Abbott".to_owned()),
            title: Some("Fuel stop".to_owned()),
            caption: Some("We worked all day yesterday, and various days before that, to get the bikes in working order. A hot and hazy day isn’t my first choice to ride the Boise Ridge but Nick and I want to put the bikes through their paces before a four-day ride in a few weeks.".to_owned()),
            tags: vec!["Gas Station".to_owned(),"KTM 500 XC-W".to_owned(),"Motorcycle".to_owned()],
            city: None,
            state: None,
            copyright: Some("© Copyright 2017 Jason Abbott".to_owned()),
            usage_terms: Some("All Rights Reserved".to_owned()),
            software: "Adobe Photoshop Lightroom Classic 9.2 (Windows)".to_owned(),
            aperture: Some(2.2),
            iso: Some(25),
            shutter_speed: Some("1/500".to_owned()),
            exposure_compensation: None,
            exposure_mode: ExposureMode::ProgramAE,
            focal_length: Some(4.15),
            max_aperture: None,
            lens: Some("iPhone 6s back camera 4.15mm f/2.2".to_owned()),
            camera_make: Some("Apple".to_owned()),
            camera_model: Some("iPhone 6s".to_owned()),
            taken_on: Some(DateTime::parse_from_rfc3339("2017-08-06T11:25:41-06:00").unwrap()),
            created_on: None,
            file_created_on: Some(DateTime::parse_from_rfc3339("2020-04-22T22:39:28-06:00").unwrap()),
            latitude: Some(43.579192),
            longitude: Some(-116.173061),
            color_profile: Some("ProPhoto RGB".to_owned()),
            color_temperature: Some(5800),
            field_of_view: Some(63.6549469203798),
            width: 100,
            height: 75
        }];

        match serde_json::from_str::<Vec<ExifToolOutput>>(&json) {
            Ok(exif) => assert_eq!(exif, target),
            Err(e) => {
                eprintln!("{:?}", e);
                panic!()
            }
        }
    }
}
use std::clone::Clone; use std::convert::From; use std::default::Default; use std::fmt; use std::ops::{Index, IndexMut}; fn main() -> std::io::Result<()> { let input = std::fs::read_to_string("examples/17/input.txt")?; let data: Vec<Vec<_>> = input .lines() .map(|line| line.trim().chars().map(|x| Cell::from(x)).collect()) .collect(); let n = 50; let mut grid: Grid<Cell> = Grid::new(n, n, n); // fill with the initial data let x_len = data[0].len(); let z = grid.z / 2; let mut y = (grid.y - data.len()) / 2; for line in &data { let mut x = (grid.x - x_len) / 2; for cell in line { grid[[x, y, z]] = *cell; x += 1; } y += 1; } let mut next = grid.clone(); for _ in 0..6 { for z in 0..grid.z { for y in 0..grid.y { for x in 0..grid.x { let pos = [x, y, z]; let neighbors = neighbors(&pos, &grid); match grid[[x, y, z]] { Cell::Active => match neighbors { 2 | 3 => { next[pos] = Cell::Active; } _ => { next[pos] = Cell::Inactive; } }, Cell::Inactive => match neighbors { 3 => { next[pos] = Cell::Active; } _ => { next[pos] = Cell::Inactive; } }, } } } } std::mem::swap(&mut grid, &mut next); } let active = grid.data.iter().filter(|x| **x == Cell::Active).count(); println!("{}", active); // Part 2 let mut grid: Grid4<Cell> = Grid4::new(n, n, n, n); // fill with the initial data let x_len = data[0].len(); let z = grid.z / 2; let w = grid.z / 2; let mut y = (grid.y - data.len()) / 2; for line in &data { let mut x = (grid.x - x_len) / 2; for cell in line { grid[[x, y, z, w]] = *cell; x += 1; } y += 1; } let mut next = grid.clone(); for _ in 0..6 { for w in 0..grid.z { for z in 0..grid.z { for y in 0..grid.y { for x in 0..grid.x { let pos = [x, y, z, w]; let neighbors = neighbors4(&pos, &grid); match grid[[x, y, z, w]] { Cell::Active => match neighbors { 2 | 3 => { next[pos] = Cell::Active; } _ => { next[pos] = Cell::Inactive; } }, Cell::Inactive => match neighbors { 3 => { next[pos] = Cell::Active; } _ => { next[pos] = Cell::Inactive; } }, } } } } } std::mem::swap(&mut grid, &mut 
next); } let active = grid.data.iter().filter(|x| **x == Cell::Active).count(); println!("{}", active); Ok(()) } impl From<char> for Cell { fn from(c: char) -> Self { match c { '#' => Cell::Active, '.' => Cell::Inactive, _ => panic!(), } } } #[derive(Debug, Copy, Clone, PartialEq)] enum Cell { Active, Inactive, } impl Default for Cell { fn default() -> Self { Cell::Inactive } } impl From<Cell> for char { fn from(c: Cell) -> char { match c { Cell::Active => '#', Cell::Inactive => '.', } } } struct Grid<T> { data: Vec<T>, x: usize, y: usize, z: usize, } impl<T> Index<[usize; 3]> for Grid<T> { type Output = T; fn index(&self, index: [usize; 3]) -> &T { &self.data[index[0] + index[1] * self.x + index[2] * self.x * self.y] } } impl<T> IndexMut<[usize; 3]> for Grid<T> { fn index_mut(&mut self, index: [usize; 3]) -> &mut T { &mut self.data[index[0] + index[1] * self.x + index[2] * self.x * self.y] } } impl<T> Grid<T> where T: Clone, T: Default, { fn new(x: usize, y: usize, z: usize) -> Self { Self { data: vec![Default::default(); x * y * z], x, y, z, } } } impl<T> Clone for Grid<T> where T: Clone, { fn clone(&self) -> Self { Self { data: self.data.clone(), x: self.x, y: self.y, z: self.z, } } } fn neighbors(pos: &[usize; 3], grid: &Grid<Cell>) -> u64 { let mut neighbors = 0; let dirs = &[-1 as i64, 0, 1]; for x in dirs { for y in dirs { for z in dirs { if *x == 0 && *y == 0 && *z == 0 { continue; } let [a, b, c] = pos; let i = (x + *a as i64) as usize; let j = (y + *b as i64) as usize; let k = (z + *c as i64) as usize; if i != usize::MAX && j != usize::MAX && k != usize::MAX && i != grid.x && j != grid.y && k != grid.z { if let Cell::Active = grid[[i, j, k]] { neighbors += 1; } } } } } neighbors } impl fmt::Display for Grid<Cell> { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { for z in 0..self.z { let start = z * self.x * self.y; let has_active = self.data[start..(start + self.x * self.y)] .iter() .find(|x| **x == Cell::Active); if has_active.is_none() { 
continue; } writeln!(f, "z={}", z)?; for y in 0..self.y { let start = z * self.x * self.y + y * self.x; let s = self.data[start..(start + self.x)] .iter() .map(|x| char::from(*x)) .collect::<String>(); writeln!(f, "{}", s)?; } writeln!(f, "")?; } Ok(()) } } struct Grid4<T> { data: Vec<T>, x: usize, y: usize, z: usize, w: usize, } impl<T> Index<[usize; 4]> for Grid4<T> { type Output = T; fn index(&self, index: [usize; 4]) -> &T { &self.data[index[0] + index[1] * self.x + index[2] * self.x * self.y + index[3] * self.x * self.y * self.z] } } impl<T> IndexMut<[usize; 4]> for Grid4<T> { fn index_mut(&mut self, index: [usize; 4]) -> &mut T { &mut self.data[index[0] + index[1] * self.x + index[2] * self.x * self.y + index[3] * self.x * self.y * self.z] } } impl<T> Grid4<T> where T: Clone, T: Default, { fn new(x: usize, y: usize, z: usize, w: usize) -> Self { Self { data: vec![Default::default(); x * y * z * w], x, y, z, w, } } } impl<T> Clone for Grid4<T> where T: Clone, { fn clone(&self) -> Self { Self { data: self.data.clone(), x: self.x, y: self.y, z: self.z, w: self.w, } } } fn neighbors4(pos: &[usize; 4], grid: &Grid4<Cell>) -> u64 { let mut neighbors = 0; let dirs = &[-1 as i64, 0, 1]; for x in dirs { for y in dirs { for z in dirs { for w in dirs { if *x == 0 && *y == 0 && *z == 0 && *w == 0 { continue; } let [a, b, c, d] = pos; let i = (x + *a as i64) as usize; let j = (y + *b as i64) as usize; let k = (z + *c as i64) as usize; let l = (w + *d as i64) as usize; if i != usize::MAX && j != usize::MAX && k != usize::MAX && l != usize::MAX && i != grid.x && j != grid.y && k != grid.z && l != grid.w { if let Cell::Active = grid[[i, j, k, l]] { neighbors += 1; } } } } } } neighbors }
#![allow(clippy::missing_safety_doc)] use polodb_core::{DbContext, DbErr, DbHandle, TransactionType, Config}; use polodb_bson::{Value, ObjectId, Document, Array, UTCDateTime, ty_int}; use polodb_bson::linked_hash_map::Iter; use std::cell::RefCell; use std::rc::Rc; use std::os::raw::{c_char, c_uint, c_int, c_double, c_longlong}; use std::ptr::{null_mut, write_bytes, null}; use std::ffi::{CStr, CString}; use std::borrow::Borrow; const DB_ERROR_MSG_SIZE: usize = 512; thread_local! { static DB_GLOBAL_ERROR: RefCell<Option<DbErr>> = RefCell::new(None); static DB_GLOBAL_ERROR_MSG: RefCell<[c_char; DB_ERROR_MSG_SIZE]> = RefCell::new([0; DB_ERROR_MSG_SIZE]); } #[repr(C)] pub union ValueUnion { int_value: i64, double_value: c_double, bool_value: c_int, str: *mut c_char, oid: *mut ObjectId, arr: *mut Rc<Array>, doc: *mut Rc<Document>, bin: *mut Rc<Vec<u8>>, utc: u64, } #[repr(C)] pub struct ValueMock { tag: u8, value: ValueUnion, } macro_rules! try_read_utf8 { ($action:expr, $ret:expr) => { match $action { Ok(str) => str, Err(err) => { set_global_error(err.into()); return $ret; } } } } fn set_global_error(err: DbErr) { DB_GLOBAL_ERROR.with(|f| { *f.borrow_mut() = Some(err); }); } #[no_mangle] pub unsafe extern "C" fn PLDB_open(path: *const c_char) -> *mut DbContext { let cstr = CStr::from_ptr(path); let str = try_read_utf8!(cstr.to_str(), null_mut()); let db = match DbContext::new(str.as_ref(), Config::default()) { Ok(db) => db, Err(err) => { set_global_error(err); return null_mut(); } }; let ptr = Box::new(db); Box::into_raw(ptr) } #[no_mangle] pub unsafe extern "C" fn PLDB_start_transaction(db: *mut DbContext, flags: c_int) -> c_int { let rust_db = db.as_mut().unwrap(); let ty = match flags { 0 => None, 1 => Some(TransactionType::Read), 2 => Some(TransactionType::Write), _ => { set_global_error(DbErr::UnknownTransactionType); return PLDB_error_code(); } }; match rust_db.start_transaction(ty) { Ok(()) => 0, Err(err) => { set_global_error(err); PLDB_error_code() } } } 
#[no_mangle] pub unsafe extern "C" fn PLDB_rollback(db: *mut DbContext) -> c_int { let rust_db = db.as_mut().unwrap(); match rust_db.rollback() { Ok(()) => 0, Err(err) => { set_global_error(err); PLDB_error_code() } } } #[no_mangle] pub unsafe extern "C" fn PLDB_commit(db: *mut DbContext) -> c_int { let rust_db = db.as_mut().unwrap(); match rust_db.commit() { Ok(()) => 0, Err(err) => { set_global_error(err); PLDB_error_code() } } } #[no_mangle] pub unsafe extern "C" fn PLDB_count(db: *mut DbContext, col_id: c_uint, meta_version: u32) -> c_longlong { let rust_db = db.as_mut().unwrap(); let result = rust_db.count(col_id, meta_version); match result { Ok(result) => { result as c_longlong } Err(err) => { set_global_error(err); PLDB_error_code() as c_longlong } } } #[no_mangle] pub unsafe extern "C" fn PLDB_create_collection(db: *mut DbContext, name: *const c_char, col_id: *mut c_uint, meta_version: *mut c_uint) -> c_int { let name_str= CStr::from_ptr(name); let name_utf8 = try_read_utf8!(name_str.to_str(), PLDB_error_code()); let oid_result = db.as_mut().unwrap().create_collection(name_utf8); match oid_result { Ok(meta) => { col_id.write(meta.id); meta_version.write(meta.meta_version); 0 } Err(err) => { set_global_error(err); PLDB_error_code() } } } #[no_mangle] pub unsafe extern "C" fn PLDB_drop(db: *mut DbContext, col_id: c_uint, meta_version: c_uint) -> c_int { let result = db.as_mut().unwrap().drop(col_id, meta_version); if let Err(err) = result { set_global_error(err); return PLDB_error_code(); } 0 } #[no_mangle] pub unsafe extern "C" fn PLDB_get_collection_meta_by_name(db: *mut DbContext, name: *const c_char, id: *mut c_uint, version: *mut c_uint) -> c_int { let str = CStr::from_ptr(name); let utf8str = try_read_utf8!(str.to_str(), PLDB_error_code()); let result = db.as_mut().unwrap().get_collection_meta_by_name(utf8str); match result { Ok(info) => { id.write(info.id); version.write(info.meta_version); 0 } Err(err) => { set_global_error(err); PLDB_error_code() } 
} } #[no_mangle] pub unsafe extern "C" fn PLDB_insert(db: *mut DbContext, col_id: c_uint, meta_version: c_uint, doc: *mut Rc<Document>) -> c_int { let local_db = db.as_mut().unwrap(); let local_doc = doc.as_mut().unwrap(); let mut_doc = Rc::make_mut(local_doc); let insert_result = local_db.insert(col_id, meta_version, mut_doc); if let Err(err) = insert_result { set_global_error(err); return PLDB_error_code(); } match insert_result { Ok(true) => 1, Ok(false) => 0, Err(err) => { set_global_error(err); PLDB_error_code() } } } /// query is nullable #[no_mangle] pub unsafe extern "C" fn PLDB_find(db: *mut DbContext, col_id: c_uint, meta_version: c_uint, query: *const Rc<Document>, out_handle: *mut *mut DbHandle) -> c_int { let rust_db = db.as_mut().unwrap(); let handle_result = match query.as_ref() { Some(query_doc) => rust_db.find(col_id, meta_version, Some(query_doc.borrow())), None => rust_db.find(col_id, meta_version, None), }; let handle = match handle_result { Ok(handle) => handle, Err(err) => { set_global_error(err); return PLDB_error_code(); } }; let boxed_handle = Box::new(handle); let raw_handle = Box::into_raw(boxed_handle); out_handle.write(raw_handle); 0 } /// query is nullable #[no_mangle] pub unsafe extern "C" fn PLDB_update(db: *mut DbContext, col_id: c_uint, meta_version: c_uint, query: *const Rc<Document>, update: *const Rc<Document>) -> c_longlong { let result = { let rust_db = db.as_mut().unwrap(); let update_doc = update.as_ref().unwrap(); match query.as_ref() { Some(query) => rust_db.update(col_id, meta_version, Some(query.as_ref()), update_doc), None => rust_db.update(col_id, meta_version, None, update_doc), } }; match result { Ok(result) => result as c_longlong, Err(err) => { set_global_error(err); PLDB_error_code() as c_longlong } } } /// return value represents how many rows are deleted #[no_mangle] pub unsafe extern "C" fn PLDB_delete(db: *mut DbContext, col_id: c_uint, meta_version: c_uint, query: *const Rc<Document>) -> c_longlong { let 
// ---------------------------------------------------------------------------
// C ABI surface for the embedded document database.
// Conventions visible in this file:
//   * every pointer parameter is dereferenced with `.as_ref()/.as_mut().unwrap()`,
//     so callers MUST pass valid, non-null pointers;
//   * errors are stashed in a thread-local (`set_global_error`) and surfaced as
//     the negative code from `PLDB_error_code()`;
//   * `PLDB_mk_*` transfer ownership to the caller, who must release with the
//     matching `PLDB_free_*` / `PLDB_close*` function.
// NOTE(review): the first function below is cut off at the top of this chunk
// (its signature — presumably `PLDB_delete` — lives outside this view).
// ---------------------------------------------------------------------------
rust_db = db.as_mut().unwrap();
    let query_doc = query.as_ref().unwrap();
    let result = rust_db.delete(col_id, meta_version, query_doc.as_ref());
    match result {
        // On success: number of deleted documents.
        Ok(size) => size as c_longlong,
        // On failure: record the error and return the (negative) code.
        Err(err) => {
            set_global_error(err);
            PLDB_error_code() as c_longlong
        }
    }
}

/// Deletes every document in collection `col_id`.
/// Returns the number of deleted documents, or a negative error code.
#[no_mangle]
pub unsafe extern "C" fn PLDB_delete_all(db: *mut DbContext, col_id: c_uint, meta_version: c_uint) -> c_longlong {
    let result = {
        let rust_db = db.as_mut().unwrap();
        rust_db.delete_all(col_id, meta_version)
    };
    match result {
        Ok(size) => size as c_longlong,
        Err(err) => {
            set_global_error(err);
            PLDB_error_code() as c_longlong
        }
    }
}

/// Renders a handle's Display form into `buffer`.
/// With a null `buffer`, returns the required size (length + 1 for the NUL).
/// Otherwise returns the number of characters written, or a negative error
/// code when `buffer_size` is too small.
#[no_mangle]
pub unsafe extern "C" fn PLDB_handle_to_str(handle: *mut DbHandle, buffer: *mut c_char, buffer_size: c_uint) -> c_int {
    let rust_handle = handle.as_mut().unwrap();
    let str_content = format!("{}", rust_handle);
    let length = str_content.len();
    // Size-query mode: report how many bytes the caller must allocate.
    if buffer.is_null() {
        return (length + 1) as c_int;
    }
    if (buffer_size as usize) < length + 1 {
        set_global_error(DbErr::BufferNotEnough(length + 1));
        return PLDB_error_code();
    }
    let cstring = CString::new(str_content).unwrap();
    // NOTE(review): only `length` bytes are copied, so the terminating NUL is
    // never written even though `length + 1` bytes were required of the
    // caller — presumably the caller zero-fills the buffer; confirm.
    cstring.as_ptr().copy_to_nonoverlapping(buffer, length);
    length as c_int
}

/// Advances the handle's virtual machine by one step.
/// Returns 0 on success, a negative error code otherwise.
#[no_mangle]
pub unsafe extern "C" fn PLDB_step(handle: *mut DbHandle) -> c_int {
    let rust_handle = handle.as_mut().unwrap();
    let result = rust_handle.step();
    if let Err(err) = result {
        set_global_error(err);
        return PLDB_error_code();
    }
    0
}

/// Returns the handle's current state as an integer.
#[no_mangle]
pub unsafe extern "C" fn PLDB_handle_state(handle: *mut DbHandle) -> c_int {
    let rust_handle = handle.as_mut().unwrap();
    rust_handle.state() as c_int
}

/// Copies the handle's current value into `out_val` (C-compatible mock form).
#[no_mangle]
pub unsafe extern "C" fn PLDB_handle_get(handle: *mut DbHandle, out_val: *mut ValueMock) {
    let rust_handle = handle.as_mut().unwrap();
    let mock = db_value_to_mock_value(rust_handle.get());
    out_val.write(mock);
}

/// Commits the handle's VM and frees it. Any commit error goes to the
/// thread-local error slot; the handle memory is reclaimed either way.
#[no_mangle]
pub unsafe extern "C" fn PLDB_close_and_free_handle(handle: *mut DbHandle) {
    // Reclaim ownership from the raw pointer; dropped at end of scope.
    let handle = Box::from_raw(handle);
    if let Err(err) = handle.commit_and_close_vm() {
        set_global_error(err);
    }
}

/// Frees a handle without committing.
#[no_mangle]
pub unsafe extern "C" fn PLDB_free_handle(handle: *mut DbHandle) {
    let _ptr = Box::from_raw(handle);
}

/// Returns the current thread-local error as a negative code, or 0 if none.
#[no_mangle]
pub extern "C" fn PLDB_error_code() -> c_int {
    DB_GLOBAL_ERROR.with(|f| {
        if let Some(err) = f.borrow().as_ref() {
            // Codes are negated so callers can distinguish them from counts.
            let code = error_code_of_db_err(err) * -1;
            return code
        }
        0
    })
}

/// Returns the current error message as a pointer into a thread-local
/// buffer (valid until the next call on this thread), or NULL if no error.
#[no_mangle]
pub unsafe extern "C" fn PLDB_error_msg() -> *const c_char {
    DB_GLOBAL_ERROR.with(|f| {
        if let Some(err) = f.borrow_mut().as_ref() {
            return DB_GLOBAL_ERROR_MSG.with(|msg| {
                // Zero the whole buffer first so the copied prefix is
                // NUL-terminated.
                write_bytes(msg.borrow_mut().as_mut_ptr(), 0, DB_ERROR_MSG_SIZE);
                let err_msg = err.to_string();
                let str_size = err_msg.len();
                let err_cstring = CString::new(err_msg).unwrap();
                // Leave at least one byte for the terminator.
                let expected_size: usize = std::cmp::min(str_size, DB_ERROR_MSG_SIZE - 1);
                err_cstring.as_ptr().copy_to(msg.borrow_mut().as_mut_ptr(), expected_size);
                msg.borrow().as_ptr()
            });
        }
        null()
    })
}

/// Copies the library version string into `buffer` and returns the number of
/// bytes copied.
/// NOTE(review): no NUL terminator is written; if `buffer_size` is smaller
/// than the version string the copy is silently truncated — confirm callers
/// zero-fill the buffer.
#[no_mangle]
pub unsafe extern "C" fn PLDB_version(buffer: *mut c_char, buffer_size: c_uint) -> c_uint {
    let version_str = DbContext::get_version();
    let str_size = version_str.len();
    let cstring = CString::new(version_str).unwrap();
    let c_ptr = cstring.as_ptr();
    let expected_size: usize = std::cmp::min(str_size, buffer_size as usize);
    c_ptr.copy_to(buffer, expected_size);
    expected_size as c_uint
}

/// Closes the database and frees the context.
#[no_mangle]
pub unsafe extern "C" fn PLDB_close(db: *mut DbContext) {
    let _ptr = Box::from_raw(db);
}

/// Builds a BINARY value by copying `size` bytes from `ptr`.
/// The returned value owns a heap allocation; release with PLDB_free_value.
#[no_mangle]
pub unsafe extern "C" fn PLDB_mk_binary_value(ptr: *const c_char, size: u32) -> ValueMock {
    let mut bytes: Vec<u8> = Vec::new();
    bytes.resize(size as usize, 0);
    ptr.copy_to(bytes.as_mut_ptr().cast(), size as usize);
    let raw: *mut Rc<Vec<u8>> = Box::into_raw(Box::new(Rc::new(bytes)));
    ValueMock {
        tag: ty_int::BINARY,
        value: ValueUnion {
            bin: raw,
        },
    }
}

/// Allocates an empty array; release with PLDB_free_arr.
#[no_mangle]
pub extern "C" fn PLDB_mk_arr() -> *mut Rc<Array> {
    let result = Box::new(Rc::new(Array::new()));
    Box::into_raw(result)
}

/// Allocates an array pre-sized to `size`; release with PLDB_free_arr.
#[no_mangle]
pub extern "C" fn PLDB_mk_arr_with_size(size: c_uint) -> *mut Rc<Array> {
    let result = Box::new(Rc::new(Array::new_with_size(size as usize)));
    Box::into_raw(result)
}

/// Frees an array previously returned by PLDB_mk_arr*.
#[no_mangle]
pub unsafe extern "C" fn PLDB_free_arr(arr: *mut Rc<Array>) {
    let _ptr = Box::from_raw(arr);
}

/// Appends `val` to the array.
/// NOTE(review): `Rc::get_mut(...).unwrap()` panics if the array is shared
/// (refcount > 1) or if `val` has an unknown tag — confirm this is intended
/// to abort across the FFI boundary.
#[no_mangle]
pub unsafe extern "C" fn PLDB_arr_push(arr: *mut Rc<Array>, val: ValueMock) {
    let local_arr = arr.as_mut().unwrap();
    let arr_mut = Rc::get_mut(local_arr).unwrap();
    let local_value = mock_value_to_db_value(val).unwrap();
    arr_mut.push(local_value)
}

/// Converts an internal `Value` into the C-compatible tagged-union form.
/// Heap-backed variants (STRING/BINARY/DOCUMENT/ARRAY/OBJECT_ID) allocate;
/// the caller releases them via PLDB_free_value.
unsafe fn db_value_to_mock_value(value: &Value) -> ValueMock {
    match value {
        Value::Null => {
            ValueMock {
                tag: ty_int::NULL,
                value: ValueUnion {
                    int_value: 0,
                },
            }
        }
        Value::Int(i) => {
            ValueMock {
                tag: ty_int::INT,
                value: ValueUnion {
                    int_value: *i,
                },
            }
        }
        Value::Double(db) => {
            ValueMock {
                tag: ty_int::DOUBLE,
                value: ValueUnion {
                    double_value: *db,
                },
            }
        }
        Value::Boolean(bl) => {
            ValueMock {
                tag: ty_int::BOOLEAN,
                value: ValueUnion {
                    bool_value: if *bl { 1 } else { 0 },
                },
            }
        }
        Value::String(str) => {
            // Strings cross the boundary as NUL-terminated C strings
            // allocated with libc::malloc (freed with libc::free in
            // PLDB_free_value).
            let len = str.len();
            let bytes: *mut c_char = libc::malloc(len + 1).cast();
            // Zero-fill guarantees the trailing NUL.
            libc::memset(bytes.cast(), 0, len + 1);
            str.as_ptr().copy_to(bytes.cast(), len);
            ValueMock {
                tag: ty_int::STRING,
                value: ValueUnion {
                    str: bytes,
                },
            }
        }
        Value::Binary(arr) => {
            // Cloning the Rc bumps the refcount; the payload is shared.
            let bin: *mut Rc<Vec<u8>> = Box::into_raw(Box::new(arr.clone()));
            ValueMock {
                tag: ty_int::BINARY,
                value: ValueUnion {
                    bin,
                },
            }
        }
        Value::Document(doc) => {
            let d: *mut Rc<Document> = Box::into_raw(Box::new(doc.clone()));
            ValueMock {
                tag: ty_int::DOCUMENT,
                value: ValueUnion {
                    doc: d,
                },
            }
        }
        Value::Array(arr) => {
            let a: *mut Rc<Array> = Box::into_raw(Box::new(arr.clone()));
            ValueMock {
                tag: ty_int::ARRAY,
                value: ValueUnion {
                    arr: a,
                },
            }
        }
        Value::ObjectId(oid) =>{
            // ObjectId is deep-cloned rather than Rc-shared.
            let a: *mut ObjectId = Box::into_raw(Box::new(oid.as_ref().clone()));
            ValueMock {
                tag: ty_int::OBJECT_ID,
                value: ValueUnion {
                    oid: a,
                },
            }
        }
        Value::UTCDateTime(dt) => {
            ValueMock {
                tag: ty_int::UTC_DATETIME,
                value: ValueUnion {
                    utc: dt.timestamp(),
                },
            }
        }
    }
}

/// Converts the C-compatible tagged union back into an internal `Value`.
/// Returns None for an unknown tag. Reads the union field selected by the
/// tag, so the caller must pass a consistently constructed ValueMock.
unsafe fn mock_value_to_db_value(v: ValueMock) -> Option<Value> {
    match v.tag {
        ty_int::NULL => {
            Some(Value::Null)
        }
        ty_int::DOUBLE => {
            Some(Value::from(v.value.double_value))
        }
        ty_int::BOOLEAN => {
            Some(Value::Boolean(v.value.bool_value != 0))
        }
        ty_int::INT => {
            Some(Value::Int(v.value.int_value))
        }
        ty_int::STRING => {
            let local_str = CStr::from_ptr(v.value.str);
            // NOTE(review): panics on non-UTF-8 input from the C side.
            let utf8 = local_str.to_str().unwrap();
            Some(Value::from(utf8))
        }
        ty_int::OBJECT_ID => {
            let oid_ref = v.value.oid.as_ref().unwrap();
            Some(Value::ObjectId(Rc::new(oid_ref.clone())))
        }
        ty_int::ARRAY => {
            let local_ref = v.value.arr.as_ref().unwrap();
            Some(Value::Array(local_ref.clone()))
        }
        ty_int::DOCUMENT => {
            let local_ref = v.value.doc.as_ref().unwrap();
            Some(Value::Document(local_ref.clone()))
        }
        ty_int::BINARY => {
            let local_bin = v.value.bin.as_ref().unwrap();
            Some(Value::Binary(local_bin.clone()))
        }
        ty_int::UTC_DATETIME => {
            Some(Value::UTCDateTime(Rc::new(UTCDateTime::new(v.value.utc))))
        }
        _ => None,
    }
}

/// Writes element `index` of the array into `out_val`. Always returns 0.
/// NOTE(review): indexing panics when `index` is out of bounds — confirm.
#[no_mangle]
pub unsafe extern "C" fn PLDB_arr_get(arr: *mut Rc<Array>, index: c_uint, out_val: *mut ValueMock) -> c_int {
    let local_arr = arr.as_mut().unwrap();
    let val = &local_arr[index as usize];
    let mock = db_value_to_mock_value(val);
    out_val.write(mock);
    0
}

/// Returns the array's length.
#[no_mangle]
pub unsafe extern "C" fn PLDB_arr_len(arr: *mut Rc<Array>) -> c_uint {
    let local_arr = arr.as_ref().unwrap();
    local_arr.len()
}

/// Allocates a document without an `_id`; release with PLDB_free_doc.
#[no_mangle]
pub extern "C" fn PLDB_mk_doc() -> *mut Rc<Document> {
    let result = Box::new(Rc::new(Document::new_without_id()));
    Box::into_raw(result)
}

/// Inserts `value` under `key`. Returns 1 if a previous value was replaced,
/// 0 otherwise, or a negative code when `key` is not valid UTF-8.
#[no_mangle]
pub unsafe extern "C" fn PLDB_doc_set(doc: *mut Rc<Document>, key: *const c_char, value: ValueMock) -> c_int {
    let local_doc = doc.as_mut().unwrap();
    let key_str = CStr::from_ptr(key);
    // try_read_utf8! early-returns the error code on invalid UTF-8.
    let key = try_read_utf8!(key_str.to_str(), PLDB_error_code());
    let local_doc_mut = Rc::get_mut(local_doc).unwrap();
    let v = mock_value_to_db_value(value).unwrap();
    let result = local_doc_mut.insert(key.into(), v);
    if result.is_some() {
        1
    } else {
        0
    }
}

/// Overwrites element `index` of the array. Always returns 0 on success.
#[no_mangle]
pub unsafe extern "C" fn PLDB_arr_set(arr: *mut Rc<Array>, index: u32, value: ValueMock) -> c_int {
    let local_arr = arr.as_mut().unwrap();
    let local_arr_mut = Rc::get_mut(local_arr).unwrap();
    let result = mock_value_to_db_value(value).unwrap();
    local_arr_mut[index as usize] = result;
    0
}

/// Looks up `key`; writes the value into `result` and returns 1 when found,
/// returns 0 when absent, or a negative code when `key` is not valid UTF-8.
#[no_mangle]
pub unsafe extern "C" fn PLDB_doc_get(doc: *mut Rc<Document>, key: *const c_char, result: *mut ValueMock) -> c_int {
    let local_doc = doc.as_mut().unwrap();
    let key_str = CStr::from_ptr(key);
    let utf8_key = try_read_utf8!(key_str.to_str(), PLDB_error_code());
    let get_result = local_doc.get(utf8_key);
    if let Some(value) = get_result {
        let value_mock = db_value_to_mock_value(value);
        result.write(value_mock);
        return 1;
    }
    0
}

/// Returns the number of entries in the document.
#[no_mangle]
pub unsafe extern "C" fn PLDB_doc_len(doc: *mut Rc<Document>) -> c_int {
    let local_doc = doc.as_mut().unwrap();
    let len = local_doc.len();
    len as c_int
}

/// Creates a heap-allocated iterator over the document's entries.
/// NOTE(review): the `'static` lifetime hides the borrow of the document —
/// the iterator must not outlive (or be used across mutations of) `doc`.
#[no_mangle]
pub unsafe extern "C" fn PLDB_doc_iter(doc: *mut Rc<Document>) -> *mut Iter<'static, String, Value> {
    let local_doc = doc.as_mut().unwrap();
    let iter = local_doc.iter();
    Box::into_raw(Box::new(iter))
}

/// Advances the iterator. Writes the key into `key_buffer` and the value
/// into `out_val`; returns the key length, 0 at the end, or a negative code
/// when the buffer is too small.
/// NOTE(review): no NUL terminator is written after the key bytes — confirm
/// callers zero-fill or size for it.
#[no_mangle]
pub unsafe extern "C" fn PLDB_doc_iter_next(iter: *mut Iter<'static, String, Value>,
                                            key_buffer: *mut c_char, key_buffer_size: c_uint, out_val: *mut ValueMock) -> c_int {
    let local_iter = iter.as_mut().unwrap();
    let tuple = local_iter.next();
    match tuple {
        Some((key, value)) => {
            let key_len = key.len();
            if key_len > (key_buffer_size as usize) {
                set_global_error(DbErr::BufferNotEnough(key_len));
                return PLDB_error_code();
            }
            // After the check above this min() is always key_len.
            let real_size = std::cmp::min(key_len, key_buffer_size as usize);
            let cstr = CString::new(key.as_str()).unwrap();
            cstr.as_ptr().copy_to_nonoverlapping(key_buffer, real_size);
            let value_mock = db_value_to_mock_value(value);
            out_val.write(value_mock);
            real_size as c_int
        }
        None => {
            0
        }
    }
}

/// Frees an iterator returned by PLDB_doc_iter.
#[no_mangle]
pub unsafe extern "C" fn PLDB_free_doc_iter(iter: *mut Iter<'static, String, Value>) {
    let _ptr = Box::from_raw(iter);
}

/// Frees a document returned by PLDB_mk_doc (decrements the shared Rc).
#[no_mangle]
pub unsafe extern "C" fn PLDB_free_doc(doc: *mut Rc<Document>) {
    let _ptr = Box::from_raw(doc);
}

/// Returns the current time as a UTC timestamp.
#[no_mangle]
pub unsafe extern "C" fn PLDB_mk_UTCDateTime() -> u64 {
    UTCDateTime::now().timestamp()
}

/// Generates a fresh ObjectId using the context's id maker.
/// Release with PLDB_free_object_id.
#[no_mangle]
pub unsafe extern "C" fn PLDB_mk_object_id(db: *mut DbContext) -> *mut ObjectId {
    let rust_db = db.as_mut().unwrap();
    let oid = rust_db.object_id_maker().mk_object_id();
    let oid = Box::new(oid);
    Box::into_raw(oid)
}

/// Deep-copies an ObjectId; release with PLDB_free_object_id.
#[no_mangle]
pub unsafe extern "C" fn PLDB_dup_object_id(oid: *const ObjectId) -> *mut ObjectId {
    let oid_ref = oid.as_ref().unwrap();
    let new_oid = Box::new(oid_ref.clone());
    Box::into_raw(new_oid)
}

/// Duplicates a value (round-trips through the internal representation, so
/// heap-backed variants get fresh allocations/refcounts).
#[no_mangle]
pub unsafe extern "C" fn PLDB_dup_value(val: ValueMock) -> ValueMock {
    let val = mock_value_to_db_value(val).unwrap();
    db_value_to_mock_value(&val)
}

/// Deserializes an ObjectId from exactly 12 bytes.
/// Returns NULL (with the thread-local error set) on malformed input.
#[no_mangle]
pub unsafe extern "C" fn PLDB_mk_object_id_from_bytes(bytes: *const c_char) -> *mut ObjectId {
    let mut bytes_array: [u8; 12] = [0; 12];
    bytes.cast::<u8>().copy_to(bytes_array.as_mut_ptr(), 12);
    let oid_result = ObjectId::deserialize(&bytes_array);
    if let Err(err) = oid_result {
        set_global_error(DbErr::BsonErr(Box::new(err)));
        return null_mut();
    }
    let oid = Box::new(oid_result.unwrap());
    Box::into_raw(oid)
}

/// Frees an ObjectId returned by PLDB_mk_object_id / PLDB_dup_object_id.
#[no_mangle]
pub unsafe extern "C" fn PLDB_free_object_id(oid: *mut ObjectId) {
    let _ptr = Box::from_raw(oid);
}

/// Writes the hex form of the id into `buffer` (truncated to `buffer_size`,
/// no NUL terminator) and returns the number of bytes written.
#[no_mangle]
pub unsafe extern "C" fn PLDB_object_id_to_hex(oid: *const ObjectId, buffer: *mut c_char, buffer_size: c_uint) -> c_int {
    let rust_oid = oid.as_ref().unwrap();
    let oid_hex = rust_oid.to_hex();
    let size = oid_hex.len();
    let cstr = CString::new(oid_hex).unwrap();
    let real_size = std::cmp::min(size, buffer_size as usize);
    cstr.as_ptr().copy_to_nonoverlapping(buffer, real_size);
    real_size as c_int
}

/// Serializes the id into `bytes`, which must hold at least 12 bytes.
#[no_mangle]
pub unsafe extern "C" fn PLDB_object_id_to_bytes(oid: *const ObjectId, bytes: *mut c_char) {
    let oid = oid.as_ref().unwrap();
    let mut vec: Vec<u8> = Vec::with_capacity(12);
    oid.serialize(&mut vec).unwrap();
    vec.as_ptr().copy_to(bytes.cast(), 12);
}

/// Releases whatever heap allocation a ValueMock owns, keyed on its tag.
/// Scalar tags own nothing; STRING was malloc'd; the rest are boxed Rcs.
#[no_mangle]
pub unsafe extern "C" fn PLDB_free_value(v: ValueMock) {
    match v.tag {
        ty_int::NULL | ty_int::DOUBLE | ty_int::BOOLEAN | ty_int::UTC_DATETIME | ty_int::INT => {
            // ignore
        }
        ty_int::STRING => {
            libc::free(v.value.str.cast());
        }
        ty_int::OBJECT_ID => {
            let _ = Box::from_raw(v.value.oid);
        }
        ty_int::ARRAY => {
            let _ = Box::from_raw(v.value.arr);
        }
        ty_int::DOCUMENT => {
            let _ = Box::from_raw(v.value.doc);
        }
        ty_int::BINARY => {
            let _ = Box::from_raw(v.value.bin);
        }
        _ => unreachable!(),
    }
}

/// Maps every DbErr variant to a stable positive code for the C ABI
/// (negated before being returned to callers).
/// NOTE(review): codes 8 and 11 are unused — presumably removed variants;
/// do not renumber, external callers may depend on these values.
fn error_code_of_db_err(err: &DbErr) -> i32 {
    match err {
        DbErr::UnexpectedIdType(_, _) => 1,
        DbErr::NotAValidKeyType(_) => 2,
        DbErr::ValidationError(_) => 3,
        DbErr::InvalidOrderOfIndex(_) => 4,
        DbErr::IndexAlreadyExists(_) => 5,
        DbErr::FieldTypeUnexpected(_) => 6,
        DbErr::ParseError(_) => 7,
        DbErr::IOErr(_) => 9,
        DbErr::UTF8Err(_) => 10,
        DbErr::DataSizeTooLarge(_, _) => 12,
        DbErr::DecodeEOF => 13,
        DbErr::BsonErr(_) => 14,
        DbErr::DataOverflow => 15,
        DbErr::DataExist(_) => 16,
        DbErr::PageSpaceNotEnough => 17,
        DbErr::DataHasNoPrimaryKey => 18,
        DbErr::ChecksumMismatch => 19,
        DbErr::JournalPageSizeMismatch(_, _) => 20,
        DbErr::SaltMismatch => 21,
        DbErr::PageMagicMismatch(_) => 22,
        DbErr::ItemSizeGreaterThanExpected => 23,
        DbErr::CollectionNotFound(_) => 24,
        DbErr::CollectionIdNotFound(_) => 25,
        DbErr::MetaPageIdError => 26,
        DbErr::CannotWriteDbWithoutTransaction => 27,
        DbErr::StartTransactionInAnotherTransaction => 28,
        DbErr::RollbackNotInTransaction => 29,
        DbErr::IllegalCollectionName(_) => 30,
        DbErr::UnexpectedHeaderForBtreePage(_) => 31,
        DbErr::KeyTypeOfBtreeShouldNotBeZero => 32,
        DbErr::UnexpectedPageHeader => 33,
        DbErr::UnexpectedPageType => 34,
        DbErr::UnknownTransactionType => 35,
        DbErr::BufferNotEnough(_) => 36,
        DbErr::UnknownUpdateOperation(_) => 37,
        DbErr::IncrementNullField => 38,
        DbErr::VmIsHalt => 39,
        DbErr::MetaVersionMismatched(_, _) => 40,
        DbErr::Busy => 41,
        DbErr::InvalidField(_) => 42,
        DbErr::CollectionAlreadyExits(_) => 43,
        DbErr::UnableToUpdatePrimaryKey => 44,
        DbErr::UnexpectedTypeForOp(_) => 45,
        DbErr::NotAValidDatabase => 46,
    }
}
use lazy_static::lazy_static;
use regex::Regex;
use std::collections::HashMap;

lazy_static! {
    static ref MASK_REGEX: Regex = Regex::new(r"^mask = (?P<mask>[X10]+)$").unwrap();
    static ref MEM_REGEX: Regex = Regex::new(r"^mem\[(?P<address>\d+)\] = (?P<value>\d+)$").unwrap();
}

fn main() {
    println!("part one: {}", solve_part_1(include_str!("../input.txt").lines()));
    println!("part two: {}", solve_part_2(include_str!("../input.txt").lines()));
}

/// Part 1: the mask acts on the *value* — '1' forces a bit on, '0' forces it
/// off, 'X' leaves it untouched. Returns the sum of all memory cells.
fn solve_part_1<'a>(lines: impl Iterator<Item = &'a str>) -> u64 {
    let mut ones: u64 = 0;
    let mut zeros: u64 = 0;
    let mut memory: HashMap<u64, u64> = HashMap::new();

    for line in lines {
        if let Some(caps) = MASK_REGEX.captures(line) {
            ones = 0;
            zeros = 0;
            // The mask string is 36 characters, most-significant bit first.
            for (pos, ch) in caps["mask"].chars().enumerate() {
                let bit = 35 - pos;
                match ch {
                    '1' => ones |= 1 << bit,
                    '0' => zeros |= 1 << bit,
                    _ => {}
                }
            }
        } else if let Some(caps) = MEM_REGEX.captures(line) {
            let address: u64 = caps["address"].parse().unwrap();
            let raw: u64 = caps["value"].parse().unwrap();
            memory.insert(address, (raw | ones) & !zeros);
        } else {
            panic!("bad line: {}", line);
        }
    }

    memory.values().sum()
}

/// Part 2: the mask acts on the *address* — '1' forces a bit on, '0' leaves
/// it alone, and each 'X' is "floating": the write lands on every address
/// produced by both settings of every floating bit (2^k targets).
fn solve_part_2<'a>(lines: impl Iterator<Item = &'a str>) -> u64 {
    let mut ones: u64 = 0;
    let mut floating: Vec<u64> = Vec::new();
    let mut memory: HashMap<u64, u64> = HashMap::new();

    for line in lines {
        if let Some(caps) = MASK_REGEX.captures(line) {
            ones = 0;
            floating = Vec::new();
            for (pos, ch) in caps["mask"].chars().enumerate() {
                match ch {
                    '1' => ones |= 1 << (35 - pos),
                    'X' => floating.push(35 - pos as u64),
                    _ => {}
                }
            }
        } else if let Some(caps) = MEM_REGEX.captures(line) {
            let base = caps["address"].parse::<u64>().unwrap() | ones;
            let value: u64 = caps["value"].parse().unwrap();
            // Bit j of `combo` decides whether floating bit j is set or cleared.
            for combo in 0..(1u64 << floating.len()) {
                let mut set: u64 = 0;
                let mut clear: u64 = 0;
                for (j, bit) in floating.iter().enumerate() {
                    if combo & (1u64 << j) != 0 {
                        set |= 1u64 << bit;
                    } else {
                        clear |= 1u64 << bit;
                    }
                }
                memory.insert((base | set) & !clear, value);
            }
        } else {
            panic!("bad line: {}", line);
        }
    }

    memory.values().sum()
}

#[cfg(test)]
mod tests {
    use super::*;

    const INPUT_1: &str = "mask = XXXXXXXXXXXXXXXXXXXXXXXXXXXXX1XXXX0X\nmem[8] = 11\nmem[7] = 101\nmem[8] = 0";
    const INPUT_2: &str = "mask = 000000000000000000000000000000X1001X\nmem[42] = 100\nmask = 00000000000000000000000000000000X0XX\nmem[26] = 1";

    #[test]
    fn it_calculates_the_correct_value_for_part_1() {
        assert_eq!(165, solve_part_1(INPUT_1.lines()));
    }

    #[test]
    fn it_calculates_the_correct_value_for_part_2() {
        assert_eq!(208, solve_part_2(INPUT_2.lines()));
    }
}
pub struct HtmlEntities { }
// error-pattern:whatever fn main() { log(error, "whatever"); // Setting the exit status only works when the scheduler terminates // normally. In this case we're going to fail, so instead of of // returning 50 the process will return the typical rt failure code. sys::set_exit_status(50); fail; }
// NOTE(review): this file follows the svd2rust generated-code layout
// (reader type `R` plus one BitReader per field); prefer regenerating from
// the device SVD over hand-editing it.
#[doc = "Register `ISR` reader"]
pub type R = crate::R<ISR_SPEC>;
#[doc = "Field `EOCALF` reader - End of calibration flag"]
pub type EOCALF_R = crate::BitReader;
#[doc = "Field `JEOCF` reader - End of injected conversion flag"]
pub type JEOCF_R = crate::BitReader;
#[doc = "Field `JOVRF` reader - Injected conversion overrun flag"]
pub type JOVRF_R = crate::BitReader;
#[doc = "Field `REOCF` reader - End of regular conversion flag"]
pub type REOCF_R = crate::BitReader;
#[doc = "Field `ROVRF` reader - Regular conversion overrun flag"]
pub type ROVRF_R = crate::BitReader;
#[doc = "Field `CALIBIP` reader - Calibration in progress status"]
pub type CALIBIP_R = crate::BitReader;
#[doc = "Field `JCIP` reader - Injected conversion in progress status"]
pub type JCIP_R = crate::BitReader;
#[doc = "Field `RCIP` reader - Regular conversion in progress status"]
pub type RCIP_R = crate::BitReader;
#[doc = "Field `STABIP` reader - Stabilization in progress status"]
pub type STABIP_R = crate::BitReader;
#[doc = "Field `INITRDY` reader - Initialization mode is ready"]
pub type INITRDY_R = crate::BitReader;
impl R {
    #[doc = "Bit 0 - End of calibration flag"]
    #[inline(always)]
    pub fn eocalf(&self) -> EOCALF_R {
        EOCALF_R::new((self.bits & 1) != 0)
    }
    #[doc = "Bit 1 - End of injected conversion flag"]
    #[inline(always)]
    pub fn jeocf(&self) -> JEOCF_R {
        JEOCF_R::new(((self.bits >> 1) & 1) != 0)
    }
    #[doc = "Bit 2 - Injected conversion overrun flag"]
    #[inline(always)]
    pub fn jovrf(&self) -> JOVRF_R {
        JOVRF_R::new(((self.bits >> 2) & 1) != 0)
    }
    #[doc = "Bit 3 - End of regular conversion flag"]
    #[inline(always)]
    pub fn reocf(&self) -> REOCF_R {
        REOCF_R::new(((self.bits >> 3) & 1) != 0)
    }
    #[doc = "Bit 4 - Regular conversion overrun flag"]
    #[inline(always)]
    pub fn rovrf(&self) -> ROVRF_R {
        ROVRF_R::new(((self.bits >> 4) & 1) != 0)
    }
    #[doc = "Bit 12 - Calibration in progress status"]
    #[inline(always)]
    pub fn calibip(&self) -> CALIBIP_R {
        CALIBIP_R::new(((self.bits >> 12) & 1) != 0)
    }
    #[doc = "Bit 13 - Injected conversion in progress status"]
    #[inline(always)]
    pub fn jcip(&self) -> JCIP_R {
        JCIP_R::new(((self.bits >> 13) & 1) != 0)
    }
    #[doc = "Bit 14 - Regular conversion in progress status"]
    #[inline(always)]
    pub fn rcip(&self) -> RCIP_R {
        RCIP_R::new(((self.bits >> 14) & 1) != 0)
    }
    #[doc = "Bit 15 - Stabilization in progress status"]
    #[inline(always)]
    pub fn stabip(&self) -> STABIP_R {
        STABIP_R::new(((self.bits >> 15) & 1) != 0)
    }
    #[doc = "Bit 31 - Initialization mode is ready"]
    #[inline(always)]
    pub fn initrdy(&self) -> INITRDY_R {
        INITRDY_R::new(((self.bits >> 31) & 1) != 0)
    }
}
#[doc = "interrupt and status register\n\nYou can [`read`](crate::generic::Reg::read) this register and get [`isr::R`](R). See [API](https://docs.rs/svd2rust/#read--modify--write-api)."]
pub struct ISR_SPEC;
impl crate::RegisterSpec for ISR_SPEC {
    type Ux = u32;
}
#[doc = "`read()` method returns [`isr::R`](R) reader structure"]
impl crate::Readable for ISR_SPEC {}
#[doc = "`reset()` method sets ISR to value 0"]
impl crate::Resettable for ISR_SPEC {
    const RESET_VALUE: Self::Ux = 0;
}
use crate::appkit::NSApplication;
use crate::appkit::NSApplicationActivationOptions;
use crate::appkit::NSApplicationActivationPolicy;
use crate::appkit::NSImage;
use crate::appkit::NSMenu;
use crate::appkit::NSMenuItem;
use crate::appkit::NSRunningApplication;
use crate::appkit::NSStatusBar;
use crate::appkit::NSStatusItem;
use crate::base::YES;
use crate::foundation::NSAutoreleasePool;
use crate::ui::MenuItem;

// Fixed typo: `OPITONS` -> `OPTIONS` (file-private static, no external users).
static OPTIONS: NSApplicationActivationOptions = NSApplicationActivationOptions::IgnoringOtherApps;
static POLICY: NSApplicationActivationPolicy = NSApplicationActivationPolicy::Accessory;

/// Builder for a macOS status-bar (menu bar) item backed by AppKit.
pub struct SysBar {
    // Menu attached to the status item.
    menu: NSMenu,
    // Autorelease pool owning the Objective-C objects; drained on drop.
    pool: NSAutoreleasePool,
    // Optional path/contents for the status-item image.
    icon: Option<String>,
    // Optional status-item title.
    title: Option<String>,
}

impl Default for SysBar {
    fn default() -> Self {
        Self::new()
    }
}

impl SysBar {
    /// Creates an empty bar with its own autorelease pool and menu.
    pub fn new() -> Self {
        let pool = NSAutoreleasePool::new();
        let menu = NSMenu::alloc().autorelease();
        Self {
            menu,
            pool,
            icon: None,
            title: None,
        }
    }

    /// Sets the title shown next to (or instead of) the icon.
    pub fn title<T: Into<String>>(&mut self, title: T) {
        self.title = Some(title.into());
    }

    /// Sets the image used for the status item.
    pub fn icon<T: Into<String>>(&mut self, icon: T) {
        self.icon = Some(icon.into());
    }

    /// Appends a clickable menu item invoking `f` when selected.
    pub fn item<F: Fn() + 'static>(&mut self, title: &str, f: F) {
        self.menu.add(MenuItem::new(title, f).inner());
    }

    /// Appends a separator line to the menu.
    pub fn separator(&mut self) {
        self.menu.add(NSMenuItem::separator());
    }

    /// Appends a "quit" item that terminates the application.
    pub fn quit(&mut self, title: &str) {
        self
            .menu
            .add(NSMenuItem::terminator().autorelease().title(title));
    }

    /// Installs the status item and enters the AppKit run loop.
    /// This call blocks until the application terminates.
    pub fn run(&mut self) {
        let app = NSApplication::new()
            .activation_policy(POLICY)
            .activate_ignoring(YES);

        let icon = self.icon.clone().unwrap_or_default();
        let title = self.title.clone().unwrap_or_default();

        // Template images adapt automatically to light/dark menu bars.
        let main_image = NSImage::alloc()
            .with_contents(icon.clone())
            .template(YES);
        let alt_image = NSImage::alloc().with_contents(icon);

        let _ = NSStatusItem::from_bar(NSStatusBar::system())
            .title(title)
            .image(main_image)
            .alt_image(alt_image)
            .menu(self.menu);

        let _ = NSRunningApplication::current().activate(OPTIONS);

        app.run();
    }
}

impl Drop for SysBar {
    fn drop(&mut self) {
        // Release every Objective-C object accumulated in the pool.
        self.pool.drain();
    }
}
extern crate libloading; extern crate chan; extern crate rustc_serialize; extern crate regex; pub mod core; use regex::Regex; use core::net::TcpWriter; use core::threads::HandlerThread; use core::parser; use core::parser::IrcMessage; pub struct SayPlugin; macro_rules! command_match { ($m:ident =~ $c:expr) => { { let re = Regex::new(&format!("^-{}\\s(.+)", $c)).unwrap(); if !re.is_match(&$m) { return } re.captures(&$m).unwrap().at(1).unwrap() } }; } impl SayPlugin { pub fn say(bot: &mut TcpWriter, msg: &IrcMessage) { let (nick, chan, msg) = parser::parse_privmsg(msg); let args = command_match![msg =~ "say"]; bot.write(&format!("PRIVMSG {} :{}", chan, args)); } } #[no_mangle] pub fn initialize(thread: &mut HandlerThread) { thread.add_handler("PRIVMSG", SayPlugin::say); }
use crate::decoder::decoder::{Decoder, DecoderResult}; // http://webassembly.github.io/spec/core/binary/values.html#integers fn decode_unsigned_leb_128(decoder: &mut Decoder) -> DecoderResult<u64> { let mut result: u64 = 0; let mut shift = 0; loop { let byte = decoder.eat_byte()?; // Extract the low order 7 bits of byte, left shift the byte and add them to the current // result. result |= ((byte & 0x7f) as u64) << shift; // TODO: understand why it is the case // https://github.com/yurydelendik/wasmparser.rs/blob/master/src/binary_reader.rs#L436-L461 if shift >= 25 && (byte >> (32 - shift)) != 0 { return Err(decoder.produce_error("Invalid LEB 128 encoding")); } // Increase the shift by one. shift += 7; // Repeat until the highest order bit (0x80) is 0. if (byte & 0x80) != 0x80 { break; } } Ok(result) } pub fn decode_u32(decoder: &mut Decoder) -> DecoderResult<u32> { Ok(decode_unsigned_leb_128(decoder)? as u32) } pub fn decode_i32(decoder: &mut Decoder) -> DecoderResult<i32> { Ok(decode_unsigned_leb_128(decoder)? as i32) } pub fn decode_i64(decoder: &mut Decoder) -> DecoderResult<i64> { Ok(decode_unsigned_leb_128(decoder)? as i64) } pub fn decode_f32(decoder: &mut Decoder) -> DecoderResult<f32> { let mut bits: u32 = 0; for _ in 0..4 { bits = (bits << 8) | decoder.eat_byte()? as u32; } Ok(f32::from_bits(bits)) } pub fn decode_f64(decoder: &mut Decoder) -> DecoderResult<f64> { let mut bits: u64 = 0; for _ in 0..8 { bits = (bits << 8) | decoder.eat_byte()? as u64; } Ok(f64::from_bits(bits)) } /// https://webassembly.github.io/spec/core/binary/values.html#binary-name /// /// The higher bits in the first byte contains a mask describing the number of byte encoding the /// character. In UTF-8 characters can be encoded over 1 to 4 bytes. 
pub fn decode_name(decoder: &mut Decoder) -> DecoderResult<String> { let mut chars = Vec::new(); let vector_size = decode_u32(decoder)?; for _ in 0..vector_size { let byte1 = decoder.eat_byte()?; // 1 byte sequence with no continuation byte // [0xxxxxxx] if (byte1 & 0x80) == 0 { chars.push(byte1); } // 2 bytes sequence // [110xxxxx, 10xxxxxx] else if (byte1 & 0xe0) == 0xc0 { let byte2 = decoder.eat_byte()?; chars.push(((byte1 & 0x1f) << 6) | byte2); } // // 3 bytes sequence // // [1110xxxx, 10xxxxxx, 10xxxxxx] // if (byte1 & 0xf0) == 0xe0 { // let byte2 = decoder.eat_byte(); // let byte3 = decoder.eat_byte(); // chars.push(((byte1 & 0x0f) << 12) | (byte2 << 6) | byte3); // } // // 4 bytes sequence // // [11110xxx, 10xxxxxx, 10xxxxxx, 10xxxxxx] // if (byte1 & 0xf8) == 0xf0 { // let byte2 = decoder.eat_byte(); // let byte3 = decoder.eat_byte(); // let byte4 = decoder.eat_byte(); // chars.push( // ((byte1 & 0x07) << 18) | (byte2 << 12) | (byte3 << 6) | byte4 // ); // } else { return Err(decoder.produce_error("Invalid utf-8 encoding")); } } match String::from_utf8(chars) { Ok(s) => Ok(s), Err(_) => Err(decoder.produce_error("Invalid utf-encoding")), } }